Compare commits
334 Commits
Commit SHA1s (abbreviated):

82b087e51e da682eaef6 688562c049 50e6fad0e6 d5cce435ce 8f75cfe247 686b775e35 a3be6bacc6 b493d554ad 0fc3bcabfd
7cf36c488d 89c39d0be1 f5eb5a3ba6 64c2e759ab a09ee672a6 057b5d7e24 fe4c59e38d 305d5d97d8 05f464798f 96d8ee9e07
471a3f991b fd9e003ae1 e26ff09df7 fdcf589f7c f44dfc8d04 c736c4633e 7099d07fd3 e3b47d22d7 f789da1e20 b74bb12ebe
ddf019c1a4 74d57b1c2f 086e653a0b 225d15bde8 6d8d5d1379 fcd2a78d73 5cf52c3c20 bc334427e3 d0cac2a2ea 3743e44fb3
f5ef999b31 55d5339daa 594b69395a 6dc62bfb77 08b53c0963 b5baa966ac ff38a46af6 841f596b26 5e1497856b e085b580b5
d967ff0138 71e3a99742 640f4a1ec7 8d904877ef fbe4e95e6a e7e7cbe632 7e5b75fa7e 60beb509f7 15f6b6ccd6 f56f3d81b5
098c94352d c929e8e02b 80f2c485ba f855f4d1c0 81a26aa4fc 855cf9a362 96ba314281 e52e6dfbaa 181ff3d13e 195ce0ed79
b24e301201 fb492efda8 c40216350c d031bbcf2e c183c3a5ec f04d4af4f2 ea0a3521ed a75898a415 a77fce465a 719f162229
a39cc8d21f 87767b181d 88b19e10cb b42cda32ff 2344391c48 207d13e429 360f166f5a b4deeb8e36 b3e1fde8b2 b838aba840
c8b3d0ba07 e7106278e9 63cf168fef 71ea198a07 f6b65b033e 0311ad5ddf f5454e62a1 89ea4dfa8b fefcfdba80 1072bd7640
a7280f117a d46a19098a 44fffcbb1c e14c2f94f4 437c356626 fd68f8ba2e 2374664e95 2cb9ca5966 4f247a232f 15a2c73826
d23f1436c5 70c87d1a23 053ce10ce5 055eb360c2 25cd1ceeeb 52ee3b1cee bbadcca414 e9eba97299 920b63944e 8104895a07
c9e646b86b 7c47db1e3d c619e9b560 ccd48923a0 4e797cc867 9627bfced3 f823b10597 c9e56c9749 da7482d631 97650c7f37
e738bf1c9a afebb2a8a5 4e4fd03b65 049ca18dc5 495c64556e 747e91d434 6d4f6e79b0 98e9f34704 70f74174e8 70985f82f1
3b2bdd9f8a d33ae59fbf 9ead7ca11a dbcef35f7d 9e733d7d9b 39f1240ec2 fa249721fa 137793cd4c 47d8608aee ed410aea10
957dfa8f73 98095b6f8d a2c32d7d0e b15d826476 ed97a2578d 89f61f0b41 04cc9c1148 8314ab4508 3a98042753 60d316c9fd
e324c221a6 61246999ac e476dc4eaa ee18e7668b 62db7f6562 2e9b501355 089a99f1e3 57961b1d17 fe8b2b7850 0bf45cbab6
5877427389 25141288f4 b28d10d46f b6dc48da75 f2d929c12d c49b89091a 23fe3a86d9 2f778725d6 93a119a51e 65a7b536c9
1281483a8c 4312841433 b859acbfea 40a3885d3b 36b7c2ea97 24bd4ff6d4 69b3f10207 9922f09a1d 38a99c0c25 7031235714
dfb2356a9a 010794806a 6f95d5f72a 2720b939fd a25c3fcf7d 7cc4810174 c1a55bf249 f19778b7d9 eecdacac42 429f130532
19b9839dfc ad2bf3afa6 5c739ba236 9fac507606 d5a37cb06e 86eb0157c0 072dab0948 e20e79f412 f118db81ce 4ecb78d303
0a28e40606 4d7a5a9daf 64cf6b4388 f334c3b895 15a7bcd4fe 8d6636d02a cf896dbeee e5b60b75f8 0e155fdbd9 b79a337ddd
c4050f541d f0b74a4ecf f7979378fd d7c5607982 91ab5ae990 605e767259 597618846b 712267bf51 eb9cef0cd4 62e0e19961
9aee9cb867 2b11558b36 18c4e1b023 6bac44ed92 8cb622084f 38f7e9a979 a536f779ee 84a5e5ec97 dd33aae3cf be2ace47e3
53b074aa35 a4ad2925a2 edfbd8d65a e0ae54fd7e 60297951ec e0f9f72a2c 5236acf4b0 5dd61b5db2 8752bf0826 b1712e80ec
2fe1a4677c f76c128f4f b143b9c3c2 b4178b75e7 c54b751df7 0fde9e893f d24abbdac9 3b03985f28 d05bfa08c7 9da2ac7a44
9e95d0fb58 94cad9873c f33171b32f aa6406f263 77864a6bee b51715d33c 7d395177d4 77ba6d6784 4bf0a54ed7 8a526f2f53
43f99bdb31 0008dc62e4 7e8174fb79 4ad2ebcde9 da183d655a 2e449276be 8cac6835c0 677c184e47 f36cfb92f7 e7062de05f
a443e2e773 3a6db787e2 f3c986385f 3df811348f e8c86ed3e5 489a7bb576 688dd85e61 fe54a30084 80942841a2 d2808a8e29
f8413a28b5 bc96332899 ceeeb3c9dd bd058ec8f1 4b5ac44fc8 fef979f0e5 cca2cdf39b 6e91996606 99be00d61f 68f808026e
0c7ab0a1bb 3d8ccbc079 275d096a46 6d70a06b23 7e68b2e1bd f0d761630e 1986927eb6 9a0ea61007 51a70e52f2 9797c93db3
9598114658 d3ef061044 1f102d2617 5e97bd3d8a ed06e8b85d 10b9225edb b1b5ddd2b9 6b9b107ead 3fef98fd1e e999823b4b
1e46a5b147 567a1cb944 2996cebfaa 7a1889aaf9 9188153b7d 4b9f2e987a bb5c76483a aafbb251b9 dd32540ceb e1a9cbaf3c
83740246fc 7a53ca18a6 c1a8637cb5 d6831309cd
Binary file not shown.

.cache/fontconfig/CACHEDIR.TAG (normal file, 4 lines)
@@ -0,0 +1,4 @@
Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag created by fontconfig.
# For information about cache directory tags, see:
# http://www.brynosaurus.com/cachedir/

Binary file not shown.
Binary file not shown.
Binary file not shown.
.gitignore (vendored, 2 lines changed)
@@ -14,8 +14,6 @@

/config/

.cache

# log files
*.log
log-*.txt
.mergify.yml (24 lines changed)
@@ -50,6 +50,14 @@ pull_request_rules:
      label:
        add:
          - automerge
  - name: v1.2 backport
    conditions:
      - label=v1.2
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.2
  - name: v1.3 backport
    conditions:
      - label=v1.3
@@ -66,19 +74,3 @@ pull_request_rules:
        ignore_conflicts: true
        branches:
          - v1.4
  - name: v1.5 backport
    conditions:
      - label=v1.5
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.5
  - name: v1.6 backport
    conditions:
      - label=v1.6
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.6
Cargo.lock (generated, 876 lines changed)
File diff suppressed because it is too large.
@@ -53,7 +53,6 @@ members = [
    "remote-wallet",
    "ramp-tps",
    "runtime",
    "runtime/store-tool",
    "sdk",
    "sdk/cargo-build-bpf",
    "sdk/cargo-test-bpf",
@@ -67,6 +66,7 @@ members = [
    "upload-perf",
    "net-utils",
    "version",
    "vote-signer",
    "cli",
    "rayon-threadlimit",
    "watchtower",
@@ -29,7 +29,7 @@ On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc

```bash
$ sudo apt-get update
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang
```

## **2. Download the source code.**

@@ -108,5 +108,3 @@ send us that patch!

# Disclaimer

All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.

Any content produced by Solana, or developer resources that Solana provides, are for educational and inspiration purposes only. Solana does not encourage, induce or sanction the deployment of any such applications in violation of applicable laws or regulations.
@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.5.1"
version = "1.4.13"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,11 +18,11 @@ lazy_static = "1.4.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-stake-program = { path = "../programs/stake", version = "1.5.1" }
solana-vote-program = { path = "../programs/vote", version = "1.5.1" }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
solana-config-program = { path = "../programs/config", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
solana-stake-program = { path = "../programs/stake", version = "1.4.13" }
solana-vote-program = { path = "../programs/vote", version = "1.4.13" }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.5.1"
@@ -81,7 +81,7 @@ pub fn parse_account_data(
) -> Result<ParsedAccount, ParseAccountError> {
    let program_name = PARSABLE_PROGRAM_IDS
        .get(program_id)
        .ok_or(ParseAccountError::ProgramNotParsable)?;
        .ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
    let additional_data = additional_data.unwrap_or_default();
    let parsed_json = match program_name {
        ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?,
@@ -118,7 +118,7 @@ mod test {

        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::new_current(vote_state);
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let parsed = parse_account_data(
            &account_pubkey,
@@ -217,6 +217,7 @@ mod test {
        account::create_account, fee_calculator::FeeCalculator, hash::Hash,
        sysvar::recent_blockhashes::IterItem,
    };
    use std::iter::FromIterator;

    #[test]
    fn test_parse_sysvars() {
@@ -249,9 +250,8 @@ mod test {
        let fee_calculator = FeeCalculator {
            lamports_per_signature: 10,
        };
        let recent_blockhashes: RecentBlockhashes = vec![IterItem(0, &hash, &fee_calculator)]
            .into_iter()
            .collect();
        let recent_blockhashes =
            RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
        let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
        assert_eq!(
            parse_sysvar(
@@ -128,13 +128,11 @@
    fn test_parse_vote() {
        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::new_current(vote_state);
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let expected_vote_state = UiVoteState {
            node_pubkey: Pubkey::default().to_string(),
            authorized_withdrawer: Pubkey::default().to_string(),
            ..UiVoteState::default()
        };
        let mut expected_vote_state = UiVoteState::default();
        expected_vote_state.node_pubkey = Pubkey::default().to_string();
        expected_vote_state.authorized_withdrawer = Pubkey::default().to_string();
        assert_eq!(
            parse_vote(&vote_account_data).unwrap(),
            VoteAccountType::Vote(expected_vote_state)
@@ -2,20 +2,20 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.5.1"
version = "1.4.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
publish = false

[dependencies]
log = "0.4.11"
log = "0.4.6"
rayon = "1.4.0"
solana-logger = { path = "../logger", version = "1.5.1" }
solana-runtime = { path = "../runtime", version = "1.5.1" }
solana-measure = { path = "../measure", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-version = { path = "../version", version = "1.5.1" }
solana-logger = { path = "../logger", version = "1.4.13" }
solana-runtime = { path = "../runtime", version = "1.4.13" }
solana-measure = { path = "../measure", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
solana-version = { path = "../version", version = "1.4.13" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"
@@ -98,10 +98,7 @@ fn main() {
    } else {
        let mut pubkeys: Vec<Pubkey> = vec![];
        let mut time = Measure::start("hash");
        let hash = accounts
            .accounts_db
            .update_accounts_hash(0, &ancestors, true)
            .0;
        let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors).0;
        time.stop();
        println!("hash: {} {}", hash, time);
        create_test_accounts(&accounts, &mut pubkeys, 1, 0);
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.5.1"
version = "1.4.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,19 +11,19 @@ publish = false
[dependencies]
clap = "2.33.1"
crossbeam-channel = "0.4"
log = "0.4.11"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.4.0"
solana-core = { path = "../core", version = "1.5.1" }
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
solana-streamer = { path = "../streamer", version = "1.5.1" }
solana-perf = { path = "../perf", version = "1.5.1" }
solana-ledger = { path = "../ledger", version = "1.5.1" }
solana-logger = { path = "../logger", version = "1.5.1" }
solana-runtime = { path = "../runtime", version = "1.5.1" }
solana-measure = { path = "../measure", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-version = { path = "../version", version = "1.5.1" }
solana-core = { path = "../core", version = "1.4.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
solana-streamer = { path = "../streamer", version = "1.4.13" }
solana-perf = { path = "../perf", version = "1.4.13" }
solana-ledger = { path = "../ledger", version = "1.4.13" }
solana-logger = { path = "../logger", version = "1.4.13" }
solana-runtime = { path = "../runtime", version = "1.4.13" }
solana-measure = { path = "../measure", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
solana-version = { path = "../version", version = "1.4.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -17,9 +17,7 @@ use solana_ledger::{
};
use solana_measure::measure::Measure;
use solana_perf::packet::to_packets_chunked;
use solana_runtime::{
    accounts_background_service::ABSRequestSender, bank::Bank, bank_forks::BankForks,
};
use solana_runtime::{bank::Bank, bank_forks::BankForks};
use solana_sdk::{
    hash::Hash,
    signature::Keypair,
@@ -325,7 +323,7 @@ fn main() {
            poh_recorder.lock().unwrap().set_bank(&bank);
            assert!(poh_recorder.lock().unwrap().bank().is_some());
            if bank.slot() > 32 {
                bank_forks.set_root(root, &ABSRequestSender::default(), None);
                bank_forks.set_root(root, &None, None);
                root += 1;
            }
            debug!(
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.5.1"
version = "1.4.13"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,18 +9,18 @@ homepage = "https://solana.com/"
edition = "2018"

[dependencies]
async-trait = "0.1.36"
bincode = "1.3.1"
futures = "0.3"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-banks-interface = { path = "../banks-interface", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
tarpc = { version = "0.23.0", features = ["full"] }
tokio = { version = "0.3.5", features = ["full"] }
tokio = { version = "0.3", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] }

[dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.5.1" }
solana-banks-server = { path = "../banks-server", version = "1.5.1" }
solana-runtime = { path = "../runtime", version = "1.4.13" }
solana-banks-server = { path = "../banks-server", version = "1.4.13" }

[lib]
crate-type = ["lib"]
@@ -5,8 +5,9 @@
|
||||
//! but they are undocumented, may change over time, and are generally more
|
||||
//! cumbersome to use.
|
||||
|
||||
use futures::{future::join_all, Future, FutureExt};
|
||||
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
|
||||
use async_trait::async_trait;
|
||||
use futures::future::join_all;
|
||||
pub use solana_banks_interface::{BanksClient, TransactionStatus};
|
||||
use solana_banks_interface::{BanksRequest, BanksResponse};
|
||||
use solana_sdk::{
|
||||
account::{from_account, Account},
|
||||
@@ -18,237 +19,186 @@ use solana_sdk::{
|
||||
rent::Rent,
|
||||
signature::Signature,
|
||||
sysvar,
|
||||
transaction::{self, Transaction},
|
||||
transaction::Transaction,
|
||||
transport,
|
||||
};
|
||||
use std::io::{self, Error, ErrorKind};
|
||||
use tarpc::{
|
||||
client::{self, channel::RequestDispatch, NewClient},
|
||||
context::{self, Context},
|
||||
rpc::{ClientMessage, Response},
|
||||
client, context,
|
||||
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
|
||||
serde_transport::tcp,
|
||||
Transport,
|
||||
};
|
||||
use tokio::{net::ToSocketAddrs, time::Duration};
|
||||
use tokio_serde::formats::Bincode;
|
||||
|
||||
// This exists only for backward compatibility
|
||||
pub trait BanksClientExt {}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct BanksClient {
|
||||
inner: TarpcClient,
|
||||
}
|
||||
|
||||
impl BanksClient {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new<C>(
|
||||
config: client::Config,
|
||||
transport: C,
|
||||
) -> NewClient<TarpcClient, RequestDispatch<BanksRequest, BanksResponse, C>>
|
||||
where
|
||||
C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>>,
|
||||
{
|
||||
TarpcClient::new(config, transport)
|
||||
}
|
||||
|
||||
pub fn send_transaction_with_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = io::Result<()>> + '_ {
|
||||
self.inner.send_transaction_with_context(ctx, transaction)
|
||||
}
|
||||
|
||||
pub fn get_fees_with_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
|
||||
self.inner
|
||||
.get_fees_with_commitment_and_context(ctx, commitment)
|
||||
}
|
||||
|
||||
pub fn get_transaction_status_with_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
signature: Signature,
|
||||
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
|
||||
self.inner
|
||||
.get_transaction_status_with_context(ctx, signature)
|
||||
}
|
||||
|
||||
pub fn get_slot_with_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
self.inner.get_slot_with_context(ctx, commitment)
|
||||
}
|
||||
|
||||
pub fn process_transaction_with_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
|
||||
self.inner
|
||||
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
}
|
||||
|
||||
pub fn get_account_with_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
|
||||
self.inner
|
||||
.get_account_with_commitment_and_context(ctx, address, commitment)
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait BanksClientExt {
|
||||
/// Send a transaction and return immediately. The server will resend the
|
||||
/// transaction until either it is accepted by the cluster or the transaction's
|
||||
/// blockhash expires.
|
||||
pub fn send_transaction(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = io::Result<()>> + '_ {
|
||||
self.send_transaction_with_context(context::current(), transaction)
|
||||
}
|
||||
|
||||
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
|
||||
/// will use the transaction's blockhash to look up these same fee parameters and
|
||||
/// use them to calculate the transaction fee.
|
||||
pub fn get_fees(
|
||||
&mut self,
|
||||
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
|
||||
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::Root)
|
||||
}
|
||||
|
||||
/// Return the cluster rent
|
||||
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
|
||||
self.get_account(sysvar::rent::id()).map(|result| {
|
||||
let rent_sysvar = result?
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
|
||||
from_account::<Rent>(&rent_sysvar).ok_or_else(|| {
|
||||
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
|
||||
})
|
||||
})
|
||||
}
|
||||
async fn send_transaction(&mut self, transaction: Transaction) -> io::Result<()>;
|
||||
|
||||
/// Return a recent, rooted blockhash from the server. The cluster will only accept
|
||||
/// transactions with a blockhash that has not yet expired. Use the `get_fees`
|
||||
/// method to get both a blockhash and the blockhash's last valid slot.
|
||||
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
|
||||
self.get_fees().map(|result| Ok(result?.1))
|
||||
}
|
||||
async fn get_recent_blockhash(&mut self) -> io::Result<Hash>;
|
||||
|
||||
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
|
||||
/// will use the transaction's blockhash to look up these same fee parameters and
|
||||
/// use them to calculate the transaction fee.
|
||||
async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)>;
|
||||
|
||||
/// Return the cluster rent
|
||||
async fn get_rent(&mut self) -> io::Result<Rent>;
|
||||
|
||||
/// Send a transaction and return after the transaction has been rejected or
|
||||
/// reached the given level of commitment.
|
||||
pub fn process_transaction_with_commitment(
|
||||
async fn process_transaction_with_commitment(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = transport::Result<()>> + '_ {
|
||||
let mut ctx = context::current();
|
||||
ctx.deadline += Duration::from_secs(50);
|
||||
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.map(|result| match result? {
|
||||
None => {
|
||||
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
|
||||
}
|
||||
Some(transaction_result) => Ok(transaction_result?),
|
||||
})
|
||||
}
|
||||
) -> transport::Result<()>;
|
||||
|
||||
/// Send a transaction and return until the transaction has been finalized or rejected.
|
||||
pub fn process_transaction(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = transport::Result<()>> + '_ {
|
||||
self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
|
||||
}
|
||||
|
||||
pub async fn process_transactions_with_commitment(
|
||||
&mut self,
|
||||
transactions: Vec<Transaction>,
|
||||
commitment: CommitmentLevel,
|
||||
) -> transport::Result<()> {
|
||||
let mut clients: Vec<_> = transactions.iter().map(|_| self.clone()).collect();
|
||||
let futures = clients
|
||||
.iter_mut()
|
||||
.zip(transactions)
|
||||
.map(|(client, transaction)| {
|
||||
client.process_transaction_with_commitment(transaction, commitment)
|
||||
});
|
||||
let statuses = join_all(futures).await;
|
||||
statuses.into_iter().collect() // Convert Vec<Result<_, _>> to Result<Vec<_>>
|
||||
}
|
||||
|
||||
/// Send transactions and return until the transaction has been finalized or rejected.
|
||||
pub fn process_transactions(
|
||||
&mut self,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> impl Future<Output = transport::Result<()>> + '_ {
|
||||
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
|
||||
}
|
||||
|
||||
/// Return the most recent rooted slot height. All transactions at or below this height
|
||||
/// are said to be finalized. The cluster will not fork to a higher slot height.
|
||||
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
self.get_slot_with_context(context::current(), CommitmentLevel::Root)
|
||||
}
|
||||
|
||||
/// Return the account at the given address at the slot corresponding to the given
|
||||
/// commitment level. If the account is not found, None is returned.
|
||||
pub fn get_account_with_commitment(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
|
||||
self.get_account_with_commitment_and_context(context::current(), address, commitment)
|
||||
}
|
||||
|
||||
/// Return the account at the given address at the time of the most recent root slot.
|
||||
/// If the account is not found, None is returned.
|
||||
pub fn get_account(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
|
||||
self.get_account_with_commitment(address, CommitmentLevel::default())
|
||||
}
|
||||
|
||||
/// Return the balance in lamports of an account at the given address at the slot
|
||||
/// corresponding to the given commitment level.
|
||||
pub fn get_balance_with_commitment(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<u64>> + '_ {
|
||||
self.get_account_with_commitment_and_context(context::current(), address, commitment)
|
||||
.map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0)))
|
||||
}
|
||||
|
||||
/// Return the balance in lamports of an account at the given address at the time
|
||||
/// of the most recent root slot.
|
||||
pub fn get_balance(&mut self, address: Pubkey) -> impl Future<Output = io::Result<u64>> + '_ {
|
||||
self.get_balance_with_commitment(address, CommitmentLevel::default())
|
||||
}
|
||||
/// Send a transaction and return after the transaction has been finalized or rejected.
|
||||
async fn process_transaction(&mut self, transaction: Transaction) -> transport::Result<()>;
|
||||
|
||||
/// Return the status of a transaction with a signature matching the transaction's first
|
||||
/// signature. Return None if the transaction is not found, which may be because the
|
||||
/// blockhash was expired or the fee-paying account had insufficient funds to pay the
|
||||
/// transaction fee. Note that servers rarely store the full transaction history. This
|
||||
/// method may return None if the transaction status has been discarded.
|
||||
pub fn get_transaction_status(
|
||||
async fn get_transaction_status(
|
||||
&mut self,
|
||||
signature: Signature,
|
||||
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
|
||||
self.get_transaction_status_with_context(context::current(), signature)
|
||||
}
|
||||
) -> io::Result<Option<TransactionStatus>>;
|
||||
|
||||
/// Same as get_transaction_status, but for multiple transactions.
|
||||
pub async fn get_transaction_statuses(
|
||||
async fn get_transaction_statuses(
|
||||
&mut self,
|
||||
signatures: Vec<Signature>,
|
||||
) -> io::Result<Vec<Option<TransactionStatus>>>;
|
||||
|
||||
/// Return the most recent rooted slot height. All transactions at or below this height
|
||||
/// are said to be finalized. The cluster will not fork to a higher slot height.
|
||||
async fn get_root_slot(&mut self) -> io::Result<Slot>;
|
||||
|
||||
/// Return the account at the given address at the slot corresponding to the given
|
||||
/// commitment level. If the account is not found, None is returned.
|
||||
async fn get_account_with_commitment(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> io::Result<Option<Account>>;
|
||||
|
||||
/// Return the account at the given address at the time of the most recent root slot.
|
||||
/// If the account is not found, None is returned.
|
||||
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>>;
|
||||
|
||||
/// Return the balance in lamports of an account at the given address at the slot
|
||||
/// corresponding to the given commitment level.
|
||||
async fn get_balance_with_commitment(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> io::Result<u64>;
|
||||
|
||||
/// Return the balance in lamports of an account at the given address at the time
|
||||
/// of the most recent root slot.
|
||||
async fn get_balance(&mut self, address: Pubkey) -> io::Result<u64>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl BanksClientExt for BanksClient {
|
||||
async fn send_transaction(&mut self, transaction: Transaction) -> io::Result<()> {
|
||||
self.send_transaction_with_context(context::current(), transaction)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)> {
|
||||
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::Root)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_rent(&mut self) -> io::Result<Rent> {
|
||||
let rent_sysvar = self
|
||||
.get_account(sysvar::rent::id())
|
||||
.await?
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
|
||||
|
||||
from_account::<Rent>(&rent_sysvar).ok_or_else(|| {
|
||||
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_recent_blockhash(&mut self) -> io::Result<Hash> {
|
||||
Ok(self.get_fees().await?.1)
|
||||
}
|
||||
|
||||
async fn process_transaction_with_commitment(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> transport::Result<()> {
|
||||
let mut ctx = context::current();
|
||||
ctx.deadline += Duration::from_secs(50);
|
||||
let result = self
|
||||
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.await?;
|
||||
match result {
|
||||
None => Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into()),
|
||||
Some(transaction_result) => Ok(transaction_result?),
|
||||
}
|
||||
}
|
||||
|
||||
async fn process_transaction(&mut self, transaction: Transaction) -> transport::Result<()> {
|
||||
self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_root_slot(&mut self) -> io::Result<Slot> {
|
||||
self.get_slot_with_context(context::current(), CommitmentLevel::Root)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_account_with_commitment(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> io::Result<Option<Account>> {
|
||||
self.get_account_with_commitment_and_context(context::current(), address, commitment)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_account(&mut self, address: Pubkey) -> io::Result<Option<Account>> {
|
||||
self.get_account_with_commitment(address, CommitmentLevel::default())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_balance_with_commitment(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> io::Result<u64> {
|
||||
let account = self
|
||||
.get_account_with_commitment_and_context(context::current(), address, commitment)
|
||||
.await?;
|
||||
Ok(account.map(|x| x.lamports).unwrap_or(0))
|
||||
}
|
||||
|
||||
async fn get_balance(&mut self, address: Pubkey) -> io::Result<u64> {
|
||||
self.get_balance_with_commitment(address, CommitmentLevel::default())
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_transaction_status(
|
||||
&mut self,
|
||||
signature: Signature,
|
||||
) -> io::Result<Option<TransactionStatus>> {
|
||||
self.get_transaction_status_with_context(context::current(), signature)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_transaction_statuses(
|
||||
&mut self,
|
||||
signatures: Vec<Signature>,
|
||||
) -> io::Result<Vec<Option<TransactionStatus>>> {
|
||||
@@ -269,20 +219,15 @@ impl BanksClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_client<C>(transport: C) -> io::Result<BanksClient>
|
||||
where
|
||||
C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>> + Send + 'static,
|
||||
{
|
||||
Ok(BanksClient {
|
||||
inner: TarpcClient::new(client::Config::default(), transport).spawn()?,
|
||||
})
|
||||
pub async fn start_client(
|
||||
transport: UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>>,
|
||||
) -> io::Result<BanksClient> {
|
||||
BanksClient::new(client::Config::default(), transport).spawn()
|
||||
}
|
||||
|
||||
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
|
||||
let transport = tcp::connect(addr, Bincode::default).await?;
|
||||
Ok(BanksClient {
|
||||
inner: TarpcClient::new(client::Config::default(), transport).spawn()?,
|
||||
})
|
||||
BanksClient::new(client::Config::default(), transport).spawn()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -319,7 +264,8 @@ mod tests {
|
||||
|
||||
Runtime::new()?.block_on(async {
|
||||
let client_transport = start_local_server(&bank_forks).await;
|
||||
let mut banks_client = start_client(client_transport).await?;
|
||||
let mut banks_client =
|
||||
BanksClient::new(client::Config::default(), client_transport).spawn()?;
|
||||
|
||||
let recent_blockhash = banks_client.get_recent_blockhash().await?;
|
||||
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
|
||||
@@ -347,7 +293,8 @@ mod tests {
|
||||
|
||||
Runtime::new()?.block_on(async {
|
||||
let client_transport = start_local_server(&bank_forks).await;
|
||||
let mut banks_client = start_client(client_transport).await?;
|
||||
let mut banks_client =
|
||||
BanksClient::new(client::Config::default(), client_transport).spawn()?;
|
||||
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
|
||||
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
|
||||
let signature = transaction.signatures[0];
|
||||
|
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.5.1"
version = "1.4.13"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,14 +9,10 @@ homepage = "https://solana.com/"
edition = "2018"

[dependencies]
mio = "0.7.6"
serde = { version = "1.0.112", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
tarpc = { version = "0.23.0", features = ["full"] }

[dev-dependencies]
tokio = { version = "0.3.5", features = ["full"] }

[lib]
crate-type = ["lib"]
name = "solana_banks_interface"
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.5.1"
version = "1.4.13"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,11 @@ edition = "2018"
[dependencies]
bincode = "1.3.1"
futures = "0.3"
log = "0.4.11"
mio = "0.7.6"
solana-banks-interface = { path = "../banks-interface", version = "1.5.1" }
solana-runtime = { path = "../runtime", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-metrics = { path = "../metrics", version = "1.5.1" }
log = "0.4.8"
solana-banks-interface = { path = "../banks-interface", version = "1.4.13" }
solana-runtime = { path = "../runtime", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
solana-metrics = { path = "../metrics", version = "1.4.13" }
tarpc = { version = "0.23.0", features = ["full"] }
tokio = { version = "0.3", features = ["full"] }
tokio-serde = { version = "0.6", features = ["bincode"] }
@@ -122,16 +122,6 @@ impl BanksServer {
    }
}

fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
    if let Err(err) = transaction.verify() {
        Err(err)
    } else if let Err(err) = transaction.verify_precompiles() {
        Err(err)
    } else {
        Ok(())
    }
}

#[tarpc::server]
impl Banks for BanksServer {
    async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
@@ -193,10 +183,6 @@ impl Banks for BanksServer {
        transaction: Transaction,
        commitment: CommitmentLevel,
    ) -> Option<transaction::Result<()>> {
        if let Err(err) = verify_transaction(&transaction) {
            return Some(Err(err));
        }

        let blockhash = &transaction.message.recent_blockhash;
        let last_valid_slot = self
            .bank_forks
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.5.1"
version = "1.4.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,28 +11,28 @@ publish = false
[dependencies]
clap = "2.33.1"
itertools = "0.9.0"
log = "0.4.11"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.7.0"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
solana-core = { path = "../core", version = "1.5.1" }
solana-genesis = { path = "../genesis", version = "1.5.1" }
solana-client = { path = "../client", version = "1.5.1" }
solana-faucet = { path = "../faucet", version = "1.5.1" }
solana-exchange-program = { path = "../programs/exchange", version = "1.5.1" }
solana-logger = { path = "../logger", version = "1.5.1" }
solana-metrics = { path = "../metrics", version = "1.5.1" }
solana-net-utils = { path = "../net-utils", version = "1.5.1" }
solana-runtime = { path = "../runtime", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-version = { path = "../version", version = "1.5.1" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
solana-core = { path = "../core", version = "1.4.13" }
solana-genesis = { path = "../genesis", version = "1.4.13" }
solana-client = { path = "../client", version = "1.4.13" }
solana-faucet = { path = "../faucet", version = "1.4.13" }
solana-exchange-program = { path = "../programs/exchange", version = "1.4.13" }
solana-logger = { path = "../logger", version = "1.4.13" }
solana-metrics = { path = "../metrics", version = "1.4.13" }
solana-net-utils = { path = "../net-utils", version = "1.4.13" }
solana-runtime = { path = "../runtime", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
solana-version = { path = "../version", version = "1.4.13" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.5.1" }
solana-local-cluster = { path = "../local-cluster", version = "1.4.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -163,8 +163,7 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
    )
}

#[allow(clippy::field_reassign_with_default)]
pub fn extract_args(matches: &ArgMatches) -> Config {
pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
    let mut args = Config::default();

    args.entrypoint_addr = solana_net_utils::parse_host_port(
@@ -22,17 +22,15 @@ fn test_exchange_local_cluster() {

    const NUM_NODES: usize = 1;

    let config = Config {
        identity: Keypair::new(),
        duration: Duration::from_secs(1),
        fund_amount: 100_000,
        threads: 1,
        transfer_delay: 20, // 15
        batch_size: 100, // 1000
        chunk_size: 10, // 200
        account_groups: 1, // 10
        ..Config::default()
    };
    let mut config = Config::default();
    config.identity = Keypair::new();
    config.duration = Duration::from_secs(1);
    config.fund_amount = 100_000;
    config.threads = 1;
    config.transfer_delay = 20; // 15
    config.batch_size = 100; // 1000;
    config.chunk_size = 10; // 200;
    config.account_groups = 1; // 10;
    let Config {
        fund_amount,
        batch_size,
@@ -91,18 +89,15 @@ fn test_exchange_bank_client() {
    bank.add_builtin("exchange_program", id(), process_instruction);
    let clients = vec![BankClient::new(bank)];

    do_bench_exchange(
        clients,
        Config {
            identity,
            duration: Duration::from_secs(1),
            fund_amount: 100_000,
            threads: 1,
            transfer_delay: 20, // 0;
            batch_size: 100, // 1500;
            chunk_size: 10, // 1500;
            account_groups: 1, // 50;
            ..Config::default()
        },
    );
    let mut config = Config::default();
    config.identity = identity;
    config.duration = Duration::from_secs(1);
    config.fund_amount = 100_000;
    config.threads = 1;
    config.transfer_delay = 20; // 0;
    config.batch_size = 100; // 1500;
    config.chunk_size = 10; // 1500;
    config.account_groups = 1; // 50;

    do_bench_exchange(clients, config);
}
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.5.1"
version = "1.4.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false

[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
solana-streamer = { path = "../streamer", version = "1.5.1" }
solana-logger = { path = "../logger", version = "1.5.1" }
solana-net-utils = { path = "../net-utils", version = "1.5.1" }
solana-version = { path = "../version", version = "1.5.1" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
solana-streamer = { path = "../streamer", version = "1.4.13" }
solana-logger = { path = "../logger", version = "1.4.13" }
solana-net-utils = { path = "../net-utils", version = "1.4.13" }
solana-version = { path = "../version", version = "1.4.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.5.1"
version = "1.4.13"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,27 +11,27 @@ publish = false
[dependencies]
bincode = "1.3.1"
clap = "2.33.1"
log = "0.4.11"
log = "0.4.8"
rayon = "1.4.0"
serde_json = "1.0.56"
serde_yaml = "0.8.13"
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
solana-core = { path = "../core", version = "1.5.1" }
solana-genesis = { path = "../genesis", version = "1.5.1" }
solana-client = { path = "../client", version = "1.5.1" }
solana-faucet = { path = "../faucet", version = "1.5.1" }
solana-logger = { path = "../logger", version = "1.5.1" }
solana-metrics = { path = "../metrics", version = "1.5.1" }
solana-measure = { path = "../measure", version = "1.5.1" }
solana-net-utils = { path = "../net-utils", version = "1.5.1" }
solana-runtime = { path = "../runtime", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-version = { path = "../version", version = "1.5.1" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
solana-core = { path = "../core", version = "1.4.13" }
solana-genesis = { path = "../genesis", version = "1.4.13" }
solana-client = { path = "../client", version = "1.4.13" }
solana-faucet = { path = "../faucet", version = "1.4.13" }
solana-logger = { path = "../logger", version = "1.4.13" }
solana-metrics = { path = "../metrics", version = "1.4.13" }
solana-measure = { path = "../measure", version = "1.4.13" }
solana-net-utils = { path = "../net-utils", version = "1.4.13" }
solana-runtime = { path = "../runtime", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
solana-version = { path = "../version", version = "1.4.13" }

[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.5.1" }
solana-local-cluster = { path = "../local-cluster", version = "1.4.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -938,12 +938,10 @@ mod tests {
        let bank = Bank::new(&genesis_config);
        let client = Arc::new(BankClient::new(bank));

        let config = Config {
            id,
            tx_count: 10,
            duration: Duration::from_secs(5),
            ..Config::default()
        };
        let mut config = Config::default();
        config.id = id;
        config.tx_count = 10;
        config.duration = Duration::from_secs(5);

        let keypair_count = config.tx_count * config.keypair_multiplier;
        let keypairs =
@@ -196,7 +196,7 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
/// * `matches` - command line arguments parsed by clap
/// # Panics
/// Panics if there is trouble parsing any of the arguments
pub fn extract_args(matches: &ArgMatches) -> Config {
pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
    let mut args = Config::default();

    if let Some(addr) = matches.value_of("entrypoint") {
@@ -60,9 +60,9 @@ fn test_bench_tps_local_cluster(config: Config) {
#[test]
#[serial]
fn test_bench_tps_local_cluster_solana() {
    test_bench_tps_local_cluster(Config {
        tx_count: 100,
        duration: Duration::from_secs(10),
        ..Config::default()
    });
    let mut config = Config::default();
    config.tx_count = 100;
    config.duration = Duration::from_secs(10);

    test_bench_tps_local_cluster(config);
}
@@ -47,8 +47,6 @@ sudo ./setup-new-buildkite-agent/setup-buildkite.sh
```
- Copy the pubkey contents from `~buildkite-agent/.ssh/id_ecdsa.pub` and
  add the pubkey as an authorized SSH key on github.
  - In net/scripts/solana-user-authorized_keys.sh
  - Bug mvines to add it to the "solana-grimes" github user
- Edit `/etc/buildkite-agent/buildkite-agent.cfg` and/or `/etc/systemd/system/buildkite-agent@*` to the desired configuration of the agent(s)
- Copy `ejson` keys from another CI node at `/opt/ejson/keys/`
  to the same location on the new node.
@@ -263,7 +263,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
  annotate --style info --context pr-backlink \
    "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"

  if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
  if [[ $GITHUB_USER = "dependabot-preview[bot]" ]]; then
    command_step dependabot "ci/dependabot-pr.sh" 5
    wait_step
  fi
@@ -6,13 +6,13 @@ source ci/_

commit_range="$(git merge-base HEAD origin/master)..HEAD"
parsed_update_args="$(
  git log "$commit_range" --author "dependabot\[bot\]" --oneline -n1 |
  git log "$commit_range" --author "dependabot-preview" --oneline -n1 |
  grep -o '[Bb]ump.*$' |
  sed -r 's/[Bb]ump ([^ ]+) from ([^ ]+) to ([^ ]+)/-p \1:\2 --precise \3/'
)"
# relaxed_parsed_update_args is temporal measure...
relaxed_parsed_update_args="$(
  git log "$commit_range" --author "dependabot\[bot\]" --oneline -n1 |
  git log "$commit_range" --author "dependabot-preview" --oneline -n1 |
  grep -o '[Bb]ump.*$' |
  sed -r 's/[Bb]ump ([^ ]+) from [^ ]+ to ([^ ]+)/-p \1 --precise \2/'
)"
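As a worked illustration of the `grep`/`sed` pipeline in the dependabot script above, a dependabot commit subject is rewritten into `cargo update` arguments. The commit subject below is hypothetical, not taken from this comparison:

```bash
# Hypothetical dependabot commit line, piped through the same grep/sed as above.
echo "1234abcd Bump serde from 1.0.112 to 1.0.118" |
  grep -o '[Bb]ump.*$' |
  sed -r 's/[Bb]ump ([^ ]+) from ([^ ]+) to ([^ ]+)/-p \1:\2 --precise \3/'
# Prints: -p serde:1.0.112 --precise 1.0.118
```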
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.48.0
FROM solanalabs/rust:1.46.0
ARG date

RUN set -x \
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.48.0
FROM rust:1.46.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -16,12 +16,21 @@ fi
  [[ -f bpf-sdk.tar.bz2 ]]
)

source ci/upload-ci-artifact.sh
echo --- AWS S3 Store
if [[ -z $CHANNEL_OR_TAG ]]; then
  echo Skipped
else
  upload-s3-artifact "/solana/bpf-sdk.tar.bz2" "s3://solana-sdk/$CHANNEL_OR_TAG/bpf-sdk.tar.bz2"
  (
    set -x
    docker run \
      --rm \
      --env AWS_ACCESS_KEY_ID \
      --env AWS_SECRET_ACCESS_KEY \
      --volume "$PWD:/solana" \
      eremite/aws-cli:2018.12.18 \
      /usr/bin/s3cmd --acl-public put /solana/bpf-sdk.tar.bz2 \
        s3://solana-sdk/"$CHANNEL_OR_TAG"/bpf-sdk.tar.bz2
  )
fi

exit 0
@@ -113,10 +113,19 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.

  if [[ -n $BUILDKITE ]]; then
    echo --- AWS S3 Store: "$file"
    upload-s3-artifact "/solana/$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
    (
      set -x
      $DRYRUN docker run \
        --rm \
        --env AWS_ACCESS_KEY_ID \
        --env AWS_SECRET_ACCESS_KEY \
        --volume "$PWD:/solana" \
        eremite/aws-cli:2018.12.18 \
        /usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"

      echo Published to:
      $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
    echo Published to:
    $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
    )

    if [[ -n $TAG ]]; then
      ci/upload-github-release-asset.sh "$file"
@@ -140,9 +149,7 @@ done


# Create install wrapper for release.solana.com
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
  echo "Skipping publishing install wrapper"
elif [[ -n $BUILDKITE ]]; then
if [[ -n $BUILDKITE ]]; then
  cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
@@ -151,9 +158,19 @@ EOF
  cat install/solana-install-init.sh >> release.solana.com-install

  echo --- AWS S3 Store: "install"
  $DRYRUN upload-s3-artifact "/solana/release.solana.com-install" "s3://release.solana.com/$CHANNEL_OR_TAG/install"
  echo Published to:
  $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
  (
    set -x
    $DRYRUN docker run \
      --rm \
      --env AWS_ACCESS_KEY_ID \
      --env AWS_SECRET_ACCESS_KEY \
      --volume "$PWD:/solana" \
      eremite/aws-cli:2018.12.18 \
      /usr/bin/s3cmd --acl-public put /solana/release.solana.com-install s3://release.solana.com/"$CHANNEL_OR_TAG"/install

    echo Published to:
    $DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
  )
fi

echo --- ok
@@ -18,13 +18,13 @@
if [[ -n $RUST_STABLE_VERSION ]]; then
  stable_version="$RUST_STABLE_VERSION"
else
  stable_version=1.48.0
  stable_version=1.46.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
  nightly_version="$RUST_NIGHTLY_VERSION"
else
  nightly_version=2020-12-13
  nightly_version=2020-08-17
fi
@@ -7,7 +7,6 @@ SOLANA_ROOT="$HERE"/../..
source "$HERE"/utils.sh

ensure_env || exit 1
check_ssh_authorized_keys || exit 1

set -ex
@@ -6,11 +6,6 @@ HERE="$(dirname "$0")"
source "$HERE"/utils.sh

ensure_env || exit 1
# This is a last ditch effort to prevent the caller from locking themselves
# out of the machine. Exiting here will likely leave the system in some
# half-configured state. To prevent this, duplicate the next line at the top
# of the entrypoint script.
check_ssh_authorized_keys || exit 1

set -xe
# Setup sshd
@@ -14,33 +14,3 @@ ensure_env() {
$RC
}

# Some scripts disable SSH password logins. If no one has set up authorized_keys
# this will result in the machine being remotely inaccessible. Check that the
# user running this script has set up their keys
check_ssh_authorized_keys() {
declare rc=false
declare user_home=
if [[ -n "$SUDO_USER" ]]; then
declare user uid gid home
declare passwd_entry
passwd_entry="$(grep "$SUDO_USER:[^:]*:$SUDO_UID:$SUDO_GID" /etc/passwd)"
IFS=: read -r user _ uid gid _ home _ <<<"$passwd_entry"
if [[ "$user" == "$SUDO_USER" && "$uid" == "$SUDO_UID" && "$gid" == "$SUDO_GID" ]]; then
user_home="$home"
fi
else
user_home="$HOME"
fi
declare authorized_keys="${user_home}/.ssh/authorized_keys"
if [[ -n "$user_home" ]]; then
[[ -s "$authorized_keys" ]] && rc=true
fi
if ! $rc; then
echo "ERROR! This script will disable SSH password logins and you don't"
echo "appear to have set up any authorized keys. Please add your SSH"
echo "public key to ${authorized_keys} before continuing!"
fi
$rc
}

check_ssh_authorized_keys
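For readers who want to reproduce the lockout guard above outside of these shell scripts, here is a minimal Rust sketch of the same check. The helper name and the HOME-only lookup (no sudo handling) are assumptions for illustration only, not code from this repository.

use std::{env, fs, path::PathBuf};

// Returns true if the invoking user's authorized_keys file exists and is non-empty,
// mirroring the shell check above.
fn has_authorized_keys() -> bool {
    match env::var_os("HOME").map(PathBuf::from) {
        Some(home) => fs::metadata(home.join(".ssh/authorized_keys"))
            .map(|m| m.len() > 0)
            .unwrap_or(false),
        None => false,
    }
}

fn main() {
    if !has_authorized_keys() {
        eprintln!("ERROR! No SSH authorized keys found; aborting to avoid lockout.");
        std::process::exit(1);
    }
}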
@@ -56,7 +56,9 @@ _ "$cargo" stable fmt --all -- --check

# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings
_ "$cargo" nightly clippy \
-Zunstable-options --workspace --all-targets \
-- --deny=warnings --allow=clippy::stable_sort_primitive

cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
@@ -90,7 +92,9 @@ _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignor
cd "$project"
_ "$cargo" stable fmt -- --check
_ "$cargo" nightly test
_ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
_ "$cargo" nightly clippy -- --deny=warnings \
--allow=clippy::missing_safety_doc \
--allow=clippy::stable_sort_primitive
)
done
}
@@ -8,16 +8,10 @@ source ci/_
(
echo --- git diff --check
set -x

if [[ -n $CI_BASE_BRANCH ]]
then branch="$CI_BASE_BRANCH"
else branch="master"
fi

# Look for failed mergify.io backports by searching leftover conflict markers
# Also check for any trailing whitespaces!
git fetch origin "$branch"
git diff "$(git merge-base HEAD "origin/$branch")" --check --oneline
git fetch origin "$CI_BASE_BRANCH"
git diff "$(git merge-base HEAD "origin/$CI_BASE_BRANCH")..HEAD" --check --oneline
)

echo
@@ -21,6 +21,9 @@ export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
source scripts/ulimit-n.sh

# Clear cached json keypair files
rm -rf "$HOME/.config/solana"

# Clear the C dependency files, if dependency moves these files are not regenerated
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
@@ -16,16 +16,3 @@ upload-ci-artifact() {
fi
}

upload-s3-artifact() {
echo "--- artifact: $1 to $2"
(
set -x
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put "$1" "$2"
)
}
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.5.1"
|
||||
version = "1.4.13"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -11,9 +11,9 @@ edition = "2018"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.5.1" }
|
||||
solana-sdk = { path = "../sdk", version = "1.5.1" }
|
||||
thiserror = "1.0.21"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.13" }
|
||||
thiserror = "1.0.20"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.0"
|
||||
chrono = "0.4"
|
||||
|
@@ -1,7 +1,7 @@
|
||||
use crate::keypair::{parse_keypair_path, KeypairUrl, ASK_KEYWORD};
|
||||
use chrono::DateTime;
|
||||
use solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
clock::Slot,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Signature},
|
||||
@@ -148,13 +148,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_epoch<T>(epoch: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
{
|
||||
is_parsable_generic::<Epoch, _>(epoch)
|
||||
}
|
||||
|
||||
pub fn is_slot<T>(slot: T) -> Result<(), String>
|
||||
where
|
||||
T: AsRef<str> + Display,
|
||||
|
@@ -58,15 +58,6 @@ impl CliSignerInfo {
|
||||
Some(0)
|
||||
}
|
||||
}
|
||||
pub fn index_of_or_none(&self, pubkey: Option<Pubkey>) -> Option<usize> {
|
||||
if let Some(pubkey) = pubkey {
|
||||
self.signers
|
||||
.iter()
|
||||
.position(|signer| signer.pubkey() == pubkey)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
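The removed helper above is a thin wrapper over Iterator::position guarded by an Option. The same shape in isolation, with stand-in types that are illustrative only and not part of the crate:

// Find the index of a matching key, but only when a key was actually supplied.
fn index_of_or_none(keys: &[u32], wanted: Option<u32>) -> Option<usize> {
    wanted.and_then(|wanted| keys.iter().position(|&k| k == wanted))
}

fn main() {
    let keys = [7, 11, 13];
    assert_eq!(index_of_or_none(&keys, Some(11)), Some(1));
    assert_eq!(index_of_or_none(&keys, None), None);
    assert_eq!(index_of_or_none(&keys, Some(99)), None);
}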
|
||||
|
||||
pub struct DefaultSigner {
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.5.1"
|
||||
version = "1.4.13"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
@@ -76,6 +76,17 @@ impl Config {
|
||||
ws_url.to_string()
|
||||
}
|
||||
|
||||
pub fn compute_rpc_banks_url(json_rpc_url: &str) -> String {
|
||||
let json_rpc_url: Option<Url> = json_rpc_url.parse().ok();
|
||||
if json_rpc_url.is_none() {
|
||||
return "".to_string();
|
||||
}
|
||||
let mut url = json_rpc_url.unwrap();
|
||||
let port = url.port().unwrap_or(8899);
|
||||
url.set_port(Some(port + 3)).expect("unable to set port");
|
||||
url.to_string()
|
||||
}
|
||||
|
||||
pub fn import_address_labels<P>(&mut self, filename: P) -> Result<(), io::Error>
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
@@ -122,4 +133,28 @@ mod test {
|
||||
|
||||
assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compute_rpc_banks_url() {
|
||||
assert_eq!(
|
||||
Config::compute_rpc_banks_url(&"http://devnet.solana.com"),
|
||||
"http://devnet.solana.com:8902/".to_string()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
Config::compute_rpc_banks_url(&"https://devnet.solana.com"),
|
||||
"https://devnet.solana.com:8902/".to_string()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
Config::compute_rpc_banks_url(&"http://example.com:8899"),
|
||||
"http://example.com:8902/".to_string()
|
||||
);
|
||||
assert_eq!(
|
||||
Config::compute_rpc_banks_url(&"https://example.com:1234"),
|
||||
"https://example.com:1237/".to_string()
|
||||
);
|
||||
|
||||
assert_eq!(Config::compute_rpc_banks_url(&"garbage"), String::new());
|
||||
}
|
||||
}
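The port arithmetic in compute_rpc_banks_url above (default JSON RPC port 8899, banks port = RPC port + 3) is easy to sanity-check in isolation. A minimal sketch, assuming the url crate that the diff already imports; the free function name below is illustrative, not the crate's API:

use url::Url;

// Derive a "banks" RPC URL from a JSON RPC URL by shifting the port by +3,
// defaulting to 8899 when no port is given (so 8899 -> 8902).
fn banks_url(json_rpc_url: &str) -> String {
    let mut url = match json_rpc_url.parse::<Url>() {
        Ok(url) => url,
        Err(_) => return String::new(),
    };
    let port = url.port().unwrap_or(8899);
    url.set_port(Some(port + 3)).expect("unable to set port");
    url.to_string()
}

fn main() {
    assert_eq!(banks_url("http://devnet.solana.com"), "http://devnet.solana.com:8902/");
    assert_eq!(banks_url("https://example.com:1234"), "https://example.com:1237/");
    assert_eq!(banks_url("garbage"), "");
}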
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-output"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.5.1"
|
||||
version = "1.4.13"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -17,13 +17,13 @@ indicatif = "0.15.0"
|
||||
serde = "1.0.112"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.5.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
|
||||
solana-client = { path = "../client", version = "1.5.1" }
|
||||
solana-sdk = { path = "../sdk", version = "1.5.1" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.5.1" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.5.1" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.5.1" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
|
||||
solana-client = { path = "../client", version = "1.4.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.13" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -241,9 +241,6 @@ impl fmt::Display for CliEpochInfo {
|
||||
)?;
|
||||
writeln_name_value(f, "Slot:", &self.epoch_info.absolute_slot.to_string())?;
|
||||
writeln_name_value(f, "Epoch:", &self.epoch_info.epoch.to_string())?;
|
||||
if let Some(transaction_count) = &self.epoch_info.transaction_count {
|
||||
writeln_name_value(f, "Transaction Count:", &transaction_count.to_string())?;
|
||||
}
|
||||
let start_slot = self.epoch_info.absolute_slot - self.epoch_info.slot_index;
|
||||
let end_slot = start_slot + self.epoch_info.slots_in_epoch;
|
||||
writeln_name_value(
|
||||
@@ -544,15 +541,7 @@ impl CliStakeVec {
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliStakeVec {}
|
||||
impl VerboseDisplay for CliStakeVec {
|
||||
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
|
||||
for state in &self.0 {
|
||||
writeln!(w)?;
|
||||
VerboseDisplay::write_str(state, w)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
impl VerboseDisplay for CliStakeVec {}
|
||||
|
||||
impl fmt::Display for CliStakeVec {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
@@ -573,12 +562,7 @@ pub struct CliKeyedStakeState {
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliKeyedStakeState {}
|
||||
impl VerboseDisplay for CliKeyedStakeState {
|
||||
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
|
||||
writeln!(w, "Stake Pubkey: {}", self.stake_pubkey)?;
|
||||
VerboseDisplay::write_str(&self.stake_state, w)
|
||||
}
|
||||
}
|
||||
impl VerboseDisplay for CliKeyedStakeState {}
|
||||
|
||||
impl fmt::Display for CliKeyedStakeState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
@@ -595,46 +579,7 @@ pub struct CliEpochReward {
|
||||
pub amount: u64, // lamports
|
||||
pub post_balance: u64, // lamports
|
||||
pub percent_change: f64,
|
||||
pub apr: Option<f64>,
|
||||
}
|
||||
|
||||
fn show_votes_and_credits(
|
||||
f: &mut fmt::Formatter,
|
||||
votes: &[CliLockout],
|
||||
epoch_voting_history: &[CliEpochVotingHistory],
|
||||
) -> fmt::Result {
|
||||
if votes.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
writeln!(f, "Recent Votes:")?;
|
||||
for vote in votes {
|
||||
writeln!(f, "- slot: {}", vote.slot)?;
|
||||
writeln!(f, " confirmation count: {}", vote.confirmation_count)?;
|
||||
}
|
||||
writeln!(f, "Epoch Voting History:")?;
|
||||
writeln!(
|
||||
f,
|
||||
"* missed credits include slots unavailable to vote on due to delinquent leaders",
|
||||
)?;
|
||||
for entry in epoch_voting_history {
|
||||
writeln!(
|
||||
f, // tame fmt so that this will be folded like following
|
||||
"- epoch: {}",
|
||||
entry.epoch
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
" credits range: [{}..{})",
|
||||
entry.prev_credits, entry.credits
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
" credits/slots: {}/{}",
|
||||
entry.credits_earned, entry.slots_in_epoch
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
pub apr: f64,
|
||||
}
|
||||
|
||||
fn show_epoch_rewards(
|
||||
@@ -649,22 +594,19 @@ fn show_epoch_rewards(
|
||||
writeln!(f, "Epoch Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} {:<16} {:<16} {:>14} {:>14}",
|
||||
" {:<8} {:<11} {:<15} {:<15} {:>14} {:>14}",
|
||||
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR"
|
||||
)?;
|
||||
for reward in epoch_rewards {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} ◎{:<16.9} ◎{:<14.9} {:>13.2}% {}",
|
||||
" {:<8} {:<11} ◎{:<14.9} ◎{:<14.9} {:>13.9}% {:>13.9}%",
|
||||
reward.epoch,
|
||||
reward.effective_slot,
|
||||
lamports_to_sol(reward.amount),
|
||||
lamports_to_sol(reward.post_balance),
|
||||
reward.percent_change,
|
||||
reward
|
||||
.apr
|
||||
.map(|apr| format!("{:>13.2}%", apr))
|
||||
.unwrap_or_default(),
|
||||
reward.apr,
|
||||
)?;
|
||||
}
|
||||
}
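The change above makes the APR column optional: a reward row only prints an APR figure when a wallclock epoch duration could be computed. A small sketch of that Option-based column formatting, using hypothetical type and function names rather than the crate's own:

struct EpochReward {
    epoch: u64,
    percent_change: f64,
    apr: Option<f64>, // None when the epoch's wallclock duration is unknown
}

// Format one table row; an absent APR simply leaves the column blank.
fn format_reward_row(r: &EpochReward) -> String {
    format!(
        "  {:<8} {:>13.2}% {}",
        r.epoch,
        r.percent_change,
        r.apr.map(|apr| format!("{:>13.2}%", apr)).unwrap_or_default(),
    )
}

fn main() {
    let with_apr = EpochReward { epoch: 132, percent_change: 0.05, apr: Some(9.1) };
    let without_apr = EpochReward { epoch: 133, percent_change: 0.05, apr: None };
    println!("{}", format_reward_row(&with_apr));
    println!("{}", format_reward_row(&without_apr));
}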
|
||||
@@ -677,8 +619,6 @@ pub struct CliStakeState {
|
||||
pub stake_type: CliStakeType,
|
||||
pub account_balance: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub credits_observed: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_stake: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_vote_account_address: Option<String>,
|
||||
@@ -707,15 +647,7 @@ pub struct CliStakeState {
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliStakeState {}
|
||||
impl VerboseDisplay for CliStakeState {
|
||||
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
|
||||
write!(w, "{}", self)?;
|
||||
if let Some(credits) = self.credits_observed {
|
||||
writeln!(w, "Credits Observed: {}", credits)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
impl VerboseDisplay for CliStakeState {}
|
||||
|
||||
impl fmt::Display for CliStakeState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
@@ -1077,7 +1009,24 @@ impl fmt::Display for CliVoteAccount {
|
||||
unix_timestamp_to_string(self.recent_timestamp.timestamp),
|
||||
self.recent_timestamp.slot
|
||||
)?;
|
||||
show_votes_and_credits(f, &self.votes, &self.epoch_voting_history)?;
|
||||
if !self.votes.is_empty() {
|
||||
writeln!(f, "Recent Votes:")?;
|
||||
for vote in &self.votes {
|
||||
writeln!(
|
||||
f,
|
||||
"- slot: {}\n confirmation count: {}",
|
||||
vote.slot, vote.confirmation_count
|
||||
)?;
|
||||
}
|
||||
writeln!(f, "Epoch Voting History:")?;
|
||||
for epoch_info in &self.epoch_voting_history {
|
||||
writeln!(
|
||||
f,
|
||||
"- epoch: {}\n slots in epoch: {}\n credits earned: {}",
|
||||
epoch_info.epoch, epoch_info.slots_in_epoch, epoch_info.credits_earned,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
show_epoch_rewards(f, &self.epoch_rewards)?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1116,8 +1065,6 @@ pub struct CliEpochVotingHistory {
|
||||
pub epoch: Epoch,
|
||||
pub slots_in_epoch: u64,
|
||||
pub credits_earned: u64,
|
||||
pub credits: u64,
|
||||
pub prev_credits: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
|
@@ -27,7 +27,7 @@ pub fn build_balance_message(lamports: u64, use_lamports_unit: bool, show_unit:
|
||||
|
||||
// Pretty print a "name value"
|
||||
pub fn println_name_value(name: &str, value: &str) {
|
||||
let styled_value = if value.is_empty() {
|
||||
let styled_value = if value == "" {
|
||||
style("(not set)").italic()
|
||||
} else {
|
||||
style(value)
|
||||
@@ -36,7 +36,7 @@ pub fn println_name_value(name: &str, value: &str) {
|
||||
}
|
||||
|
||||
pub fn writeln_name_value(f: &mut fmt::Formatter, name: &str, value: &str) -> fmt::Result {
|
||||
let styled_value = if value.is_empty() {
|
||||
let styled_value = if value == "" {
|
||||
style("(not set)").italic()
|
||||
} else {
|
||||
style(value)
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.5.1"
|
||||
version = "1.4.13"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -17,39 +17,40 @@ criterion-stats = "0.3.0"
|
||||
ctrlc = { version = "3.1.5", features = ["termination"] }
|
||||
console = "0.11.3"
|
||||
dirs-next = "2.0.0"
|
||||
log = "0.4.11"
|
||||
log = "0.4.8"
|
||||
Inflector = "0.11.4"
|
||||
indicatif = "0.15.0"
|
||||
humantime = "2.0.1"
|
||||
num-traits = "0.2"
|
||||
pretty-hex = "0.2.1"
|
||||
pretty-hex = "0.1.1"
|
||||
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde = "1.0.112"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.5.1" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.5.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.5.1" }
|
||||
solana-cli-output = { path = "../cli-output", version = "1.5.1" }
|
||||
solana-client = { path = "../client", version = "1.5.1" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.5.1" }
|
||||
solana-faucet = { path = "../faucet", version = "1.5.1" }
|
||||
solana-logger = { path = "../logger", version = "1.5.1" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.5.1" }
|
||||
solana_rbpf = "=0.2.2"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.5.1" }
|
||||
solana-sdk = { path = "../sdk", version = "1.5.1" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.5.1" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.5.1" }
|
||||
solana-version = { path = "../version", version = "1.5.1" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.5.1" }
|
||||
thiserror = "1.0.21"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.13" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.4.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.4.13" }
|
||||
solana-cli-output = { path = "../cli-output", version = "1.4.13" }
|
||||
solana-client = { path = "../client", version = "1.4.13" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.4.13" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.13" }
|
||||
solana-logger = { path = "../logger", version = "1.4.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.13" }
|
||||
solana_rbpf = "=0.1.33"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.13" }
|
||||
solana-version = { path = "../version", version = "1.4.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.13" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.4.13" }
|
||||
thiserror = "1.0.20"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "1.5.1" }
|
||||
solana-core = { path = "../core", version = "1.4.13" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
cli/src/cli.rs (499 changed lines)
@@ -1,12 +1,15 @@
|
||||
use crate::{
|
||||
cluster_query::*, feature::*, inflation::*, nonce::*, program::*, spend_utils::*, stake::*,
|
||||
validator_info::*, vote::*,
|
||||
checks::*, cluster_query::*, feature::*, inflation::*, nonce::*, send_tpu::*, spend_utils::*,
|
||||
stake::*, validator_info::*, vote::*,
|
||||
};
|
||||
use bincode::serialize;
|
||||
use bip39::{Language, Mnemonic, MnemonicType, Seed};
|
||||
use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use log::*;
|
||||
use num_traits::FromPrimitive;
|
||||
use serde_json::{self, Value};
|
||||
use serde_json::{self, json, Value};
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_bpf_loader_program::bpf_verifier;
|
||||
use solana_clap_utils::{
|
||||
self,
|
||||
commitment::commitment_arg_with_default,
|
||||
@@ -18,7 +21,9 @@ use solana_clap_utils::{
|
||||
offline::*,
|
||||
};
|
||||
use solana_cli_output::{
|
||||
display::{build_balance_message, println_name_value, println_transaction},
|
||||
display::{
|
||||
build_balance_message, new_spinner_progress_bar, println_name_value, println_transaction,
|
||||
},
|
||||
return_signers, CliAccount, CliSignature, OutputFormat,
|
||||
};
|
||||
use solana_client::{
|
||||
@@ -27,22 +32,28 @@ use solana_client::{
|
||||
nonce_utils,
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionLogsFilter},
|
||||
rpc_response::RpcKeyedAccount,
|
||||
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
|
||||
rpc_response::{RpcKeyedAccount, RpcLeaderSchedule},
|
||||
};
|
||||
#[cfg(not(test))]
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
#[cfg(test)]
|
||||
use solana_faucet::faucet_mock::request_airdrop_transaction;
|
||||
use solana_rbpf::vm::EbpfVm;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
bpf_loader, bpf_loader_deprecated,
|
||||
clock::{Epoch, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
decode_error::DecodeError,
|
||||
hash::Hash,
|
||||
instruction::InstructionError,
|
||||
instruction::{Instruction, InstructionError},
|
||||
loader_instruction,
|
||||
message::Message,
|
||||
native_token::Sol,
|
||||
pubkey::{Pubkey, MAX_SEED_LEN},
|
||||
signature::{Signature, Signer, SignerError},
|
||||
signature::{keypair_from_seed, Keypair, Signature, Signer, SignerError},
|
||||
signers::Signers,
|
||||
system_instruction::{self, SystemError},
|
||||
system_program,
|
||||
transaction::{Transaction, TransactionError},
|
||||
@@ -54,12 +65,13 @@ use solana_stake_program::{
|
||||
use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
|
||||
use solana_vote_program::vote_state::VoteAuthorize;
|
||||
use std::{
|
||||
cmp::min,
|
||||
collections::HashMap,
|
||||
error,
|
||||
fmt::Write as FmtWrite,
|
||||
fs::File,
|
||||
io::Write,
|
||||
net::{IpAddr, SocketAddr},
|
||||
io::{Read, Write},
|
||||
net::{IpAddr, SocketAddr, UdpSocket},
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
thread::sleep,
|
||||
@@ -68,6 +80,7 @@ use std::{
|
||||
use thiserror::Error;
|
||||
use url::Url;
|
||||
|
||||
const DATA_CHUNK_SIZE: usize = 229; // Keep program chunks under PACKET_DATA_SIZE
|
||||
pub const DEFAULT_RPC_TIMEOUT_SECONDS: &str = "30";
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@@ -105,9 +118,7 @@ pub enum CliCommand {
|
||||
LargestAccounts {
|
||||
filter: Option<RpcLargestAccountsFilter>,
|
||||
},
|
||||
LeaderSchedule {
|
||||
epoch: Option<Epoch>,
|
||||
},
|
||||
LeaderSchedule,
|
||||
LiveSlots,
|
||||
Logs {
|
||||
filter: RpcTransactionLogsFilter,
|
||||
@@ -117,8 +128,6 @@ pub enum CliCommand {
|
||||
interval: Duration,
|
||||
count: Option<u64>,
|
||||
timeout: Duration,
|
||||
blockhash: Option<Hash>,
|
||||
print_timestamp: bool,
|
||||
},
|
||||
ShowBlockProduction {
|
||||
epoch: Option<Epoch>,
|
||||
@@ -143,9 +152,6 @@ pub enum CliCommand {
|
||||
limit: usize,
|
||||
show_transactions: bool,
|
||||
},
|
||||
WaitForMaxStake {
|
||||
max_stake_percent: f32,
|
||||
},
|
||||
// Nonce commands
|
||||
AuthorizeNonceAccount {
|
||||
nonce_account: Pubkey,
|
||||
@@ -180,7 +186,6 @@ pub enum CliCommand {
|
||||
use_deprecated_loader: bool,
|
||||
allow_excessive_balance: bool,
|
||||
},
|
||||
Program(ProgramCliCommand),
|
||||
// Stake Commands
|
||||
CreateStakeAccount {
|
||||
stake_account: SignerIndex,
|
||||
@@ -434,7 +439,7 @@ impl CliConfig<'_> {
|
||||
) -> (SettingType, String) {
|
||||
settings
|
||||
.into_iter()
|
||||
.find(|(_, value)| !value.is_empty())
|
||||
.find(|(_, value)| value != "")
|
||||
.expect("no nonempty setting")
|
||||
}
|
||||
|
||||
@@ -492,15 +497,13 @@ impl CliConfig<'_> {
|
||||
}
|
||||
|
||||
pub fn recent_for_tests() -> Self {
|
||||
Self {
|
||||
commitment: CommitmentConfig::recent(),
|
||||
send_transaction_config: RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
preflight_commitment: Some(CommitmentConfig::recent().commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
..Self::default()
|
||||
}
|
||||
let mut config = Self::default();
|
||||
config.commitment = CommitmentConfig::recent();
|
||||
config.send_transaction_config = RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
..RpcSendTransactionConfig::default()
|
||||
};
|
||||
config
|
||||
}
|
||||
}
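The newer variant of recent_for_tests above relies on struct update syntax (..Self::default()) instead of mutating a default value field by field. A tiny illustration of the pattern with a made-up config type, shown only to contrast the two styles:

#[derive(Default, Debug, PartialEq)]
struct SendConfig {
    skip_preflight: bool,
    retries: u8,
}

fn main() {
    // Field-by-field mutation of a default value...
    let mut a = SendConfig::default();
    a.skip_preflight = true;

    // ...and struct update syntax produce the same value; the latter reads as one expression.
    let b = SendConfig { skip_preflight: true, ..SendConfig::default() };
    assert_eq!(a, b);
}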
|
||||
|
||||
@@ -573,7 +576,10 @@ pub fn parse_command(
|
||||
("supply", Some(matches)) => parse_supply(matches),
|
||||
("total-supply", Some(matches)) => parse_total_supply(matches),
|
||||
("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
|
||||
("leader-schedule", Some(matches)) => parse_leader_schedule(matches),
|
||||
("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::LeaderSchedule,
|
||||
signers: vec![],
|
||||
}),
|
||||
("ping", Some(matches)) => parse_cluster_ping(matches, default_signer, wallet_manager),
|
||||
("live-slots", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::LiveSlots,
|
||||
@@ -622,16 +628,6 @@ pub fn parse_command(
|
||||
signers,
|
||||
})
|
||||
}
|
||||
("program", Some(matches)) => {
|
||||
parse_program_subcommand(matches, default_signer, wallet_manager)
|
||||
}
|
||||
("wait-for-max-stake", Some(matches)) => {
|
||||
let max_stake_percent = value_t_or_exit!(matches, "max_percent", f32);
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::WaitForMaxStake { max_stake_percent },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
// Stake Commands
|
||||
("create-stake-account", Some(matches)) => {
|
||||
parse_stake_create_account(matches, default_signer, wallet_manager)
|
||||
@@ -987,7 +983,6 @@ fn process_confirm(
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::unnecessary_wraps)]
|
||||
fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
|
||||
println_transaction(transaction, &None, "");
|
||||
Ok("".to_string())
|
||||
@@ -1033,6 +1028,394 @@ fn process_show_account(
|
||||
Ok(account_string)
|
||||
}
|
||||
|
||||
fn send_and_confirm_transactions_with_spinner<T: Signers>(
|
||||
rpc_client: &RpcClient,
|
||||
mut transactions: Vec<Transaction>,
|
||||
signer_keys: &T,
|
||||
commitment: CommitmentConfig,
|
||||
mut last_valid_slot: Slot,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
let mut send_retries = 5;
|
||||
let mut leader_schedule: Option<RpcLeaderSchedule> = None;
|
||||
let mut leader_schedule_epoch = 0;
|
||||
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes().ok();
|
||||
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
|
||||
progress_bar.set_message("Finding leader node...");
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment)?;
|
||||
if epoch_info.epoch > leader_schedule_epoch || leader_schedule.is_none() {
|
||||
leader_schedule = rpc_client
|
||||
.get_leader_schedule_with_commitment(Some(epoch_info.absolute_slot), commitment)?;
|
||||
leader_schedule_epoch = epoch_info.epoch;
|
||||
}
|
||||
let tpu_address = get_leader_tpu(
|
||||
min(epoch_info.slot_index + 1, epoch_info.slots_in_epoch),
|
||||
leader_schedule.as_ref(),
|
||||
cluster_nodes.as_ref(),
|
||||
);
|
||||
|
||||
// Send all transactions
|
||||
let mut pending_transactions = HashMap::new();
|
||||
let num_transactions = transactions.len();
|
||||
for transaction in transactions {
|
||||
if let Some(tpu_address) = tpu_address {
|
||||
let wire_transaction =
|
||||
serialize(&transaction).expect("serialization should succeed");
|
||||
send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction);
|
||||
} else {
|
||||
let _result = rpc_client
|
||||
.send_transaction_with_config(
|
||||
&transaction,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
.ok();
|
||||
}
|
||||
pending_transactions.insert(transaction.signatures[0], transaction);
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Total Transactions sent",
|
||||
pending_transactions.len(),
|
||||
num_transactions
|
||||
));
|
||||
}
|
||||
|
||||
// Collect statuses for all the transactions, drop those that are confirmed
|
||||
while status_retries > 0 {
|
||||
status_retries -= 1;
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions confirmed",
|
||||
num_transactions - pending_transactions.len(),
|
||||
num_transactions
|
||||
));
|
||||
|
||||
let mut statuses = vec![];
|
||||
let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>();
|
||||
for pending_signatures_chunk in
|
||||
pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS - 1)
|
||||
{
|
||||
statuses.extend(
|
||||
rpc_client
|
||||
.get_signature_statuses_with_history(pending_signatures_chunk)?
|
||||
.value
|
||||
.into_iter(),
|
||||
);
|
||||
}
|
||||
assert_eq!(statuses.len(), pending_signatures.len());
|
||||
|
||||
for (signature, status) in pending_signatures.into_iter().zip(statuses.into_iter()) {
|
||||
if let Some(status) = status {
|
||||
if status.confirmations.is_none() || status.confirmations.unwrap() > 1 {
|
||||
let _ = pending_transactions.remove(&signature);
|
||||
}
|
||||
}
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions confirmed",
|
||||
num_transactions - pending_transactions.len(),
|
||||
num_transactions
|
||||
));
|
||||
}
|
||||
|
||||
if pending_transactions.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let slot = rpc_client.get_slot_with_commitment(commitment)?;
|
||||
if slot > last_valid_slot {
|
||||
break;
|
||||
}
|
||||
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
}
|
||||
|
||||
if send_retries == 0 {
|
||||
return Err("Transactions failed".into());
|
||||
}
|
||||
send_retries -= 1;
|
||||
|
||||
// Re-sign any failed transactions with a new blockhash and retry
|
||||
let (blockhash, _fee_calculator, new_last_valid_slot) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(commitment)?
|
||||
.value;
|
||||
last_valid_slot = new_last_valid_slot;
|
||||
transactions = vec![];
|
||||
for (_, mut transaction) in pending_transactions.into_iter() {
|
||||
transaction.try_sign(signer_keys, blockhash)?;
|
||||
transactions.push(transaction);
|
||||
}
|
||||
}
|
||||
}
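The function above interleaves three concerns: batching sends (to the current leader's TPU when known, otherwise via RPC), polling signature statuses in chunks, and re-signing whatever is still pending once the blockhash expires. A stripped-down sketch of just that control flow; every type and helper here is a stand-in so the skeleton compiles on its own, none of it is the crate's API:

use std::collections::HashMap;

type Signature = u64;
struct Transaction { signature: Signature }

fn send(_tx: &Transaction) { /* TPU or RPC send would go here */ }
fn confirmed(_sig: &Signature) -> bool { true } // placeholder status check
fn blockhash_expired() -> bool { false }        // placeholder last-valid-slot check
fn resign(tx: Transaction) -> Transaction { tx } // placeholder re-sign with a fresh blockhash

fn send_and_confirm(mut txs: Vec<Transaction>) -> Result<(), String> {
    let mut send_retries = 5;
    loop {
        // 1. Send every remaining transaction and remember it as pending.
        let mut pending: HashMap<Signature, Transaction> =
            txs.drain(..).map(|tx| { send(&tx); (tx.signature, tx) }).collect();

        // 2. Poll statuses; drop transactions once they are confirmed.
        let mut status_retries = 15;
        while status_retries > 0 {
            status_retries -= 1;
            pending.retain(|sig, _| !confirmed(sig));
            if pending.is_empty() {
                return Ok(());
            }
            if blockhash_expired() {
                break; // everything left must be re-signed before another round
            }
        }

        // 3. Give up after a fixed number of rounds, otherwise re-sign and retry.
        if send_retries == 0 {
            return Err("Transactions failed".into());
        }
        send_retries -= 1;
        txs = pending.into_values().map(resign).collect();
    }
}

fn main() {
    let txs: Vec<Transaction> = (0..3u64).map(|i| Transaction { signature: i }).collect();
    assert!(send_and_confirm(txs).is_ok());
}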
|
||||
|
||||
fn process_deploy(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
program_location: &str,
|
||||
address: Option<SignerIndex>,
|
||||
use_deprecated_loader: bool,
|
||||
allow_excessive_balance: bool,
|
||||
) -> ProcessResult {
|
||||
const WORDS: usize = 12;
|
||||
// Create ephemeral keypair to use for program address, if not provided
|
||||
let mnemonic = Mnemonic::new(MnemonicType::for_word_count(WORDS)?, Language::English);
|
||||
let seed = Seed::new(&mnemonic, "");
|
||||
let new_keypair = keypair_from_seed(seed.as_bytes())?;
|
||||
|
||||
let result = do_process_deploy(
|
||||
rpc_client,
|
||||
config,
|
||||
program_location,
|
||||
address,
|
||||
use_deprecated_loader,
|
||||
allow_excessive_balance,
|
||||
new_keypair,
|
||||
);
|
||||
|
||||
if result.is_err() && address.is_none() {
|
||||
let phrase: &str = mnemonic.phrase();
|
||||
let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
|
||||
eprintln!(
|
||||
"{}\nTo reuse this address, recover the ephemeral keypair file with",
|
||||
divider
|
||||
);
|
||||
eprintln!(
|
||||
"`solana-keygen recover` and the following {}-word seed phrase,",
|
||||
WORDS
|
||||
);
|
||||
eprintln!(
|
||||
"then pass it as the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy ...`\n{}\n{}\n{}",
|
||||
divider, phrase, divider
|
||||
);
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn do_process_deploy(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
program_location: &str,
|
||||
address: Option<SignerIndex>,
|
||||
use_deprecated_loader: bool,
|
||||
allow_excessive_balance: bool,
|
||||
new_keypair: Keypair,
|
||||
) -> ProcessResult {
|
||||
let program_id = if let Some(i) = address {
|
||||
config.signers[i]
|
||||
} else {
|
||||
&new_keypair
|
||||
};
|
||||
let mut file = File::open(program_location).map_err(|err| {
|
||||
CliError::DynamicProgramError(format!("Unable to open program file: {}", err))
|
||||
})?;
|
||||
let mut program_data = Vec::new();
|
||||
file.read_to_end(&mut program_data).map_err(|err| {
|
||||
CliError::DynamicProgramError(format!("Unable to read program file: {}", err))
|
||||
})?;
|
||||
|
||||
EbpfVm::create_executable_from_elf(&program_data, Some(|x| bpf_verifier::check(x, false)))
|
||||
.map_err(|err| CliError::DynamicProgramError(format!("ELF error: {}", err)))?;
|
||||
|
||||
let loader_id = if use_deprecated_loader {
|
||||
bpf_loader_deprecated::id()
|
||||
} else {
|
||||
bpf_loader::id()
|
||||
};
|
||||
|
||||
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(program_data.len())?;
|
||||
let signers = [config.signers[0], program_id];
|
||||
|
||||
// Check program account to see if partial initialization has occurred
|
||||
let (initial_instructions, balance_needed) = if let Some(account) = rpc_client
|
||||
.get_account_with_commitment(&program_id.pubkey(), config.commitment)?
|
||||
.value
|
||||
{
|
||||
let mut instructions: Vec<Instruction> = vec![];
|
||||
let mut balance_needed = 0;
|
||||
if account.executable {
|
||||
return Err(CliError::DynamicProgramError(
|
||||
"Program account is already executable".to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
if account.owner != loader_id && !system_program::check_id(&account.owner) {
|
||||
return Err(CliError::DynamicProgramError(
|
||||
"Program account is already owned by another account".to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
if account.data.is_empty() && system_program::check_id(&account.owner) {
|
||||
instructions.push(system_instruction::allocate(
|
||||
&program_id.pubkey(),
|
||||
program_data.len() as u64,
|
||||
));
|
||||
if account.owner != loader_id {
|
||||
instructions.push(system_instruction::assign(&program_id.pubkey(), &loader_id));
|
||||
}
|
||||
}
|
||||
if account.lamports < minimum_balance {
|
||||
let balance = minimum_balance - account.lamports;
|
||||
instructions.push(system_instruction::transfer(
|
||||
&config.signers[0].pubkey(),
|
||||
&program_id.pubkey(),
|
||||
balance,
|
||||
));
|
||||
balance_needed = balance;
|
||||
} else if account.lamports > minimum_balance
|
||||
&& system_program::check_id(&account.owner)
|
||||
&& !allow_excessive_balance
|
||||
{
|
||||
return Err(CliError::DynamicProgramError(format!(
|
||||
"Program account has a balance: {:?}; it may already be in use",
|
||||
Sol(account.lamports)
|
||||
))
|
||||
.into());
|
||||
}
|
||||
(instructions, balance_needed)
|
||||
} else {
|
||||
(
|
||||
vec![system_instruction::create_account(
|
||||
&config.signers[0].pubkey(),
|
||||
&program_id.pubkey(),
|
||||
minimum_balance,
|
||||
program_data.len() as u64,
|
||||
&loader_id,
|
||||
)],
|
||||
minimum_balance,
|
||||
)
|
||||
};
|
||||
let initial_message = if !initial_instructions.is_empty() {
|
||||
Some(Message::new(
|
||||
&initial_instructions,
|
||||
Some(&config.signers[0].pubkey()),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Build transactions to calculate fees
|
||||
let mut messages: Vec<&Message> = Vec::new();
|
||||
|
||||
if let Some(message) = &initial_message {
|
||||
messages.push(message);
|
||||
}
|
||||
|
||||
let mut write_messages = vec![];
|
||||
for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) {
|
||||
let instruction = loader_instruction::write(
|
||||
&program_id.pubkey(),
|
||||
&loader_id,
|
||||
(i * DATA_CHUNK_SIZE) as u32,
|
||||
chunk.to_vec(),
|
||||
);
|
||||
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
write_messages.push(message);
|
||||
}
|
||||
let mut write_message_refs = vec![];
|
||||
for message in write_messages.iter() {
|
||||
write_message_refs.push(message);
|
||||
}
|
||||
messages.append(&mut write_message_refs);
|
||||
|
||||
let instruction = loader_instruction::finalize(&program_id.pubkey(), &loader_id);
|
||||
let finalize_message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
messages.push(&finalize_message);
|
||||
|
||||
let (blockhash, fee_calculator, _) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(config.commitment)?
|
||||
.value;
|
||||
|
||||
check_account_for_spend_multiple_fees_with_commitment(
|
||||
rpc_client,
|
||||
&config.signers[0].pubkey(),
|
||||
balance_needed,
|
||||
&fee_calculator,
|
||||
&messages,
|
||||
config.commitment,
|
||||
)?;
|
||||
|
||||
if let Some(message) = initial_message {
|
||||
trace!("Creating or modifying program account");
|
||||
let num_required_signatures = message.header.num_required_signatures;
|
||||
|
||||
let mut initial_transaction = Transaction::new_unsigned(message);
|
||||
// Most of the initial_transaction combinations require both the fee-payer and new program
|
||||
// account to sign the transaction. One (transfer) only requires the fee-payer signature.
|
||||
// This check is to ensure signing does not fail on a KeypairPubkeyMismatch error from an
|
||||
// extraneous signature.
|
||||
if num_required_signatures == 2 {
|
||||
initial_transaction.try_sign(&signers, blockhash)?;
|
||||
} else {
|
||||
initial_transaction.try_sign(&[signers[0]], blockhash)?;
|
||||
}
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config(
|
||||
&initial_transaction,
|
||||
config.commitment,
|
||||
config.send_transaction_config,
|
||||
);
|
||||
log_instruction_custom_error::<SystemError>(result, &config).map_err(|err| {
|
||||
CliError::DynamicProgramError(format!("Program account allocation failed: {}", err))
|
||||
})?;
|
||||
}
|
||||
|
||||
let (blockhash, _, last_valid_slot) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(config.commitment)?
|
||||
.value;
|
||||
|
||||
let mut write_transactions = vec![];
|
||||
for message in write_messages.into_iter() {
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&signers, blockhash)?;
|
||||
write_transactions.push(tx);
|
||||
}
|
||||
|
||||
trace!("Writing program data");
|
||||
send_and_confirm_transactions_with_spinner(
|
||||
&rpc_client,
|
||||
write_transactions,
|
||||
&signers,
|
||||
config.commitment,
|
||||
last_valid_slot,
|
||||
)
|
||||
.map_err(|err| {
|
||||
CliError::DynamicProgramError(format!("Data writes to program account failed: {}", err))
|
||||
})?;
|
||||
|
||||
let (blockhash, _, _) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(config.commitment)?
|
||||
.value;
|
||||
let mut finalize_tx = Transaction::new_unsigned(finalize_message);
|
||||
finalize_tx.try_sign(&signers, blockhash)?;
|
||||
|
||||
trace!("Finalizing program account");
|
||||
rpc_client
|
||||
.send_and_confirm_transaction_with_spinner_and_config(
|
||||
&finalize_tx,
|
||||
config.commitment,
|
||||
RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
.map_err(|e| {
|
||||
CliError::DynamicProgramError(format!("Finalizing program account failed: {}", e))
|
||||
})?;
|
||||
|
||||
Ok(json!({
|
||||
"programId": format!("{}", program_id.pubkey()),
|
||||
})
|
||||
.to_string())
|
||||
}
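The write loop in do_process_deploy above splits the ELF into DATA_CHUNK_SIZE-byte slices and tags each write instruction with its byte offset (i * DATA_CHUNK_SIZE), so the chunks can be confirmed in any order. A self-contained sketch of just that offset bookkeeping; the constant value 229 comes from the diff above, everything else is illustrative:

// Keep program chunks small enough to fit in a single packet (value taken from the diff).
const DATA_CHUNK_SIZE: usize = 229;

// Pair every chunk of the program data with the byte offset it must be written at.
fn chunk_offsets(program_data: &[u8]) -> Vec<(u32, &[u8])> {
    program_data
        .chunks(DATA_CHUNK_SIZE)
        .zip(0u32..)
        .map(|(chunk, i)| (i * DATA_CHUNK_SIZE as u32, chunk))
        .collect()
}

fn main() {
    let data = vec![0u8; 500]; // pretend ELF payload
    let writes = chunk_offsets(&data);
    assert_eq!(writes.len(), 3); // 229 + 229 + 42 bytes
    assert_eq!(writes[2].0, 458);
    assert_eq!(writes[2].1.len(), 42);
}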
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn process_transfer(
|
||||
rpc_client: &RpcClient,
|
||||
@@ -1165,7 +1548,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::Inflation(inflation_subcommand) => {
|
||||
process_inflation_subcommand(&rpc_client, config, inflation_subcommand)
|
||||
}
|
||||
CliCommand::LeaderSchedule { epoch } => process_leader_schedule(&rpc_client, *epoch),
|
||||
CliCommand::LeaderSchedule => process_leader_schedule(&rpc_client),
|
||||
CliCommand::LiveSlots => process_live_slots(&config),
|
||||
CliCommand::Logs { filter } => process_logs(&config, filter),
|
||||
CliCommand::Ping {
|
||||
@@ -1173,18 +1556,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
blockhash,
|
||||
print_timestamp,
|
||||
} => process_ping(
|
||||
&rpc_client,
|
||||
config,
|
||||
*lamports,
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
blockhash,
|
||||
*print_timestamp,
|
||||
),
|
||||
} => process_ping(&rpc_client, config, *lamports, interval, count, timeout),
|
||||
CliCommand::ShowBlockProduction { epoch, slot_limit } => {
|
||||
process_show_block_production(&rpc_client, config, *epoch, *slot_limit)
|
||||
}
|
||||
@@ -1198,9 +1570,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*use_lamports_unit,
|
||||
vote_account_pubkeys.as_deref(),
|
||||
),
|
||||
CliCommand::WaitForMaxStake { max_stake_percent } => {
|
||||
process_wait_for_max_stake(&rpc_client, config, *max_stake_percent)
|
||||
}
|
||||
CliCommand::ShowValidators { use_lamports_unit } => {
|
||||
process_show_validators(&rpc_client, config, *use_lamports_unit)
|
||||
}
|
||||
@@ -1302,9 +1671,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*use_deprecated_loader,
|
||||
*allow_excessive_balance,
|
||||
),
|
||||
CliCommand::Program(program_subcommand) => {
|
||||
process_program_subcommand(&rpc_client, config, program_subcommand)
|
||||
}
|
||||
|
||||
// Stake Commands
|
||||
|
||||
@@ -1790,7 +2156,6 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.feature_subcommands()
|
||||
.inflation_subcommands()
|
||||
.nonce_subcommands()
|
||||
.program_subcommands()
|
||||
.stake_subcommands()
|
||||
.subcommand(
|
||||
SubCommand::with_name("airdrop")
|
||||
@@ -1939,7 +2304,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.takes_value(false)
|
||||
.help("Use the designated program id, even if the account already holds a large balance of SOL")
|
||||
)
|
||||
.arg(commitment_arg_with_default("singleGossip")),
|
||||
.arg(commitment_arg_with_default("max")),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("pay")
|
||||
@@ -2044,7 +2409,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use serde_json::{json, Value};
|
||||
use serde_json::Value;
|
||||
use solana_client::{
|
||||
blockhash_query,
|
||||
mock_sender::SIGNATURE,
|
||||
@@ -2053,7 +2418,7 @@ mod tests {
|
||||
};
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, read_keypair_file, write_keypair_file, Keypair, Presigner},
|
||||
signature::{keypair_from_seed, read_keypair_file, write_keypair_file, Presigner},
|
||||
transaction::TransactionError,
|
||||
};
|
||||
use std::path::PathBuf;
|
||||
@@ -2368,11 +2733,9 @@ mod tests {
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
fn test_cli_process_command() {
|
||||
// Success cases
|
||||
let mut config = CliConfig {
|
||||
rpc_client: Some(RpcClient::new_mock("succeeds".to_string())),
|
||||
json_rpc_url: "http://127.0.0.1:8899".to_string(),
|
||||
..CliConfig::default()
|
||||
};
|
||||
let mut config = CliConfig::default();
|
||||
config.rpc_client = Some(RpcClient::new_mock("succeeds".to_string()));
|
||||
config.json_rpc_url = "http://127.0.0.1:8899".to_string();
|
||||
|
||||
let keypair = Keypair::new();
|
||||
let pubkey = keypair.pubkey().to_string();
|
||||
|
@@ -11,7 +11,6 @@ use solana_clap_utils::{
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
keypair::DefaultSigner,
|
||||
offline::{blockhash_arg, BLOCKHASH_ARG},
|
||||
};
|
||||
use solana_cli_output::{
|
||||
display::{
|
||||
@@ -37,7 +36,6 @@ use solana_sdk::{
|
||||
clock::{self, Clock, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_schedule::Epoch,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::{self, Pubkey},
|
||||
@@ -58,7 +56,7 @@ use std::{
|
||||
Arc,
|
||||
},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant, SystemTime, UNIX_EPOCH},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
static CHECK_MARK: Emoji = Emoji("✅ ", "");
|
||||
@@ -132,17 +130,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.help("Slot number of the block to query")
|
||||
)
|
||||
)
|
||||
.subcommand(SubCommand::with_name("leader-schedule")
|
||||
.about("Display leader schedule")
|
||||
.arg(
|
||||
Arg::with_name("epoch")
|
||||
.long("epoch")
|
||||
.takes_value(true)
|
||||
.value_name("EPOCH")
|
||||
.validator(is_epoch)
|
||||
.help("Epoch to show leader schedule for. (default: current)")
|
||||
)
|
||||
)
|
||||
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule"))
|
||||
.subcommand(
|
||||
SubCommand::with_name("epoch-info")
|
||||
.about("Get information about the current epoch")
|
||||
@@ -224,13 +212,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.help("Stop after submitting count transactions"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("print_timestamp")
|
||||
.short("D")
|
||||
.long("print-timestamp")
|
||||
.takes_value(false)
|
||||
.help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("lamports")
|
||||
.long("lamports")
|
||||
@@ -249,7 +230,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.default_value("15")
|
||||
.help("Wait up to timeout seconds for transaction confirmation"),
|
||||
)
|
||||
.arg(blockhash_arg())
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -361,17 +341,6 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.help("Display the full transactions"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("wait-for-max-stake")
|
||||
.about("Wait for the max stake of any one node to drop below a percentage of total.")
|
||||
.arg(
|
||||
Arg::with_name("max_percent")
|
||||
.long("max-percent")
|
||||
.value_name("PERCENT")
|
||||
.takes_value(true)
|
||||
.index(1),
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -405,16 +374,12 @@ pub fn parse_cluster_ping(
|
||||
None
|
||||
};
|
||||
let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
|
||||
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
|
||||
let print_timestamp = matches.is_present("print_timestamp");
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Ping {
|
||||
lamports,
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
blockhash,
|
||||
print_timestamp,
|
||||
},
|
||||
signers: vec![default_signer.signer_from_path(matches, wallet_manager)?],
|
||||
})
|
||||
@@ -723,23 +688,9 @@ pub fn process_first_available_block(rpc_client: &RpcClient) -> ProcessResult {
|
||||
Ok(format!("{}", first_available_block))
|
||||
}
|
||||
|
||||
pub fn parse_leader_schedule(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let epoch = value_of(matches, "epoch");
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::LeaderSchedule { epoch },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
pub fn process_leader_schedule(rpc_client: &RpcClient, epoch: Option<Epoch>) -> ProcessResult {
|
||||
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
|
||||
let epoch_info = rpc_client.get_epoch_info()?;
|
||||
let epoch = epoch.unwrap_or(epoch_info.epoch);
|
||||
if epoch > epoch_info.epoch {
|
||||
return Err(format!("Epoch {} is in the future", epoch).into());
|
||||
}
|
||||
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
|
||||
let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
|
||||
|
||||
let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot_in_epoch))?;
|
||||
if leader_schedule.is_none() {
|
||||
@@ -824,9 +775,8 @@ pub fn process_get_block(
|
||||
format!(
|
||||
"◎{:<19.9} {:>13.9}%",
|
||||
lamports_to_sol(reward.post_balance),
|
||||
(reward.lamports.abs() as f64
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64))
|
||||
* 100.0
|
||||
reward.lamports.abs() as f64
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64)
|
||||
)
|
||||
}
|
||||
);
|
||||
@@ -911,7 +861,7 @@ pub fn process_show_block_production(
|
||||
slot_limit: Option<u64>,
|
||||
) -> ProcessResult {
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::max())?;
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::root())?;
|
||||
|
||||
let epoch = epoch.unwrap_or(epoch_info.epoch);
|
||||
if epoch > epoch_info.epoch {
|
||||
@@ -1099,8 +1049,6 @@ pub fn process_ping(
|
||||
interval: &Duration,
|
||||
count: &Option<u64>,
|
||||
timeout: &Duration,
|
||||
fixed_blockhash: &Option<Hash>,
|
||||
print_timestamp: bool,
|
||||
) -> ProcessResult {
|
||||
println_name_value("Source Account:", &config.signers[0].pubkey().to_string());
|
||||
println!();
|
||||
@@ -1118,21 +1066,9 @@ pub fn process_ping(
|
||||
let (mut blockhash, mut fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let mut blockhash_transaction_count = 0;
|
||||
let mut blockhash_acquired = Instant::now();
|
||||
if let Some(fixed_blockhash) = fixed_blockhash {
|
||||
let blockhash_origin = if *fixed_blockhash != Hash::default() {
|
||||
blockhash = *fixed_blockhash;
|
||||
"supplied from cli arguments"
|
||||
} else {
|
||||
"fetched from cluster"
|
||||
};
|
||||
println!(
|
||||
"Fixed blockhash is used: {} ({})",
|
||||
blockhash, blockhash_origin
|
||||
);
|
||||
}
|
||||
'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) {
|
||||
let now = Instant::now();
|
||||
if fixed_blockhash.is_none() && now.duration_since(blockhash_acquired).as_secs() > 60 {
|
||||
if now.duration_since(blockhash_acquired).as_secs() > 60 {
|
||||
// Fetch a new blockhash every minute
|
||||
let (new_blockhash, new_fee_calculator) = rpc_client.get_new_blockhash(&blockhash)?;
|
||||
blockhash = new_blockhash;
|
||||
@@ -1163,18 +1099,6 @@ pub fn process_ping(
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, blockhash)?;
|
||||
|
||||
let timestamp = || {
|
||||
let micros = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_micros();
|
||||
if print_timestamp {
|
||||
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
|
||||
} else {
|
||||
format!("")
|
||||
}
|
||||
};
|
||||
|
||||
match rpc_client.send_transaction(&tx) {
|
||||
Ok(signature) => {
|
||||
let transaction_sent = Instant::now();
|
||||
@@ -1188,20 +1112,15 @@ pub fn process_ping(
|
||||
let elapsed_time_millis = elapsed_time.as_millis() as u64;
|
||||
confirmation_time.push_back(elapsed_time_millis);
|
||||
println!(
|
||||
"{}{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
|
||||
timestamp(),
|
||||
"{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
|
||||
CHECK_MARK, lamports, seq, elapsed_time_millis, signature
|
||||
);
|
||||
confirmed_count += 1;
|
||||
}
|
||||
Err(err) => {
|
||||
println!(
|
||||
"{}{}Transaction failed: seq={:<3} error={:?} signature={}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
err,
|
||||
signature
|
||||
"{}Transaction failed: seq={:<3} error={:?} signature={}",
|
||||
CROSS_MARK, seq, err, signature
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1210,11 +1129,8 @@ pub fn process_ping(
|
||||
|
||||
if elapsed_time >= *timeout {
|
||||
println!(
|
||||
"{}{}Confirmation timeout: seq={:<3} signature={}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
signature
|
||||
"{}Confirmation timeout: seq={:<3} signature={}",
|
||||
CROSS_MARK, seq, signature
|
||||
);
|
||||
break;
|
||||
}
|
||||
@@ -1232,11 +1148,8 @@ pub fn process_ping(
|
||||
}
|
||||
Err(err) => {
|
||||
println!(
|
||||
"{}{}Submit failed: seq={:<3} error={:?}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
err
|
||||
"{}Submit failed: seq={:<3} error={:?}",
|
||||
CROSS_MARK, seq, err
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1569,16 +1482,6 @@ pub fn process_show_stakes(
|
||||
.formatted_string(&CliStakeVec::new(stake_accounts)))
|
||||
}
|
||||
|
||||
pub fn process_wait_for_max_stake(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
max_stake_percent: f32,
|
||||
) -> ProcessResult {
|
||||
let now = std::time::Instant::now();
|
||||
rpc_client.wait_for_max_stake(config.commitment, max_stake_percent)?;
|
||||
Ok(format!("Done waiting, took: {}s", now.elapsed().as_secs()))
|
||||
}
|
||||
|
||||
pub fn process_show_validators(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@@ -1739,7 +1642,6 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::cli::{app, parse_command};
|
||||
use solana_sdk::signature::{write_keypair, Keypair};
|
||||
use std::str::FromStr;
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
fn make_tmp_file() -> (String, NamedTempFile) {
|
||||
@@ -1875,11 +1777,8 @@ mod tests {
|
||||
"2",
|
||||
"-t",
|
||||
"3",
|
||||
"-D",
|
||||
"--commitment",
|
||||
"max",
|
||||
"--blockhash",
|
||||
"4CCNp28j6AhGq7PkjPDP4wbQWBS8LLbQin2xV5n8frKX",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_ping, &default_signer, &mut None).unwrap(),
|
||||
@@ -1889,10 +1788,6 @@ mod tests {
|
||||
interval: Duration::from_secs(1),
|
||||
count: Some(2),
|
||||
timeout: Duration::from_secs(3),
|
||||
blockhash: Some(
|
||||
Hash::from_str("4CCNp28j6AhGq7PkjPDP4wbQWBS8LLbQin2xV5n8frKX").unwrap()
|
||||
),
|
||||
print_timestamp: true,
|
||||
},
|
||||
signers: vec![default_keypair.into()],
|
||||
}
|
||||
|
@@ -26,7 +26,6 @@ pub mod cluster_query;
|
||||
pub mod feature;
|
||||
pub mod inflation;
|
||||
pub mod nonce;
|
||||
pub mod program;
|
||||
pub mod send_tpu;
|
||||
pub mod spend_utils;
|
||||
pub mod stake;
|
||||
|
@@ -168,7 +168,6 @@ pub fn parse_args<'a>(
|
||||
let CliCommandInfo { command, signers } =
|
||||
parse_command(&matches, &default_signer, &mut wallet_manager)?;
|
||||
|
||||
let verbose = matches.is_present("verbose");
|
||||
let output_format = matches
|
||||
.value_of("output_format")
|
||||
.map(|value| match value {
|
||||
@@ -176,22 +175,13 @@ pub fn parse_args<'a>(
|
||||
"json-compact" => OutputFormat::JsonCompact,
|
||||
_ => unreachable!(),
|
||||
})
|
||||
.unwrap_or(if verbose {
|
||||
OutputFormat::DisplayVerbose
|
||||
} else {
|
||||
OutputFormat::Display
|
||||
});
|
||||
.unwrap_or(OutputFormat::Display);
|
||||
|
||||
let commitment = {
|
||||
let mut sub_matches = matches;
|
||||
while let Some(subcommand_name) = sub_matches.subcommand_name() {
|
||||
sub_matches = sub_matches
|
||||
.subcommand_matches(subcommand_name)
|
||||
.expect("subcommand_matches");
|
||||
}
|
||||
commitment_of(sub_matches, COMMITMENT_ARG.long)
|
||||
}
|
||||
.unwrap_or_default();
|
||||
let commitment = matches
|
||||
.subcommand_name()
|
||||
.and_then(|name| matches.subcommand_matches(name))
|
||||
.and_then(|sub_matches| commitment_of(sub_matches, COMMITMENT_ARG.long))
|
||||
.unwrap_or_default();
|
||||
|
||||
let address_labels = if matches.is_present("no_address_labels") {
|
||||
HashMap::new()
|
||||
@@ -208,13 +198,10 @@ pub fn parse_args<'a>(
|
||||
keypair_path: default_signer_path,
|
||||
rpc_client: None,
|
||||
rpc_timeout,
|
||||
verbose,
|
||||
verbose: matches.is_present("verbose"),
|
||||
output_format,
|
||||
commitment,
|
||||
send_transaction_config: RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
send_transaction_config: RpcSendTransactionConfig::default(),
|
||||
address_labels,
|
||||
},
|
||||
signers,
|
||||
|
cli/src/program.rs (1540 changed lines): file diff suppressed because it is too large
@@ -1528,7 +1528,6 @@ pub fn build_stake_state(
CliStakeState {
stake_type: CliStakeType::Stake,
account_balance,
credits_observed: Some(stake.credits_observed),
delegated_stake: Some(stake.delegation.stake),
delegated_vote_account_address: if stake.delegation.voter_pubkey
!= Pubkey::default()
@@ -1580,7 +1579,6 @@ pub fn build_stake_state(
CliStakeState {
stake_type: CliStakeType::Initialized,
account_balance,
credits_observed: Some(0),
authorized: Some(authorized.into()),
lockup,
use_lamports_unit,
@@ -1647,37 +1645,29 @@ pub(crate) fn fetch_epoch_rewards(
let previous_epoch_rewards = first_confirmed_block.rewards;

if let Some((effective_slot, epoch_end_time, epoch_rewards)) = epoch_info {
let wallclock_epoch_duration = if epoch_end_time > epoch_start_time {
Some(
{ Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) }
.to_std()?
.as_secs_f64(),
)
} else {
None
};
let wallclock_epoch_duration =
{ Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) }
.to_std()?
.as_secs_f64();

let wallclock_epochs_per_year =
(SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;

if let Some(reward) = epoch_rewards
.into_iter()
.find(|reward| reward.pubkey == address.to_string())
{
if reward.post_balance > reward.lamports.try_into().unwrap_or(0) {
let rate_change = reward.lamports.abs() as f64
let balance_increase_percent = reward.lamports.abs() as f64
/ (reward.post_balance as f64 - reward.lamports as f64);

let apr = wallclock_epoch_duration.map(|wallclock_epoch_duration| {
let wallclock_epochs_per_year =
(SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;
rate_change * wallclock_epochs_per_year
});

all_epoch_rewards.push(CliEpochReward {
epoch,
effective_slot,
amount: reward.lamports.abs() as u64,
post_balance: reward.post_balance,
percent_change: rate_change * 100.0,
apr: apr.map(|r| r * 100.0),
percent_change: balance_increase_percent,
apr: balance_increase_percent * wallclock_epochs_per_year,
});
}
}
@@ -1730,12 +1720,11 @@ pub fn process_show_stake_account(

if state.stake_type == CliStakeType::Stake {
if let Some(activation_epoch) = state.activation_epoch {
let rewards =
fetch_epoch_rewards(rpc_client, stake_account_address, activation_epoch);
match rewards {
Ok(rewards) => state.epoch_rewards = Some(rewards),
Err(error) => eprintln!("Failed to fetch epoch rewards: {:?}", error),
};
state.epoch_rewards = Some(fetch_epoch_rewards(
rpc_client,
stake_account_address,
activation_epoch,
)?);
}
}
Ok(config.output_format.formatted_string(&state))
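The reward hunk above boils down to two numbers: the fractional balance increase for the epoch and its annualization by wall-clock epochs per year (note the two interleaved variants also disagree on whether the stored `percent_change`/`apr` values are raw fractions or pre-scaled by 100). A standalone sketch of the arithmetic, with illustrative names rather than the repository's types:

/// Epoch reward rates as a standalone sketch. `lamports` is the reward credited
/// for the epoch, `post_balance` the balance after crediting, and
/// `epoch_duration_secs` the wall-clock length of the epoch.
fn epoch_reward_rates(lamports: i64, post_balance: u64, epoch_duration_secs: f64) -> (f64, f64) {
    const SECONDS_PER_DAY: u64 = 24 * 60 * 60;
    // Fraction by which the balance grew (pre-reward balance = post_balance - lamports).
    let rate_change = lamports.abs() as f64 / (post_balance as f64 - lamports as f64);
    // Scale one epoch's growth to a year's worth of epochs.
    // (The hunk above uses SECONDS_PER_DAY * 356; a 365-day year is used here.)
    let epochs_per_year = (SECONDS_PER_DAY * 365) as f64 / epoch_duration_secs;
    (rate_change, rate_change * epochs_per_year)
}

fn main() {
    // Example: 1 lamport reward on a 1_001-lamport post balance over a two-day epoch.
    let (rate, apr) = epoch_reward_rates(1, 1_001, 2.0 * 86_400.0);
    println!("epoch gain: {:.3}%  apr: {:.1}%", rate * 100.0, apr * 100.0);
}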
@@ -685,27 +685,22 @@ pub fn process_show_vote_account(
for vote in &vote_state.votes {
votes.push(vote.into());
}
for (epoch, credits, prev_credits) in vote_state.epoch_credits().iter().copied() {
for (epoch, credits, prev_credits) in vote_state.epoch_credits() {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
epoch_voting_history.push(CliEpochVotingHistory {
epoch,
epoch: *epoch,
slots_in_epoch,
credits_earned,
credits,
prev_credits,
});
}
}

let epoch_rewards = match crate::stake::fetch_epoch_rewards(rpc_client, vote_account_address, 1)
{
Ok(rewards) => Some(rewards),
Err(error) => {
eprintln!("Failed to fetch epoch rewards: {:?}", error);
None
}
};
let epoch_rewards = Some(crate::stake::fetch_epoch_rewards(
rpc_client,
vote_account_address,
1,
)?);

let vote_account_data = CliVoteAccount {
account_balance: vote_account.lamports,
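The paired loop headers above differ only in how the credit tuples are borrowed: iterating `epoch_credits()` directly yields references, which is why that variant needs `*epoch`, while `.iter().copied()` yields owned `(epoch, credits, prev_credits)` tuples. A minimal illustration under the assumption that the entries are plain `(u64, u64, u64)` triples:

fn main() {
    let epoch_credits: Vec<(u64, u64, u64)> = vec![(0, 10, 0), (1, 25, 10)];

    // Borrowed iteration: the pattern binds &u64 references, so printing the
    // epoch needs an explicit dereference (the subtraction still works through
    // the Sub impls for references).
    for (epoch, credits, prev_credits) in &epoch_credits {
        println!("epoch {}: earned {}", *epoch, credits - prev_credits);
    }

    // Copied iteration: each item is an owned tuple, so the bindings are plain values.
    for (epoch, credits, prev_credits) in epoch_credits.iter().copied() {
        println!("epoch {}: earned {}", epoch, credits - prev_credits);
    }
}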
cli/tests/deploy.rs (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
use serde_json::Value;
|
||||
use solana_cli::cli::{process_command, CliCommand, CliConfig};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::test_validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
bpf_loader,
|
||||
commitment_config::CommitmentConfig,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{
|
||||
fs::{remove_dir_all, File},
|
||||
io::Read,
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::mpsc::channel,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_cli_deploy_program() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
pathbuf.push("tests");
|
||||
pathbuf.push("fixtures");
|
||||
pathbuf.push("noop");
|
||||
pathbuf.set_extension("so");
|
||||
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut file = File::open(pathbuf.to_str().unwrap()).unwrap();
|
||||
let mut program_data = Vec::new();
|
||||
file.read_to_end(&mut program_data).unwrap();
|
||||
let minimum_balance_for_rent_exemption = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(program_data.len())
|
||||
.unwrap();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
let keypair = Keypair::new();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.command = CliCommand::Airdrop {
|
||||
faucet_host: None,
|
||||
faucet_port: faucet_addr.port(),
|
||||
pubkey: None,
|
||||
lamports: 4 * minimum_balance_for_rent_exemption, // min balance for rent exemption for three programs + leftover for tx processing
|
||||
};
|
||||
config.signers = vec![&keypair];
|
||||
process_command(&config).unwrap();
|
||||
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: None,
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
let program_id = Pubkey::from_str(&program_id_str).unwrap();
|
||||
let account0 = rpc_client
|
||||
.get_account_with_commitment(&program_id, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account0.owner, bpf_loader::id());
|
||||
assert_eq!(account0.executable, true);
|
||||
|
||||
let mut file = File::open(pathbuf.to_str().unwrap().to_string()).unwrap();
|
||||
let mut elf = Vec::new();
|
||||
file.read_to_end(&mut elf).unwrap();
|
||||
|
||||
assert_eq!(account0.data, elf);
|
||||
|
||||
// Test custom address
|
||||
let custom_address_keypair = Keypair::new();
|
||||
config.signers = vec![&keypair, &custom_address_keypair];
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let account1 = rpc_client
|
||||
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account1.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account1.owner, bpf_loader::id());
|
||||
assert_eq!(account1.executable, true);
|
||||
assert_eq!(account0.data, account1.data);
|
||||
|
||||
// Attempt to redeploy to the same address
|
||||
process_command(&config).unwrap_err();
|
||||
|
||||
// Attempt to deploy to account with excess balance
|
||||
let custom_address_keypair = Keypair::new();
|
||||
config.command = CliCommand::Airdrop {
|
||||
faucet_host: None,
|
||||
faucet_port: faucet_addr.port(),
|
||||
pubkey: None,
|
||||
lamports: 2 * minimum_balance_for_rent_exemption, // Anything over minimum_balance_for_rent_exemption should trigger err
|
||||
};
|
||||
config.signers = vec![&custom_address_keypair];
|
||||
process_command(&config).unwrap();
|
||||
|
||||
config.signers = vec![&keypair, &custom_address_keypair];
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
process_command(&config).unwrap_err();
|
||||
|
||||
// Use forcing parameter to deploy to account with excess balance
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: true,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let account2 = rpc_client
|
||||
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account2.lamports, 2 * minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account2.owner, bpf_loader::id());
|
||||
assert_eq!(account2.executable, true);
|
||||
assert_eq!(account0.data, account2.data);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
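The new test (and several of the tests that follow) extracts the deployed program id from the command's JSON response with a chain of `as_object()/get("programId")/as_str()` unwraps. A small hedged helper showing the same extraction with `Option` plumbing instead of unwraps; the `programId` field name is taken from the test, the helper itself is not part of the repository:

use serde_json::Value;

/// Pull the "programId" string out of a deploy response shaped like
/// {"programId": "..."} without unwrapping at every step.
fn program_id_from_response(response: &str) -> Option<String> {
    let json: Value = serde_json::from_str(response).ok()?;
    Some(json.get("programId")?.as_str()?.to_string())
}

fn main() {
    let sample = r#"{"programId":"BPFLoader1111111111111111111111111111111111"}"#;
    assert_eq!(
        program_id_from_response(sample).as_deref(),
        Some("BPFLoader1111111111111111111111111111111111")
    );
}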
@@ -9,6 +9,7 @@ use solana_client::{
|
||||
nonce_utils,
|
||||
rpc_client::RpcClient,
|
||||
};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::test_validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
@@ -18,53 +19,69 @@ use solana_sdk::{
|
||||
signature::{keypair_from_seed, Keypair, Signer},
|
||||
system_program,
|
||||
};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::{fs::remove_dir_all, sync::mpsc::channel};
|
||||
|
||||
#[test]
|
||||
fn test_nonce() {
|
||||
let mint_keypair = Keypair::new();
|
||||
full_battery_tests(
|
||||
TestValidator::with_no_fees(mint_keypair.pubkey()),
|
||||
mint_keypair,
|
||||
None,
|
||||
false,
|
||||
);
|
||||
solana_logger::setup();
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
|
||||
full_battery_tests(leader_data, alice, None, false);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nonce_with_seed() {
|
||||
let mint_keypair = Keypair::new();
|
||||
full_battery_tests(
|
||||
TestValidator::with_no_fees(mint_keypair.pubkey()),
|
||||
mint_keypair,
|
||||
Some(String::from("seed")),
|
||||
false,
|
||||
);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
|
||||
full_battery_tests(leader_data, alice, Some(String::from("seed")), false);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nonce_with_authority() {
|
||||
let mint_keypair = Keypair::new();
|
||||
full_battery_tests(
|
||||
TestValidator::with_no_fees(mint_keypair.pubkey()),
|
||||
mint_keypair,
|
||||
None,
|
||||
true,
|
||||
);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
|
||||
full_battery_tests(leader_data, alice, None, true);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
fn full_battery_tests(
|
||||
test_validator: TestValidator,
|
||||
mint_keypair: Keypair,
|
||||
leader_data: ContactInfo,
|
||||
alice: Keypair,
|
||||
seed: Option<String>,
|
||||
use_nonce_authority: bool,
|
||||
) {
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let json_rpc_url = test_validator.rpc_url();
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let mut config_payer = CliConfig::recent_for_tests();
|
||||
config_payer.json_rpc_url = json_rpc_url.clone();
|
||||
@@ -215,8 +232,13 @@ fn full_battery_tests(
|
||||
#[test]
|
||||
fn test_create_account_with_seed() {
|
||||
solana_logger::setup();
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), 1);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
@@ -228,7 +250,7 @@ fn test_create_account_with_seed() {
|
||||
let config = CliConfig::recent_for_tests();
|
||||
|
||||
// Setup accounts
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&faucet_addr,
|
||||
@@ -260,7 +282,8 @@ fn test_create_account_with_seed() {
|
||||
check_recent_balance(0, &rpc_client, &nonce_address);
|
||||
|
||||
let mut creator_config = CliConfig::recent_for_tests();
|
||||
creator_config.json_rpc_url = test_validator.rpc_url();
|
||||
creator_config.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
creator_config.signers = vec![&online_nonce_creator_signer];
|
||||
creator_config.command = CliCommand::CreateNonceAccount {
|
||||
nonce_account: 0,
|
||||
@@ -310,7 +333,8 @@ fn test_create_account_with_seed() {
|
||||
|
||||
// And submit it
|
||||
let mut submit_config = CliConfig::recent_for_tests();
|
||||
submit_config.json_rpc_url = test_validator.rpc_url();
|
||||
submit_config.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
submit_config.signers = vec![&authority_presigner];
|
||||
submit_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
@@ -331,4 +355,7 @@ fn test_create_account_with_seed() {
|
||||
check_recent_balance(31, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_recent_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_recent_balance(10, &rpc_client, &to_address);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
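The nonce tests above, and the stake, transfer, and vote tests further down, interleave two generations of the test-validator helper: one side constructs `TestValidator::with_no_fees(pubkey)` or `with_custom_fees(pubkey, fee)` and asks the handle for `rpc_url()`, while the other destructures `TestValidator::run()` / `run_with_fees(fee)` into `server`, `leader_data`, `alice`, and `ledger_path` and builds the URL from `leader_data.rpc`, closing the server and removing the ledger directory by hand at the end of each test. A schematic sketch of the two shapes as they appear in these diffs; the types below are stand-ins, not the real `solana_core` definitions:

use std::net::SocketAddr;

// Stand-in only; not the real solana_core ContactInfo.
pub struct ContactInfo {
    pub rpc: SocketAddr,
}

// Field style (TestValidator::run()): the caller assembles the JSON-RPC URL
// itself and is responsible for server.close() and remove_dir_all(ledger_path).
pub fn rpc_url_from_contact_info(leader_data: &ContactInfo) -> String {
    format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port())
}

// Method style (TestValidator::with_no_fees(..)): the handle exposes the URL
// directly, so tests just call test_validator.rpc_url().
pub struct RpcUrlHandle {
    rpc_addr: SocketAddr,
}

impl RpcUrlHandle {
    pub fn rpc_url(&self) -> String {
        format!("http://{}:{}", self.rpc_addr.ip(), self.rpc_addr.port())
    }
}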
@@ -1,525 +0,0 @@
|
||||
use serde_json::Value;
|
||||
use solana_cli::{
|
||||
cli::{process_command, CliCommand, CliConfig},
|
||||
program::ProgramCliCommand,
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::test_validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
bpf_loader,
|
||||
bpf_loader_upgradeable::{self, UpgradeableLoaderState},
|
||||
commitment_config::CommitmentConfig,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{fs::File, io::Read, path::PathBuf, str::FromStr, sync::mpsc::channel};
|
||||
|
||||
#[test]
|
||||
fn test_cli_program_deploy_non_upgradeable() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
pathbuf.push("tests");
|
||||
pathbuf.push("fixtures");
|
||||
pathbuf.push("noop");
|
||||
pathbuf.set_extension("so");
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
|
||||
let mut file = File::open(pathbuf.to_str().unwrap()).unwrap();
|
||||
let mut program_data = Vec::new();
|
||||
file.read_to_end(&mut program_data).unwrap();
|
||||
let minimum_balance_for_rent_exemption = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(program_data.len())
|
||||
.unwrap();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
let keypair = Keypair::new();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.signers = vec![&keypair];
|
||||
config.command = CliCommand::Airdrop {
|
||||
faucet_host: None,
|
||||
faucet_port: faucet_addr.port(),
|
||||
pubkey: None,
|
||||
lamports: 4 * minimum_balance_for_rent_exemption, // min balance for rent exemption for three programs + leftover for tx processing
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: None,
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
let program_id = Pubkey::from_str(&program_id_str).unwrap();
|
||||
let account0 = rpc_client
|
||||
.get_account_with_commitment(&program_id, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account0.owner, bpf_loader::id());
|
||||
assert_eq!(account0.executable, true);
|
||||
let mut file = File::open(pathbuf.to_str().unwrap().to_string()).unwrap();
|
||||
let mut elf = Vec::new();
|
||||
file.read_to_end(&mut elf).unwrap();
|
||||
assert_eq!(account0.data, elf);
|
||||
|
||||
// Test custom address
|
||||
let custom_address_keypair = Keypair::new();
|
||||
config.signers = vec![&keypair, &custom_address_keypair];
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let account1 = rpc_client
|
||||
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account1.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account1.owner, bpf_loader::id());
|
||||
assert_eq!(account1.executable, true);
|
||||
assert_eq!(account1.data, account0.data);
|
||||
|
||||
// Attempt to redeploy to the same address
|
||||
process_command(&config).unwrap_err();
|
||||
|
||||
// Attempt to deploy to account with excess balance
|
||||
let custom_address_keypair = Keypair::new();
|
||||
config.signers = vec![&custom_address_keypair];
|
||||
config.command = CliCommand::Airdrop {
|
||||
faucet_host: None,
|
||||
faucet_port: faucet_addr.port(),
|
||||
pubkey: None,
|
||||
lamports: 2 * minimum_balance_for_rent_exemption, // Anything over minimum_balance_for_rent_exemption should trigger err
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
config.signers = vec![&keypair, &custom_address_keypair];
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
process_command(&config).unwrap_err();
|
||||
|
||||
// Use forcing parameter to deploy to account with excess balance
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: true,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let account2 = rpc_client
|
||||
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account2.lamports, 2 * minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account2.owner, bpf_loader::id());
|
||||
assert_eq!(account2.executable, true);
|
||||
assert_eq!(account2.data, account0.data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cli_program_deploy_no_authority() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
pathbuf.push("tests");
|
||||
pathbuf.push("fixtures");
|
||||
pathbuf.push("noop");
|
||||
pathbuf.set_extension("so");
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
|
||||
let mut file = File::open(pathbuf.to_str().unwrap()).unwrap();
|
||||
let mut program_data = Vec::new();
|
||||
file.read_to_end(&mut program_data).unwrap();
|
||||
let max_len = program_data.len();
|
||||
let minimum_balance_for_programdata = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(
|
||||
UpgradeableLoaderState::programdata_len(max_len).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let minimum_balance_for_program = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::program_len().unwrap())
|
||||
.unwrap();
|
||||
let upgrade_authority = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
let keypair = Keypair::new();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.command = CliCommand::Airdrop {
|
||||
faucet_host: None,
|
||||
faucet_port: faucet_addr.port(),
|
||||
pubkey: None,
|
||||
lamports: 100 * minimum_balance_for_programdata + minimum_balance_for_program,
|
||||
};
|
||||
config.signers = vec![&keypair];
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Deploy a program with no authority
|
||||
config.signers = vec![&keypair];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
program_signer_index: None,
|
||||
program_pubkey: None,
|
||||
buffer_signer_index: None,
|
||||
allow_excessive_balance: false,
|
||||
upgrade_authority_signer_index: None,
|
||||
upgrade_authority_pubkey: None,
|
||||
max_len: None,
|
||||
});
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
let program_id = Pubkey::from_str(&program_id_str).unwrap();
|
||||
|
||||
// Attempt to upgrade the program
|
||||
config.signers = vec![&keypair, &upgrade_authority];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
program_signer_index: None,
|
||||
program_pubkey: Some(program_id),
|
||||
buffer_signer_index: None,
|
||||
allow_excessive_balance: false,
|
||||
upgrade_authority_signer_index: Some(1),
|
||||
upgrade_authority_pubkey: Some(upgrade_authority.pubkey()),
|
||||
max_len: None,
|
||||
});
|
||||
process_command(&config).unwrap_err();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cli_program_deploy_with_authority() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
pathbuf.push("tests");
|
||||
pathbuf.push("fixtures");
|
||||
pathbuf.push("noop");
|
||||
pathbuf.set_extension("so");
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
|
||||
let mut file = File::open(pathbuf.to_str().unwrap()).unwrap();
|
||||
let mut program_data = Vec::new();
|
||||
file.read_to_end(&mut program_data).unwrap();
|
||||
let max_len = program_data.len();
|
||||
let minimum_balance_for_programdata = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(
|
||||
UpgradeableLoaderState::programdata_len(max_len).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let minimum_balance_for_program = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(UpgradeableLoaderState::program_len().unwrap())
|
||||
.unwrap();
|
||||
let upgrade_authority = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
let keypair = Keypair::new();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.signers = vec![&keypair];
|
||||
config.command = CliCommand::Airdrop {
|
||||
faucet_host: None,
|
||||
faucet_port: faucet_addr.port(),
|
||||
pubkey: None,
|
||||
lamports: 100 * minimum_balance_for_programdata + minimum_balance_for_program,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Deploy the upgradeable program with specified program_id
|
||||
let program_keypair = Keypair::new();
|
||||
config.signers = vec![&keypair, &upgrade_authority, &program_keypair];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
program_signer_index: Some(2),
|
||||
program_pubkey: Some(program_keypair.pubkey()),
|
||||
buffer_signer_index: None,
|
||||
allow_excessive_balance: false,
|
||||
upgrade_authority_signer_index: Some(1),
|
||||
upgrade_authority_pubkey: Some(upgrade_authority.pubkey()),
|
||||
max_len: Some(max_len),
|
||||
});
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
program_keypair.pubkey(),
|
||||
Pubkey::from_str(&program_id_str).unwrap()
|
||||
);
|
||||
let program_account = rpc_client
|
||||
.get_account_with_commitment(&program_keypair.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) = Pubkey::find_program_address(
|
||||
&[program_keypair.pubkey().as_ref()],
|
||||
&bpf_loader_upgradeable::id(),
|
||||
);
|
||||
let programdata_account = rpc_client
|
||||
.get_account_with_commitment(&programdata_pubkey, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
programdata_account.lamports,
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
);
|
||||
|
||||
// Deploy the upgradeable program
|
||||
config.signers = vec![&keypair, &upgrade_authority];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
program_signer_index: None,
|
||||
program_pubkey: None,
|
||||
buffer_signer_index: None,
|
||||
allow_excessive_balance: false,
|
||||
upgrade_authority_signer_index: Some(1),
|
||||
upgrade_authority_pubkey: Some(upgrade_authority.pubkey()),
|
||||
max_len: Some(max_len),
|
||||
});
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
let program_id = Pubkey::from_str(&program_id_str).unwrap();
|
||||
let program_account = rpc_client
|
||||
.get_account_with_commitment(&program_id, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) =
|
||||
Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id());
|
||||
let programdata_account = rpc_client
|
||||
.get_account_with_commitment(&programdata_pubkey, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
programdata_account.lamports,
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
);
|
||||
|
||||
// Upgrade the program
|
||||
config.signers = vec![&keypair, &upgrade_authority];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
program_signer_index: None,
|
||||
program_pubkey: Some(program_id),
|
||||
buffer_signer_index: None,
|
||||
allow_excessive_balance: false,
|
||||
upgrade_authority_signer_index: Some(1),
|
||||
upgrade_authority_pubkey: Some(upgrade_authority.pubkey()),
|
||||
max_len: Some(max_len),
|
||||
});
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
let program_id = Pubkey::from_str(&program_id_str).unwrap();
|
||||
let program_account = rpc_client
|
||||
.get_account_with_commitment(&program_id, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) =
|
||||
Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id());
|
||||
let programdata_account = rpc_client
|
||||
.get_account_with_commitment(&programdata_pubkey, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
programdata_account.lamports,
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
);
|
||||
|
||||
// Set a new authority
|
||||
let new_upgrade_authority = Keypair::new();
|
||||
config.signers = vec![&keypair, &upgrade_authority];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority {
|
||||
program: program_id,
|
||||
upgrade_authority_index: Some(1),
|
||||
new_upgrade_authority: Some(new_upgrade_authority.pubkey()),
|
||||
});
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let new_upgrade_authority_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("UpgradeAuthority")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
Pubkey::from_str(&new_upgrade_authority_str).unwrap(),
|
||||
new_upgrade_authority.pubkey()
|
||||
);
|
||||
|
||||
// Upgrade with new authority
|
||||
config.signers = vec![&keypair, &new_upgrade_authority];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
program_signer_index: None,
|
||||
program_pubkey: Some(program_id),
|
||||
buffer_signer_index: None,
|
||||
allow_excessive_balance: false,
|
||||
upgrade_authority_signer_index: Some(1),
|
||||
upgrade_authority_pubkey: Some(new_upgrade_authority.pubkey()),
|
||||
max_len: None,
|
||||
});
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
let program_id = Pubkey::from_str(&program_id_str).unwrap();
|
||||
let program_account = rpc_client
|
||||
.get_account_with_commitment(&program_id, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(program_account.lamports, minimum_balance_for_program);
|
||||
assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(program_account.executable, true);
|
||||
let (programdata_pubkey, _) =
|
||||
Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id());
|
||||
let programdata_account = rpc_client
|
||||
.get_account_with_commitment(&programdata_pubkey, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
programdata_account.lamports,
|
||||
minimum_balance_for_programdata
|
||||
);
|
||||
assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id());
|
||||
assert_eq!(programdata_account.executable, false);
|
||||
assert_eq!(
|
||||
programdata_account.data[UpgradeableLoaderState::programdata_data_offset().unwrap()..],
|
||||
program_data[..]
|
||||
);
|
||||
|
||||
// Set no authority
|
||||
config.signers = vec![&keypair, &new_upgrade_authority];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority {
|
||||
program: program_id,
|
||||
upgrade_authority_index: Some(1),
|
||||
new_upgrade_authority: None,
|
||||
});
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let new_upgrade_authority_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("UpgradeAuthority")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
assert_eq!(new_upgrade_authority_str, "None");
|
||||
|
||||
// Upgrade with no authority
|
||||
config.signers = vec![&keypair, &new_upgrade_authority];
|
||||
config.command = CliCommand::Program(ProgramCliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
program_signer_index: None,
|
||||
program_pubkey: Some(program_id),
|
||||
buffer_signer_index: None,
|
||||
allow_excessive_balance: false,
|
||||
upgrade_authority_signer_index: Some(1),
|
||||
upgrade_authority_pubkey: Some(new_upgrade_authority.pubkey()),
|
||||
max_len: None,
|
||||
});
|
||||
process_command(&config).unwrap_err();
|
||||
}
|
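The upgradeable-loader assertions in the deleted test above all follow the same pattern: the program account is owned by `bpf_loader_upgradeable` and marked executable, while the ELF bytes live in a separate programdata account whose address is derived from the program id and whose payload starts at `programdata_data_offset()`. A small sketch of that address derivation, mirroring the call used in the test:

use solana_sdk::{bpf_loader_upgradeable, pubkey::Pubkey};

/// Derive the programdata address for an upgradeable program, the same way the
/// test does before comparing the stored ELF bytes against the on-disk program.
fn programdata_address(program_id: &Pubkey) -> Pubkey {
    let (programdata_pubkey, _bump) =
        Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id());
    programdata_pubkey
}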
@@ -2,23 +2,24 @@ use solana_cli::cli::{process_command, CliCommand, CliConfig};
use solana_client::rpc_client::RpcClient;
use solana_core::test_validator::TestValidator;
use solana_faucet::faucet::run_local_faucet;
use solana_sdk::{
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
};
use std::sync::mpsc::channel;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Keypair};
use std::{fs::remove_dir_all, sync::mpsc::channel};

#[test]
fn test_cli_request_airdrop() {
let mint_keypair = Keypair::new();
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());

let TestValidator {
server,
leader_data,
alice,
ledger_path,
..
} = TestValidator::run();
let (sender, receiver) = channel();
run_local_faucet(mint_keypair, sender, None);
run_local_faucet(alice, sender, None);
let faucet_addr = receiver.recv().unwrap();

let mut bob_config = CliConfig::recent_for_tests();
bob_config.json_rpc_url = test_validator.rpc_url();
bob_config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
bob_config.command = CliCommand::Airdrop {
faucet_host: None,
faucet_port: faucet_addr.port(),
@@ -31,11 +32,14 @@ fn test_cli_request_airdrop() {
let sig_response = process_command(&bob_config);
sig_response.unwrap();

let rpc_client = RpcClient::new(test_validator.rpc_url());
let rpc_client = RpcClient::new_socket(leader_data.rpc);

let balance = rpc_client
.get_balance_with_commitment(&bob_config.signers[0].pubkey(), CommitmentConfig::recent())
.unwrap()
.value;
assert_eq!(balance, 50);

server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
@@ -22,21 +22,26 @@ use solana_stake_program::{
|
||||
stake_instruction::LockupArgs,
|
||||
stake_state::{Lockup, StakeAuthorize, StakeState},
|
||||
};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::{fs::remove_dir_all, sync::mpsc::channel};
|
||||
|
||||
#[test]
|
||||
fn test_stake_delegation_force() {
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let default_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
@@ -108,23 +113,33 @@ fn test_stake_delegation_force() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_seed_stake_delegation_and_deactivation() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
vote_pubkey,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let validator_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
|
||||
let mut config_validator = CliConfig::recent_for_tests();
|
||||
config_validator.json_rpc_url = test_validator.rpc_url();
|
||||
config_validator.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config_validator.signers = vec![&validator_keypair];
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
@@ -165,7 +180,7 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
// Delegate stake
|
||||
config_validator.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: stake_address,
|
||||
vote_account_pubkey: test_validator.vote_account_address(),
|
||||
vote_account_pubkey: vote_pubkey,
|
||||
stake_authority: 0,
|
||||
force: true,
|
||||
sign_only: false,
|
||||
@@ -187,23 +202,33 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_delegation_and_deactivation() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
vote_pubkey,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let validator_keypair = Keypair::new();
|
||||
|
||||
let mut config_validator = CliConfig::recent_for_tests();
|
||||
config_validator.json_rpc_url = test_validator.rpc_url();
|
||||
config_validator.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config_validator.signers = vec![&validator_keypair];
|
||||
|
||||
let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
|
||||
@@ -240,7 +265,7 @@ fn test_stake_delegation_and_deactivation() {
|
||||
config_validator.signers.pop();
|
||||
config_validator.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
vote_account_pubkey: test_validator.vote_account_address(),
|
||||
vote_account_pubkey: vote_pubkey,
|
||||
stake_authority: 0,
|
||||
force: true,
|
||||
sign_only: false,
|
||||
@@ -262,27 +287,38 @@ fn test_stake_delegation_and_deactivation() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_offline_stake_delegation_and_deactivation() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
vote_pubkey,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config_validator = CliConfig::recent_for_tests();
|
||||
config_validator.json_rpc_url = test_validator.rpc_url();
|
||||
config_validator.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let validator_keypair = Keypair::new();
|
||||
config_validator.signers = vec![&validator_keypair];
|
||||
|
||||
let mut config_payer = CliConfig::recent_for_tests();
|
||||
config_payer.json_rpc_url = test_validator.rpc_url();
|
||||
config_payer.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
|
||||
|
||||
@@ -336,7 +372,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
config_offline.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
vote_account_pubkey: test_validator.vote_account_address(),
|
||||
vote_account_pubkey: vote_pubkey,
|
||||
stake_authority: 0,
|
||||
force: true,
|
||||
sign_only: true,
|
||||
@@ -355,7 +391,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
config_payer.signers = vec![&offline_presigner];
|
||||
config_payer.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
vote_account_pubkey: test_validator.vote_account_address(),
|
||||
vote_account_pubkey: vote_pubkey,
|
||||
stake_authority: 0,
|
||||
force: true,
|
||||
sign_only: false,
|
||||
@@ -394,24 +430,33 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nonced_stake_delegation_and_deactivation() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
vote_pubkey,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let config_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.signers = vec![&config_keypair];
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let minimum_nonce_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(NonceState::size())
|
||||
@@ -470,7 +515,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
config.signers = vec![&config_keypair];
|
||||
config.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: stake_keypair.pubkey(),
|
||||
vote_account_pubkey: test_validator.vote_account_address(),
|
||||
vote_account_pubkey: vote_pubkey,
|
||||
stake_authority: 0,
|
||||
force: true,
|
||||
sign_only: false,
|
||||
@@ -508,23 +553,31 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_authorize() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let default_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
@@ -779,6 +832,9 @@ fn test_stake_authorize() {
|
||||
.unwrap()
|
||||
.blockhash;
|
||||
assert_ne!(nonce_hash, new_nonce_hash);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -786,24 +842,30 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
solana_logger::setup();
|
||||
const SIG_FEE: u64 = 42;
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), SIG_FEE);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_fees(SIG_FEE);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let default_signer = Keypair::new();
|
||||
let default_pubkey = default_signer.pubkey();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
let payer_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
|
||||
let mut config_payer = CliConfig::recent_for_tests();
|
||||
config_payer.signers = vec![&payer_keypair];
|
||||
config_payer.json_rpc_url = test_validator.rpc_url();
|
||||
config_payer.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let payer_pubkey = config_payer.signers[0].pubkey();
|
||||
|
||||
let mut config_offline = CliConfig::recent_for_tests();
|
||||
@@ -904,24 +966,32 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
// `config_offline` however has paid 1 sig due to being both authority
|
||||
// and fee payer
|
||||
check_recent_balance(100_000 - SIG_FEE, &rpc_client, &offline_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_split() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), 1);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_fees(1);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let default_signer = Keypair::new();
|
||||
let offline_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
let mut config_offline = CliConfig::recent_for_tests();
|
||||
@@ -1047,24 +1117,32 @@ fn test_stake_split() {
|
||||
&rpc_client,
|
||||
&split_account.pubkey(),
|
||||
);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_set_lockup() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), 1);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_fees(1);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let default_signer = Keypair::new();
|
||||
let offline_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
let mut config_offline = CliConfig::recent_for_tests();
|
||||
@@ -1097,10 +1175,8 @@ fn test_stake_set_lockup() {
|
||||
let stake_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
|
||||
let stake_account_pubkey = stake_keypair.pubkey();
|
||||
|
||||
let lockup = Lockup {
|
||||
custodian: config.signers[0].pubkey(),
|
||||
..Lockup::default()
|
||||
};
|
||||
let mut lockup = Lockup::default();
|
||||
lockup.custodian = config.signers[0].pubkey();
|
||||
|
||||
config.signers.push(&stake_keypair);
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
@@ -1310,23 +1386,32 @@ fn test_stake_set_lockup() {
|
||||
);
|
||||
assert_eq!(current_lockup.epoch, lockup.epoch.unwrap());
|
||||
assert_eq!(current_lockup.custodian, offline_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
let default_signer = keypair_from_seed(&[1u8; 32]).unwrap();
|
||||
config.signers = vec![&default_signer];
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let mut config_offline = CliConfig::recent_for_tests();
|
||||
let offline_signer = keypair_from_seed(&[2u8; 32]).unwrap();
|
||||
@@ -1522,4 +1607,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
let seed_address =
|
||||
Pubkey::create_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &seed_address);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
@@ -17,25 +17,30 @@ use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
|
||||
};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::{fs::remove_dir_all, sync::mpsc::channel};
|
||||
|
||||
#[test]
|
||||
fn test_transfer() {
|
||||
solana_logger::setup();
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), 1);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let default_signer = Keypair::new();
|
||||
let default_offline_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
let sender_pubkey = config.signers[0].pubkey();
|
||||
@@ -237,13 +242,21 @@ fn test_transfer() {
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(28, &rpc_client, &offline_pubkey);
|
||||
check_recent_balance(40, &rpc_client, &recipient_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_multisession_signing() {
|
||||
solana_logger::setup();
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), 1);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
@@ -256,7 +269,7 @@ fn test_transfer_multisession_signing() {
|
||||
let config = CliConfig::recent_for_tests();
|
||||
|
||||
// Setup accounts
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&faucet_addr,
|
||||
@@ -338,7 +351,7 @@ fn test_transfer_multisession_signing() {
|
||||
|
||||
// Finally submit to the cluster
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&fee_payer_presigner, &from_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(42),
|
||||
@@ -356,24 +369,32 @@ fn test_transfer_multisession_signing() {
|
||||
check_recent_balance(1, &rpc_client, &offline_from_signer.pubkey());
|
||||
check_recent_balance(1, &rpc_client, &offline_fee_payer_signer.pubkey());
|
||||
check_recent_balance(42, &rpc_client, &to_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_all() {
|
||||
solana_logger::setup();
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_custom_fees(mint_keypair.pubkey(), 1);
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let default_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
let sender_pubkey = config.signers[0].pubkey();
|
||||
@@ -401,4 +422,7 @@ fn test_transfer_all() {
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(49_999, &rpc_client, &recipient_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
@@ -15,21 +15,26 @@ use solana_sdk::{
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::{fs::remove_dir_all, sync::mpsc::channel};
|
||||
|
||||
#[test]
|
||||
fn test_vote_authorize_and_withdraw() {
|
||||
let mint_keypair = Keypair::new();
|
||||
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey());
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new(test_validator.rpc_url());
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
let default_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
@@ -125,4 +130,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
withdraw_authority: 1,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.5.1"
version = "1.4.13"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,21 +15,20 @@ bs58 = "0.3.1"
clap = "2.33.0"
indicatif = "0.15.0"
jsonrpc-core = "15.0.0"
log = "0.4.11"
net2 = "0.2.37"
log = "0.4.8"
rayon = "1.4.0"
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "0.11.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-account-decoder = { path = "../account-decoder", version = "1.5.1" }
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
solana-net-utils = { path = "../net-utils", version = "1.5.1" }
solana-sdk = { path = "../sdk", version = "1.5.1" }
solana-transaction-status = { path = "../transaction-status", version = "1.5.1" }
solana-version = { path = "../version", version = "1.5.1" }
solana-vote-program = { path = "../programs/vote", version = "1.5.1" }
solana-account-decoder = { path = "../account-decoder", version = "1.4.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
solana-net-utils = { path = "../net-utils", version = "1.4.13" }
solana-sdk = { path = "../sdk", version = "1.4.13" }
solana-transaction-status = { path = "../transaction-status", version = "1.4.13" }
solana-version = { path = "../version", version = "1.4.13" }
solana-vote-program = { path = "../programs/vote", version = "1.4.13" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -38,7 +37,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "15.0.0"
jsonrpc-http-server = "15.0.0"
solana-logger = { path = "../logger", version = "1.5.1" }
solana-logger = { path = "../logger", version = "1.4.13" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -5,8 +5,6 @@ use solana_sdk::{
use std::io;
use thiserror::Error;

pub use reqwest; // export `reqwest` for clients

#[derive(Error, Debug)]
pub enum ClientErrorKind {
    #[error(transparent)]
@@ -48,10 +48,6 @@ impl RpcSender for MockSender {
            return Ok(Value::Null);
        }
        let val = match request {
            RpcRequest::GetAccountInfo => serde_json::to_value(Response {
                context: RpcResponseContext { slot: 1 },
                value: Value::Null,
            })?,
            RpcRequest::GetBalance => serde_json::to_value(Response {
                context: RpcResponseContext { slot: 1 },
                value: Value::Number(Number::from(50)),
@@ -69,7 +65,6 @@ impl RpcSender for MockSender {
                slots_in_epoch: 32,
                absolute_slot: 34,
                block_height: 34,
                transaction_count: Some(123),
            })?,
            RpcRequest::GetFeeCalculatorForBlockhash => {
                let value = if self.url == "blockhash_expired" {
@@ -384,38 +384,6 @@ impl RpcClient {
        self.send(RpcRequest::GetVoteAccounts, json!([commitment_config]))
    }

    pub fn wait_for_max_stake(
        &self,
        commitment: CommitmentConfig,
        max_stake_percent: f32,
    ) -> ClientResult<()> {
        let mut current_percent;
        loop {
            let vote_accounts = self.get_vote_accounts_with_commitment(commitment)?;

            let mut max = 0;
            let total_active_stake = vote_accounts
                .current
                .iter()
                .chain(vote_accounts.delinquent.iter())
                .map(|vote_account| {
                    max = std::cmp::max(max, vote_account.activated_stake);
                    vote_account.activated_stake
                })
                .sum::<u64>();
            current_percent = 100f32 * max as f32 / total_active_stake as f32;
            if current_percent < max_stake_percent {
                break;
            }
            info!(
                "Waiting for stake to drop below {} current: {:.1}",
                max_stake_percent, current_percent
            );
            sleep(Duration::from_secs(10));
        }
        Ok(())
    }

    pub fn get_cluster_nodes(&self) -> ClientResult<Vec<RpcContactInfo>> {
        self.send(RpcRequest::GetClusterNodes, Value::Null)
    }
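The wait_for_max_stake helper shown in this hunk polls the cluster's vote accounts and sleeps in a 10-second loop until the single largest activated stake falls below the given percentage. A minimal caller-side sketch, assuming a reachable RPC node at a placeholder URL and the signature shown above:

use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; point this at a real RPC node.
    let rpc = RpcClient::new("http://127.0.0.1:8899".to_string());
    // Blocks, re-checking every 10 seconds, until no single validator holds 33% or more of the active stake.
    rpc.wait_for_max_stake(CommitmentConfig::default(), 33.0)?;
    Ok(())
}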
@@ -1437,7 +1405,7 @@ fn new_spinner_progress_bar() -> ProgressBar {
    progress_bar
}

fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
    if tls {
        format!("https://{}", rpc_addr)
    } else {
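For reference, a hedged one-liner showing what the now-public helper produces; the module path solana_client::rpc_client is assumed from the file this hunk touches, and the http branch is inferred since the else arm is cut off above:

use std::net::SocketAddr;

fn main() {
    let addr: SocketAddr = "127.0.0.1:8899".parse().unwrap();
    // tls = false yields an http URL; tls = true would yield https.
    let url = solana_client::rpc_client::get_rpc_request_str(addr, false);
    assert_eq!(url, "http://127.0.0.1:8899");
}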
@@ -82,7 +82,6 @@ pub enum RpcTransactionLogsFilter {
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcTransactionLogsConfig {
    #[serde(flatten)]
    pub commitment: Option<CommitmentConfig>,
}
@@ -10,7 +10,6 @@ pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE: i64
pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHLY: i64 = -32005;
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 = -32006;
pub const JSON_RPC_SERVER_ERROR_SLOT_SKIPPED: i64 = -32007;

pub enum RpcCustomError {
    BlockCleanedUp {
@@ -27,9 +26,6 @@ pub enum RpcCustomError {
    },
    RpcNodeUnhealthy,
    TransactionPrecompileVerificationFailure(solana_sdk::transaction::TransactionError),
    SlotSkipped {
        slot: Slot,
    },
}

impl From<RpcCustomError> for Error {
@@ -77,14 +73,6 @@ impl From<RpcCustomError> for Error {
                message: format!("Transaction precompile verification failure {:?}", e),
                data: None,
            },
            RpcCustomError::SlotSkipped { slot } => Self {
                code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_SLOT_SKIPPED),
                message: format!(
                    "Slot {} was skipped, or missing due to ledger jump to recent snapshot",
                    slot
                ),
                data: None,
            },
        }
    }
}
@@ -399,12 +399,6 @@ impl SyncClient for ThinClient {
            .map(|r| r.value)
    }

    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> TransportResult<u64> {
        self.rpc_client()
            .get_minimum_balance_for_rent_exemption(data_len)
            .map_err(|e| e.into())
    }

    fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
        let (blockhash, fee_calculator, _last_valid_slot) =
            self.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
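A hedged usage sketch of the RPC call this ThinClient method delegates to; the URL is a placeholder and the 165-byte size is only an example data length:

use solana_client::rpc_client::RpcClient;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let rpc = RpcClient::new("http://127.0.0.1:8899".to_string());
    // Minimum lamports an account with 165 bytes of data needs to be rent-exempt.
    let min = rpc.get_minimum_balance_for_rent_exemption(165)?;
    println!("rent-exempt minimum: {} lamports", min);
    Ok(())
}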
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.5.1"
|
||||
version = "1.4.13"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -14,7 +14,6 @@ edition = "2018"
|
||||
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
|
||||
|
||||
[dependencies]
|
||||
ahash = "0.6.1"
|
||||
base64 = "0.12.3"
|
||||
bincode = "1.3.1"
|
||||
bv = { version = "0.11.1", features = ["serde"] }
|
||||
@@ -34,10 +33,8 @@ jsonrpc-derive = "15.0.0"
|
||||
jsonrpc-http-server = "15.0.0"
|
||||
jsonrpc-pubsub = "15.0.0"
|
||||
jsonrpc-ws-server = "15.0.0"
|
||||
log = "0.4.11"
|
||||
lru = "0.6.1"
|
||||
miow = "0.2.2"
|
||||
net2 = "0.2.37"
|
||||
log = "0.4.8"
|
||||
lru = "0.6.0"
|
||||
num_cpus = "1.13.0"
|
||||
num-traits = "0.2"
|
||||
rand = "0.7.0"
|
||||
@@ -46,43 +43,40 @@ raptorq = "1.4.2"
|
||||
rayon = "1.4.1"
|
||||
regex = "1.3.9"
|
||||
serde = "1.0.112"
|
||||
serde_bytes = "0.11"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.5.1" }
|
||||
solana-banks-server = { path = "../banks-server", version = "1.5.1" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.5.1" }
|
||||
solana-client = { path = "../client", version = "1.5.1" }
|
||||
solana-faucet = { path = "../faucet", version = "1.5.1" }
|
||||
solana-ledger = { path = "../ledger", version = "1.5.1" }
|
||||
solana-logger = { path = "../logger", version = "1.5.1" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.5.1" }
|
||||
solana-metrics = { path = "../metrics", version = "1.5.1" }
|
||||
solana-measure = { path = "../measure", version = "1.5.1" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.5.1" }
|
||||
solana-perf = { path = "../perf", version = "1.5.1" }
|
||||
solana-program-test = { path = "../program-test", version = "1.5.1" }
|
||||
solana-runtime = { path = "../runtime", version = "1.5.1" }
|
||||
solana-sdk = { path = "../sdk", version = "1.5.1" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "1.5.1" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.5.1" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.5.1" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.5.1" }
|
||||
solana-streamer = { path = "../streamer", version = "1.5.1" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.5.1" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.5.1" }
|
||||
solana-version = { path = "../version", version = "1.5.1" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.5.1" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.13" }
|
||||
solana-banks-server = { path = "../banks-server", version = "1.4.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.13" }
|
||||
solana-client = { path = "../client", version = "1.4.13" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.13" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "1.4.13" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.4.13" }
|
||||
solana-ledger = { path = "../ledger", version = "1.4.13" }
|
||||
solana-logger = { path = "../logger", version = "1.4.13" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.4.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.13" }
|
||||
solana-measure = { path = "../measure", version = "1.4.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.13" }
|
||||
solana-perf = { path = "../perf", version = "1.4.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.13" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.4.13" }
|
||||
solana-streamer = { path = "../streamer", version = "1.4.13" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.4.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.13" }
|
||||
solana-version = { path = "../version", version = "1.4.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.13" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.4.13" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "0.2", features = ["full"] }
|
||||
tokio_01 = { version = "0.1", package = "tokio" }
|
||||
tokio_01_bytes = { version = "0.4.7", package = "bytes" }
|
||||
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
|
||||
tokio_io_01 = { version = "0.1", package = "tokio-io" }
|
||||
tokio_codec_01 = { version = "0.1", package = "tokio-codec" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.5.1" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.4.13" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -7,18 +7,14 @@ use log::*;
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::retransmit_stage::retransmitter;
|
||||
use solana_ledger::entry::Entry;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
|
||||
use solana_ledger::shred::Shredder;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_perf::packet::{Packet, Packets};
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::bank_forks::BankForks;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::system_transaction;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
@@ -67,24 +63,14 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
||||
|
||||
// To work reliably with higher values, this needs larger udp rmem size
|
||||
let entries: Vec<_> = (0..5)
|
||||
.map(|_| {
|
||||
let keypair0 = Keypair::new();
|
||||
let keypair1 = Keypair::new();
|
||||
let tx0 =
|
||||
system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
|
||||
Entry::new(&Hash::default(), 1, vec![tx0])
|
||||
})
|
||||
.collect();
|
||||
|
||||
let keypair = Arc::new(Keypair::new());
|
||||
let slot = 0;
|
||||
let parent = 0;
|
||||
let shredder =
|
||||
Shredder::new(slot, parent, 0.0, keypair, 0, 0).expect("Failed to create entry shredder");
|
||||
let mut data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
|
||||
|
||||
let num_packets = data_shreds.len();
|
||||
let tx = test_tx();
|
||||
const NUM_PACKETS: usize = 50;
|
||||
let chunk_size = NUM_PACKETS / (4 * NUM_THREADS);
|
||||
let batches = to_packets_chunked(
|
||||
&std::iter::repeat(tx).take(NUM_PACKETS).collect::<Vec<_>>(),
|
||||
chunk_size,
|
||||
);
|
||||
info!("batches: {}", batches.len());
|
||||
|
||||
let retransmitter_handles = retransmitter(
|
||||
Arc::new(sockets),
|
||||
@@ -94,8 +80,6 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
packet_receiver,
|
||||
);
|
||||
|
||||
let mut index = 0;
|
||||
let mut slot = 0;
|
||||
let total = Arc::new(AtomicUsize::new(0));
|
||||
bencher.iter(move || {
|
||||
let peer_sockets1 = peer_sockets.clone();
|
||||
@@ -112,7 +96,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
while peer_sockets2[p].recv(&mut buf).is_ok() {
|
||||
total2.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
if total2.load(Ordering::Relaxed) >= num_packets {
|
||||
if total2.load(Ordering::Relaxed) >= NUM_PACKETS {
|
||||
break;
|
||||
}
|
||||
info!("{} recv", total2.load(Ordering::Relaxed));
|
||||
@@ -123,17 +107,9 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
})
|
||||
.collect();
|
||||
|
||||
for shred in data_shreds.iter_mut() {
|
||||
shred.set_slot(slot);
|
||||
shred.set_index(index);
|
||||
index += 1;
|
||||
index %= 200;
|
||||
let mut p = Packet::default();
|
||||
shred.copy_to_packet(&mut p);
|
||||
let _ = packet_sender.send(Packets::new(vec![p]));
|
||||
for packets in batches.clone() {
|
||||
packet_sender.send(packets).unwrap();
|
||||
}
|
||||
slot += 1;
|
||||
|
||||
info!("sent...");
|
||||
|
||||
let mut join_time = Measure::start("join");
|
||||
|
@@ -6,7 +6,7 @@ use rand::seq::SliceRandom;
|
||||
use raptorq::{Decoder, Encoder};
|
||||
use solana_ledger::entry::{create_ticks, Entry};
|
||||
use solana_ledger::shred::{
|
||||
max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, Shredder,
|
||||
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder,
|
||||
MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE, SHRED_PAYLOAD_SIZE,
|
||||
SIZE_OF_DATA_SHRED_IGNORED_TAIL, SIZE_OF_DATA_SHRED_PAYLOAD,
|
||||
};
|
||||
@@ -40,9 +40,7 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
|
||||
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
|
||||
let shredder =
|
||||
Shredder::new(1, 0, RECOMMENDED_FEC_RATE, Arc::new(Keypair::new()), 0, 0).unwrap();
|
||||
let data_shreds = shredder
|
||||
.entries_to_data_shreds(&entries, true, 0, &mut ProcessShredsStats::default())
|
||||
.0;
|
||||
let data_shreds = shredder.entries_to_data_shreds(&entries, true, 0).0;
|
||||
assert!(data_shreds.len() >= num_shreds);
|
||||
data_shreds
|
||||
}
|
||||
@@ -125,14 +123,8 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
|
||||
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
|
||||
let data_shreds = make_shreds(symbol_count);
|
||||
bencher.iter(|| {
|
||||
Shredder::generate_coding_shreds(
|
||||
0,
|
||||
RECOMMENDED_FEC_RATE,
|
||||
&data_shreds[..symbol_count],
|
||||
0,
|
||||
symbol_count,
|
||||
)
|
||||
.len();
|
||||
Shredder::generate_coding_shreds(0, RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], 0)
|
||||
.len();
|
||||
})
|
||||
}
|
||||
|
||||
@@ -140,13 +132,8 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
|
||||
fn bench_shredder_decoding(bencher: &mut Bencher) {
|
||||
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
|
||||
let data_shreds = make_shreds(symbol_count);
|
||||
let coding_shreds = Shredder::generate_coding_shreds(
|
||||
0,
|
||||
RECOMMENDED_FEC_RATE,
|
||||
&data_shreds[..symbol_count],
|
||||
0,
|
||||
symbol_count,
|
||||
);
|
||||
let coding_shreds =
|
||||
Shredder::generate_coding_shreds(0, RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], 0);
|
||||
bencher.iter(|| {
|
||||
Shredder::try_recovery(
|
||||
coding_shreds[..].to_vec(),
|
||||
|
@@ -254,9 +254,6 @@ mod tests {
                0,
                100,
            );
            // sleep for 1ms to create a newer timestamp for gossip entry
            // otherwise the timestamp won't be newer.
            std::thread::sleep(Duration::from_millis(1));
        }
        cluster_info.flush_push_queue();
        let cluster_hashes = cluster_info
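A standalone sketch of the timing concern that comment describes: two entries written back-to-back can land on the same millisecond, so the test pauses briefly to guarantee a newer timestamp. This uses only std, and the 1 ms figure simply mirrors the test above:

use std::thread::sleep;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn now_ms() -> u128 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis()
}

fn main() {
    let first = now_ms();
    // Without this pause, `second` could equal `first` at millisecond resolution.
    sleep(Duration::from_millis(1));
    let second = now_ms();
    assert!(second > first);
}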
core/src/bank_weight_fork_choice.rs (new file, 151 lines)
@@ -0,0 +1,151 @@
|
||||
use crate::{
|
||||
consensus::{ComputedBankState, Tower},
|
||||
fork_choice::ForkChoice,
|
||||
progress_map::{ForkStats, ProgressMap},
|
||||
};
|
||||
use solana_runtime::{bank::Bank, bank_forks::BankForks};
|
||||
use solana_sdk::timing;
|
||||
use std::time::Instant;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct BankWeightForkChoice {}
|
||||
|
||||
impl ForkChoice for BankWeightForkChoice {
|
||||
fn compute_bank_stats(
|
||||
&mut self,
|
||||
bank: &Bank,
|
||||
_tower: &Tower,
|
||||
progress: &mut ProgressMap,
|
||||
computed_bank_state: &ComputedBankState,
|
||||
) {
|
||||
let bank_slot = bank.slot();
|
||||
// Only time progress map should be missing a bank slot
|
||||
// is if this node was the leader for this slot as those banks
|
||||
// are not replayed in replay_active_banks()
|
||||
let parent_weight = bank
|
||||
.parent()
|
||||
.and_then(|b| progress.get(&b.slot()))
|
||||
.map(|x| x.fork_stats.fork_weight)
|
||||
.unwrap_or(0);
|
||||
|
||||
let stats = progress
|
||||
.get_fork_stats_mut(bank_slot)
|
||||
.expect("All frozen banks must exist in the Progress map");
|
||||
|
||||
let ComputedBankState { bank_weight, .. } = computed_bank_state;
|
||||
stats.weight = *bank_weight;
|
||||
stats.fork_weight = stats.weight + parent_weight;
|
||||
}
|
||||
|
||||
// Returns:
|
||||
// 1) The heaviest overall bank
|
||||
// 2) The heaviest bank on the same fork as the last vote (doesn't require a
|
||||
// switching proof to vote for)
|
||||
fn select_forks(
|
||||
&self,
|
||||
frozen_banks: &[Arc<Bank>],
|
||||
tower: &Tower,
|
||||
progress: &ProgressMap,
|
||||
ancestors: &HashMap<u64, HashSet<u64>>,
|
||||
_bank_forks: &RwLock<BankForks>,
|
||||
) -> (Arc<Bank>, Option<Arc<Bank>>) {
|
||||
let tower_start = Instant::now();
|
||||
assert!(!frozen_banks.is_empty());
|
||||
let num_frozen_banks = frozen_banks.len();
|
||||
|
||||
trace!("frozen_banks {}", frozen_banks.len());
|
||||
let num_old_banks = frozen_banks
|
||||
.iter()
|
||||
.filter(|b| b.slot() < tower.root())
|
||||
.count();
|
||||
|
||||
let last_voted_slot = tower.last_voted_slot();
|
||||
let mut heaviest_bank_on_same_fork = None;
|
||||
let mut heaviest_same_fork_weight = 0;
|
||||
let stats: Vec<&ForkStats> = frozen_banks
|
||||
.iter()
|
||||
.map(|bank| {
|
||||
// Only time progress map should be missing a bank slot
|
||||
// is if this node was the leader for this slot as those banks
|
||||
// are not replayed in replay_active_banks()
|
||||
let stats = progress
|
||||
.get_fork_stats(bank.slot())
|
||||
.expect("All frozen banks must exist in the Progress map");
|
||||
|
||||
if let Some(last_voted_slot) = last_voted_slot {
|
||||
if ancestors
|
||||
.get(&bank.slot())
|
||||
.expect("Entry in frozen banks must exist in ancestors")
|
||||
.contains(&last_voted_slot)
|
||||
{
|
||||
// Descendant of last vote cannot be locked out
|
||||
assert!(!stats.is_locked_out);
|
||||
|
||||
// ancestors(slot) should not contain the slot itself,
|
||||
// so we should never get the same bank as the last vote
|
||||
assert_ne!(bank.slot(), last_voted_slot);
|
||||
// highest weight, lowest slot first. frozen_banks is sorted
|
||||
// from least slot to greatest slot, so if two banks have
|
||||
// the same fork weight, the lower slot will be picked
|
||||
if stats.fork_weight > heaviest_same_fork_weight {
|
||||
heaviest_bank_on_same_fork = Some(bank.clone());
|
||||
heaviest_same_fork_weight = stats.fork_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stats
|
||||
})
|
||||
.collect();
|
||||
let num_not_recent = stats.iter().filter(|s| !s.is_recent).count();
|
||||
let num_has_voted = stats.iter().filter(|s| s.has_voted).count();
|
||||
let num_empty = stats.iter().filter(|s| s.is_empty).count();
|
||||
let num_threshold_failure = stats.iter().filter(|s| !s.vote_threshold).count();
|
||||
let num_votable_threshold_failure = stats
|
||||
.iter()
|
||||
.filter(|s| s.is_recent && !s.has_voted && !s.vote_threshold)
|
||||
.count();
|
||||
|
||||
let mut candidates: Vec<_> = frozen_banks.iter().zip(stats.iter()).collect();
|
||||
|
||||
//highest weight, lowest slot first
|
||||
candidates.sort_by_key(|b| (b.1.fork_weight, 0i64 - b.0.slot() as i64));
|
||||
let rv = candidates
|
||||
.last()
|
||||
.expect("frozen banks was nonempty so candidates must also be nonempty");
|
||||
let ms = timing::duration_as_ms(&tower_start.elapsed());
|
||||
let weights: Vec<(u128, u64, u64)> = candidates
|
||||
.iter()
|
||||
.map(|x| (x.1.weight, x.0.slot(), x.1.block_height))
|
||||
.collect();
|
||||
debug!(
|
||||
"@{:?} tower duration: {:?} len: {}/{} weights: {:?}",
|
||||
timing::timestamp(),
|
||||
ms,
|
||||
candidates.len(),
|
||||
stats.iter().filter(|s| !s.has_voted).count(),
|
||||
weights,
|
||||
);
|
||||
datapoint_debug!(
|
||||
"replay_stage-select_forks",
|
||||
("frozen_banks", num_frozen_banks as i64, i64),
|
||||
("not_recent", num_not_recent as i64, i64),
|
||||
("has_voted", num_has_voted as i64, i64),
|
||||
("old_banks", num_old_banks as i64, i64),
|
||||
("empty_banks", num_empty as i64, i64),
|
||||
("threshold_failure", num_threshold_failure as i64, i64),
|
||||
(
|
||||
"votable_threshold_failure",
|
||||
num_votable_threshold_failure as i64,
|
||||
i64
|
||||
),
|
||||
("tower_duration", ms as i64, i64),
|
||||
);
|
||||
|
||||
(rv.0.clone(), heaviest_bank_on_same_fork)
|
||||
}
|
||||
}
|
@@ -4,7 +4,7 @@
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
|
||||
poh_service::{self, PohService},
|
||||
poh_service::PohService,
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
|
||||
use itertools::Itertools;
|
||||
@@ -15,7 +15,7 @@ use solana_ledger::{
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
};
|
||||
use solana_measure::{measure::Measure, thread_mem_usage};
|
||||
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
|
||||
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
|
||||
use solana_perf::{
|
||||
cuda_runtime::PinnedVec,
|
||||
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
|
||||
@@ -23,7 +23,7 @@ use solana_perf::{
|
||||
};
|
||||
use solana_runtime::{
|
||||
accounts_db::ErrorCounters,
|
||||
bank::{Bank, TransactionBalancesSet, TransactionCheckResult, TransactionExecutionResult},
|
||||
bank::{Bank, TransactionBalancesSet, TransactionProcessResult},
|
||||
bank_utils,
|
||||
transaction_batch::TransactionBatch,
|
||||
vote_sender_types::ReplayVoteSender,
|
||||
@@ -38,13 +38,8 @@ use solana_sdk::{
|
||||
timing::{duration_as_ms, timestamp},
|
||||
transaction::{self, Transaction, TransactionError},
|
||||
};
|
||||
use solana_transaction_status::token_balances::{
|
||||
collect_token_balances, TransactionTokenBalancesSet,
|
||||
};
|
||||
use std::{
|
||||
cmp,
|
||||
collections::HashMap,
|
||||
env,
|
||||
cmp, env,
|
||||
net::UdpSocket,
|
||||
sync::atomic::AtomicBool,
|
||||
sync::mpsc::Receiver,
|
||||
@@ -58,7 +53,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
|
||||
pub type UnprocessedPackets = Vec<PacketsAndOffsets>;
|
||||
|
||||
/// Transaction forwarding
|
||||
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2;
|
||||
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;
|
||||
|
||||
// Fixed thread size seems to be fastest on GCP setup
|
||||
pub const NUM_THREADS: u32 = 4;
|
||||
@@ -465,7 +460,7 @@ impl BankingStage {
|
||||
fn record_transactions(
|
||||
bank_slot: Slot,
|
||||
txs: &[Transaction],
|
||||
results: &[TransactionExecutionResult],
|
||||
results: &[TransactionProcessResult],
|
||||
poh: &Arc<Mutex<PohRecorder>>,
|
||||
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
|
||||
let mut processed_generation = Measure::start("record::process_generation");
|
||||
@@ -487,7 +482,7 @@ impl BankingStage {
|
||||
debug!("num_to_commit: {} ", num_to_commit);
|
||||
// unlock all the accounts with errors which are filtered by the above `filter_map`
|
||||
if !processed_transactions.is_empty() {
|
||||
inc_new_counter_info!("banking_stage-record_transactions", num_to_commit);
|
||||
inc_new_counter_warn!("banking_stage-record_transactions", num_to_commit);
|
||||
|
||||
let mut hash_time = Measure::start("record::hash");
|
||||
let hash = hash_transactions(&processed_transactions[..]);
|
||||
@@ -535,15 +530,6 @@ impl BankingStage {
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
|
||||
|
||||
let pre_token_balances = if transaction_status_sender.is_some() {
|
||||
collect_token_balances(&bank, &batch, &mut mint_decimals)
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
|
||||
let (
|
||||
mut loaded_accounts,
|
||||
results,
|
||||
@@ -588,14 +574,12 @@ impl BankingStage {
|
||||
bank_utils::find_and_send_votes(txs, &tx_results, Some(gossip_vote_sender));
|
||||
if let Some(sender) = transaction_status_sender {
|
||||
let post_balances = bank.collect_balances(batch);
|
||||
let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals);
|
||||
send_transaction_status_batch(
|
||||
bank.clone(),
|
||||
batch.transactions(),
|
||||
batch.iteration_order_vec(),
|
||||
tx_results.execution_results,
|
||||
tx_results.processing_results,
|
||||
TransactionBalancesSet::new(pre_balances, post_balances),
|
||||
TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances),
|
||||
inner_instructions,
|
||||
transaction_logs,
|
||||
sender,
|
||||
@@ -735,7 +719,7 @@ impl BankingStage {
|
||||
// This function returns a vector containing index of all valid transactions. A valid
|
||||
// transaction has result Ok() as the value
|
||||
fn filter_valid_transaction_indexes(
|
||||
valid_txs: &[TransactionCheckResult],
|
||||
valid_txs: &[TransactionProcessResult],
|
||||
transaction_indexes: &[usize],
|
||||
) -> Vec<usize> {
|
||||
let valid_transactions = valid_txs
|
||||
@@ -1088,13 +1072,7 @@ pub fn create_test_recorder(
|
||||
poh_recorder.set_bank(&bank);
|
||||
|
||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||
let poh_service = PohService::new(
|
||||
poh_recorder.clone(),
|
||||
&poh_config,
|
||||
&exit,
|
||||
bank.ticks_per_slot(),
|
||||
poh_service::DEFAULT_PINNED_CPU_CORE,
|
||||
);
|
||||
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
|
||||
|
||||
(exit, poh_recorder, poh_service, entry_receiver)
|
||||
}
|
||||
@@ -1115,6 +1093,7 @@ mod tests {
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_perf::packet::to_packets;
|
||||
use solana_runtime::bank::HashAgeKind;
|
||||
use solana_sdk::{
|
||||
instruction::InstructionError,
|
||||
signature::{Keypair, Signer},
|
||||
@@ -1177,10 +1156,8 @@ mod tests {
|
||||
Blockstore::open(&ledger_path)
|
||||
.expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let poh_config = PohConfig {
|
||||
target_tick_count: Some(bank.max_tick_height() + num_extra_ticks),
|
||||
..PohConfig::default()
|
||||
};
|
||||
let mut poh_config = PohConfig::default();
|
||||
poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
|
||||
let (exit, poh_recorder, poh_service, entry_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, Some(poh_config));
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
@@ -1244,12 +1221,9 @@ mod tests {
|
||||
Blockstore::open(&ledger_path)
|
||||
.expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let poh_config = PohConfig {
|
||||
// limit tick count to avoid clearing working_bank at PohRecord then
|
||||
// PohRecorderError(MaxHeightReached) at BankingStage
|
||||
target_tick_count: Some(bank.max_tick_height() - 1),
|
||||
..PohConfig::default()
|
||||
};
|
||||
let mut poh_config = PohConfig::default();
|
||||
// limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
|
||||
poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
|
||||
let (exit, poh_recorder, poh_service, entry_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, Some(poh_config));
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
@@ -1392,12 +1366,9 @@ mod tests {
|
||||
Blockstore::open(&ledger_path)
|
||||
.expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let poh_config = PohConfig {
|
||||
// limit tick count to avoid clearing working_bank at
|
||||
// PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
|
||||
target_tick_count: Some(bank.max_tick_height() - 1),
|
||||
..PohConfig::default()
|
||||
};
|
||||
let mut poh_config = PohConfig::default();
|
||||
// limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
|
||||
poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
|
||||
let (exit, poh_recorder, poh_service, entry_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, Some(poh_config));
|
||||
let cluster_info =
|
||||
@@ -1486,7 +1457,10 @@ mod tests {
|
||||
system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()),
|
||||
];
|
||||
|
||||
let mut results = vec![(Ok(()), None), (Ok(()), None)];
|
||||
let mut results = vec![
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
];
|
||||
let _ = BankingStage::record_transactions(
|
||||
bank.slot(),
|
||||
&transactions,
|
||||
@@ -1502,7 +1476,7 @@ mod tests {
|
||||
1,
|
||||
SystemError::ResultWithNegativeLamports.into(),
|
||||
)),
|
||||
None,
|
||||
Some(HashAgeKind::Extant),
|
||||
);
|
||||
let (res, retryable) = BankingStage::record_transactions(
|
||||
bank.slot(),
|
||||
@@ -1678,10 +1652,10 @@ mod tests {
|
||||
&[
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Ok(()), None),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Ok(()), None),
|
||||
(Ok(()), None),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
],
|
||||
&[2, 4, 5, 9, 11, 13]
|
||||
),
|
||||
@@ -1691,12 +1665,12 @@ mod tests {
|
||||
assert_eq!(
|
||||
BankingStage::filter_valid_transaction_indexes(
|
||||
&[
|
||||
(Ok(()), None),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Ok(()), None),
|
||||
(Ok(()), None),
|
||||
(Ok(()), None),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
],
|
||||
&[1, 6, 7, 9, 31, 43]
|
||||
),
|
||||
@@ -1987,7 +1961,7 @@ mod tests {
|
||||
|
||||
assert_eq!(processed_transactions_count, 0,);
|
||||
|
||||
retryable_txs.sort_unstable();
|
||||
retryable_txs.sort();
|
||||
let expected: Vec<usize> = (0..transactions.len()).collect();
|
||||
assert_eq!(retryable_txs, expected);
|
||||
}
|
||||
|
@@ -1,5 +1,4 @@
|
||||
//! A stage to broadcast data from a leader node to validators
|
||||
#![allow(clippy::rc_buffer)]
|
||||
use self::{
|
||||
broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
|
||||
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
|
||||
@@ -17,7 +16,7 @@ use crossbeam_channel::{
|
||||
Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
|
||||
Sender as CrossbeamSender,
|
||||
};
|
||||
use solana_ledger::{blockstore::Blockstore, shred::Shred};
|
||||
use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
|
||||
use solana_runtime::bank::Bank;
|
||||
@@ -306,7 +305,7 @@ impl BroadcastStage {
|
||||
|
||||
for (_, bank) in retransmit_slots.iter() {
|
||||
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let stakes = bank.epoch_staked_nodes(bank_epoch);
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
let data_shreds = Arc::new(
|
||||
blockstore
|
||||
@@ -347,7 +346,7 @@ fn update_peer_stats(
|
||||
) {
|
||||
let now = timestamp();
|
||||
let last = last_datapoint_submit.load(Ordering::Relaxed);
|
||||
if now.saturating_sub(last) > 1000
|
||||
if now - last > 1000
|
||||
&& last_datapoint_submit.compare_and_swap(last, now, Ordering::Relaxed) == last
|
||||
{
|
||||
datapoint_info!(
|
||||
@@ -446,7 +445,7 @@ pub mod test {
|
||||
entry::create_ticks,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
shred::{max_ticks_per_n_shreds, ProcessShredsStats, Shredder, RECOMMENDED_FEC_RATE},
|
||||
shred::{max_ticks_per_n_shreds, Shredder, RECOMMENDED_FEC_RATE},
|
||||
};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
@@ -475,8 +474,7 @@ pub mod test {
|
||||
let shredder = Shredder::new(slot, 0, RECOMMENDED_FEC_RATE, keypair, 0, 0)
|
||||
.expect("Expected to create a new shredder");
|
||||
|
||||
let coding_shreds = shredder
|
||||
.data_shreds_to_coding_shreds(&data_shreds[0..], &mut ProcessShredsStats::default());
|
||||
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..]);
|
||||
(
|
||||
data_shreds.clone(),
|
||||
coding_shreds.clone(),
|
||||
@@ -519,10 +517,8 @@ pub mod test {
|
||||
|
||||
#[test]
|
||||
fn test_num_live_peers() {
|
||||
let mut ci = ContactInfo {
|
||||
wallclock: std::u64::MAX,
|
||||
..ContactInfo::default()
|
||||
};
|
||||
let mut ci = ContactInfo::default();
|
||||
ci.wallclock = std::u64::MAX;
|
||||
assert_eq!(num_live_peers(&[ci.clone()]), 1);
|
||||
ci.wallclock = timestamp() - 1;
|
||||
assert_eq!(num_live_peers(&[ci.clone()]), 2);
|
||||
|
@@ -12,6 +12,22 @@ pub(crate) struct BroadcastShredBatchInfo {
    pub(crate) slot_start_ts: Instant,
}

#[derive(Default, Clone)]
pub(crate) struct ProcessShredsStats {
    // Per-slot elapsed time
    pub(crate) shredding_elapsed: u64,
    pub(crate) receive_elapsed: u64,
}
impl ProcessShredsStats {
    pub(crate) fn update(&mut self, new_stats: &ProcessShredsStats) {
        self.shredding_elapsed += new_stats.shredding_elapsed;
        self.receive_elapsed += new_stats.receive_elapsed;
    }
    pub(crate) fn reset(&mut self) {
        *self = Self::default();
    }
}

#[derive(Default, Clone)]
pub struct TransmitShredsStats {
    pub transmit_elapsed: u64,
@@ -270,9 +286,10 @@ mod test {
        }

        assert!(slot_broadcast_stats.lock().unwrap().0.get(&slot).is_none());
        let (returned_count, returned_slot, _returned_instant) = receiver.recv().unwrap();
        let (returned_count, returned_slot, returned_instant) = receiver.recv().unwrap();
        assert_eq!(returned_count, num_threads);
        assert_eq!(returned_slot, slot);
        assert_eq!(returned_instant, returned_instant);
    }
}
}
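The ProcessShredsStats added in this hunk is a plain accumulate-and-reset counter. A self-contained sketch of the same pattern; the type below is a stand-in, since the real struct is crate-private to broadcast_stage:

#[derive(Default, Clone)]
struct Timings {
    shredding_elapsed: u64,
    receive_elapsed: u64,
}

impl Timings {
    fn update(&mut self, new: &Timings) {
        self.shredding_elapsed += new.shredding_elapsed;
        self.receive_elapsed += new.receive_elapsed;
    }
    fn reset(&mut self) {
        *self = Self::default();
    }
}

fn main() {
    let mut totals = Timings::default();
    // Accumulate per-batch timings across a slot, report, then reset.
    totals.update(&Timings { shredding_elapsed: 120, receive_elapsed: 45 });
    totals.update(&Timings { shredding_elapsed: 80, receive_elapsed: 30 });
    assert_eq!((totals.shredding_elapsed, totals.receive_elapsed), (200, 75));
    totals.reset();
    assert_eq!(totals.shredding_elapsed, 0);
}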
@@ -102,7 +102,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
blockstore_sender.send((data_shreds.clone(), None))?;
|
||||
// 4) Start broadcast step
|
||||
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let stakes = bank.epoch_staked_nodes(bank_epoch);
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
socket_sender.send(((stakes.clone(), data_shreds), None))?;
|
||||
if let Some((good_last_data_shred, bad_last_data_shred)) = last_shreds {
|
||||
|
@@ -1,5 +1,3 @@
|
||||
#![allow(clippy::rc_buffer)]
|
||||
|
||||
use super::{
|
||||
broadcast_utils::{self, ReceiveResults},
|
||||
*,
|
||||
@@ -7,7 +5,7 @@ use super::{
|
||||
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
|
||||
use solana_ledger::{
|
||||
entry::Entry,
|
||||
shred::{ProcessShredsStats, Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK},
|
||||
shred::{Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK},
|
||||
};
|
||||
use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
|
||||
use std::collections::HashMap;
|
||||
@@ -117,10 +115,9 @@ impl StandardBroadcastRun {
|
||||
next_shred_index: u32,
|
||||
entries: &[Entry],
|
||||
is_slot_end: bool,
|
||||
process_stats: &mut ProcessShredsStats,
|
||||
) -> Vec<Shred> {
|
||||
let (data_shreds, new_next_shred_index) =
|
||||
shredder.entries_to_data_shreds(entries, is_slot_end, next_shred_index, process_stats);
|
||||
shredder.entries_to_data_shreds(entries, is_slot_end, next_shred_index);
|
||||
|
||||
self.unfinished_slot = Some(UnfinishedSlotInfo {
|
||||
next_shred_index: new_next_shred_index,
|
||||
@@ -179,9 +176,7 @@ impl StandardBroadcastRun {
|
||||
receive_elapsed = Duration::new(0, 0);
|
||||
}
|
||||
|
||||
let mut process_stats = ProcessShredsStats::default();
|
||||
|
||||
let mut to_shreds_time = Measure::start("broadcast_to_shreds");
|
||||
let to_shreds_start = Instant::now();
|
||||
|
||||
// 1) Check if slot was interrupted
|
||||
let last_unfinished_slot_shred =
|
||||
@@ -198,7 +193,6 @@ impl StandardBroadcastRun {
|
||||
next_shred_index,
|
||||
&receive_results.entries,
|
||||
is_last_in_slot,
|
||||
&mut process_stats,
|
||||
);
|
||||
// Insert the first shred so blockstore stores that the leader started this block
|
||||
// This must be done before the blocks are sent out over the wire.
|
||||
@@ -209,11 +203,10 @@ impl StandardBroadcastRun {
|
||||
.expect("Failed to insert shreds in blockstore");
|
||||
}
|
||||
let last_data_shred = data_shreds.len();
|
||||
to_shreds_time.stop();
|
||||
let to_shreds_elapsed = to_shreds_start.elapsed();
|
||||
|
||||
let mut get_leader_schedule_time = Measure::start("broadcast_get_leader_schedule");
|
||||
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let stakes = bank.epoch_staked_nodes(bank_epoch);
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
|
||||
// Broadcast the last shred of the interrupted slot if necessary
|
||||
@@ -248,31 +241,18 @@ impl StandardBroadcastRun {
|
||||
.clone()
|
||||
.expect("Start timestamp must exist for a slot if we're broadcasting the slot"),
|
||||
});
|
||||
get_leader_schedule_time.stop();
|
||||
|
||||
let mut coding_send_time = Measure::start("broadcast_coding_send");
|
||||
|
||||
// Send data shreds
|
||||
let data_shreds = Arc::new(data_shreds);
|
||||
socket_sender.send(((stakes.clone(), data_shreds.clone()), batch_info.clone()))?;
|
||||
blockstore_sender.send((data_shreds.clone(), batch_info.clone()))?;
|
||||
|
||||
// Create and send coding shreds
|
||||
let coding_shreds = shredder
|
||||
.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred], &mut process_stats);
|
||||
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
|
||||
let coding_shreds = Arc::new(coding_shreds);
|
||||
socket_sender.send(((stakes, coding_shreds.clone()), batch_info.clone()))?;
|
||||
blockstore_sender.send((coding_shreds, batch_info))?;
|
||||
|
||||
coding_send_time.stop();
|
||||
|
||||
process_stats.shredding_elapsed = to_shreds_time.as_us();
|
||||
process_stats.get_leader_schedule_elapsed = get_leader_schedule_time.as_us();
|
||||
process_stats.receive_elapsed = duration_as_us(&receive_elapsed);
|
||||
process_stats.coding_send_elapsed = coding_send_time.as_us();
|
||||
|
||||
self.process_shreds_stats.update(&process_stats);
|
||||
|
||||
self.process_shreds_stats.update(&ProcessShredsStats {
|
||||
shredding_elapsed: duration_as_us(&to_shreds_elapsed),
|
||||
receive_elapsed: duration_as_us(&receive_elapsed),
|
||||
});
|
||||
if last_tick_height == bank.max_tick_height() {
|
||||
self.report_and_reset_stats();
|
||||
self.unfinished_slot = None;
|
||||
@@ -286,7 +266,7 @@ impl StandardBroadcastRun {
|
||||
blockstore: &Arc<Blockstore>,
|
||||
shreds: Arc<Vec<Shred>>,
|
||||
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
|
||||
) {
|
||||
) -> Result<()> {
|
||||
// Insert shreds into blockstore
|
||||
let insert_shreds_start = Instant::now();
|
||||
// The first shred is inserted synchronously
|
||||
@@ -304,6 +284,7 @@ impl StandardBroadcastRun {
|
||||
num_shreds: shreds.len(),
|
||||
};
|
||||
self.update_insertion_metrics(&new_insert_shreds_stats, &broadcast_shred_batch_info);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_insertion_metrics(
|
||||
@@ -381,8 +362,8 @@ impl StandardBroadcastRun {
|
||||
datapoint_info!(
|
||||
"broadcast-process-shreds-stats",
|
||||
("slot", self.unfinished_slot.unwrap().slot as i64, i64),
|
||||
("shredding_time", stats.shredding_elapsed, i64),
|
||||
("receive_time", stats.receive_elapsed, i64),
|
||||
("shredding_time", stats.shredding_elapsed as i64, i64),
|
||||
("receive_time", stats.receive_elapsed as i64, i64),
|
||||
(
|
||||
"num_data_shreds",
|
||||
i64::from(self.unfinished_slot.unwrap().next_shred_index),
|
||||
@@ -393,16 +374,6 @@ impl StandardBroadcastRun {
|
||||
self.slot_broadcast_start.unwrap().elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"get_leader_schedule_time",
|
||||
stats.get_leader_schedule_elapsed,
|
||||
i64
|
||||
),
|
||||
("serialize_shreds_time", stats.serialize_elapsed, i64),
|
||||
("gen_data_time", stats.gen_data_elapsed, i64),
|
||||
("gen_coding_time", stats.gen_coding_elapsed, i64),
|
||||
("sign_coding_time", stats.sign_coding_elapsed, i64),
|
||||
("coding_send_time", stats.coding_send_elapsed, i64),
|
||||
);
|
||||
self.process_shreds_stats.reset();
|
||||
}
|
||||
@@ -439,8 +410,7 @@ impl BroadcastRun for StandardBroadcastRun {
|
||||
blockstore: &Arc<Blockstore>,
|
||||
) -> Result<()> {
|
||||
let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?;
|
||||
self.insert(blockstore, shreds, slot_start_ts);
|
||||
Ok(())
|
||||
self.insert(blockstore, shreds, slot_start_ts)
|
||||
}
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@@ -85,7 +85,7 @@ impl ClusterSlots {
|
||||
}
|
||||
|
||||
fn update_peers(&self, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
|
||||
let root_bank = bank_forks.read().unwrap().root_bank();
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
let root_epoch = root_bank.epoch();
|
||||
let my_epoch = *self.epoch.read().unwrap();
|
||||
|
||||
@@ -106,30 +106,28 @@ impl ClusterSlots {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec<u64> {
|
||||
let stakes = {
|
||||
let validator_stakes = self.validator_stakes.read().unwrap();
|
||||
repair_peers
|
||||
.iter()
|
||||
.map(|peer| {
|
||||
validator_stakes
|
||||
.get(&peer.id)
|
||||
.map(|node| node.total_stake)
|
||||
.unwrap_or(0)
|
||||
+ 1
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
let slot_peers = match self.lookup(slot) {
|
||||
None => return stakes,
|
||||
Some(slot_peers) => slot_peers,
|
||||
};
|
||||
let slot_peers = slot_peers.read().unwrap();
|
||||
pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec<(u64, usize)> {
|
||||
let slot_peers = self.lookup(slot);
|
||||
repair_peers
|
||||
.iter()
|
||||
.map(|peer| slot_peers.get(&peer.id).cloned().unwrap_or(0))
|
||||
.zip(stakes)
|
||||
.map(|(a, b)| a + b)
|
||||
.enumerate()
|
||||
.map(|(i, x)| {
|
||||
let peer_stake = slot_peers
|
||||
.as_ref()
|
||||
.and_then(|v| v.read().unwrap().get(&x.id).cloned())
|
||||
.unwrap_or(0);
|
||||
(
|
||||
1 + peer_stake
|
||||
+ self
|
||||
.validator_stakes
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&x.id)
|
||||
.map(|v| v.total_stake)
|
||||
.unwrap_or(0),
|
||||
i,
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
@@ -230,7 +228,7 @@ mod tests {
|
||||
fn test_compute_weights() {
|
||||
let cs = ClusterSlots::default();
|
||||
let ci = ContactInfo::default();
|
||||
assert_eq!(cs.compute_weights(0, &[ci]), vec![1]);
|
||||
assert_eq!(cs.compute_weights(0, &[ci]), vec![(1, 0)]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -251,7 +249,7 @@ mod tests {
|
||||
c2.id = k2;
|
||||
assert_eq!(
|
||||
cs.compute_weights(0, &[c1, c2]),
|
||||
vec![std::u64::MAX / 2 + 1, 1]
|
||||
vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
|
||||
);
|
||||
}
|
||||
|
||||
@@ -283,7 +281,7 @@ mod tests {
|
||||
c2.id = k2;
|
||||
assert_eq!(
|
||||
cs.compute_weights(0, &[c1, c2]),
|
||||
vec![std::u64::MAX / 2 + 1, 1]
|
||||
vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
|
||||
);
|
||||
}
|
||||
|
||||
|
@@ -125,7 +125,6 @@ impl ClusterSlotsService {
            while let Ok(mut more) = completed_slots_receiver.try_recv() {
                slots.append(&mut more);
            }
            #[allow(clippy::stable_sort_primitive)]
            slots.sort();
            if !slots.is_empty() {
                cluster_info.push_epoch_slots(&slots);
@@ -164,7 +163,7 @@ impl ClusterSlotsService {
        while let Ok(mut more) = completed_slots_receiver.try_recv() {
            slots.append(&mut more);
        }
        slots.sort_unstable();
        slots.sort();
        slots.dedup();
        if !slots.is_empty() {
            cluster_info.push_epoch_slots(&slots);
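One side of this hunk sorts with sort() and the other with sort_unstable(); either way the sort has to come before dedup(), which only drops adjacent duplicates. A minimal sketch with arbitrary slot numbers:

fn main() {
    let mut slots: Vec<u64> = vec![7, 3, 7, 5, 3];
    // Unstable sort is sufficient for plain integers and avoids the stable sort's extra allocation.
    slots.sort_unstable();
    // dedup() removes only adjacent duplicates, hence sorting first.
    slots.dedup();
    assert_eq!(slots, vec![3, 5, 7]);
}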
@@ -113,17 +113,7 @@ impl AggregateCommitmentService {
|
||||
"aggregate-commitment-ms",
|
||||
aggregate_commitment_time.as_ms() as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"highest-confirmed-root",
|
||||
update_commitment_slots.highest_confirmed_root as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"highest-confirmed-slot",
|
||||
update_commitment_slots.highest_confirmed_slot as i64,
|
||||
i64
|
||||
),
|
||||
)
|
||||
);
|
||||
|
||||
// Triggers rpc_subscription notifications as soon as new commitment data is available,
|
||||
@@ -186,15 +176,19 @@ impl AggregateCommitmentService {
|
||||
if lamports == 0 {
|
||||
continue;
|
||||
}
|
||||
if let Ok(vote_state) = account.vote_state().as_ref() {
|
||||
Self::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
vote_state,
|
||||
ancestors,
|
||||
lamports,
|
||||
);
|
||||
let vote_state = VoteState::from(&account);
|
||||
if vote_state.is_none() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let vote_state = vote_state.unwrap();
|
||||
Self::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
ancestors,
|
||||
lamports,
|
||||
);
|
||||
}
|
||||
|
||||
(commitment, rooted_stake)
|
||||
@@ -249,7 +243,6 @@ mod tests {
|
||||
use super::*;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_runtime::{
|
||||
accounts_background_service::ABSRequestSender,
|
||||
bank_forks::BankForks,
|
||||
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
|
||||
};
|
||||
@@ -434,26 +427,26 @@ mod tests {
|
||||
let mut vote_state1 = VoteState::from(&vote_account1).unwrap();
|
||||
vote_state1.process_slot_vote_unchecked(3);
|
||||
vote_state1.process_slot_vote_unchecked(5);
|
||||
let versioned = VoteStateVersions::new_current(vote_state1);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state1));
|
||||
VoteState::to(&versioned, &mut vote_account1).unwrap();
|
||||
bank.store_account(&pk1, &vote_account1);
|
||||
|
||||
let mut vote_state2 = VoteState::from(&vote_account2).unwrap();
|
||||
vote_state2.process_slot_vote_unchecked(9);
|
||||
vote_state2.process_slot_vote_unchecked(10);
|
||||
let versioned = VoteStateVersions::new_current(vote_state2);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state2));
|
||||
VoteState::to(&versioned, &mut vote_account2).unwrap();
|
||||
bank.store_account(&pk2, &vote_account2);
|
||||
|
||||
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
|
||||
vote_state3.root_slot = Some(1);
|
||||
let versioned = VoteStateVersions::new_current(vote_state3);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
|
||||
VoteState::to(&versioned, &mut vote_account3).unwrap();
|
||||
bank.store_account(&pk3, &vote_account3);
|
||||
|
||||
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
|
||||
vote_state4.root_slot = Some(2);
|
||||
let versioned = VoteStateVersions::new_current(vote_state4);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
|
||||
VoteState::to(&versioned, &mut vote_account4).unwrap();
|
||||
bank.store_account(&pk4, &vote_account4);
|
||||
|
||||
@@ -489,14 +482,9 @@ mod tests {
|
||||
#[test]
|
||||
fn test_highest_confirmed_root_advance() {
|
||||
fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Arc<Bank>) -> Slot {
|
||||
let (_stake, vote_account) = bank.get_vote_account(&vote_pubkey).unwrap();
|
||||
let slot = vote_account
|
||||
.vote_state()
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.root_slot
|
||||
.unwrap();
|
||||
slot
|
||||
let account = &bank.vote_accounts()[&vote_pubkey].1;
|
||||
let vote_state = VoteState::from(account).unwrap();
|
||||
vote_state.root_slot.unwrap()
|
||||
}
|
||||
|
||||
let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests());
|
||||
@@ -540,7 +528,7 @@ mod tests {
|
||||
&working_bank,
|
||||
);
|
||||
for x in 0..root {
|
||||
bank_forks.set_root(x, &ABSRequestSender::default(), None);
|
||||
bank_forks.set_root(x, &None, None);
|
||||
}
|
||||
|
||||
// Add an additional bank/vote that will root slot 2
|
||||
@@ -577,11 +565,7 @@ mod tests {
|
||||
.read()
|
||||
.unwrap()
|
||||
.highest_confirmed_root();
|
||||
bank_forks.set_root(
|
||||
root,
|
||||
&ABSRequestSender::default(),
|
||||
Some(highest_confirmed_root),
|
||||
);
|
||||
bank_forks.set_root(root, &None, Some(highest_confirmed_root));
|
||||
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
|
||||
assert!(highest_confirmed_root_bank.is_some());
|
||||
|
||||
@@ -646,11 +630,7 @@ mod tests {
|
||||
.read()
|
||||
.unwrap()
|
||||
.highest_confirmed_root();
|
||||
bank_forks.set_root(
|
||||
root,
|
||||
&ABSRequestSender::default(),
|
||||
Some(highest_confirmed_root),
|
||||
);
|
||||
bank_forks.set_root(root, &None, Some(highest_confirmed_root));
|
||||
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
|
||||
assert!(highest_confirmed_root_bank.is_some());
|
||||
}
|
||||
|
@@ -5,11 +5,9 @@ use crate::{
|
||||
use chrono::prelude::*;
|
||||
use solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::{
|
||||
bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE,
|
||||
vote_account::ArcVoteAccount,
|
||||
};
|
||||
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Slot, UnixTimestamp},
|
||||
hash::Hash,
|
||||
instruction::Instruction,
|
||||
@@ -26,10 +24,7 @@ use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fs::{self, File},
|
||||
io::BufReader,
|
||||
ops::{
|
||||
Bound::{Included, Unbounded},
|
||||
Deref,
|
||||
},
|
||||
ops::Bound::{Included, Unbounded},
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
@@ -184,21 +179,23 @@ impl Tower {
|
||||
vote_account: &Pubkey,
|
||||
) -> Self {
|
||||
let root_bank = bank_forks.root_bank();
|
||||
let (_progress, heaviest_subtree_fork_choice) =
|
||||
let (_progress, heaviest_subtree_fork_choice, unlock_heaviest_subtree_fork_choice_slot) =
|
||||
crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
|
||||
root_bank.deref(),
|
||||
root_bank,
|
||||
bank_forks.frozen_banks().values().cloned().collect(),
|
||||
&my_pubkey,
|
||||
&vote_account,
|
||||
);
|
||||
let root = root_bank.slot();
|
||||
|
||||
let heaviest_bank = bank_forks
|
||||
.get(heaviest_subtree_fork_choice.best_overall_slot())
|
||||
.expect(
|
||||
"The best overall slot must be one of `frozen_banks` which all exist in bank_forks",
|
||||
)
|
||||
.clone();
|
||||
let heaviest_bank = if root > unlock_heaviest_subtree_fork_choice_slot {
|
||||
bank_forks
|
||||
.get(heaviest_subtree_fork_choice.best_overall_slot())
|
||||
.expect("The best overall slot must be one of `frozen_banks` which all exist in bank_forks")
|
||||
.clone()
|
||||
} else {
|
||||
Tower::find_heaviest_bank(&bank_forks, &my_pubkey).unwrap_or_else(|| root_bank.clone())
|
||||
};
|
||||
|
||||
Self::new(
|
||||
&my_pubkey,
|
||||
@@ -217,9 +214,8 @@ impl Tower {
|
||||
all_pubkeys: &mut PubkeyReferences,
|
||||
) -> ComputedBankState
|
||||
where
|
||||
F: IntoIterator<Item = (Pubkey, (u64, ArcVoteAccount))>,
|
||||
F: Iterator<Item = (Pubkey, (u64, Account))>,
|
||||
{
|
||||
let mut vote_slots = HashSet::new();
|
||||
let mut voted_stakes = HashMap::new();
|
||||
let mut total_stake = 0;
|
||||
let mut bank_weight = 0;
|
||||
@@ -232,20 +228,20 @@ impl Tower {
|
||||
continue;
|
||||
}
|
||||
trace!("{} {} with stake {}", node_pubkey, key, voted_stake);
|
||||
let mut vote_state = match account.vote_state().as_ref() {
|
||||
Err(_) => {
|
||||
datapoint_warn!(
|
||||
"tower_warn",
|
||||
(
|
||||
"warn",
|
||||
format!("Unable to get vote_state from account {}", key),
|
||||
String
|
||||
),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
Ok(vote_state) => vote_state.clone(),
|
||||
};
|
||||
let vote_state = VoteState::from(&account);
|
||||
if vote_state.is_none() {
|
||||
datapoint_warn!(
|
||||
"tower_warn",
|
||||
(
|
||||
"warn",
|
||||
format!("Unable to get vote_state from account {}", key),
|
||||
String
|
||||
),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let mut vote_state = vote_state.unwrap();
|
||||
|
||||
for vote in &vote_state.votes {
|
||||
let key = all_pubkeys.get_or_insert(&key);
|
||||
lockout_intervals
|
||||
@@ -282,7 +278,7 @@ impl Tower {
|
||||
|
||||
for vote in &vote_state.votes {
|
||||
bank_weight += vote.lockout() as u128 * voted_stake as u128;
|
||||
vote_slots.insert(vote.slot);
|
||||
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
|
||||
}
|
||||
|
||||
if start_root != vote_state.root_slot {
|
||||
@@ -293,7 +289,7 @@ impl Tower {
|
||||
};
|
||||
trace!("ROOT: {}", vote.slot);
|
||||
bank_weight += vote.lockout() as u128 * voted_stake as u128;
|
||||
vote_slots.insert(vote.slot);
|
||||
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
|
||||
}
|
||||
}
|
||||
if let Some(root) = vote_state.root_slot {
|
||||
@@ -302,7 +298,7 @@ impl Tower {
|
||||
slot: root,
|
||||
};
|
||||
bank_weight += vote.lockout() as u128 * voted_stake as u128;
|
||||
vote_slots.insert(vote.slot);
|
||||
Self::populate_ancestor_voted_stakes(&mut voted_stakes, &vote, ancestors);
|
||||
}
|
||||
|
||||
// The last vote in the vote stack is a simulated vote on bank_slot, which
|
||||
@@ -330,9 +326,6 @@ impl Tower {
|
||||
total_stake += voted_stake;
|
||||
}
|
||||
|
||||
// TODO: populate_ancestor_voted_stakes only adds zeros. Comment why
|
||||
// that is necessary (if so).
|
||||
Self::populate_ancestor_voted_stakes(&mut voted_stakes, vote_slots, ancestors);
|
||||
ComputedBankState {
|
||||
voted_stakes,
|
||||
total_stake,
|
||||
@@ -383,9 +376,9 @@ impl Tower {
|
||||
}
|
||||
|
||||
fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
|
||||
let (_stake, vote_account) = bank.get_vote_account(vote_account_pubkey)?;
|
||||
let slot = vote_account.vote_state().as_ref().ok()?.last_voted_slot();
|
||||
slot
|
||||
let vote_account = bank.vote_accounts().get(vote_account_pubkey)?.1.clone();
|
||||
let bank_vote_state = VoteState::deserialize(&vote_account.data).ok()?;
|
||||
bank_vote_state.last_voted_slot()
|
||||
}
|
||||
|
||||
pub fn new_vote_from_bank(&self, bank: &Bank, vote_account_pubkey: &Pubkey) -> (Vote, usize) {
|
||||
@@ -516,7 +509,7 @@ impl Tower {
|
||||
descendants: &HashMap<Slot, HashSet<u64>>,
|
||||
progress: &ProgressMap,
|
||||
total_stake: u64,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
) -> SwitchForkDecision {
|
||||
self.last_voted_slot()
|
||||
.map(|last_voted_slot| {
|
||||
@@ -710,7 +703,7 @@ impl Tower {
|
||||
descendants: &HashMap<Slot, HashSet<u64>>,
|
||||
progress: &ProgressMap,
|
||||
total_stake: u64,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
) -> SwitchForkDecision {
|
||||
let decision = self.make_check_switch_threshold_decision(
|
||||
switch_slot,
|
||||
@@ -773,20 +766,41 @@ impl Tower {
|
||||
/// Update lockouts for all the ancestors
|
||||
pub(crate) fn populate_ancestor_voted_stakes(
|
||||
voted_stakes: &mut VotedStakes,
|
||||
vote_slots: impl IntoIterator<Item = Slot>,
|
||||
vote: &Lockout,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
) {
|
||||
// If there's no ancestors, that means this slot must be from before the current root,
|
||||
// in which case the lockouts won't be calculated in bank_weight anyways, so ignore
|
||||
// this slot
|
||||
for vote_slot in vote_slots {
|
||||
if let Some(slot_ancestors) = ancestors.get(&vote_slot) {
|
||||
voted_stakes.entry(vote_slot).or_default();
|
||||
for slot in slot_ancestors {
|
||||
voted_stakes.entry(*slot).or_default();
|
||||
}
|
||||
}
|
||||
let vote_slot_ancestors = ancestors.get(&vote.slot);
|
||||
if vote_slot_ancestors.is_none() {
|
||||
return;
|
||||
}
|
||||
let mut slot_with_ancestors = vec![vote.slot];
|
||||
slot_with_ancestors.extend(vote_slot_ancestors.unwrap());
|
||||
for slot in slot_with_ancestors {
|
||||
voted_stakes.entry(slot).or_default();
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn find_heaviest_bank(
|
||||
bank_forks: &BankForks,
|
||||
node_pubkey: &Pubkey,
|
||||
) -> Option<Arc<Bank>> {
|
||||
let ancestors = bank_forks.ancestors();
|
||||
let mut bank_weights: Vec<_> = bank_forks
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.map(|b| {
|
||||
(
|
||||
Self::bank_weight(node_pubkey, b, &ancestors),
|
||||
b.parents().len(),
|
||||
b.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
bank_weights.sort_by_key(|b| (b.0, b.1));
|
||||
bank_weights.pop().map(|b| b.2)
|
||||
}
|
||||
|
||||
/// Update stake for all the ancestors.
|
||||
@@ -799,12 +813,31 @@ impl Tower {
|
||||
) {
|
||||
// If there's no ancestors, that means this slot must be from
|
||||
// before the current root, so ignore this slot
|
||||
if let Some(vote_slot_ancestors) = ancestors.get(&voted_slot) {
|
||||
*voted_stakes.entry(voted_slot).or_default() += voted_stake;
|
||||
for slot in vote_slot_ancestors {
|
||||
*voted_stakes.entry(*slot).or_default() += voted_stake;
|
||||
}
|
||||
let vote_slot_ancestors = ancestors.get(&voted_slot);
|
||||
if vote_slot_ancestors.is_none() {
|
||||
return;
|
||||
}
|
||||
let mut slot_with_ancestors = vec![voted_slot];
|
||||
slot_with_ancestors.extend(vote_slot_ancestors.unwrap());
|
||||
for slot in slot_with_ancestors {
|
||||
let current = voted_stakes.entry(slot).or_default();
|
||||
*current += voted_stake;
|
||||
}
|
||||
}
|
||||
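The two hunks above show `populate_ancestor_voted_stakes` and `update_ancestor_voted_stakes` in both their per-`Lockout` and batch forms; the bookkeeping is the same either way: touch the voted slot plus every ancestor of it, and ignore slots with no ancestor entry because they predate the current root. A hedged, std-only sketch of both operations, with plain maps standing in for the `VotedStakes` alias:

```rust
use std::collections::{HashMap, HashSet};

type Slot = u64;
type Stake = u64;

// Create zero-valued entries for each vote slot and all of its ancestors.
// Slots with no ancestor entry are from before the root and are ignored.
fn populate_ancestor_voted_stakes(
    voted_stakes: &mut HashMap<Slot, Stake>,
    vote_slots: impl IntoIterator<Item = Slot>,
    ancestors: &HashMap<Slot, HashSet<Slot>>,
) {
    for vote_slot in vote_slots {
        if let Some(slot_ancestors) = ancestors.get(&vote_slot) {
            voted_stakes.entry(vote_slot).or_default();
            for slot in slot_ancestors {
                voted_stakes.entry(*slot).or_default();
            }
        }
    }
}

// Add `voted_stake` to the voted slot and to every ancestor of it.
fn update_ancestor_voted_stakes(
    voted_stakes: &mut HashMap<Slot, Stake>,
    voted_slot: Slot,
    voted_stake: Stake,
    ancestors: &HashMap<Slot, HashSet<Slot>>,
) {
    if let Some(slot_ancestors) = ancestors.get(&voted_slot) {
        *voted_stakes.entry(voted_slot).or_default() += voted_stake;
        for slot in slot_ancestors {
            *voted_stakes.entry(*slot).or_default() += voted_stake;
        }
    }
}
```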
|
||||
fn bank_weight(
|
||||
node_pubkey: &Pubkey,
|
||||
bank: &Bank,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
) -> u128 {
|
||||
let ComputedBankState { bank_weight, .. } = Self::collect_vote_lockouts(
|
||||
node_pubkey,
|
||||
bank.slot(),
|
||||
bank.vote_accounts().into_iter(),
|
||||
ancestors,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
bank_weight
|
||||
}
|
||||
|
||||
fn voted_slots(&self) -> Vec<Slot> {
|
||||
@@ -1025,12 +1058,10 @@ impl Tower {
|
||||
root: Slot,
|
||||
bank: &Bank,
|
||||
) {
|
||||
if let Some((_stake, vote_account)) = bank.get_vote_account(vote_account_pubkey) {
|
||||
self.lockouts = vote_account
|
||||
.vote_state()
|
||||
.as_ref()
|
||||
.expect("vote_account isn't a VoteState?")
|
||||
.clone();
|
||||
if let Some((_stake, vote_account)) = bank.vote_accounts().get(vote_account_pubkey) {
|
||||
let vote_state = VoteState::deserialize(&vote_account.data)
|
||||
.expect("vote_account isn't a VoteState?");
|
||||
self.lockouts = vote_state;
|
||||
self.initialize_root(root);
|
||||
self.initialize_lockouts(|v| v.slot > root);
|
||||
trace!(
|
||||
@@ -1238,6 +1269,7 @@ pub fn reconcile_blockstore_roots_with_tower(
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use crate::{
|
||||
bank_weight_fork_choice::BankWeightForkChoice,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_slots::ClusterSlots,
|
||||
fork_choice::SelectVoteAndResetForkResult,
|
||||
@@ -1247,7 +1279,6 @@ pub mod test {
|
||||
};
|
||||
use solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path};
|
||||
use solana_runtime::{
|
||||
accounts_background_service::ABSRequestSender,
|
||||
bank::Bank,
|
||||
bank_forks::BankForks,
|
||||
genesis_utils::{
|
||||
@@ -1255,8 +1286,7 @@ pub mod test {
|
||||
},
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account, clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signer,
|
||||
slot_history::SlotHistory,
|
||||
clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signer, slot_history::SlotHistory,
|
||||
};
|
||||
use solana_vote_program::{
|
||||
vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY},
|
||||
@@ -1378,6 +1408,7 @@ pub mod test {
|
||||
&self.bank_forks,
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
);
|
||||
|
||||
let vote_bank = self
|
||||
@@ -1420,7 +1451,7 @@ pub mod test {
|
||||
new_root,
|
||||
&self.bank_forks,
|
||||
&mut self.progress,
|
||||
&ABSRequestSender::default(),
|
||||
&None,
|
||||
&mut PubkeyReferences::default(),
|
||||
None,
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
@@ -1573,27 +1604,22 @@ pub mod test {
|
||||
(bank_forks, progress, heaviest_subtree_fork_choice)
|
||||
}
|
||||
|
||||
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, ArcVoteAccount))> {
|
||||
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, Account))> {
|
||||
let mut stakes = vec![];
|
||||
for (lamports, votes) in stake_votes {
|
||||
let mut account = Account {
|
||||
data: vec![0; VoteState::size_of()],
|
||||
lamports: *lamports,
|
||||
..Account::default()
|
||||
};
|
||||
let mut account = Account::default();
|
||||
account.data = vec![0; VoteState::size_of()];
|
||||
account.lamports = *lamports;
|
||||
let mut vote_state = VoteState::default();
|
||||
for slot in *votes {
|
||||
vote_state.process_slot_vote_unchecked(*slot);
|
||||
}
|
||||
VoteState::serialize(
|
||||
&VoteStateVersions::new_current(vote_state),
|
||||
&VoteStateVersions::Current(Box::new(vote_state)),
|
||||
&mut account.data,
|
||||
)
|
||||
.expect("serialize state");
|
||||
stakes.push((
|
||||
solana_sdk::pubkey::new_rand(),
|
||||
(*lamports, ArcVoteAccount::from(account)),
|
||||
));
|
||||
stakes.push((solana_sdk::pubkey::new_rand(), (*lamports, account)));
|
||||
}
|
||||
stakes
|
||||
}
|
||||
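The `gen_stakes` hunk above contrasts field-by-field mutation of a default `Account` with struct-update syntax. A small, hedged illustration of the struct-update style with a stand-in account type (any field beyond `lamports` and `data` is an assumption for the example):

```rust
#[derive(Default)]
struct ToyAccount {
    lamports: u64,
    data: Vec<u8>,
    executable: bool,
}

fn make_vote_account(lamports: u64, data_len: usize) -> ToyAccount {
    // Struct-update syntax: list the interesting fields, then fill the
    // remaining ones from Default in a single expression.
    ToyAccount {
        lamports,
        data: vec![0; data_len],
        ..ToyAccount::default()
    }
}
```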
@@ -1947,16 +1973,16 @@ pub mod test {
|
||||
}
|
||||
|
||||
info!("local tower: {:#?}", tower.lockouts.votes);
|
||||
let observed = vote_simulator
|
||||
let vote_accounts = vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(next_unlocked_slot)
|
||||
.unwrap()
|
||||
.get_vote_account(&vote_pubkey)
|
||||
.unwrap();
|
||||
let state = observed.1.vote_state();
|
||||
info!("observed tower: {:#?}", state.as_ref().unwrap().votes);
|
||||
.vote_accounts();
|
||||
let observed = vote_accounts.get(&vote_pubkey).unwrap();
|
||||
let state = VoteState::from(&observed.1).unwrap();
|
||||
info!("observed tower: {:#?}", state.votes);
|
||||
|
||||
let num_slots_to_try = 200;
|
||||
cluster_votes
|
||||
@@ -2063,7 +2089,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_check_vote_threshold_without_votes() {
|
||||
let tower = Tower::new_for_tests(1, 0.67);
|
||||
let stakes = vec![(0, 1)].into_iter().collect();
|
||||
let stakes = vec![(0, 1 as Stake)].into_iter().collect();
|
||||
assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
|
||||
}
|
||||
|
||||
@@ -2073,7 +2099,7 @@ pub mod test {
|
||||
let mut tower = Tower::new_for_tests(4, 0.67);
|
||||
let mut stakes = HashMap::new();
|
||||
for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
|
||||
stakes.insert(i, 1);
|
||||
stakes.insert(i, 1 as Stake);
|
||||
tower.record_vote(i, Hash::default());
|
||||
}
|
||||
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2,));
|
||||
@@ -2082,7 +2108,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_is_slot_confirmed_not_enough_stake_failure() {
|
||||
let tower = Tower::new_for_tests(1, 0.67);
|
||||
let stakes = vec![(0, 1)].into_iter().collect();
|
||||
let stakes = vec![(0, 1 as Stake)].into_iter().collect();
|
||||
assert!(!tower.is_slot_confirmed(0, &stakes, 2));
|
||||
}
|
||||
|
||||
@@ -2096,7 +2122,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_is_slot_confirmed_pass() {
|
||||
let tower = Tower::new_for_tests(1, 0.67);
|
||||
let stakes = vec![(0, 2)].into_iter().collect();
|
||||
let stakes = vec![(0, 2 as Stake)].into_iter().collect();
|
||||
assert!(tower.is_slot_confirmed(0, &stakes, 2));
|
||||
}
|
||||
|
||||
@@ -2209,14 +2235,14 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_check_vote_threshold_below_threshold() {
|
||||
let mut tower = Tower::new_for_tests(1, 0.67);
|
||||
let stakes = vec![(0, 1)].into_iter().collect();
|
||||
let stakes = vec![(0, 1 as Stake)].into_iter().collect();
|
||||
tower.record_vote(0, Hash::default());
|
||||
assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
|
||||
}
|
||||
#[test]
|
||||
fn test_check_vote_threshold_above_threshold() {
|
||||
let mut tower = Tower::new_for_tests(1, 0.67);
|
||||
let stakes = vec![(0, 2)].into_iter().collect();
|
||||
let stakes = vec![(0, 2 as Stake)].into_iter().collect();
|
||||
tower.record_vote(0, Hash::default());
|
||||
assert!(tower.check_vote_stake_threshold(1, &stakes, 2));
|
||||
}
|
||||
@@ -2224,7 +2250,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_check_vote_threshold_above_threshold_after_pop() {
|
||||
let mut tower = Tower::new_for_tests(1, 0.67);
|
||||
let stakes = vec![(0, 2)].into_iter().collect();
|
||||
let stakes = vec![(0, 2 as Stake)].into_iter().collect();
|
||||
tower.record_vote(0, Hash::default());
|
||||
tower.record_vote(1, Hash::default());
|
||||
tower.record_vote(2, Hash::default());
|
||||
@@ -2243,7 +2269,7 @@ pub mod test {
|
||||
fn test_check_vote_threshold_lockouts_not_updated() {
|
||||
solana_logger::setup();
|
||||
let mut tower = Tower::new_for_tests(1, 0.67);
|
||||
let stakes = vec![(0, 1), (1, 2)].into_iter().collect();
|
||||
let stakes = vec![(0, 1 as Stake), (1, 2 as Stake)].into_iter().collect();
|
||||
tower.record_vote(0, Hash::default());
|
||||
tower.record_vote(1, Hash::default());
|
||||
tower.record_vote(2, Hash::default());
|
||||
@@ -2253,10 +2279,8 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_stake_is_updated_for_entire_branch() {
|
||||
let mut voted_stakes = HashMap::new();
|
||||
let account = Account {
|
||||
lamports: 1,
|
||||
..Account::default()
|
||||
};
|
||||
let mut account = Account::default();
|
||||
account.lamports = 1;
|
||||
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
|
||||
let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
|
||||
Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports, &ancestors);
|
||||
|
@@ -26,7 +26,7 @@ pub struct ContactInfo {
|
||||
/// address to forward unprocessed transactions to
|
||||
pub tpu_forwards: SocketAddr,
|
||||
/// address to which to send bank state requests
|
||||
pub unused: SocketAddr,
|
||||
pub rpc_banks: SocketAddr,
|
||||
/// address to which to send JSON-RPC requests
|
||||
pub rpc: SocketAddr,
|
||||
/// websocket for JSON-RPC push notifications
|
||||
@@ -95,7 +95,7 @@ impl Default for ContactInfo {
|
||||
repair: socketaddr_any!(),
|
||||
tpu: socketaddr_any!(),
|
||||
tpu_forwards: socketaddr_any!(),
|
||||
unused: socketaddr_any!(),
|
||||
rpc_banks: socketaddr_any!(),
|
||||
rpc: socketaddr_any!(),
|
||||
rpc_pubsub: socketaddr_any!(),
|
||||
serve_repair: socketaddr_any!(),
|
||||
@@ -115,7 +115,7 @@ impl ContactInfo {
|
||||
repair: socketaddr!("127.0.0.1:1237"),
|
||||
tpu: socketaddr!("127.0.0.1:1238"),
|
||||
tpu_forwards: socketaddr!("127.0.0.1:1239"),
|
||||
unused: socketaddr!("127.0.0.1:1240"),
|
||||
rpc_banks: socketaddr!("127.0.0.1:1240"),
|
||||
rpc: socketaddr!("127.0.0.1:1241"),
|
||||
rpc_pubsub: socketaddr!("127.0.0.1:1242"),
|
||||
serve_repair: socketaddr!("127.0.0.1:1243"),
|
||||
@@ -145,7 +145,7 @@ impl ContactInfo {
|
||||
repair: addr,
|
||||
tpu: addr,
|
||||
tpu_forwards: addr,
|
||||
unused: addr,
|
||||
rpc_banks: addr,
|
||||
rpc: addr,
|
||||
rpc_pubsub: addr,
|
||||
serve_repair: addr,
|
||||
@@ -170,6 +170,7 @@ impl ContactInfo {
|
||||
let repair = next_port(&bind_addr, 5);
|
||||
let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
|
||||
let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
|
||||
let rpc_banks = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_BANKS_PORT);
|
||||
let serve_repair = next_port(&bind_addr, 6);
|
||||
Self {
|
||||
id: *pubkey,
|
||||
@@ -179,7 +180,7 @@ impl ContactInfo {
|
||||
repair,
|
||||
tpu,
|
||||
tpu_forwards,
|
||||
unused: "0.0.0.0:0".parse().unwrap(),
|
||||
rpc_banks,
|
||||
rpc,
|
||||
rpc_pubsub,
|
||||
serve_repair,
|
||||
@@ -256,7 +257,7 @@ mod tests {
|
||||
assert!(ci.rpc.ip().is_unspecified());
|
||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||
assert!(ci.tpu.ip().is_unspecified());
|
||||
assert!(ci.unused.ip().is_unspecified());
|
||||
assert!(ci.rpc_banks.ip().is_unspecified());
|
||||
assert!(ci.serve_repair.ip().is_unspecified());
|
||||
}
|
||||
#[test]
|
||||
@@ -268,7 +269,7 @@ mod tests {
|
||||
assert!(ci.rpc.ip().is_multicast());
|
||||
assert!(ci.rpc_pubsub.ip().is_multicast());
|
||||
assert!(ci.tpu.ip().is_multicast());
|
||||
assert!(ci.unused.ip().is_multicast());
|
||||
assert!(ci.rpc_banks.ip().is_multicast());
|
||||
assert!(ci.serve_repair.ip().is_multicast());
|
||||
}
|
||||
#[test]
|
||||
@@ -281,7 +282,7 @@ mod tests {
|
||||
assert!(ci.rpc.ip().is_unspecified());
|
||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||
assert!(ci.tpu.ip().is_unspecified());
|
||||
assert!(ci.unused.ip().is_unspecified());
|
||||
assert!(ci.rpc_banks.ip().is_unspecified());
|
||||
assert!(ci.serve_repair.ip().is_unspecified());
|
||||
}
|
||||
#[test]
|
||||
@@ -294,7 +295,7 @@ mod tests {
|
||||
assert_eq!(ci.tpu_forwards.port(), 13);
|
||||
assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
|
||||
assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
|
||||
assert!(ci.unused.ip().is_unspecified());
|
||||
assert_eq!(ci.rpc_banks.port(), rpc_port::DEFAULT_RPC_BANKS_PORT);
|
||||
assert_eq!(ci.serve_repair.port(), 16);
|
||||
}
|
||||
|
||||
@@ -318,6 +319,10 @@ mod tests {
|
||||
d1.rpc_pubsub,
|
||||
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT))
|
||||
);
|
||||
assert_eq!(
|
||||
d1.rpc_banks,
|
||||
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_BANKS_PORT))
|
||||
);
|
||||
assert_eq!(d1.tvu_forwards, socketaddr!("127.0.0.1:1238"));
|
||||
assert_eq!(d1.repair, socketaddr!("127.0.0.1:1239"));
|
||||
assert_eq!(d1.serve_repair, socketaddr!("127.0.0.1:1240"));
|
||||
|
188
core/src/crds.rs
@@ -26,9 +26,9 @@
|
||||
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_shards::CrdsShards;
|
||||
use crate::crds_value::{CrdsData, CrdsValue, CrdsValueLabel, LowestSlot};
|
||||
use crate::crds_value::{CrdsData, CrdsValue, CrdsValueLabel};
|
||||
use bincode::serialize;
|
||||
use indexmap::map::{rayon::ParValues, Entry, IndexMap, Values};
|
||||
use indexmap::map::{rayon::ParValues, Entry, IndexMap, Iter, Values};
|
||||
use indexmap::set::IndexSet;
|
||||
use rayon::{prelude::*, ThreadPool};
|
||||
use solana_sdk::hash::{hash, Hash};
|
||||
@@ -36,8 +36,8 @@ use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::Keypair;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::cmp;
|
||||
use std::collections::{hash_map, HashMap};
|
||||
use std::ops::{Index, IndexMut};
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Index;
|
||||
|
||||
const CRDS_SHARDS_BITS: u32 = 8;
|
||||
|
||||
@@ -47,10 +47,8 @@ pub struct Crds {
|
||||
table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
|
||||
pub num_inserts: usize, // Only used in tests.
|
||||
shards: CrdsShards,
|
||||
nodes: IndexSet<usize>, // Indices of nodes' ContactInfo.
|
||||
votes: IndexSet<usize>, // Indices of Vote crds values.
|
||||
// Indices of all crds values associated with a node.
|
||||
records: HashMap<Pubkey, IndexSet<usize>>,
|
||||
// Indices of all crds values which are node ContactInfo.
|
||||
nodes: IndexSet<usize>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
@@ -109,8 +107,6 @@ impl Default for Crds {
|
||||
num_inserts: 0,
|
||||
shards: CrdsShards::new(CRDS_SHARDS_BITS),
|
||||
nodes: IndexSet::default(),
|
||||
votes: IndexSet::default(),
|
||||
records: HashMap::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -142,19 +138,9 @@ impl Crds {
|
||||
Entry::Vacant(entry) => {
|
||||
let entry_index = entry.index();
|
||||
self.shards.insert(entry_index, &new_value);
|
||||
match new_value.value.data {
|
||||
CrdsData::ContactInfo(_) => {
|
||||
self.nodes.insert(entry_index);
|
||||
}
|
||||
CrdsData::Vote(_, _) => {
|
||||
self.votes.insert(entry_index);
|
||||
}
|
||||
_ => (),
|
||||
};
|
||||
self.records
|
||||
.entry(new_value.value.pubkey())
|
||||
.or_default()
|
||||
.insert(entry_index);
|
||||
if let CrdsData::ContactInfo(_) = new_value.value.data {
|
||||
self.nodes.insert(entry_index);
|
||||
}
|
||||
entry.insert(new_value);
|
||||
self.num_inserts += 1;
|
||||
Ok(None)
|
||||
@@ -164,9 +150,6 @@ impl Crds {
|
||||
self.shards.remove(index, entry.get());
|
||||
self.shards.insert(index, &new_value);
|
||||
self.num_inserts += 1;
|
||||
// As long as the pubkey does not change, self.records
|
||||
// does not need to be updated.
|
||||
debug_assert_eq!(entry.get().value.pubkey(), new_value.value.pubkey());
|
||||
Ok(Some(entry.insert(new_value)))
|
||||
}
|
||||
_ => {
|
||||
@@ -199,16 +182,11 @@ impl Crds {
|
||||
self.table.get(label)
|
||||
}
|
||||
|
||||
pub fn get_contact_info(&self, pubkey: Pubkey) -> Option<&ContactInfo> {
|
||||
let label = CrdsValueLabel::ContactInfo(pubkey);
|
||||
pub fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo> {
|
||||
let label = CrdsValueLabel::ContactInfo(*pubkey);
|
||||
self.table.get(&label)?.value.contact_info()
|
||||
}
|
||||
|
||||
pub fn get_lowest_slot(&self, pubkey: Pubkey) -> Option<&LowestSlot> {
|
||||
let lable = CrdsValueLabel::LowestSlot(pubkey);
|
||||
self.table.get(&lable)?.value.lowest_slot()
|
||||
}
|
||||
|
||||
/// Returns all entries which are ContactInfo.
|
||||
pub fn get_nodes(&self) -> impl Iterator<Item = &VersionedCrdsValue> {
|
||||
self.nodes.iter().map(move |i| self.table.index(*i))
|
||||
@@ -222,11 +200,6 @@ impl Crds {
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns all entries which are Vote.
|
||||
pub(crate) fn get_votes(&self) -> impl Iterator<Item = &VersionedCrdsValue> {
|
||||
self.votes.iter().map(move |i| self.table.index(*i))
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.table.len()
|
||||
}
|
||||
@@ -235,6 +208,10 @@ impl Crds {
|
||||
self.table.is_empty()
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> Iter<'_, CrdsValueLabel, VersionedCrdsValue> {
|
||||
self.table.iter()
|
||||
}
|
||||
|
||||
pub fn values(&self) -> Values<'_, CrdsValueLabel, VersionedCrdsValue> {
|
||||
self.table.values()
|
||||
}
|
||||
@@ -255,15 +232,16 @@ impl Crds {
|
||||
.map(move |i| self.table.index(i))
|
||||
}
|
||||
|
||||
fn update_label_timestamp(&mut self, id: &CrdsValueLabel, now: u64) {
|
||||
if let Some(e) = self.table.get_mut(id) {
|
||||
e.local_timestamp = cmp::max(e.local_timestamp, now);
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the timestamp's of all the labels that are associated with Pubkey
|
||||
pub fn update_record_timestamp(&mut self, pubkey: &Pubkey, now: u64) {
|
||||
if let Some(indices) = self.records.get(pubkey) {
|
||||
for index in indices {
|
||||
let entry = self.table.index_mut(*index);
|
||||
if entry.local_timestamp < now {
|
||||
entry.local_timestamp = now;
|
||||
}
|
||||
}
|
||||
for label in CrdsValue::record_labels(*pubkey) {
|
||||
self.update_label_timestamp(&label, now);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -275,7 +253,6 @@ impl Crds {
|
||||
now: u64,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
) -> Vec<CrdsValueLabel> {
|
||||
// TODO: need custom logic for purging duplicate shreds.
|
||||
let default_timeout = *timeouts
|
||||
.get(&Pubkey::default())
|
||||
.expect("must have default timeout");
|
||||
@@ -296,26 +273,10 @@ impl Crds {
|
||||
}
|
||||
|
||||
pub fn remove(&mut self, key: &CrdsValueLabel) -> Option<VersionedCrdsValue> {
|
||||
let (index, _ /*label*/, value) = self.table.swap_remove_full(key)?;
|
||||
let (index, _, value) = self.table.swap_remove_full(key)?;
|
||||
self.shards.remove(index, &value);
|
||||
match value.value.data {
|
||||
CrdsData::ContactInfo(_) => {
|
||||
self.nodes.swap_remove(&index);
|
||||
}
|
||||
CrdsData::Vote(_, _) => {
|
||||
self.votes.swap_remove(&index);
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
// Remove the index from records associated with the value's pubkey.
|
||||
let pubkey = value.value.pubkey();
|
||||
let mut records_entry = match self.records.entry(pubkey) {
|
||||
hash_map::Entry::Vacant(_) => panic!("this should not happen!"),
|
||||
hash_map::Entry::Occupied(entry) => entry,
|
||||
};
|
||||
records_entry.get_mut().swap_remove(&index);
|
||||
if records_entry.get().is_empty() {
|
||||
records_entry.remove();
|
||||
if let CrdsData::ContactInfo(_) = value.value.data {
|
||||
self.nodes.swap_remove(&index);
|
||||
}
|
||||
// If index == self.table.len(), then the removed entry was the last
|
||||
// entry in the table, in which case no other keys were modified.
|
||||
@@ -327,21 +288,10 @@ impl Crds {
|
||||
let value = self.table.index(index);
|
||||
self.shards.remove(size, value);
|
||||
self.shards.insert(index, value);
|
||||
match value.value.data {
|
||||
CrdsData::ContactInfo(_) => {
|
||||
self.nodes.swap_remove(&size);
|
||||
self.nodes.insert(index);
|
||||
}
|
||||
CrdsData::Vote(_, _) => {
|
||||
self.votes.swap_remove(&size);
|
||||
self.votes.insert(index);
|
||||
}
|
||||
_ => (),
|
||||
};
|
||||
let pubkey = value.value.pubkey();
|
||||
let records = self.records.get_mut(&pubkey).unwrap();
|
||||
records.swap_remove(&size);
|
||||
records.insert(index);
|
||||
if let CrdsData::ContactInfo(_) = value.value.data {
|
||||
self.nodes.swap_remove(&size);
|
||||
self.nodes.insert(index);
|
||||
}
|
||||
}
|
||||
Some(value)
|
||||
}
|
||||
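The `remove` hunks above drop the per-kind `votes` index but keep the `records` bookkeeping, and both rely on the same invariant: because the table uses `swap_remove`, the entry that gets swapped into the freed slot must have its position rewritten in every secondary index. A hedged, std-only sketch of that invariant with a toy table keyed by owner (the names here are illustrative, not the crds types):

```rust
use std::collections::HashMap;

struct ToyTable {
    // Flat storage; removal uses swap_remove, so indices can move.
    table: Vec<(String, u64)>,
    // Secondary index: owner -> positions of that owner's entries in `table`.
    records: HashMap<String, Vec<usize>>,
}

impl ToyTable {
    fn remove(&mut self, index: usize) -> (String, u64) {
        let removed = self.table.swap_remove(index);
        // Drop the removed entry from its owner's record, pruning empty owners.
        let positions = self.records.get_mut(&removed.0).expect("owner must exist");
        positions.retain(|&i| i != index);
        if positions.is_empty() {
            self.records.remove(&removed.0);
        }
        // If another entry was swapped into `index`, it used to live at the
        // old tail position `self.table.len()`; patch its record entry.
        if index < self.table.len() {
            let moved_owner = self.table[index].0.clone();
            let old_index = self.table.len();
            for i in self.records.get_mut(&moved_owner).expect("owner must exist") {
                if *i == old_index {
                    *i = index;
                }
            }
        }
        removed
    }
}
```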
@@ -353,7 +303,6 @@ mod test {
|
||||
use crate::contact_info::ContactInfo;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::ThreadPoolBuilder;
|
||||
use std::iter::repeat_with;
|
||||
|
||||
#[test]
|
||||
fn test_insert() {
|
||||
@@ -399,6 +348,8 @@ mod test {
|
||||
)));
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
|
||||
|
||||
crds.update_label_timestamp(&val.label(), 1);
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 1);
|
||||
assert_eq!(crds.table[&val.label()].insert_timestamp, 0);
|
||||
|
||||
let val2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
@@ -558,109 +509,54 @@ mod test {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_crds_value_indices() {
|
||||
fn check_crds_value_indices(crds: &Crds) -> (usize, usize) {
|
||||
fn test_crds_nodes() {
|
||||
fn check_crds_nodes(crds: &Crds) -> usize {
|
||||
let num_nodes = crds
|
||||
.table
|
||||
.values()
|
||||
.filter(|value| matches!(value.value.data, CrdsData::ContactInfo(_)))
|
||||
.count();
|
||||
let num_votes = crds
|
||||
.table
|
||||
.values()
|
||||
.filter(|value| matches!(value.value.data, CrdsData::Vote(_, _)))
|
||||
.count();
|
||||
assert_eq!(num_nodes, crds.get_nodes_contact_info().count());
|
||||
assert_eq!(num_votes, crds.get_votes().count());
|
||||
for vote in crds.get_votes() {
|
||||
match vote.value.data {
|
||||
CrdsData::Vote(_, _) => (),
|
||||
_ => panic!("not a vote!"),
|
||||
}
|
||||
}
|
||||
(num_nodes, num_votes)
|
||||
num_nodes
|
||||
}
|
||||
let mut rng = thread_rng();
|
||||
let keypairs: Vec<_> = repeat_with(Keypair::new).take(128).collect();
|
||||
let keypairs: Vec<_> = std::iter::repeat_with(Keypair::new).take(256).collect();
|
||||
let mut crds = Crds::default();
|
||||
let mut num_inserts = 0;
|
||||
let mut num_overrides = 0;
|
||||
for k in 0..4096 {
|
||||
for _ in 0..4096 {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
match crds.insert_versioned(value) {
|
||||
Ok(None) => {
|
||||
num_inserts += 1;
|
||||
check_crds_nodes(&crds);
|
||||
}
|
||||
Ok(Some(_)) => {
|
||||
num_inserts += 1;
|
||||
num_overrides += 1;
|
||||
check_crds_nodes(&crds);
|
||||
}
|
||||
Err(_) => (),
|
||||
}
|
||||
if k % 64 == 0 {
|
||||
check_crds_value_indices(&crds);
|
||||
}
|
||||
}
|
||||
assert_eq!(num_inserts, crds.num_inserts);
|
||||
assert!(num_inserts > 700);
|
||||
assert!(num_overrides > 500);
|
||||
assert!(crds.table.len() > 200);
|
||||
assert!(num_inserts > crds.table.len());
|
||||
let (num_nodes, num_votes) = check_crds_value_indices(&crds);
|
||||
let num_nodes = check_crds_nodes(&crds);
|
||||
assert!(num_nodes * 3 < crds.table.len());
|
||||
assert!(num_nodes > 100, "num nodes: {}", num_nodes);
|
||||
assert!(num_votes > 100, "num votes: {}", num_votes);
|
||||
assert!(num_nodes > 150);
|
||||
// Remove values one by one and assert that nodes indices stay valid.
|
||||
while !crds.table.is_empty() {
|
||||
let index = rng.gen_range(0, crds.table.len());
|
||||
let key = crds.table.get_index(index).unwrap().0.clone();
|
||||
crds.remove(&key);
|
||||
if crds.table.len() % 64 == 0 {
|
||||
check_crds_value_indices(&crds);
|
||||
}
|
||||
check_crds_nodes(&crds);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_crds_records() {
|
||||
fn check_crds_records(crds: &Crds) {
|
||||
assert_eq!(
|
||||
crds.table.len(),
|
||||
crds.records.values().map(IndexSet::len).sum::<usize>()
|
||||
);
|
||||
for (pubkey, indices) in &crds.records {
|
||||
for index in indices {
|
||||
let value = crds.table.index(*index);
|
||||
assert_eq!(*pubkey, value.value.pubkey());
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut rng = thread_rng();
|
||||
let keypairs: Vec<_> = repeat_with(Keypair::new).take(128).collect();
|
||||
let mut crds = Crds::default();
|
||||
for k in 0..4096 {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
let _ = crds.insert_versioned(value);
|
||||
if k % 64 == 0 {
|
||||
check_crds_records(&crds);
|
||||
}
|
||||
}
|
||||
assert!(crds.records.len() > 96);
|
||||
assert!(crds.records.len() <= keypairs.len());
|
||||
// Remove values one by one and assert that records stay valid.
|
||||
while !crds.table.is_empty() {
|
||||
let index = rng.gen_range(0, crds.table.len());
|
||||
let key = crds.table.get_index(index).unwrap().0.clone();
|
||||
crds.remove(&key);
|
||||
if crds.table.len() % 64 == 0 {
|
||||
check_crds_records(&crds);
|
||||
}
|
||||
}
|
||||
assert!(crds.records.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_staked() {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
|
@@ -184,11 +184,9 @@ impl CrdsGossip {
|
||||
pub fn generate_pull_responses(
|
||||
&self,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
output_size_limit: usize, // Limit number of crds values returned.
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.pull
|
||||
.generate_pull_responses(&self.crds, filters, output_size_limit, now)
|
||||
self.pull.generate_pull_responses(&self.crds, filters, now)
|
||||
}
|
||||
|
||||
pub fn filter_pull_responses(
|
||||
@@ -306,10 +304,8 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_prune_errors() {
|
||||
let mut crds_gossip = CrdsGossip {
|
||||
id: Pubkey::new(&[0; 32]),
|
||||
..CrdsGossip::default()
|
||||
};
|
||||
let mut crds_gossip = CrdsGossip::default();
|
||||
crds_gossip.id = Pubkey::new(&[0; 32]);
|
||||
let id = crds_gossip.id;
|
||||
let ci = ContactInfo::new_localhost(&Pubkey::new(&[1; 32]), 0);
|
||||
let prune_pubkey = Pubkey::new(&[2; 32]);
|
||||
|
@@ -14,7 +14,6 @@ use crate::crds::{Crds, VersionedCrdsValue};
|
||||
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
|
||||
use crate::crds_gossip_error::CrdsGossipError;
|
||||
use crate::crds_value::{CrdsValue, CrdsValueLabel};
|
||||
use itertools::Itertools;
|
||||
use rand::distributions::{Distribution, WeightedIndex};
|
||||
use rand::Rng;
|
||||
use rayon::{prelude::*, ThreadPool};
|
||||
@@ -305,10 +304,9 @@ impl CrdsGossipPull {
|
||||
&self,
|
||||
crds: &Crds,
|
||||
requests: &[(CrdsValue, CrdsFilter)],
|
||||
output_size_limit: usize, // Limit number of crds values returned.
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.filter_crds_values(crds, requests, output_size_limit, now)
|
||||
self.filter_crds_values(crds, requests, now)
|
||||
}
|
||||
|
||||
// Checks if responses should be inserted and
|
||||
@@ -339,7 +337,10 @@ impl CrdsGossipPull {
|
||||
for r in responses {
|
||||
let owner = r.label().pubkey();
|
||||
// Check if the crds value is older than the msg_timeout
|
||||
if now > r.wallclock().checked_add(self.msg_timeout).unwrap_or(0)
|
||||
if now
|
||||
> r.wallclock()
|
||||
.checked_add(self.msg_timeout)
|
||||
.unwrap_or_else(|| 0)
|
||||
|| now + self.msg_timeout < r.wallclock()
|
||||
{
|
||||
match &r.label() {
|
||||
@@ -349,7 +350,7 @@ impl CrdsGossipPull {
|
||||
let timeout = *timeouts
|
||||
.get(&owner)
|
||||
.unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap());
|
||||
if now > r.wallclock().checked_add(timeout).unwrap_or(0)
|
||||
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|
||||
|| now + timeout < r.wallclock()
|
||||
{
|
||||
stats.timeout_count += 1;
|
||||
@@ -476,7 +477,6 @@ impl CrdsGossipPull {
|
||||
&self,
|
||||
crds: &Crds,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
mut output_size_limit: usize, // Limit number of crds values returned.
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
@@ -486,20 +486,16 @@ impl CrdsGossipPull {
|
||||
let past = now.saturating_sub(msg_timeout);
|
||||
let mut dropped_requests = 0;
|
||||
let mut total_skipped = 0;
|
||||
let ret: Vec<_> = filters
|
||||
let ret = filters
|
||||
.iter()
|
||||
.map(|(caller, filter)| {
|
||||
if output_size_limit == 0 {
|
||||
return None;
|
||||
}
|
||||
let caller_wallclock = caller.wallclock();
|
||||
if caller_wallclock >= future || caller_wallclock < past {
|
||||
dropped_requests += 1;
|
||||
return Some(vec![]);
|
||||
return vec![];
|
||||
}
|
||||
let caller_wallclock = caller_wallclock.checked_add(jitter).unwrap_or(0);
|
||||
let out: Vec<_> = crds
|
||||
.filter_bitmask(filter.mask, filter.mask_bits)
|
||||
crds.filter_bitmask(filter.mask, filter.mask_bits)
|
||||
.filter_map(|item| {
|
||||
debug_assert!(filter.test_mask(&item.value_hash));
|
||||
//skip values that are too new
|
||||
@@ -512,16 +508,12 @@ impl CrdsGossipPull {
|
||||
Some(item.value.clone())
|
||||
}
|
||||
})
|
||||
.take(output_size_limit)
|
||||
.collect();
|
||||
output_size_limit -= out.len();
|
||||
Some(out)
|
||||
.collect()
|
||||
})
|
||||
.while_some()
|
||||
.collect();
|
||||
inc_new_counter_info!(
|
||||
"gossip_filter_crds_values-dropped_requests",
|
||||
dropped_requests + filters.len() - ret.len()
|
||||
dropped_requests
|
||||
);
|
||||
inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped);
|
||||
ret
|
||||
@@ -1040,12 +1032,7 @@ mod test {
|
||||
let dest = CrdsGossipPull::default();
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let mut filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(
|
||||
&dest_crds,
|
||||
&filters,
|
||||
/*output_size_limit=*/ usize::MAX,
|
||||
0,
|
||||
);
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
|
||||
assert_eq!(rsp[0].len(), 0);
|
||||
|
||||
@@ -1058,12 +1045,8 @@ mod test {
|
||||
.unwrap();
|
||||
|
||||
//should skip new value since caller is too old
|
||||
let rsp = dest.generate_pull_responses(
|
||||
&dest_crds,
|
||||
&filters,
|
||||
/*output_size_limit=*/ usize::MAX,
|
||||
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
|
||||
);
|
||||
let rsp =
|
||||
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
|
||||
assert_eq!(rsp[0].len(), 0);
|
||||
|
||||
assert_eq!(filters.len(), 1);
|
||||
@@ -1074,12 +1057,8 @@ mod test {
|
||||
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
|
||||
)));
|
||||
|
||||
let rsp = dest.generate_pull_responses(
|
||||
&dest_crds,
|
||||
&filters,
|
||||
/*output_size_limit=*/ usize::MAX,
|
||||
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
|
||||
);
|
||||
let rsp =
|
||||
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
|
||||
assert_eq!(rsp.len(), 2);
|
||||
assert_eq!(rsp[0].len(), 0);
|
||||
assert_eq!(rsp[1].len(), 1); // Orders are also preserved.
|
||||
@@ -1116,12 +1095,7 @@ mod test {
|
||||
let mut dest = CrdsGossipPull::default();
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(
|
||||
&dest_crds,
|
||||
&filters,
|
||||
/*output_size_limit=*/ usize::MAX,
|
||||
0,
|
||||
);
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(
|
||||
&mut dest_crds,
|
||||
filters.into_iter().map(|(caller, _)| caller),
|
||||
@@ -1199,12 +1173,7 @@ mod test {
|
||||
);
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let mut rsp = dest.generate_pull_responses(
|
||||
&dest_crds,
|
||||
&filters,
|
||||
/*output_size_limit=*/ usize::MAX,
|
||||
0,
|
||||
);
|
||||
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(
|
||||
&mut dest_crds,
|
||||
filters.into_iter().map(|(caller, _)| caller),
|
||||
|
@@ -175,7 +175,12 @@ impl CrdsGossipPush {
|
||||
now: u64,
|
||||
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
|
||||
self.num_total += 1;
|
||||
if now > value.wallclock().checked_add(self.msg_timeout).unwrap_or(0) {
|
||||
if now
|
||||
> value
|
||||
.wallclock()
|
||||
.checked_add(self.msg_timeout)
|
||||
.unwrap_or_else(|| 0)
|
||||
{
|
||||
return Err(CrdsGossipError::PushMessageTimeout);
|
||||
}
|
||||
if now + self.msg_timeout < value.wallclock() {
|
||||
@@ -203,7 +208,7 @@ impl CrdsGossipPush {
|
||||
/// push pull responses
|
||||
pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
|
||||
for (label, value_hash, wc) in values {
|
||||
if now > wc.checked_add(self.msg_timeout).unwrap_or(0) {
|
||||
if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
|
||||
continue;
|
||||
}
|
||||
self.push_messages.insert(label, value_hash);
|
||||
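Both timeout checks above have the same shape, only reformatted around `unwrap_or` versus `unwrap_or_else`: a value is rejected when its wallclock is more than `msg_timeout` in the past or more than `msg_timeout` in the future relative to local time. A compact sketch of that window test:

```rust
// Returns true when `wallclock` falls outside the accepted window around `now`.
fn outside_timeout_window(now: u64, wallclock: u64, timeout: u64) -> bool {
    // Too old: wallclock + timeout has already passed. `unwrap_or(0)`
    // makes an overflowing deadline count as long expired.
    now > wallclock.checked_add(timeout).unwrap_or(0)
        // Too new: more than `timeout` ahead of local time.
        || now + timeout < wallclock
}
```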
@@ -242,7 +247,7 @@ impl CrdsGossipPush {
|
||||
for i in start..(start + push_fanout) {
|
||||
let index = i % self.active_set.len();
|
||||
let (peer, filter) = self.active_set.get_index(index).unwrap();
|
||||
if !filter.contains(&origin) || value.should_force_push(peer) {
|
||||
if !filter.contains(&origin) {
|
||||
trace!("new_push_messages insert {} {:?}", *peer, value);
|
||||
push_messages.entry(*peer).or_default().push(value.clone());
|
||||
num_pushes += 1;
|
||||
|
@@ -1,12 +1,9 @@
|
||||
use crate::{
|
||||
cluster_info::MAX_SNAPSHOT_HASHES,
|
||||
contact_info::ContactInfo,
|
||||
deprecated,
|
||||
duplicate_shred::{DuplicateShred, DuplicateShredIndex},
|
||||
epoch_slots::EpochSlots,
|
||||
};
|
||||
use crate::cluster_info::MAX_SNAPSHOT_HASHES;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::deprecated;
|
||||
use crate::epoch_slots::EpochSlots;
|
||||
use bincode::{serialize, serialized_size};
|
||||
use rand::{CryptoRng, Rng};
|
||||
use rand::Rng;
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{
|
||||
@@ -82,8 +79,6 @@ pub enum CrdsData {
|
||||
EpochSlots(EpochSlotsIndex, EpochSlots),
|
||||
LegacyVersion(LegacyVersion),
|
||||
Version(Version),
|
||||
NodeInstance(NodeInstance),
|
||||
DuplicateShred(DuplicateShred),
|
||||
}
|
||||
|
||||
impl Sanitize for CrdsData {
|
||||
@@ -112,8 +107,6 @@ impl Sanitize for CrdsData {
|
||||
}
|
||||
CrdsData::LegacyVersion(version) => version.sanitize(),
|
||||
CrdsData::Version(version) => version.sanitize(),
|
||||
CrdsData::NodeInstance(node) => node.sanitize(),
|
||||
CrdsData::DuplicateShred(shred) => shred.sanitize(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -127,7 +120,7 @@ pub(crate) fn new_rand_timestamp<R: Rng>(rng: &mut R) -> u64 {
|
||||
impl CrdsData {
|
||||
/// New random CrdsData for tests and benchmarks.
|
||||
fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> CrdsData {
|
||||
let kind = rng.gen_range(0, 6);
|
||||
let kind = rng.gen_range(0, 5);
|
||||
// TODO: Implement other kinds of CrdsData here.
|
||||
// TODO: Assign ranges to each arm proportional to their frequency in
|
||||
// the mainnet crds table.
|
||||
@@ -136,8 +129,7 @@ impl CrdsData {
|
||||
1 => CrdsData::LowestSlot(rng.gen(), LowestSlot::new_rand(rng, pubkey)),
|
||||
2 => CrdsData::SnapshotHashes(SnapshotHash::new_rand(rng, pubkey)),
|
||||
3 => CrdsData::AccountsHashes(SnapshotHash::new_rand(rng, pubkey)),
|
||||
4 => CrdsData::Version(Version::new_rand(rng, pubkey)),
|
||||
_ => CrdsData::Vote(rng.gen_range(0, MAX_VOTES), Vote::new_rand(rng, pubkey)),
|
||||
_ => CrdsData::Version(Version::new_rand(rng, pubkey)),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -151,7 +143,9 @@ pub struct SnapshotHash {
|
||||
|
||||
impl Sanitize for SnapshotHash {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
sanitize_wallclock(self.wallclock)?;
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
for (slot, _) in &self.hashes {
|
||||
if *slot >= MAX_SLOT {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
@@ -224,7 +218,9 @@ impl LowestSlot {
|
||||
|
||||
impl Sanitize for LowestSlot {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
sanitize_wallclock(self.wallclock)?;
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
if self.lowest >= MAX_SLOT {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
@@ -250,7 +246,9 @@ pub struct Vote {
|
||||
|
||||
impl Sanitize for Vote {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
sanitize_wallclock(self.wallclock)?;
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
self.from.sanitize()?;
|
||||
self.transaction.sanitize()
|
||||
}
|
||||
@@ -264,15 +262,6 @@ impl Vote {
|
||||
wallclock,
|
||||
}
|
||||
}
|
||||
|
||||
/// New random Vote for tests and benchmarks.
|
||||
fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
|
||||
Self {
|
||||
from: pubkey.unwrap_or_else(pubkey::new_rand),
|
||||
transaction: Transaction::default(),
|
||||
wallclock: new_rand_timestamp(rng),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, AbiExample)]
|
||||
@@ -284,7 +273,9 @@ pub struct LegacyVersion {
|
||||
|
||||
impl Sanitize for LegacyVersion {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
sanitize_wallclock(self.wallclock)?;
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
self.from.sanitize()?;
|
||||
self.version.sanitize()
|
||||
}
|
||||
@@ -299,7 +290,9 @@ pub struct Version {
|
||||
|
||||
impl Sanitize for Version {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
sanitize_wallclock(self.wallclock)?;
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
self.from.sanitize()?;
|
||||
self.version.sanitize()
|
||||
}
|
||||
@@ -330,56 +323,6 @@ impl Version {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, AbiExample, Deserialize, Serialize)]
|
||||
pub struct NodeInstance {
|
||||
from: Pubkey,
|
||||
wallclock: u64,
|
||||
timestamp: u64, // Timestamp when the instance was created.
|
||||
token: u64, // Randomly generated value at node instantiation.
|
||||
}
|
||||
|
||||
impl NodeInstance {
|
||||
pub fn new<R>(rng: &mut R, pubkey: Pubkey, now: u64) -> Self
|
||||
where
|
||||
R: Rng + CryptoRng,
|
||||
{
|
||||
Self {
|
||||
from: pubkey,
|
||||
wallclock: now,
|
||||
timestamp: now,
|
||||
token: rng.gen(),
|
||||
}
|
||||
}
|
||||
|
||||
// Clones the value with an updated wallclock.
|
||||
pub fn with_wallclock(&self, now: u64) -> Self {
|
||||
Self {
|
||||
wallclock: now,
|
||||
..*self
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the crds-value is a duplicate instance
|
||||
// of this node, with a more recent timestamp.
|
||||
pub fn check_duplicate(&self, other: &CrdsValue) -> bool {
|
||||
match &other.data {
|
||||
CrdsData::NodeInstance(other) => {
|
||||
self.token != other.token
|
||||
&& self.timestamp <= other.timestamp
|
||||
&& self.from == other.from
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Sanitize for NodeInstance {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
sanitize_wallclock(self.wallclock)?;
|
||||
self.from.sanitize()
|
||||
}
|
||||
}
|
||||
|
||||
/// Type of the replicated value
|
||||
/// These are labels for values in a record that is associated with `Pubkey`
|
||||
#[derive(PartialEq, Hash, Eq, Clone, Debug)]
|
||||
@@ -392,8 +335,6 @@ pub enum CrdsValueLabel {
|
||||
AccountsHashes(Pubkey),
|
||||
LegacyVersion(Pubkey),
|
||||
Version(Pubkey),
|
||||
NodeInstance(Pubkey, u64 /*token*/),
|
||||
DuplicateShred(DuplicateShredIndex, Pubkey),
|
||||
}
|
||||
|
||||
impl fmt::Display for CrdsValueLabel {
|
||||
@@ -407,8 +348,6 @@ impl fmt::Display for CrdsValueLabel {
|
||||
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
|
||||
CrdsValueLabel::LegacyVersion(_) => write!(f, "LegacyVersion({})", self.pubkey()),
|
||||
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
|
||||
CrdsValueLabel::NodeInstance(pk, token) => write!(f, "NodeInstance({}, {})", pk, token),
|
||||
CrdsValueLabel::DuplicateShred(ix, pk) => write!(f, "DuplicateShred({:?}, {})", ix, pk),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -424,8 +363,6 @@ impl CrdsValueLabel {
|
||||
CrdsValueLabel::AccountsHashes(p) => *p,
|
||||
CrdsValueLabel::LegacyVersion(p) => *p,
|
||||
CrdsValueLabel::Version(p) => *p,
|
||||
CrdsValueLabel::NodeInstance(p, _ /*token*/) => *p,
|
||||
CrdsValueLabel::DuplicateShred(_, p) => *p,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -472,8 +409,6 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(_, p) => p.wallclock,
|
||||
CrdsData::LegacyVersion(version) => version.wallclock,
|
||||
CrdsData::Version(version) => version.wallclock,
|
||||
CrdsData::NodeInstance(node) => node.wallclock,
|
||||
CrdsData::DuplicateShred(shred) => shred.wallclock,
|
||||
}
|
||||
}
|
||||
pub fn pubkey(&self) -> Pubkey {
|
||||
@@ -486,8 +421,6 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(_, p) => p.from,
|
||||
CrdsData::LegacyVersion(version) => version.from,
|
||||
CrdsData::Version(version) => version.from,
|
||||
CrdsData::NodeInstance(node) => node.from,
|
||||
CrdsData::DuplicateShred(shred) => shred.from,
|
||||
}
|
||||
}
|
||||
pub fn label(&self) -> CrdsValueLabel {
|
||||
@@ -500,10 +433,6 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
|
||||
CrdsData::LegacyVersion(_) => CrdsValueLabel::LegacyVersion(self.pubkey()),
|
||||
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
|
||||
CrdsData::NodeInstance(node) => CrdsValueLabel::NodeInstance(node.from, node.token),
|
||||
CrdsData::DuplicateShred(shred) => {
|
||||
CrdsValueLabel::DuplicateShred(DuplicateShredIndex::from(shred), shred.from)
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn contact_info(&self) -> Option<&ContactInfo> {
|
||||
@@ -568,6 +497,23 @@ impl CrdsValue {
|
||||
}
|
||||
}
|
||||
|
||||
/// Return all the possible labels for a record identified by Pubkey.
|
||||
pub fn record_labels(key: Pubkey) -> impl Iterator<Item = CrdsValueLabel> {
|
||||
const CRDS_VALUE_LABEL_STUBS: [fn(Pubkey) -> CrdsValueLabel; 6] = [
|
||||
CrdsValueLabel::ContactInfo,
|
||||
CrdsValueLabel::LowestSlot,
|
||||
CrdsValueLabel::SnapshotHashes,
|
||||
CrdsValueLabel::AccountsHashes,
|
||||
CrdsValueLabel::LegacyVersion,
|
||||
CrdsValueLabel::Version,
|
||||
];
|
||||
CRDS_VALUE_LABEL_STUBS
|
||||
.iter()
|
||||
.map(move |f| (f)(key))
|
||||
.chain((0..MAX_VOTES).map(move |ix| CrdsValueLabel::Vote(ix, key)))
|
||||
.chain((0..MAX_EPOCH_SLOTS).map(move |ix| CrdsValueLabel::EpochSlots(ix, key)))
|
||||
}
|
||||
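The `record_labels` function above enumerates every label a pubkey can own by chaining an array of unit-constructor function pointers with the indexed `Vote` and `EpochSlots` ranges. A hedged miniature of the same pattern with a toy label enum:

```rust
#[derive(Debug, PartialEq)]
enum ToyLabel {
    ContactInfo(u32),
    Version(u32),
    Vote(u8, u32),
}

fn record_labels(key: u32, max_votes: u8) -> impl Iterator<Item = ToyLabel> {
    // Tuple-variant constructors coerce to plain fn pointers, so the fixed
    // labels can live in a const array and simply be applied to the key.
    const STUBS: [fn(u32) -> ToyLabel; 2] = [ToyLabel::ContactInfo, ToyLabel::Version];
    STUBS
        .iter()
        .map(move |f| f(key))
        .chain((0..max_votes).map(move |ix| ToyLabel::Vote(ix, key)))
}
```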
|
||||
/// Returns the size (in bytes) of a CrdsValue
|
||||
pub fn size(&self) -> u64 {
|
||||
serialized_size(&self).expect("unable to serialize contact info")
|
||||
@@ -599,15 +545,6 @@ impl CrdsValue {
|
||||
.vote_index()
|
||||
.expect("all values must be votes")
|
||||
}
|
||||
|
||||
/// Returns true if, regardless of prunes, this crds-value
|
||||
/// should be pushed to the receiving node.
|
||||
pub fn should_force_push(&self, peer: &Pubkey) -> bool {
|
||||
match &self.data {
|
||||
CrdsData::NodeInstance(node) => node.from == *peer,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Filters out an iterator of crds values, returning
|
||||
@@ -634,14 +571,6 @@ where
|
||||
out.into_iter().map(|(_, (v, _))| v)
|
||||
}
|
||||
|
||||
pub(crate) fn sanitize_wallclock(wallclock: u64) -> Result<(), SanitizeError> {
|
||||
if wallclock >= MAX_WALLCLOCK {
|
||||
Err(SanitizeError::ValueOutOfBounds)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
@@ -653,6 +582,26 @@ mod test {
|
||||
use std::cmp::Ordering;
|
||||
use std::iter::repeat_with;
|
||||
|
||||
#[test]
|
||||
fn test_labels() {
|
||||
let mut hits = [false; 6 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
|
||||
// this method should cover all the possible labels
|
||||
for v in CrdsValue::record_labels(Pubkey::default()) {
|
||||
match &v {
|
||||
CrdsValueLabel::ContactInfo(_) => hits[0] = true,
|
||||
CrdsValueLabel::LowestSlot(_) => hits[1] = true,
|
||||
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
|
||||
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
|
||||
CrdsValueLabel::LegacyVersion(_) => hits[4] = true,
|
||||
CrdsValueLabel::Version(_) => hits[5] = true,
|
||||
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 6] = true,
|
||||
CrdsValueLabel::EpochSlots(ix, _) => {
|
||||
hits[*ix as usize + MAX_VOTES as usize + 6] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
assert!(hits.iter().all(|x| *x));
|
||||
}
|
||||
#[test]
|
||||
fn test_keys_and_values() {
|
||||
let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
@@ -831,7 +780,7 @@ mod test {
|
||||
let index = rng.gen_range(0, keys.len());
|
||||
CrdsValue::new_rand(&mut rng, Some(&keys[index]))
|
||||
})
|
||||
.take(2048)
|
||||
.take(256)
|
||||
.collect();
|
||||
let mut currents = HashMap::new();
|
||||
for value in filter_current(&values) {
|
||||
@@ -853,132 +802,8 @@ mod test {
|
||||
}
|
||||
assert_eq!(count, currents.len());
|
||||
// Currently CrdsData::new_rand is only implemented for 5 different
|
||||
// kinds and excludes EpochSlots, and so the unique labels cannot be
|
||||
// more than (5 + MAX_VOTES) times the number of keys.
|
||||
assert!(currents.len() <= keys.len() * (5 + MAX_VOTES as usize));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_node_instance_crds_label() {
|
||||
fn make_crds_value(node: NodeInstance) -> CrdsValue {
|
||||
CrdsValue::new_unsigned(CrdsData::NodeInstance(node))
|
||||
}
|
||||
let mut rng = rand::thread_rng();
|
||||
let now = timestamp();
|
||||
let pubkey = Pubkey::new_unique();
|
||||
let node = NodeInstance::new(&mut rng, pubkey, now);
|
||||
assert_eq!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(node.with_wallclock(now + 8)).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
from: Pubkey::new_unique(),
|
||||
..node
|
||||
};
|
||||
assert_ne!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
wallclock: now + 8,
|
||||
..node
|
||||
};
|
||||
assert_eq!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
timestamp: now + 8,
|
||||
..node
|
||||
};
|
||||
assert_eq!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
token: rng.gen(),
|
||||
..node
|
||||
};
|
||||
assert_ne!(
|
||||
make_crds_value(node).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
}
|
||||
|
||||
    #[test]
    fn test_check_duplicate_instance() {
        fn make_crds_value(node: NodeInstance) -> CrdsValue {
            CrdsValue::new_unsigned(CrdsData::NodeInstance(node))
        }
        let now = timestamp();
        let mut rng = rand::thread_rng();
        let pubkey = Pubkey::new_unique();
        let node = NodeInstance::new(&mut rng, pubkey, now);
        // Same token is not a duplicate.
        assert!(!node.check_duplicate(&make_crds_value(NodeInstance {
            from: pubkey,
            wallclock: now + 1,
            timestamp: now + 1,
            token: node.token,
        })));
        // Older timestamp is not a duplicate.
        assert!(!node.check_duplicate(&make_crds_value(NodeInstance {
            from: pubkey,
            wallclock: now + 1,
            timestamp: now - 1,
            token: rng.gen(),
        })));
        // Updated wallclock is not a duplicate.
        let other = node.with_wallclock(now + 8);
        assert_eq!(
            other,
            NodeInstance {
                from: pubkey,
                wallclock: now + 8,
                timestamp: now,
                token: node.token,
            }
        );
        assert!(!node.check_duplicate(&make_crds_value(other)));
        // Duplicate instance.
        assert!(node.check_duplicate(&make_crds_value(NodeInstance {
            from: pubkey,
            wallclock: 0,
            timestamp: now,
            token: rng.gen(),
        })));
        // Different pubkey is not a duplicate.
        assert!(!node.check_duplicate(&make_crds_value(NodeInstance {
            from: Pubkey::new_unique(),
            wallclock: now + 1,
            timestamp: now + 1,
            token: rng.gen(),
        })));
        // Different crds value is not a duplicate.
        assert!(
            !node.check_duplicate(&CrdsValue::new_unsigned(CrdsData::ContactInfo(
                ContactInfo::new_rand(&mut rng, Some(pubkey))
            )))
        );
    }

    #[test]
    fn test_should_force_push() {
        let mut rng = rand::thread_rng();
        let pubkey = Pubkey::new_unique();
        assert!(
            !CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_rand(
                &mut rng,
                Some(pubkey),
            )))
            .should_force_push(&pubkey)
        );
        let node = CrdsValue::new_unsigned(CrdsData::NodeInstance(NodeInstance::new(
            &mut rng,
            pubkey,
            timestamp(),
        )));
        assert!(node.should_force_push(&pubkey));
        assert!(!node.should_force_push(&Pubkey::new_unique()));
        // kinds and excludes Vote and EpochSlots, and so the unique labels
        // cannot be more than 5 times number of keys.
        assert!(currents.len() <= keys.len() * 5);
    }
}

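The assertions in test_check_duplicate_instance above pin down a small predicate: an incoming NodeInstance is a duplicate of ours only if it comes from the same pubkey, carries a timestamp that is not older than ours, and holds a different random token; the wallclock plays no role in the check. Below is a minimal, self-contained sketch of that rule. The Instance type, its fields, and check_duplicate here are illustrative stand-ins consistent with the assertions shown, not the crate's actual API.

// Hypothetical, simplified model of the duplicate-instance rule exercised above.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Instance {
    from: u64,      // stand-in for the node pubkey
    timestamp: u64, // when this instance was created
    token: u64,     // random value distinguishing instances of the same node
}

impl Instance {
    fn check_duplicate(&self, other: &Instance) -> bool {
        // Same origin, not older than us, and a different token.
        other.from == self.from && other.timestamp >= self.timestamp && other.token != self.token
    }
}

fn main() {
    let node = Instance { from: 1, timestamp: 100, token: 42 };
    // Same token: a re-broadcast of our own instance, not a duplicate.
    assert!(!node.check_duplicate(&Instance { token: 42, ..node }));
    // Older timestamp: stale instance, not a duplicate.
    assert!(!node.check_duplicate(&Instance { timestamp: 99, token: 7, ..node }));
    // Same pubkey, newer-or-equal timestamp, different token: duplicate.
    assert!(node.check_duplicate(&Instance { token: 7, ..node }));
    // Different pubkey: a different node entirely, not a duplicate.
    assert!(!node.check_duplicate(&Instance { from: 2, token: 7, ..node }));
    println!("duplicate-instance checks hold");
}
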
@@ -52,28 +52,27 @@ impl DataBudget {
        }
    }

    /// Updates the budget if at least given milliseconds has passed since last
    /// update. Updater function maps current value of bytes to the new one.
    /// Returns current data-budget after the update.
    pub fn update<F>(&self, duration_millis: u64, updater: F) -> usize
    // Updates the budget if at least given milliseconds has passed since last
    // update. Updater function maps current value of bytes to the new one.
    pub fn update<F>(&self, duration_millis: u64, updater: F)
    where
        F: Fn(usize) -> usize,
    {
        if self.can_update(duration_millis) {
            let mut bytes = self.bytes.load(Ordering::Acquire);
            loop {
                match self.bytes.compare_exchange_weak(
                    bytes,
                    updater(bytes),
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => break,
                    Err(b) => bytes = b,
                }
        if !self.can_update(duration_millis) {
            return;
        }
        let mut bytes = self.bytes.load(Ordering::Acquire);
        loop {
            match self.bytes.compare_exchange_weak(
                bytes,
                updater(bytes),
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => break,
                Err(b) => bytes = b,
            }
        }
        self.bytes.load(Ordering::Acquire)
    }

    // Non-atomic clone only for tests and simulations.
@@ -95,16 +94,16 @@ mod tests {
        let budget = DataBudget::default();
        assert!(!budget.take(1)); // budget = 0.

        assert_eq!(budget.update(1000, |bytes| bytes + 5), 5); // budget updates to 5.
        budget.update(1000, |bytes| bytes + 5); // budget updates to 5.
        assert!(budget.take(1));
        assert!(budget.take(2));
        assert!(!budget.take(3)); // budget = 2, out of budget.

        assert_eq!(budget.update(30, |_| 10), 2); // no update, budget = 2.
        budget.update(30, |_| 10); // no update, budget = 2.
        assert!(!budget.take(3)); // budget = 2, out of budget.

        std::thread::sleep(Duration::from_millis(50));
        assert_eq!(budget.update(30, |bytes| bytes * 2), 4); // budget updates to 4.
        budget.update(30, |bytes| bytes * 2); // budget updates to 4.

        assert!(budget.take(3));
        assert!(budget.take(1));

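Both the old and the new versions of DataBudget::update in the hunk above rely on the same lock-free pattern: load the current byte count, compute a new value with the caller's updater closure, and retry the compare-and-swap with the freshly observed value whenever another thread won the race. The following is a minimal, self-contained sketch of that pattern on a bare AtomicUsize; the helper name update_atomically is illustrative and not part of the crate.

use std::sync::atomic::{AtomicUsize, Ordering};

// Sketch of the compare_exchange_weak retry loop used by update() above.
fn update_atomically<F>(bytes: &AtomicUsize, updater: F) -> usize
where
    F: Fn(usize) -> usize,
{
    let mut current = bytes.load(Ordering::Acquire);
    loop {
        match bytes.compare_exchange_weak(
            current,
            updater(current),
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            Ok(_) => break,                      // our update landed
            Err(observed) => current = observed, // lost the race; retry with the observed value
        }
    }
    // Value after the update; it may already have been changed again by another thread.
    bytes.load(Ordering::Acquire)
}

fn main() {
    let budget = AtomicUsize::new(0);
    assert_eq!(update_atomically(&budget, |b| b + 5), 5);
    assert_eq!(update_atomically(&budget, |b| b * 2), 10);
}
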
@@ -1,367 +0,0 @@
use crate::crds_value::sanitize_wallclock;
use itertools::Itertools;
use solana_ledger::{
    blockstore_meta::DuplicateSlotProof,
    shred::{Shred, ShredError, ShredType},
};
use solana_sdk::{
    clock::Slot,
    pubkey::Pubkey,
    sanitize::{Sanitize, SanitizeError},
};
use std::{
    collections::{hash_map::Entry, HashMap},
    convert::TryFrom,
    num::TryFromIntError,
};
use thiserror::Error;

const DUPLICATE_SHRED_HEADER_SIZE: usize = 63;

/// Function returning leader at a given slot.
pub trait LeaderScheduleFn: FnOnce(Slot) -> Option<Pubkey> {}
impl<F> LeaderScheduleFn for F where F: FnOnce(Slot) -> Option<Pubkey> {}

#[derive(Clone, Debug, PartialEq, AbiExample, Deserialize, Serialize)]
pub struct DuplicateShred {
    pub(crate) from: Pubkey,
    pub(crate) wallclock: u64,
    slot: Slot,
    shred_index: u32,
    shred_type: ShredType,
    // Serialized DuplicateSlotProof split into chunks.
    num_chunks: u8,
    chunk_index: u8,
    #[serde(with = "serde_bytes")]
    chunk: Vec<u8>,
}

#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct DuplicateShredIndex {
    slot: Slot,
    shred_index: u32,
    shred_type: ShredType,
    num_chunks: u8,
    chunk_index: u8,
}

#[derive(Debug, Error)]
pub enum Error {
    #[error("data chunk mismatch")]
    DataChunkMismatch,
    #[error("decoding error")]
    DecodingError(std::io::Error),
    #[error("encoding error")]
    EncodingError(std::io::Error),
    #[error("invalid chunk index")]
    InvalidChunkIndex,
    #[error("invalid duplicate shreds")]
    InvalidDuplicateShreds,
    #[error("invalid duplicate slot proof")]
    InvalidDuplicateSlotProof,
    #[error("invalid signature")]
    InvalidSignature,
    #[error("invalid size limit")]
    InvalidSizeLimit,
    #[error("invalid shred")]
    InvalidShred(#[from] ShredError),
    #[error("number of chunks mismatch")]
    NumChunksMismatch,
    #[error("missing data chunk")]
    MissingDataChunk,
    #[error("(de)serialization error")]
    SerializationError(#[from] bincode::Error),
    #[error("shred index mismatch")]
    ShredIndexMismatch,
    #[error("shred type mismatch")]
    ShredTypeMismatch,
    #[error("slot mismatch")]
    SlotMismatch,
    #[error("type conversion error")]
    TryFromIntError(#[from] TryFromIntError),
    #[error("unknown slot leader")]
    UnknownSlotLeader,
}

// Asserts that the two shreds can indicate duplicate proof for
// the same triplet of (slot, shred-index, and shred-type), and
// that they have valid signatures from the slot leader.
fn check_shreds(
    leader: impl LeaderScheduleFn,
    shred1: &Shred,
    shred2: &Shred,
) -> Result<(), Error> {
    if shred1.slot() != shred2.slot() {
        Err(Error::SlotMismatch)
    } else if shred1.index() != shred2.index() {
        Err(Error::ShredIndexMismatch)
    } else if shred1.common_header.shred_type != shred2.common_header.shred_type {
        Err(Error::ShredTypeMismatch)
    } else if shred1.payload == shred2.payload {
        Err(Error::InvalidDuplicateShreds)
    } else {
        let slot_leader = leader(shred1.slot()).ok_or(Error::UnknownSlotLeader)?;
        if !shred1.verify(&slot_leader) || !shred2.verify(&slot_leader) {
            Err(Error::InvalidSignature)
        } else {
            Ok(())
        }
    }
}

/// Splits a DuplicateSlotProof into DuplicateShred
/// chunks with a size limit on each chunk.
pub fn from_duplicate_slot_proof(
    proof: &DuplicateSlotProof,
    self_pubkey: Pubkey, // Pubkey of my node broadcasting crds value.
    leader: impl LeaderScheduleFn,
    wallclock: u64,
    max_size: usize, // Maximum serialized size of each DuplicateShred.
    encoder: impl FnOnce(Vec<u8>) -> Result<Vec<u8>, std::io::Error>,
) -> Result<impl Iterator<Item = DuplicateShred>, Error> {
    if proof.shred1 == proof.shred2 {
        return Err(Error::InvalidDuplicateSlotProof);
    }
    let shred1 = Shred::new_from_serialized_shred(proof.shred1.clone())?;
    let shred2 = Shred::new_from_serialized_shred(proof.shred2.clone())?;
    check_shreds(leader, &shred1, &shred2)?;
    let (slot, shred_index, shred_type) = (
        shred1.slot(),
        shred1.index(),
        shred1.common_header.shred_type,
    );
    let data = bincode::serialize(proof)?;
    let data = encoder(data).map_err(Error::EncodingError)?;
    let chunk_size = if DUPLICATE_SHRED_HEADER_SIZE < max_size {
        max_size - DUPLICATE_SHRED_HEADER_SIZE
    } else {
        return Err(Error::InvalidSizeLimit);
    };
    let chunks: Vec<_> = data.chunks(chunk_size).map(Vec::from).collect();
    let num_chunks = u8::try_from(chunks.len())?;
    let chunks = chunks
        .into_iter()
        .enumerate()
        .map(move |(i, chunk)| DuplicateShred {
            from: self_pubkey,
            wallclock,
            slot,
            shred_index,
            shred_type,
            num_chunks,
            chunk_index: i as u8,
            chunk,
        });
    Ok(chunks)
}

// Returns a predicate checking if a duplicate-shred chunk matches
// (slot, shred_index, shred_type) and has valid chunk_index.
fn check_chunk(
    slot: Slot,
    shred_index: u32,
    shred_type: ShredType,
    num_chunks: u8,
) -> impl Fn(&DuplicateShred) -> Result<(), Error> {
    move |dup| {
        if dup.slot != slot {
            Err(Error::SlotMismatch)
        } else if dup.shred_index != shred_index {
            Err(Error::ShredIndexMismatch)
        } else if dup.shred_type != shred_type {
            Err(Error::ShredTypeMismatch)
        } else if dup.num_chunks != num_chunks {
            Err(Error::NumChunksMismatch)
        } else if dup.chunk_index >= num_chunks {
            Err(Error::InvalidChunkIndex)
        } else {
            Ok(())
        }
    }
}

/// Reconstructs the duplicate shreds from chunks of DuplicateShred.
pub fn into_shreds(
    chunks: impl IntoIterator<Item = DuplicateShred>,
    leader: impl LeaderScheduleFn,
    decoder: impl FnOnce(Vec<u8>) -> Result<Vec<u8>, std::io::Error>,
) -> Result<(Shred, Shred), Error> {
    let mut chunks = chunks.into_iter();
    let DuplicateShred {
        slot,
        shred_index,
        shred_type,
        num_chunks,
        chunk_index,
        chunk,
        ..
    } = match chunks.next() {
        None => return Err(Error::InvalidDuplicateShreds),
        Some(chunk) => chunk,
    };
    let slot_leader = leader(slot).ok_or(Error::UnknownSlotLeader)?;
    let check_chunk = check_chunk(slot, shred_index, shred_type, num_chunks);
    let mut data = HashMap::new();
    data.insert(chunk_index, chunk);
    for chunk in chunks {
        check_chunk(&chunk)?;
        match data.entry(chunk.chunk_index) {
            Entry::Vacant(entry) => {
                entry.insert(chunk.chunk);
            }
            Entry::Occupied(entry) => {
                if *entry.get() != chunk.chunk {
                    return Err(Error::DataChunkMismatch);
                }
            }
        }
    }
    if data.len() != num_chunks as usize {
        return Err(Error::MissingDataChunk);
    }
    let data = (0..num_chunks).map(|k| data.remove(&k).unwrap());
    let data = decoder(data.concat()).map_err(Error::DecodingError)?;
    let proof: DuplicateSlotProof = bincode::deserialize(&data)?;
    if proof.shred1 == proof.shred2 {
        return Err(Error::InvalidDuplicateSlotProof);
    }
    let shred1 = Shred::new_from_serialized_shred(proof.shred1)?;
    let shred2 = Shred::new_from_serialized_shred(proof.shred2)?;
    if shred1.slot() != slot || shred2.slot() != slot {
        Err(Error::SlotMismatch)
    } else if shred1.index() != shred_index || shred2.index() != shred_index {
        Err(Error::ShredIndexMismatch)
    } else if shred1.common_header.shred_type != shred_type
        || shred2.common_header.shred_type != shred_type
    {
        Err(Error::ShredTypeMismatch)
    } else if shred1.payload == shred2.payload {
        Err(Error::InvalidDuplicateShreds)
    } else if !shred1.verify(&slot_leader) || !shred2.verify(&slot_leader) {
        Err(Error::InvalidSignature)
    } else {
        Ok((shred1, shred2))
    }
}

impl Sanitize for DuplicateShred {
    fn sanitize(&self) -> Result<(), SanitizeError> {
        sanitize_wallclock(self.wallclock)?;
        if self.chunk_index >= self.num_chunks {
            return Err(SanitizeError::IndexOutOfBounds);
        }
        self.from.sanitize()
    }
}

impl From<&DuplicateShred> for DuplicateShredIndex {
    fn from(shred: &DuplicateShred) -> Self {
        Self {
            slot: shred.slot,
            shred_index: shred.shred_index,
            shred_type: shred.shred_type,
            num_chunks: shred.num_chunks,
            chunk_index: shred.chunk_index,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::Rng;
    use solana_ledger::{entry::Entry, shred::Shredder};
    use solana_sdk::{hash, signature::Keypair, signature::Signer, system_transaction};
    use std::sync::Arc;

    #[test]
    fn test_duplicate_shred_header_size() {
        let dup = DuplicateShred {
            from: Pubkey::new_unique(),
            wallclock: u64::MAX,
            slot: Slot::MAX,
            shred_index: u32::MAX,
            shred_type: ShredType(u8::MAX),
            num_chunks: u8::MAX,
            chunk_index: u8::MAX,
            chunk: Vec::default(),
        };
        assert_eq!(
            bincode::serialize(&dup).unwrap().len(),
            DUPLICATE_SHRED_HEADER_SIZE
        );
        assert_eq!(
            bincode::serialized_size(&dup).unwrap(),
            DUPLICATE_SHRED_HEADER_SIZE as u64
        );
    }

    fn new_rand_shred<R: Rng>(rng: &mut R, next_shred_index: u32, shredder: &Shredder) -> Shred {
        let entries: Vec<_> = std::iter::repeat_with(|| {
            let tx = system_transaction::transfer(
                &Keypair::new(),       // from
                &Pubkey::new_unique(), // to
                rng.gen(),             // lamports
                hash::new_rand(rng),   // recent blockhash
            );
            Entry::new(
                &hash::new_rand(rng), // prev_hash
                1,                    // num_hashes,
                vec![tx],             // transactions
            )
        })
        .take(5)
        .collect();
        let (mut data_shreds, _coding_shreds, _last_shred_index) = shredder.entries_to_shreds(
            &entries,
            true, // is_last_in_slot
            next_shred_index,
        );
        data_shreds.swap_remove(0)
    }

    #[test]
    fn test_duplicate_shred_round_trip() {
        let mut rng = rand::thread_rng();
        let leader = Arc::new(Keypair::new());
        let (slot, parent_slot, fec_rate, reference_tick, version) =
            (53084024, 53084023, 0.0, 0, 0);
        let shredder = Shredder::new(
            slot,
            parent_slot,
            fec_rate,
            leader.clone(),
            reference_tick,
            version,
        )
        .unwrap();
        let next_shred_index = rng.gen();
        let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder);
        let shred2 = new_rand_shred(&mut rng, next_shred_index, &shredder);
        let leader = |s| {
            if s == slot {
                Some(leader.pubkey())
            } else {
                None
            }
        };
        let proof = DuplicateSlotProof {
            shred1: shred1.payload.clone(),
            shred2: shred2.payload.clone(),
        };
        let chunks: Vec<_> = from_duplicate_slot_proof(
            &proof,
            Pubkey::new_unique(), // self_pubkey
            leader,
            rng.gen(), // wallclock
            512,       // max_size
            Ok,        // encoder
        )
        .unwrap()
        .collect();
        assert!(chunks.len() > 4);
        let (shred3, shred4) = into_shreds(chunks, leader, Ok).unwrap();
        assert_eq!(shred1, shred3);
        assert_eq!(shred2, shred4);
    }
}

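The core mechanism of the deleted duplicate_shred.rs above is a chunking scheme: from_duplicate_slot_proof serializes a proof, splits it into fixed-size pieces tagged with (chunk_index, num_chunks), and into_shreds collects the pieces in a map keyed by index, rejecting conflicting re-sends and missing chunks before reassembling the payload. The following is a hypothetical, simplified sketch of that round trip on plain byte vectors; the Chunk type and the split/reassemble helpers are illustrative only, and the real code additionally validates chunk_index against num_chunks (check_chunk) and subtracts a fixed header size from the chunk budget.

use std::collections::{hash_map::Entry, HashMap};

#[derive(Clone, Debug)]
struct Chunk {
    num_chunks: u8,
    chunk_index: u8,
    data: Vec<u8>,
}

// Split a payload into chunk_size-byte pieces, tagging each with its index.
fn split(payload: &[u8], chunk_size: usize) -> Vec<Chunk> {
    let pieces: Vec<Vec<u8>> = payload.chunks(chunk_size).map(Vec::from).collect();
    let num_chunks = pieces.len() as u8; // the real code uses u8::try_from and errors on overflow
    pieces
        .into_iter()
        .enumerate()
        .map(|(i, data)| Chunk {
            num_chunks,
            chunk_index: i as u8,
            data,
        })
        .collect()
}

// Collect chunks by index, rejecting mismatched duplicates and missing pieces.
fn reassemble(chunks: impl IntoIterator<Item = Chunk>) -> Result<Vec<u8>, String> {
    let mut parts: HashMap<u8, Vec<u8>> = HashMap::new();
    let mut num_chunks = None;
    for chunk in chunks {
        num_chunks = Some(chunk.num_chunks);
        match parts.entry(chunk.chunk_index) {
            Entry::Vacant(entry) => {
                entry.insert(chunk.data);
            }
            // The same chunk may arrive more than once; its payload must match.
            Entry::Occupied(entry) => {
                if *entry.get() != chunk.data {
                    return Err("data chunk mismatch".into());
                }
            }
        }
    }
    let num_chunks = num_chunks.ok_or("no chunks received")?;
    if parts.len() != num_chunks as usize {
        return Err("missing data chunk".into());
    }
    // Reassemble in index order.
    Ok((0..num_chunks)
        .map(|k| parts.remove(&k).unwrap())
        .collect::<Vec<_>>()
        .concat())
}

fn main() {
    let payload: Vec<u8> = (0u8..=200).collect();
    let chunks = split(&payload, 64);
    assert!(chunks.len() > 1);
    assert_eq!(reassemble(chunks).unwrap(), payload);
}
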
@@ -24,15 +24,6 @@ impl Sanitize for Uncompressed {
        if self.num >= MAX_SLOTS_PER_ENTRY {
            return Err(SanitizeError::ValueOutOfBounds);
        }
        if self.slots.len() % 8 != 0 {
            // Uncompressed::new() ensures the length is always a multiple of 8
            return Err(SanitizeError::ValueOutOfBounds);
        }
        if self.slots.len() != self.slots.capacity() {
            // A BitVec<u8> with a length that's a multiple of 8 will always have len() equal to
            // capacity(), assuming no bit manipulation
            return Err(SanitizeError::ValueOutOfBounds);
        }
        Ok(())
    }
}
@@ -116,7 +107,7 @@ impl Uncompressed {
    pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> {
        let mut rv = vec![];
        let start = if min_slot < self.first_slot {
            0
            0 as usize
        } else {
            (min_slot - self.first_slot) as usize
        };
@@ -141,7 +132,7 @@ impl Uncompressed {
            if *s < self.first_slot {
                return i;
            }
            if *s - self.first_slot >= self.slots.len() {
            if *s - self.first_slot >= self.slots.capacity() {
                return i;
            }
            self.slots.set(*s - self.first_slot, true);
@@ -402,14 +393,6 @@ mod tests {
        o.num = MAX_SLOTS_PER_ENTRY;
        assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));

        let mut o = slots.clone();
        o.slots = BitVec::new_fill(false, 7); // Length not a multiple of 8
        assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));

        let mut o = slots.clone();
        o.slots = BitVec::with_capacity(8); // capacity() not equal to len()
        assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));

        let compressed = Flate2::deflate(slots).unwrap();
        assert!(compressed.sanitize().is_ok());

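The Uncompressed::sanitize hunk above enforces three invariants: the entry's slot count stays below MAX_SLOTS_PER_ENTRY, the bit-vector length is a multiple of 8 (as Uncompressed::new() always allocates), and len() equals capacity(), which holds for a BitVec<u8> of such a length unless the buffer was manipulated directly. The sketch below restates those checks as a free function, assuming the same bv crate BitVec that the real struct uses; the sanitize_slots helper and its String error type are illustrative, not the crate's API.

use bv::BitVec;

// Hedged sketch of the three sanitize checks shown above.
fn sanitize_slots(num: usize, slots: &BitVec<u8>, max_slots_per_entry: usize) -> Result<(), String> {
    if num >= max_slots_per_entry {
        return Err("too many slots".into());
    }
    // Uncompressed::new() always allocates a multiple of 8 bits, so any other
    // length indicates a hand-crafted value.
    if slots.len() % 8 != 0 {
        return Err("length not a multiple of 8".into());
    }
    // For a BitVec<u8> whose length is a multiple of 8, len() == capacity()
    // unless the buffer was manipulated directly.
    if slots.len() != slots.capacity() {
        return Err("len() differs from capacity()".into());
    }
    Ok(())
}

fn main() {
    let ok = BitVec::new_fill(false, 16);
    assert!(sanitize_slots(1, &ok, 1024).is_ok());
    let bad_len = BitVec::new_fill(false, 7); // length not a multiple of 8
    assert!(sanitize_slots(1, &bad_len, 1024).is_err());
    let bad_cap: BitVec<u8> = BitVec::with_capacity(8); // capacity() != len()
    assert!(sanitize_slots(1, &bad_cap, 1024).is_err());
}
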