Compare commits
428 Commits
Commit range: 375295a605 … 53b074aa35 (per-commit author and date metadata not shown).
@@ -31,4 +31,9 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"

mkdir -p "$CARGO_TARGET_CACHE"/target
rsync -a --delete --link-dest="$CARGO_TARGET_CACHE" "$CARGO_TARGET_CACHE"/target .

# Don't reuse BPF target build artifacts due to incremental build issues with
# `std`:
# "found possibly newer version of crate `std` which `xyz` depends on
rm -rf target/bpfel-unknown-unknown
)
.cache/fontconfig/CACHEDIR.TAG (new file, 4 lines)
@@ -0,0 +1,4 @@
Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag created by fontconfig.
# For information about cache directory tags, see:
# http://www.brynosaurus.com/cachedir/
@@ -34,6 +34,8 @@ jobs:
        - stable
      install:
        - source ci/rust-version.sh
        - PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
        - readlink -f .
      script:
        - source ci/env.sh
        - ci/publish-tarball.sh
Cargo.lock (generated, 1287 lines changed): diff suppressed because it is too large.
@@ -38,6 +38,7 @@ members = [
    "net-shaper",
    "notifier",
    "poh-bench",
    "program-test",
    "programs/secp256k1",
    "programs/bpf_loader",
    "programs/budget",
@@ -54,6 +55,7 @@ members = [
    "runtime",
    "sdk",
    "sdk/cargo-build-bpf",
    "sdk/cargo-test-bpf",
    "scripts",
    "stake-accounts",
    "stake-monitor",
@@ -61,8 +61,9 @@ $ cargo test
### Starting a local testnet
Start your own testnet locally, instructions are in the [online docs](https://docs.solana.com/cluster/bench-tps).

### Accessing the remote testnet
* `testnet` - public stable testnet accessible via devnet.solana.com. Runs 24/7
### Accessing the remote development cluster
* `devnet` - stable public cluster for development accessible via
  devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solana.com/clusters)

# Benchmarking
@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.4.2"
version = "1.4.23"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,12 +18,13 @@ lazy_static = "1.4.0"
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.56"
solana-config-program = { path = "../programs/config", version = "1.4.2" }
solana-sdk = { path = "../sdk", version = "1.4.2" }
solana-stake-program = { path = "../programs/stake", version = "1.4.2" }
solana-vote-program = { path = "../programs/vote", version = "1.4.2" }
spl-token-v2-0 = { package = "spl-token", version = "=2.0.8" }
solana-config-program = { path = "../programs/config", version = "1.4.23" }
solana-sdk = { path = "../sdk", version = "1.4.23" }
solana-stake-program = { path = "../programs/stake", version = "1.4.23" }
solana-vote-program = { path = "../programs/vote", version = "1.4.23" }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.5.1"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -12,9 +12,14 @@ pub mod parse_token;
pub mod parse_vote;
pub mod validator_info;

use crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount};
use solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey};
use std::str::FromStr;
use {
    crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
    solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey},
    std::{
        io::{Read, Write},
        str::FromStr,
    },
};

pub type StringAmount = String;

@@ -44,6 +49,8 @@ pub enum UiAccountEncoding {
    Base58,
    Base64,
    JsonParsed,
    #[serde(rename = "base64+zstd")]
    Base64Zstd,
}

impl UiAccount {
@@ -66,6 +73,19 @@ impl UiAccount {
                base64::encode(slice_data(&account.data, data_slice_config)),
                encoding,
            ),
            UiAccountEncoding::Base64Zstd => {
                let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
                match encoder
                    .write_all(slice_data(&account.data, data_slice_config))
                    .and_then(|()| encoder.finish())
                {
                    Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
                    Err(_) => UiAccountData::Binary(
                        base64::encode(slice_data(&account.data, data_slice_config)),
                        UiAccountEncoding::Base64,
                    ),
                }
            }
            UiAccountEncoding::JsonParsed => {
                if let Ok(parsed_data) =
                    parse_account_data(pubkey, &account.owner, &account.data, additional_data)
@@ -92,6 +112,16 @@ impl UiAccount {
            UiAccountData::Binary(blob, encoding) => match encoding {
                UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
                UiAccountEncoding::Base64 => base64::decode(blob).ok(),
                UiAccountEncoding::Base64Zstd => base64::decode(blob)
                    .ok()
                    .map(|zstd_data| {
                        let mut data = vec![];
                        zstd::stream::read::Decoder::new(zstd_data.as_slice())
                            .and_then(|mut reader| reader.read_to_end(&mut data))
                            .map(|_| data)
                            .ok()
                    })
                    .flatten(),
                UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
            },
        }?;
@@ -179,4 +209,25 @@ mod test {
        });
        assert_eq!(slice_data(&data, slice_config), &[] as &[u8]);
    }

    #[test]
    fn test_base64_zstd() {
        let encoded_account = UiAccount::encode(
            &Pubkey::default(),
            Account {
                data: vec![0; 1024],
                ..Account::default()
            },
            UiAccountEncoding::Base64Zstd,
            None,
            None,
        );
        assert!(matches!(
            encoded_account.data,
            UiAccountData::Binary(_, UiAccountEncoding::Base64Zstd)
        ));

        let decoded_account = encoded_account.decode().unwrap();
        assert_eq!(decoded_account.data, vec![0; 1024]);
    }
}
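For readers tracking the new `base64+zstd` wire format, here is a minimal client-side sketch of the decode path that `UiAccount::decode` implements above. `decode_base64_zstd` is a hypothetical free function, not part of the crate, and it assumes the same `base64` and `zstd` crates used in the diff.

```rust
use std::io::Read;

// Hypothetical helper mirroring UiAccount::decode for the "base64+zstd"
// encoding: peel off the base64 layer first, then zstd-decompress the result.
fn decode_base64_zstd(blob: &str) -> Option<Vec<u8>> {
    let compressed = base64::decode(blob).ok()?;
    let mut data = vec![];
    zstd::stream::read::Decoder::new(compressed.as_slice())
        .and_then(|mut reader| reader.read_to_end(&mut data))
        .ok()?;
    Some(data)
}
```

The encode side in the diff is symmetric: account bytes are zstd-compressed at level 0 and then base64-encoded, falling back to plain Base64 if compression fails.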
@@ -118,7 +118,7 @@ mod test {

        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        let versioned = VoteStateVersions::new_current(vote_state);
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let parsed = parse_account_data(
            &account_pubkey,
@@ -105,6 +105,7 @@ pub enum SysvarAccountType {
|
||||
pub struct UiClock {
|
||||
pub slot: Slot,
|
||||
pub epoch: Epoch,
|
||||
pub epoch_start_timestamp: UnixTimestamp,
|
||||
pub leader_schedule_epoch: Epoch,
|
||||
pub unix_timestamp: UnixTimestamp,
|
||||
}
|
||||
@@ -114,6 +115,7 @@ impl From<Clock> for UiClock {
|
||||
Self {
|
||||
slot: clock.slot,
|
||||
epoch: clock.epoch,
|
||||
epoch_start_timestamp: clock.epoch_start_timestamp,
|
||||
leader_schedule_epoch: clock.leader_schedule_epoch,
|
||||
unix_timestamp: clock.unix_timestamp,
|
||||
}
|
||||
@@ -212,15 +214,14 @@ pub struct UiStakeHistoryEntry {
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
sysvar::{recent_blockhashes::IterItem, Sysvar},
|
||||
account::create_account, fee_calculator::FeeCalculator, hash::Hash,
|
||||
sysvar::recent_blockhashes::IterItem,
|
||||
};
|
||||
use std::iter::FromIterator;
|
||||
|
||||
#[test]
|
||||
fn test_parse_sysvars() {
|
||||
let clock_sysvar = Clock::default().create_account(1);
|
||||
let clock_sysvar = create_account(&Clock::default(), 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
|
||||
SysvarAccountType::Clock(UiClock::default()),
|
||||
@@ -233,13 +234,13 @@ mod test {
|
||||
first_normal_epoch: 1,
|
||||
first_normal_slot: 12,
|
||||
};
|
||||
let epoch_schedule_sysvar = epoch_schedule.create_account(1);
|
||||
let epoch_schedule_sysvar = create_account(&epoch_schedule, 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
|
||||
SysvarAccountType::EpochSchedule(epoch_schedule),
|
||||
);
|
||||
|
||||
let fees_sysvar = Fees::default().create_account(1);
|
||||
let fees_sysvar = create_account(&Fees::default(), 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
|
||||
SysvarAccountType::Fees(UiFees::default()),
|
||||
@@ -251,7 +252,7 @@ mod test {
|
||||
};
|
||||
let recent_blockhashes =
|
||||
RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
|
||||
let recent_blockhashes_sysvar = recent_blockhashes.create_account(1);
|
||||
let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(
|
||||
&recent_blockhashes_sysvar.data,
|
||||
@@ -269,13 +270,13 @@ mod test {
|
||||
exemption_threshold: 2.0,
|
||||
burn_percent: 5,
|
||||
};
|
||||
let rent_sysvar = rent.create_account(1);
|
||||
let rent_sysvar = create_account(&rent, 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
|
||||
SysvarAccountType::Rent(rent.into()),
|
||||
);
|
||||
|
||||
let rewards_sysvar = Rewards::default().create_account(1);
|
||||
let rewards_sysvar = create_account(&Rewards::default(), 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
|
||||
SysvarAccountType::Rewards(UiRewards::default()),
|
||||
@@ -283,7 +284,7 @@ mod test {
|
||||
|
||||
let mut slot_hashes = SlotHashes::default();
|
||||
slot_hashes.add(1, hash);
|
||||
let slot_hashes_sysvar = slot_hashes.create_account(1);
|
||||
let slot_hashes_sysvar = create_account(&slot_hashes, 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
|
||||
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
|
||||
@@ -294,7 +295,7 @@ mod test {
|
||||
|
||||
let mut slot_history = SlotHistory::default();
|
||||
slot_history.add(42);
|
||||
let slot_history_sysvar = slot_history.create_account(1);
|
||||
let slot_history_sysvar = create_account(&slot_history, 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
|
||||
SysvarAccountType::SlotHistory(UiSlotHistory {
|
||||
@@ -310,7 +311,7 @@ mod test {
|
||||
deactivating: 3,
|
||||
};
|
||||
stake_history.add(1, stake_history_entry.clone());
|
||||
let stake_history_sysvar = stake_history.create_account(1);
|
||||
let stake_history_sysvar = create_account(&stake_history, 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
|
||||
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
|
||||
|
@@ -4,7 +4,9 @@ use crate::{
};
use solana_sdk::pubkey::Pubkey;
use spl_token_v2_0::{
    solana_sdk::{program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey},
    solana_program::{
        program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
    },
    state::{Account, AccountState, Mint, Multisig},
};
use std::str::FromStr;
@@ -21,6 +23,16 @@ pub fn spl_token_v2_0_native_mint() -> Pubkey {
    Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
}

// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
    SplTokenPubkey::from_str(&pubkey.to_string()).unwrap()
}

// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey {
    Pubkey::from_str(&pubkey.to_string()).unwrap()
}

pub fn parse_token(
    data: &[u8],
    mint_decimals: Option<u8>,
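A small sketch of how the two new conversion helpers compose; the round trip is lossless because both `Pubkey` types wrap the same 32 bytes. The function name `round_trip` is illustrative only.

```rust
use solana_account_decoder::parse_token::{pubkey_from_spl_token_v2_0, spl_token_v2_0_pubkey};
use solana_sdk::pubkey::Pubkey;

// Convert an SDK pubkey into the spl-token crate's Pubkey type and back.
fn round_trip(pubkey: &Pubkey) -> Pubkey {
    pubkey_from_spl_token_v2_0(&spl_token_v2_0_pubkey(pubkey))
}
```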
@@ -128,7 +128,7 @@ mod test {
|
||||
fn test_parse_vote() {
|
||||
let vote_state = VoteState::default();
|
||||
let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state));
|
||||
let versioned = VoteStateVersions::new_current(vote_state);
|
||||
VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
|
||||
let mut expected_vote_state = UiVoteState::default();
|
||||
expected_vote_state.node_pubkey = Pubkey::default().to_string();
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accounts-bench"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -11,11 +11,11 @@ publish = false
|
||||
[dependencies]
|
||||
log = "0.4.6"
|
||||
rayon = "1.4.0"
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-measure = { path = "../measure", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.23" }
|
||||
solana-measure = { path = "../measure", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
rand = "0.7.0"
|
||||
clap = "2.33.1"
|
||||
crossbeam-channel = "0.4"
|
||||
|
@@ -98,7 +98,10 @@ fn main() {
|
||||
} else {
|
||||
let mut pubkeys: Vec<Pubkey> = vec![];
|
||||
let mut time = Measure::start("hash");
|
||||
let hash = accounts.accounts_db.update_accounts_hash(0, &ancestors).0;
|
||||
let hash = accounts
|
||||
.accounts_db
|
||||
.update_accounts_hash(0, &ancestors, true)
|
||||
.0;
|
||||
time.stop();
|
||||
println!("hash: {} {}", hash, time);
|
||||
create_test_accounts(&accounts, &mut pubkeys, 1, 0);
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -14,16 +14,16 @@ crossbeam-channel = "0.4"
|
||||
log = "0.4.6"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.4.0"
|
||||
solana-core = { path = "../core", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-streamer = { path = "../streamer", version = "1.4.2" }
|
||||
solana-perf = { path = "../perf", version = "1.4.2" }
|
||||
solana-ledger = { path = "../ledger", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-measure = { path = "../measure", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-core = { path = "../core", version = "1.4.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-streamer = { path = "../streamer", version = "1.4.23" }
|
||||
solana-perf = { path = "../perf", version = "1.4.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.4.23" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.23" }
|
||||
solana-measure = { path = "../measure", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-client"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
description = "Solana banks client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,15 +12,16 @@ edition = "2018"
|
||||
async-trait = "0.1.36"
|
||||
bincode = "1.3.1"
|
||||
futures = "0.3"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
tarpc = { version = "0.22.0", features = ["full"] }
|
||||
tokio = "0.2"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
tarpc = { version = "0.23.0", features = ["full"] }
|
||||
tokio = { version = "0.3", features = ["full"] }
|
||||
tokio-serde = { version = "0.6", features = ["bincode"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-banks-server = { path = "../banks-server", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.23" }
|
||||
solana-banks-server = { path = "../banks-server", version = "1.4.23" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -10,9 +10,17 @@ use futures::future::join_all;
pub use solana_banks_interface::{BanksClient, TransactionStatus};
use solana_banks_interface::{BanksRequest, BanksResponse};
use solana_sdk::{
    account::Account, clock::Slot, commitment_config::CommitmentLevel,
    fee_calculator::FeeCalculator, hash::Hash, pubkey::Pubkey, signature::Signature,
    transaction::Transaction, transport,
    account::{from_account, Account},
    clock::Slot,
    commitment_config::CommitmentLevel,
    fee_calculator::FeeCalculator,
    hash::Hash,
    pubkey::Pubkey,
    rent::Rent,
    signature::Signature,
    sysvar,
    transaction::Transaction,
    transport,
};
use std::io::{self, Error, ErrorKind};
use tarpc::{
@@ -40,6 +48,9 @@ pub trait BanksClientExt {
    /// use them to calculate the transaction fee.
    async fn get_fees(&mut self) -> io::Result<(FeeCalculator, Hash, Slot)>;

    /// Return the cluster rent
    async fn get_rent(&mut self) -> io::Result<Rent>;

    /// Send a transaction and return after the transaction has been rejected or
    /// reached the given level of commitment.
    async fn process_transaction_with_commitment(
@@ -108,6 +119,17 @@ impl BanksClientExt for BanksClient {
            .await
    }

    async fn get_rent(&mut self) -> io::Result<Rent> {
        let rent_sysvar = self
            .get_account(sysvar::rent::id())
            .await?
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;

        from_account::<Rent>(&rent_sysvar).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
        })
    }

    async fn get_recent_blockhash(&mut self) -> io::Result<Hash> {
        Ok(self.get_fees().await?.1)
    }
@@ -216,7 +238,7 @@ mod tests {
    use solana_sdk::{message::Message, signature::Signer, system_instruction};
    use std::sync::{Arc, RwLock};
    use tarpc::transport;
    use tokio::{runtime::Runtime, time::delay_for};
    use tokio::{runtime::Runtime, time::sleep};

    #[test]
    fn test_banks_client_new() {
@@ -285,7 +307,7 @@ mod tests {
            if root_slot > last_valid_slot {
                break;
            }
            delay_for(Duration::from_millis(100)).await;
            sleep(Duration::from_millis(100)).await;
            status = banks_client.get_transaction_status(signature).await?;
        }
        assert!(status.unwrap().err.is_none());
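A sketch of how a caller might use the new `get_rent` method, assuming the `BanksClient`/`BanksClientExt` re-exports shown above; `rent_exempt_minimum` and the 128-byte account size are illustrative only.

```rust
use solana_banks_client::{BanksClient, BanksClientExt};

// Fetch the Rent sysvar through the new extension method and compute the
// minimum balance needed to make a 128-byte account rent exempt.
async fn rent_exempt_minimum(banks_client: &mut BanksClient) -> std::io::Result<u64> {
    let rent = banks_client.get_rent().await?;
    Ok(rent.minimum_balance(128))
}
```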
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-interface"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
description = "Solana banks RPC interface"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -9,9 +9,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
mio = "0.7.6"
|
||||
serde = { version = "1.0.112", features = ["derive"] }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
tarpc = { version = "0.22.0", features = ["full"] }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
tarpc = { version = "0.23.0", features = ["full"] }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-server"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
description = "Solana banks server"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,12 +12,13 @@ edition = "2018"
|
||||
bincode = "1.3.1"
|
||||
futures = "0.3"
|
||||
log = "0.4.8"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.2" }
|
||||
tarpc = { version = "0.22.0", features = ["full"] }
|
||||
tokio = "0.2"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "1.4.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.23" }
|
||||
tarpc = { version = "0.23.0", features = ["full"] }
|
||||
tokio = { version = "0.3", features = ["full"] }
|
||||
tokio-serde = { version = "0.6", features = ["bincode"] }
|
||||
|
||||
[lib]
|
||||
|
@@ -5,11 +5,7 @@ use futures::{
|
||||
prelude::stream::{self, StreamExt},
|
||||
};
|
||||
use solana_banks_interface::{Banks, BanksRequest, BanksResponse, TransactionStatus};
|
||||
use solana_runtime::{
|
||||
bank::Bank,
|
||||
bank_forks::BankForks,
|
||||
commitment::{BlockCommitmentCache, CommitmentSlots},
|
||||
};
|
||||
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
@@ -21,7 +17,6 @@ use solana_sdk::{
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
io,
|
||||
net::{Ipv4Addr, SocketAddr},
|
||||
sync::{
|
||||
@@ -38,7 +33,7 @@ use tarpc::{
|
||||
server::{self, Channel, Handler},
|
||||
transport,
|
||||
};
|
||||
use tokio::time::delay_for;
|
||||
use tokio::time::sleep;
|
||||
use tokio_serde::formats::Bincode;
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -84,11 +79,9 @@ impl BanksServer {
|
||||
let (transaction_sender, transaction_receiver) = channel();
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let slot = bank.slot();
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
|
||||
HashMap::default(),
|
||||
0,
|
||||
CommitmentSlots::new_from_slot(slot),
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
|
||||
));
|
||||
Builder::new()
|
||||
.name("solana-bank-forks-client".to_string())
|
||||
.spawn(move || Self::run(&bank, transaction_receiver))
|
||||
@@ -109,23 +102,36 @@ impl BanksServer {
|
||||
|
||||
async fn poll_signature_status(
|
||||
self,
|
||||
signature: Signature,
|
||||
signature: &Signature,
|
||||
blockhash: &Hash,
|
||||
last_valid_slot: Slot,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<transaction::Result<()>> {
|
||||
let mut status = self.bank(commitment).get_signature_status(&signature);
|
||||
let mut status = self
|
||||
.bank(commitment)
|
||||
.get_signature_status_with_blockhash(signature, blockhash);
|
||||
while status.is_none() {
|
||||
delay_for(Duration::from_millis(200)).await;
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
let bank = self.bank(commitment);
|
||||
if bank.slot() > last_valid_slot {
|
||||
break;
|
||||
}
|
||||
status = bank.get_signature_status(&signature);
|
||||
status = bank.get_signature_status_with_blockhash(signature, blockhash);
|
||||
}
|
||||
status
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
|
||||
if let Err(err) = transaction.verify() {
|
||||
Err(err)
|
||||
} else if let Err(err) = transaction.verify_precompiles() {
|
||||
Err(err)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tarpc::server]
|
||||
impl Banks for BanksServer {
|
||||
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
|
||||
@@ -187,19 +193,23 @@ impl Banks for BanksServer {
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<transaction::Result<()>> {
|
||||
if let Err(err) = verify_transaction(&transaction) {
|
||||
return Some(Err(err));
|
||||
}
|
||||
|
||||
let blockhash = &transaction.message.recent_blockhash;
|
||||
let last_valid_slot = self
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.root_bank()
|
||||
.get_blockhash_last_valid_slot(&blockhash)
|
||||
.get_blockhash_last_valid_slot(blockhash)
|
||||
.unwrap();
|
||||
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
||||
let info =
|
||||
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
|
||||
self.transaction_sender.send(info).unwrap();
|
||||
self.poll_signature_status(signature, last_valid_slot, commitment)
|
||||
self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
|
||||
.await
|
||||
}
|
||||
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -18,21 +18,21 @@ rand = "0.7.0"
|
||||
rayon = "1.4.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-core = { path = "../core", version = "1.4.2" }
|
||||
solana-genesis = { path = "../genesis", version = "1.4.2" }
|
||||
solana-client = { path = "../client", version = "1.4.2" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.2" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-core = { path = "../core", version = "1.4.23" }
|
||||
solana-genesis = { path = "../genesis", version = "1.4.23" }
|
||||
solana-client = { path = "../client", version = "1.4.23" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.23" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.4.23" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.4.2" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.4.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -39,7 +39,7 @@ fn test_exchange_local_cluster() {
|
||||
} = config;
|
||||
let accounts_in_groups = batch_size * account_groups;
|
||||
|
||||
let cluster = LocalCluster::new(&ClusterConfig {
|
||||
let cluster = LocalCluster::new(&mut ClusterConfig {
|
||||
node_stakes: vec![100_000; NUM_NODES],
|
||||
cluster_lamports: 100_000_000_000_000,
|
||||
validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
|
||||
@@ -86,7 +86,7 @@ fn test_exchange_bank_client() {
|
||||
solana_logger::setup();
|
||||
let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
|
||||
let mut bank = Bank::new(&genesis_config);
|
||||
bank.add_builtin_program("exchange_program", id(), process_instruction);
|
||||
bank.add_builtin("exchange_program", id(), process_instruction);
|
||||
let clients = vec![BankClient::new(bank)];
|
||||
|
||||
let mut config = Config::default();
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -10,11 +10,11 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-streamer = { path = "../streamer", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-streamer = { path = "../streamer", version = "1.4.23" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -15,23 +15,23 @@ log = "0.4.8"
|
||||
rayon = "1.4.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-core = { path = "../core", version = "1.4.2" }
|
||||
solana-genesis = { path = "../genesis", version = "1.4.2" }
|
||||
solana-client = { path = "../client", version = "1.4.2" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.2" }
|
||||
solana-measure = { path = "../measure", version = "1.4.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-core = { path = "../core", version = "1.4.23" }
|
||||
solana-genesis = { path = "../genesis", version = "1.4.23" }
|
||||
solana-client = { path = "../client", version = "1.4.23" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.23" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.23" }
|
||||
solana-measure = { path = "../measure", version = "1.4.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.4.0"
|
||||
serial_test_derive = "0.4.0"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.4.2" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.4.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -15,7 +15,7 @@ fn test_bench_tps_local_cluster(config: Config) {
|
||||
|
||||
solana_logger::setup();
|
||||
const NUM_NODES: usize = 1;
|
||||
let cluster = LocalCluster::new(&ClusterConfig {
|
||||
let cluster = LocalCluster::new(&mut ClusterConfig {
|
||||
node_stakes: vec![999_990; NUM_NODES],
|
||||
cluster_lamports: 200_000_000,
|
||||
validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
|
||||
|
@@ -1,5 +1,13 @@
#!/usr/bin/env bash

here=$(dirname "$0")

maybe_bpf_sdk="--bpf-sdk $here/sdk/bpf"
for a in "$@"; do
  if [[ $a = --bpf-sdk ]]; then
    maybe_bpf_sdk=
  fi
done

set -x
exec cargo run --manifest-path $here/sdk/cargo-build-bpf/Cargo.toml -- --bpf-sdk $here/sdk/bpf "$@"
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"
cargo-test-bpf (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

here=$(dirname "$0")

maybe_bpf_sdk="--bpf-sdk $here/sdk/bpf"
for a in "$@"; do
  if [[ $a = --bpf-sdk ]]; then
    maybe_bpf_sdk=
  fi
done

export CARGO_BUILD_BPF="$here"/cargo-build-bpf
set -x
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-test-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"
@@ -175,6 +175,30 @@ EOF
|
||||
"Stable-perf skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Downstream backwards compatibility
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-stable-perf.sh \
|
||||
^ci/test-stable.sh \
|
||||
^ci/test-local-cluster.sh \
|
||||
^core/build.rs \
|
||||
^fetch-perf-libs.sh \
|
||||
^programs/ \
|
||||
^sdk/ \
|
||||
^scripts/build-downstream-projects.sh \
|
||||
; then
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "scripts/build-downstream-projects.sh"
|
||||
name: "downstream-projects"
|
||||
timeout_in_minutes: 30
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
"downstream-projects skipped as no relevant files were modified"
|
||||
fi
|
||||
# Benches...
|
||||
if affects \
|
||||
.rs$ \
|
||||
|
@@ -27,8 +27,8 @@ declare print_free_tree=(
|
||||
':sdk/bpf/rust/rust-utils/**.rs'
|
||||
':sdk/**.rs'
|
||||
':^sdk/cargo-build-bpf/**.rs'
|
||||
':^sdk/src/program_option.rs'
|
||||
':^sdk/src/program_stubs.rs'
|
||||
':^sdk/program/src/program_option.rs'
|
||||
':^sdk/program/src/program_stubs.rs'
|
||||
':programs/**.rs'
|
||||
':^**bin**.rs'
|
||||
':^**bench**.rs'
|
||||
|
@@ -4,6 +4,8 @@ cd "$(dirname "$0")/.."
|
||||
source ci/semver_bash/semver.sh
|
||||
source ci/rust-version.sh stable
|
||||
|
||||
cargo="$(readlink -f ./cargo)"
|
||||
|
||||
# shellcheck disable=SC2086
|
||||
is_crate_version_uploaded() {
|
||||
name=$1
|
||||
@@ -66,11 +68,11 @@ for Cargo_toml in $Cargo_tomls; do
|
||||
(
|
||||
set -x
|
||||
rm -rf crate-test
|
||||
cargo +"$rust_stable" init crate-test
|
||||
"$cargo" stable init crate-test
|
||||
cd crate-test/
|
||||
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
|
||||
echo "[workspace]" >> Cargo.toml
|
||||
cargo +"$rust_stable" check
|
||||
"$cargo" stable check
|
||||
) && really_uploaded=1
|
||||
if ((really_uploaded)); then
|
||||
break;
|
||||
|
@@ -85,9 +85,6 @@ echo --- Creating release tarball
|
||||
source ci/rust-version.sh stable
|
||||
scripts/cargo-install-all.sh +"$rust_stable" "${RELEASE_BASENAME}"
|
||||
|
||||
mkdir -p "${RELEASE_BASENAME}"/bin/sdk/bpf
|
||||
cp -a sdk/bpf/* "${RELEASE_BASENAME}"/bin/sdk/bpf
|
||||
|
||||
tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}"
|
||||
bzip2 "${TARBALL_BASENAME}"-$TARGET.tar
|
||||
cp "${RELEASE_BASENAME}"/bin/solana-install-init solana-install-init-$TARGET
|
||||
@@ -127,7 +124,7 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
|
||||
/usr/bin/s3cmd --acl-public put /solana/"$file" s3://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
|
||||
|
||||
echo Published to:
|
||||
$DRYRUN ci/format-url.sh http://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
|
||||
$DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/"$file"
|
||||
)
|
||||
|
||||
if [[ -n $TAG ]]; then
|
||||
@@ -150,4 +147,30 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET.
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
# Create install wrapper for release.solana.com
|
||||
if [[ -n $BUILDKITE ]]; then
|
||||
cat > release.solana.com-install <<EOF
|
||||
SOLANA_RELEASE=$CHANNEL_OR_TAG
|
||||
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
|
||||
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
|
||||
EOF
|
||||
cat install/solana-install-init.sh >> release.solana.com-install
|
||||
|
||||
echo --- AWS S3 Store: "install"
|
||||
(
|
||||
set -x
|
||||
$DRYRUN docker run \
|
||||
--rm \
|
||||
--env AWS_ACCESS_KEY_ID \
|
||||
--env AWS_SECRET_ACCESS_KEY \
|
||||
--volume "$PWD:/solana" \
|
||||
eremite/aws-cli:2018.12.18 \
|
||||
/usr/bin/s3cmd --acl-public put /solana/release.solana.com-install s3://release.solana.com/"$CHANNEL_OR_TAG"/install
|
||||
|
||||
echo Published to:
|
||||
$DRYRUN ci/format-url.sh https://release.solana.com/"$CHANNEL_OR_TAG"/install
|
||||
)
|
||||
fi
|
||||
|
||||
echo --- ok
|
||||
|
@@ -6,7 +6,8 @@ source ci/_
|
||||
source ci/upload-ci-artifact.sh
|
||||
|
||||
eval "$(ci/channel-info.sh)"
|
||||
source ci/rust-version.sh all
|
||||
|
||||
cargo="$(readlink -f "./cargo")"
|
||||
|
||||
set -o pipefail
|
||||
export RUST_BACKTRACE=1
|
||||
@@ -27,35 +28,35 @@ test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
||||
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||
|
||||
# Ensure all dependencies are built
|
||||
_ cargo +$rust_nightly build --release
|
||||
_ "$cargo" nightly build --release
|
||||
|
||||
# Remove "BENCH_FILE", if it exists so that the following commands can append
|
||||
rm -f "$BENCH_FILE"
|
||||
|
||||
# Run sdk benches
|
||||
_ cargo +$rust_nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
|
||||
_ "$cargo" nightly bench --manifest-path sdk/Cargo.toml ${V:+--verbose} \
|
||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||
|
||||
# Run runtime benches
|
||||
_ cargo +$rust_nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
|
||||
_ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
|
||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||
|
||||
# Run core benches
|
||||
_ cargo +$rust_nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
|
||||
_ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
|
||||
-- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
|
||||
|
||||
# Run bpf benches
|
||||
_ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \
|
||||
_ "$cargo" nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verbose} --features=bpf_c \
|
||||
-- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE"
|
||||
|
||||
# Run banking/accounts bench. Doesn't require nightly, but use since it is already built.
|
||||
_ cargo +$rust_nightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
|
||||
_ cargo +$rust_nightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE"
|
||||
_ "$cargo" nightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
|
||||
_ "$cargo" nightly run --release --manifest-path accounts-bench/Cargo.toml ${V:+--verbose} -- --num_accounts 10000 --num_slots 4 | tee -a "$BENCH_FILE"
|
||||
|
||||
# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some
|
||||
# reason
|
||||
exit 0
|
||||
_ cargo +$rust_nightly run --release --package solana-upload-perf \
|
||||
_ "$cargo" nightly run --release --package solana-upload-perf \
|
||||
-- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" | tee "$BENCH_ARTIFACT"
|
||||
|
||||
upload-ci-artifact "$BENCH_FILE"
|
||||
|
@@ -8,6 +8,9 @@ source ci/_
|
||||
source ci/rust-version.sh stable
|
||||
source ci/rust-version.sh nightly
|
||||
eval "$(ci/channel-info.sh)"
|
||||
cargo="$(readlink -f "./cargo")"
|
||||
|
||||
scripts/increment-cargo-version.sh check
|
||||
|
||||
echo --- build environment
|
||||
(
|
||||
@@ -16,14 +19,14 @@ echo --- build environment
|
||||
rustup run "$rust_stable" rustc --version --verbose
|
||||
rustup run "$rust_nightly" rustc --version --verbose
|
||||
|
||||
cargo +"$rust_stable" --version --verbose
|
||||
cargo +"$rust_nightly" --version --verbose
|
||||
"$cargo" stable --version --verbose
|
||||
"$cargo" nightly --version --verbose
|
||||
|
||||
cargo +"$rust_stable" clippy --version --verbose
|
||||
cargo +"$rust_nightly" clippy --version --verbose
|
||||
"$cargo" stable clippy --version --verbose
|
||||
"$cargo" nightly clippy --version --verbose
|
||||
|
||||
# audit is done only with stable
|
||||
cargo +"$rust_stable" audit --version
|
||||
"$cargo" stable audit --version
|
||||
)
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
@@ -49,11 +52,11 @@ else
|
||||
fi
|
||||
|
||||
_ ci/order-crates-for-publishing.py
|
||||
_ cargo +"$rust_stable" fmt --all -- --check
|
||||
_ "$cargo" stable fmt --all -- --check
|
||||
|
||||
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
|
||||
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
|
||||
_ cargo +"$rust_nightly" clippy \
|
||||
_ "$cargo" nightly clippy \
|
||||
-Zunstable-options --workspace --all-targets \
|
||||
-- --deny=warnings --allow=clippy::stable_sort_primitive
|
||||
|
||||
@@ -67,19 +70,35 @@ cargo_audit_ignores=(
|
||||
#
|
||||
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
|
||||
--ignore RUSTSEC-2020-0016
|
||||
|
||||
# stdweb is unmaintained
|
||||
#
|
||||
# Blocked on multiple upstream crates removing their `stdweb` dependency.
|
||||
--ignore RUSTSEC-2020-0056
|
||||
|
||||
# Potential segfault in the time crate
|
||||
#
|
||||
# Blocked on multiple crates updating `time` to >= 0.2.23
|
||||
--ignore RUSTSEC-2020-0071
|
||||
|
||||
# difference is unmaintained
|
||||
#
|
||||
# Blocked on predicates v1.0.6 removing its dependency on `difference`
|
||||
--ignore RUSTSEC-2020-0095
|
||||
|
||||
)
|
||||
_ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
|
||||
|
||||
{
|
||||
cd programs/bpf
|
||||
_ cargo +"$rust_stable" audit
|
||||
_ "$cargo" stable audit
|
||||
for project in rust/*/ ; do
|
||||
echo "+++ do_bpf_checks $project"
|
||||
(
|
||||
cd "$project"
|
||||
_ cargo +"$rust_stable" fmt -- --check
|
||||
_ cargo +"$rust_nightly" test
|
||||
_ cargo +"$rust_nightly" clippy -- --deny=warnings \
|
||||
_ "$cargo" stable fmt -- --check
|
||||
_ "$cargo" nightly test
|
||||
_ "$cargo" nightly clippy -- --deny=warnings \
|
||||
--allow=clippy::missing_safety_doc \
|
||||
--allow=clippy::stable_sort_primitive
|
||||
)
|
||||
|
@@ -2,6 +2,8 @@
|
||||
set -e
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
cargo="$(readlink -f "./cargo")"
|
||||
|
||||
source ci/_
|
||||
|
||||
annotate() {
|
||||
@@ -37,12 +39,15 @@ NPROC=$((NPROC>14 ? 14 : NPROC))
|
||||
echo "Executing $testName"
|
||||
case $testName in
|
||||
test-stable)
|
||||
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
|
||||
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
|
||||
;;
|
||||
test-stable-perf)
|
||||
# BPF solana-sdk legacy compile test
|
||||
./cargo-build-bpf --manifest-path sdk/Cargo.toml
|
||||
|
||||
# BPF program tests
|
||||
_ make -C programs/bpf/c tests
|
||||
_ cargo +"$rust_stable" test \
|
||||
_ "$cargo" stable test \
|
||||
--manifest-path programs/bpf/Cargo.toml \
|
||||
--no-default-features --features=bpf_c,bpf_rust -- --nocapture
|
||||
|
||||
@@ -62,13 +67,13 @@ test-stable-perf)
|
||||
export SOLANA_CUDA=1
|
||||
fi
|
||||
|
||||
_ cargo +"$rust_stable" build --bins ${V:+--verbose}
|
||||
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
|
||||
_ cargo +"$rust_stable" run --manifest-path poh-bench/Cargo.toml ${V:+--verbose} -- --hashes-per-tick 10
|
||||
_ "$cargo" stable build --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
|
||||
_ "$cargo" stable run --manifest-path poh-bench/Cargo.toml ${V:+--verbose} -- --hashes-per-tick 10
|
||||
;;
|
||||
test-local-cluster)
|
||||
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
|
||||
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -11,8 +11,8 @@ edition = "2018"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
thiserror = "1.0.20"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.0"
|
||||
|
@@ -189,6 +189,7 @@ pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentC
|
||||
"recent" => CommitmentConfig::recent(),
|
||||
"root" => CommitmentConfig::root(),
|
||||
"single" => CommitmentConfig::single(),
|
||||
"singleGossip" => CommitmentConfig::single_gossip(),
|
||||
_ => CommitmentConfig::default(),
|
||||
})
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
use crate::keypair::{parse_keypair_path, KeypairUrl, ASK_KEYWORD};
use chrono::DateTime;
use solana_sdk::{
    clock::Slot,
    clock::{Epoch, Slot},
    hash::Hash,
    pubkey::Pubkey,
    signature::{read_keypair_file, Signature},
@@ -148,6 +148,40 @@ where
    }
}

pub fn is_url_or_moniker<T>(string: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    match url::Url::parse(&normalize_to_url_if_moniker(string.as_ref())) {
        Ok(url) => {
            if url.has_host() {
                Ok(())
            } else {
                Err("no host provided".to_string())
            }
        }
        Err(err) => Err(format!("{}", err)),
    }
}

pub fn normalize_to_url_if_moniker(url_or_moniker: &str) -> String {
    match url_or_moniker {
        "m" | "mainnet-beta" => "https://api.mainnet-beta.solana.com",
        "t" | "testnet" => "https://testnet.solana.com",
        "d" | "devnet" => "https://devnet.solana.com",
        "l" | "localhost" => "http://localhost:8899",
        url => url,
    }
    .to_string()
}

pub fn is_epoch<T>(epoch: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
{
    is_parsable_generic::<Epoch, _>(epoch)
}

pub fn is_slot<T>(slot: T) -> Result<(), String>
where
    T: AsRef<str> + Display,
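
A short usage sketch of the helpers added above (assumptions: solana-clap-utils exports them from input_validators, clap 2.33 is in scope, and the "json_rpc_url"/"--url" names below are illustrative only):

```rust
use clap::{App, Arg};
use solana_clap_utils::input_validators::{is_url_or_moniker, normalize_to_url_if_moniker};

fn main() {
    // Monikers expand to full RPC URLs; anything else passes through unchanged.
    assert_eq!(
        normalize_to_url_if_moniker("m"),
        "https://api.mainnet-beta.solana.com"
    );
    assert_eq!(normalize_to_url_if_moniker("l"), "http://localhost:8899");

    // is_url_or_moniker returns Result<(), String>, the shape clap 2 validators expect.
    let _app = App::new("demo").arg(
        Arg::with_name("json_rpc_url")
            .long("url")
            .takes_value(true)
            .validator(is_url_or_moniker),
    );
}
```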
@@ -58,6 +58,15 @@ impl CliSignerInfo {
            Some(0)
        }
    }
    pub fn index_of_or_none(&self, pubkey: Option<Pubkey>) -> Option<usize> {
        if let Some(pubkey) = pubkey {
            self.signers
                .iter()
                .position(|signer| signer.pubkey() == pubkey)
        } else {
            None
        }
    }
}

pub struct DefaultSigner {
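
A hedged sketch of the new helper in use (assuming CliSignerInfo keeps a public signers: Vec<Box<dyn Signer>> field, as elsewhere in this crate):

```rust
use solana_clap_utils::keypair::CliSignerInfo;
use solana_sdk::signature::{Keypair, Signer};

fn main() {
    let payer = Keypair::new();
    let payer_pubkey = payer.pubkey();
    let other = Keypair::new();

    // index_of_or_none maps an optional pubkey to the matching signer's position,
    // and simply returns None when no pubkey was supplied.
    let signers: Vec<Box<dyn Signer>> = vec![Box::new(payer), Box::new(other)];
    let signer_info = CliSignerInfo { signers };

    assert_eq!(signer_info.index_of_or_none(Some(payer_pubkey)), Some(0));
    assert_eq!(signer_info.index_of_or_none(None), None);
}
```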
@@ -3,13 +3,13 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.4.2"
version = "1.4.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
dirs = "2.0.2"
dirs-next = "2.0.0"
lazy_static = "1.4.0"
serde = "1.0.112"
serde_derive = "1.0.103"
@@ -5,7 +5,7 @@ use url::Url;

lazy_static! {
    pub static ref CONFIG_FILE: Option<String> = {
        dirs::home_dir().map(|mut path| {
        dirs_next::home_dir().map(|mut path| {
            path.extend(&[".config", "solana", "cli", "config.yml"]);
            path.to_str().unwrap().to_string()
        })
@@ -25,7 +25,7 @@ pub struct Config {
impl Default for Config {
    fn default() -> Self {
        let keypair_path = {
            let mut keypair_path = dirs::home_dir().expect("home directory");
            let mut keypair_path = dirs_next::home_dir().expect("home directory");
            keypair_path.extend(&[".config", "solana", "id.json"]);
            keypair_path.to_str().unwrap().to_string()
        };
@@ -76,17 +76,6 @@ impl Config {
        ws_url.to_string()
    }

    pub fn compute_rpc_banks_url(json_rpc_url: &str) -> String {
        let json_rpc_url: Option<Url> = json_rpc_url.parse().ok();
        if json_rpc_url.is_none() {
            return "".to_string();
        }
        let mut url = json_rpc_url.unwrap();
        let port = url.port().unwrap_or(8899);
        url.set_port(Some(port + 3)).expect("unable to set port");
        url.to_string()
    }

    pub fn import_address_labels<P>(&mut self, filename: P) -> Result<(), io::Error>
    where
        P: AsRef<Path>,
@@ -133,28 +122,4 @@ mod test {

        assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
    }

    #[test]
    fn compute_rpc_banks_url() {
        assert_eq!(
            Config::compute_rpc_banks_url(&"http://devnet.solana.com"),
            "http://devnet.solana.com:8902/".to_string()
        );

        assert_eq!(
            Config::compute_rpc_banks_url(&"https://devnet.solana.com"),
            "https://devnet.solana.com:8902/".to_string()
        );

        assert_eq!(
            Config::compute_rpc_banks_url(&"http://example.com:8899"),
            "http://example.com:8902/".to_string()
        );
        assert_eq!(
            Config::compute_rpc_banks_url(&"https://example.com:1234"),
            "https://example.com:1237/".to_string()
        );

        assert_eq!(Config::compute_rpc_banks_url(&"garbage"), String::new());
    }
}
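
The dirs to dirs-next swap above keeps the same call shape; a standalone sketch of the migrated pattern (assuming dirs-next 2.0 as a dependency):

```rust
fn main() {
    // Build the default CLI config path from the home directory, as the updated code does.
    if let Some(mut path) = dirs_next::home_dir() {
        path.extend(&[".config", "solana", "cli", "config.yml"]);
        println!("default config path: {}", path.display());
    } else {
        eprintln!("home directory not found");
    }
}
```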
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-output"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -17,13 +17,13 @@ indicatif = "0.15.0"
|
||||
serde = "1.0.112"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-client = { path = "../client", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-client = { path = "../client", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -10,7 +10,8 @@ use serde_json::{Map, Value};
|
||||
use solana_account_decoder::parse_token::UiTokenAccount;
|
||||
use solana_clap_utils::keypair::SignOnly;
|
||||
use solana_client::rpc_response::{
|
||||
RpcAccountBalance, RpcKeyedAccount, RpcSupply, RpcVoteAccountInfo,
|
||||
RpcAccountBalance, RpcInflationGovernor, RpcInflationRate, RpcKeyedAccount, RpcSupply,
|
||||
RpcVoteAccountInfo,
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::{self, Epoch, Slot, UnixTimestamp},
|
||||
@@ -241,6 +242,9 @@ impl fmt::Display for CliEpochInfo {
|
||||
)?;
|
||||
writeln_name_value(f, "Slot:", &self.epoch_info.absolute_slot.to_string())?;
|
||||
writeln_name_value(f, "Epoch:", &self.epoch_info.epoch.to_string())?;
|
||||
if let Some(transaction_count) = &self.epoch_info.transaction_count {
|
||||
writeln_name_value(f, "Transaction Count:", &transaction_count.to_string())?;
|
||||
}
|
||||
let start_slot = self.epoch_info.absolute_slot - self.epoch_info.slot_index;
|
||||
let end_slot = start_slot + self.epoch_info.slots_in_epoch;
|
||||
writeln_name_value(
|
||||
@@ -520,7 +524,7 @@ impl fmt::Display for CliNonceAccount {
|
||||
)
|
||||
)?;
|
||||
let nonce = self.nonce.as_deref().unwrap_or("uninitialized");
|
||||
writeln!(f, "Nonce: {}", nonce)?;
|
||||
writeln!(f, "Nonce blockhash: {}", nonce)?;
|
||||
if let Some(fees) = self.lamports_per_signature {
|
||||
writeln!(f, "Fee: {} lamports per signature", fees)?;
|
||||
} else {
|
||||
@@ -541,7 +545,15 @@ impl CliStakeVec {
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliStakeVec {}
|
||||
impl VerboseDisplay for CliStakeVec {}
|
||||
impl VerboseDisplay for CliStakeVec {
|
||||
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
|
||||
for state in &self.0 {
|
||||
writeln!(w)?;
|
||||
VerboseDisplay::write_str(state, w)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CliStakeVec {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
@@ -562,7 +574,12 @@ pub struct CliKeyedStakeState {
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliKeyedStakeState {}
|
||||
impl VerboseDisplay for CliKeyedStakeState {}
|
||||
impl VerboseDisplay for CliKeyedStakeState {
|
||||
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
|
||||
writeln!(w, "Stake Pubkey: {}", self.stake_pubkey)?;
|
||||
VerboseDisplay::write_str(&self.stake_state, w)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CliKeyedStakeState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
@@ -579,7 +596,7 @@ pub struct CliEpochReward {
    pub amount: u64, // lamports
    pub post_balance: u64, // lamports
    pub percent_change: f64,
    pub apr: f64,
    pub apr: Option<f64>,
}

fn show_epoch_rewards(
@@ -594,19 +611,22 @@ fn show_epoch_rewards(
        writeln!(f, "Epoch Rewards:")?;
        writeln!(
            f,
            " {:<8} {:<11} {:<15} {:<15} {:>14} {:>14}",
            " {:<6} {:<11} {:<16} {:<16} {:>14} {:>14}",
            "Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR"
        )?;
        for reward in epoch_rewards {
            writeln!(
                f,
                " {:<8} {:<11} ◎{:<14.9} ◎{:<14.9} {:>13.9}% {:>13.9}%",
                " {:<6} {:<11} ◎{:<16.9} ◎{:<14.9} {:>13.2}% {}",
                reward.epoch,
                reward.effective_slot,
                lamports_to_sol(reward.amount),
                lamports_to_sol(reward.post_balance),
                reward.percent_change,
                reward.apr,
                reward
                    .apr
                    .map(|apr| format!("{:>13.2}%", apr))
                    .unwrap_or_default(),
            )?;
        }
    }
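
A small sketch of the rendering change, with made-up numbers: now that apr is an Option, a reward with no known APR prints an empty column instead of a misleading figure:

```rust
fn render_apr(apr: Option<f64>) -> String {
    // Mirrors the formatting used above: fixed-width percentage, or nothing at all.
    apr.map(|apr| format!("{:>13.2}%", apr)).unwrap_or_default()
}

fn main() {
    assert_eq!(render_apr(None), "");
    println!("known APR:   [{}]", render_apr(Some(7.25)));
    println!("unknown APR: [{}]", render_apr(None));
}
```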
@@ -619,6 +639,8 @@ pub struct CliStakeState {
|
||||
pub stake_type: CliStakeType,
|
||||
pub account_balance: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub credits_observed: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_stake: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_vote_account_address: Option<String>,
|
||||
@@ -647,7 +669,15 @@ pub struct CliStakeState {
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliStakeState {}
|
||||
impl VerboseDisplay for CliStakeState {}
|
||||
impl VerboseDisplay for CliStakeState {
|
||||
fn write_str(&self, w: &mut dyn std::fmt::Write) -> std::fmt::Result {
|
||||
write!(w, "{}", self)?;
|
||||
if let Some(credits) = self.credits_observed {
|
||||
writeln!(w, "Credits Observed: {}", credits)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CliStakeState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
@@ -1112,6 +1142,104 @@ impl fmt::Display for CliBlockTime {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliLeaderSchedule {
|
||||
pub epoch: Epoch,
|
||||
pub leader_schedule_entries: Vec<CliLeaderScheduleEntry>,
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliLeaderSchedule {}
|
||||
impl VerboseDisplay for CliLeaderSchedule {}
|
||||
|
||||
impl fmt::Display for CliLeaderSchedule {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
for entry in &self.leader_schedule_entries {
|
||||
writeln!(f, " {:<15} {:<44}", entry.slot, entry.leader)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliLeaderScheduleEntry {
|
||||
pub slot: Slot,
|
||||
pub leader: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliInflation {
|
||||
pub governor: RpcInflationGovernor,
|
||||
pub current_rate: RpcInflationRate,
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliInflation {}
|
||||
impl VerboseDisplay for CliInflation {}
|
||||
|
||||
impl fmt::Display for CliInflation {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f, "{}", style("Inflation Governor:").bold())?;
|
||||
if (self.governor.initial - self.governor.terminal).abs() < f64::EPSILON {
|
||||
writeln!(
|
||||
f,
|
||||
"Fixed APR: {:>5.2}%",
|
||||
self.governor.terminal * 100.
|
||||
)?;
|
||||
} else {
|
||||
writeln!(
|
||||
f,
|
||||
"Initial APR: {:>5.2}%",
|
||||
self.governor.initial * 100.
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Terminal APR: {:>5.2}%",
|
||||
self.governor.terminal * 100.
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Rate reduction per year: {:>5.2}%",
|
||||
self.governor.taper * 100.
|
||||
)?;
|
||||
}
|
||||
if self.governor.foundation_term > 0. {
|
||||
writeln!(
|
||||
f,
|
||||
"Foundation percentage: {:>5.2}%",
|
||||
self.governor.foundation
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Foundation term: {:.1} years",
|
||||
self.governor.foundation_term
|
||||
)?;
|
||||
}
|
||||
|
||||
writeln!(
|
||||
f,
|
||||
"\n{}",
|
||||
style(format!("Inflation for Epoch {}:", self.current_rate.epoch)).bold()
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Total APR: {:>5.2}%",
|
||||
self.current_rate.total * 100.
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Staking APR: {:>5.2}%",
|
||||
self.current_rate.validator * 100.
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Foundation APR: {:>5.2}%",
|
||||
self.current_rate.foundation * 100.
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliSignOnlyData {
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -16,7 +16,7 @@ clap = "2.33.1"
|
||||
criterion-stats = "0.3.0"
|
||||
ctrlc = { version = "3.1.5", features = ["termination"] }
|
||||
console = "0.11.3"
|
||||
dirs = "2.0.2"
|
||||
dirs-next = "2.0.0"
|
||||
log = "0.4.8"
|
||||
Inflector = "0.11.4"
|
||||
indicatif = "0.15.0"
|
||||
@@ -27,31 +27,30 @@ reqwest = { version = "0.10.8", default-features = false, features = ["blocking"
|
||||
serde = "1.0.112"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.2" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.4.2" }
|
||||
solana-cli-output = { path = "../cli-output", version = "1.4.2" }
|
||||
solana-client = { path = "../client", version = "1.4.2" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.4.2" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.2" }
|
||||
solana_rbpf = "=0.1.32"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.2" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.4.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.23" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.4.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.4.23" }
|
||||
solana-cli-output = { path = "../cli-output", version = "1.4.23" }
|
||||
solana-client = { path = "../client", version = "1.4.23" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.4.23" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.23" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.23" }
|
||||
solana_rbpf = "=0.1.34"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.23" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.4.23" }
|
||||
thiserror = "1.0.20"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "1.4.2" }
|
||||
solana-core = { path = "../core", version = "1.4.23" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
483 cli/src/cli.rs
@@ -1,14 +1,12 @@
|
||||
use crate::{
|
||||
checks::*, cluster_query::*, feature::*, inflation::*, nonce::*, spend_utils::*, stake::*,
|
||||
cluster_query::*, feature::*, inflation::*, nonce::*, program::*, spend_utils::*, stake::*,
|
||||
validator_info::*, vote::*,
|
||||
};
|
||||
use bip39::{Language, Mnemonic, MnemonicType, Seed};
|
||||
use clap::{value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use log::*;
|
||||
use num_traits::FromPrimitive;
|
||||
use serde_json::{self, json, Value};
|
||||
use serde_json::{self, Value};
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_bpf_loader_program::bpf_verifier;
|
||||
use solana_clap_utils::{
|
||||
self,
|
||||
commitment::commitment_arg_with_default,
|
||||
@@ -20,9 +18,7 @@ use solana_clap_utils::{
|
||||
offline::*,
|
||||
};
|
||||
use solana_cli_output::{
|
||||
display::{
|
||||
build_balance_message, new_spinner_progress_bar, println_name_value, println_transaction,
|
||||
},
|
||||
display::{build_balance_message, println_name_value, println_transaction},
|
||||
return_signers, CliAccount, CliSignature, OutputFormat,
|
||||
};
|
||||
use solana_client::{
|
||||
@@ -30,28 +26,23 @@ use solana_client::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
nonce_utils,
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
|
||||
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
|
||||
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionLogsFilter},
|
||||
rpc_response::RpcKeyedAccount,
|
||||
};
|
||||
#[cfg(not(test))]
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
#[cfg(test)]
|
||||
use solana_faucet::faucet_mock::request_airdrop_transaction;
|
||||
use solana_rbpf::vm::EbpfVm;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
bpf_loader, bpf_loader_deprecated,
|
||||
clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
|
||||
clock::{Epoch, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
decode_error::DecodeError,
|
||||
hash::Hash,
|
||||
instruction::{Instruction, InstructionError},
|
||||
loader_instruction,
|
||||
instruction::InstructionError,
|
||||
message::Message,
|
||||
pubkey::{Pubkey, MAX_SEED_LEN},
|
||||
signature::{keypair_from_seed, Keypair, Signature, Signer, SignerError},
|
||||
signers::Signers,
|
||||
signature::{Signature, Signer, SignerError},
|
||||
system_instruction::{self, SystemError},
|
||||
system_program,
|
||||
transaction::{Transaction, TransactionError},
|
||||
@@ -67,7 +58,7 @@ use std::{
|
||||
error,
|
||||
fmt::Write as FmtWrite,
|
||||
fs::File,
|
||||
io::{Read, Write},
|
||||
io::Write,
|
||||
net::{IpAddr, SocketAddr},
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
@@ -77,7 +68,6 @@ use std::{
|
||||
use thiserror::Error;
|
||||
use url::Url;
|
||||
|
||||
const DATA_CHUNK_SIZE: usize = 229; // Keep program chunks under PACKET_DATA_SIZE
|
||||
pub const DEFAULT_RPC_TIMEOUT_SECONDS: &str = "30";
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@@ -85,9 +75,11 @@ pub const DEFAULT_RPC_TIMEOUT_SECONDS: &str = "30";
|
||||
pub enum CliCommand {
|
||||
// Cluster Query Commands
|
||||
Catchup {
|
||||
node_pubkey: Pubkey,
|
||||
node_pubkey: Option<Pubkey>,
|
||||
node_json_rpc_url: Option<String>,
|
||||
follow: bool,
|
||||
our_localhost_port: Option<u16>,
|
||||
log: bool,
|
||||
},
|
||||
ClusterDate,
|
||||
ClusterVersion,
|
||||
@@ -115,13 +107,20 @@ pub enum CliCommand {
|
||||
LargestAccounts {
|
||||
filter: Option<RpcLargestAccountsFilter>,
|
||||
},
|
||||
LeaderSchedule,
|
||||
LeaderSchedule {
|
||||
epoch: Option<Epoch>,
|
||||
},
|
||||
LiveSlots,
|
||||
Logs {
|
||||
filter: RpcTransactionLogsFilter,
|
||||
},
|
||||
Ping {
|
||||
lamports: u64,
|
||||
interval: Duration,
|
||||
count: Option<u64>,
|
||||
timeout: Duration,
|
||||
blockhash: Option<Hash>,
|
||||
print_timestamp: bool,
|
||||
},
|
||||
ShowBlockProduction {
|
||||
epoch: Option<Epoch>,
|
||||
@@ -146,6 +145,9 @@ pub enum CliCommand {
|
||||
limit: usize,
|
||||
show_transactions: bool,
|
||||
},
|
||||
WaitForMaxStake {
|
||||
max_stake_percent: f32,
|
||||
},
|
||||
// Nonce commands
|
||||
AuthorizeNonceAccount {
|
||||
nonce_account: Pubkey,
|
||||
@@ -178,7 +180,9 @@ pub enum CliCommand {
|
||||
program_location: String,
|
||||
address: Option<SignerIndex>,
|
||||
use_deprecated_loader: bool,
|
||||
allow_excessive_balance: bool,
|
||||
},
|
||||
Program(ProgramCliCommand),
|
||||
// Stake Commands
|
||||
CreateStakeAccount {
|
||||
stake_account: SignerIndex,
|
||||
@@ -461,11 +465,12 @@ impl CliConfig<'_> {
|
||||
json_rpc_cmd_url: &str,
|
||||
json_rpc_cfg_url: &str,
|
||||
) -> (SettingType, String) {
|
||||
Self::first_nonempty_setting(vec![
|
||||
let (setting_type, url_or_moniker) = Self::first_nonempty_setting(vec![
|
||||
(SettingType::Explicit, json_rpc_cmd_url.to_string()),
|
||||
(SettingType::Explicit, json_rpc_cfg_url.to_string()),
|
||||
(SettingType::SystemDefault, Self::default_json_rpc_url()),
|
||||
])
|
||||
]);
|
||||
(setting_type, normalize_to_url_if_moniker(&url_or_moniker))
|
||||
}
|
||||
|
||||
pub fn compute_keypair_path_setting(
|
||||
@@ -494,6 +499,7 @@ impl CliConfig<'_> {
|
||||
config.commitment = CommitmentConfig::recent();
|
||||
config.send_transaction_config = RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
preflight_commitment: Some(CommitmentConfig::recent().commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
};
|
||||
config
|
||||
@@ -569,15 +575,13 @@ pub fn parse_command(
|
||||
("supply", Some(matches)) => parse_supply(matches),
|
||||
("total-supply", Some(matches)) => parse_total_supply(matches),
|
||||
("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
|
||||
("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::LeaderSchedule,
|
||||
signers: vec![],
|
||||
}),
|
||||
("leader-schedule", Some(matches)) => parse_leader_schedule(matches),
|
||||
("ping", Some(matches)) => parse_cluster_ping(matches, default_signer, wallet_manager),
|
||||
("live-slots", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::LiveSlots,
|
||||
signers: vec![],
|
||||
}),
|
||||
("logs", Some(matches)) => parse_logs(matches, wallet_manager),
|
||||
("block-production", Some(matches)) => parse_show_block_production(matches),
|
||||
("gossip", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowGossip,
|
||||
@@ -609,17 +613,27 @@ pub fn parse_command(
|
||||
signers.push(signer);
|
||||
1
|
||||
});
|
||||
let use_deprecated_loader = matches.is_present("use_deprecated_loader");
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Deploy {
|
||||
program_location: matches.value_of("program_location").unwrap().to_string(),
|
||||
address,
|
||||
use_deprecated_loader,
|
||||
use_deprecated_loader: matches.is_present("use_deprecated_loader"),
|
||||
allow_excessive_balance: matches.is_present("allow_excessive_balance"),
|
||||
},
|
||||
signers,
|
||||
})
|
||||
}
|
||||
("program", Some(matches)) => {
|
||||
parse_program_subcommand(matches, default_signer, wallet_manager)
|
||||
}
|
||||
("wait-for-max-stake", Some(matches)) => {
|
||||
let max_stake_percent = value_t_or_exit!(matches, "max_percent", f32);
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::WaitForMaxStake { max_stake_percent },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
// Stake Commands
|
||||
("create-stake-account", Some(matches)) => {
|
||||
parse_stake_create_account(matches, default_signer, wallet_manager)
|
||||
@@ -1020,360 +1034,6 @@ fn process_show_account(
|
||||
Ok(account_string)
|
||||
}
|
||||
|
||||
fn send_and_confirm_transactions_with_spinner<T: Signers>(
|
||||
rpc_client: &RpcClient,
|
||||
mut transactions: Vec<Transaction>,
|
||||
signer_keys: &T,
|
||||
commitment: CommitmentConfig,
|
||||
mut last_valid_slot: Slot,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
let mut send_retries = 5;
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
|
||||
// Send all transactions
|
||||
let mut pending_transactions = HashMap::new();
|
||||
let num_transactions = transactions.len();
|
||||
for transaction in transactions {
|
||||
if cfg!(not(test)) {
|
||||
// Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
|
||||
// when all the write transactions modify the same program account (eg, deploying a
|
||||
// new program)
|
||||
sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
|
||||
}
|
||||
|
||||
let _result = rpc_client
|
||||
.send_transaction_with_config(
|
||||
&transaction,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
.ok();
|
||||
pending_transactions.insert(transaction.signatures[0], transaction);
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions sent",
|
||||
pending_transactions.len(),
|
||||
num_transactions
|
||||
));
|
||||
}
|
||||
|
||||
// Collect statuses for all the transactions, drop those that are confirmed
|
||||
while status_retries > 0 {
|
||||
status_retries -= 1;
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions confirmed",
|
||||
num_transactions - pending_transactions.len(),
|
||||
num_transactions
|
||||
));
|
||||
|
||||
let mut statuses = vec![];
|
||||
let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>();
|
||||
for pending_signatures_chunk in
|
||||
pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS - 1)
|
||||
{
|
||||
statuses.extend(
|
||||
rpc_client
|
||||
.get_signature_statuses_with_history(pending_signatures_chunk)?
|
||||
.value
|
||||
.into_iter(),
|
||||
);
|
||||
}
|
||||
assert_eq!(statuses.len(), pending_signatures.len());
|
||||
|
||||
for (signature, status) in pending_signatures.into_iter().zip(statuses.into_iter()) {
|
||||
if let Some(status) = status {
|
||||
if status.confirmations.is_none() || status.confirmations.unwrap() > 1 {
|
||||
let _ = pending_transactions.remove(&signature);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pending_transactions.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let slot = rpc_client.get_slot_with_commitment(commitment)?;
|
||||
if slot > last_valid_slot {
|
||||
break;
|
||||
}
|
||||
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
}
|
||||
|
||||
if send_retries == 0 {
|
||||
return Err("Transactions failed".into());
|
||||
}
|
||||
send_retries -= 1;
|
||||
|
||||
// Re-sign any failed transactions with a new blockhash and retry
|
||||
let (blockhash, _fee_calculator, new_last_valid_slot) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(commitment)?
|
||||
.value;
|
||||
last_valid_slot = new_last_valid_slot;
|
||||
transactions = vec![];
|
||||
for (_, mut transaction) in pending_transactions.into_iter() {
|
||||
transaction.try_sign(signer_keys, blockhash)?;
|
||||
transactions.push(transaction);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_deploy(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
program_location: &str,
|
||||
address: Option<SignerIndex>,
|
||||
use_deprecated_loader: bool,
|
||||
) -> ProcessResult {
|
||||
const WORDS: usize = 12;
|
||||
// Create ephemeral keypair to use for program address, if not provided
|
||||
let mnemonic = Mnemonic::new(MnemonicType::for_word_count(WORDS)?, Language::English);
|
||||
let seed = Seed::new(&mnemonic, "");
|
||||
let new_keypair = keypair_from_seed(seed.as_bytes())?;
|
||||
|
||||
let result = do_process_deploy(
|
||||
rpc_client,
|
||||
config,
|
||||
program_location,
|
||||
address,
|
||||
use_deprecated_loader,
|
||||
new_keypair,
|
||||
);
|
||||
|
||||
if result.is_err() && address.is_none() {
|
||||
let phrase: &str = mnemonic.phrase();
|
||||
let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap();
|
||||
eprintln!(
|
||||
"{}\nTo reuse this address, recover the ephemeral keypair file with",
|
||||
divider
|
||||
);
|
||||
eprintln!(
|
||||
"`solana-keygen recover` and the following {}-word seed phrase,",
|
||||
WORDS
|
||||
);
|
||||
eprintln!(
|
||||
"then pass it as the [ADDRESS_SIGNER] argument to `solana deploy ...`\n{}\n{}\n{}",
|
||||
divider, phrase, divider
|
||||
);
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn do_process_deploy(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
program_location: &str,
|
||||
address: Option<SignerIndex>,
|
||||
use_deprecated_loader: bool,
|
||||
new_keypair: Keypair,
|
||||
) -> ProcessResult {
|
||||
let program_id = if let Some(i) = address {
|
||||
config.signers[i]
|
||||
} else {
|
||||
&new_keypair
|
||||
};
|
||||
let mut file = File::open(program_location).map_err(|err| {
|
||||
CliError::DynamicProgramError(format!("Unable to open program file: {}", err))
|
||||
})?;
|
||||
let mut program_data = Vec::new();
|
||||
file.read_to_end(&mut program_data).map_err(|err| {
|
||||
CliError::DynamicProgramError(format!("Unable to read program file: {}", err))
|
||||
})?;
|
||||
|
||||
EbpfVm::create_executable_from_elf(&program_data, Some(|x| bpf_verifier::check(x, true)))
|
||||
.map_err(|err| CliError::DynamicProgramError(format!("ELF error: {}", err)))?;
|
||||
|
||||
let loader_id = if use_deprecated_loader {
|
||||
bpf_loader_deprecated::id()
|
||||
} else {
|
||||
bpf_loader::id()
|
||||
};
|
||||
|
||||
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(program_data.len())?;
|
||||
let signers = [config.signers[0], program_id];
|
||||
|
||||
// Check program account to see if partial initialization has occurred
|
||||
let (initial_instructions, balance_needed) = if let Some(account) = rpc_client
|
||||
.get_account_with_commitment(&program_id.pubkey(), config.commitment)?
|
||||
.value
|
||||
{
|
||||
let mut instructions: Vec<Instruction> = vec![];
|
||||
let mut balance_needed = 0;
|
||||
if account.executable {
|
||||
return Err(CliError::DynamicProgramError(
|
||||
"Program account is already executable".to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
if account.owner != loader_id && !system_program::check_id(&account.owner) {
|
||||
return Err(CliError::DynamicProgramError(
|
||||
"Program account is already owned by another account".to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
if account.data.is_empty() && system_program::check_id(&account.owner) {
|
||||
instructions.push(system_instruction::allocate(
|
||||
&program_id.pubkey(),
|
||||
program_data.len() as u64,
|
||||
));
|
||||
if account.owner != loader_id {
|
||||
instructions.push(system_instruction::assign(&program_id.pubkey(), &loader_id));
|
||||
}
|
||||
}
|
||||
if account.lamports < minimum_balance {
|
||||
let balance = minimum_balance - account.lamports;
|
||||
instructions.push(system_instruction::transfer(
|
||||
&config.signers[0].pubkey(),
|
||||
&program_id.pubkey(),
|
||||
balance,
|
||||
));
|
||||
balance_needed = balance;
|
||||
}
|
||||
(instructions, balance_needed)
|
||||
} else {
|
||||
(
|
||||
vec![system_instruction::create_account(
|
||||
&config.signers[0].pubkey(),
|
||||
&program_id.pubkey(),
|
||||
minimum_balance,
|
||||
program_data.len() as u64,
|
||||
&loader_id,
|
||||
)],
|
||||
minimum_balance,
|
||||
)
|
||||
};
|
||||
let initial_message = if !initial_instructions.is_empty() {
|
||||
Some(Message::new(
|
||||
&initial_instructions,
|
||||
Some(&config.signers[0].pubkey()),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Build transactions to calculate fees
|
||||
let mut messages: Vec<&Message> = Vec::new();
|
||||
|
||||
if let Some(message) = &initial_message {
|
||||
messages.push(message);
|
||||
}
|
||||
|
||||
let mut write_messages = vec![];
|
||||
for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) {
|
||||
let instruction = loader_instruction::write(
|
||||
&program_id.pubkey(),
|
||||
&loader_id,
|
||||
(i * DATA_CHUNK_SIZE) as u32,
|
||||
chunk.to_vec(),
|
||||
);
|
||||
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
write_messages.push(message);
|
||||
}
|
||||
let mut write_message_refs = vec![];
|
||||
for message in write_messages.iter() {
|
||||
write_message_refs.push(message);
|
||||
}
|
||||
messages.append(&mut write_message_refs);
|
||||
|
||||
let instruction = loader_instruction::finalize(&program_id.pubkey(), &loader_id);
|
||||
let finalize_message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
messages.push(&finalize_message);
|
||||
|
||||
let (blockhash, fee_calculator, _) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(config.commitment)?
|
||||
.value;
|
||||
|
||||
check_account_for_spend_multiple_fees_with_commitment(
|
||||
rpc_client,
|
||||
&config.signers[0].pubkey(),
|
||||
balance_needed,
|
||||
&fee_calculator,
|
||||
&messages,
|
||||
config.commitment,
|
||||
)?;
|
||||
|
||||
if let Some(message) = initial_message {
|
||||
trace!("Creating or modifying program account");
|
||||
let num_required_signatures = message.header.num_required_signatures;
|
||||
|
||||
let mut initial_transaction = Transaction::new_unsigned(message);
|
||||
// Most of the initial_transaction combinations require both the fee-payer and new program
|
||||
// account to sign the transaction. One (transfer) only requires the fee-payer signature.
|
||||
// This check is to ensure signing does not fail on a KeypairPubkeyMismatch error from an
|
||||
// extraneous signature.
|
||||
if num_required_signatures == 2 {
|
||||
initial_transaction.try_sign(&signers, blockhash)?;
|
||||
} else {
|
||||
initial_transaction.try_sign(&[signers[0]], blockhash)?;
|
||||
}
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config(
|
||||
&initial_transaction,
|
||||
config.commitment,
|
||||
config.send_transaction_config,
|
||||
);
|
||||
log_instruction_custom_error::<SystemError>(result, &config).map_err(|err| {
|
||||
CliError::DynamicProgramError(format!("Program account allocation failed: {}", err))
|
||||
})?;
|
||||
}
|
||||
|
||||
let (blockhash, _, last_valid_slot) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(config.commitment)?
|
||||
.value;
|
||||
|
||||
let mut write_transactions = vec![];
|
||||
for message in write_messages.into_iter() {
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&signers, blockhash)?;
|
||||
write_transactions.push(tx);
|
||||
}
|
||||
|
||||
trace!("Writing program data");
|
||||
send_and_confirm_transactions_with_spinner(
|
||||
&rpc_client,
|
||||
write_transactions,
|
||||
&signers,
|
||||
config.commitment,
|
||||
last_valid_slot,
|
||||
)
|
||||
.map_err(|_| {
|
||||
CliError::DynamicProgramError("Data writes to program account failed".to_string())
|
||||
})?;
|
||||
|
||||
let (blockhash, _, _) = rpc_client
|
||||
.get_recent_blockhash_with_commitment(config.commitment)?
|
||||
.value;
|
||||
let mut finalize_tx = Transaction::new_unsigned(finalize_message);
|
||||
finalize_tx.try_sign(&signers, blockhash)?;
|
||||
|
||||
trace!("Finalizing program account");
|
||||
rpc_client
|
||||
.send_and_confirm_transaction_with_spinner_and_config(
|
||||
&finalize_tx,
|
||||
config.commitment,
|
||||
RpcSendTransactionConfig {
|
||||
skip_preflight: true,
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
.map_err(|e| {
|
||||
CliError::DynamicProgramError(format!("Finalizing program account failed: {}", e))
|
||||
})?;
|
||||
|
||||
Ok(json!({
|
||||
"programId": format!("{}", program_id.pubkey()),
|
||||
})
|
||||
.to_string())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn process_transfer(
|
||||
rpc_client: &RpcClient,
|
||||
@@ -1479,7 +1139,17 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
node_pubkey,
|
||||
node_json_rpc_url,
|
||||
follow,
|
||||
} => process_catchup(&rpc_client, config, node_pubkey, node_json_rpc_url, *follow),
|
||||
our_localhost_port,
|
||||
log,
|
||||
} => process_catchup(
|
||||
&rpc_client,
|
||||
config,
|
||||
*node_pubkey,
|
||||
node_json_rpc_url.clone(),
|
||||
*follow,
|
||||
*our_localhost_port,
|
||||
*log,
|
||||
),
|
||||
CliCommand::ClusterDate => process_cluster_date(&rpc_client, config),
|
||||
CliCommand::ClusterVersion => process_cluster_version(&rpc_client, config),
|
||||
CliCommand::CreateAddressWithSeed {
|
||||
@@ -1506,14 +1176,28 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::Inflation(inflation_subcommand) => {
|
||||
process_inflation_subcommand(&rpc_client, config, inflation_subcommand)
|
||||
}
|
||||
CliCommand::LeaderSchedule => process_leader_schedule(&rpc_client),
|
||||
CliCommand::LiveSlots => process_live_slots(&config.websocket_url),
|
||||
CliCommand::LeaderSchedule { epoch } => {
|
||||
process_leader_schedule(&rpc_client, config, *epoch)
|
||||
}
|
||||
CliCommand::LiveSlots => process_live_slots(&config),
|
||||
CliCommand::Logs { filter } => process_logs(&config, filter),
|
||||
CliCommand::Ping {
|
||||
lamports,
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
} => process_ping(&rpc_client, config, *lamports, interval, count, timeout),
|
||||
blockhash,
|
||||
print_timestamp,
|
||||
} => process_ping(
|
||||
&rpc_client,
|
||||
config,
|
||||
*lamports,
|
||||
interval,
|
||||
count,
|
||||
timeout,
|
||||
blockhash,
|
||||
*print_timestamp,
|
||||
),
|
||||
CliCommand::ShowBlockProduction { epoch, slot_limit } => {
|
||||
process_show_block_production(&rpc_client, config, *epoch, *slot_limit)
|
||||
}
|
||||
@@ -1527,6 +1211,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*use_lamports_unit,
|
||||
vote_account_pubkeys.as_deref(),
|
||||
),
|
||||
CliCommand::WaitForMaxStake { max_stake_percent } => {
|
||||
process_wait_for_max_stake(&rpc_client, config, *max_stake_percent)
|
||||
}
|
||||
CliCommand::ShowValidators { use_lamports_unit } => {
|
||||
process_show_validators(&rpc_client, config, *use_lamports_unit)
|
||||
}
|
||||
@@ -1619,13 +1306,18 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
program_location,
|
||||
address,
|
||||
use_deprecated_loader,
|
||||
allow_excessive_balance,
|
||||
} => process_deploy(
|
||||
&rpc_client,
|
||||
config,
|
||||
program_location,
|
||||
*address,
|
||||
*use_deprecated_loader,
|
||||
*allow_excessive_balance,
|
||||
),
|
||||
CliCommand::Program(program_subcommand) => {
|
||||
process_program_subcommand(&rpc_client, config, program_subcommand)
|
||||
}
|
||||
|
||||
// Stake Commands
|
||||
|
||||
@@ -2111,6 +1803,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.feature_subcommands()
|
||||
.inflation_subcommands()
|
||||
.nonce_subcommands()
|
||||
.program_subcommands()
|
||||
.stake_subcommands()
|
||||
.subcommand(
|
||||
SubCommand::with_name("airdrop")
|
||||
@@ -2241,7 +1934,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.arg(
|
||||
Arg::with_name("address_signer")
|
||||
.index(2)
|
||||
.value_name("ADDRESS_SIGNER")
|
||||
.value_name("PROGRAM_ADDRESS_SIGNER")
|
||||
.takes_value(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("The signer for the desired address of the program [default: new random address]")
|
||||
@@ -2253,6 +1946,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.hidden(true) // Don't document this argument to discourage its use
|
||||
.help("Use the deprecated BPF loader")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("allow_excessive_balance")
|
||||
.long("allow-excessive-deploy-account-balance")
|
||||
.takes_value(false)
|
||||
.help("Use the designated program id, even if the account already holds a large balance of SOL")
|
||||
)
|
||||
.arg(commitment_arg_with_default("max")),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -2358,7 +2057,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use serde_json::Value;
|
||||
use serde_json::{json, Value};
|
||||
use solana_client::{
|
||||
blockhash_query,
|
||||
mock_sender::SIGNATURE,
|
||||
@@ -2367,7 +2066,7 @@ mod tests {
|
||||
};
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, read_keypair_file, write_keypair_file, Presigner},
|
||||
signature::{keypair_from_seed, read_keypair_file, write_keypair_file, Keypair, Presigner},
|
||||
transaction::TransactionError,
|
||||
};
|
||||
use std::path::PathBuf;
|
||||
@@ -2621,6 +2320,7 @@ mod tests {
|
||||
program_location: "/Users/test/program.o".to_string(),
|
||||
address: None,
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
},
|
||||
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
|
||||
}
|
||||
@@ -2642,6 +2342,7 @@ mod tests {
|
||||
program_location: "/Users/test/program.o".to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&keypair_file).unwrap().into(),
|
||||
@@ -2955,13 +2656,14 @@ mod tests {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: None,
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
|
||||
let program_id = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.get("ProgramId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
@@ -2973,6 +2675,7 @@ mod tests {
|
||||
program_location: "bad/file/location.so".to_string(),
|
||||
address: None,
|
||||
use_deprecated_loader: false,
|
||||
allow_excessive_balance: false,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
}
|
||||
|
@@ -1,12 +1,17 @@
|
||||
use crate::{
|
||||
cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
|
||||
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
|
||||
stake::is_stake_program_v2_enabled,
|
||||
};
|
||||
use chrono::{Local, TimeZone};
|
||||
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use console::{style, Emoji};
|
||||
use solana_clap_utils::{
|
||||
commitment::commitment_arg, input_parsers::*, input_validators::*, keypair::DefaultSigner,
|
||||
commitment::{commitment_arg, commitment_arg_with_default},
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
keypair::DefaultSigner,
|
||||
offline::{blockhash_arg, BLOCKHASH_ARG},
|
||||
};
|
||||
use solana_cli_output::{
|
||||
display::{
|
||||
@@ -20,26 +25,28 @@ use solana_client::{
|
||||
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient},
|
||||
rpc_config::{
|
||||
RpcAccountInfoConfig, RpcLargestAccountsConfig, RpcLargestAccountsFilter,
|
||||
RpcProgramAccountsConfig,
|
||||
RpcProgramAccountsConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter,
|
||||
},
|
||||
rpc_filter,
|
||||
rpc_response::SlotInfo,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::from_account,
|
||||
account_utils::StateMut,
|
||||
clock::{self, Clock, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_schedule::Epoch,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::{self, Pubkey},
|
||||
rpc_port::DEFAULT_RPC_PORT_STR,
|
||||
signature::Signature,
|
||||
system_instruction, system_program,
|
||||
sysvar::{
|
||||
self,
|
||||
stake_history::{self, StakeHistory},
|
||||
Sysvar,
|
||||
stake_history::{self},
|
||||
},
|
||||
transaction::Transaction,
|
||||
};
|
||||
@@ -52,7 +59,7 @@ use std::{
|
||||
Arc,
|
||||
},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
time::{Duration, Instant, SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
static CHECK_MARK: Emoji = Emoji("✅ ", "");
|
||||
@@ -82,14 +89,14 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("node_pubkey")
|
||||
.index(1)
|
||||
.value_name("VALIDATOR_PUBKEY")
|
||||
.required(true),
|
||||
.value_name("OUR_VALIDATOR_PUBKEY")
|
||||
.required(false),
|
||||
"Identity pubkey of the validator"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("node_json_rpc_url")
|
||||
.index(2)
|
||||
.value_name("URL")
|
||||
.value_name("OUR_URL")
|
||||
.takes_value(true)
|
||||
.validator(is_url)
|
||||
.help("JSON RPC URL for validator, which is useful for validators with a private RPC service")
|
||||
@@ -100,6 +107,21 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.takes_value(false)
|
||||
.help("Continue reporting progress even after the validator has caught up"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("our_localhost")
|
||||
.long("our-localhost")
|
||||
.takes_value(false)
|
||||
.value_name("PORT")
|
||||
.default_value(&DEFAULT_RPC_PORT_STR)
|
||||
.validator(is_port)
|
||||
.help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("log")
|
||||
.long("log")
|
||||
.takes_value(false)
|
||||
.help("Don't update the progress inplace; instead show updates with its own new lines"),
|
||||
)
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -126,7 +148,17 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.help("Slot number of the block to query")
|
||||
)
|
||||
)
|
||||
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule"))
|
||||
.subcommand(SubCommand::with_name("leader-schedule")
|
||||
.about("Display leader schedule")
|
||||
.arg(
|
||||
Arg::with_name("epoch")
|
||||
.long("epoch")
|
||||
.takes_value(true)
|
||||
.value_name("EPOCH")
|
||||
.validator(is_epoch)
|
||||
.help("Epoch to show leader schedule for. (default: current)")
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("epoch-info")
|
||||
.about("Get information about the current epoch")
|
||||
@@ -208,6 +240,13 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.help("Stop after submitting count transactions"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("print_timestamp")
|
||||
.short("D")
|
||||
.long("print-timestamp")
|
||||
.takes_value(false)
|
||||
.help("Print timestamp (unix time + microseconds as in gettimeofday) before each line"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("lamports")
|
||||
.long("lamports")
|
||||
@@ -226,12 +265,33 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.default_value("15")
|
||||
.help("Wait up to timeout seconds for transaction confirmation"),
|
||||
)
|
||||
.arg(blockhash_arg())
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("live-slots")
|
||||
.about("Show information about the current slot progression"),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("logs")
|
||||
.about("Stream transaction logs")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("address")
|
||||
.index(1)
|
||||
.value_name("ADDRESS"),
|
||||
"Account address to monitor \
|
||||
[default: monitor all transactions except for votes] \
|
||||
")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("include_votes")
|
||||
.long("include-votes")
|
||||
.takes_value(false)
|
||||
.conflicts_with("address")
|
||||
.help("Include vote transactions when monitoring all transactions")
|
||||
)
|
||||
.arg(commitment_arg_with_default("singleGossip")),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("block-production")
|
||||
.about("Show information about block production")
|
||||
@@ -317,6 +377,17 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.help("Display the full transactions"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("wait-for-max-stake")
|
||||
.about("Wait for the max stake of any one node to drop below a percentage of total.")
|
||||
.arg(
|
||||
Arg::with_name("max_percent")
|
||||
.long("max-percent")
|
||||
.value_name("PERCENT")
|
||||
.takes_value(true)
|
||||
.index(1),
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -324,14 +395,31 @@ pub fn parse_catchup(
    matches: &ArgMatches<'_>,
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
    let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
    let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?;
    let mut our_localhost_port = value_t!(matches, "our_localhost", u16).ok();
    // if there is no explicitly specified --our-localhost,
    // disable the guess mode (= our_localhost_port)
    if matches.occurrences_of("our_localhost") == 0 {
        our_localhost_port = None
    }
    let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
    // requirement of node_pubkey is relaxed only if our_localhost_port
    if our_localhost_port.is_none() && node_pubkey.is_none() {
        return Err(CliError::BadParameter(
            "OUR_VALIDATOR_PUBKEY (and possibly OUR_URL) must be specified \
             unless --our-localhost is given"
                .into(),
        ));
    }
    let follow = matches.is_present("follow");
    let log = matches.is_present("log");
    Ok(CliCommandInfo {
        command: CliCommand::Catchup {
            node_pubkey,
            node_json_rpc_url,
            follow,
            our_localhost_port,
            log,
        },
        signers: vec![],
    })
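
Restated as a tiny standalone check (not the crate's code), the argument rule introduced above: a validator identity may be omitted only when --our-localhost lets the command guess it:

```rust
fn catchup_args_ok(node_pubkey: Option<&str>, our_localhost_port: Option<u16>) -> Result<(), String> {
    if our_localhost_port.is_none() && node_pubkey.is_none() {
        return Err(
            "OUR_VALIDATOR_PUBKEY (and possibly OUR_URL) must be specified \
             unless --our-localhost is given"
                .to_string(),
        );
    }
    Ok(())
}

fn main() {
    assert!(catchup_args_ok(None, Some(8899)).is_ok());
    assert!(catchup_args_ok(Some("<validator identity pubkey>"), None).is_ok());
    assert!(catchup_args_ok(None, None).is_err());
}
```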
@@ -350,12 +438,16 @@ pub fn parse_cluster_ping(
        None
    };
    let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
    let blockhash = value_of(matches, BLOCKHASH_ARG.name);
    let print_timestamp = matches.is_present("print_timestamp");
    Ok(CliCommandInfo {
        command: CliCommand::Ping {
            lamports,
            interval,
            count,
            timeout,
            blockhash,
            print_timestamp,
        },
        signers: vec![default_signer.signer_from_path(matches, wallet_manager)?],
    })
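
A sketch of the new --print-timestamp prefix described by the help text above (unix seconds plus microseconds, gettimeofday-style); the exact formatting in process_ping may differ:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Assumed format: "[<seconds>.<microseconds>] " printed before each ping line.
fn timestamp_prefix() -> String {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    format!("[{}.{:06}] ", now.as_secs(), now.subsec_micros())
}

fn main() {
    println!("{}1 lamport(s) transferred: seq=0", timestamp_prefix());
}
```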
@@ -507,38 +599,76 @@ pub fn parse_transaction_history(
|
||||
pub fn process_catchup(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
node_pubkey: &Pubkey,
|
||||
node_json_rpc_url: &Option<String>,
|
||||
node_pubkey: Option<Pubkey>,
|
||||
mut node_json_rpc_url: Option<String>,
|
||||
follow: bool,
|
||||
our_localhost_port: Option<u16>,
|
||||
log: bool,
|
||||
) -> ProcessResult {
|
||||
let sleep_interval = 5;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Connecting...");
|
||||
|
||||
let node_client = if let Some(node_json_rpc_url) = node_json_rpc_url {
|
||||
RpcClient::new(node_json_rpc_url.to_string())
|
||||
} else {
|
||||
let rpc_addr = loop {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
if let Some(contact_info) = cluster_nodes
|
||||
.iter()
|
||||
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
|
||||
{
|
||||
if let Some(rpc_addr) = contact_info.rpc {
|
||||
break rpc_addr;
|
||||
}
|
||||
progress_bar.set_message(&format!("RPC service not found for {}", node_pubkey));
|
||||
} else {
|
||||
progress_bar.set_message(&format!(
|
||||
"Contact information not found for {}",
|
||||
node_pubkey
|
||||
));
|
||||
}
|
||||
sleep(Duration::from_secs(sleep_interval as u64));
|
||||
};
|
||||
if let Some(our_localhost_port) = our_localhost_port {
|
||||
let gussed_default = Some(format!("http://localhost:{}", our_localhost_port));
|
||||
if node_json_rpc_url.is_some() && node_json_rpc_url != gussed_default {
|
||||
// go to new line to leave this message on console
|
||||
println!(
|
||||
"Prefering explicitly given rpc ({}) as us, \
|
||||
although --our-localhost is given\n",
|
||||
node_json_rpc_url.as_ref().unwrap()
|
||||
);
|
||||
} else {
|
||||
node_json_rpc_url = gussed_default;
|
||||
}
|
||||
}
|
||||
|
||||
RpcClient::new_socket(rpc_addr)
|
||||
let (node_client, node_pubkey) = if our_localhost_port.is_some() {
|
||||
let client = RpcClient::new(node_json_rpc_url.unwrap());
|
||||
let guessed_default = Some(client.get_identity()?);
|
||||
(
|
||||
client,
|
||||
(if node_pubkey.is_some() && node_pubkey != guessed_default {
|
||||
// go to new line to leave this message on console
|
||||
println!(
|
||||
"Prefering explicitly given node pubkey ({}) as us, \
|
||||
although --our-localhost is given\n",
|
||||
node_pubkey.unwrap()
|
||||
);
|
||||
node_pubkey
|
||||
} else {
|
||||
guessed_default
|
||||
})
|
||||
.unwrap(),
|
||||
)
|
||||
} else if let Some(node_pubkey) = node_pubkey {
|
||||
if let Some(node_json_rpc_url) = node_json_rpc_url {
|
||||
(RpcClient::new(node_json_rpc_url), node_pubkey)
|
||||
} else {
|
||||
let rpc_addr = loop {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
if let Some(contact_info) = cluster_nodes
|
||||
.iter()
|
||||
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
|
||||
{
|
||||
if let Some(rpc_addr) = contact_info.rpc {
|
||||
break rpc_addr;
|
||||
}
|
||||
progress_bar.set_message(&format!("RPC service not found for {}", node_pubkey));
|
||||
} else {
|
||||
progress_bar.set_message(&format!(
|
||||
"Contact information not found for {}",
|
||||
node_pubkey
|
||||
));
|
||||
}
|
||||
sleep(Duration::from_secs(sleep_interval as u64));
|
||||
};
|
||||
|
||||
(RpcClient::new_socket(rpc_addr), node_pubkey)
|
||||
}
|
||||
} else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
let reported_node_pubkey = loop {
|
||||
@@ -555,7 +685,7 @@ pub fn process_catchup(
|
||||
}
|
||||
};
|
||||
|
||||
if reported_node_pubkey != *node_pubkey {
|
||||
if reported_node_pubkey != node_pubkey {
|
||||
return Err(format!(
|
||||
"The identity reported by node RPC URL does not match. Expected: {:?}. Reported: {:?}",
|
||||
node_pubkey, reported_node_pubkey
|
||||
@@ -563,15 +693,41 @@ pub fn process_catchup(
|
||||
.into());
|
||||
}
|
||||
|
||||
if rpc_client.get_identity()? == *node_pubkey {
|
||||
if rpc_client.get_identity()? == node_pubkey {
|
||||
return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into());
|
||||
}
|
||||
|
||||
let mut previous_rpc_slot = std::u64::MAX;
|
||||
let mut previous_slot_distance = 0;
|
||||
let mut retry_count = 0;
|
||||
let max_retry_count = 5;
|
||||
let mut get_slot_while_retrying = |client: &RpcClient| {
|
||||
loop {
|
||||
match client.get_slot_with_commitment(config.commitment) {
|
||||
Ok(r) => {
|
||||
retry_count = 0;
|
||||
return Ok(r);
|
||||
}
|
||||
Err(e) => {
|
||||
if retry_count >= max_retry_count {
|
||||
return Err(e);
|
||||
}
|
||||
retry_count += 1;
|
||||
if log {
|
||||
// go to new line to leave this message on console
|
||||
println!("Retrying({}/{}): {}\n", retry_count, max_retry_count, e);
|
||||
}
|
||||
sleep(Duration::from_secs(1));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
loop {
|
||||
let rpc_slot = rpc_client.get_slot_with_commitment(config.commitment)?;
|
||||
let node_slot = node_client.get_slot_with_commitment(config.commitment)?;
|
||||
// humbly retry; the reference node (rpc_client) could be spotty,
|
||||
// especially if pointing to api.mainnet-beta.solana.com at times
|
||||
let rpc_slot = get_slot_while_retrying(rpc_client)?;
|
||||
let node_slot = get_slot_while_retrying(&node_client)?;
|
||||
if !follow && node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
|
||||
progress_bar.finish_and_clear();
|
||||
return Ok(format!(
|
||||
@@ -594,15 +750,21 @@ pub fn process_catchup(
|
||||
};
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"{} slots behind (us:{} them:{}){}",
|
||||
slot_distance,
|
||||
"{} slot(s) {} (us:{} them:{}){}",
|
||||
slot_distance.abs(),
|
||||
if slot_distance >= 0 {
|
||||
"behind"
|
||||
} else {
|
||||
"ahead"
|
||||
},
|
||||
node_slot,
|
||||
rpc_slot,
|
||||
if slot_distance == 0 || previous_rpc_slot == std::u64::MAX {
|
||||
"".to_string()
|
||||
} else {
|
||||
format!(
|
||||
", {} at {:.1} slots/second{}",
|
||||
", {} node is {} at {:.1} slots/second{}",
|
||||
if slot_distance >= 0 { "our" } else { "their" },
|
||||
if slots_per_second < 0.0 {
|
||||
"falling behind"
|
||||
} else {
|
||||
@@ -611,8 +773,11 @@ pub fn process_catchup(
|
||||
slots_per_second,
|
||||
time_remaining
|
||||
)
|
||||
}
|
||||
},
|
||||
));
|
||||
if log {
|
||||
println!();
|
||||
}
|
||||
|
||||
sleep(Duration::from_secs(sleep_interval as u64));
|
||||
previous_rpc_slot = rpc_slot;
|
||||
@@ -624,7 +789,7 @@ pub fn process_cluster_date(rpc_client: &RpcClient, config: &CliConfig) -> Proce
|
||||
let result = rpc_client
|
||||
.get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::default())?;
|
||||
if let Some(clock_account) = result.value {
|
||||
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
|
||||
let clock: Clock = from_account(&clock_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
|
||||
})?;
|
||||
let block_time = CliBlockTime {
|
||||
@@ -664,9 +829,27 @@ pub fn process_first_available_block(rpc_client: &RpcClient) -> ProcessResult {
|
||||
Ok(format!("{}", first_available_block))
|
||||
}
|
||||
|
||||
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
|
||||
pub fn parse_leader_schedule(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let epoch = value_of(matches, "epoch");
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::LeaderSchedule { epoch },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
pub fn process_leader_schedule(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
epoch: Option<Epoch>,
|
||||
) -> ProcessResult {
|
||||
let epoch_info = rpc_client.get_epoch_info()?;
|
||||
let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
|
||||
let epoch = epoch.unwrap_or(epoch_info.epoch);
|
||||
if epoch > epoch_info.epoch {
|
||||
return Err(format!("Epoch {} is in the future", epoch).into());
|
||||
}
|
||||
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
|
||||
|
||||
let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot_in_epoch))?;
|
||||
if leader_schedule.is_none() {
|
||||
@@ -688,15 +871,18 @@ pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
|
||||
}
|
||||
}
|
||||
|
||||
let mut leader_schedule_entries = vec![];
|
||||
for (slot_index, leader) in leader_per_slot_index.iter().enumerate() {
|
||||
println!(
|
||||
" {:<15} {:<44}",
|
||||
first_slot_in_epoch + slot_index as u64,
|
||||
leader
|
||||
);
|
||||
leader_schedule_entries.push(CliLeaderScheduleEntry {
|
||||
slot: first_slot_in_epoch + slot_index as u64,
|
||||
leader: leader.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok("".to_string())
|
||||
Ok(config.output_format.formatted_string(&CliLeaderSchedule {
|
||||
epoch,
|
||||
leader_schedule_entries,
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn process_get_block(
|
||||
@@ -751,8 +937,9 @@ pub fn process_get_block(
|
||||
format!(
|
||||
"◎{:<19.9} {:>13.9}%",
|
||||
lamports_to_sol(reward.post_balance),
|
||||
reward.lamports.abs() as f64
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64)
|
||||
(reward.lamports.abs() as f64
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64))
|
||||
* 100.0
|
||||
)
|
||||
}
|
||||
);
|
||||
@@ -837,7 +1024,7 @@ pub fn process_show_block_production(
|
||||
slot_limit: Option<u64>,
|
||||
) -> ProcessResult {
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::root())?;
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::max())?;
|
||||
|
||||
let epoch = epoch.unwrap_or(epoch_info.epoch);
|
||||
if epoch > epoch_info.epoch {
|
||||
@@ -1025,6 +1212,8 @@ pub fn process_ping(
|
||||
interval: &Duration,
|
||||
count: &Option<u64>,
|
||||
timeout: &Duration,
|
||||
fixed_blockhash: &Option<Hash>,
|
||||
print_timestamp: bool,
|
||||
) -> ProcessResult {
|
||||
println_name_value("Source Account:", &config.signers[0].pubkey().to_string());
|
||||
println!();
|
||||
@@ -1042,9 +1231,21 @@ pub fn process_ping(
|
||||
let (mut blockhash, mut fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let mut blockhash_transaction_count = 0;
|
||||
let mut blockhash_acquired = Instant::now();
|
||||
if let Some(fixed_blockhash) = fixed_blockhash {
|
||||
let blockhash_origin = if *fixed_blockhash != Hash::default() {
|
||||
blockhash = *fixed_blockhash;
|
||||
"supplied from cli arguments"
|
||||
} else {
|
||||
"fetched from cluster"
|
||||
};
|
||||
println!(
|
||||
"Fixed blockhash is used: {} ({})",
|
||||
blockhash, blockhash_origin
|
||||
);
|
||||
}
|
||||
'mainloop: for seq in 0..count.unwrap_or(std::u64::MAX) {
|
||||
let now = Instant::now();
|
||||
if now.duration_since(blockhash_acquired).as_secs() > 60 {
|
||||
if fixed_blockhash.is_none() && now.duration_since(blockhash_acquired).as_secs() > 60 {
|
||||
// Fetch a new blockhash every minute
|
||||
let (new_blockhash, new_fee_calculator) = rpc_client.get_new_blockhash(&blockhash)?;
|
||||
blockhash = new_blockhash;
|
||||
@@ -1075,6 +1276,18 @@ pub fn process_ping(
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, blockhash)?;
|
||||
|
||||
let timestamp = || {
|
||||
let micros = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_micros();
|
||||
if print_timestamp {
|
||||
format!("[{}.{:06}] ", micros / 1_000_000, micros % 1_000_000)
|
||||
} else {
|
||||
format!("")
|
||||
}
|
||||
};
|
||||
|
||||
match rpc_client.send_transaction(&tx) {
|
||||
Ok(signature) => {
|
||||
let transaction_sent = Instant::now();
|
||||
@@ -1088,15 +1301,20 @@ pub fn process_ping(
|
||||
let elapsed_time_millis = elapsed_time.as_millis() as u64;
|
||||
confirmation_time.push_back(elapsed_time_millis);
|
||||
println!(
|
||||
"{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
|
||||
"{}{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
|
||||
timestamp(),
|
||||
CHECK_MARK, lamports, seq, elapsed_time_millis, signature
|
||||
);
|
||||
confirmed_count += 1;
|
||||
}
|
||||
Err(err) => {
|
||||
println!(
|
||||
"{}Transaction failed: seq={:<3} error={:?} signature={}",
|
||||
CROSS_MARK, seq, err, signature
|
||||
"{}{}Transaction failed: seq={:<3} error={:?} signature={}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
err,
|
||||
signature
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1105,8 +1323,11 @@ pub fn process_ping(
|
||||
|
||||
if elapsed_time >= *timeout {
|
||||
println!(
|
||||
"{}Confirmation timeout: seq={:<3} signature={}",
|
||||
CROSS_MARK, seq, signature
|
||||
"{}{}Confirmation timeout: seq={:<3} signature={}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
signature
|
||||
);
|
||||
break;
|
||||
}
|
||||
@@ -1124,8 +1345,11 @@ pub fn process_ping(
|
||||
}
|
||||
Err(err) => {
|
||||
println!(
|
||||
"{}Submit failed: seq={:<3} error={:?}",
|
||||
CROSS_MARK, seq, err
|
||||
"{}{}Submit failed: seq={:<3} error={:?}",
|
||||
timestamp(),
|
||||
CROSS_MARK,
|
||||
seq,
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1160,24 +1384,83 @@ pub fn process_ping(
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_live_slots(url: &str) -> ProcessResult {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
pub fn parse_logs(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let address = pubkey_of_signer(matches, "address", wallet_manager)?;
|
||||
let include_votes = matches.is_present("include_votes");
|
||||
|
||||
// Disable Ctrl+C handler as sometimes the PubsubClient shutdown can stall. Also it doesn't
|
||||
// really matter that the shutdown is clean because the process is terminating.
|
||||
/*
|
||||
let exit_clone = exit.clone();
|
||||
ctrlc::set_handler(move || {
|
||||
exit_clone.store(true, Ordering::Relaxed);
|
||||
})?;
|
||||
*/
|
||||
let filter = match address {
|
||||
None => {
|
||||
if include_votes {
|
||||
RpcTransactionLogsFilter::AllWithVotes
|
||||
} else {
|
||||
RpcTransactionLogsFilter::All
|
||||
}
|
||||
}
|
||||
Some(address) => RpcTransactionLogsFilter::Mentions(vec![address.to_string()]),
|
||||
};
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Logs { filter },
|
||||
signers: vec![],
|
||||
})
|
||||
}
|
||||
|
||||
pub fn process_logs(config: &CliConfig, filter: &RpcTransactionLogsFilter) -> ProcessResult {
|
||||
println!(
|
||||
"Streaming transaction logs{}. {:?} commitment",
|
||||
match filter {
|
||||
RpcTransactionLogsFilter::All => "".into(),
|
||||
RpcTransactionLogsFilter::AllWithVotes => " (including votes)".into(),
|
||||
RpcTransactionLogsFilter::Mentions(addresses) =>
|
||||
format!(" mentioning {}", addresses.join(",")),
|
||||
},
|
||||
config.commitment.commitment
|
||||
);
|
||||
|
||||
let (_client, receiver) = PubsubClient::logs_subscribe(
|
||||
&config.websocket_url,
|
||||
filter.clone(),
|
||||
RpcTransactionLogsConfig {
|
||||
commitment: Some(config.commitment),
|
||||
},
|
||||
)?;
|
||||
|
||||
loop {
|
||||
match receiver.recv() {
|
||||
Ok(logs) => {
|
||||
println!("Transaction executed in slot {}:", logs.context.slot);
|
||||
println!(" Signature: {}", logs.value.signature);
|
||||
println!(
|
||||
" Status: {}",
|
||||
logs.value
|
||||
.err
|
||||
.map(|err| err.to_string())
|
||||
.unwrap_or_else(|| "Ok".to_string())
|
||||
);
|
||||
println!(" Log Messages:");
|
||||
for log in logs.value.logs {
|
||||
println!(" {}", log);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
return Ok(format!("Disconnected: {}", err));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_live_slots(config: &CliConfig) -> ProcessResult {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let mut current: Option<SlotInfo> = None;
|
||||
let mut message = "".to_string();
|
||||
|
||||
let slot_progress = new_spinner_progress_bar();
|
||||
slot_progress.set_message("Connecting...");
|
||||
let (mut client, receiver) = PubsubClient::slot_subscribe(url)?;
|
||||
let (mut client, receiver) = PubsubClient::slot_subscribe(&config.websocket_url)?;
|
||||
slot_progress.set_message("Connected.");
|
||||
|
||||
let spacer = "|";
|
||||
@@ -1341,14 +1624,16 @@ pub fn process_show_stakes(
|
||||
.get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?;
|
||||
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
|
||||
let clock_account = rpc_client.get_account(&sysvar::clock::id())?;
|
||||
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
|
||||
let clock: Clock = from_account(&clock_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
|
||||
})?;
|
||||
progress_bar.finish_and_clear();
|
||||
|
||||
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
|
||||
let stake_history = from_account(&stake_history_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
|
||||
})?;
|
||||
// At v1.6, this check can be removed and simply passed as `true`
|
||||
let stake_program_v2_enabled = is_stake_program_v2_enabled(rpc_client);
|
||||
|
||||
let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
|
||||
for (stake_pubkey, stake_account) in all_stake_accounts {
|
||||
@@ -1364,6 +1649,7 @@ pub fn process_show_stakes(
|
||||
use_lamports_unit,
|
||||
&stake_history,
|
||||
&clock,
|
||||
stake_program_v2_enabled,
|
||||
),
|
||||
});
|
||||
}
|
||||
@@ -1382,6 +1668,7 @@ pub fn process_show_stakes(
|
||||
use_lamports_unit,
|
||||
&stake_history,
|
||||
&clock,
|
||||
stake_program_v2_enabled,
|
||||
),
|
||||
});
|
||||
}
|
||||
@@ -1395,6 +1682,16 @@ pub fn process_show_stakes(
|
||||
.formatted_string(&CliStakeVec::new(stake_accounts)))
|
||||
}
|
||||
|
||||
pub fn process_wait_for_max_stake(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
max_stake_percent: f32,
|
||||
) -> ProcessResult {
|
||||
let now = std::time::Instant::now();
|
||||
rpc_client.wait_for_max_stake(config.commitment, max_stake_percent)?;
|
||||
Ok(format!("Done waiting, took: {}s", now.elapsed().as_secs()))
|
||||
}
|
||||
|
||||
pub fn process_show_validators(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@@ -1555,6 +1852,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::cli::{app, parse_command};
|
||||
use solana_sdk::signature::{write_keypair, Keypair};
|
||||
use std::str::FromStr;
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
fn make_tmp_file() -> (String, NamedTempFile) {
|
||||
@@ -1690,8 +1988,11 @@ mod tests {
|
||||
"2",
|
||||
"-t",
|
||||
"3",
|
||||
"-D",
|
||||
"--commitment",
|
||||
"max",
|
||||
"--blockhash",
|
||||
"4CCNp28j6AhGq7PkjPDP4wbQWBS8LLbQin2xV5n8frKX",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_ping, &default_signer, &mut None).unwrap(),
|
||||
@@ -1701,6 +2002,10 @@ mod tests {
|
||||
interval: Duration::from_secs(1),
|
||||
count: Some(2),
|
||||
timeout: Duration::from_secs(3),
|
||||
blockhash: Some(
|
||||
Hash::from_str("4CCNp28j6AhGq7PkjPDP4wbQWBS8LLbQin2xV5n8frKX").unwrap()
|
||||
),
|
||||
print_timestamp: true,
|
||||
},
|
||||
signers: vec![default_keypair.into()],
|
||||
}
|
||||
|
@@ -9,12 +9,13 @@ use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*};
|
||||
use solana_cli_output::{QuietDisplay, VerboseDisplay};
|
||||
use solana_client::{client_error::ClientError, rpc_client::RpcClient};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_runtime::{
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
feature::{self, Feature},
|
||||
feature_set::FEATURE_NAMES,
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::Slot, message::Message, pubkey::Pubkey, system_instruction, transaction::Transaction,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{collections::HashMap, fmt, sync::Arc};
|
||||
|
||||
@@ -241,7 +242,22 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
|
||||
.unwrap_or(false);
|
||||
|
||||
if !feature_activation_allowed && !quiet {
|
||||
println!("{}", style("Stake By Feature Set:").bold());
|
||||
if active_stake_by_feature_set.get(&my_feature_set).is_none() {
|
||||
println!(
|
||||
"{}",
|
||||
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
|
||||
.bold());
|
||||
} else {
|
||||
println!(
|
||||
"{}",
|
||||
style("To activate features the stake must be >= 95%").bold()
|
||||
);
|
||||
}
|
||||
println!(
|
||||
"{}",
|
||||
style(format!("Tool Feture Set: {}", my_feature_set)).bold()
|
||||
);
|
||||
println!("{}", style("Cluster Feature Sets and Stakes:").bold());
|
||||
for (feature_set, percentage) in active_stake_by_feature_set.iter() {
|
||||
if *feature_set == 0 {
|
||||
println!("unknown - {}%", percentage);
|
||||
@@ -279,7 +295,7 @@ fn process_status(
|
||||
let feature_id = &feature_ids[i];
|
||||
let feature_name = FEATURE_NAMES.get(feature_id).unwrap();
|
||||
if let Some(account) = account {
|
||||
if let Some(feature) = Feature::from_account(&account) {
|
||||
if let Some(feature) = feature::from_account(&account) {
|
||||
let feature_status = match feature.activated_at {
|
||||
None => CliFeatureStatus::Pending,
|
||||
Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
|
||||
@@ -320,7 +336,7 @@ fn process_activate(
|
||||
.next()
|
||||
.unwrap();
|
||||
if let Some(account) = account {
|
||||
if Feature::from_account(&account).is_some() {
|
||||
if feature::from_account(&account).is_some() {
|
||||
return Err(format!("{} has already been activated", feature_id).into());
|
||||
}
|
||||
}
|
||||
@@ -340,15 +356,11 @@ fn process_activate(
|
||||
&config.signers[0].pubkey(),
|
||||
|lamports| {
|
||||
Message::new(
|
||||
&[
|
||||
system_instruction::transfer(
|
||||
&config.signers[0].pubkey(),
|
||||
&feature_id,
|
||||
lamports,
|
||||
),
|
||||
system_instruction::allocate(&feature_id, Feature::size_of() as u64),
|
||||
system_instruction::assign(&feature_id, &feature::id()),
|
||||
],
|
||||
&feature::activate_with_lamports(
|
||||
&feature_id,
|
||||
&config.signers[0].pubkey(),
|
||||
lamports,
|
||||
),
|
||||
Some(&config.signers[0].pubkey()),
|
||||
)
|
||||
},
|
||||
|
@@ -1,7 +1,7 @@
|
||||
use crate::cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult};
|
||||
use clap::{App, ArgMatches, SubCommand};
|
||||
use console::style;
|
||||
use solana_clap_utils::keypair::*;
|
||||
use solana_cli_output::CliInflation;
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use std::sync::Arc;
|
||||
@@ -34,56 +34,18 @@ pub fn parse_inflation_subcommand(
|
||||
|
||||
pub fn process_inflation_subcommand(
|
||||
rpc_client: &RpcClient,
|
||||
_config: &CliConfig,
|
||||
config: &CliConfig,
|
||||
inflation_subcommand: &InflationCliCommand,
|
||||
) -> ProcessResult {
|
||||
assert_eq!(*inflation_subcommand, InflationCliCommand::Show);
|
||||
|
||||
let governor = rpc_client.get_inflation_governor()?;
|
||||
let current_inflation_rate = rpc_client.get_inflation_rate()?;
|
||||
let current_rate = rpc_client.get_inflation_rate()?;
|
||||
|
||||
println!("{}", style("Inflation Governor:").bold());
|
||||
if (governor.initial - governor.terminal).abs() < f64::EPSILON {
|
||||
println!(
|
||||
"Fixed APR: {:>5.2}%",
|
||||
governor.terminal * 100.
|
||||
);
|
||||
} else {
|
||||
println!("Initial APR: {:>5.2}%", governor.initial * 100.);
|
||||
println!(
|
||||
"Terminal APR: {:>5.2}%",
|
||||
governor.terminal * 100.
|
||||
);
|
||||
println!("Rate reduction per year: {:>5.2}%", governor.taper * 100.);
|
||||
}
|
||||
if governor.foundation_term > 0. {
|
||||
println!("Foundation percentage: {:>5.2}%", governor.foundation);
|
||||
println!(
|
||||
"Foundation term: {:.1} years",
|
||||
governor.foundation_term
|
||||
);
|
||||
}
|
||||
let inflation = CliInflation {
|
||||
governor,
|
||||
current_rate,
|
||||
};
|
||||
|
||||
println!(
|
||||
"\n{}",
|
||||
style(format!(
|
||||
"Inflation for Epoch {}:",
|
||||
current_inflation_rate.epoch
|
||||
))
|
||||
.bold()
|
||||
);
|
||||
println!(
|
||||
"Total APR: {:>5.2}%",
|
||||
current_inflation_rate.total * 100.
|
||||
);
|
||||
println!(
|
||||
"Staking APR: {:>5.2}%",
|
||||
current_inflation_rate.validator * 100.
|
||||
);
|
||||
println!(
|
||||
"Foundation APR: {:>5.2}%",
|
||||
current_inflation_rate.foundation * 100.
|
||||
);
|
||||
|
||||
Ok("".to_string())
|
||||
Ok(config.output_format.formatted_string(&inflation))
|
||||
}
|
||||
|
@@ -26,6 +26,8 @@ pub mod cluster_query;
|
||||
pub mod feature;
|
||||
pub mod inflation;
|
||||
pub mod nonce;
|
||||
pub mod program;
|
||||
pub mod send_tpu;
|
||||
pub mod spend_utils;
|
||||
pub mod stake;
|
||||
pub mod test_utils;
|
||||
|
@@ -7,7 +7,7 @@ use console::style;
|
||||
use solana_clap_utils::{
|
||||
commitment::COMMITMENT_ARG,
|
||||
input_parsers::commitment_of,
|
||||
input_validators::is_url,
|
||||
input_validators::{is_url, is_url_or_moniker},
|
||||
keypair::{CliSigners, DefaultSigner, SKIP_SEED_PHRASE_VALIDATION_ARG},
|
||||
DisplayError,
|
||||
};
|
||||
@@ -168,6 +168,7 @@ pub fn parse_args<'a>(
|
||||
let CliCommandInfo { command, signers } =
|
||||
parse_command(&matches, &default_signer, &mut wallet_manager)?;
|
||||
|
||||
let verbose = matches.is_present("verbose");
|
||||
let output_format = matches
|
||||
.value_of("output_format")
|
||||
.map(|value| match value {
|
||||
@@ -175,7 +176,11 @@ pub fn parse_args<'a>(
|
||||
"json-compact" => OutputFormat::JsonCompact,
|
||||
_ => unreachable!(),
|
||||
})
|
||||
.unwrap_or(OutputFormat::Display);
|
||||
.unwrap_or(if verbose {
|
||||
OutputFormat::DisplayVerbose
|
||||
} else {
|
||||
OutputFormat::Display
|
||||
});
|
||||
|
||||
let commitment = matches
|
||||
.subcommand_name()
|
||||
@@ -198,7 +203,7 @@ pub fn parse_args<'a>(
|
||||
keypair_path: default_signer_path,
|
||||
rpc_client: None,
|
||||
rpc_timeout,
|
||||
verbose: matches.is_present("verbose"),
|
||||
verbose,
|
||||
output_format,
|
||||
commitment,
|
||||
send_transaction_config: RpcSendTransactionConfig::default(),
|
||||
@@ -233,11 +238,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
Arg::with_name("json_rpc_url")
|
||||
.short("u")
|
||||
.long("url")
|
||||
.value_name("URL")
|
||||
.value_name("URL_OR_MONIKER")
|
||||
.takes_value(true)
|
||||
.global(true)
|
||||
.validator(is_url)
|
||||
.help("JSON RPC URL for the solana cluster"),
|
||||
.validator(is_url_or_moniker)
|
||||
.help(
|
||||
"URL for Solana's JSON RPC or moniker (or their first letter): \
|
||||
[mainnet-beta, testnet, devnet, localhost]",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("websocket_url")
|
||||
|
@@ -580,6 +580,7 @@ mod tests {
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::hash,
|
||||
nonce::{self, state::Versions, State},
|
||||
nonce_account,
|
||||
signature::{read_keypair_file, write_keypair, Keypair, Signer},
|
||||
system_program,
|
||||
};
|
||||
@@ -891,7 +892,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_account_identity_ok() {
|
||||
let nonce_account = nonce::create_account(1).into_inner();
|
||||
let nonce_account = nonce_account::create_account(1).into_inner();
|
||||
assert_eq!(account_identity_ok(&nonce_account), Ok(()));
|
||||
|
||||
let system_account = Account::new(1, 0, &system_program::id());
|
||||
@@ -910,7 +911,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_state_from_account() {
|
||||
let mut nonce_account = nonce::create_account(1).into_inner();
|
||||
let mut nonce_account = nonce_account::create_account(1).into_inner();
|
||||
assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized));
|
||||
|
||||
let data = nonce::state::Data {
|
||||
@@ -935,7 +936,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_data_from_helpers() {
|
||||
let mut nonce_account = nonce::create_account(1).into_inner();
|
||||
let mut nonce_account = nonce_account::create_account(1).into_inner();
|
||||
let state = state_from_account(&nonce_account).unwrap();
|
||||
assert_eq!(
|
||||
data_from_state(&state),
|
||||
|
cli/src/program.rs (new file, 2298 lines; diff suppressed because it is too large)
cli/src/send_tpu.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
use log::*;
use solana_client::rpc_response::{RpcContactInfo, RpcLeaderSchedule};
use std::net::{SocketAddr, UdpSocket};

pub fn get_leader_tpu(
    slot_index: u64,
    leader_schedule: Option<&RpcLeaderSchedule>,
    cluster_nodes: Option<&Vec<RpcContactInfo>>,
) -> Option<SocketAddr> {
    leader_schedule?
        .iter()
        .find(|(_pubkey, slots)| slots.iter().any(|slot| *slot as u64 == slot_index))
        .and_then(|(pubkey, _)| {
            cluster_nodes?
                .iter()
                .find(|contact_info| contact_info.pubkey == *pubkey)
                .and_then(|contact_info| contact_info.tpu)
        })
}

pub fn send_transaction_tpu(
    send_socket: &UdpSocket,
    tpu_address: &SocketAddr,
    wire_transaction: &[u8],
) {
    if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
        warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
    }
}
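For reference (not part of the diff), a minimal sketch of how the new send_tpu helpers could be driven. The wrapper function name, the `solana_cli::send_tpu` path, the bincode wire serialization, and the in-scope `rpc_client`/`tx` values are assumptions for illustration only:

use solana_cli::send_tpu::{get_leader_tpu, send_transaction_tpu};
use solana_client::rpc_client::RpcClient;
use solana_sdk::transaction::Transaction;
use std::net::UdpSocket;

// Hypothetical helper: forward an already-signed transaction to the current
// leader's TPU port instead of going through JSON RPC.
fn send_to_current_leader(
    rpc_client: &RpcClient,
    tx: &Transaction,
) -> Result<(), Box<dyn std::error::Error>> {
    let epoch_info = rpc_client.get_epoch_info()?;
    // The leader schedule is keyed by slot index within the current epoch.
    let leader_schedule = rpc_client.get_leader_schedule(Some(epoch_info.absolute_slot))?;
    let cluster_nodes = rpc_client.get_cluster_nodes()?;

    if let Some(tpu_address) = get_leader_tpu(
        epoch_info.slot_index,
        leader_schedule.as_ref(),
        Some(&cluster_nodes),
    ) {
        let send_socket = UdpSocket::bind("0.0.0.0:0")?;
        let wire_transaction = bincode::serialize(tx)?;
        send_transaction_tpu(&send_socket, &tpu_address, &wire_transaction);
    }
    Ok(())
}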
|
@@ -32,15 +32,16 @@ use solana_client::{
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::from_account,
|
||||
account_utils::StateMut,
|
||||
clock::{Clock, Epoch, Slot, UnixTimestamp, SECONDS_PER_DAY},
|
||||
feature, feature_set,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
system_instruction::SystemError,
|
||||
sysvar::{
|
||||
clock,
|
||||
stake_history::{self, StakeHistory},
|
||||
Sysvar,
|
||||
},
|
||||
transaction::Transaction,
|
||||
};
|
||||
@@ -1501,6 +1502,7 @@ pub fn build_stake_state(
|
||||
use_lamports_unit: bool,
|
||||
stake_history: &StakeHistory,
|
||||
clock: &Clock,
|
||||
stake_program_v2_enabled: bool,
|
||||
) -> CliStakeState {
|
||||
match stake_state {
|
||||
StakeState::Stake(
|
||||
@@ -1512,9 +1514,12 @@ pub fn build_stake_state(
|
||||
stake,
|
||||
) => {
|
||||
let current_epoch = clock.epoch;
|
||||
let (active_stake, activating_stake, deactivating_stake) = stake
|
||||
.delegation
|
||||
.stake_activating_and_deactivating(current_epoch, Some(stake_history));
|
||||
let (active_stake, activating_stake, deactivating_stake) =
|
||||
stake.delegation.stake_activating_and_deactivating(
|
||||
current_epoch,
|
||||
Some(stake_history),
|
||||
stake_program_v2_enabled,
|
||||
);
|
||||
let lockup = if lockup.is_in_force(clock, None) {
|
||||
Some(lockup.into())
|
||||
} else {
|
||||
@@ -1523,6 +1528,7 @@ pub fn build_stake_state(
|
||||
CliStakeState {
|
||||
stake_type: CliStakeType::Stake,
|
||||
account_balance,
|
||||
credits_observed: Some(stake.credits_observed),
|
||||
delegated_stake: Some(stake.delegation.stake),
|
||||
delegated_vote_account_address: if stake.delegation.voter_pubkey
|
||||
!= Pubkey::default()
|
||||
@@ -1574,6 +1580,7 @@ pub fn build_stake_state(
|
||||
CliStakeState {
|
||||
stake_type: CliStakeType::Initialized,
|
||||
account_balance,
|
||||
credits_observed: Some(0),
|
||||
authorized: Some(authorized.into()),
|
||||
lockup,
|
||||
use_lamports_unit,
|
||||
@@ -1618,9 +1625,9 @@ pub(crate) fn fetch_epoch_rewards(
|
||||
kind:
|
||||
ClientErrorKind::RpcError(rpc_request::RpcError::RpcResponseError {
|
||||
code: rpc_custom_error::JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE,
|
||||
message: _,
|
||||
..
|
||||
}),
|
||||
request: _,
|
||||
..
|
||||
}) => {
|
||||
// RPC node doesn't have this block
|
||||
break;
|
||||
@@ -1640,29 +1647,37 @@ pub(crate) fn fetch_epoch_rewards(
|
||||
let previous_epoch_rewards = first_confirmed_block.rewards;
|
||||
|
||||
if let Some((effective_slot, epoch_end_time, epoch_rewards)) = epoch_info {
|
||||
let wallclock_epoch_duration =
|
||||
{ Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) }
|
||||
.to_std()?
|
||||
.as_secs_f64();
|
||||
|
||||
let wallclock_epochs_per_year =
|
||||
(SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;
|
||||
let wallclock_epoch_duration = if epoch_end_time > epoch_start_time {
|
||||
Some(
|
||||
{ Local.timestamp(epoch_end_time, 0) - Local.timestamp(epoch_start_time, 0) }
|
||||
.to_std()?
|
||||
.as_secs_f64(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if let Some(reward) = epoch_rewards
|
||||
.into_iter()
|
||||
.find(|reward| reward.pubkey == address.to_string())
|
||||
{
|
||||
if reward.post_balance > reward.lamports.try_into().unwrap_or(0) {
|
||||
let balance_increase_percent = reward.lamports.abs() as f64
|
||||
let rate_change = reward.lamports.abs() as f64
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64);
|
||||
|
||||
let apr = wallclock_epoch_duration.map(|wallclock_epoch_duration| {
|
||||
let wallclock_epochs_per_year =
|
||||
(SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;
|
||||
rate_change * wallclock_epochs_per_year
|
||||
});
|
||||
|
||||
all_epoch_rewards.push(CliEpochReward {
|
||||
epoch,
|
||||
effective_slot,
|
||||
amount: reward.lamports.abs() as u64,
|
||||
post_balance: reward.post_balance,
|
||||
percent_change: balance_increase_percent,
|
||||
apr: balance_increase_percent * wallclock_epochs_per_year,
|
||||
percent_change: rate_change * 100.0,
|
||||
apr: apr.map(|r| r * 100.0),
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1696,12 +1711,11 @@ pub fn process_show_stake_account(
|
||||
match stake_account.state() {
|
||||
Ok(stake_state) => {
|
||||
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
|
||||
let stake_history =
|
||||
StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
|
||||
})?;
|
||||
let stake_history = from_account(&stake_history_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
|
||||
})?;
|
||||
let clock_account = rpc_client.get_account(&clock::id())?;
|
||||
let clock: Clock = Sysvar::from_account(&clock_account).ok_or_else(|| {
|
||||
let clock: Clock = from_account(&clock_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string())
|
||||
})?;
|
||||
|
||||
@@ -1711,15 +1725,17 @@ pub fn process_show_stake_account(
|
||||
use_lamports_unit,
|
||||
&stake_history,
|
||||
&clock,
|
||||
is_stake_program_v2_enabled(rpc_client), // At v1.6, this check can be removed and simply passed as `true`
|
||||
);
|
||||
|
||||
if state.stake_type == CliStakeType::Stake {
|
||||
if let Some(activation_epoch) = state.activation_epoch {
|
||||
state.epoch_rewards = Some(fetch_epoch_rewards(
|
||||
rpc_client,
|
||||
stake_account_address,
|
||||
activation_epoch,
|
||||
)?);
|
||||
let rewards =
|
||||
fetch_epoch_rewards(rpc_client, stake_account_address, activation_epoch);
|
||||
match rewards {
|
||||
Ok(rewards) => state.epoch_rewards = Some(rewards),
|
||||
Err(error) => eprintln!("Failed to fetch epoch rewards: {:?}", error),
|
||||
};
|
||||
}
|
||||
}
|
||||
Ok(config.output_format.formatted_string(&state))
|
||||
@@ -1738,7 +1754,7 @@ pub fn process_show_stake_history(
|
||||
use_lamports_unit: bool,
|
||||
) -> ProcessResult {
|
||||
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
|
||||
let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
|
||||
let stake_history = from_account::<StakeHistory>(&stake_history_account).ok_or_else(|| {
|
||||
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
|
||||
})?;
|
||||
|
||||
@@ -1881,6 +1897,15 @@ pub fn process_delegate_stake(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_stake_program_v2_enabled(rpc_client: &RpcClient) -> bool {
|
||||
rpc_client
|
||||
.get_account(&feature_set::stake_program_v2::id())
|
||||
.ok()
|
||||
.and_then(|account| feature::from_account(&account))
|
||||
.and_then(|feature| feature.activated_at)
|
||||
.is_some()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@@ -696,11 +696,14 @@ pub fn process_show_vote_account(
|
||||
}
|
||||
}
|
||||
|
||||
let epoch_rewards = Some(crate::stake::fetch_epoch_rewards(
|
||||
rpc_client,
|
||||
vote_account_address,
|
||||
1,
|
||||
)?);
|
||||
let epoch_rewards = match crate::stake::fetch_epoch_rewards(rpc_client, vote_account_address, 1)
|
||||
{
|
||||
Ok(rewards) => Some(rewards),
|
||||
Err(error) => {
|
||||
eprintln!("Failed to fetch epoch rewards: {:?}", error);
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
let vote_account_data = CliVoteAccount {
|
||||
account_balance: vote_account.lamports,
|
||||
|
@@ -1,118 +0,0 @@
|
||||
use serde_json::Value;
|
||||
use solana_cli::cli::{process_command, CliCommand, CliConfig};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::test_validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
bpf_loader,
|
||||
commitment_config::CommitmentConfig,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{
|
||||
fs::{remove_dir_all, File},
|
||||
io::Read,
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::mpsc::channel,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_cli_deploy_program() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
pathbuf.push("tests");
|
||||
pathbuf.push("fixtures");
|
||||
pathbuf.push("noop");
|
||||
pathbuf.set_extension("so");
|
||||
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run();
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut file = File::open(pathbuf.to_str().unwrap()).unwrap();
|
||||
let mut program_data = Vec::new();
|
||||
file.read_to_end(&mut program_data).unwrap();
|
||||
let minimum_balance_for_rent_exemption = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(program_data.len())
|
||||
.unwrap();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
let keypair = Keypair::new();
|
||||
config.json_rpc_url = format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
config.command = CliCommand::Airdrop {
|
||||
faucet_host: None,
|
||||
faucet_port: faucet_addr.port(),
|
||||
pubkey: None,
|
||||
lamports: 3 * minimum_balance_for_rent_exemption, // min balance for rent exemption for two programs + leftover for tx processing
|
||||
};
|
||||
config.signers = vec![&keypair];
|
||||
process_command(&config).unwrap();
|
||||
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: None,
|
||||
use_deprecated_loader: false,
|
||||
};
|
||||
|
||||
let response = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&response.unwrap()).unwrap();
|
||||
let program_id_str = json
|
||||
.as_object()
|
||||
.unwrap()
|
||||
.get("programId")
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.unwrap();
|
||||
let program_id = Pubkey::from_str(&program_id_str).unwrap();
|
||||
let account0 = rpc_client
|
||||
.get_account_with_commitment(&program_id, CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account0.owner, bpf_loader::id());
|
||||
assert_eq!(account0.executable, true);
|
||||
|
||||
let mut file = File::open(pathbuf.to_str().unwrap().to_string()).unwrap();
|
||||
let mut elf = Vec::new();
|
||||
file.read_to_end(&mut elf).unwrap();
|
||||
|
||||
assert_eq!(account0.data, elf);
|
||||
|
||||
// Test custom address
|
||||
let custom_address_keypair = Keypair::new();
|
||||
config.signers = vec![&keypair, &custom_address_keypair];
|
||||
config.command = CliCommand::Deploy {
|
||||
program_location: pathbuf.to_str().unwrap().to_string(),
|
||||
address: Some(1),
|
||||
use_deprecated_loader: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let account1 = rpc_client
|
||||
.get_account_with_commitment(&custom_address_keypair.pubkey(), CommitmentConfig::recent())
|
||||
.unwrap()
|
||||
.value
|
||||
.unwrap();
|
||||
assert_eq!(account1.lamports, minimum_balance_for_rent_exemption);
|
||||
assert_eq!(account1.owner, bpf_loader::id());
|
||||
assert_eq!(account1.executable, true);
|
||||
assert_eq!(account0.data, account1.data);
|
||||
|
||||
// Attempt to redeploy to the same address
|
||||
process_command(&config).unwrap_err();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
@@ -10,7 +10,7 @@ use solana_client::{
|
||||
rpc_client::RpcClient,
|
||||
};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::test_validator::{TestValidator, TestValidatorOptions};
|
||||
use solana_core::test_validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
@@ -231,17 +231,14 @@ fn full_battery_tests(
|
||||
|
||||
#[test]
|
||||
fn test_create_account_with_seed() {
|
||||
solana_logger::setup();
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
|
cli/tests/program.rs (new file, 1004 lines; diff suppressed because it is too large)
@@ -9,7 +9,7 @@ use solana_client::{
|
||||
nonce_utils,
|
||||
rpc_client::RpcClient,
|
||||
};
|
||||
use solana_core::test_validator::{TestValidator, TestValidatorOptions};
|
||||
use solana_core::test_validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
@@ -848,11 +848,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: SIG_FEE,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
} = TestValidator::run_with_fees(SIG_FEE);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
@@ -985,11 +981,7 @@ fn test_stake_split() {
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
} = TestValidator::run_with_fees(1);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
@@ -1140,11 +1132,7 @@ fn test_stake_set_lockup() {
|
||||
alice,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
} = TestValidator::run_with_fees(1);
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(alice, sender, None);
|
||||
let faucet_addr = receiver.recv().unwrap();
|
||||
|
@@ -9,7 +9,7 @@ use solana_client::{
|
||||
nonce_utils,
|
||||
rpc_client::RpcClient,
|
||||
};
|
||||
use solana_core::test_validator::{TestValidator, TestValidatorOptions};
|
||||
use solana_core::test_validator::TestValidator;
|
||||
use solana_faucet::faucet::run_local_faucet;
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
@@ -21,17 +21,14 @@ use std::{fs::remove_dir_all, sync::mpsc::channel};
|
||||
|
||||
#[test]
|
||||
fn test_transfer() {
|
||||
solana_logger::setup();
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
@@ -252,17 +249,14 @@ fn test_transfer() {
|
||||
|
||||
#[test]
|
||||
fn test_transfer_multisession_signing() {
|
||||
solana_logger::setup();
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
@@ -382,17 +376,14 @@ fn test_transfer_multisession_signing() {
|
||||
|
||||
#[test]
|
||||
fn test_transfer_all() {
|
||||
solana_logger::setup();
|
||||
let TestValidator {
|
||||
server,
|
||||
leader_data,
|
||||
alice: mint_keypair,
|
||||
ledger_path,
|
||||
..
|
||||
} = TestValidator::run_with_options(TestValidatorOptions {
|
||||
fees: 1,
|
||||
bootstrap_validator_lamports: 42_000,
|
||||
..TestValidatorOptions::default()
|
||||
});
|
||||
} = TestValidator::run_with_fees(1);
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_faucet(mint_keypair, sender, None);
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -16,19 +16,20 @@ clap = "2.33.0"
|
||||
indicatif = "0.15.0"
|
||||
jsonrpc-core = "15.0.0"
|
||||
log = "0.4.8"
|
||||
net2 = "0.2.37"
|
||||
rayon = "1.4.0"
|
||||
reqwest = { version = "0.10.8", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
semver = "0.11.0"
|
||||
serde = "1.0.112"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.23" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@@ -37,7 +38,7 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "15.0.0"
|
||||
jsonrpc-http-server = "15.0.0"
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -5,6 +5,8 @@ use solana_sdk::{
|
||||
use std::io;
|
||||
use thiserror::Error;
|
||||
|
||||
pub use reqwest; // export `reqwest` for clients
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ClientErrorKind {
|
||||
#[error(transparent)]
|
||||
|
@@ -1,6 +1,8 @@
|
||||
use crate::{
|
||||
client_error::Result,
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
rpc_custom_error,
|
||||
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData},
|
||||
rpc_response::RpcSimulateTransactionResult,
|
||||
rpc_sender::RpcSender,
|
||||
};
|
||||
use log::*;
|
||||
@@ -31,7 +33,7 @@ impl HttpSender {
|
||||
struct RpcErrorObject {
|
||||
code: i64,
|
||||
message: String,
|
||||
/*data field omitted*/
|
||||
data: serde_json::Value,
|
||||
}
|
||||
|
||||
impl RpcSender for HttpSender {
|
||||
@@ -72,11 +74,27 @@ impl RpcSender for HttpSender {
|
||||
if json["error"].is_object() {
|
||||
return match serde_json::from_value::<RpcErrorObject>(json["error"].clone())
|
||||
{
|
||||
Ok(rpc_error_object) => Err(RpcError::RpcResponseError {
|
||||
code: rpc_error_object.code,
|
||||
message: rpc_error_object.message,
|
||||
Ok(rpc_error_object) => {
|
||||
let data = match rpc_error_object.code {
|
||||
rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE => {
|
||||
match serde_json::from_value::<RpcSimulateTransactionResult>(json["error"]["data"].clone()) {
|
||||
Ok(data) => RpcResponseErrorData::SendTransactionPreflightFailure(data),
|
||||
Err(err) => {
|
||||
debug!("Failed to deserialize RpcSimulateTransactionResult: {:?}", err);
|
||||
RpcResponseErrorData::Empty
|
||||
}
|
||||
}
|
||||
},
|
||||
_ => RpcResponseErrorData::Empty
|
||||
};
|
||||
|
||||
Err(RpcError::RpcResponseError {
|
||||
code: rpc_error_object.code,
|
||||
message: rpc_error_object.message,
|
||||
data,
|
||||
}
|
||||
.into())
|
||||
}
|
||||
.into()),
|
||||
Err(err) => Err(RpcError::RpcRequestError(format!(
|
||||
"Failed to deserialize RPC error response: {} [{}]",
|
||||
serde_json::to_string(&json["error"]).unwrap(),
|
||||
|
@@ -6,6 +6,7 @@ use crate::{
|
||||
};
|
||||
use serde_json::{json, Number, Value};
|
||||
use solana_sdk::{
|
||||
epoch_info::EpochInfo,
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
instruction::InstructionError,
|
||||
signature::Signature,
|
||||
@@ -47,6 +48,10 @@ impl RpcSender for MockSender {
|
||||
return Ok(Value::Null);
|
||||
}
|
||||
let val = match request {
|
||||
RpcRequest::GetAccountInfo => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: Value::Null,
|
||||
})?,
|
||||
RpcRequest::GetBalance => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: Value::Number(Number::from(50)),
|
||||
@@ -58,6 +63,14 @@ impl RpcSender for MockSender {
|
||||
serde_json::to_value(FeeCalculator::default()).unwrap(),
|
||||
),
|
||||
})?,
|
||||
RpcRequest::GetEpochInfo => serde_json::to_value(EpochInfo {
|
||||
epoch: 1,
|
||||
slot_index: 2,
|
||||
slots_in_epoch: 32,
|
||||
absolute_slot: 34,
|
||||
block_height: 34,
|
||||
transaction_count: Some(123),
|
||||
})?,
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => {
|
||||
let value = if self.url == "blockhash_expired" {
|
||||
Value::Null
|
||||
|
@@ -1,4 +1,7 @@
|
||||
use crate::rpc_response::{Response as RpcResponse, RpcSignatureResult, SlotInfo};
|
||||
use crate::{
|
||||
rpc_config::{RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter},
|
||||
rpc_response::{Response as RpcResponse, RpcLogsResponse, RpcSignatureResult, SlotInfo},
|
||||
};
|
||||
use log::*;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde_json::{
|
||||
@@ -20,8 +23,6 @@ use thiserror::Error;
|
||||
use tungstenite::{client::AutoStream, connect, Message, WebSocket};
|
||||
use url::{ParseError, Url};
|
||||
|
||||
type PubsubSignatureResponse = PubsubClientSubscription<RpcResponse<RpcSignatureResult>>;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum PubsubClientError {
|
||||
#[error("url parse error")]
|
||||
@@ -33,8 +34,8 @@ pub enum PubsubClientError {
|
||||
#[error("json parse error")]
|
||||
JsonParseError(#[from] serde_json::error::Error),
|
||||
|
||||
#[error("unexpected message format")]
|
||||
UnexpectedMessageError,
|
||||
#[error("unexpected message format: {0}")]
|
||||
UnexpectedMessageError(String),
|
||||
}
|
||||
|
||||
pub struct PubsubClientSubscription<T>
|
||||
@@ -89,8 +90,11 @@ where
|
||||
return Ok(x);
|
||||
}
|
||||
}
|
||||
|
||||
Err(PubsubClientError::UnexpectedMessageError)
|
||||
// TODO: Add proper JSON RPC response/error handling...
|
||||
Err(PubsubClientError::UnexpectedMessageError(format!(
|
||||
"{:?}",
|
||||
json_msg
|
||||
)))
|
||||
}
|
||||
|
||||
pub fn send_unsubscribe(&self) -> Result<(), PubsubClientError> {
|
||||
@@ -114,14 +118,18 @@ where
|
||||
let message_text = &message.into_text().unwrap();
|
||||
let json_msg: Map<String, Value> = serde_json::from_str(message_text)?;
|
||||
|
||||
if let Some(Object(value_1)) = json_msg.get("params") {
|
||||
if let Some(value_2) = value_1.get("result") {
|
||||
let x: T = serde_json::from_value::<T>(value_2.clone()).unwrap();
|
||||
if let Some(Object(params)) = json_msg.get("params") {
|
||||
if let Some(result) = params.get("result") {
|
||||
let x: T = serde_json::from_value::<T>(result.clone()).unwrap();
|
||||
return Ok(x);
|
||||
}
|
||||
}
|
||||
|
||||
Err(PubsubClientError::UnexpectedMessageError)
|
||||
// TODO: Add proper JSON RPC response/error handling...
|
||||
Err(PubsubClientError::UnexpectedMessageError(format!(
|
||||
"{:?}",
|
||||
json_msg
|
||||
)))
|
||||
}
|
||||
|
||||
pub fn shutdown(&mut self) -> std::thread::Result<()> {
|
||||
@@ -138,15 +146,79 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
const SLOT_OPERATION: &str = "slot";
|
||||
const SIGNATURE_OPERATION: &str = "signature";
|
||||
pub type LogsSubscription = (
|
||||
PubsubClientSubscription<RpcResponse<RpcLogsResponse>>,
|
||||
Receiver<RpcResponse<RpcLogsResponse>>,
|
||||
);
|
||||
pub type SlotsSubscription = (PubsubClientSubscription<SlotInfo>, Receiver<SlotInfo>);
|
||||
pub type SignatureSubscription = (
|
||||
PubsubClientSubscription<RpcResponse<RpcSignatureResult>>,
|
||||
Receiver<RpcResponse<RpcSignatureResult>>,
|
||||
);
|
||||
|
||||
pub struct PubsubClient {}
|
||||
|
||||
impl PubsubClient {
|
||||
pub fn slot_subscribe(
|
||||
pub fn logs_subscribe(
|
||||
url: &str,
|
||||
) -> Result<(PubsubClientSubscription<SlotInfo>, Receiver<SlotInfo>), PubsubClientError> {
|
||||
filter: RpcTransactionLogsFilter,
|
||||
config: RpcTransactionLogsConfig,
|
||||
) -> Result<LogsSubscription, PubsubClientError> {
|
||||
let url = Url::parse(url)?;
|
||||
let (socket, _response) = connect(url)?;
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
let socket = Arc::new(RwLock::new(socket));
|
||||
let socket_clone = socket.clone();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let exit_clone = exit.clone();
|
||||
|
||||
let subscription_id =
|
||||
PubsubClientSubscription::<RpcResponse<RpcLogsResponse>>::send_subscribe(
|
||||
&socket_clone,
|
||||
json!({
|
||||
"jsonrpc":"2.0","id":1,"method":"logsSubscribe","params":[filter, config]
|
||||
})
|
||||
.to_string(),
|
||||
)?;
|
||||
|
||||
let t_cleanup = std::thread::spawn(move || {
|
||||
loop {
|
||||
if exit_clone.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
match PubsubClientSubscription::read_message(&socket_clone) {
|
||||
Ok(message) => match sender.send(message) {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
info!("receive error: {:?}", err);
|
||||
break;
|
||||
}
|
||||
},
|
||||
Err(err) => {
|
||||
info!("receive error: {:?}", err);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("websocket - exited receive loop");
|
||||
});
|
||||
|
||||
let result = PubsubClientSubscription {
|
||||
message_type: PhantomData,
|
||||
operation: "logs",
|
||||
socket,
|
||||
subscription_id,
|
||||
t_cleanup: Some(t_cleanup),
|
||||
exit,
|
||||
};
|
||||
|
||||
Ok((result, receiver))
|
||||
}
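For reference (not part of the diff), a minimal sketch of driving the new logs_subscribe API above; the websocket URL is a placeholder and the wrapper function exists only for illustration:

use solana_client::{
    pubsub_client::PubsubClient,
    rpc_config::{RpcTransactionLogsConfig, RpcTransactionLogsFilter},
};
use solana_sdk::commitment_config::CommitmentConfig;

// Stream every transaction's logs until the websocket channel disconnects.
fn stream_all_logs() -> Result<(), Box<dyn std::error::Error>> {
    let (_subscription, receiver) = PubsubClient::logs_subscribe(
        "ws://127.0.0.1:8900", // placeholder websocket endpoint
        RpcTransactionLogsFilter::All,
        RpcTransactionLogsConfig {
            commitment: Some(CommitmentConfig::recent()),
        },
    )?;
    // Each message is an RpcResponse<RpcLogsResponse> delivered on the channel.
    while let Ok(response) = receiver.recv() {
        println!(
            "slot {}: {} ({} log lines)",
            response.context.slot,
            response.value.signature,
            response.value.logs.len()
        );
    }
    Ok(())
}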
|
||||
|
||||
pub fn slot_subscribe(url: &str) -> Result<SlotsSubscription, PubsubClientError> {
|
||||
let url = Url::parse(url)?;
|
||||
let (socket, _response) = connect(url)?;
|
||||
let (sender, receiver) = channel::<SlotInfo>();
|
||||
@@ -158,41 +230,37 @@ impl PubsubClient {
|
||||
let subscription_id = PubsubClientSubscription::<SlotInfo>::send_subscribe(
|
||||
&socket_clone,
|
||||
json!({
|
||||
"jsonrpc":"2.0","id":1,"method":format!("{}Subscribe", SLOT_OPERATION),"params":[]
|
||||
"jsonrpc":"2.0","id":1,"method":"slotSubscribe","params":[]
|
||||
})
|
||||
.to_string(),
|
||||
)
|
||||
.unwrap();
|
||||
)?;
|
||||
|
||||
let t_cleanup = std::thread::spawn(move || {
|
||||
loop {
|
||||
if exit_clone.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
let message: Result<SlotInfo, PubsubClientError> =
|
||||
PubsubClientSubscription::read_message(&socket_clone);
|
||||
|
||||
if let Ok(msg) = message {
|
||||
match sender.send(msg) {
|
||||
match PubsubClientSubscription::read_message(&socket_clone) {
|
||||
Ok(message) => match sender.send(message) {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
info!("receive error: {:?}", err);
|
||||
break;
|
||||
}
|
||||
},
|
||||
Err(err) => {
|
||||
info!("receive error: {:?}", err);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
info!("receive error: {:?}", message);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
info!("websocket - exited receive loop");
|
||||
});
|
||||
|
||||
let result: PubsubClientSubscription<SlotInfo> = PubsubClientSubscription {
|
||||
let result = PubsubClientSubscription {
|
||||
message_type: PhantomData,
|
||||
operation: SLOT_OPERATION,
|
||||
operation: "slot",
|
||||
socket,
|
||||
subscription_id,
|
||||
t_cleanup: Some(t_cleanup),
|
||||
@@ -205,16 +273,11 @@ impl PubsubClient {
|
||||
pub fn signature_subscribe(
|
||||
url: &str,
|
||||
signature: &Signature,
|
||||
) -> Result<
|
||||
(
|
||||
PubsubSignatureResponse,
|
||||
Receiver<RpcResponse<RpcSignatureResult>>,
|
||||
),
|
||||
PubsubClientError,
|
||||
> {
|
||||
config: Option<RpcSignatureSubscribeConfig>,
|
||||
) -> Result<SignatureSubscription, PubsubClientError> {
|
||||
let url = Url::parse(url)?;
|
||||
let (socket, _response) = connect(url)?;
|
||||
let (sender, receiver) = channel::<RpcResponse<RpcSignatureResult>>();
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
let socket = Arc::new(RwLock::new(socket));
|
||||
let socket_clone = socket.clone();
|
||||
@@ -223,10 +286,10 @@ impl PubsubClient {
|
||||
let body = json!({
|
||||
"jsonrpc":"2.0",
|
||||
"id":1,
|
||||
"method":format!("{}Subscribe", SIGNATURE_OPERATION),
|
||||
"method":"signatureSubscribe",
|
||||
"params":[
|
||||
signature.to_string(),
|
||||
{"enableReceivedNotification": true }
|
||||
config
|
||||
]
|
||||
})
|
||||
.to_string();
|
||||
@@ -234,8 +297,7 @@ impl PubsubClient {
|
||||
PubsubClientSubscription::<RpcResponse<RpcSignatureResult>>::send_subscribe(
|
||||
&socket_clone,
|
||||
body,
|
||||
)
|
||||
.unwrap();
|
||||
)?;
|
||||
|
||||
let t_cleanup = std::thread::spawn(move || {
|
||||
loop {
|
||||
@@ -263,15 +325,14 @@ impl PubsubClient {
|
||||
info!("websocket - exited receive loop");
|
||||
});
|
||||
|
||||
let result: PubsubClientSubscription<RpcResponse<RpcSignatureResult>> =
|
||||
PubsubClientSubscription {
|
||||
message_type: PhantomData,
|
||||
operation: SIGNATURE_OPERATION,
|
||||
socket,
|
||||
subscription_id,
|
||||
t_cleanup: Some(t_cleanup),
|
||||
exit,
|
||||
};
|
||||
let result = PubsubClientSubscription {
|
||||
message_type: PhantomData,
|
||||
operation: "signature",
|
||||
socket,
|
||||
subscription_id,
|
||||
t_cleanup: Some(t_cleanup),
|
||||
exit,
|
||||
};
|
||||
|
||||
Ok((result, receiver))
|
||||
}
|
||||
|
@@ -8,7 +8,7 @@ use crate::{
|
||||
RpcProgramAccountsConfig, RpcSendTransactionConfig, RpcSimulateTransactionConfig,
|
||||
RpcTokenAccountsFilter,
|
||||
},
|
||||
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
|
||||
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter},
|
||||
rpc_response::*,
|
||||
rpc_sender::RpcSender,
|
||||
};
|
||||
@@ -48,6 +48,7 @@ use std::{
|
||||
|
||||
pub struct RpcClient {
|
||||
sender: Box<dyn RpcSender + Send + Sync + 'static>,
|
||||
commitment_config: CommitmentConfig,
|
||||
default_cluster_transaction_encoding: RwLock<Option<UiTransactionEncoding>>,
|
||||
}
|
||||
|
||||
@@ -72,27 +73,41 @@ fn serialize_encode_transaction(
|
||||
}
|
||||
|
||||
impl RpcClient {
|
||||
pub fn new_sender<T: RpcSender + Send + Sync + 'static>(sender: T) -> Self {
|
||||
fn new_sender<T: RpcSender + Send + Sync + 'static>(
|
||||
sender: T,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
sender: Box::new(sender),
|
||||
default_cluster_transaction_encoding: RwLock::new(None),
|
||||
commitment_config,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(url: String) -> Self {
|
||||
Self::new_sender(HttpSender::new(url))
|
||||
Self::new_with_commitment(url, CommitmentConfig::default())
|
||||
}
|
||||
|
||||
pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
|
||||
Self::new_sender(HttpSender::new(url), commitment_config)
|
||||
}
|
||||
|
||||
pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
|
||||
Self::new_sender(HttpSender::new_with_timeout(url, timeout))
|
||||
Self::new_sender(
|
||||
HttpSender::new_with_timeout(url, timeout),
|
||||
CommitmentConfig::default(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn new_mock(url: String) -> Self {
|
||||
Self::new_sender(MockSender::new(url))
|
||||
Self::new_sender(MockSender::new(url), CommitmentConfig::default())
|
||||
}
|
||||
|
||||
pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
|
||||
Self::new_sender(MockSender::new_with_mocks(url, mocks))
|
||||
Self::new_sender(
|
||||
MockSender::new_with_mocks(url, mocks),
|
||||
CommitmentConfig::default(),
|
||||
)
|
||||
}
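A minimal sketch of how the commitment-aware constructors behave (placeholder URL; `CommitmentConfig::recent()` is just one of the available levels):

use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

fn example() -> Result<(), Box<dyn std::error::Error>> {
    let rpc_client = RpcClient::new_with_commitment(
        "http://127.0.0.1:8899".to_string(), // placeholder URL
        CommitmentConfig::recent(),
    );
    assert_eq!(rpc_client.commitment(), CommitmentConfig::recent());
    // Parameterless helpers such as get_slot() now use the client's default
    // commitment instead of CommitmentConfig::default().
    let _slot = rpc_client.get_slot()?;
    Ok(())
}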
|
||||
|
||||
pub fn new_socket(addr: SocketAddr) -> Self {
|
||||
@@ -106,10 +121,14 @@ impl RpcClient {
|
||||
|
||||
pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
|
||||
Ok(self
|
||||
.confirm_transaction_with_commitment(signature, CommitmentConfig::default())?
|
||||
.confirm_transaction_with_commitment(signature, self.commitment_config)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn commitment(&self) -> CommitmentConfig {
|
||||
self.commitment_config
|
||||
}
|
||||
|
||||
pub fn confirm_transaction_with_commitment(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
@@ -128,7 +147,13 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
|
||||
self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
|
||||
self.send_transaction_with_config(
|
||||
transaction,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(self.commitment_config.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
|
||||
@@ -171,10 +196,33 @@ impl RpcClient {
|
||||
..config
|
||||
};
|
||||
let serialized_encoded = serialize_encode_transaction(transaction, encoding)?;
|
||||
let signature_base58_str: String = self.send(
|
||||
let signature_base58_str: String = match self.send(
|
||||
RpcRequest::SendTransaction,
|
||||
json!([serialized_encoded, config]),
|
||||
)?;
|
||||
) {
|
||||
Ok(signature_base58_str) => signature_base58_str,
|
||||
Err(err) => {
|
||||
if let ClientErrorKind::RpcError(RpcError::RpcResponseError {
|
||||
code,
|
||||
message,
|
||||
data,
|
||||
}) = &err.kind
|
||||
{
|
||||
debug!("{} {}", code, message);
|
||||
if let RpcResponseErrorData::SendTransactionPreflightFailure(
|
||||
RpcSimulateTransactionResult {
|
||||
logs: Some(logs), ..
|
||||
},
|
||||
) = data
|
||||
{
|
||||
for (i, log) in logs.iter().enumerate() {
|
||||
debug!("{:>3}: {}", i + 1, log);
|
||||
}
|
||||
}
|
||||
}
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
|
||||
let signature = signature_base58_str
|
||||
.parse::<Signature>()
|
||||
@@ -198,7 +246,13 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> RpcResult<RpcSimulateTransactionResult> {
|
||||
self.simulate_transaction_with_config(transaction, RpcSimulateTransactionConfig::default())
|
||||
self.simulate_transaction_with_config(
|
||||
transaction,
|
||||
RpcSimulateTransactionConfig {
|
||||
commitment: Some(self.commitment_config),
|
||||
..RpcSimulateTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
}
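A hedged sketch of how calling code could surface the preflight logs now attached to the error; `report_send_error` is an illustrative helper, and external callers may need an accessor rather than the `kind` field depending on its visibility.

use solana_client::client_error::{ClientError, ClientErrorKind};
use solana_client::rpc_request::{RpcError, RpcResponseErrorData};

fn report_send_error(err: &ClientError) {
    // Mirrors the in-crate handling above: pull simulation logs out of the
    // RpcResponseError payload when preflight fails.
    if let ClientErrorKind::RpcError(RpcError::RpcResponseError { code, message, data }) = &err.kind
    {
        eprintln!("RPC error {}: {}", code, message);
        if let RpcResponseErrorData::SendTransactionPreflightFailure(result) = data {
            for (i, log) in result.logs.iter().flatten().enumerate() {
                eprintln!("{:>3}: {}", i + 1, log);
            }
        }
    }
}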
|
||||
|
||||
pub fn simulate_transaction_with_config(
|
||||
@@ -226,7 +280,7 @@ impl RpcClient {
|
||||
&self,
|
||||
signature: &Signature,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
self.get_signature_status_with_commitment(signature, CommitmentConfig::default())
|
||||
self.get_signature_status_with_commitment(signature, self.commitment_config)
|
||||
}
|
||||
|
||||
pub fn get_signature_statuses(
|
||||
@@ -284,7 +338,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_slot(&self) -> ClientResult<Slot> {
|
||||
self.get_slot_with_commitment(CommitmentConfig::default())
|
||||
self.get_slot_with_commitment(self.commitment_config)
|
||||
}
|
||||
|
||||
pub fn get_slot_with_commitment(
|
||||
@@ -302,7 +356,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn total_supply(&self) -> ClientResult<u64> {
|
||||
self.total_supply_with_commitment(CommitmentConfig::default())
|
||||
self.total_supply_with_commitment(self.commitment_config)
|
||||
}
|
||||
|
||||
pub fn total_supply_with_commitment(
|
||||
@@ -320,7 +374,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
|
||||
self.get_vote_accounts_with_commitment(CommitmentConfig::default())
|
||||
self.get_vote_accounts_with_commitment(self.commitment_config)
|
||||
}
|
||||
|
||||
pub fn get_vote_accounts_with_commitment(
|
||||
@@ -330,6 +384,38 @@ impl RpcClient {
|
||||
self.send(RpcRequest::GetVoteAccounts, json!([commitment_config]))
|
||||
}
|
||||
|
||||
pub fn wait_for_max_stake(
|
||||
&self,
|
||||
commitment: CommitmentConfig,
|
||||
max_stake_percent: f32,
|
||||
) -> ClientResult<()> {
|
||||
let mut current_percent;
|
||||
loop {
|
||||
let vote_accounts = self.get_vote_accounts_with_commitment(commitment)?;
|
||||
|
||||
let mut max = 0;
|
||||
let total_active_stake = vote_accounts
|
||||
.current
|
||||
.iter()
|
||||
.chain(vote_accounts.delinquent.iter())
|
||||
.map(|vote_account| {
|
||||
max = std::cmp::max(max, vote_account.activated_stake);
|
||||
vote_account.activated_stake
|
||||
})
|
||||
.sum::<u64>();
|
||||
current_percent = 100f32 * max as f32 / total_active_stake as f32;
|
||||
if current_percent < max_stake_percent {
|
||||
break;
|
||||
}
|
||||
info!(
|
||||
"Waiting for stake to drop below {} current: {:.1}",
|
||||
max_stake_percent, current_percent
|
||||
);
|
||||
sleep(Duration::from_secs(10));
|
||||
}
|
||||
Ok(())
|
||||
}
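A worked illustration of the exit condition above, with a placeholder URL:

use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

fn example() -> Result<(), Box<dyn std::error::Error>> {
    // With activated stakes of [40, 35, 25] lamports across the vote accounts,
    // the largest single share is 100.0 * 40 / 100 = 40.0%, so a threshold of
    // 50.0 returns immediately, while 30.0 keeps polling (sleeping 10 seconds
    // between checks) until the distribution flattens.
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string()); // placeholder URL
    rpc_client.wait_for_max_stake(CommitmentConfig::default(), 50.0)?;
    Ok(())
}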
|
||||
|
||||
pub fn get_cluster_nodes(&self) -> ClientResult<Vec<RpcContactInfo>> {
|
||||
self.send(RpcRequest::GetClusterNodes, Value::Null)
|
||||
}
|
||||
@@ -448,7 +534,7 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_epoch_info(&self) -> ClientResult<EpochInfo> {
|
||||
self.get_epoch_info_with_commitment(CommitmentConfig::default())
|
||||
self.get_epoch_info_with_commitment(self.commitment_config)
|
||||
}
|
||||
|
||||
pub fn get_epoch_info_with_commitment(
|
||||
@@ -462,7 +548,7 @@ impl RpcClient {
|
||||
&self,
|
||||
slot: Option<Slot>,
|
||||
) -> ClientResult<Option<RpcLeaderSchedule>> {
|
||||
self.get_leader_schedule_with_commitment(slot, CommitmentConfig::default())
|
||||
self.get_leader_schedule_with_commitment(slot, self.commitment_config)
|
||||
}
|
||||
|
||||
pub fn get_leader_schedule_with_commitment(
|
||||
@@ -556,8 +642,10 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
/// Note that `get_account` returns `Err(..)` if the account does not exist whereas
|
||||
/// `get_account_with_commitment` returns `Ok(None)` if the account does not exist.
|
||||
pub fn get_account(&self, pubkey: &Pubkey) -> ClientResult<Account> {
|
||||
self.get_account_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
self.get_account_with_commitment(pubkey, self.commitment_config)?
|
||||
.value
|
||||
.ok_or_else(|| RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into())
|
||||
}
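A small illustration of the documented difference, using the client's own default commitment:

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn check_missing_account(rpc_client: &RpcClient, pubkey: &Pubkey) {
    // For a pubkey with no on-chain account, get_account is Err(..) while
    // get_account_with_commitment is Ok with a None value.
    assert!(rpc_client.get_account(pubkey).is_err());
    let response = rpc_client
        .get_account_with_commitment(pubkey, rpc_client.commitment())
        .unwrap();
    assert!(response.value.is_none());
}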
|
||||
@@ -605,7 +693,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
|
||||
Ok(self
|
||||
.get_multiple_accounts_with_commitment(pubkeys, CommitmentConfig::default())?
|
||||
.get_multiple_accounts_with_commitment(pubkeys, self.commitment_config)?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -659,7 +747,7 @@ impl RpcClient {
|
||||
/// Request the balance of the account `pubkey`.
|
||||
pub fn get_balance(&self, pubkey: &Pubkey) -> ClientResult<u64> {
|
||||
Ok(self
|
||||
.get_balance_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.get_balance_with_commitment(pubkey, self.commitment_config)?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -701,7 +789,7 @@ impl RpcClient {
|
||||
|
||||
/// Request the transaction count.
|
||||
pub fn get_transaction_count(&self) -> ClientResult<u64> {
|
||||
self.get_transaction_count_with_commitment(CommitmentConfig::default())
|
||||
self.get_transaction_count_with_commitment(self.commitment_config)
|
||||
}
|
||||
|
||||
pub fn get_transaction_count_with_commitment(
|
||||
@@ -713,7 +801,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
|
||||
let (blockhash, fee_calculator, _last_valid_slot) = self
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::default())?
|
||||
.get_recent_blockhash_with_commitment(self.commitment_config)?
|
||||
.value;
|
||||
Ok((blockhash, fee_calculator))
|
||||
}
|
||||
@@ -770,10 +858,7 @@ impl RpcClient {
|
||||
blockhash: &Hash,
|
||||
) -> ClientResult<Option<FeeCalculator>> {
|
||||
Ok(self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
blockhash,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment_config)?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -849,7 +934,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
|
||||
Ok(self
|
||||
.get_token_account_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.get_token_account_with_commitment(pubkey, self.commitment_config)?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -910,7 +995,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_account_balance_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.get_token_account_balance_with_commitment(pubkey, self.commitment_config)?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -934,7 +1019,7 @@ impl RpcClient {
|
||||
.get_token_accounts_by_delegate_with_commitment(
|
||||
delegate,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
self.commitment_config,
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
@@ -973,7 +1058,7 @@ impl RpcClient {
|
||||
.get_token_accounts_by_owner_with_commitment(
|
||||
owner,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
self.commitment_config,
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
@@ -1005,7 +1090,7 @@ impl RpcClient {
|
||||
|
||||
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_supply_with_commitment(mint, CommitmentConfig::default())?
|
||||
.get_token_supply_with_commitment(mint, self.commitment_config)?
|
||||
.value)
|
||||
}
|
||||
|
||||
@@ -1086,7 +1171,7 @@ impl RpcClient {
|
||||
|
||||
/// Poll the server to confirm a transaction.
|
||||
pub fn poll_for_signature(&self, signature: &Signature) -> ClientResult<()> {
|
||||
self.poll_for_signature_with_commitment(signature, CommitmentConfig::default())
|
||||
self.poll_for_signature_with_commitment(signature, self.commitment_config)
|
||||
}
|
||||
|
||||
/// Poll the server to confirm a transaction.
|
||||
@@ -1194,10 +1279,9 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_config(
|
||||
self.send_and_confirm_transaction_with_spinner_and_commitment(
|
||||
transaction,
|
||||
CommitmentConfig::default(),
|
||||
RpcSendTransactionConfig::default(),
|
||||
self.commitment_config,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1209,7 +1293,10 @@ impl RpcClient {
|
||||
self.send_and_confirm_transaction_with_spinner_and_config(
|
||||
transaction,
|
||||
commitment,
|
||||
RpcSendTransactionConfig::default(),
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
|
@@ -71,6 +71,21 @@ pub struct RpcProgramAccountsConfig {
|
||||
pub account_config: RpcAccountInfoConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcTransactionLogsFilter {
|
||||
All,
|
||||
AllWithVotes,
|
||||
Mentions(Vec<String>), // base58-encoded list of addresses
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTransactionLogsConfig {
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
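A sketch of the wire format implied by the serde attributes above; the module path `solana_client::rpc_config` and the vote program address are used only for illustration.

use solana_client::rpc_config::RpcTransactionLogsFilter;

fn example() {
    // Variant names are camelCased on the wire by #[serde(rename_all = "camelCase")].
    let filter = RpcTransactionLogsFilter::Mentions(vec![
        "Vote111111111111111111111111111111111111111".to_string(),
    ]);
    assert_eq!(
        serde_json::to_string(&filter).unwrap(),
        r#"{"mentions":["Vote111111111111111111111111111111111111111"]}"#
    );
    assert_eq!(
        serde_json::to_string(&RpcTransactionLogsFilter::All).unwrap(),
        r#""all""#
    );
}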
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcTokenAccountsFilter {
|
||||
|
@@ -10,6 +10,7 @@ pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE: i64
|
||||
pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
|
||||
pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHLY: i64 = -32005;
|
||||
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 = -32006;
|
||||
pub const JSON_RPC_SERVER_ERROR_SLOT_SKIPPED: i64 = -32007;
|
||||
|
||||
pub enum RpcCustomError {
|
||||
BlockCleanedUp {
|
||||
@@ -26,6 +27,9 @@ pub enum RpcCustomError {
|
||||
},
|
||||
RpcNodeUnhealthy,
|
||||
TransactionPrecompileVerificationFailure(solana_sdk::transaction::TransactionError),
|
||||
SlotSkipped {
|
||||
slot: Slot,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<RpcCustomError> for Error {
|
||||
@@ -73,6 +77,14 @@ impl From<RpcCustomError> for Error {
|
||||
message: format!("Transaction precompile verification failure {:?}", e),
|
||||
data: None,
|
||||
},
|
||||
RpcCustomError::SlotSkipped { slot } => Self {
|
||||
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_SLOT_SKIPPED),
|
||||
message: format!(
|
||||
"Slot {} was skipped, or missing due to ledger jump to recent snapshot",
|
||||
slot
|
||||
),
|
||||
data: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,3 +1,4 @@
|
||||
use crate::rpc_response::RpcSimulateTransactionResult;
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::fmt;
|
||||
@@ -138,12 +139,42 @@ impl RpcRequest {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum RpcResponseErrorData {
|
||||
Empty,
|
||||
SendTransactionPreflightFailure(RpcSimulateTransactionResult),
|
||||
}
|
||||
|
||||
impl fmt::Display for RpcResponseErrorData {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
RpcResponseErrorData::SendTransactionPreflightFailure(
|
||||
RpcSimulateTransactionResult {
|
||||
logs: Some(logs), ..
|
||||
},
|
||||
) => {
|
||||
if logs.is_empty() {
|
||||
Ok(())
|
||||
} else {
|
||||
// Give the user a hint that there is more useful logging information available...
|
||||
write!(f, "[{} log messages]", logs.len())
|
||||
}
|
||||
}
|
||||
_ => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum RpcError {
|
||||
#[error("RPC request error: {0}")]
|
||||
RpcRequestError(String),
|
||||
#[error("RPC response error {code}: {message}")]
|
||||
RpcResponseError { code: i64, message: String },
|
||||
#[error("RPC response error {code}: {message} {data}")]
|
||||
RpcResponseError {
|
||||
code: i64,
|
||||
message: String,
|
||||
data: RpcResponseErrorData,
|
||||
},
|
||||
#[error("parse error: expected {0}")]
|
||||
ParseError(String), /* "expected" */
|
||||
// Anything in a `ForUser` needs to die. The caller should be
|
||||
|
@@ -108,6 +108,14 @@ pub enum RpcSignatureResult {
|
||||
ReceivedSignature(ReceivedSignatureResult),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcLogsResponse {
|
||||
pub signature: String, // Signature as base58 string
|
||||
pub err: Option<TransactionError>,
|
||||
pub logs: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ProcessedSignatureResult {
|
||||
|
@@ -3,7 +3,7 @@
|
||||
//! messages to the network directly. The binary encoding of its messages is
|
||||
//! unstable and may change in future releases.
|
||||
|
||||
use crate::{rpc_client::RpcClient, rpc_response::Response};
|
||||
use crate::{rpc_client::RpcClient, rpc_config::RpcProgramAccountsConfig, rpc_response::Response};
|
||||
use bincode::{serialize_into, serialized_size};
|
||||
use log::*;
|
||||
use solana_sdk::{
|
||||
@@ -276,6 +276,16 @@ impl ThinClient {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_program_accounts_with_config(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
config: RpcProgramAccountsConfig,
|
||||
) -> TransportResult<Vec<(Pubkey, Account)>> {
|
||||
self.rpc_client()
|
||||
.get_program_accounts_with_config(pubkey, config)
|
||||
.map_err(|e| e.into())
|
||||
}
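A hedged usage sketch of the new passthrough; the data-size filter and the explicit RpcAccountInfoConfig fields are assumptions for illustration.

use solana_client::rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig};
use solana_client::rpc_filter::RpcFilterType;
use solana_client::thin_client::ThinClient;
use solana_sdk::{account::Account, pubkey::Pubkey, transport::Result as TransportResult};

fn fetch_program_accounts(
    thin_client: &ThinClient,
    program_id: &Pubkey,
) -> TransportResult<Vec<(Pubkey, Account)>> {
    let config = RpcProgramAccountsConfig {
        // Only return accounts whose data is exactly 165 bytes (illustrative).
        filters: Some(vec![RpcFilterType::DataSize(165)]),
        account_config: RpcAccountInfoConfig {
            encoding: None,
            data_slice: None,
            commitment: None,
        },
    };
    thin_client.get_program_accounts_with_config(program_id, config)
}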
|
||||
|
||||
pub fn wait_for_balance_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
@@ -389,6 +399,12 @@ impl SyncClient for ThinClient {
|
||||
.map(|r| r.value)
|
||||
}
|
||||
|
||||
fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> TransportResult<u64> {
|
||||
self.rpc_client()
|
||||
.get_minimum_balance_for_rent_exemption(data_len)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
|
||||
let (blockhash, fee_calculator, _last_valid_slot) =
|
||||
self.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.4.2"
|
||||
version = "1.4.23"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -14,6 +14,7 @@ edition = "2018"
|
||||
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
|
||||
|
||||
[dependencies]
|
||||
ahash = "0.6.1"
|
||||
base64 = "0.12.3"
|
||||
bincode = "1.3.1"
|
||||
bv = { version = "0.11.1", features = ["serde"] }
|
||||
@@ -34,6 +35,9 @@ jsonrpc-http-server = "15.0.0"
|
||||
jsonrpc-pubsub = "15.0.0"
|
||||
jsonrpc-ws-server = "15.0.0"
|
||||
log = "0.4.8"
|
||||
lru = "0.6.0"
|
||||
miow = "0.2.2"
|
||||
net2 = "0.2.37"
|
||||
num_cpus = "1.13.0"
|
||||
num-traits = "0.2"
|
||||
rand = "0.7.0"
|
||||
@@ -41,41 +45,44 @@ rand_chacha = "0.2.2"
|
||||
raptorq = "1.4.2"
|
||||
rayon = "1.4.1"
|
||||
regex = "1.3.9"
|
||||
rustversion = "1.0.4"
|
||||
serde = "1.0.112"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.2" }
|
||||
solana-banks-server = { path = "../banks-server", version = "1.4.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.2" }
|
||||
solana-client = { path = "../client", version = "1.4.2" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.2" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "1.4.2" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.4.2" }
|
||||
solana-ledger = { path = "../ledger", version = "1.4.2" }
|
||||
solana-logger = { path = "../logger", version = "1.4.2" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.4.2" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.2" }
|
||||
solana-measure = { path = "../measure", version = "1.4.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.2" }
|
||||
solana-perf = { path = "../perf", version = "1.4.2" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.2" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.2" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.2" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.4.2" }
|
||||
solana-streamer = { path = "../streamer", version = "1.4.2" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.4.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.2" }
|
||||
solana-version = { path = "../version", version = "1.4.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.2" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.4.2" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=2.0.8" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.4.23" }
|
||||
solana-banks-server = { path = "../banks-server", version = "1.4.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.4.23" }
|
||||
solana-client = { path = "../client", version = "1.4.23" }
|
||||
solana-faucet = { path = "../faucet", version = "1.4.23" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "1.4.23" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "1.4.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.4.23" }
|
||||
solana-logger = { path = "../logger", version = "1.4.23" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.4.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.4.23" }
|
||||
solana-measure = { path = "../measure", version = "1.4.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.4.23" }
|
||||
solana-perf = { path = "../perf", version = "1.4.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.4.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.4.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.4.23" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.4.23" }
|
||||
solana-streamer = { path = "../streamer", version = "1.4.23" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.4.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.4.23" }
|
||||
solana-version = { path = "../version", version = "1.4.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.4.23" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.4.23" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "0.2.22", features = ["full"] }
|
||||
tokio = { version = "0.2", features = ["full"] }
|
||||
tokio_01 = { version = "0.1", package = "tokio" }
|
||||
tokio_01_bytes = { version = "0.4.7", package = "bytes" }
|
||||
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
|
||||
tokio_io_01 = { version = "0.1", package = "tokio-io" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.4.2" }
|
||||
tokio_codec_01 = { version = "0.1", package = "tokio-codec" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.4.23" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
@@ -94,6 +101,9 @@ name = "banking_stage"
|
||||
[[bench]]
|
||||
name = "blockstore"
|
||||
|
||||
[[bench]]
|
||||
name = "crds"
|
||||
|
||||
[[bench]]
|
||||
name = "crds_gossip_pull"
|
||||
|
||||
|
core/benches/crds.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::ThreadPoolBuilder;
|
||||
use solana_core::crds::Crds;
|
||||
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
use solana_core::crds_value::CrdsValue;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::collections::HashMap;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
fn bench_find_old_labels(bencher: &mut Bencher) {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut rng = thread_rng();
|
||||
let mut crds = Crds::default();
|
||||
let now = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS + CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 1000;
|
||||
std::iter::repeat_with(|| (CrdsValue::new_rand(&mut rng, None), rng.gen_range(0, now)))
|
||||
.take(50_000)
|
||||
.for_each(|(v, ts)| assert!(crds.insert(v, ts).is_ok()));
|
||||
let mut timeouts = HashMap::new();
|
||||
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
|
||||
bencher.iter(|| {
|
||||
let out = crds.find_old_labels(&thread_pool, now, &timeouts);
|
||||
assert!(out.len() > 10);
|
||||
assert!(out.len() < 250);
|
||||
out
|
||||
});
|
||||
}
|
@@ -39,7 +39,7 @@ fn bench_build_crds_filters(bencher: &mut Bencher) {
|
||||
let mut num_inserts = 0;
|
||||
for _ in 0..90_000 {
|
||||
if crds
|
||||
.insert(CrdsValue::new_rand(&mut rng), rng.gen())
|
||||
.insert(CrdsValue::new_rand(&mut rng, None), rng.gen())
|
||||
.is_ok()
|
||||
{
|
||||
num_inserts += 1;
|
||||
|
@@ -7,14 +7,18 @@ use log::*;
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::retransmit_stage::retransmitter;
|
||||
use solana_ledger::entry::Entry;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
|
||||
use solana_ledger::shred::Shredder;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_perf::packet::{Packet, Packets};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::bank_forks::BankForks;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::system_transaction;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
@@ -63,14 +67,24 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
||||
|
||||
// To work reliably with higher values, this needs larger udp rmem size
|
||||
let tx = test_tx();
|
||||
const NUM_PACKETS: usize = 50;
|
||||
let chunk_size = NUM_PACKETS / (4 * NUM_THREADS);
|
||||
let batches = to_packets_chunked(
|
||||
&std::iter::repeat(tx).take(NUM_PACKETS).collect::<Vec<_>>(),
|
||||
chunk_size,
|
||||
);
|
||||
info!("batches: {}", batches.len());
|
||||
let entries: Vec<_> = (0..5)
|
||||
.map(|_| {
|
||||
let keypair0 = Keypair::new();
|
||||
let keypair1 = Keypair::new();
|
||||
let tx0 =
|
||||
system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
|
||||
Entry::new(&Hash::default(), 1, vec![tx0])
|
||||
})
|
||||
.collect();
|
||||
|
||||
let keypair = Arc::new(Keypair::new());
|
||||
let slot = 0;
|
||||
let parent = 0;
|
||||
let shredder =
|
||||
Shredder::new(slot, parent, 0.0, keypair, 0, 0).expect("Failed to create entry shredder");
|
||||
let mut data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
|
||||
|
||||
let num_packets = data_shreds.len();
|
||||
|
||||
let retransmitter_handles = retransmitter(
|
||||
Arc::new(sockets),
|
||||
@@ -80,6 +94,8 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
packet_receiver,
|
||||
);
|
||||
|
||||
let mut index = 0;
|
||||
let mut slot = 0;
|
||||
let total = Arc::new(AtomicUsize::new(0));
|
||||
bencher.iter(move || {
|
||||
let peer_sockets1 = peer_sockets.clone();
|
||||
@@ -96,7 +112,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
while peer_sockets2[p].recv(&mut buf).is_ok() {
|
||||
total2.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
if total2.load(Ordering::Relaxed) >= NUM_PACKETS {
|
||||
if total2.load(Ordering::Relaxed) >= num_packets {
|
||||
break;
|
||||
}
|
||||
info!("{} recv", total2.load(Ordering::Relaxed));
|
||||
@@ -107,9 +123,17 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
})
|
||||
.collect();
|
||||
|
||||
for packets in batches.clone() {
|
||||
packet_sender.send(packets).unwrap();
|
||||
for shred in data_shreds.iter_mut() {
|
||||
shred.set_slot(slot);
|
||||
shred.set_index(index);
|
||||
index += 1;
|
||||
index %= 200;
|
||||
let mut p = Packet::default();
|
||||
shred.copy_to_packet(&mut p);
|
||||
let _ = packet_sender.send(Packets::new(vec![p]));
|
||||
}
|
||||
slot += 1;
|
||||
|
||||
info!("sent...");
|
||||
|
||||
let mut join_time = Measure::start("join");
|
||||
|
@@ -123,8 +123,14 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
|
||||
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
|
||||
let data_shreds = make_shreds(symbol_count);
|
||||
bencher.iter(|| {
|
||||
Shredder::generate_coding_shreds(0, RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], 0)
|
||||
.len();
|
||||
Shredder::generate_coding_shreds(
|
||||
0,
|
||||
RECOMMENDED_FEC_RATE,
|
||||
&data_shreds[..symbol_count],
|
||||
0,
|
||||
symbol_count,
|
||||
)
|
||||
.len();
|
||||
})
|
||||
}
|
||||
|
||||
@@ -132,8 +138,13 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
|
||||
fn bench_shredder_decoding(bencher: &mut Bencher) {
|
||||
let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
|
||||
let data_shreds = make_shreds(symbol_count);
|
||||
let coding_shreds =
|
||||
Shredder::generate_coding_shreds(0, RECOMMENDED_FEC_RATE, &data_shreds[..symbol_count], 0);
|
||||
let coding_shreds = Shredder::generate_coding_shreds(
|
||||
0,
|
||||
RECOMMENDED_FEC_RATE,
|
||||
&data_shreds[..symbol_count],
|
||||
0,
|
||||
symbol_count,
|
||||
);
|
||||
bencher.iter(|| {
|
||||
Shredder::try_recovery(
|
||||
coding_shreds[..].to_vec(),
|
||||
|
@@ -4,10 +4,11 @@
|
||||
// hash on gossip. Monitor gossip for messages from validators in the --trusted-validators
|
||||
// set and halt the node if a mismatch is detected.
|
||||
|
||||
use crate::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES};
|
||||
use solana_runtime::snapshot_package::{
|
||||
AccountsPackage, AccountsPackageReceiver, AccountsPackageSender,
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES},
|
||||
snapshot_packager_service::PendingSnapshotPackage,
|
||||
};
|
||||
use solana_runtime::snapshot_package::{AccountsPackage, AccountsPackageReceiver};
|
||||
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::{
|
||||
@@ -27,7 +28,7 @@ pub struct AccountsHashVerifier {
|
||||
impl AccountsHashVerifier {
|
||||
pub fn new(
|
||||
accounts_package_receiver: AccountsPackageReceiver,
|
||||
accounts_package_sender: Option<AccountsPackageSender>,
|
||||
pending_snapshot_package: Option<PendingSnapshotPackage>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
@@ -53,7 +54,7 @@ impl AccountsHashVerifier {
|
||||
&cluster_info,
|
||||
&trusted_validators,
|
||||
halt_on_trusted_validators_accounts_hash_mismatch,
|
||||
&accounts_package_sender,
|
||||
&pending_snapshot_package,
|
||||
&mut hashes,
|
||||
&exit,
|
||||
fault_injection_rate_slots,
|
||||
@@ -76,24 +77,24 @@ impl AccountsHashVerifier {
|
||||
cluster_info: &ClusterInfo,
|
||||
trusted_validators: &Option<HashSet<Pubkey>>,
|
||||
halt_on_trusted_validator_accounts_hash_mismatch: bool,
|
||||
accounts_package_sender: &Option<AccountsPackageSender>,
|
||||
pending_snapshot_package: &Option<PendingSnapshotPackage>,
|
||||
hashes: &mut Vec<(Slot, Hash)>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
fault_injection_rate_slots: u64,
|
||||
snapshot_interval_slots: u64,
|
||||
) {
|
||||
if fault_injection_rate_slots != 0
|
||||
&& accounts_package.root % fault_injection_rate_slots == 0
|
||||
&& accounts_package.slot % fault_injection_rate_slots == 0
|
||||
{
|
||||
// For testing, publish an invalid hash to gossip.
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_sdk::hash::extend_and_hash;
|
||||
warn!("inserting fault at slot: {}", accounts_package.root);
|
||||
warn!("inserting fault at slot: {}", accounts_package.slot);
|
||||
let rand = thread_rng().gen_range(0, 10);
|
||||
let hash = extend_and_hash(&accounts_package.hash, &[rand]);
|
||||
hashes.push((accounts_package.root, hash));
|
||||
hashes.push((accounts_package.slot, hash));
|
||||
} else {
|
||||
hashes.push((accounts_package.root, accounts_package.hash));
|
||||
hashes.push((accounts_package.slot, accounts_package.hash));
|
||||
}
|
||||
|
||||
while hashes.len() > MAX_SNAPSHOT_HASHES {
|
||||
@@ -111,8 +112,8 @@ impl AccountsHashVerifier {
|
||||
}
|
||||
|
||||
if accounts_package.block_height % snapshot_interval_slots == 0 {
|
||||
if let Some(sender) = accounts_package_sender.as_ref() {
|
||||
if sender.send(accounts_package).is_err() {}
|
||||
if let Some(pending_snapshot_package) = pending_snapshot_package.as_ref() {
|
||||
*pending_snapshot_package.lock().unwrap() = Some(accounts_package);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -175,7 +176,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::cluster_info::make_accounts_hashes_message;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use solana_runtime::bank_forks::CompressionType;
|
||||
use solana_runtime::bank_forks::ArchiveFormat;
|
||||
use solana_runtime::snapshot_utils::SnapshotVersion;
|
||||
use solana_sdk::{
|
||||
hash::hash,
|
||||
@@ -234,12 +235,12 @@ mod tests {
|
||||
let accounts_package = AccountsPackage {
|
||||
hash: hash(&[i as u8]),
|
||||
block_height: 100 + i as u64,
|
||||
root: 100 + i as u64,
|
||||
slot: 100 + i as u64,
|
||||
slot_deltas: vec![],
|
||||
snapshot_links,
|
||||
tar_output_file: PathBuf::from("."),
|
||||
storages: vec![],
|
||||
compression: CompressionType::Bzip2,
|
||||
archive_format: ArchiveFormat::TarBzip2,
|
||||
snapshot_version: SnapshotVersion::default(),
|
||||
};
|
||||
|
||||
@@ -254,6 +255,9 @@ mod tests {
|
||||
0,
|
||||
100,
|
||||
);
|
||||
// sleep for 1ms to create a newer timestamp for gossip entry
|
||||
// otherwise the timestamp won't be newer.
|
||||
std::thread::sleep(Duration::from_millis(1));
|
||||
}
|
||||
cluster_info.flush_push_queue();
|
||||
let cluster_hashes = cluster_info
|
||||
|
@@ -1,151 +0,0 @@
|
||||
use crate::{
|
||||
consensus::{ComputedBankState, Tower},
|
||||
fork_choice::ForkChoice,
|
||||
progress_map::{ForkStats, ProgressMap},
|
||||
};
|
||||
use solana_runtime::{bank::Bank, bank_forks::BankForks};
|
||||
use solana_sdk::timing;
|
||||
use std::time::Instant;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct BankWeightForkChoice {}
|
||||
|
||||
impl ForkChoice for BankWeightForkChoice {
|
||||
fn compute_bank_stats(
|
||||
&mut self,
|
||||
bank: &Bank,
|
||||
_tower: &Tower,
|
||||
progress: &mut ProgressMap,
|
||||
computed_bank_state: &ComputedBankState,
|
||||
) {
|
||||
let bank_slot = bank.slot();
|
||||
// Only time progress map should be missing a bank slot
|
||||
// is if this node was the leader for this slot as those banks
|
||||
// are not replayed in replay_active_banks()
|
||||
let parent_weight = bank
|
||||
.parent()
|
||||
.and_then(|b| progress.get(&b.slot()))
|
||||
.map(|x| x.fork_stats.fork_weight)
|
||||
.unwrap_or(0);
|
||||
|
||||
let stats = progress
|
||||
.get_fork_stats_mut(bank_slot)
|
||||
.expect("All frozen banks must exist in the Progress map");
|
||||
|
||||
let ComputedBankState { bank_weight, .. } = computed_bank_state;
|
||||
stats.weight = *bank_weight;
|
||||
stats.fork_weight = stats.weight + parent_weight;
|
||||
}
|
||||
|
||||
// Returns:
|
||||
// 1) The heaviest overall bank
|
||||
// 2) The heaviest bank on the same fork as the last vote (doesn't require a
|
||||
// switching proof to vote for)
|
||||
fn select_forks(
|
||||
&self,
|
||||
frozen_banks: &[Arc<Bank>],
|
||||
tower: &Tower,
|
||||
progress: &ProgressMap,
|
||||
ancestors: &HashMap<u64, HashSet<u64>>,
|
||||
_bank_forks: &RwLock<BankForks>,
|
||||
) -> (Arc<Bank>, Option<Arc<Bank>>) {
|
||||
let tower_start = Instant::now();
|
||||
assert!(!frozen_banks.is_empty());
|
||||
let num_frozen_banks = frozen_banks.len();
|
||||
|
||||
trace!("frozen_banks {}", frozen_banks.len());
|
||||
let num_old_banks = frozen_banks
|
||||
.iter()
|
||||
.filter(|b| b.slot() < tower.root())
|
||||
.count();
|
||||
|
||||
let last_voted_slot = tower.last_voted_slot();
|
||||
let mut heaviest_bank_on_same_fork = None;
|
||||
let mut heaviest_same_fork_weight = 0;
|
||||
let stats: Vec<&ForkStats> = frozen_banks
|
||||
.iter()
|
||||
.map(|bank| {
|
||||
// Only time progress map should be missing a bank slot
|
||||
// is if this node was the leader for this slot as those banks
|
||||
// are not replayed in replay_active_banks()
|
||||
let stats = progress
|
||||
.get_fork_stats(bank.slot())
|
||||
.expect("All frozen banks must exist in the Progress map");
|
||||
|
||||
if let Some(last_voted_slot) = last_voted_slot {
|
||||
if ancestors
|
||||
.get(&bank.slot())
|
||||
.expect("Entry in frozen banks must exist in ancestors")
|
||||
.contains(&last_voted_slot)
|
||||
{
|
||||
// Descendant of last vote cannot be locked out
|
||||
assert!(!stats.is_locked_out);
|
||||
|
||||
// ancestors(slot) should not contain the slot itself,
|
||||
// so we should never get the same bank as the last vote
|
||||
assert_ne!(bank.slot(), last_voted_slot);
|
||||
// highest weight, lowest slot first. frozen_banks is sorted
|
||||
// from least slot to greatest slot, so if two banks have
|
||||
// the same fork weight, the lower slot will be picked
|
||||
if stats.fork_weight > heaviest_same_fork_weight {
|
||||
heaviest_bank_on_same_fork = Some(bank.clone());
|
||||
heaviest_same_fork_weight = stats.fork_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stats
|
||||
})
|
||||
.collect();
|
||||
let num_not_recent = stats.iter().filter(|s| !s.is_recent).count();
|
||||
let num_has_voted = stats.iter().filter(|s| s.has_voted).count();
|
||||
let num_empty = stats.iter().filter(|s| s.is_empty).count();
|
||||
let num_threshold_failure = stats.iter().filter(|s| !s.vote_threshold).count();
|
||||
let num_votable_threshold_failure = stats
|
||||
.iter()
|
||||
.filter(|s| s.is_recent && !s.has_voted && !s.vote_threshold)
|
||||
.count();
|
||||
|
||||
let mut candidates: Vec<_> = frozen_banks.iter().zip(stats.iter()).collect();
|
||||
|
||||
//highest weight, lowest slot first
|
||||
candidates.sort_by_key(|b| (b.1.fork_weight, 0i64 - b.0.slot() as i64));
|
||||
let rv = candidates
|
||||
.last()
|
||||
.expect("frozen banks was nonempty so candidates must also be nonempty");
|
||||
let ms = timing::duration_as_ms(&tower_start.elapsed());
|
||||
let weights: Vec<(u128, u64, u64)> = candidates
|
||||
.iter()
|
||||
.map(|x| (x.1.weight, x.0.slot(), x.1.block_height))
|
||||
.collect();
|
||||
debug!(
|
||||
"@{:?} tower duration: {:?} len: {}/{} weights: {:?}",
|
||||
timing::timestamp(),
|
||||
ms,
|
||||
candidates.len(),
|
||||
stats.iter().filter(|s| !s.has_voted).count(),
|
||||
weights,
|
||||
);
|
||||
datapoint_debug!(
|
||||
"replay_stage-select_forks",
|
||||
("frozen_banks", num_frozen_banks as i64, i64),
|
||||
("not_recent", num_not_recent as i64, i64),
|
||||
("has_voted", num_has_voted as i64, i64),
|
||||
("old_banks", num_old_banks as i64, i64),
|
||||
("empty_banks", num_empty as i64, i64),
|
||||
("threshold_failure", num_threshold_failure as i64, i64),
|
||||
(
|
||||
"votable_threshold_failure",
|
||||
num_votable_threshold_failure as i64,
|
||||
i64
|
||||
),
|
||||
("tower_duration", ms as i64, i64),
|
||||
);
|
||||
|
||||
(rv.0.clone(), heaviest_bank_on_same_fork)
|
||||
}
|
||||
}
|
@@ -4,7 +4,7 @@
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
|
||||
poh_service::PohService,
|
||||
poh_service::{self, PohService},
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
|
||||
use itertools::Itertools;
|
||||
@@ -23,7 +23,7 @@ use solana_perf::{
|
||||
};
|
||||
use solana_runtime::{
|
||||
accounts_db::ErrorCounters,
|
||||
bank::{Bank, TransactionBalancesSet, TransactionProcessResult},
|
||||
bank::{Bank, TransactionBalancesSet, TransactionCheckResult, TransactionExecutionResult},
|
||||
bank_utils,
|
||||
transaction_batch::TransactionBatch,
|
||||
vote_sender_types::ReplayVoteSender,
|
||||
@@ -53,7 +53,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
|
||||
pub type UnprocessedPackets = Vec<PacketsAndOffsets>;
|
||||
|
||||
/// Transaction forwarding
|
||||
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;
|
||||
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2;
|
||||
|
||||
// Fixed thread size seems to be fastest on GCP setup
|
||||
pub const NUM_THREADS: u32 = 4;
|
||||
@@ -460,7 +460,7 @@ impl BankingStage {
|
||||
fn record_transactions(
|
||||
bank_slot: Slot,
|
||||
txs: &[Transaction],
|
||||
results: &[TransactionProcessResult],
|
||||
results: &[TransactionExecutionResult],
|
||||
poh: &Arc<Mutex<PohRecorder>>,
|
||||
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
|
||||
let mut processed_generation = Measure::start("record::process_generation");
|
||||
@@ -578,7 +578,7 @@ impl BankingStage {
|
||||
bank.clone(),
|
||||
batch.transactions(),
|
||||
batch.iteration_order_vec(),
|
||||
tx_results.processing_results,
|
||||
tx_results.execution_results,
|
||||
TransactionBalancesSet::new(pre_balances, post_balances),
|
||||
inner_instructions,
|
||||
transaction_logs,
|
||||
@@ -719,7 +719,7 @@ impl BankingStage {
|
||||
// This function returns a vector containing index of all valid transactions. A valid
|
||||
// transaction has result Ok() as the value
|
||||
fn filter_valid_transaction_indexes(
|
||||
valid_txs: &[TransactionProcessResult],
|
||||
valid_txs: &[TransactionCheckResult],
|
||||
transaction_indexes: &[usize],
|
||||
) -> Vec<usize> {
|
||||
let valid_transactions = valid_txs
|
||||
@@ -1072,7 +1072,13 @@ pub fn create_test_recorder(
|
||||
poh_recorder.set_bank(&bank);
|
||||
|
||||
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
|
||||
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
|
||||
let poh_service = PohService::new(
|
||||
poh_recorder.clone(),
|
||||
&poh_config,
|
||||
&exit,
|
||||
bank.ticks_per_slot(),
|
||||
poh_service::DEFAULT_PINNED_CPU_CORE,
|
||||
);
|
||||
|
||||
(exit, poh_recorder, poh_service, entry_receiver)
|
||||
}
|
||||
@@ -1093,7 +1099,6 @@ mod tests {
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_perf::packet::to_packets;
|
||||
use solana_runtime::bank::HashAgeKind;
|
||||
use solana_sdk::{
|
||||
instruction::InstructionError,
|
||||
signature::{Keypair, Signer},
|
||||
@@ -1457,10 +1462,7 @@ mod tests {
|
||||
system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()),
|
||||
];
|
||||
|
||||
let mut results = vec![
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
];
|
||||
let mut results = vec![(Ok(()), None), (Ok(()), None)];
|
||||
let _ = BankingStage::record_transactions(
|
||||
bank.slot(),
|
||||
&transactions,
|
||||
@@ -1476,7 +1478,7 @@ mod tests {
|
||||
1,
|
||||
SystemError::ResultWithNegativeLamports.into(),
|
||||
)),
|
||||
Some(HashAgeKind::Extant),
|
||||
None,
|
||||
);
|
||||
let (res, retryable) = BankingStage::record_transactions(
|
||||
bank.slot(),
|
||||
@@ -1652,10 +1654,10 @@ mod tests {
|
||||
&[
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), None),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), None),
|
||||
(Ok(()), None),
|
||||
],
|
||||
&[2, 4, 5, 9, 11, 13]
|
||||
),
|
||||
@@ -1665,12 +1667,12 @@ mod tests {
|
||||
assert_eq!(
|
||||
BankingStage::filter_valid_transaction_indexes(
|
||||
&[
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), None),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Err(TransactionError::BlockhashNotFound), None),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), Some(HashAgeKind::Extant)),
|
||||
(Ok(()), None),
|
||||
(Ok(()), None),
|
||||
(Ok(()), None),
|
||||
],
|
||||
&[1, 6, 7, 9, 31, 43]
|
||||
),
|
||||
|
@@ -2,7 +2,7 @@ use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::timing::slot_duration_from_slots_per_year;
|
||||
use solana_sdk::{feature_set, timing::slot_duration_from_slots_per_year};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
@@ -60,13 +60,24 @@ impl CacheBlockTimeService {
|
||||
}
|
||||
|
||||
fn cache_block_time(bank: Arc<Bank>, blockstore: &Arc<Blockstore>) {
|
||||
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
|
||||
let epoch = bank.epoch_schedule().get_epoch(bank.slot());
|
||||
let stakes = HashMap::new();
|
||||
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
|
||||
if bank
|
||||
.feature_set
|
||||
.is_active(&feature_set::timestamp_correction::id())
|
||||
{
|
||||
if let Err(e) = blockstore.cache_block_time(bank.slot(), bank.clock().unix_timestamp) {
|
||||
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
|
||||
}
|
||||
} else {
|
||||
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
|
||||
let epoch = bank.epoch_schedule().get_epoch(bank.slot());
|
||||
let stakes = HashMap::new();
|
||||
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
|
||||
|
||||
if let Err(e) = blockstore.cache_block_time(bank.slot(), slot_duration, stakes) {
|
||||
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
|
||||
if let Err(e) =
|
||||
blockstore.cache_block_time_from_slot_entries(bank.slot(), slot_duration, stakes)
|
||||
{
|
||||
error!("cache_block_time failed: slot {:?} {:?}", bank.slot(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
@@ -27,7 +27,7 @@ use solana_runtime::{
|
||||
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT},
|
||||
epoch_schedule::EpochSchedule,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
@@ -98,7 +98,7 @@ impl VoteTracker {
|
||||
epoch_schedule: *root_bank.epoch_schedule(),
|
||||
..VoteTracker::default()
|
||||
};
|
||||
vote_tracker.process_new_root_bank(&root_bank);
|
||||
vote_tracker.progress_with_new_root_bank(&root_bank);
|
||||
assert_eq!(
|
||||
*vote_tracker.leader_schedule_epoch.read().unwrap(),
|
||||
root_bank.get_leader_schedule_epoch(root_bank.slot())
|
||||
@@ -174,7 +174,7 @@ impl VoteTracker {
|
||||
self.keys.get_or_insert(&pubkey);
|
||||
}
|
||||
|
||||
fn update_leader_schedule_epoch(&self, root_bank: &Bank) {
|
||||
fn progress_leader_schedule_epoch(&self, root_bank: &Bank) {
|
||||
// Update with any newly calculated epoch state about future epochs
|
||||
let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap();
|
||||
let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch;
|
||||
@@ -205,7 +205,7 @@ impl VoteTracker {
|
||||
}
|
||||
}
|
||||
|
||||
fn update_new_root(&self, root_bank: &Bank) {
|
||||
fn purge_stale_state(&self, root_bank: &Bank) {
|
||||
// Purge any outdated slot data
|
||||
let new_root = root_bank.slot();
|
||||
let root_epoch = root_bank.epoch();
|
||||
@@ -220,15 +220,15 @@ impl VoteTracker {
|
||||
self.epoch_authorized_voters
|
||||
.write()
|
||||
.unwrap()
|
||||
.retain(|epoch, _| epoch >= &root_epoch);
|
||||
.retain(|epoch, _| *epoch >= root_epoch);
|
||||
self.keys.purge();
|
||||
*self.current_epoch.write().unwrap() = root_epoch;
|
||||
}
|
||||
}
|
||||
|
||||
fn process_new_root_bank(&self, root_bank: &Bank) {
|
||||
self.update_leader_schedule_epoch(root_bank);
|
||||
self.update_new_root(root_bank);
|
||||
fn progress_with_new_root_bank(&self, root_bank: &Bank) {
|
||||
self.progress_leader_schedule_epoch(root_bank);
|
||||
self.purge_stale_state(root_bank);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -425,7 +425,7 @@ impl ClusterInfoVoteListener {
|
||||
blockstore: Arc<Blockstore>,
|
||||
bank_notification_sender: Option<BankNotificationSender>,
|
||||
) -> Result<()> {
|
||||
let mut optimistic_confirmation_verifier =
|
||||
let mut confirmation_verifier =
|
||||
OptimisticConfirmationVerifier::new(bank_forks.read().unwrap().root());
|
||||
let mut last_process_root = Instant::now();
|
||||
loop {
|
||||
@@ -434,21 +434,21 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
if last_process_root.elapsed().as_millis() > 400 {
|
||||
let unrooted_optimistic_slots = optimistic_confirmation_verifier
|
||||
.get_unrooted_optimistic_slots(&root_bank, &blockstore);
|
||||
if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 {
|
||||
let unrooted_optimistic_slots = confirmation_verifier
|
||||
.verify_for_unrooted_optimistic_slots(&root_bank, &blockstore);
|
||||
// SlotVoteTracker's for all `slots` in `unrooted_optimistic_slots`
|
||||
// should still be available because we haven't purged in
|
||||
// `process_new_root_bank()` yet, which is called below
|
||||
// `progress_with_new_root_bank()` yet, which is called below
|
||||
OptimisticConfirmationVerifier::log_unrooted_optimistic_slots(
|
||||
&root_bank,
|
||||
&vote_tracker,
|
||||
&unrooted_optimistic_slots,
|
||||
);
|
||||
vote_tracker.process_new_root_bank(&root_bank);
|
||||
vote_tracker.progress_with_new_root_bank(&root_bank);
|
||||
last_process_root = Instant::now();
|
||||
}
|
||||
let optimistic_confirmed_slots = Self::get_and_process_votes(
|
||||
let confirmed_slots = Self::listen_and_confirm_votes(
|
||||
&gossip_vote_txs_receiver,
|
||||
&vote_tracker,
|
||||
&root_bank,
|
||||
@@ -457,19 +457,17 @@ impl ClusterInfoVoteListener {
|
||||
&replay_votes_receiver,
|
||||
&bank_notification_sender,
|
||||
);
|
||||
|
||||
if let Err(e) = optimistic_confirmed_slots {
|
||||
match e {
|
||||
match confirmed_slots {
|
||||
Ok(confirmed_slots) => {
|
||||
confirmation_verifier.add_new_optimistic_confirmed_slots(confirmed_slots);
|
||||
}
|
||||
Err(e) => match e {
|
||||
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
|
||||
| Error::ReadyTimeoutError => (),
|
||||
_ => {
|
||||
error!("thread {:?} error {:?}", thread::current().name(), e);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let optimistic_confirmed_slots = optimistic_confirmed_slots.unwrap();
|
||||
optimistic_confirmation_verifier
|
||||
.add_new_optimistic_confirmed_slots(optimistic_confirmed_slots);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -483,7 +481,7 @@ impl ClusterInfoVoteListener {
|
||||
verified_vote_sender: &VerifiedVoteSender,
|
||||
replay_votes_receiver: &ReplayVoteReceiver,
|
||||
) -> Result<Vec<(Slot, Hash)>> {
|
||||
Self::get_and_process_votes(
|
||||
Self::listen_and_confirm_votes(
|
||||
gossip_vote_txs_receiver,
|
||||
vote_tracker,
|
||||
root_bank,
|
||||
@@ -494,7 +492,7 @@ impl ClusterInfoVoteListener {
|
||||
)
|
||||
}
|
||||
|
||||
fn get_and_process_votes(
|
||||
fn listen_and_confirm_votes(
|
||||
gossip_vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
|
||||
vote_tracker: &VoteTracker,
|
||||
root_bank: &Bank,
|
||||
@@ -523,7 +521,7 @@ impl ClusterInfoVoteListener {
|
||||
let gossip_vote_txs: Vec<_> = gossip_vote_txs_receiver.try_iter().flatten().collect();
|
||||
let replay_votes: Vec<_> = replay_votes_receiver.try_iter().collect();
|
||||
if !gossip_vote_txs.is_empty() || !replay_votes.is_empty() {
|
||||
return Ok(Self::process_votes(
|
||||
return Ok(Self::filter_and_confirm_with_new_votes(
|
||||
vote_tracker,
|
||||
gossip_vote_txs,
|
||||
replay_votes,
|
||||
@@ -541,7 +539,7 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn update_new_votes(
|
||||
fn track_new_votes_and_notify_confirmations(
|
||||
vote: Vote,
|
||||
vote_pubkey: &Pubkey,
|
||||
vote_tracker: &VoteTracker,
|
||||
@@ -557,56 +555,52 @@ impl ClusterInfoVoteListener {
|
||||
return;
|
||||
}
|
||||
|
||||
let last_vote_slot = vote.slots.last().unwrap();
|
||||
let last_vote_slot = *vote.slots.last().unwrap();
|
||||
let last_vote_hash = vote.hash;
|
||||
|
||||
let root = root_bank.slot();
|
||||
let last_vote_hash = vote.hash;
|
||||
let mut is_new_vote = false;
|
||||
for slot in vote.slots.iter().rev() {
|
||||
// If slot is before the root, or so far ahead we don't have
|
||||
// stake information, then ignore it
|
||||
let epoch = root_bank.epoch_schedule().get_epoch(*slot);
|
||||
// If slot is before the root, ignore it
|
||||
for slot in vote.slots.iter().filter(|slot| **slot > root).rev() {
|
||||
let slot = *slot;
|
||||
|
||||
// if we don't have stake information, ignore it
|
||||
let epoch = root_bank.epoch_schedule().get_epoch(slot);
|
||||
let epoch_stakes = root_bank.epoch_stakes(epoch);
|
||||
if *slot <= root || epoch_stakes.is_none() {
|
||||
if epoch_stakes.is_none() {
|
||||
continue;
|
||||
}
|
||||
let epoch_stakes = epoch_stakes.unwrap();
|
||||
let epoch_vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
|
||||
let total_epoch_stake = epoch_stakes.total_stake();
|
||||
let unduplicated_pubkey = vote_tracker.keys.get_or_insert(&vote_pubkey);
|
||||
|
||||
// The last vote slot, which is the greatest slot in the stack
|
||||
// of votes in a vote transaction, qualifies for optimistic confirmation.
|
||||
let update_optimistic_confirmation_info = if slot == last_vote_slot {
|
||||
let stake = epoch_vote_accounts
|
||||
if slot == last_vote_slot {
|
||||
let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
|
||||
let stake = vote_accounts
|
||||
.get(&vote_pubkey)
|
||||
.map(|(stake, _)| *stake)
|
||||
.unwrap_or(0);
|
||||
Some((stake, last_vote_hash))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
.unwrap_or_default();
|
||||
let total_stake = epoch_stakes.total_stake();
|
||||
|
||||
// If this vote for this slot qualifies for optimistic confirmation
|
||||
if let Some((stake, hash)) = update_optimistic_confirmation_info {
|
||||
// Fast track processing of the last slot in a vote transactions
|
||||
// so that notifications for optimistic confirmation can be sent
|
||||
// as soon as possible.
|
||||
let (is_confirmed, is_new) = Self::add_optimistic_confirmation_vote(
|
||||
let (is_confirmed, is_new) = Self::track_optimistic_confirmation_vote(
|
||||
vote_tracker,
|
||||
*slot,
|
||||
hash,
|
||||
last_vote_slot,
|
||||
last_vote_hash,
|
||||
unduplicated_pubkey.clone(),
|
||||
stake,
|
||||
total_epoch_stake,
|
||||
total_stake,
|
||||
);
|
||||
|
||||
if is_confirmed {
|
||||
new_optimistic_confirmed_slots.push((*slot, last_vote_hash));
|
||||
new_optimistic_confirmed_slots.push((last_vote_slot, last_vote_hash));
|
||||
// Notify subscribers about new optimistic confirmation
|
||||
if let Some(sender) = bank_notification_sender {
|
||||
sender
|
||||
.send(BankNotification::OptimisticallyConfirmed(*slot))
|
||||
.send(BankNotification::OptimisticallyConfirmed(last_vote_slot))
|
||||
.unwrap_or_else(|err| {
|
||||
warn!("bank_notification_sender failed: {:?}", err)
|
||||
});
|
||||
@@ -617,7 +611,7 @@ impl ClusterInfoVoteListener {
                // By now:
                // 1) The vote must have come from ReplayStage,
                // 2) We've seen this vote from replay for this hash before
                // (`add_optimistic_confirmation_vote()` will not set `is_new == true`
                // (`track_optimistic_confirmation_vote()` will not set `is_new == true`
                // for same slot different hash), so short circuit because this vote
                // has no new information

@@ -629,7 +623,7 @@ impl ClusterInfoVoteListener {
                is_new_vote = is_new;
            }

            diff.entry(*slot)
            diff.entry(slot)
                .or_default()
                .entry(unduplicated_pubkey)
                .and_modify(|seen_in_gossip_previously| {
@@ -644,7 +638,40 @@ impl ClusterInfoVoteListener {
        }
    }

    fn process_votes(
    fn filter_gossip_votes(
        vote_tracker: &VoteTracker,
        vote_pubkey: &Pubkey,
        vote: &Vote,
        gossip_tx: &Transaction,
    ) -> bool {
        if vote.slots.is_empty() {
            return false;
        }
        let last_vote_slot = vote.slots.last().unwrap();
        // Votes from gossip need to be verified as they have not been
        // verified by the replay pipeline. Determine the authorized voter
        // based on the last vote slot. This will drop votes from authorized
        // voters trying to make votes for slots earlier than the epoch for
        // which they are authorized
        let actual_authorized_voter =
            vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);

        if actual_authorized_voter.is_none() {
            return false;
        }

        // Voting without the correct authorized pubkey, dump the vote
        if !VoteTracker::vote_contains_authorized_voter(
            &gossip_tx,
            &actual_authorized_voter.unwrap(),
        ) {
            return false;
        }

        true
    }

    fn filter_and_confirm_with_new_votes(
        vote_tracker: &VoteTracker,
        gossip_vote_txs: Vec<Transaction>,
        replayed_votes: Vec<ReplayedVote>,
|
||||
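A rough, self-contained sketch of the filtering rule described in the comments above: a gossip vote is kept only if an authorized voter is known for the epoch of its last vote slot and that voter actually signed the transaction. The types, the `authorized_voters` map, and the flat `slots_per_epoch` assumption below are stand-ins, not the real `VoteTracker` API.

```rust
use std::collections::{HashMap, HashSet};

type Pubkey = [u8; 32];
type Slot = u64;

struct GossipVote {
    vote_pubkey: Pubkey,
    slots: Vec<Slot>,
    // Pubkeys that signed the enclosing transaction.
    signers: HashSet<Pubkey>,
}

/// Keep the vote only if (a) it votes on at least one slot, (b) an authorized
/// voter is known for the epoch containing the last vote slot, and (c) that
/// authorized voter signed the transaction.
fn filter_gossip_vote(
    vote: &GossipVote,
    // (vote account, epoch) -> authorized voter; a stand-in for the tracker lookup.
    authorized_voters: &HashMap<(Pubkey, u64), Pubkey>,
    slots_per_epoch: u64,
) -> bool {
    let last_vote_slot = match vote.slots.last() {
        Some(slot) => *slot,
        None => return false,
    };
    let epoch = last_vote_slot / slots_per_epoch;
    match authorized_voters.get(&(vote.vote_pubkey, epoch)) {
        Some(authorized) => vote.signers.contains(authorized),
        None => false,
    }
}

fn main() {
    let voter = [7u8; 32];
    let vote_account = [1u8; 32];
    let mut authorized = HashMap::new();
    authorized.insert((vote_account, 2u64), voter);
    let vote = GossipVote {
        vote_pubkey: vote_account,
        slots: vec![80, 95],
        signers: [voter].into_iter().collect(),
    };
    // An epoch length of 43 puts slot 95 in epoch 2, where `voter` is authorized.
    assert!(filter_gossip_vote(&vote, &authorized, 43));
}
```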
@@ -662,37 +689,13 @@ impl ClusterInfoVoteListener {
|
||||
.filter_map(|gossip_tx| {
|
||||
vote_transaction::parse_vote_transaction(gossip_tx)
|
||||
.filter(|(vote_pubkey, vote, _)| {
|
||||
if vote.slots.is_empty() {
|
||||
return false;
|
||||
}
|
||||
let last_vote_slot = vote.slots.last().unwrap();
|
||||
// Votes from gossip need to be verified as they have not been
|
||||
// verified by the replay pipeline. Determine the authorized voter
|
||||
// based on the last vote slot. This will drop votes from authorized
|
||||
// voters trying to make votes for slots earlier than the epoch for
|
||||
// which they are authorized
|
||||
let actual_authorized_voter =
|
||||
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
|
||||
|
||||
if actual_authorized_voter.is_none() {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Voting without the correct authorized pubkey, dump the vote
|
||||
if !VoteTracker::vote_contains_authorized_voter(
|
||||
&gossip_tx,
|
||||
&actual_authorized_voter.unwrap(),
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
Self::filter_gossip_votes(vote_tracker, vote_pubkey, vote, gossip_tx)
|
||||
})
|
||||
.map(|v| (true, v))
|
||||
})
|
||||
.chain(replayed_votes.into_iter().map(|v| (false, v)))
|
||||
{
|
||||
Self::update_new_votes(
|
||||
Self::track_new_votes_and_notify_confirmations(
|
||||
vote,
|
||||
&vote_pubkey,
|
||||
&vote_tracker,
|
||||
@@ -757,7 +760,7 @@ impl ClusterInfoVoteListener {
|
||||
|
||||
// Returns if the slot was optimistically confirmed, and whether
|
||||
// the slot was new
|
||||
fn add_optimistic_confirmation_vote(
|
||||
fn track_optimistic_confirmation_vote(
|
||||
vote_tracker: &VoteTracker,
|
||||
slot: Slot,
|
||||
hash: Hash,
|
||||
@@ -909,7 +912,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.contains_key(&bank.slot()));
|
||||
let bank1 = Bank::new_from_parent(&bank, &Pubkey::default(), bank.slot() + 1);
|
||||
vote_tracker.process_new_root_bank(&bank1);
|
||||
vote_tracker.progress_with_new_root_bank(&bank1);
|
||||
assert!(!vote_tracker
|
||||
.slot_vote_trackers
|
||||
.read()
|
||||
@@ -926,7 +929,7 @@ mod tests {
|
||||
bank.epoch_schedule()
|
||||
.get_first_slot_in_epoch(current_epoch + 1),
|
||||
);
|
||||
vote_tracker.process_new_root_bank(&new_epoch_bank);
|
||||
vote_tracker.progress_with_new_root_bank(&new_epoch_bank);
|
||||
assert!(!vote_tracker.keys.0.read().unwrap().contains(&new_voter));
|
||||
assert_eq!(
|
||||
*vote_tracker.current_epoch.read().unwrap(),
|
||||
@@ -956,7 +959,7 @@ mod tests {
|
||||
);
|
||||
let next_leader_schedule_bank =
|
||||
Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed);
|
||||
vote_tracker.update_leader_schedule_epoch(&next_leader_schedule_bank);
|
||||
vote_tracker.progress_leader_schedule_epoch(&next_leader_schedule_bank);
|
||||
assert_eq!(
|
||||
*vote_tracker.leader_schedule_epoch.read().unwrap(),
|
||||
next_leader_schedule_epoch
|
||||
@@ -1007,7 +1010,7 @@ mod tests {
|
||||
&votes_sender,
|
||||
&replay_votes_sender,
|
||||
);
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
ClusterInfoVoteListener::listen_and_confirm_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
&bank3,
|
||||
@@ -1036,7 +1039,7 @@ mod tests {
|
||||
&votes_sender,
|
||||
&replay_votes_sender,
|
||||
);
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
ClusterInfoVoteListener::listen_and_confirm_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
&bank3,
|
||||
@@ -1114,7 +1117,7 @@ mod tests {
|
||||
);
|
||||
|
||||
// Check that all the votes were registered for each validator correctly
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
ClusterInfoVoteListener::listen_and_confirm_votes(
|
||||
&votes_txs_receiver,
|
||||
&vote_tracker,
|
||||
&bank0,
|
||||
@@ -1233,7 +1236,7 @@ mod tests {
|
||||
}
|
||||
|
||||
// Read and process votes from channel `votes_receiver`
|
||||
ClusterInfoVoteListener::get_and_process_votes(
|
||||
ClusterInfoVoteListener::listen_and_confirm_votes(
|
||||
&votes_txs_receiver,
|
||||
&vote_tracker,
|
||||
&bank0,
|
||||
@@ -1328,7 +1331,7 @@ mod tests {
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
let _ = ClusterInfoVoteListener::get_and_process_votes(
|
||||
let _ = ClusterInfoVoteListener::listen_and_confirm_votes(
|
||||
&votes_receiver,
|
||||
&vote_tracker,
|
||||
&bank,
|
||||
@@ -1474,7 +1477,7 @@ mod tests {
|
||||
)];
|
||||
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
|
||||
&vote_tracker,
|
||||
vote_tx,
|
||||
// Add gossip vote for same slot, should not affect outcome
|
||||
@@ -1545,7 +1548,7 @@ mod tests {
|
||||
|
||||
let new_root_bank =
|
||||
Bank::new_from_parent(&bank, &Pubkey::default(), first_slot_in_new_epoch - 2);
|
||||
ClusterInfoVoteListener::process_votes(
|
||||
ClusterInfoVoteListener::filter_and_confirm_with_new_votes(
|
||||
&vote_tracker,
|
||||
vote_txs,
|
||||
vec![(
|
||||
|
@@ -106,28 +106,30 @@ impl ClusterSlots {
        }
    }

    pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec<(u64, usize)> {
        let slot_peers = self.lookup(slot);
    pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec<u64> {
        let stakes = {
            let validator_stakes = self.validator_stakes.read().unwrap();
            repair_peers
                .iter()
                .map(|peer| {
                    validator_stakes
                        .get(&peer.id)
                        .map(|node| node.total_stake)
                        .unwrap_or(0)
                        + 1
                })
                .collect()
        };
        let slot_peers = match self.lookup(slot) {
            None => return stakes,
            Some(slot_peers) => slot_peers,
        };
        let slot_peers = slot_peers.read().unwrap();
        repair_peers
            .iter()
            .enumerate()
            .map(|(i, x)| {
                let peer_stake = slot_peers
                    .as_ref()
                    .and_then(|v| v.read().unwrap().get(&x.id).cloned())
                    .unwrap_or(0);
                (
                    1 + peer_stake
                        + self
                            .validator_stakes
                            .read()
                            .unwrap()
                            .get(&x.id)
                            .map(|v| v.total_stake)
                            .unwrap_or(0),
                    i,
                )
            })
            .map(|peer| slot_peers.get(&peer.id).cloned().unwrap_or(0))
            .zip(stakes)
            .map(|(a, b)| a + b)
            .collect()
    }

@@ -228,7 +230,7 @@ mod tests {
    fn test_compute_weights() {
        let cs = ClusterSlots::default();
        let ci = ContactInfo::default();
        assert_eq!(cs.compute_weights(0, &[ci]), vec![(1, 0)]);
        assert_eq!(cs.compute_weights(0, &[ci]), vec![1]);
    }

    #[test]
@@ -249,7 +251,7 @@ mod tests {
        c2.id = k2;
        assert_eq!(
            cs.compute_weights(0, &[c1, c2]),
            vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
            vec![std::u64::MAX / 2 + 1, 1]
        );
    }

@@ -281,7 +283,7 @@ mod tests {
        c2.id = k2;
        assert_eq!(
            cs.compute_weights(0, &[c1, c2]),
            vec![(std::u64::MAX / 2 + 1, 0), (1, 1)]
            vec![std::u64::MAX / 2 + 1, 1]
        );
    }
||||
|
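The weights returned by `compute_weights` above (one per repair peer: the peer's total stake plus one, plus any stake it has confirmed for the slot) are meant to bias peer selection. A minimal sketch of one way such weights could be consumed, using the `rand` crate's `WeightedIndex`; the peers and weight values here are made up for illustration.

```rust
use rand::distributions::{Distribution, WeightedIndex};
use rand::thread_rng;

fn main() {
    // Hypothetical output of compute_weights(): one weight per repair peer,
    // higher for peers with more stake behind the slot being repaired.
    let peers = ["peer_a", "peer_b", "peer_c"];
    let weights: Vec<u64> = vec![1, 5_000, 250];

    // Sample repair peers proportionally to their weight.
    let dist = WeightedIndex::new(&weights).expect("weights must not all be zero");
    let mut rng = thread_rng();
    for _ in 0..5 {
        let choice = dist.sample(&mut rng);
        println!("repair request -> {}", peers[choice]);
    }
}
```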
||||
|
@@ -113,7 +113,17 @@ impl AggregateCommitmentService {
|
||||
"aggregate-commitment-ms",
|
||||
aggregate_commitment_time.as_ms() as i64,
|
||||
i64
|
||||
)
|
||||
),
|
||||
(
|
||||
"highest-confirmed-root",
|
||||
update_commitment_slots.highest_confirmed_root as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"highest-confirmed-slot",
|
||||
update_commitment_slots.highest_confirmed_slot as i64,
|
||||
i64
|
||||
),
|
||||
);
|
||||
|
||||
// Triggers rpc_subscription notifications as soon as new commitment data is available,
|
||||
@@ -176,19 +186,15 @@ impl AggregateCommitmentService {
            if lamports == 0 {
                continue;
            }
            let vote_state = VoteState::from(&account);
            if vote_state.is_none() {
                continue;
            if let Ok(vote_state) = account.vote_state().as_ref() {
                Self::aggregate_commitment_for_vote_account(
                    &mut commitment,
                    &mut rooted_stake,
                    vote_state,
                    ancestors,
                    lamports,
                );
            }

            let vote_state = vote_state.unwrap();
            Self::aggregate_commitment_for_vote_account(
                &mut commitment,
                &mut rooted_stake,
                &vote_state,
                ancestors,
                lamports,
            );
        }

        (commitment, rooted_stake)
|
||||
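For intuition, a heavily simplified model of what the per-account aggregation step does: the account's stake is credited to each slot it has voted on, indexed by lockout depth, and to `rooted_stake` for its root. The `Commitment` map and depth bookkeeping below are stand-ins assumed only for illustration (the real service also credits ancestor slots, omitted here).

```rust
use std::collections::HashMap;

const MAX_LOCKOUT_HISTORY: usize = 31;

/// commitment[slot][depth] = stake that has confirmed `slot` at that lockout depth.
type Commitment = HashMap<u64, [u64; MAX_LOCKOUT_HISTORY]>;

fn aggregate_for_vote_account(
    commitment: &mut Commitment,
    rooted_stake: &mut Vec<(u64, u64)>, // (root slot, stake)
    vote_slots: &[u64],                 // slots in the tower, oldest first
    root_slot: Option<u64>,
    lamports: u64,
) {
    if let Some(root) = root_slot {
        rooted_stake.push((root, lamports));
    }
    // The i-th oldest vote confirms its slot at depth (len - i - 1),
    // mirroring the confirmation count of a tower vote.
    for (i, slot) in vote_slots.iter().enumerate() {
        let depth = vote_slots.len() - i - 1;
        if depth < MAX_LOCKOUT_HISTORY {
            let entry = commitment.entry(*slot).or_insert([0; MAX_LOCKOUT_HISTORY]);
            entry[depth] += lamports;
        }
    }
}

fn main() {
    let mut commitment = Commitment::new();
    let mut rooted_stake = Vec::new();
    aggregate_for_vote_account(&mut commitment, &mut rooted_stake, &[3, 5], Some(1), 42);
    println!("{:?} {:?}", commitment.get(&5), rooted_stake);
}
```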
@@ -427,26 +433,26 @@ mod tests {
|
||||
let mut vote_state1 = VoteState::from(&vote_account1).unwrap();
|
||||
vote_state1.process_slot_vote_unchecked(3);
|
||||
vote_state1.process_slot_vote_unchecked(5);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state1));
|
||||
let versioned = VoteStateVersions::new_current(vote_state1);
|
||||
VoteState::to(&versioned, &mut vote_account1).unwrap();
|
||||
bank.store_account(&pk1, &vote_account1);
|
||||
|
||||
let mut vote_state2 = VoteState::from(&vote_account2).unwrap();
|
||||
vote_state2.process_slot_vote_unchecked(9);
|
||||
vote_state2.process_slot_vote_unchecked(10);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state2));
|
||||
let versioned = VoteStateVersions::new_current(vote_state2);
|
||||
VoteState::to(&versioned, &mut vote_account2).unwrap();
|
||||
bank.store_account(&pk2, &vote_account2);
|
||||
|
||||
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
|
||||
vote_state3.root_slot = Some(1);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
|
||||
let versioned = VoteStateVersions::new_current(vote_state3);
|
||||
VoteState::to(&versioned, &mut vote_account3).unwrap();
|
||||
bank.store_account(&pk3, &vote_account3);
|
||||
|
||||
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
|
||||
vote_state4.root_slot = Some(2);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
|
||||
let versioned = VoteStateVersions::new_current(vote_state4);
|
||||
VoteState::to(&versioned, &mut vote_account4).unwrap();
|
||||
bank.store_account(&pk4, &vote_account4);
|
||||
|
||||
@@ -482,9 +488,14 @@ mod tests {
|
||||
#[test]
|
||||
fn test_highest_confirmed_root_advance() {
|
||||
fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Arc<Bank>) -> Slot {
|
||||
let account = &bank.vote_accounts()[&vote_pubkey].1;
|
||||
let vote_state = VoteState::from(account).unwrap();
|
||||
vote_state.root_slot.unwrap()
|
||||
let (_stake, vote_account) = bank.get_vote_account(&vote_pubkey).unwrap();
|
||||
let slot = vote_account
|
||||
.vote_state()
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.root_slot
|
||||
.unwrap();
|
||||
slot
|
||||
}
|
||||
|
||||
let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests());
|
||||
|
@@ -1,6 +1,7 @@
|
||||
use crate::rpc_subscriptions::RpcSubscriptions;
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_ledger::blockstore::{Blockstore, CompletedDataSetInfo};
use solana_ledger::entry::Entry;
use solana_sdk::signature::Signature;
use std::{
    sync::{
@@ -61,10 +62,7 @@ impl CompletedDataSetsService {
            } = completed_set_info;
            match blockstore.get_entries_in_data_block(slot, start_index, end_index, None) {
                Ok(entries) => {
                    let transactions = entries
                        .into_iter()
                        .flat_map(|e| e.transactions.into_iter().map(|t| t.signatures[0]))
                        .collect::<Vec<Signature>>();
                    let transactions = Self::get_transaction_signatures(entries);
                    if !transactions.is_empty() {
                        rpc_subscriptions.notify_signatures_received((slot, transactions));
                    }
@@ -76,7 +74,51 @@ impl CompletedDataSetsService {
        Ok(())
    }

    fn get_transaction_signatures(entries: Vec<Entry>) -> Vec<Signature> {
        entries
            .into_iter()
            .flat_map(|e| {
                e.transactions
                    .into_iter()
                    .filter_map(|mut t| t.signatures.drain(..).next())
            })
            .collect::<Vec<Signature>>()
    }

    pub fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}

#[cfg(test)]
pub mod test {
    use super::*;
    use solana_sdk::hash::Hash;
    use solana_sdk::signature::{Keypair, Signer};
    use solana_sdk::transaction::Transaction;

    #[test]
    fn test_zero_signatures() {
        let tx = Transaction::new_with_payer(&[], None);
        let entries = vec![Entry::new(&Hash::default(), 1, vec![tx])];
        let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
        assert!(signatures.is_empty());
    }

    #[test]
    fn test_multi_signatures() {
        let kp = Keypair::new();
        let tx =
            Transaction::new_signed_with_payer(&[], Some(&kp.pubkey()), &[&kp], Hash::default());
        let entries = vec![Entry::new(&Hash::default(), 1, vec![tx.clone()])];
        let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
        assert_eq!(signatures.len(), 1);

        let entries = vec![
            Entry::new(&Hash::default(), 1, vec![tx.clone(), tx.clone()]),
            Entry::new(&Hash::default(), 1, vec![tx]),
        ];
        let signatures = CompletedDataSetsService::get_transaction_signatures(entries);
        assert_eq!(signatures.len(), 3);
    }
}
|
||||
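The `get_transaction_signatures` helper above takes only the first signature of each transaction and, unlike the earlier `t.signatures[0]` indexing, quietly skips transactions that carry no signatures. A tiny standalone illustration of that drain-the-first-element pattern; the `Tx` type is a stand-in for `Transaction`.

```rust
struct Tx {
    signatures: Vec<String>,
}

fn first_signatures(txs: Vec<Tx>) -> Vec<String> {
    txs.into_iter()
        // `signatures[0]` would panic on an empty vec; drain(..).next() just yields None.
        .filter_map(|mut t| t.signatures.drain(..).next())
        .collect()
}

fn main() {
    let txs = vec![
        Tx { signatures: vec!["sig-a".into(), "sig-b".into()] },
        Tx { signatures: vec![] },
        Tx { signatures: vec!["sig-c".into()] },
    ];
    assert_eq!(first_signatures(txs), vec!["sig-a".to_string(), "sig-c".to_string()]);
}
```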
|
@@ -5,9 +5,11 @@ use crate::{
|
||||
use chrono::prelude::*;
|
||||
use solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE};
|
||||
use solana_runtime::{
|
||||
bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE,
|
||||
vote_account::ArcVoteAccount,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Slot, UnixTimestamp},
|
||||
hash::Hash,
|
||||
instruction::Instruction,
|
||||
@@ -89,7 +91,7 @@ pub(crate) struct ComputedBankState {
|
||||
pub pubkey_votes: Arc<PubkeyVotes>,
|
||||
}
|
||||
|
||||
#[frozen_abi(digest = "2ZUeCLMVQxmHYbeqMH7M97ifVSKoVErGvRHzyxcQRjgU")]
|
||||
#[frozen_abi(digest = "Eay84NBbJqiMBfE7HHH2o6e51wcvoU79g8zCi5sw6uj3")]
|
||||
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
|
||||
pub struct Tower {
|
||||
node_pubkey: Pubkey,
|
||||
@@ -179,7 +181,7 @@ impl Tower {
|
||||
vote_account: &Pubkey,
|
||||
) -> Self {
|
||||
let root_bank = bank_forks.root_bank();
|
||||
let (_progress, heaviest_subtree_fork_choice, unlock_heaviest_subtree_fork_choice_slot) =
|
||||
let (_progress, heaviest_subtree_fork_choice) =
|
||||
crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
|
||||
root_bank,
|
||||
bank_forks.frozen_banks().values().cloned().collect(),
|
||||
@@ -188,14 +190,12 @@ impl Tower {
|
||||
);
|
||||
let root = root_bank.slot();
|
||||
|
||||
let heaviest_bank = if root > unlock_heaviest_subtree_fork_choice_slot {
|
||||
bank_forks
|
||||
.get(heaviest_subtree_fork_choice.best_overall_slot())
|
||||
.expect("The best overall slot must be one of `frozen_banks` which all exist in bank_forks")
|
||||
.clone()
|
||||
} else {
|
||||
Tower::find_heaviest_bank(&bank_forks, &my_pubkey).unwrap_or_else(|| root_bank.clone())
|
||||
};
|
||||
let heaviest_bank = bank_forks
|
||||
.get(heaviest_subtree_fork_choice.best_overall_slot())
|
||||
.expect(
|
||||
"The best overall slot must be one of `frozen_banks` which all exist in bank_forks",
|
||||
)
|
||||
.clone();
|
||||
|
||||
Self::new(
|
||||
&my_pubkey,
|
||||
@@ -214,7 +214,7 @@ impl Tower {
|
||||
all_pubkeys: &mut PubkeyReferences,
|
||||
) -> ComputedBankState
|
||||
where
|
||||
F: Iterator<Item = (Pubkey, (u64, Account))>,
|
||||
F: IntoIterator<Item = (Pubkey, (u64, ArcVoteAccount))>,
|
||||
{
|
||||
let mut voted_stakes = HashMap::new();
|
||||
let mut total_stake = 0;
|
||||
@@ -228,20 +228,20 @@ impl Tower {
|
||||
continue;
|
||||
}
|
||||
trace!("{} {} with stake {}", node_pubkey, key, voted_stake);
|
||||
let vote_state = VoteState::from(&account);
|
||||
if vote_state.is_none() {
|
||||
datapoint_warn!(
|
||||
"tower_warn",
|
||||
(
|
||||
"warn",
|
||||
format!("Unable to get vote_state from account {}", key),
|
||||
String
|
||||
),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let mut vote_state = vote_state.unwrap();
|
||||
|
||||
let mut vote_state = match account.vote_state().as_ref() {
|
||||
Err(_) => {
|
||||
datapoint_warn!(
|
||||
"tower_warn",
|
||||
(
|
||||
"warn",
|
||||
format!("Unable to get vote_state from account {}", key),
|
||||
String
|
||||
),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
Ok(vote_state) => vote_state.clone(),
|
||||
};
|
||||
for vote in &vote_state.votes {
|
||||
let key = all_pubkeys.get_or_insert(&key);
|
||||
lockout_intervals
|
||||
@@ -376,9 +376,9 @@ impl Tower {
|
||||
}
|
||||
|
||||
fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<Slot> {
|
||||
let vote_account = bank.vote_accounts().get(vote_account_pubkey)?.1.clone();
|
||||
let bank_vote_state = VoteState::deserialize(&vote_account.data).ok()?;
|
||||
bank_vote_state.last_voted_slot()
|
||||
let (_stake, vote_account) = bank.get_vote_account(vote_account_pubkey)?;
|
||||
let slot = vote_account.vote_state().as_ref().ok()?.last_voted_slot();
|
||||
slot
|
||||
}
|
||||
|
||||
pub fn new_vote_from_bank(&self, bank: &Bank, vote_account_pubkey: &Pubkey) -> (Vote, usize) {
|
||||
@@ -509,12 +509,65 @@ impl Tower {
|
||||
descendants: &HashMap<Slot, HashSet<u64>>,
|
||||
progress: &ProgressMap,
|
||||
total_stake: u64,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
|
||||
) -> SwitchForkDecision {
|
||||
self.last_voted_slot()
|
||||
.map(|last_voted_slot| {
|
||||
let root = self.root();
|
||||
let empty_ancestors = HashSet::default();
|
||||
let empty_ancestors_due_to_minor_unsynced_ledger = || {
|
||||
// This condition (stale stray last vote) shouldn't occur under normal validator
|
||||
// operation, indicating something unusual happened.
|
||||
// This condition could be introduced by manual ledger mishandling,
|
||||
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
|
||||
|
||||
// However, returning empty ancestors as a fallback here shouldn't result in
|
||||
// slashing by itself (Note that we couldn't fully preclude any kind of slashing if
|
||||
// the failure was OS or HW level).
|
||||
|
||||
// Firstly, lockout is ensured elsewhere.
|
||||
|
||||
// Also, there is no risk of optimistic conf. violation. Although empty ancestors
|
||||
// could result in incorrect (= more than actual) locked_out_stake and
|
||||
// false-positive SwitchProof later in this function, there should be no such a
|
||||
// heavier fork candidate, first of all, if the last vote (or any of its
|
||||
// unavailable ancestors) were already optimistically confirmed.
|
||||
// The only exception is that other validator is already violating it...
|
||||
if self.is_first_switch_check() && switch_slot < last_voted_slot {
|
||||
// `switch < last` is needed not to warn! this message just because of using
|
||||
// newer snapshots on validator restart
|
||||
let message = format!(
|
||||
"bank_forks doesn't have corresponding data for the stray restored \
|
||||
last vote({}), meaning some inconsistency between saved tower and ledger.",
|
||||
last_voted_slot
|
||||
);
|
||||
warn!("{}", message);
|
||||
datapoint_warn!("tower_warn", ("warn", message, String));
|
||||
}
|
||||
&empty_ancestors
|
||||
};
|
||||
|
||||
let suspended_decision_due_to_major_unsynced_ledger = || {
|
||||
// This peculiar corner handling is needed mainly for a tower which is newer than
|
||||
// blockstore. (Yeah, we tolerate it for ease of maintaining validator by operators)
|
||||
// This condition could be introduced by manual ledger mishandling,
|
||||
// validator SEGV, OS/HW crash, or plain No Free Space FS error.
|
||||
|
||||
// When we're in this clause, it basically means validator is badly running
|
||||
// with a future tower while replaying past slots, especially problematic is
|
||||
// last_voted_slot.
|
||||
// So, don't re-vote on it by returning pseudo FailedSwitchThreshold, otherwise
|
||||
// there would be slashing because of double vote on one of last_vote_ancestors.
|
||||
// (Well, needless to say, re-creating the duplicate block must be handled properly
|
||||
// at the banking stage: https://github.com/solana-labs/solana/issues/8232)
|
||||
//
|
||||
// To be specific, the replay stage is tricked into a false perception where
|
||||
// last_vote_ancestors is AVAILABLE for descendant-of-`switch_slot`, stale, and
|
||||
// stray slots (which should always be empty_ancestors).
|
||||
//
|
||||
// This is covered by test_future_tower_* in local_cluster
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, total_stake)
|
||||
};
|
||||
|
||||
let last_vote_ancestors =
|
||||
ancestors.get(&last_voted_slot).unwrap_or_else(|| {
|
||||
@@ -529,35 +582,7 @@ impl Tower {
|
||||
// all of them.
|
||||
panic!("no ancestors found with slot: {}", last_voted_slot);
|
||||
} else {
|
||||
// This condition (stale stray last vote) shouldn't occur under normal validator
|
||||
// operation, indicating something unusual happened.
|
||||
// Possible causes include: OS/HW crash, validator process crash, only saved tower
|
||||
// is moved over to a new setup, etc...
|
||||
|
||||
// However, returning empty ancestors as a fallback here shouldn't result in
|
||||
// slashing by itself (Note that we couldn't fully preclude any kind of slashing if
|
||||
// the failure was OS or HW level).
|
||||
|
||||
// Firstly, lockout is ensured elsewhere.
|
||||
|
||||
// Also, there is no risk of optimistic conf. violation. Although empty ancestors
|
||||
// could result in incorrect (= more than actual) locked_out_stake and
|
||||
// false-positive SwitchProof later in this function, there should be no such a
|
||||
// heavier fork candidate, first of all, if the last vote (or any of its
|
||||
// unavailable ancestors) were already optimistically confirmed.
|
||||
// The only exception is that other validator is already violating it...
|
||||
if self.is_first_switch_check() && switch_slot < last_voted_slot {
|
||||
// `switch < last` is needed not to warn! this message just because of using
|
||||
// newer snapshots on validator restart
|
||||
let message = format!(
|
||||
"bank_forks doesn't have corresponding data for the stray restored \
|
||||
last vote({}), meaning some inconsistency between saved tower and ledger.",
|
||||
last_voted_slot
|
||||
);
|
||||
warn!("{}", message);
|
||||
datapoint_warn!("tower_warn", ("warn", message, String));
|
||||
}
|
||||
&empty_ancestors
|
||||
empty_ancestors_due_to_minor_unsynced_ledger()
|
||||
}
|
||||
});
|
||||
|
||||
@@ -569,13 +594,18 @@ impl Tower {
|
||||
return SwitchForkDecision::SameFork;
|
||||
}
|
||||
|
||||
assert!(
|
||||
!last_vote_ancestors.contains(&switch_slot),
|
||||
"Should never consider switching to slot ({}), which is ancestors({:?}) of last vote: {}",
|
||||
switch_slot,
|
||||
last_vote_ancestors,
|
||||
last_voted_slot
|
||||
);
|
||||
if last_vote_ancestors.contains(&switch_slot) {
|
||||
if !self.is_stray_last_vote() {
|
||||
panic!(
|
||||
"Should never consider switching to slot ({}), which is ancestors({:?}) of last vote: {}",
|
||||
switch_slot,
|
||||
last_vote_ancestors,
|
||||
last_voted_slot
|
||||
);
|
||||
} else {
|
||||
return suspended_decision_due_to_major_unsynced_ledger();
|
||||
}
|
||||
}
|
||||
|
||||
// By this point, we know the `switch_slot` is on a different fork
|
||||
// (is neither an ancestor nor descendant of `last_vote`), so a
|
||||
@@ -673,7 +703,7 @@ impl Tower {
|
||||
descendants: &HashMap<Slot, HashSet<u64>>,
|
||||
progress: &ProgressMap,
|
||||
total_stake: u64,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
|
||||
) -> SwitchForkDecision {
|
||||
let decision = self.make_check_switch_threshold_decision(
|
||||
switch_slot,
|
||||
@@ -753,26 +783,6 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn find_heaviest_bank(
|
||||
bank_forks: &BankForks,
|
||||
node_pubkey: &Pubkey,
|
||||
) -> Option<Arc<Bank>> {
|
||||
let ancestors = bank_forks.ancestors();
|
||||
let mut bank_weights: Vec<_> = bank_forks
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.map(|b| {
|
||||
(
|
||||
Self::bank_weight(node_pubkey, b, &ancestors),
|
||||
b.parents().len(),
|
||||
b.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
bank_weights.sort_by_key(|b| (b.0, b.1));
|
||||
bank_weights.pop().map(|b| b.2)
|
||||
}
|
||||
|
||||
/// Update stake for all the ancestors.
|
||||
/// Note, stake is the same for all the ancestor.
|
||||
fn update_ancestor_voted_stakes(
|
||||
@@ -795,21 +805,6 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
fn bank_weight(
|
||||
node_pubkey: &Pubkey,
|
||||
bank: &Bank,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
) -> u128 {
|
||||
let ComputedBankState { bank_weight, .. } = Self::collect_vote_lockouts(
|
||||
node_pubkey,
|
||||
bank.slot(),
|
||||
bank.vote_accounts().into_iter(),
|
||||
ancestors,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
bank_weight
|
||||
}
|
||||
|
||||
fn voted_slots(&self) -> Vec<Slot> {
|
||||
self.lockouts
|
||||
.votes
|
||||
@@ -846,14 +841,6 @@ impl Tower {
|
||||
);
|
||||
assert_eq!(slot_history.check(replayed_root), Check::Found);
|
||||
|
||||
// reconcile_blockstore_roots_with_tower() should already have aligned these.
|
||||
assert!(
|
||||
tower_root <= replayed_root,
|
||||
format!(
|
||||
"tower root: {:?} >= replayed root slot: {}",
|
||||
tower_root, replayed_root
|
||||
)
|
||||
);
|
||||
assert!(
|
||||
self.last_vote == Vote::default() && self.lockouts.votes.is_empty()
|
||||
|| self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(),
|
||||
@@ -864,16 +851,59 @@ impl Tower {
|
||||
);
|
||||
|
||||
if let Some(last_voted_slot) = self.last_voted_slot() {
|
||||
if slot_history.check(last_voted_slot) == Check::TooOld {
|
||||
// We could try hard to anchor with other older votes, but opt to simplify the
|
||||
// following logic
|
||||
return Err(TowerError::TooOldTower(
|
||||
if tower_root <= replayed_root {
|
||||
// Normally, we goes into this clause with possible help of
|
||||
// reconcile_blockstore_roots_with_tower()
|
||||
if slot_history.check(last_voted_slot) == Check::TooOld {
|
||||
// We could try hard to anchor with other older votes, but opt to simplify the
|
||||
// following logic
|
||||
return Err(TowerError::TooOldTower(
|
||||
last_voted_slot,
|
||||
slot_history.oldest(),
|
||||
));
|
||||
}
|
||||
|
||||
self.adjust_lockouts_with_slot_history(slot_history)?;
|
||||
self.initialize_root(replayed_root);
|
||||
} else {
|
||||
// This should never occur under normal operation.
|
||||
// While this validator's voting is suspended this way,
|
||||
// suspended_decision_due_to_major_unsynced_ledger() will be also touched.
|
||||
let message = format!(
|
||||
"For some reason, we're REPROCESSING slots which has already been \
|
||||
voted and ROOTED by us; \
|
||||
VOTING will be SUSPENDED UNTIL {}!",
|
||||
last_voted_slot,
|
||||
slot_history.oldest(),
|
||||
));
|
||||
);
|
||||
error!("{}", message);
|
||||
datapoint_error!("tower_error", ("error", message, String));
|
||||
|
||||
// Let's pass-through adjust_lockouts_with_slot_history just for sanitization,
|
||||
// using a synthesized SlotHistory.
|
||||
|
||||
let mut warped_slot_history = (*slot_history).clone();
|
||||
// Blockstore doesn't have the tower_root slot because of
|
||||
// (replayed_root < tower_root) in this else clause, meaning the tower is from
|
||||
// the future from the view of blockstore.
|
||||
// Pretend the blockstore has the future tower_root to anchor exactly with that
|
||||
// slot by adding tower_root to a slot history. The added slot will be newer
|
||||
// than all slots in the slot history (remember tower_root > replayed_root),
|
||||
// satisfying the slot history invariant.
|
||||
// Thus, the whole process will be safe as well because tower_root exists
|
||||
// within both tower and slot history, guaranteeing the success of adjustment
|
||||
// and retaining all of future votes correctly while sanitizing.
|
||||
warped_slot_history.add(tower_root);
|
||||
|
||||
self.adjust_lockouts_with_slot_history(&warped_slot_history)?;
|
||||
// don't update root; future tower's root should be kept across validator
|
||||
// restarts to continue to show the scary messages at restarts until the next
|
||||
// voting.
|
||||
}
|
||||
self.adjust_lockouts_with_slot_history(slot_history)?;
|
||||
self.initialize_root(replayed_root);
|
||||
} else {
|
||||
// This else clause is for newly created tower.
|
||||
// initialize_lockouts_from_bank() should ensure the following invariant,
|
||||
// otherwise we're screwing something up.
|
||||
assert_eq!(tower_root, replayed_root);
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
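A minimal stand-alone sketch of the "warped slot history" handling described in the comments above: when the restored tower's root is newer than anything replay has seen, a clone of the slot history is extended with that future root so the adjustment pass can still anchor on a slot both sides know about, while the root itself is left untouched. `SimpleSlotHistory` is a hypothetical stand-in for `solana_sdk::slot_history::SlotHistory`.

```rust
use std::collections::BTreeSet;

/// Hypothetical stand-in for the SDK's SlotHistory (a windowed bitvector in reality).
#[derive(Clone, Default)]
struct SimpleSlotHistory {
    slots: BTreeSet<u64>,
}

impl SimpleSlotHistory {
    fn add(&mut self, slot: u64) {
        self.slots.insert(slot);
    }
    fn contains(&self, slot: u64) -> bool {
        self.slots.contains(&slot)
    }
}

fn main() {
    // Blockstore has only replayed up to slot 2...
    let mut replayed = SimpleSlotHistory::default();
    for slot in [0u64, 1, 2] {
        replayed.add(slot);
    }
    // ...but the restored tower claims a root at slot 12 (a "future tower").
    let tower_root = 12;
    assert!(!replayed.contains(tower_root));

    // Pretend the blockstore already knows the future root so lockout
    // adjustment can anchor on it; the root itself is NOT re-voted or updated.
    let mut warped = replayed.clone();
    warped.add(tower_root);
    assert!(warped.contains(tower_root));
}
```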
@@ -921,11 +951,12 @@ impl Tower {
|
||||
}
|
||||
|
||||
if let Some(checked_slot) = checked_slot {
|
||||
// This is really special, only if tower is initialized (root = slot 0) for genesis and contains
|
||||
// a vote (= slot 0) for the genesis, the slot 0 can repeat only once
|
||||
let voting_from_genesis = *slot_in_tower == checked_slot && *slot_in_tower == 0;
|
||||
// This is really special, only if tower is initialized and contains
|
||||
// a vote for the root, the root slot can repeat only once
|
||||
let voting_for_root =
|
||||
*slot_in_tower == checked_slot && *slot_in_tower == tower_root;
|
||||
|
||||
if !voting_from_genesis {
|
||||
if !voting_for_root {
|
||||
// Unless we're voting since genesis, slots_in_tower must always be older than last checked_slot
|
||||
// including all vote slot and the root slot.
|
||||
assert!(
|
||||
@@ -992,10 +1023,12 @@ impl Tower {
|
||||
root: Slot,
|
||||
bank: &Bank,
|
||||
) {
|
||||
if let Some((_stake, vote_account)) = bank.vote_accounts().get(vote_account_pubkey) {
|
||||
let vote_state = VoteState::deserialize(&vote_account.data)
|
||||
.expect("vote_account isn't a VoteState?");
|
||||
self.lockouts = vote_state;
|
||||
if let Some((_stake, vote_account)) = bank.get_vote_account(vote_account_pubkey) {
|
||||
self.lockouts = vote_account
|
||||
.vote_state()
|
||||
.as_ref()
|
||||
.expect("vote_account isn't a VoteState?")
|
||||
.clone();
|
||||
self.initialize_root(root);
|
||||
self.initialize_lockouts(|v| v.slot > root);
|
||||
trace!(
|
||||
@@ -1116,6 +1149,9 @@ pub enum TowerError {
|
||||
|
||||
#[error("The tower is fatally inconsistent with blockstore: {0}")]
|
||||
FatallyInconsistent(&'static str),
|
||||
|
||||
#[error("The tower is useless because of new hard fork: {0}")]
|
||||
HardFork(Slot),
|
||||
}
|
||||
|
||||
impl TowerError {
|
||||
@@ -1151,8 +1187,11 @@ impl SavedTower {
|
||||
}
|
||||
}
|
||||
|
||||
// Given an untimely crash, tower may have roots that are not reflected in blockstore because
|
||||
// `ReplayState::handle_votable_bank()` saves tower before setting blockstore roots
|
||||
// Given an untimely crash, tower may have roots that are not reflected in blockstore,
|
||||
// or the reverse of this.
|
||||
// That's because we don't impose any ordering guarantee or any kind of write barriers
|
||||
// between tower (plain old POSIX fs calls) and blockstore (through RocksDB), when
|
||||
// `ReplayState::handle_votable_bank()` saves tower before setting blockstore roots.
|
||||
pub fn reconcile_blockstore_roots_with_tower(
|
||||
tower: &Tower,
|
||||
blockstore: &Blockstore,
|
||||
@@ -1172,12 +1211,23 @@ pub fn reconcile_blockstore_roots_with_tower(
|
||||
),
|
||||
})
|
||||
.collect();
|
||||
assert!(
|
||||
!new_roots.is_empty(),
|
||||
"at least 1 parent slot must be found"
|
||||
);
|
||||
|
||||
blockstore.set_roots(&new_roots)?;
|
||||
if !new_roots.is_empty() {
|
||||
info!(
|
||||
"Reconciling slots as root based on tower root: {:?} ({}..{}) ",
|
||||
new_roots, tower_root, last_blockstore_root
|
||||
);
|
||||
blockstore.set_roots(&new_roots)?;
|
||||
} else {
|
||||
// This indicates we're in bad state; but still don't panic here.
|
||||
// That's because we might have a chance of recovering properly with
|
||||
// newer snapshot.
|
||||
warn!(
|
||||
"Couldn't find any ancestor slots from tower root ({}) \
|
||||
towards blockstore root ({}); blockstore pruned or only \
|
||||
tower moved into new ledger?",
|
||||
tower_root, last_blockstore_root,
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
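A hedged sketch of the reconciliation pass itself: starting from the tower root, walk parent links back toward the blockstore's last root, collect every slot not yet marked, and only mark them as roots if anything was found, warning instead of asserting otherwise. The parent map stands in for the blockstore's ancestor iterator.

```rust
use std::collections::{HashMap, HashSet};

/// Collect slots between `tower_root` and `last_blockstore_root` by following
/// parent links; a stand-in for walking blockstore ancestors.
fn slots_to_reconcile(
    tower_root: u64,
    last_blockstore_root: u64,
    parents: &HashMap<u64, u64>,
    already_rooted: &HashSet<u64>,
) -> Vec<u64> {
    let mut new_roots = Vec::new();
    let mut slot = tower_root;
    while slot > last_blockstore_root {
        if !already_rooted.contains(&slot) {
            new_roots.push(slot);
        }
        match parents.get(&slot) {
            Some(parent) => slot = *parent,
            None => break, // pruned ledger, or tower moved onto a new ledger
        }
    }
    new_roots
}

fn main() {
    let parents: HashMap<u64, u64> = [(4u64, 2), (2, 1), (1, 0)].into_iter().collect();
    let rooted: HashSet<u64> = [0u64].into_iter().collect();
    let new_roots = slots_to_reconcile(4, 0, &parents, &rooted);
    if new_roots.is_empty() {
        eprintln!("couldn't find any ancestor slots from the tower root; blockstore pruned?");
    } else {
        println!("reconciling {:?} as roots", new_roots); // [4, 2, 1]
    }
}
```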
||||
@@ -1186,7 +1236,6 @@ pub fn reconcile_blockstore_roots_with_tower(
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use crate::{
|
||||
bank_weight_fork_choice::BankWeightForkChoice,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_slots::ClusterSlots,
|
||||
fork_choice::SelectVoteAndResetForkResult,
|
||||
@@ -1203,7 +1252,8 @@ pub mod test {
|
||||
},
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signer, slot_history::SlotHistory,
|
||||
account::Account, clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signer,
|
||||
slot_history::SlotHistory,
|
||||
};
|
||||
use solana_vote_program::{
|
||||
vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY},
|
||||
@@ -1325,7 +1375,6 @@ pub mod test {
|
||||
&self.bank_forks,
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
);
|
||||
|
||||
let vote_bank = self
|
||||
@@ -1521,7 +1570,7 @@ pub mod test {
|
||||
(bank_forks, progress, heaviest_subtree_fork_choice)
|
||||
}
|
||||
|
||||
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, Account))> {
|
||||
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, ArcVoteAccount))> {
|
||||
let mut stakes = vec![];
|
||||
for (lamports, votes) in stake_votes {
|
||||
let mut account = Account::default();
|
||||
@@ -1532,11 +1581,14 @@ pub mod test {
|
||||
vote_state.process_slot_vote_unchecked(*slot);
|
||||
}
|
||||
VoteState::serialize(
|
||||
&VoteStateVersions::Current(Box::new(vote_state)),
|
||||
&VoteStateVersions::new_current(vote_state),
|
||||
&mut account.data,
|
||||
)
|
||||
.expect("serialize state");
|
||||
stakes.push((solana_sdk::pubkey::new_rand(), (*lamports, account)));
|
||||
stakes.push((
|
||||
solana_sdk::pubkey::new_rand(),
|
||||
(*lamports, ArcVoteAccount::from(account)),
|
||||
));
|
||||
}
|
||||
stakes
|
||||
}
|
||||
@@ -1890,16 +1942,16 @@ pub mod test {
|
||||
}
|
||||
|
||||
info!("local tower: {:#?}", tower.lockouts.votes);
|
||||
let vote_accounts = vote_simulator
|
||||
let observed = vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(next_unlocked_slot)
|
||||
.unwrap()
|
||||
.vote_accounts();
|
||||
let observed = vote_accounts.get(&vote_pubkey).unwrap();
|
||||
let state = VoteState::from(&observed.1).unwrap();
|
||||
info!("observed tower: {:#?}", state.votes);
|
||||
.get_vote_account(&vote_pubkey)
|
||||
.unwrap();
|
||||
let state = observed.1.vote_state();
|
||||
info!("observed tower: {:#?}", state.as_ref().unwrap().votes);
|
||||
|
||||
let num_slots_to_try = 200;
|
||||
cluster_votes
|
||||
@@ -2699,8 +2751,7 @@ pub mod test {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "at least 1 parent slot must be found")]
|
||||
fn test_reconcile_blockstore_roots_with_tower_panic_no_parent() {
|
||||
fn test_reconcile_blockstore_roots_with_tower_nop_no_parent() {
|
||||
solana_logger::setup();
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
{
|
||||
@@ -2716,7 +2767,9 @@ pub mod test {
|
||||
|
||||
let mut tower = Tower::new_with_key(&Pubkey::default());
|
||||
tower.lockouts.root_slot = Some(4);
|
||||
assert_eq!(blockstore.last_root(), 0);
|
||||
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
|
||||
assert_eq!(blockstore.last_root(), 0);
|
||||
}
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
@@ -3038,6 +3091,23 @@ pub mod test {
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_vote_on_root() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.root_slot = Some(42);
|
||||
tower.lockouts.votes.push_back(Lockout::new(42));
|
||||
tower.lockouts.votes.push_back(Lockout::new(43));
|
||||
tower.lockouts.votes.push_back(Lockout::new(44));
|
||||
let vote = Vote::new(vec![44], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
let mut slot_history = SlotHistory::default();
|
||||
slot_history.add(42);
|
||||
|
||||
let tower = tower.adjust_lockouts_after_replay(42, &slot_history);
|
||||
assert_eq!(tower.unwrap().voted_slots(), [43, 44]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_vote_on_genesis() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
@@ -3050,4 +3120,25 @@ pub mod test {
|
||||
|
||||
assert!(tower.adjust_lockouts_after_replay(0, &slot_history).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_future_tower() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.votes.push_back(Lockout::new(13));
|
||||
tower.lockouts.votes.push_back(Lockout::new(14));
|
||||
let vote = Vote::new(vec![14], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
tower.initialize_root(12);
|
||||
|
||||
let mut slot_history = SlotHistory::default();
|
||||
slot_history.add(0);
|
||||
slot_history.add(2);
|
||||
|
||||
let tower = tower
|
||||
.adjust_lockouts_after_replay(2, &slot_history)
|
||||
.unwrap();
|
||||
assert_eq!(tower.root(), 12);
|
||||
assert_eq!(tower.voted_slots(), vec![13, 14]);
|
||||
assert_eq!(tower.stray_restored_slot, Some(14));
|
||||
}
|
||||
}
|
||||
|
@@ -26,7 +26,7 @@ pub struct ContactInfo {
|
||||
/// address to forward unprocessed transactions to
|
||||
pub tpu_forwards: SocketAddr,
|
||||
/// address to which to send bank state requests
|
||||
pub rpc_banks: SocketAddr,
|
||||
pub unused: SocketAddr,
|
||||
/// address to which to send JSON-RPC requests
|
||||
pub rpc: SocketAddr,
|
||||
/// websocket for JSON-RPC push notifications
|
||||
@@ -95,7 +95,7 @@ impl Default for ContactInfo {
|
||||
repair: socketaddr_any!(),
|
||||
tpu: socketaddr_any!(),
|
||||
tpu_forwards: socketaddr_any!(),
|
||||
rpc_banks: socketaddr_any!(),
|
||||
unused: socketaddr_any!(),
|
||||
rpc: socketaddr_any!(),
|
||||
rpc_pubsub: socketaddr_any!(),
|
||||
serve_repair: socketaddr_any!(),
|
||||
@@ -115,7 +115,7 @@ impl ContactInfo {
|
||||
repair: socketaddr!("127.0.0.1:1237"),
|
||||
tpu: socketaddr!("127.0.0.1:1238"),
|
||||
tpu_forwards: socketaddr!("127.0.0.1:1239"),
|
||||
rpc_banks: socketaddr!("127.0.0.1:1240"),
|
||||
unused: socketaddr!("127.0.0.1:1240"),
|
||||
rpc: socketaddr!("127.0.0.1:1241"),
|
||||
rpc_pubsub: socketaddr!("127.0.0.1:1242"),
|
||||
serve_repair: socketaddr!("127.0.0.1:1243"),
|
||||
@@ -124,6 +124,14 @@ impl ContactInfo {
|
||||
}
|
||||
}
|
||||
|
||||
/// New random ContactInfo for tests and simulations.
|
||||
pub(crate) fn new_rand<R: rand::Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
|
||||
let delay = 10 * 60 * 1000; // 10 minutes
|
||||
let now = timestamp() - delay + rng.gen_range(0, 2 * delay);
|
||||
let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand);
|
||||
ContactInfo::new_localhost(&pubkey, now)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// ContactInfo with multicast addresses for adversarial testing.
|
||||
pub fn new_multicast() -> Self {
|
||||
@@ -137,7 +145,7 @@ impl ContactInfo {
|
||||
repair: addr,
|
||||
tpu: addr,
|
||||
tpu_forwards: addr,
|
||||
rpc_banks: addr,
|
||||
unused: addr,
|
||||
rpc: addr,
|
||||
rpc_pubsub: addr,
|
||||
serve_repair: addr,
|
||||
@@ -162,7 +170,6 @@ impl ContactInfo {
|
||||
let repair = next_port(&bind_addr, 5);
|
||||
let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
|
||||
let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
|
||||
let rpc_banks = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_BANKS_PORT);
|
||||
let serve_repair = next_port(&bind_addr, 6);
|
||||
Self {
|
||||
id: *pubkey,
|
||||
@@ -172,7 +179,7 @@ impl ContactInfo {
|
||||
repair,
|
||||
tpu,
|
||||
tpu_forwards,
|
||||
rpc_banks,
|
||||
unused: "0.0.0.0:0".parse().unwrap(),
|
||||
rpc,
|
||||
rpc_pubsub,
|
||||
serve_repair,
|
||||
@@ -249,7 +256,7 @@ mod tests {
|
||||
assert!(ci.rpc.ip().is_unspecified());
|
||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||
assert!(ci.tpu.ip().is_unspecified());
|
||||
assert!(ci.rpc_banks.ip().is_unspecified());
|
||||
assert!(ci.unused.ip().is_unspecified());
|
||||
assert!(ci.serve_repair.ip().is_unspecified());
|
||||
}
|
||||
#[test]
|
||||
@@ -261,7 +268,7 @@ mod tests {
|
||||
assert!(ci.rpc.ip().is_multicast());
|
||||
assert!(ci.rpc_pubsub.ip().is_multicast());
|
||||
assert!(ci.tpu.ip().is_multicast());
|
||||
assert!(ci.rpc_banks.ip().is_multicast());
|
||||
assert!(ci.unused.ip().is_multicast());
|
||||
assert!(ci.serve_repair.ip().is_multicast());
|
||||
}
|
||||
#[test]
|
||||
@@ -274,7 +281,7 @@ mod tests {
|
||||
assert!(ci.rpc.ip().is_unspecified());
|
||||
assert!(ci.rpc_pubsub.ip().is_unspecified());
|
||||
assert!(ci.tpu.ip().is_unspecified());
|
||||
assert!(ci.rpc_banks.ip().is_unspecified());
|
||||
assert!(ci.unused.ip().is_unspecified());
|
||||
assert!(ci.serve_repair.ip().is_unspecified());
|
||||
}
|
||||
#[test]
|
||||
@@ -287,7 +294,7 @@ mod tests {
|
||||
assert_eq!(ci.tpu_forwards.port(), 13);
|
||||
assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
|
||||
assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
|
||||
assert_eq!(ci.rpc_banks.port(), rpc_port::DEFAULT_RPC_BANKS_PORT);
|
||||
assert!(ci.unused.ip().is_unspecified());
|
||||
assert_eq!(ci.serve_repair.port(), 16);
|
||||
}
|
||||
|
||||
@@ -311,10 +318,6 @@ mod tests {
|
||||
d1.rpc_pubsub,
|
||||
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_PUBSUB_PORT))
|
||||
);
|
||||
assert_eq!(
|
||||
d1.rpc_banks,
|
||||
socketaddr!(format!("127.0.0.1:{}", rpc_port::DEFAULT_RPC_BANKS_PORT))
|
||||
);
|
||||
assert_eq!(d1.tvu_forwards, socketaddr!("127.0.0.1:1238"));
|
||||
assert_eq!(d1.repair, socketaddr!("127.0.0.1:1239"));
|
||||
assert_eq!(d1.serve_repair, socketaddr!("127.0.0.1:1240"));
|
||||
|
core/src/crds.rs
@@ -24,24 +24,38 @@
|
||||
//! A value is updated to a new version if the labels match, and the value
|
||||
//! wallclock is later, or the value hash is greater.
|
||||
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_shards::CrdsShards;
|
||||
use crate::crds_value::{CrdsValue, CrdsValueLabel};
|
||||
use crate::crds_value::{CrdsData, CrdsValue, CrdsValueLabel, LowestSlot};
|
||||
use bincode::serialize;
|
||||
use indexmap::map::{Entry, IndexMap};
|
||||
use indexmap::map::{rayon::ParValues, Entry, IndexMap, Iter, Values};
|
||||
use indexmap::set::IndexSet;
|
||||
use rayon::{prelude::*, ThreadPool};
|
||||
use solana_sdk::hash::{hash, Hash};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::Keypair;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::cmp;
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Index;
|
||||
use std::collections::{hash_map, HashMap};
|
||||
use std::ops::{Index, IndexMut};
|
||||
|
||||
const CRDS_SHARDS_BITS: u32 = 8;
|
||||
// Limit number of crds values associated with each unique pubkey. This
|
||||
// excludes crds values which by label design are limited per each pubkey.
|
||||
// TODO: Find the right value for this once duplicate shreds and corresponding
|
||||
// votes are broadcasted over gossip.
|
||||
const MAX_CRDS_VALUES_PER_PUBKEY: usize = 512;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Crds {
|
||||
/// Stores the map of labels and values
|
||||
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
|
||||
pub num_inserts: usize,
|
||||
pub shards: CrdsShards,
|
||||
table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
|
||||
pub num_inserts: usize, // Only used in tests.
|
||||
shards: CrdsShards,
|
||||
// Indices of all crds values which are node ContactInfo.
|
||||
nodes: IndexSet<usize>,
|
||||
// Indices of all crds values associated with a node.
|
||||
records: HashMap<Pubkey, IndexSet<usize>>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
@@ -84,14 +98,23 @@ impl VersionedCrdsValue {
|
||||
value_hash,
|
||||
}
|
||||
}
|
||||
|
||||
/// New random VersionedCrdsValue for tests and simulations.
|
||||
pub fn new_rand<R: rand::Rng>(rng: &mut R, keypair: Option<&Keypair>) -> Self {
|
||||
let delay = 10 * 60 * 1000; // 10 minutes
|
||||
let now = timestamp() - delay + rng.gen_range(0, 2 * delay);
|
||||
Self::new(now, CrdsValue::new_rand(rng, keypair))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Crds {
|
||||
fn default() -> Self {
|
||||
Crds {
|
||||
table: IndexMap::new(),
|
||||
table: IndexMap::default(),
|
||||
num_inserts: 0,
|
||||
shards: CrdsShards::new(CRDS_SHARDS_BITS),
|
||||
nodes: IndexSet::default(),
|
||||
records: HashMap::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -121,16 +144,27 @@ impl Crds {
|
||||
let label = new_value.value.label();
|
||||
match self.table.entry(label) {
|
||||
Entry::Vacant(entry) => {
|
||||
assert!(self.shards.insert(entry.index(), &new_value));
|
||||
let entry_index = entry.index();
|
||||
self.shards.insert(entry_index, &new_value);
|
||||
if let CrdsData::ContactInfo(_) = new_value.value.data {
|
||||
self.nodes.insert(entry_index);
|
||||
}
|
||||
self.records
|
||||
.entry(new_value.value.pubkey())
|
||||
.or_default()
|
||||
.insert(entry_index);
|
||||
entry.insert(new_value);
|
||||
self.num_inserts += 1;
|
||||
Ok(None)
|
||||
}
|
||||
Entry::Occupied(mut entry) if *entry.get() < new_value => {
|
||||
let index = entry.index();
|
||||
assert!(self.shards.remove(index, entry.get()));
|
||||
assert!(self.shards.insert(index, &new_value));
|
||||
self.shards.remove(index, entry.get());
|
||||
self.shards.insert(index, &new_value);
|
||||
self.num_inserts += 1;
|
||||
// As long as the pubkey does not change, self.records
|
||||
// does not need to be updated.
|
||||
debug_assert_eq!(entry.get().value.pubkey(), new_value.value.pubkey());
|
||||
Ok(Some(entry.insert(new_value)))
|
||||
}
|
||||
_ => {
|
||||
@@ -159,16 +193,74 @@ impl Crds {
|
||||
self.table.get(label)
|
||||
}
|
||||
|
||||
fn update_label_timestamp(&mut self, id: &CrdsValueLabel, now: u64) {
|
||||
if let Some(e) = self.table.get_mut(id) {
|
||||
e.local_timestamp = cmp::max(e.local_timestamp, now);
|
||||
}
|
||||
pub fn get(&self, label: &CrdsValueLabel) -> Option<&VersionedCrdsValue> {
|
||||
self.table.get(label)
|
||||
}
|
||||
|
||||
pub fn get_contact_info(&self, pubkey: Pubkey) -> Option<&ContactInfo> {
|
||||
let label = CrdsValueLabel::ContactInfo(pubkey);
|
||||
self.table.get(&label)?.value.contact_info()
|
||||
}
|
||||
|
||||
pub fn get_lowest_slot(&self, pubkey: Pubkey) -> Option<&LowestSlot> {
|
||||
let lable = CrdsValueLabel::LowestSlot(pubkey);
|
||||
self.table.get(&lable)?.value.lowest_slot()
|
||||
}
|
||||
|
||||
/// Returns all entries which are ContactInfo.
|
||||
pub fn get_nodes(&self) -> impl Iterator<Item = &VersionedCrdsValue> {
|
||||
self.nodes.iter().map(move |i| self.table.index(*i))
|
||||
}
|
||||
|
||||
/// Returns ContactInfo of all known nodes.
|
||||
pub fn get_nodes_contact_info(&self) -> impl Iterator<Item = &ContactInfo> {
|
||||
self.get_nodes().map(|v| match &v.value.data {
|
||||
CrdsData::ContactInfo(info) => info,
|
||||
_ => panic!("this should not happen!"),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.table.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.table.is_empty()
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> Iter<'_, CrdsValueLabel, VersionedCrdsValue> {
|
||||
self.table.iter()
|
||||
}
|
||||
|
||||
pub fn values(&self) -> Values<'_, CrdsValueLabel, VersionedCrdsValue> {
|
||||
self.table.values()
|
||||
}
|
||||
|
||||
pub fn par_values(&self) -> ParValues<'_, CrdsValueLabel, VersionedCrdsValue> {
|
||||
self.table.par_values()
|
||||
}
|
||||
|
||||
/// Returns all crds values which the first 'mask_bits'
|
||||
/// of their hash value is equal to 'mask'.
|
||||
pub fn filter_bitmask(
|
||||
&self,
|
||||
mask: u64,
|
||||
mask_bits: u32,
|
||||
) -> impl Iterator<Item = &VersionedCrdsValue> {
|
||||
self.shards
|
||||
.find(mask, mask_bits)
|
||||
.map(move |i| self.table.index(i))
|
||||
}
|
||||
|
||||
/// Update the timestamp's of all the labels that are associated with Pubkey
|
||||
pub fn update_record_timestamp(&mut self, pubkey: &Pubkey, now: u64) {
|
||||
for label in &CrdsValue::record_labels(pubkey) {
|
||||
self.update_label_timestamp(label, now);
|
||||
if let Some(indices) = self.records.get(pubkey) {
|
||||
for index in indices {
|
||||
let entry = self.table.index_mut(*index);
|
||||
if entry.local_timestamp < now {
|
||||
entry.local_timestamp = now;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -176,46 +268,113 @@ impl Crds {
    /// * timeouts - Pubkey specific timeouts with Pubkey::default() as the default timeout.
    pub fn find_old_labels(
        &self,
        thread_pool: &ThreadPool,
        now: u64,
        timeouts: &HashMap<Pubkey, u64>,
    ) -> Vec<CrdsValueLabel> {
        #[rustversion::before(1.49.0)]
        fn select_nth<T: Ord>(xs: &mut Vec<T>, _nth: usize) {
            xs.sort_unstable();
        }
        #[rustversion::since(1.49.0)]
        fn select_nth<T: Ord>(xs: &mut Vec<T>, nth: usize) {
            xs.select_nth_unstable(nth);
        }
        let default_timeout = *timeouts
            .get(&Pubkey::default())
            .expect("must have default timeout");
        self.table
            .iter()
            .filter_map(|(k, v)| {
                let timeout = timeouts.get(&k.pubkey()).unwrap_or(&default_timeout);
                if v.local_timestamp.saturating_add(*timeout) <= now {
                    Some(k)
                } else {
                    None
                }
            })
            .cloned()
            .collect()
        // Given an index of all crd values associated with a pubkey,
        // returns crds labels of old values to be evicted.
        let evict = |pubkey, index: &IndexSet<usize>| {
            let timeout = *timeouts.get(pubkey).unwrap_or(&default_timeout);
            let mut old_labels = Vec::new();
            // Buffer of crds values to be evicted based on their wallclock.
            let mut recent_unlimited_labels: Vec<(u64 /*wallclock*/, usize /*index*/)> = index
                .into_iter()
                .filter_map(|ix| {
                    let (label, value) = self.table.get_index(*ix).unwrap();
                    if value.local_timestamp.saturating_add(timeout) <= now {
                        old_labels.push(label.clone());
                        None
                    } else {
                        match label.value_space() {
                            Some(_) => None,
                            None => Some((value.value.wallclock(), *ix)),
                        }
                    }
                })
                .collect();
            // Number of values to discard from the buffer:
            let nth = recent_unlimited_labels
                .len()
                .saturating_sub(MAX_CRDS_VALUES_PER_PUBKEY);
            // Partition on wallclock to discard the older ones.
            if nth > 0 && nth < recent_unlimited_labels.len() {
                select_nth(&mut recent_unlimited_labels, nth);
            }
            old_labels.extend(
                recent_unlimited_labels
                    .split_at(nth)
                    .0
                    .iter()
                    .map(|(_ /*wallclock*/, ix)| self.table.get_index(*ix).unwrap().0.clone()),
            );
            old_labels
        };
        thread_pool.install(|| {
            self.records
                .par_iter()
                .flat_map(|(pubkey, index)| evict(pubkey, index))
                .collect()
        })
    }
|
||||
|
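The capping step above keeps at most `MAX_CRDS_VALUES_PER_PUBKEY` recent values per node and evicts the rest by wallclock, using a partial selection instead of a full sort. A small self-contained sketch of that idea; the cap of 4 used here is arbitrary.

```rust
/// Given (wallclock, index) pairs for one pubkey, return the indices of the
/// oldest entries once more than `cap` values are present. Uses
/// select_nth_unstable (Rust 1.49+), so only a partial ordering is computed.
fn indices_to_evict(mut values: Vec<(u64, usize)>, cap: usize) -> Vec<usize> {
    if cap == 0 {
        return values.into_iter().map(|(_, ix)| ix).collect();
    }
    let nth = values.len().saturating_sub(cap);
    if nth == 0 {
        return Vec::new();
    }
    // After this call, everything before position `nth` is <= everything after it.
    values.select_nth_unstable(nth);
    values[..nth].iter().map(|&(_wallclock, ix)| ix).collect()
}

fn main() {
    let values = vec![(50, 0), (10, 1), (40, 2), (30, 3), (20, 4), (60, 5)];
    let mut evicted = indices_to_evict(values, 4);
    evicted.sort_unstable();
    // The two oldest wallclocks (10 and 20) are dropped.
    assert_eq!(evicted, vec![1, 4]);
}
```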
||||
pub fn remove(&mut self, key: &CrdsValueLabel) {
|
||||
if let Some((index, _, value)) = self.table.swap_remove_full(key) {
|
||||
assert!(self.shards.remove(index, &value));
|
||||
// The previously last element in the table is now moved to the
|
||||
// 'index' position. Shards need to be updated accordingly.
|
||||
if index < self.table.len() {
|
||||
let value = self.table.index(index);
|
||||
assert!(self.shards.remove(self.table.len(), value));
|
||||
assert!(self.shards.insert(index, value));
|
||||
}
|
||||
pub fn remove(&mut self, key: &CrdsValueLabel) -> Option<VersionedCrdsValue> {
|
||||
let (index, _ /*label*/, value) = self.table.swap_remove_full(key)?;
|
||||
self.shards.remove(index, &value);
|
||||
if let CrdsData::ContactInfo(_) = value.value.data {
|
||||
self.nodes.swap_remove(&index);
|
||||
}
|
||||
// Remove the index from records associated with the value's pubkey.
|
||||
let pubkey = value.value.pubkey();
|
||||
let mut records_entry = match self.records.entry(pubkey) {
|
||||
hash_map::Entry::Vacant(_) => panic!("this should not happen!"),
|
||||
hash_map::Entry::Occupied(entry) => entry,
|
||||
};
|
||||
records_entry.get_mut().swap_remove(&index);
|
||||
if records_entry.get().is_empty() {
|
||||
records_entry.remove();
|
||||
}
|
||||
// If index == self.table.len(), then the removed entry was the last
|
||||
// entry in the table, in which case no other keys were modified.
|
||||
// Otherwise, the previously last element in the table is now moved to
|
||||
// the 'index' position; and so shards and nodes need to be updated
|
||||
// accordingly.
|
||||
let size = self.table.len();
|
||||
if index < size {
|
||||
let value = self.table.index(index);
|
||||
self.shards.remove(size, value);
|
||||
self.shards.insert(index, value);
|
||||
if let CrdsData::ContactInfo(_) = value.value.data {
|
||||
self.nodes.swap_remove(&size);
|
||||
self.nodes.insert(index);
|
||||
}
|
||||
let pubkey = value.value.pubkey();
|
||||
let records = self.records.get_mut(&pubkey).unwrap();
|
||||
records.swap_remove(&size);
|
||||
records.insert(index);
|
||||
}
|
||||
Some(value)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_value::CrdsData;
|
||||
use crate::{contact_info::ContactInfo, crds_value::NodeInstance};
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::ThreadPoolBuilder;
|
||||
use std::{collections::HashSet, iter::repeat_with};
|
||||
|
||||
#[test]
|
||||
fn test_insert() {
|
||||
@@ -261,8 +420,6 @@ mod test {
|
||||
)));
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
|
||||
|
||||
crds.update_label_timestamp(&val.label(), 1);
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 1);
|
||||
assert_eq!(crds.table[&val.label()].insert_timestamp, 0);
|
||||
|
||||
let val2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
@@ -288,48 +445,102 @@ mod test {
|
||||
}
|
||||
#[test]
|
||||
fn test_find_old_records_default() {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(crds.insert(val.clone(), 1), Ok(None));
|
||||
let mut set = HashMap::new();
|
||||
set.insert(Pubkey::default(), 0);
|
||||
assert!(crds.find_old_labels(0, &set).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
|
||||
set.insert(Pubkey::default(), 1);
|
||||
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 2, &set),
|
||||
vec![val.label()]
|
||||
);
|
||||
set.insert(Pubkey::default(), 2);
|
||||
assert_eq!(crds.find_old_labels(4, &set), vec![val.label()]);
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 4, &set),
|
||||
vec![val.label()]
|
||||
);
|
||||
}
|
||||
#[test]
|
||||
fn test_find_old_records_with_override() {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut rng = thread_rng();
|
||||
let mut crds = Crds::default();
|
||||
let mut timeouts = HashMap::new();
|
||||
let val = CrdsValue::new_rand(&mut rng);
|
||||
let val = CrdsValue::new_rand(&mut rng, None);
|
||||
timeouts.insert(Pubkey::default(), 3);
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
|
||||
assert!(crds.find_old_labels(2, &timeouts).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
|
||||
timeouts.insert(val.pubkey(), 1);
|
||||
assert_eq!(crds.find_old_labels(2, &timeouts), vec![val.label()]);
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 2, &timeouts),
|
||||
vec![val.label()]
|
||||
);
|
||||
timeouts.insert(val.pubkey(), u64::MAX);
|
||||
assert!(crds.find_old_labels(2, &timeouts).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
|
||||
timeouts.insert(Pubkey::default(), 1);
|
||||
assert!(crds.find_old_labels(2, &timeouts).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
|
||||
timeouts.remove(&val.pubkey());
|
||||
assert_eq!(crds.find_old_labels(2, &timeouts), vec![val.label()]);
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 2, &timeouts),
|
||||
vec![val.label()]
|
||||
);
|
||||
}
|
||||
|
||||
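// The find_old_labels tests around here pin down how timeouts are resolved:
// a value expires once `now` reaches its local_timestamp plus the timeout for
// its pubkey, falling back to the Pubkey::default() entry when the pubkey has
// no dedicated timeout. A minimal stand-alone restatement of that rule, with
// [u8; 32] standing in for Pubkey; the fallback and the >= comparison are
// inferred from the assertions in these tests.
use std::collections::HashMap;

fn is_old(now: u64, local_timestamp: u64, pubkey: [u8; 32], timeouts: &HashMap<[u8; 32], u64>) -> bool {
    let default_key = [0u8; 32]; // Pubkey::default() stand-in
    let timeout = timeouts
        .get(&pubkey)
        .or_else(|| timeouts.get(&default_key))
        .copied()
        .unwrap_or(0);
    now >= local_timestamp.saturating_add(timeout)
}
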
#[test]
|
||||
fn test_find_old_records_unlimited() {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut rng = thread_rng();
|
||||
let now = 1_610_034_423_000;
|
||||
let pubkey = Pubkey::new_unique();
|
||||
let mut crds = Crds::default();
|
||||
let mut timeouts = HashMap::new();
|
||||
timeouts.insert(Pubkey::default(), 1);
|
||||
timeouts.insert(pubkey, 180);
|
||||
for _ in 0..1024 {
|
||||
let wallclock = now - rng.gen_range(0, 240);
|
||||
let val = NodeInstance::new(&mut rng, pubkey, wallclock);
|
||||
let val = CrdsData::NodeInstance(val);
|
||||
let val = CrdsValue::new_unsigned(val);
|
||||
assert_eq!(crds.insert(val, now), Ok(None));
|
||||
}
|
||||
let now = now + 1;
|
||||
let labels = crds.find_old_labels(&thread_pool, now, &timeouts);
|
||||
assert_eq!(crds.table.len() - labels.len(), MAX_CRDS_VALUES_PER_PUBKEY);
|
||||
let max_wallclock = labels
|
||||
.iter()
|
||||
.map(|label| crds.lookup(label).unwrap().wallclock())
|
||||
.max()
|
||||
.unwrap();
|
||||
assert!(max_wallclock > now - 180);
|
||||
let labels: HashSet<_> = labels.into_iter().collect();
|
||||
for (label, value) in crds.table.iter() {
|
||||
if !labels.contains(label) {
|
||||
assert!(max_wallclock <= value.value.wallclock());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_default() {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
|
||||
let mut set = HashMap::new();
|
||||
set.insert(Pubkey::default(), 1);
|
||||
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 2, &set),
|
||||
vec![val.label()]
|
||||
);
|
||||
crds.remove(&val.label());
|
||||
assert!(crds.find_old_labels(2, &set).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
|
||||
}
|
||||
#[test]
|
||||
fn test_find_old_records_staked() {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(crds.insert(val.clone(), 1), Ok(None));
|
||||
@@ -337,20 +548,26 @@ mod test {
|
||||
//now < timestamp
|
||||
set.insert(Pubkey::default(), 0);
|
||||
set.insert(val.pubkey(), 0);
|
||||
assert!(crds.find_old_labels(0, &set).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
|
||||
|
||||
//pubkey shouldn't expire since its timeout is MAX
|
||||
set.insert(val.pubkey(), std::u64::MAX);
|
||||
assert!(crds.find_old_labels(2, &set).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
|
||||
|
||||
//default has max timeout, but pubkey should still expire
|
||||
set.insert(Pubkey::default(), std::u64::MAX);
|
||||
set.insert(val.pubkey(), 1);
|
||||
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 2, &set),
|
||||
vec![val.label()]
|
||||
);
|
||||
|
||||
set.insert(val.pubkey(), 2);
|
||||
assert!(crds.find_old_labels(2, &set).is_empty());
|
||||
assert_eq!(crds.find_old_labels(3, &set), vec![val.label()]);
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 3, &set),
|
||||
vec![val.label()]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -361,27 +578,29 @@ mod test {
|
||||
}
|
||||
|
||||
let mut crds = Crds::default();
|
||||
let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
|
||||
.take(256)
|
||||
.collect();
|
||||
let keypairs: Vec<_> = std::iter::repeat_with(Keypair::new).take(256).collect();
|
||||
let mut rng = thread_rng();
|
||||
let mut num_inserts = 0;
|
||||
let mut num_overrides = 0;
|
||||
for _ in 0..4096 {
|
||||
let pubkey = pubkeys[rng.gen_range(0, pubkeys.len())];
|
||||
let value = VersionedCrdsValue::new(
|
||||
rng.gen(), // local_timestamp
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&pubkey,
|
||||
rng.gen(), // now
|
||||
))),
|
||||
);
|
||||
if crds.insert_versioned(value).is_ok() {
|
||||
check_crds_shards(&crds);
|
||||
num_inserts += 1;
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
match crds.insert_versioned(value) {
|
||||
Ok(None) => {
|
||||
num_inserts += 1;
|
||||
check_crds_shards(&crds);
|
||||
}
|
||||
Ok(Some(_)) => {
|
||||
num_inserts += 1;
|
||||
num_overrides += 1;
|
||||
check_crds_shards(&crds);
|
||||
}
|
||||
Err(_) => (),
|
||||
}
|
||||
}
|
||||
assert_eq!(num_inserts, crds.num_inserts);
|
||||
assert!(num_inserts > 700);
|
||||
assert!(num_overrides > 500);
|
||||
assert!(crds.table.len() > 200);
|
||||
assert!(num_inserts > crds.table.len());
|
||||
check_crds_shards(&crds);
|
||||
@@ -394,8 +613,97 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_crds_nodes() {
|
||||
fn check_crds_nodes(crds: &Crds) -> usize {
|
||||
let num_nodes = crds
|
||||
.table
|
||||
.values()
|
||||
.filter(|value| matches!(value.value.data, CrdsData::ContactInfo(_)))
|
||||
.count();
|
||||
assert_eq!(num_nodes, crds.get_nodes_contact_info().count());
|
||||
num_nodes
|
||||
}
|
||||
let mut rng = thread_rng();
|
||||
let keypairs: Vec<_> = std::iter::repeat_with(Keypair::new).take(256).collect();
|
||||
let mut crds = Crds::default();
|
||||
let mut num_inserts = 0;
|
||||
let mut num_overrides = 0;
|
||||
for _ in 0..4096 {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
match crds.insert_versioned(value) {
|
||||
Ok(None) => {
|
||||
num_inserts += 1;
|
||||
check_crds_nodes(&crds);
|
||||
}
|
||||
Ok(Some(_)) => {
|
||||
num_inserts += 1;
|
||||
num_overrides += 1;
|
||||
check_crds_nodes(&crds);
|
||||
}
|
||||
Err(_) => (),
|
||||
}
|
||||
}
|
||||
assert_eq!(num_inserts, crds.num_inserts);
|
||||
assert!(num_inserts > 700);
|
||||
assert!(num_overrides > 500);
|
||||
assert!(crds.table.len() > 200);
|
||||
assert!(num_inserts > crds.table.len());
|
||||
let num_nodes = check_crds_nodes(&crds);
|
||||
assert!(num_nodes * 3 < crds.table.len());
|
||||
assert!(num_nodes > 150);
|
||||
// Remove values one by one and assert that node indices stay valid.
|
||||
while !crds.table.is_empty() {
|
||||
let index = rng.gen_range(0, crds.table.len());
|
||||
let key = crds.table.get_index(index).unwrap().0.clone();
|
||||
crds.remove(&key);
|
||||
check_crds_nodes(&crds);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_crds_records() {
|
||||
fn check_crds_records(crds: &Crds) {
|
||||
assert_eq!(
|
||||
crds.table.len(),
|
||||
crds.records.values().map(IndexSet::len).sum::<usize>()
|
||||
);
|
||||
for (pubkey, indices) in &crds.records {
|
||||
for index in indices {
|
||||
let value = crds.table.index(*index);
|
||||
assert_eq!(*pubkey, value.value.pubkey());
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut rng = thread_rng();
|
||||
let keypairs: Vec<_> = repeat_with(Keypair::new).take(128).collect();
|
||||
let mut crds = Crds::default();
|
||||
for k in 0..4096 {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = VersionedCrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
let _ = crds.insert_versioned(value);
|
||||
if k % 64 == 0 {
|
||||
check_crds_records(&crds);
|
||||
}
|
||||
}
|
||||
assert!(crds.records.len() > 96);
|
||||
assert!(crds.records.len() <= keypairs.len());
|
||||
// Remove values one by one and assert that records stay valid.
|
||||
while !crds.table.is_empty() {
|
||||
let index = rng.gen_range(0, crds.table.len());
|
||||
let key = crds.table.get_index(index).unwrap().0.clone();
|
||||
crds.remove(&key);
|
||||
if crds.table.len() % 64 == 0 {
|
||||
check_crds_records(&crds);
|
||||
}
|
||||
}
|
||||
assert!(crds.records.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_staked() {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
|
||||
@@ -404,9 +712,12 @@ mod test {
|
||||
//default has max timeout, but pubkey should still expire
|
||||
set.insert(Pubkey::default(), std::u64::MAX);
|
||||
set.insert(val.pubkey(), 1);
|
||||
assert_eq!(crds.find_old_labels(2, &set), vec![val.label()]);
|
||||
assert_eq!(
|
||||
crds.find_old_labels(&thread_pool, 2, &set),
|
||||
vec![val.label()]
|
||||
);
|
||||
crds.remove(&val.label());
|
||||
assert!(crds.find_old_labels(2, &set).is_empty());
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &set).is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -17,7 +17,6 @@ use std::collections::{HashMap, HashSet};
|
||||
///The min size for bloom filters
|
||||
pub const CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS: usize = 500;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CrdsGossip {
|
||||
pub crds: Crds,
|
||||
pub id: Pubkey,
|
||||
@@ -108,7 +107,7 @@ impl CrdsGossip {
|
||||
|
||||
/// add the `from` to the peer's filter of nodes
|
||||
pub fn process_prune_msg(
|
||||
&mut self,
|
||||
&self,
|
||||
peer: &Pubkey,
|
||||
destination: &Pubkey,
|
||||
origin: &[Pubkey],
|
||||
@@ -174,9 +173,12 @@ impl CrdsGossip {
|
||||
self.pull.mark_pull_request_creation_time(from, now)
|
||||
}
|
||||
/// process a pull request and create a response
|
||||
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
|
||||
pub fn process_pull_requests<I>(&mut self, callers: I, now: u64)
|
||||
where
|
||||
I: IntoIterator<Item = CrdsValue>,
|
||||
{
|
||||
self.pull
|
||||
.process_pull_requests(&mut self.crds, filters, now);
|
||||
.process_pull_requests(&mut self.crds, callers, now);
|
||||
}
|
||||
|
||||
pub fn generate_pull_responses(
|
||||
@@ -232,7 +234,12 @@ impl CrdsGossip {
|
||||
self.pull.make_timeouts(&self.id, stakes, epoch_ms)
|
||||
}
|
||||
|
||||
pub fn purge(&mut self, now: u64, timeouts: &HashMap<Pubkey, u64>) -> usize {
|
||||
pub fn purge(
|
||||
&mut self,
|
||||
thread_pool: &ThreadPool,
|
||||
now: u64,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
) -> usize {
|
||||
let mut rv = 0;
|
||||
if now > self.push.msg_timeout {
|
||||
let min = now - self.push.msg_timeout;
|
||||
@@ -247,7 +254,9 @@ impl CrdsGossip {
|
||||
let min = self.pull.crds_timeout;
|
||||
assert_eq!(timeouts[&self.id], std::u64::MAX);
|
||||
assert_eq!(timeouts[&Pubkey::default()], min);
|
||||
rv = self.pull.purge_active(&mut self.crds, now, &timeouts);
|
||||
rv = self
|
||||
.pull
|
||||
.purge_active(thread_pool, &mut self.crds, now, &timeouts);
|
||||
}
|
||||
if now > 5 * self.pull.crds_timeout {
|
||||
let min = now - 5 * self.pull.crds_timeout;
|
||||
@@ -256,6 +265,16 @@ impl CrdsGossip {
|
||||
self.pull.purge_failed_inserts(now);
|
||||
rv
|
||||
}
|
||||
|
||||
// Only for tests and simulations.
|
||||
pub(crate) fn mock_clone(&self) -> Self {
|
||||
Self {
|
||||
crds: self.crds.clone(),
|
||||
push: self.push.mock_clone(),
|
||||
pull: self.pull.clone(),
|
||||
..*self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Computes a normalized(log of actual stake) stake
|
||||
|
@@ -24,13 +24,14 @@ use std::cmp;
|
||||
use std::collections::VecDeque;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::convert::TryInto;
|
||||
use std::ops::Index;
|
||||
|
||||
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
|
||||
// The maximum age of a value received over pull responses
|
||||
pub const CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS: u64 = 60000;
|
||||
// Retention period of hashes of received outdated values.
|
||||
const FAILED_INSERTS_RETENTION_MS: u64 = 20_000;
|
||||
// Do not pull from peers which have not been updated for this long.
|
||||
const PULL_ACTIVE_TIMEOUT_MS: u64 = 60_000;
|
||||
pub const FALSE_RATE: f64 = 0.1f64;
|
||||
pub const KEYS: f64 = 8f64;
|
||||
|
||||
@@ -78,7 +79,7 @@ impl CrdsFilter {
|
||||
let seed: u64 = seed.checked_shl(64 - mask_bits).unwrap_or(0x0);
|
||||
seed | (!0u64).checked_shr(mask_bits).unwrap_or(!0x0) as u64
|
||||
}
|
||||
pub fn max_items(max_bits: f64, false_rate: f64, num_keys: f64) -> f64 {
|
||||
fn max_items(max_bits: f64, false_rate: f64, num_keys: f64) -> f64 {
|
||||
let m = max_bits;
|
||||
let p = false_rate;
|
||||
let k = num_keys;
|
||||
@@ -92,28 +93,26 @@ impl CrdsFilter {
|
||||
let buf = item.as_ref()[..8].try_into().unwrap();
|
||||
u64::from_le_bytes(buf)
|
||||
}
|
||||
pub fn test_mask_u64(&self, item: u64, ones: u64) -> bool {
|
||||
let bits = item | ones;
|
||||
bits == self.mask
|
||||
}
|
||||
pub fn test_mask(&self, item: &Hash) -> bool {
|
||||
fn test_mask(&self, item: &Hash) -> bool {
|
||||
// only consider the highest mask_bits bits from the hash and set the rest to 1.
|
||||
let ones = (!0u64).checked_shr(self.mask_bits).unwrap_or(!0u64);
|
||||
let bits = Self::hash_as_u64(item) | ones;
|
||||
bits == self.mask
|
||||
}
|
||||
pub fn add(&mut self, item: &Hash) {
|
||||
#[cfg(test)]
|
||||
fn add(&mut self, item: &Hash) {
|
||||
if self.test_mask(item) {
|
||||
self.filter.add(item);
|
||||
}
|
||||
}
|
||||
pub fn contains(&self, item: &Hash) -> bool {
|
||||
#[cfg(test)]
|
||||
fn contains(&self, item: &Hash) -> bool {
|
||||
if !self.test_mask(item) {
|
||||
return true;
|
||||
}
|
||||
self.filter.contains(item)
|
||||
}
|
||||
pub fn filter_contains(&self, item: &Hash) -> bool {
|
||||
fn filter_contains(&self, item: &Hash) -> bool {
|
||||
self.filter.contains(item)
|
||||
}
|
||||
}
|
||||
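// The mask arithmetic above can be checked in isolation: compute_mask packs
// the filter's seed into the top mask_bits bits and fills the rest with ones,
// and test_mask accepts exactly the hashes whose top bits match. A small
// runnable sketch mirroring the two functions shown in this hunk:
fn compute_mask(seed: u64, mask_bits: u32) -> u64 {
    let seed = seed.checked_shl(64 - mask_bits).unwrap_or(0);
    seed | (!0u64).checked_shr(mask_bits).unwrap_or(!0u64)
}

fn test_mask(mask: u64, mask_bits: u32, hash_as_u64: u64) -> bool {
    // Only the highest mask_bits bits take part in the comparison.
    let ones = (!0u64).checked_shr(mask_bits).unwrap_or(!0u64);
    (hash_as_u64 | ones) == mask
}

fn main() {
    let (seed, mask_bits) = (0b101u64, 3u32);
    let mask = compute_mask(seed, mask_bits);
    assert!(test_mask(mask, mask_bits, (0b101u64 << 61) | 42));
    assert!(!test_mask(mask, mask_bits, (0b100u64 << 61) | 42));
}
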
@@ -238,9 +237,22 @@ impl CrdsGossipPull {
|
||||
gossip_validators: Option<&HashSet<Pubkey>>,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> Vec<(f32, &'a ContactInfo)> {
|
||||
crds.table
|
||||
.values()
|
||||
.filter_map(|v| v.value.contact_info())
|
||||
let mut rng = rand::thread_rng();
|
||||
let active_cutoff = now.saturating_sub(PULL_ACTIVE_TIMEOUT_MS);
|
||||
crds.get_nodes()
|
||||
.filter_map(|value| {
|
||||
let info = value.value.contact_info().unwrap();
|
||||
// Stop pulling from nodes which have not been active recently.
|
||||
if value.local_timestamp < active_cutoff {
|
||||
// In order to mitigate eclipse attack, for staked nodes
|
||||
// continue retrying periodically.
|
||||
let stake = stakes.get(&info.id).unwrap_or(&0);
|
||||
if *stake == 0 || !rng.gen_ratio(1, 16) {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
Some(info)
|
||||
})
|
||||
.filter(|v| {
|
||||
v.id != *self_id
|
||||
&& ContactInfo::is_valid_address(&v.gossip)
|
||||
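// The filter_map above implements the new pull-peer selection policy: peers
// whose crds entry is older than PULL_ACTIVE_TIMEOUT_MS are normally skipped,
// but staked peers are still retried with probability 1/16 to reduce the risk
// of eclipsing a temporarily silent staked node. The push-peer selection later
// in this diff applies the same rule. A stand-alone sketch (simplified
// signature; the real code works on VersionedCrdsValue):
use rand::Rng;

const PULL_ACTIVE_TIMEOUT_MS: u64 = 60_000;

fn keep_pull_peer<R: Rng>(rng: &mut R, now: u64, local_timestamp: u64, stake: u64) -> bool {
    let active_cutoff = now.saturating_sub(PULL_ACTIVE_TIMEOUT_MS);
    if local_timestamp >= active_cutoff {
        return true; // recently active: always a candidate
    }
    // Inactive: drop unstaked peers, keep staked peers roughly 1 time in 16.
    stake != 0 && rng.gen_ratio(1, 16)
}
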
@@ -273,20 +285,18 @@ impl CrdsGossipPull {
|
||||
}
|
||||
|
||||
/// process a pull request
|
||||
pub fn process_pull_requests(
|
||||
&mut self,
|
||||
crds: &mut Crds,
|
||||
requests: Vec<(CrdsValue, CrdsFilter)>,
|
||||
now: u64,
|
||||
) {
|
||||
requests.into_iter().for_each(|(caller, _)| {
|
||||
pub fn process_pull_requests<I>(&mut self, crds: &mut Crds, callers: I, now: u64)
|
||||
where
|
||||
I: IntoIterator<Item = CrdsValue>,
|
||||
{
|
||||
for caller in callers {
|
||||
let key = caller.label().pubkey();
|
||||
if let Ok(Some(val)) = crds.insert(caller, now) {
|
||||
self.purged_values
|
||||
.push_back((val.value_hash, val.local_timestamp));
|
||||
}
|
||||
crds.update_record_timestamp(&key, now);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
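// The new process_pull_requests signature accepts any IntoIterator of caller
// values, so call-sites can map over the (caller, filter) pairs lazily instead
// of collecting an intermediate Vec. A generic stand-alone illustration of the
// call-site shape; the updated tests later in this diff do the same with
// `filters.into_iter().map(|(caller, _)| caller)`.
fn process_all<I>(callers: I) -> usize
where
    I: IntoIterator<Item = String>,
{
    callers.into_iter().count()
}

fn main() {
    let filters = vec![("caller-a".to_string(), 1u8), ("caller-b".to_string(), 2u8)];
    // Pass only the caller half of each pair, without an intermediate Vec.
    assert_eq!(process_all(filters.into_iter().map(|(caller, _)| caller)), 2);
}
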
/// Create gossip responses to pull requests
|
||||
@@ -438,12 +448,11 @@ impl CrdsGossipPull {
|
||||
const PAR_MIN_LENGTH: usize = 512;
|
||||
let num = cmp::max(
|
||||
CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS,
|
||||
crds.table.len() + self.purged_values.len() + self.failed_inserts.len(),
|
||||
crds.len() + self.purged_values.len() + self.failed_inserts.len(),
|
||||
);
|
||||
let filters = CrdsFilterSet::new(num, bloom_size);
|
||||
thread_pool.install(|| {
|
||||
crds.table
|
||||
.par_values()
|
||||
crds.par_values()
|
||||
.with_min_len(PAR_MIN_LENGTH)
|
||||
.map(|v| v.value_hash)
|
||||
.chain(
|
||||
@@ -486,10 +495,8 @@ impl CrdsGossipPull {
|
||||
return vec![];
|
||||
}
|
||||
let caller_wallclock = caller_wallclock.checked_add(jitter).unwrap_or(0);
|
||||
crds.shards
|
||||
.find(filter.mask, filter.mask_bits)
|
||||
.filter_map(|index| {
|
||||
let item = crds.table.index(index);
|
||||
crds.filter_bitmask(filter.mask, filter.mask_bits)
|
||||
.filter_map(|item| {
|
||||
debug_assert!(filter.test_mask(&item.value_hash));
|
||||
//skip values that are too new
|
||||
if item.value.wallclock() > caller_wallclock {
|
||||
@@ -537,24 +544,21 @@ impl CrdsGossipPull {
|
||||
/// The value_hash of an active item is put into self.purged_values queue
|
||||
pub fn purge_active(
|
||||
&mut self,
|
||||
thread_pool: &ThreadPool,
|
||||
crds: &mut Crds,
|
||||
now: u64,
|
||||
timeouts: &HashMap<Pubkey, u64>,
|
||||
) -> usize {
|
||||
let old = crds.find_old_labels(now, timeouts);
|
||||
let mut purged: VecDeque<_> = old
|
||||
.iter()
|
||||
.filter_map(|label| {
|
||||
let rv = crds
|
||||
.lookup_versioned(label)
|
||||
.map(|val| (val.value_hash, val.local_timestamp));
|
||||
crds.remove(label);
|
||||
rv
|
||||
})
|
||||
.collect();
|
||||
let ret = purged.len();
|
||||
self.purged_values.append(&mut purged);
|
||||
ret
|
||||
let num_purged_values = self.purged_values.len();
|
||||
self.purged_values.extend(
|
||||
crds.find_old_labels(thread_pool, now, timeouts)
|
||||
.into_iter()
|
||||
.filter_map(|label| {
|
||||
let val = crds.remove(&label)?;
|
||||
Some((val.value_hash, val.local_timestamp))
|
||||
}),
|
||||
);
|
||||
self.purged_values.len() - num_purged_values
|
||||
}
|
||||
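// The rewritten purge_active above extends purged_values in place and derives
// its return value from the queue length before and after, instead of first
// building a temporary VecDeque. A simplified stand-alone version of that
// counting pattern (plain tuples stand in for versioned crds values):
use std::collections::VecDeque;

fn purge_expired(
    table: &mut Vec<(u64 /*hash*/, u64 /*timestamp*/)>,
    purged: &mut VecDeque<(u64, u64)>,
    cutoff: u64,
) -> usize {
    let before = purged.len();
    let (kept, expired): (Vec<_>, Vec<_>) = table.drain(..).partition(|(_, ts)| *ts >= cutoff);
    *table = kept;
    purged.extend(expired);
    // Number of values purged in this call.
    purged.len() - before
}
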
/// Purge values from the `self.purged_values` queue that are older than purge_timeout
|
||||
pub fn purge_purged(&mut self, min_ts: u64) {
|
||||
@@ -854,7 +858,7 @@ mod test {
|
||||
let mut num_inserts = 0;
|
||||
for _ in 0..20_000 {
|
||||
if crds
|
||||
.insert(CrdsValue::new_rand(&mut rng), rng.gen())
|
||||
.insert(CrdsValue::new_rand(&mut rng, None), rng.gen())
|
||||
.is_ok()
|
||||
{
|
||||
num_inserts += 1;
|
||||
@@ -864,7 +868,6 @@ mod test {
|
||||
let filters = crds_gossip_pull.build_crds_filters(&thread_pool, &crds, MAX_BLOOM_SIZE);
|
||||
assert_eq!(filters.len(), 32);
|
||||
let hash_values: Vec<_> = crds
|
||||
.table
|
||||
.values()
|
||||
.map(|v| v.value_hash)
|
||||
.chain(
|
||||
@@ -954,6 +957,7 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_new_mark_creation_time() {
|
||||
let now: u64 = 1_605_127_770_789;
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
@@ -962,29 +966,31 @@ mod test {
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let mut node = CrdsGossipPull::default();
|
||||
crds.insert(entry.clone(), 0).unwrap();
|
||||
crds.insert(entry.clone(), now).unwrap();
|
||||
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&solana_sdk::pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
crds.insert(old.clone(), 0).unwrap();
|
||||
crds.insert(old.clone(), now).unwrap();
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&solana_sdk::pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
crds.insert(new.clone(), 0).unwrap();
|
||||
crds.insert(new.clone(), now).unwrap();
|
||||
|
||||
// set request creation time to max_value
|
||||
node.mark_pull_request_creation_time(&new.label().pubkey(), u64::max_value());
|
||||
// set request creation time to now.
|
||||
let now = now + 50_000;
|
||||
node.mark_pull_request_creation_time(&new.label().pubkey(), now);
|
||||
|
||||
// odds of getting the other request should be 1 in u64::max_value()
|
||||
// odds of getting the other request should be close to 1.
|
||||
let now = now + 1_000;
|
||||
for _ in 0..10 {
|
||||
let req = node.new_pull_request(
|
||||
&thread_pool,
|
||||
&crds,
|
||||
&node_pubkey,
|
||||
0,
|
||||
u64::max_value(),
|
||||
now,
|
||||
None,
|
||||
&HashMap::new(),
|
||||
PACKET_DATA_SIZE,
|
||||
@@ -1090,7 +1096,11 @@ mod test {
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 1);
|
||||
dest.process_pull_requests(
|
||||
&mut dest_crds,
|
||||
filters.into_iter().map(|(caller, _)| caller),
|
||||
1,
|
||||
);
|
||||
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
|
||||
assert!(dest_crds.lookup(&caller.label()).is_some());
|
||||
assert_eq!(
|
||||
@@ -1164,7 +1174,11 @@ mod test {
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 0);
|
||||
dest.process_pull_requests(
|
||||
&mut dest_crds,
|
||||
filters.into_iter().map(|(caller, _)| caller),
|
||||
0,
|
||||
);
|
||||
// if there is a false positive this is empty
|
||||
// prob should be around 0.1 per iteration
|
||||
if rsp.is_empty() {
|
||||
@@ -1229,7 +1243,7 @@ mod test {
|
||||
|
||||
// purge
|
||||
let timeouts = node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1);
|
||||
node.purge_active(&mut node_crds, 2, &timeouts);
|
||||
node.purge_active(&thread_pool, &mut node_crds, 2, &timeouts);
|
||||
|
||||
//verify self is still valid after purge
|
||||
assert_eq!(node_crds.lookup(&node_label).unwrap().label(), node_label);
|
||||
|
@@ -20,7 +20,7 @@ use bincode::serialized_size;
|
||||
use indexmap::map::IndexMap;
|
||||
use itertools::Itertools;
|
||||
use rand::{seq::SliceRandom, Rng};
|
||||
use solana_runtime::bloom::Bloom;
|
||||
use solana_runtime::bloom::{AtomicBloom, Bloom};
|
||||
use solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp};
|
||||
use std::{
|
||||
cmp,
|
||||
@@ -35,19 +35,18 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
|
||||
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
|
||||
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
|
||||
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
|
||||
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;
|
||||
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 3;
|
||||
// Do not push to peers which have not been updated for this long.
|
||||
const PUSH_ACTIVE_TIMEOUT_MS: u64 = 60_000;
|
||||
|
||||
// 10 minutes
|
||||
const MAX_PUSHED_TO_TIMEOUT_MS: u64 = 10 * 60 * 1000;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CrdsGossipPush {
|
||||
/// max bytes per message
|
||||
pub max_bytes: usize,
|
||||
/// active set of validators for push
|
||||
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
|
||||
active_set: IndexMap<Pubkey, AtomicBloom<Pubkey>>,
|
||||
/// push message queue
|
||||
push_messages: HashMap<CrdsValueLabel, Hash>,
|
||||
/// Cache that tracks which validators a message was received from
|
||||
@@ -136,8 +135,12 @@ impl CrdsGossipPush {
|
||||
|
||||
let mut keep = HashSet::new();
|
||||
let mut peer_stake_sum = 0;
|
||||
keep.insert(*origin);
|
||||
for next in shuffle {
|
||||
let (next_peer, next_stake) = staked_peers[next];
|
||||
if next_peer == *origin {
|
||||
continue;
|
||||
}
|
||||
keep.insert(next_peer);
|
||||
peer_stake_sum += next_stake;
|
||||
if peer_stake_sum >= prune_stake_threshold
|
||||
@@ -244,7 +247,7 @@ impl CrdsGossipPush {
|
||||
for i in start..(start + push_fanout) {
|
||||
let index = i % self.active_set.len();
|
||||
let (peer, filter) = self.active_set.get_index(index).unwrap();
|
||||
if !filter.contains(&origin) {
|
||||
if !filter.contains(&origin) || value.should_force_push(peer) {
|
||||
trace!("new_push_messages insert {} {:?}", *peer, value);
|
||||
push_messages.entry(*peer).or_default().push(value.clone());
|
||||
num_pushes += 1;
|
||||
@@ -283,13 +286,12 @@ impl CrdsGossipPush {
|
||||
}
|
||||
|
||||
/// add the `from` to the peer's filter of nodes
|
||||
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
|
||||
for origin in origins {
|
||||
if origin == self_pubkey {
|
||||
continue;
|
||||
}
|
||||
if let Some(p) = self.active_set.get_mut(peer) {
|
||||
p.add(origin)
|
||||
pub fn process_prune_msg(&self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
|
||||
if let Some(filter) = self.active_set.get(peer) {
|
||||
for origin in origins {
|
||||
if origin != self_pubkey {
|
||||
filter.add(origin);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
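// Switching the active_set values to AtomicBloom (see the import change above)
// is what allows process_prune_msg to take &self: adding an origin to a peer's
// filter only needs interior mutability. AtomicBloom itself lives in
// solana_runtime; the stand-in below only models the &self insertion using std
// atomics, not the real bloom hashing.
use std::sync::atomic::{AtomicU64, Ordering};

struct AtomicBits([AtomicU64; 4]); // 256 one-bit slots

impl AtomicBits {
    fn new() -> Self {
        Self(std::array::from_fn(|_| AtomicU64::new(0)))
    }
    fn add(&self, key: u64) {
        // Insertion through a shared reference, as AtomicBloom permits.
        let bit = key % 256;
        self.0[(bit / 64) as usize].fetch_or(1u64 << (bit % 64), Ordering::Relaxed);
    }
    fn contains(&self, key: u64) -> bool {
        let bit = key % 256;
        self.0[(bit / 64) as usize].load(Ordering::Relaxed) & (1u64 << (bit % 64)) != 0
    }
}
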
@@ -345,7 +347,7 @@ impl CrdsGossipPush {
|
||||
continue;
|
||||
}
|
||||
let size = cmp::max(CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS, network_size);
|
||||
let mut bloom = Bloom::random(size, 0.1, 1024 * 8 * 4);
|
||||
let bloom: AtomicBloom<_> = Bloom::random(size, 0.1, 1024 * 8 * 4).into();
|
||||
bloom.add(&item.id);
|
||||
new_items.insert(item.id, bloom);
|
||||
}
|
||||
@@ -375,16 +377,15 @@ impl CrdsGossipPush {
|
||||
let mut rng = rand::thread_rng();
|
||||
let max_weight = u16::MAX as f32 - 1.0;
|
||||
let active_cutoff = now.saturating_sub(PUSH_ACTIVE_TIMEOUT_MS);
|
||||
crds.table
|
||||
.values()
|
||||
crds.get_nodes()
|
||||
.filter_map(|value| {
|
||||
let info = value.value.contact_info()?;
|
||||
let info = value.value.contact_info().unwrap();
|
||||
// Stop pushing to nodes which have not been active recently.
|
||||
if value.local_timestamp < active_cutoff {
|
||||
// In order to mitigate eclipse attack, for staked nodes
|
||||
// continue retrying periodically.
|
||||
let stake = stakes.get(&info.id).unwrap_or(&0);
|
||||
if *stake == 0 || rng.gen_ratio(7, 8) {
|
||||
if *stake == 0 || !rng.gen_ratio(1, 16) {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
@@ -424,6 +425,21 @@ impl CrdsGossipPush {
|
||||
!v.is_empty()
|
||||
});
|
||||
}
|
||||
|
||||
// Only for tests and simulations.
|
||||
pub(crate) fn mock_clone(&self) -> Self {
|
||||
let mut active_set = IndexMap::<Pubkey, AtomicBloom<Pubkey>>::new();
|
||||
for (k, v) in &self.active_set {
|
||||
active_set.insert(*k, v.mock_clone());
|
||||
}
|
||||
Self {
|
||||
active_set,
|
||||
push_messages: self.push_messages.clone(),
|
||||
received_cache: self.received_cache.clone(),
|
||||
last_pushed_to: self.last_pushed_to.clone(),
|
||||
..*self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@@ -21,13 +21,11 @@ impl CrdsShards {
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn insert(&mut self, index: usize, value: &VersionedCrdsValue) -> bool {
|
||||
let hash = CrdsFilter::hash_as_u64(&value.value_hash);
|
||||
self.shard_mut(hash).insert(index, hash).is_none()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn remove(&mut self, index: usize, value: &VersionedCrdsValue) -> bool {
|
||||
let hash = CrdsFilter::hash_as_u64(&value.value_hash);
|
||||
self.shard_mut(hash).swap_remove(&index).is_some()
|
||||
|
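// The CrdsShards insert/remove above route each value by the u64 prefix of its
// hash (CrdsFilter::hash_as_u64). A hedged stand-alone sketch of that routing,
// assuming the shard index is taken from the top shard_bits of the hash; the
// exact bit selection is not shown in this hunk and is an assumption here.
use std::collections::HashMap;

struct Shards {
    shard_bits: u32,
    shards: Vec<HashMap<usize /*table index*/, u64 /*hash*/>>,
}

impl Shards {
    fn new(shard_bits: u32) -> Self {
        Self {
            shard_bits,
            shards: (0..(1usize << shard_bits)).map(|_| HashMap::new()).collect(),
        }
    }
    fn shard_index(&self, hash: u64) -> usize {
        hash.checked_shr(64 - self.shard_bits).unwrap_or(0) as usize
    }
    fn insert(&mut self, index: usize, hash: u64) -> bool {
        let i = self.shard_index(hash);
        self.shards[i].insert(index, hash).is_none()
    }
    fn remove(&mut self, index: usize, hash: u64) -> bool {
        let i = self.shard_index(hash);
        self.shards[i].remove(&index).is_some()
    }
}
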
@@ -1,19 +1,21 @@
|
||||
use crate::cluster_info::MAX_SNAPSHOT_HASHES;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::deprecated;
|
||||
use crate::epoch_slots::EpochSlots;
|
||||
use bincode::{serialize, serialized_size};
|
||||
use rand::{CryptoRng, Rng};
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signable, Signature},
|
||||
pubkey::{self, Pubkey},
|
||||
signature::{Keypair, Signable, Signature, Signer},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
borrow::{Borrow, Cow},
|
||||
collections::{BTreeSet, HashSet},
|
||||
collections::{hash_map::Entry, BTreeSet, HashMap, HashSet},
|
||||
fmt,
|
||||
};
|
||||
|
||||
@@ -77,6 +79,7 @@ pub enum CrdsData {
|
||||
EpochSlots(EpochSlotsIndex, EpochSlots),
|
||||
LegacyVersion(LegacyVersion),
|
||||
Version(Version),
|
||||
NodeInstance(NodeInstance),
|
||||
}
|
||||
|
||||
impl Sanitize for CrdsData {
|
||||
@@ -105,6 +108,30 @@ impl Sanitize for CrdsData {
|
||||
}
|
||||
CrdsData::LegacyVersion(version) => version.sanitize(),
|
||||
CrdsData::Version(version) => version.sanitize(),
|
||||
CrdsData::NodeInstance(node) => node.sanitize(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Random timestamp for tests and benchmarks.
|
||||
pub(crate) fn new_rand_timestamp<R: Rng>(rng: &mut R) -> u64 {
|
||||
const DELAY: u64 = 10 * 60 * 1000; // 10 minutes
|
||||
timestamp() - DELAY + rng.gen_range(0, 2 * DELAY)
|
||||
}
|
||||
|
||||
impl CrdsData {
|
||||
/// New random CrdsData for tests and benchmarks.
|
||||
fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> CrdsData {
|
||||
let kind = rng.gen_range(0, 5);
|
||||
// TODO: Implement other kinds of CrdsData here.
|
||||
// TODO: Assign ranges to each arm proportional to their frequency in
|
||||
// the mainnet crds table.
|
||||
match kind {
|
||||
0 => CrdsData::ContactInfo(ContactInfo::new_rand(rng, pubkey)),
|
||||
1 => CrdsData::LowestSlot(rng.gen(), LowestSlot::new_rand(rng, pubkey)),
|
||||
2 => CrdsData::SnapshotHashes(SnapshotHash::new_rand(rng, pubkey)),
|
||||
3 => CrdsData::AccountsHashes(SnapshotHash::new_rand(rng, pubkey)),
|
||||
_ => CrdsData::Version(Version::new_rand(rng, pubkey)),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -138,6 +165,23 @@ impl SnapshotHash {
|
||||
wallclock: timestamp(),
|
||||
}
|
||||
}
|
||||
|
||||
/// New random SnapshotHash for tests and benchmarks.
|
||||
pub(crate) fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
|
||||
let num_hashes = rng.gen_range(0, MAX_SNAPSHOT_HASHES) + 1;
|
||||
let hashes = std::iter::repeat_with(|| {
|
||||
let slot = 47825632 + rng.gen_range(0, 512);
|
||||
let hash = solana_sdk::hash::new_rand(rng);
|
||||
(slot, hash)
|
||||
})
|
||||
.take(num_hashes)
|
||||
.collect();
|
||||
Self {
|
||||
from: pubkey.unwrap_or_else(pubkey::new_rand),
|
||||
hashes,
|
||||
wallclock: new_rand_timestamp(rng),
|
||||
}
|
||||
}
|
||||
}
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, AbiExample)]
|
||||
pub struct LowestSlot {
|
||||
@@ -160,6 +204,18 @@ impl LowestSlot {
|
||||
wallclock,
|
||||
}
|
||||
}
|
||||
|
||||
/// New random LowestSlot for tests and benchmarks.
|
||||
fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
|
||||
Self {
|
||||
from: pubkey.unwrap_or_else(pubkey::new_rand),
|
||||
root: rng.gen(),
|
||||
lowest: rng.gen(),
|
||||
slots: BTreeSet::default(),
|
||||
stash: Vec::default(),
|
||||
wallclock: new_rand_timestamp(rng),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Sanitize for LowestSlot {
|
||||
@@ -252,6 +308,73 @@ impl Version {
|
||||
version: solana_version::Version::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// New random Version for tests and benchmarks.
|
||||
fn new_rand<R: Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
|
||||
Self {
|
||||
from: pubkey.unwrap_or_else(pubkey::new_rand),
|
||||
wallclock: new_rand_timestamp(rng),
|
||||
version: solana_version::Version {
|
||||
major: rng.gen(),
|
||||
minor: rng.gen(),
|
||||
patch: rng.gen(),
|
||||
commit: Some(rng.gen()),
|
||||
feature_set: rng.gen(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, AbiExample, Deserialize, Serialize)]
|
||||
pub struct NodeInstance {
|
||||
from: Pubkey,
|
||||
wallclock: u64,
|
||||
timestamp: u64, // Timestamp when the instance was created.
|
||||
token: u64, // Randomly generated value at node instantiation.
|
||||
}
|
||||
|
||||
impl NodeInstance {
|
||||
pub fn new<R>(rng: &mut R, pubkey: Pubkey, now: u64) -> Self
|
||||
where
|
||||
R: Rng + CryptoRng,
|
||||
{
|
||||
Self {
|
||||
from: pubkey,
|
||||
wallclock: now,
|
||||
timestamp: now,
|
||||
token: rng.gen(),
|
||||
}
|
||||
}
|
||||
|
||||
// Clones the value with an updated wallclock.
|
||||
pub fn with_wallclock(&self, now: u64) -> Self {
|
||||
Self {
|
||||
wallclock: now,
|
||||
..*self
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the crds-value is a duplicate instance
|
||||
// of this node, with a more recent timestamp.
|
||||
pub fn check_duplicate(&self, other: &CrdsValue) -> bool {
|
||||
match &other.data {
|
||||
CrdsData::NodeInstance(other) => {
|
||||
self.token != other.token
|
||||
&& self.timestamp <= other.timestamp
|
||||
&& self.from == other.from
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Sanitize for NodeInstance {
|
||||
fn sanitize(&self) -> Result<(), SanitizeError> {
|
||||
if self.wallclock >= MAX_WALLCLOCK {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
self.from.sanitize()
|
||||
}
|
||||
}
|
||||
|
||||
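// check_duplicate above treats another NodeInstance as a duplicate of this
// node only when all three conditions hold: same `from` pubkey, a different
// random token, and a timestamp that is at least as recent. A compact
// stand-alone restatement of that predicate (plain types standing in for the
// crate's):
struct Instance {
    from: [u8; 32],
    timestamp: u64, // when the instance was created
    token: u64,     // random value chosen at instantiation
}

fn is_duplicate_instance(me: &Instance, other: &Instance) -> bool {
    me.from == other.from && me.token != other.token && me.timestamp <= other.timestamp
}
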
/// Type of the replicated value
|
||||
@@ -266,6 +389,7 @@ pub enum CrdsValueLabel {
|
||||
AccountsHashes(Pubkey),
|
||||
LegacyVersion(Pubkey),
|
||||
Version(Pubkey),
|
||||
NodeInstance(Pubkey, u64 /*token*/),
|
||||
}
|
||||
|
||||
impl fmt::Display for CrdsValueLabel {
|
||||
@@ -279,6 +403,7 @@ impl fmt::Display for CrdsValueLabel {
|
||||
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
|
||||
CrdsValueLabel::LegacyVersion(_) => write!(f, "LegacyVersion({})", self.pubkey()),
|
||||
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
|
||||
CrdsValueLabel::NodeInstance(_, _) => write!(f, "NodeInstance({})", self.pubkey()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -294,6 +419,23 @@ impl CrdsValueLabel {
|
||||
CrdsValueLabel::AccountsHashes(p) => *p,
|
||||
CrdsValueLabel::LegacyVersion(p) => *p,
|
||||
CrdsValueLabel::Version(p) => *p,
|
||||
CrdsValueLabel::NodeInstance(p, _ /*token*/) => *p,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns number of possible distinct labels of the same type for
|
||||
/// a fixed pubkey, and None if that is practically unlimited.
|
||||
pub(crate) fn value_space(&self) -> Option<usize> {
|
||||
match self {
|
||||
CrdsValueLabel::ContactInfo(_) => Some(1),
|
||||
CrdsValueLabel::Vote(_, _) => Some(MAX_VOTES as usize),
|
||||
CrdsValueLabel::LowestSlot(_) => Some(1),
|
||||
CrdsValueLabel::SnapshotHashes(_) => Some(1),
|
||||
CrdsValueLabel::EpochSlots(_, _) => Some(MAX_EPOCH_SLOTS as usize),
|
||||
CrdsValueLabel::AccountsHashes(_) => Some(1),
|
||||
CrdsValueLabel::LegacyVersion(_) => Some(1),
|
||||
CrdsValueLabel::Version(_) => Some(1),
|
||||
CrdsValueLabel::NodeInstance(_, _) => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -312,14 +454,19 @@ impl CrdsValue {
|
||||
value
|
||||
}
|
||||
|
||||
/// New random crds value for tests and benchmarks.
|
||||
pub fn new_rand<R: ?Sized>(rng: &mut R) -> CrdsValue
|
||||
where
|
||||
R: rand::Rng,
|
||||
{
|
||||
let now = rng.gen();
|
||||
let contact_info = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), now);
|
||||
Self::new_signed(CrdsData::ContactInfo(contact_info), &Keypair::new())
|
||||
/// New random CrdsValue for tests and benchmarks.
|
||||
pub fn new_rand<R: Rng>(rng: &mut R, keypair: Option<&Keypair>) -> CrdsValue {
|
||||
match keypair {
|
||||
None => {
|
||||
let keypair = Keypair::new();
|
||||
let data = CrdsData::new_rand(rng, Some(keypair.pubkey()));
|
||||
Self::new_signed(data, &keypair)
|
||||
}
|
||||
Some(keypair) => {
|
||||
let data = CrdsData::new_rand(rng, Some(keypair.pubkey()));
|
||||
Self::new_signed(data, keypair)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
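// The reworked new_rand above optionally takes a keypair so benchmarks and
// tests can generate many values that share one signer, as the crds record
// tests earlier in this diff do. A hedged usage sketch, assuming the crate's
// test context (CrdsValue and the Signable trait in scope):
#[test]
fn new_rand_with_fixed_keypair_signs_with_that_key() {
    use rand::thread_rng;
    use solana_sdk::signature::{Keypair, Signable, Signer};
    use std::iter::repeat_with;

    let mut rng = thread_rng();
    let keypair = Keypair::new();
    let values: Vec<_> = repeat_with(|| CrdsValue::new_rand(&mut rng, Some(&keypair)))
        .take(8)
        .collect();
    // Every generated value is attributed to, and verifiably signed by, the
    // same pubkey.
    assert!(values.iter().all(|value| value.pubkey() == keypair.pubkey()));
    assert!(values.iter().all(|value| value.verify()));
}
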
/// Totally unsecure unverifiable wallclock of the node that generated this message
|
||||
@@ -335,6 +482,7 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(_, p) => p.wallclock,
|
||||
CrdsData::LegacyVersion(version) => version.wallclock,
|
||||
CrdsData::Version(version) => version.wallclock,
|
||||
CrdsData::NodeInstance(node) => node.wallclock,
|
||||
}
|
||||
}
|
||||
pub fn pubkey(&self) -> Pubkey {
|
||||
@@ -347,6 +495,7 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(_, p) => p.from,
|
||||
CrdsData::LegacyVersion(version) => version.from,
|
||||
CrdsData::Version(version) => version.from,
|
||||
CrdsData::NodeInstance(node) => node.from,
|
||||
}
|
||||
}
|
||||
pub fn label(&self) -> CrdsValueLabel {
|
||||
@@ -359,6 +508,7 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(ix, _) => CrdsValueLabel::EpochSlots(*ix, self.pubkey()),
|
||||
CrdsData::LegacyVersion(_) => CrdsValueLabel::LegacyVersion(self.pubkey()),
|
||||
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
|
||||
CrdsData::NodeInstance(node) => CrdsValueLabel::NodeInstance(self.pubkey(), node.token),
|
||||
}
|
||||
}
|
||||
pub fn contact_info(&self) -> Option<&ContactInfo> {
|
||||
@@ -423,21 +573,6 @@ impl CrdsValue {
|
||||
}
|
||||
}
|
||||
|
||||
/// Return all the possible labels for a record identified by Pubkey.
|
||||
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
|
||||
let mut labels = vec![
|
||||
CrdsValueLabel::ContactInfo(*key),
|
||||
CrdsValueLabel::LowestSlot(*key),
|
||||
CrdsValueLabel::SnapshotHashes(*key),
|
||||
CrdsValueLabel::AccountsHashes(*key),
|
||||
CrdsValueLabel::LegacyVersion(*key),
|
||||
CrdsValueLabel::Version(*key),
|
||||
];
|
||||
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
|
||||
labels.extend((0..MAX_EPOCH_SLOTS).map(|ix| CrdsValueLabel::EpochSlots(ix, *key)));
|
||||
labels
|
||||
}
|
||||
|
||||
/// Returns the size (in bytes) of a CrdsValue
|
||||
pub fn size(&self) -> u64 {
|
||||
serialized_size(&self).expect("unable to serialize contact info")
|
||||
@@ -469,6 +604,39 @@ impl CrdsValue {
|
||||
.vote_index()
|
||||
.expect("all values must be votes")
|
||||
}
|
||||
|
||||
/// Returns true if, regardless of prunes, this crds-value
|
||||
/// should be pushed to the receiving node.
|
||||
pub fn should_force_push(&self, peer: &Pubkey) -> bool {
|
||||
match &self.data {
|
||||
CrdsData::NodeInstance(node) => node.from == *peer,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Filters out an iterator of crds values, returning
|
||||
/// the unique ones with the most recent wallclock.
|
||||
pub(crate) fn filter_current<'a, I>(values: I) -> impl Iterator<Item = &'a CrdsValue>
|
||||
where
|
||||
I: IntoIterator<Item = &'a CrdsValue>,
|
||||
{
|
||||
let mut out = HashMap::new();
|
||||
for value in values {
|
||||
match out.entry(value.label()) {
|
||||
Entry::Vacant(entry) => {
|
||||
entry.insert((value, value.wallclock()));
|
||||
}
|
||||
Entry::Occupied(mut entry) => {
|
||||
let value_wallclock = value.wallclock();
|
||||
let (_, entry_wallclock) = entry.get();
|
||||
if *entry_wallclock < value_wallclock {
|
||||
entry.insert((value, value_wallclock));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
out.into_iter().map(|(_, (v, _))| v)
|
||||
}
|
||||
|
||||
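// filter_current above deduplicates crds values by label, keeping the copy
// with the newest wallclock; test_filter_current below exercises it at scale.
// A hedged minimal usage sketch, assuming the crate's test context (CrdsValue,
// CrdsData and filter_current in scope):
#[test]
fn filter_current_keeps_newest_copy_per_label() {
    use crate::contact_info::ContactInfo;
    use solana_sdk::pubkey::Pubkey;

    let pubkey = Pubkey::new_unique();
    // Two ContactInfo values for the same node share one label; only the one
    // with the larger wallclock should survive the filter.
    let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(&pubkey, 1)));
    let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(&pubkey, 2)));
    let values = vec![old, new.clone()];
    let current: Vec<_> = filter_current(&values).collect();
    assert_eq!(current, vec![&new]);
}
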
#[cfg(test)]
|
||||
@@ -479,27 +647,9 @@ mod test {
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::cmp::Ordering;
|
||||
use std::iter::repeat_with;
|
||||
|
||||
#[test]
|
||||
fn test_labels() {
|
||||
let mut hits = [false; 6 + MAX_VOTES as usize + MAX_EPOCH_SLOTS as usize];
|
||||
// this method should cover all the possible labels
|
||||
for v in &CrdsValue::record_labels(&Pubkey::default()) {
|
||||
match v {
|
||||
CrdsValueLabel::ContactInfo(_) => hits[0] = true,
|
||||
CrdsValueLabel::LowestSlot(_) => hits[1] = true,
|
||||
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
|
||||
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
|
||||
CrdsValueLabel::LegacyVersion(_) => hits[4] = true,
|
||||
CrdsValueLabel::Version(_) => hits[5] = true,
|
||||
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 6] = true,
|
||||
CrdsValueLabel::EpochSlots(ix, _) => {
|
||||
hits[*ix as usize + MAX_VOTES as usize + 6] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
assert!(hits.iter().all(|x| *x));
|
||||
}
|
||||
#[test]
|
||||
fn test_keys_and_values() {
|
||||
let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
@@ -669,4 +819,163 @@ mod test {
|
||||
assert!(!value.verify());
|
||||
serialize_deserialize_value(value, correct_keypair);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filter_current() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let keys: Vec<_> = repeat_with(Keypair::new).take(16).collect();
|
||||
let values: Vec<_> = repeat_with(|| {
|
||||
let index = rng.gen_range(0, keys.len());
|
||||
CrdsValue::new_rand(&mut rng, Some(&keys[index]))
|
||||
})
|
||||
.take(256)
|
||||
.collect();
|
||||
let mut currents = HashMap::new();
|
||||
for value in filter_current(&values) {
|
||||
// Assert that filtered values have unique labels.
|
||||
assert!(currents.insert(value.label(), value).is_none());
|
||||
}
|
||||
// Assert that currents are the most recent version of each value.
|
||||
let mut count = 0;
|
||||
for value in &values {
|
||||
let current_value = currents.get(&value.label()).unwrap();
|
||||
match value.wallclock().cmp(¤t_value.wallclock()) {
|
||||
Ordering::Less => (),
|
||||
Ordering::Equal => {
|
||||
assert_eq!(value, *current_value);
|
||||
count += 1;
|
||||
}
|
||||
Ordering::Greater => panic!("this should not happen!"),
|
||||
}
|
||||
}
|
||||
assert_eq!(count, currents.len());
|
||||
// Currently CrdsData::new_rand is only implemented for 5 different
|
||||
// kinds and excludes Vote and EpochSlots, and so the unique labels
|
||||
// cannot be more than 5 times number of keys.
|
||||
assert!(currents.len() <= keys.len() * 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_node_instance_crds_label() {
|
||||
fn make_crds_value(node: NodeInstance) -> CrdsValue {
|
||||
CrdsValue::new_unsigned(CrdsData::NodeInstance(node))
|
||||
}
|
||||
let mut rng = rand::thread_rng();
|
||||
let now = timestamp();
|
||||
let pubkey = Pubkey::new_unique();
|
||||
let node = NodeInstance::new(&mut rng, pubkey, now);
|
||||
assert_eq!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(node.with_wallclock(now + 8)).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
from: Pubkey::new_unique(),
|
||||
..node
|
||||
};
|
||||
assert_ne!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
wallclock: now + 8,
|
||||
..node
|
||||
};
|
||||
assert_eq!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
timestamp: now + 8,
|
||||
..node
|
||||
};
|
||||
assert_eq!(
|
||||
make_crds_value(node.clone()).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
let other = NodeInstance {
|
||||
token: rng.gen(),
|
||||
..node
|
||||
};
|
||||
assert_ne!(
|
||||
make_crds_value(node).label(),
|
||||
make_crds_value(other).label()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_duplicate_instance() {
|
||||
fn make_crds_value(node: NodeInstance) -> CrdsValue {
|
||||
CrdsValue::new_unsigned(CrdsData::NodeInstance(node))
|
||||
}
|
||||
let now = timestamp();
|
||||
let mut rng = rand::thread_rng();
|
||||
let pubkey = Pubkey::new_unique();
|
||||
let node = NodeInstance::new(&mut rng, pubkey, now);
|
||||
// Same token is not a duplicate.
|
||||
assert!(!node.check_duplicate(&make_crds_value(NodeInstance {
|
||||
from: pubkey,
|
||||
wallclock: now + 1,
|
||||
timestamp: now + 1,
|
||||
token: node.token,
|
||||
})));
|
||||
// Older timestamp is not a duplicate.
|
||||
assert!(!node.check_duplicate(&make_crds_value(NodeInstance {
|
||||
from: pubkey,
|
||||
wallclock: now + 1,
|
||||
timestamp: now - 1,
|
||||
token: rng.gen(),
|
||||
})));
|
||||
// Updated wallclock is not a duplicate.
|
||||
let other = node.with_wallclock(now + 8);
|
||||
assert_eq!(
|
||||
other,
|
||||
NodeInstance {
|
||||
from: pubkey,
|
||||
wallclock: now + 8,
|
||||
timestamp: now,
|
||||
token: node.token,
|
||||
}
|
||||
);
|
||||
assert!(!node.check_duplicate(&make_crds_value(other)));
|
||||
// Duplicate instance.
|
||||
assert!(node.check_duplicate(&make_crds_value(NodeInstance {
|
||||
from: pubkey,
|
||||
wallclock: 0,
|
||||
timestamp: now,
|
||||
token: rng.gen(),
|
||||
})));
|
||||
// Different pubkey is not a duplicate.
|
||||
assert!(!node.check_duplicate(&make_crds_value(NodeInstance {
|
||||
from: Pubkey::new_unique(),
|
||||
wallclock: now + 1,
|
||||
timestamp: now + 1,
|
||||
token: rng.gen(),
|
||||
})));
|
||||
// Different crds value is not a duplicate.
|
||||
assert!(
|
||||
!node.check_duplicate(&CrdsValue::new_unsigned(CrdsData::ContactInfo(
|
||||
ContactInfo::new_rand(&mut rng, Some(pubkey))
|
||||
)))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_should_force_push() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let pubkey = Pubkey::new_unique();
|
||||
assert!(
|
||||
!CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_rand(
|
||||
&mut rng,
|
||||
Some(pubkey),
|
||||
)))
|
||||
.should_force_push(&pubkey)
|
||||
);
|
||||
let node = CrdsValue::new_unsigned(CrdsData::NodeInstance(NodeInstance::new(
|
||||
&mut rng,
|
||||
pubkey,
|
||||
timestamp(),
|
||||
)));
|
||||
assert!(node.should_force_push(&pubkey));
|
||||
assert!(!node.should_force_push(&Pubkey::new_unique()));
|
||||
}
|
||||
}
|
||||
|
@@ -24,6 +24,15 @@ impl Sanitize for Uncompressed {
|
||||
if self.num >= MAX_SLOTS_PER_ENTRY {
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
if self.slots.len() % 8 != 0 {
|
||||
// Uncompressed::new() ensures the length is always a multiple of 8
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
if self.slots.len() != self.slots.capacity() {
|
||||
// A BitVec<u8> with a length that's a multiple of 8 will always have len() equal to
|
||||
// capacity(), assuming no bit manipulation
|
||||
return Err(SanitizeError::ValueOutOfBounds);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
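// The new sanitize checks above enforce two structural invariants of the slots
// bitvector: its bit length is a multiple of 8 (Uncompressed::new allocates
// whole bytes) and len() equals capacity() (no bits pushed or truncated after
// construction); the capacity-to-len change further below follows from the
// same invariant. A simplified stand-alone model of those checks over a plain
// byte buffer:
struct SlotBits {
    bytes: Vec<u8>,
    num_bits: usize,
}

fn sanitize(slots: &SlotBits) -> Result<(), &'static str> {
    if slots.num_bits % 8 != 0 {
        return Err("bit length is not a multiple of 8");
    }
    if slots.num_bits != slots.bytes.len() * 8 {
        return Err("bit length does not match allocated capacity");
    }
    Ok(())
}
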
@@ -132,7 +141,7 @@ impl Uncompressed {
|
||||
if *s < self.first_slot {
|
||||
return i;
|
||||
}
|
||||
if *s - self.first_slot >= self.slots.capacity() {
|
||||
if *s - self.first_slot >= self.slots.len() {
|
||||
return i;
|
||||
}
|
||||
self.slots.set(*s - self.first_slot, true);
|
||||
@@ -393,6 +402,14 @@ mod tests {
|
||||
o.num = MAX_SLOTS_PER_ENTRY;
|
||||
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
|
||||
|
||||
let mut o = slots.clone();
|
||||
o.slots = BitVec::new_fill(false, 7); // Length not a multiple of 8
|
||||
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
|
||||
|
||||
let mut o = slots.clone();
|
||||
o.slots = BitVec::with_capacity(8); // capacity() not equal to len()
|
||||
assert_eq!(o.sanitize(), Err(SanitizeError::ValueOutOfBounds));
|
||||
|
||||
let compressed = Flate2::deflate(slots).unwrap();
|
||||
assert!(compressed.sanitize().is_ok());
|
||||
|
||||
|
@@ -187,6 +187,7 @@ impl HeaviestSubtreeForkChoice {
|
||||
.expect("new root must exist in fork_infos map")
|
||||
.parent = None;
|
||||
self.root = new_root;
|
||||
self.last_root_time = Instant::now();
|
||||
}
|
||||
|
||||
pub fn add_root_parent(&mut self, root_parent: Slot) {
|
||||
|
@@ -19,7 +19,6 @@ pub mod sample_performance_service;
|
||||
pub mod shred_fetch_stage;
|
||||
#[macro_use]
|
||||
pub mod contact_info;
|
||||
pub mod bank_weight_fork_choice;
|
||||
pub mod cluster_info;
|
||||
pub mod cluster_slots;
|
||||
pub mod cluster_slots_service;
|
||||
@@ -43,6 +42,8 @@ pub mod local_vote_signer_service;
|
||||
pub mod non_circulating_supply;
|
||||
pub mod optimistic_confirmation_verifier;
|
||||
pub mod optimistically_confirmed_bank_tracker;
|
||||
pub mod packet_hasher;
|
||||
pub mod ping_pong;
|
||||
pub mod poh_recorder;
|
||||
pub mod poh_service;
|
||||
pub mod progress_map;
|
||||
|
@@ -1,4 +1,7 @@
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::{
|
||||
accounts_index::{AccountIndex, IndexKey},
|
||||
bank::Bank,
|
||||
};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::stake_state::StakeState;
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
@@ -18,7 +21,24 @@ pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSuppl
|
||||
let withdraw_authority_list = withdraw_authority();
|
||||
|
||||
let clock = bank.clock();
|
||||
let stake_accounts = bank.get_program_accounts(&solana_stake_program::id());
|
||||
let stake_accounts = if bank
|
||||
.rc
|
||||
.accounts
|
||||
.accounts_db
|
||||
.account_indexes
|
||||
.contains(&AccountIndex::ProgramId)
|
||||
{
|
||||
bank.get_filtered_indexed_accounts(
|
||||
&IndexKey::ProgramId(solana_stake_program::id()),
|
||||
// The program-id account index checks for Account owner on inclusion. However, due to
|
||||
// the current AccountsDB implementation, an account may remain in storage as a
|
||||
// zero-lamport Account::Default() after being wiped and reinitialized in later
|
||||
// updates. We include the redundant filter here to avoid returning these accounts.
|
||||
|account| account.owner == solana_stake_program::id(),
|
||||
)
|
||||
} else {
|
||||
bank.get_program_accounts(&solana_stake_program::id())
|
||||
};
|
||||
for (pubkey, account) in stake_accounts.iter() {
|
||||
let stake_account = StakeState::from(&account).unwrap_or_default();
|
||||
match stake_account {
|
||||
|
Some files were not shown because too many files have changed in this diff.