Compare commits: document-r...v1.2.2 (93 commits)
Commit SHA1s (author, date, and message columns were empty in this capture):
f13498b428
b567138170
653982cae5
605f4906ba
d27f24e312
c9c1cb5c9c
1cc6493ccf
ae47862be2
8590184df7
d840bbab08
63314de516
c47a6e12c7
7937c45ba4
813b11ac56
ad6883b66a
a8f4c4e297
6d68e94e4e
5dd40d7d88
3f58177670
edfd65b115
51da66ec84
ba36308d69
ee450b2dd0
84b28fb261
1586b86797
8f065e487e
953eadd983
a4a792facd
055f808f98
0404878445
053907f8a4
f76dcc1f05
823bc138cd
18f746b025
c81adaf901
2d12ddd0f6
bee36cc8d0
f7aee67023
c021727009
6653136e1d
06c40c807c
9b262b4915
cc2d3ecfd7
92743499bf
aa6a00a03e
bd19f7c4cb
988bf65ba4
d5b03bd824
6a72dab111
56e8319a6d
aed1e51ef1
f4278d61df
a5c3ae3cef
05c052e212
dc05bb648a
800b65b2f6
ae1a0f57c5
df7c44bd0c
3e29cfd712
202031538f
29ff1b925d
5a91db6e62
94ba700e58
1964c6ec29
4dd6591bfd
163217815b
37c182cd5d
0c68f27ac3
5fb8da9b35
74d9fd1e4f
e71206c578
0141c80238
ed928cfdf7
2fd319ab7a
7813a1decd
93e4ed1f75
a70f31b3da
2d25227d0a
fc7bfd0f67
2996291b37
3e80b9231c
78231a8682
ace711e7f1
c9cbc39ec9
606a392d50
c67596ceb4
9a42cc7555
2e5ef2a802
8c8e2c4b2b
0578801f99
6141e1410a
4fc86807ff
d2a2eba69e
@@ -13,11 +13,14 @@ script:
   - source ci/env.sh
   - ci/publish-tarball.sh

 branches:
   only:
     - master
     - /^v\d+\.\d+/

+if: type IN (api, cron) OR tag IS present
+
 notifications:
   slack:
     on_success: change
692  Cargo.lock (generated)
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-accounts-bench"
-version = "1.2.0"
+version = "1.2.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
 [dependencies]
 log = "0.4.6"
 rayon = "1.3.0"
-solana-logger = { path = "../logger", version = "1.2.0" }
-solana-runtime = { path = "../runtime", version = "1.2.0" }
-solana-measure = { path = "../measure", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
+solana-logger = { path = "../logger", version = "1.2.2" }
+solana-runtime = { path = "../runtime", version = "1.2.2" }
+solana-measure = { path = "../measure", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
 rand = "0.7.0"
 clap = "2.33.1"
 crossbeam-channel = "0.4"
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "1.2.0"
+version = "1.2.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,16 +13,16 @@ crossbeam-channel = "0.4"
 log = "0.4.6"
 rand = "0.7.0"
 rayon = "1.3.0"
-solana-core = { path = "../core", version = "1.2.0" }
-solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
-solana-streamer = { path = "../streamer", version = "1.2.0" }
-solana-perf = { path = "../perf", version = "1.2.0" }
-solana-ledger = { path = "../ledger", version = "1.2.0" }
-solana-logger = { path = "../logger", version = "1.2.0" }
-solana-runtime = { path = "../runtime", version = "1.2.0" }
-solana-measure = { path = "../measure", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
-solana-version = { path = "../version", version = "1.2.0" }
+solana-core = { path = "../core", version = "1.2.2" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.2" }
+solana-streamer = { path = "../streamer", version = "1.2.2" }
+solana-perf = { path = "../perf", version = "1.2.2" }
+solana-ledger = { path = "../ledger", version = "1.2.2" }
+solana-logger = { path = "../logger", version = "1.2.2" }
+solana-runtime = { path = "../runtime", version = "1.2.2" }
+solana-measure = { path = "../measure", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
+solana-version = { path = "../version", version = "1.2.2" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -169,7 +169,7 @@ fn main() {
     let (verified_sender, verified_receiver) = unbounded();
     let (vote_sender, vote_receiver) = unbounded();
     let bank0 = Bank::new(&genesis_config);
-    let mut bank_forks = BankForks::new(0, bank0);
+    let mut bank_forks = BankForks::new(bank0);
     let mut bank = bank_forks.working_bank();

     info!("threads: {} txs: {}", num_threads, total_num_transactions);
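The change from `BankForks::new(0, bank0)` to `BankForks::new(bank0)` drops the explicit root-slot argument. A minimal sketch of why the new signature suffices, assuming the forks structure can read the slot off the bank it is handed (the types below are illustrative stand-ins, not Solana's real ones):

struct Bank {
    slot: u64,
}

struct BankForks {
    root: u64,
    working: Bank,
}

impl BankForks {
    // New-style constructor: the root slot is derived from the bank itself,
    // removing a redundant argument that could disagree with bank.slot.
    fn new(bank: Bank) -> Self {
        BankForks {
            root: bank.slot,
            working: bank,
        }
    }
}

fn main() {
    let bank0 = Bank { slot: 0 };
    let bank_forks = BankForks::new(bank0); // was: BankForks::new(0, bank0)
    assert_eq!(bank_forks.root, 0);
}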
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "1.2.0"
+version = "1.2.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -18,21 +18,21 @@ rand = "0.7.0"
 rayon = "1.3.0"
 serde_json = "1.0.53"
 serde_yaml = "0.8.12"
-solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
-solana-core = { path = "../core", version = "1.2.0" }
-solana-genesis = { path = "../genesis", version = "1.2.0" }
-solana-client = { path = "../client", version = "1.2.0" }
-solana-faucet = { path = "../faucet", version = "1.2.0" }
-solana-exchange-program = { path = "../programs/exchange", version = "1.2.0" }
-solana-logger = { path = "../logger", version = "1.2.0" }
-solana-metrics = { path = "../metrics", version = "1.2.0" }
-solana-net-utils = { path = "../net-utils", version = "1.2.0" }
-solana-runtime = { path = "../runtime", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
-solana-version = { path = "../version", version = "1.2.0" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.2" }
+solana-core = { path = "../core", version = "1.2.2" }
+solana-genesis = { path = "../genesis", version = "1.2.2" }
+solana-client = { path = "../client", version = "1.2.2" }
+solana-faucet = { path = "../faucet", version = "1.2.2" }
+solana-exchange-program = { path = "../programs/exchange", version = "1.2.2" }
+solana-logger = { path = "../logger", version = "1.2.2" }
+solana-metrics = { path = "../metrics", version = "1.2.2" }
+solana-net-utils = { path = "../net-utils", version = "1.2.2" }
+solana-runtime = { path = "../runtime", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
+solana-version = { path = "../version", version = "1.2.2" }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "1.2.0" }
+solana-local-cluster = { path = "../local-cluster", version = "1.2.2" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -449,7 +449,7 @@ fn swapper<T>(
     }
     account_group = (account_group + 1) % account_groups as usize;

-    let (blockhash, _fee_calculator) = client
+    let (blockhash, _fee_calculator, _last_valid_slot) = client
         .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
         .expect("Failed to get blockhash");
     let to_swap_txs: Vec<_> = to_swap
@@ -577,7 +577,7 @@ fn trader<T>(
     }
     account_group = (account_group + 1) % account_groups as usize;

-    let (blockhash, _fee_calculator) = client
+    let (blockhash, _fee_calculator, _last_valid_slot) = client
         .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
         .expect("Failed to get blockhash");

@@ -776,7 +776,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
         to_fund_txs.len(),
     );

-    let (blockhash, _fee_calculator) = client
+    let (blockhash, _fee_calculator, _last_valid_slot) = client
         .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
         .expect("blockhash");
     to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
@@ -868,7 +868,7 @@ pub fn create_token_accounts<T: Client>(

     let mut retries = 0;
     while !to_create_txs.is_empty() {
-        let (blockhash, _fee_calculator) = client
+        let (blockhash, _fee_calculator, _last_valid_slot) = client
             .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
             .expect("Failed to get blockhash");
         to_create_txs
@@ -997,7 +997,7 @@ pub fn airdrop_lamports<T: Client>(

     let mut tries = 0;
     loop {
-        let (blockhash, _fee_calculator) = client
+        let (blockhash, _fee_calculator, _last_valid_slot) = client
            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
            .expect("Failed to get blockhash");
        match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
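Every call site in this file changes the same way: `get_recent_blockhash_with_commitment` now returns a three-element tuple whose new third field reports the last slot at which the blockhash will still be accepted. A hedged sketch of the pattern with hand-rolled stand-ins (not the real solana-client types):

type Hash = [u8; 32];
type Slot = u64;

struct FeeCalculator {
    lamports_per_signature: u64,
}

// Old shape: Result<(Hash, FeeCalculator), String>
// New shape: the extra Slot is the blockhash's expiry slot.
fn get_recent_blockhash_with_commitment() -> Result<(Hash, FeeCalculator, Slot), String> {
    Ok((
        [0u8; 32],
        FeeCalculator {
            lamports_per_signature: 5000,
        },
        1234,
    ))
}

fn main() {
    // Callers that do not care about expiry simply ignore the third field,
    // exactly as the `_last_valid_slot` bindings above do.
    let (blockhash, _fee_calculator, _last_valid_slot) =
        get_recent_blockhash_with_commitment().expect("Failed to get blockhash");
    let _ = blockhash;
}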
@@ -2,18 +2,18 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "1.2.0"
+version = "1.2.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
 clap = "2.33.1"
-solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
-solana-streamer = { path = "../streamer", version = "1.2.0" }
-solana-logger = { path = "../logger", version = "1.2.0" }
-solana-net-utils = { path = "../net-utils", version = "1.2.0" }
-solana-version = { path = "../version", version = "1.2.0" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.2" }
+solana-streamer = { path = "../streamer", version = "1.2.2" }
+solana-logger = { path = "../logger", version = "1.2.2" }
+solana-net-utils = { path = "../net-utils", version = "1.2.2" }
+solana-version = { path = "../version", version = "1.2.2" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "1.2.0"
+version = "1.2.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,25 +14,25 @@ log = "0.4.8"
 rayon = "1.3.0"
 serde_json = "1.0.53"
 serde_yaml = "0.8.12"
-solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
-solana-core = { path = "../core", version = "1.2.0" }
-solana-genesis = { path = "../genesis", version = "1.2.0" }
-solana-client = { path = "../client", version = "1.2.0" }
-solana-faucet = { path = "../faucet", version = "1.2.0" }
-solana-librapay = { path = "../programs/librapay", version = "1.2.0", optional = true }
-solana-logger = { path = "../logger", version = "1.2.0" }
-solana-metrics = { path = "../metrics", version = "1.2.0" }
-solana-measure = { path = "../measure", version = "1.2.0" }
-solana-net-utils = { path = "../net-utils", version = "1.2.0" }
-solana-runtime = { path = "../runtime", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
-solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.0", optional = true }
-solana-version = { path = "../version", version = "1.2.0" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.2" }
+solana-core = { path = "../core", version = "1.2.2" }
+solana-genesis = { path = "../genesis", version = "1.2.2" }
+solana-client = { path = "../client", version = "1.2.2" }
+solana-faucet = { path = "../faucet", version = "1.2.2" }
+solana-librapay = { path = "../programs/librapay", version = "1.2.2", optional = true }
+solana-logger = { path = "../logger", version = "1.2.2" }
+solana-metrics = { path = "../metrics", version = "1.2.2" }
+solana-measure = { path = "../measure", version = "1.2.2" }
+solana-net-utils = { path = "../net-utils", version = "1.2.2" }
+solana-runtime = { path = "../runtime", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
+solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.2", optional = true }
+solana-version = { path = "../version", version = "1.2.2" }

 [dev-dependencies]
 serial_test = "0.4.0"
 serial_test_derive = "0.4.0"
-solana-local-cluster = { path = "../local-cluster", version = "1.2.0" }
+solana-local-cluster = { path = "../local-cluster", version = "1.2.2" }

 [features]
 move = ["solana-librapay", "solana-move-loader-program"]
@@ -55,7 +55,9 @@ type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
 fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
     loop {
         match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
-            Ok((blockhash, fee_calculator)) => return (blockhash, fee_calculator),
+            Ok((blockhash, fee_calculator, _last_valid_slot)) => {
+                return (blockhash, fee_calculator)
+            }
             Err(err) => {
                 info!("Couldn't get recent blockhash: {:?}", err);
                 sleep(Duration::from_secs(1));
@@ -5,6 +5,9 @@
 # Release tags use buildkite-release.yml instead

 steps:
+  - command: "ci/test-sanity.sh"
+    name: "sanity"
+    timeout_in_minutes: 5
   - command: "ci/dependabot-pr.sh"
     name: "dependabot"
     timeout_in_minutes: 5
@@ -19,6 +19,9 @@ while [[ ! -f config/run/init-completed ]]; do
   fi
 done

+while [[ $($solana_cli --url http://localhost:8899 slot --commitment recent) -eq 0 ]]; do
+  sleep 1
+done
 curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899

 wait $pid
@@ -10,9 +10,6 @@ source ci/rust-version.sh nightly
 export RUST_BACKTRACE=1
 export RUSTFLAGS="-D warnings"

-# Look for failed mergify.io backports
-_ git show HEAD --check --oneline
-
 if _ scripts/cargo-for-all-lock-files.sh +"$rust_nightly" check --locked --all-targets; then
   true
 else
@@ -29,10 +26,8 @@ _ cargo +"$rust_stable" clippy --workspace -- --deny=warnings

 _ cargo +"$rust_stable" audit --version
 _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit --ignore RUSTSEC-2020-0002 --ignore RUSTSEC-2020-0008
-_ ci/nits.sh
 _ ci/order-crates-for-publishing.py
 _ docs/build.sh
-_ ci/check-ssh-keys.sh

 {
   cd programs/bpf
27  ci/test-sanity.sh (new executable file)
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$0")/.."
+
+source ci/_
+
+(
+  echo --- git diff --check
+  set -x
+  # Look for failed mergify.io backports by searching leftover conflict markers
+  # Also check for any trailing whitespaces!
+  if [[ -n $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
+    base_branch=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
+  else
+    base_branch=$BUILDKITE_BRANCH
+  fi
+  git fetch origin "$base_branch"
+  git diff "$(git merge-base HEAD "origin/$base_branch")..HEAD" --check --oneline
+)
+
+echo
+
+_ ci/nits.sh
+_ ci/check-ssh-keys.sh
+
+echo --- ok
@@ -39,9 +39,9 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
 rm -rf target/xargo # Issue #3105

 # Limit compiler jobs to reduce memory usage
-# on machines with 1gb/thread of memory
+# on machines with 2gb/thread of memory
 NPROC=$(nproc)
-NPROC=$((NPROC>16 ? 16 : NPROC))
+NPROC=$((NPROC>14 ? 14 : NPROC))

 echo "Executing $testName"
 case $testName in
@@ -23,10 +23,14 @@ if [[ -z $CI_TAG ]]; then
   exit 1
 fi

-if [[ -z $CI_REPO_SLUG ]]; then
-  echo Error: CI_REPO_SLUG not defined
-  exit 1
-fi
+# Force CI_REPO_SLUG since sometimes
+# BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG is not set correctly, causing the
+# artifact upload to fail
+CI_REPO_SLUG=solana-labs/solana
+#if [[ -z $CI_REPO_SLUG ]]; then
+#  echo Error: CI_REPO_SLUG not defined
+#  exit 1
+#fi

 releaseId=$( \
   curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \
@@ -1,6 +1,6 @@
 [package]
 name = "solana-clap-utils"
-version = "1.2.0"
+version = "1.2.2"
 description = "Solana utilities for the clap"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
 [dependencies]
 clap = "2.33.0"
 rpassword = "4.0"
-solana-remote-wallet = { path = "../remote-wallet", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
+solana-remote-wallet = { path = "../remote-wallet", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
 thiserror = "1.0.11"
 tiny-bip39 = "0.7.0"
 url = "2.1.0"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli-config"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.2.0"
+version = "1.2.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.2.0"
+version = "1.2.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -27,28 +27,28 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
 serde = "1.0.110"
 serde_derive = "1.0.103"
 serde_json = "1.0.53"
-solana-budget-program = { path = "../programs/budget", version = "1.2.0" }
-solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
-solana-cli-config = { path = "../cli-config", version = "1.2.0" }
-solana-client = { path = "../client", version = "1.2.0" }
-solana-config-program = { path = "../programs/config", version = "1.2.0" }
-solana-faucet = { path = "../faucet", version = "1.2.0" }
-solana-logger = { path = "../logger", version = "1.2.0" }
-solana-net-utils = { path = "../net-utils", version = "1.2.0" }
-solana-remote-wallet = { path = "../remote-wallet", version = "1.2.0" }
-solana-runtime = { path = "../runtime", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
-solana-stake-program = { path = "../programs/stake", version = "1.2.0" }
-solana-transaction-status = { path = "../transaction-status", version = "1.2.0" }
-solana-version = { path = "../version", version = "1.2.0" }
-solana-vote-program = { path = "../programs/vote", version = "1.2.0" }
-solana-vote-signer = { path = "../vote-signer", version = "1.2.0" }
+solana-budget-program = { path = "../programs/budget", version = "1.2.2" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.2" }
+solana-cli-config = { path = "../cli-config", version = "1.2.2" }
+solana-client = { path = "../client", version = "1.2.2" }
+solana-config-program = { path = "../programs/config", version = "1.2.2" }
+solana-faucet = { path = "../faucet", version = "1.2.2" }
+solana-logger = { path = "../logger", version = "1.2.2" }
+solana-net-utils = { path = "../net-utils", version = "1.2.2" }
+solana-remote-wallet = { path = "../remote-wallet", version = "1.2.2" }
+solana-runtime = { path = "../runtime", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
+solana-stake-program = { path = "../programs/stake", version = "1.2.2" }
+solana-transaction-status = { path = "../transaction-status", version = "1.2.2" }
+solana-version = { path = "../version", version = "1.2.2" }
+solana-vote-program = { path = "../programs/vote", version = "1.2.2" }
+solana-vote-signer = { path = "../vote-signer", version = "1.2.2" }
 thiserror = "1.0.19"
 url = "2.1.1"

 [dev-dependencies]
-solana-core = { path = "../core", version = "1.2.0" }
-solana-budget-program = { path = "../programs/budget", version = "1.2.0" }
+solana-core = { path = "../core", version = "1.2.2" }
+solana-budget-program = { path = "../programs/budget", version = "1.2.2" }
 tempfile = "3.1.0"

 [[bin]]
205  cli/src/cli.rs
@@ -2,7 +2,7 @@ use crate::{
     checks::*,
     cli_output::{CliAccount, CliSignOnlyData, CliSignature, OutputFormat},
     cluster_query::*,
-    display::println_name_value,
+    display::{new_spinner_progress_bar, println_name_value, println_transaction},
     nonce::{self, *},
     offline::{blockhash_query::BlockhashQuery, *},
     spend_utils::*,
@@ -27,7 +27,7 @@ use solana_clap_utils::{
 use solana_client::{
     client_error::{ClientError, ClientErrorKind, Result as ClientResult},
     rpc_client::RpcClient,
-    rpc_config::RpcLargestAccountsFilter,
+    rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
     rpc_response::{RpcAccount, RpcKeyedAccount},
 };
 #[cfg(not(test))]
@@ -37,7 +37,7 @@ use solana_faucet::faucet_mock::request_airdrop_transaction;
 use solana_remote_wallet::remote_wallet::RemoteWalletManager;
 use solana_sdk::{
     bpf_loader,
-    clock::{Epoch, Slot},
+    clock::{Epoch, Slot, DEFAULT_TICKS_PER_SECOND},
     commitment_config::CommitmentConfig,
     fee_calculator::FeeCalculator,
     hash::Hash,
@@ -48,6 +48,7 @@ use solana_sdk::{
     program_utils::DecodeError,
     pubkey::{Pubkey, MAX_SEED_LEN},
     signature::{Keypair, Signature, Signer, SignerError},
+    signers::Signers,
     system_instruction::{self, SystemError},
     system_program,
     transaction::{Transaction, TransactionError},
@@ -323,6 +324,16 @@ pub enum CliCommand {
         lamports: u64,
         fee_payer: SignerIndex,
     },
+    MergeStake {
+        stake_account_pubkey: Pubkey,
+        source_stake_account_pubkey: Pubkey,
+        stake_authority: SignerIndex,
+        sign_only: bool,
+        blockhash_query: BlockhashQuery,
+        nonce_account: Option<Pubkey>,
+        nonce_authority: SignerIndex,
+        fee_payer: SignerIndex,
+    },
     ShowStakeHistory {
         use_lamports_unit: bool,
     },
@@ -396,6 +407,10 @@ pub enum CliCommand {
         vote_account_pubkey: Pubkey,
         new_identity_account: SignerIndex,
     },
+    VoteUpdateCommission {
+        vote_account_pubkey: Pubkey,
+        commission: u8,
+    },
     // Wallet Commands
     Address,
     Airdrop {
@@ -686,6 +701,9 @@ pub fn parse_command(
         ("split-stake", Some(matches)) => {
             parse_split_stake(matches, default_signer_path, wallet_manager)
         }
+        ("merge-stake", Some(matches)) => {
+            parse_merge_stake(matches, default_signer_path, wallet_manager)
+        }
         ("stake-authorize", Some(matches)) => {
             parse_stake_authorize(matches, default_signer_path, wallet_manager)
         }
@@ -709,6 +727,9 @@ pub fn parse_command(
         ("vote-update-validator", Some(matches)) => {
             parse_vote_update_validator(matches, default_signer_path, wallet_manager)
         }
+        ("vote-update-commission", Some(matches)) => {
+            parse_vote_update_commission(matches, default_signer_path, wallet_manager)
+        }
         ("vote-authorize-voter", Some(matches)) => parse_vote_authorize(
             matches,
             default_signer_path,
@@ -1159,7 +1180,7 @@ fn process_confirm(
         "\nTransaction executed in slot {}:",
         confirmed_transaction.slot
     );
-    crate::display::println_transaction(
+    println_transaction(
         &confirmed_transaction
             .transaction
             .transaction
@@ -1189,7 +1210,7 @@ fn process_confirm(
 }

 fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
-    crate::display::println_transaction(transaction, &None, "");
+    println_transaction(transaction, &None, "");
     Ok("".to_string())
 }
@@ -1227,6 +1248,103 @@ fn process_show_account(
     Ok(account_string)
 }

+fn send_and_confirm_transactions_with_spinner<T: Signers>(
+    rpc_client: &RpcClient,
+    mut transactions: Vec<Transaction>,
+    signer_keys: &T,
+) -> Result<(), Box<dyn error::Error>> {
+    let progress_bar = new_spinner_progress_bar();
+    let mut send_retries = 5;
+    loop {
+        let mut status_retries = 15;
+
+        // Send all transactions
+        let mut transactions_signatures = vec![];
+        let num_transactions = transactions.len();
+        for transaction in transactions {
+            if cfg!(not(test)) {
+                // Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
+                // when all the write transactions modify the same program account (eg, deploying a
+                // new program)
+                sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
+            }
+
+            let signature = rpc_client
+                .send_transaction_with_config(
+                    &transaction,
+                    RpcSendTransactionConfig {
+                        skip_preflight: true,
+                    },
+                )
+                .ok();
+            transactions_signatures.push((transaction, signature));
+
+            progress_bar.set_message(&format!(
+                "[{}/{}] Transactions sent",
+                transactions_signatures.len(),
+                num_transactions
+            ));
+        }
+
+        // Collect statuses for all the transactions, drop those that are confirmed
+        while status_retries > 0 {
+            status_retries -= 1;
+
+            progress_bar.set_message(&format!(
+                "[{}/{}] Transactions confirmed",
+                num_transactions - transactions_signatures.len(),
+                num_transactions
+            ));
+
+            if cfg!(not(test)) {
+                // Retry twice a second
+                sleep(Duration::from_millis(500));
+            }
+
+            transactions_signatures = transactions_signatures
+                .into_iter()
+                .filter(|(_transaction, signature)| {
+                    if let Some(signature) = signature {
+                        if let Ok(status) = rpc_client.get_signature_status(&signature) {
+                            if rpc_client
+                                .get_num_blocks_since_signature_confirmation(&signature)
+                                .unwrap_or(0)
+                                > 1
+                            {
+                                return false;
+                            } else {
+                                return match status {
+                                    None => true,
+                                    Some(result) => result.is_err(),
+                                };
+                            }
+                        }
+                    }
+                    true
+                })
+                .collect();
+
+            if transactions_signatures.is_empty() {
+                return Ok(());
+            }
+        }
+
+        if send_retries == 0 {
+            return Err("Transactions failed".into());
+        }
+        send_retries -= 1;
+
+        // Re-sign any failed transactions with a new blockhash and retry
+        let (blockhash, _fee_calculator) = rpc_client
+            .get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
+        transactions = vec![];
+        for (mut transaction, _) in transactions_signatures.into_iter() {
+            transaction.try_sign(signer_keys, blockhash)?;
+            transactions.push(transaction);
+        }
+    }
+}
+
 fn process_deploy(
     rpc_client: &RpcClient,
     config: &CliConfig,
@@ -1294,15 +1412,18 @@ fn process_deploy(
     })?;

     trace!("Writing program data");
-    rpc_client
-        .send_and_confirm_transactions_with_spinner(write_transactions, &signers)
-        .map_err(|_| {
-            CliError::DynamicProgramError("Data writes to program account failed".to_string())
-        })?;
+    send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
+        |_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
+    )?;

     trace!("Finalizing program account");
     rpc_client
-        .send_and_confirm_transaction_with_spinner(&finalize_tx)
+        .send_and_confirm_transaction_with_spinner_and_config(
+            &finalize_tx,
+            RpcSendTransactionConfig {
+                skip_preflight: true,
+            },
+        )
         .map_err(|e| {
             CliError::DynamicProgramError(format!("Finalizing program account failed: {}", e))
         })?;
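The pending-set filter inside the new spinner helper is easy to misread: returning false drops a transaction as confirmed, while true keeps it for another poll or a re-sign with a fresh blockhash. A distilled sketch of that decision with the RPC calls stubbed out (hypothetical stand-ins, not the real RpcClient API):

// status: None = not yet processed; Some(Ok) = succeeded; Some(Err) = failed.
// blocks_since_confirmation > 1 means the signature is comfortably rooted.
fn keep_pending(status: Option<Result<(), ()>>, blocks_since_confirmation: u64) -> bool {
    if blocks_since_confirmation > 1 {
        return false; // confirmed deeply enough: drop from the pending set
    }
    match status {
        None => true,                    // still unknown: keep polling
        Some(result) => result.is_err(), // keep (and later re-sign) only failures
    }
}

fn main() {
    assert!(!keep_pending(Some(Ok(())), 5)); // rooted success: done
    assert!(keep_pending(None, 0));          // unseen: keep waiting
    assert!(keep_pending(Some(Err(())), 0)); // failed: retry
}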
@@ -1530,11 +1651,6 @@ fn process_transfer(
 ) -> ProcessResult {
     let from = config.signers[from];

-    check_unique_pubkeys(
-        (&from.pubkey(), "cli keypair".to_string()),
-        (to, "to".to_string()),
-    )?;
-
     let (recent_blockhash, fee_calculator) =
         blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;

@@ -1655,7 +1771,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             seed,
             program_id,
         } => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id),
-        CliCommand::Fees => process_fees(&rpc_client),
+        CliCommand::Fees => process_fees(&rpc_client, config),
         CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, config, *slot),
         CliCommand::GetGenesisHash => process_get_genesis_hash(&rpc_client),
         CliCommand::GetEpochInfo { commitment_config } => {
@@ -1889,6 +2005,27 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             *lamports,
             *fee_payer,
         ),
+        CliCommand::MergeStake {
+            stake_account_pubkey,
+            source_stake_account_pubkey,
+            stake_authority,
+            sign_only,
+            blockhash_query,
+            nonce_account,
+            nonce_authority,
+            fee_payer,
+        } => process_merge_stake(
+            &rpc_client,
+            config,
+            &stake_account_pubkey,
+            &source_stake_account_pubkey,
+            *stake_authority,
+            *sign_only,
+            blockhash_query,
+            *nonce_account,
+            *nonce_authority,
+            *fee_payer,
+        ),
         CliCommand::ShowStakeAccount {
             pubkey: stake_account_pubkey,
             use_lamports_unit,
@@ -2048,6 +2185,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             &vote_account_pubkey,
             *new_identity_account,
         ),
+        CliCommand::VoteUpdateCommission {
+            vote_account_pubkey,
+            commission,
+        } => process_vote_update_commission(&rpc_client, config, &vote_account_pubkey, *commission),

         // Wallet Commands
@@ -3241,6 +3382,7 @@ mod tests {
         // Success cases
         let mut config = CliConfig::default();
         config.rpc_client = Some(RpcClient::new_mock("succeeds".to_string()));
+        config.json_rpc_url = "http://127.0.0.1:8899".to_string();

         let keypair = Keypair::new();
         let pubkey = keypair.pubkey().to_string();
@@ -3328,10 +3470,10 @@ mod tests {
         let result = process_command(&config);
         assert!(result.is_ok());

-        let stake_pubkey = Pubkey::new_rand();
+        let stake_account_pubkey = Pubkey::new_rand();
         let to_pubkey = Pubkey::new_rand();
         config.command = CliCommand::WithdrawStake {
-            stake_account_pubkey: stake_pubkey,
+            stake_account_pubkey,
             destination_account_pubkey: to_pubkey,
             lamports: 100,
             withdraw_authority: 0,
@@ -3346,9 +3488,9 @@ mod tests {
         let result = process_command(&config);
         assert!(result.is_ok());

-        let stake_pubkey = Pubkey::new_rand();
+        let stake_account_pubkey = Pubkey::new_rand();
         config.command = CliCommand::DeactivateStake {
-            stake_account_pubkey: stake_pubkey,
+            stake_account_pubkey,
             stake_authority: 0,
             sign_only: false,
             blockhash_query: BlockhashQuery::default(),
@@ -3359,10 +3501,10 @@ mod tests {
         let result = process_command(&config);
         assert!(result.is_ok());

-        let stake_pubkey = Pubkey::new_rand();
+        let stake_account_pubkey = Pubkey::new_rand();
         let split_stake_account = Keypair::new();
         config.command = CliCommand::SplitStake {
-            stake_account_pubkey: stake_pubkey,
+            stake_account_pubkey,
             stake_authority: 0,
             sign_only: false,
             blockhash_query: BlockhashQuery::default(),
@@ -3377,6 +3519,23 @@ mod tests {
         let result = process_command(&config);
         assert!(result.is_ok());

+        let stake_account_pubkey = Pubkey::new_rand();
+        let source_stake_account_pubkey = Pubkey::new_rand();
+        let merge_stake_account = Keypair::new();
+        config.command = CliCommand::MergeStake {
+            stake_account_pubkey,
+            source_stake_account_pubkey,
+            stake_authority: 1,
+            sign_only: false,
+            blockhash_query: BlockhashQuery::default(),
+            nonce_account: None,
+            nonce_authority: 0,
+            fee_payer: 0,
+        };
+        config.signers = vec![&keypair, &merge_stake_account];
+        let result = process_command(&config);
+        assert!(dbg!(result).is_ok());
+
         config.command = CliCommand::GetSlot {
             commitment_config: CommitmentConfig::default(),
         };
@@ -482,7 +482,7 @@ impl fmt::Display for CliKeyedStakeState {
 #[serde(rename_all = "camelCase")]
 pub struct CliStakeState {
     pub stake_type: CliStakeType,
-    pub total_stake: u64,
+    pub account_balance: u64,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub delegated_stake: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -497,6 +497,16 @@ pub struct CliStakeState {
     pub lockup: Option<CliLockup>,
     #[serde(skip_serializing)]
     pub use_lamports_unit: bool,
+    #[serde(skip_serializing)]
+    pub current_epoch: Epoch,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rent_exempt_reserve: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub active_stake: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub activating_stake: Option<u64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub deactivating_stake: Option<u64>,
 }

 impl fmt::Display for CliStakeState {
@@ -522,52 +532,122 @@ impl fmt::Display for CliStakeState {
             Ok(())
         }

+        writeln!(
+            f,
+            "Balance: {}",
+            build_balance_message(self.account_balance, self.use_lamports_unit, true)
+        )?;
+
+        if let Some(rent_exempt_reserve) = self.rent_exempt_reserve {
+            writeln!(
+                f,
+                "Rent Exempt Reserve: {}",
+                build_balance_message(rent_exempt_reserve, self.use_lamports_unit, true)
+            )?;
+        }
+
         match self.stake_type {
             CliStakeType::RewardsPool => writeln!(f, "Stake account is a rewards pool")?,
             CliStakeType::Uninitialized => writeln!(f, "Stake account is uninitialized")?,
             CliStakeType::Initialized => {
-                writeln!(
-                    f,
-                    "Total Stake: {}",
-                    build_balance_message(self.total_stake, self.use_lamports_unit, true)
-                )?;
                 writeln!(f, "Stake account is undelegated")?;
                 show_authorized(f, self.authorized.as_ref().unwrap())?;
                 show_lockup(f, self.lockup.as_ref().unwrap())?;
             }
             CliStakeType::Stake => {
-                writeln!(
-                    f,
-                    "Total Stake: {}",
-                    build_balance_message(self.total_stake, self.use_lamports_unit, true)
-                )?;
-                writeln!(
-                    f,
-                    "Delegated Stake: {}",
-                    build_balance_message(
-                        self.delegated_stake.unwrap(),
-                        self.use_lamports_unit,
-                        true
-                    )
-                )?;
-                if let Some(delegated_vote_account_address) = &self.delegated_vote_account_address {
-                    writeln!(
-                        f,
-                        "Delegated Vote Account Address: {}",
-                        delegated_vote_account_address
-                    )?;
-                }
-                writeln!(
-                    f,
-                    "Stake activates starting from epoch: {}",
-                    self.activation_epoch.unwrap()
-                )?;
-                if let Some(deactivation_epoch) = self.deactivation_epoch {
-                    writeln!(
-                        f,
-                        "Stake deactivates starting from epoch: {}",
-                        deactivation_epoch
-                    )?;
-                }
+                let show_delegation = {
+                    self.active_stake.is_some()
+                        || self.activating_stake.is_some()
+                        || self.deactivating_stake.is_some()
+                        || self
+                            .deactivation_epoch
+                            .map(|de| de > self.current_epoch)
+                            .unwrap_or(true)
+                };
+                if show_delegation {
+                    let delegated_stake = self.delegated_stake.unwrap();
+                    writeln!(
+                        f,
+                        "Delegated Stake: {}",
+                        build_balance_message(delegated_stake, self.use_lamports_unit, true)
+                    )?;
+                    if self
+                        .deactivation_epoch
+                        .map(|d| self.current_epoch <= d)
+                        .unwrap_or(true)
+                    {
+                        let active_stake = self.active_stake.unwrap_or(0);
+                        writeln!(
+                            f,
+                            "Active Stake: {}",
+                            build_balance_message(active_stake, self.use_lamports_unit, true),
+                        )?;
+                        let activating_stake = self.activating_stake.or_else(|| {
+                            if self.active_stake.is_none() {
+                                Some(delegated_stake)
+                            } else {
+                                None
+                            }
+                        });
+                        if let Some(activating_stake) = activating_stake {
+                            writeln!(
+                                f,
+                                "Activating Stake: {}",
+                                build_balance_message(
+                                    activating_stake,
+                                    self.use_lamports_unit,
+                                    true
+                                ),
+                            )?;
+                            writeln!(
+                                f,
+                                "Stake activates starting from epoch: {}",
+                                self.activation_epoch.unwrap()
+                            )?;
+                        }
+                    }
+
+                    if let Some(deactivation_epoch) = self.deactivation_epoch {
+                        if self.current_epoch > deactivation_epoch {
+                            let deactivating_stake = self.deactivating_stake.or(self.active_stake);
+                            if let Some(deactivating_stake) = deactivating_stake {
+                                writeln!(
+                                    f,
+                                    "Inactive Stake: {}",
+                                    build_balance_message(
+                                        delegated_stake - deactivating_stake,
+                                        self.use_lamports_unit,
+                                        true
+                                    ),
+                                )?;
+                                writeln!(
+                                    f,
+                                    "Deactivating Stake: {}",
+                                    build_balance_message(
+                                        deactivating_stake,
+                                        self.use_lamports_unit,
+                                        true
+                                    ),
+                                )?;
+                            }
+                        }
+                        writeln!(
+                            f,
+                            "Stake deactivates starting from epoch: {}",
+                            deactivation_epoch
+                        )?;
+                    }
+                    if let Some(delegated_vote_account_address) =
+                        &self.delegated_vote_account_address
+                    {
+                        writeln!(
+                            f,
+                            "Delegated Vote Account Address: {}",
+                            delegated_vote_account_address
+                        )?;
+                    }
+                } else {
+                    writeln!(f, "Stake account is undelegated")?;
+                }
                 show_authorized(f, self.authorized.as_ref().unwrap())?;
                 show_lockup(f, self.lockup.as_ref().unwrap())?;
@@ -900,6 +980,7 @@ impl fmt::Display for CliSignOnlyData {
 }

 #[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 pub struct CliSignature {
     pub signature: String,
 }
@@ -913,6 +994,7 @@ impl fmt::Display for CliSignature {
 }

 #[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 pub struct CliAccountBalances {
     pub accounts: Vec<RpcAccountBalance>,
 }
@@ -937,6 +1019,7 @@ impl fmt::Display for CliAccountBalances {
 }

 #[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 pub struct CliSupply {
     pub total: u64,
     pub circulating: u64,
@@ -981,3 +1064,25 @@ impl fmt::Display for CliSupply {
         Ok(())
     }
 }
+
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CliFees {
+    pub slot: Slot,
+    pub blockhash: String,
+    pub lamports_per_signature: u64,
+    pub last_valid_slot: Slot,
+}
+
+impl fmt::Display for CliFees {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        writeln_name_value(f, "Blockhash:", &self.blockhash)?;
+        writeln_name_value(
+            f,
+            "Lamports per signature:",
+            &self.lamports_per_signature.to_string(),
+        )?;
+        writeln_name_value(f, "Last valid slot:", &self.last_valid_slot.to_string())?;
+        Ok(())
+    }
+}
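With CliFees routed through the CLI's output formatting, the human-readable form of the fees query now carries the expiry slot. Roughly what the Display impl above renders (all values invented for illustration):

Blockhash: 7WzAGk2qXivvPvCxpqM7Ls4mTCrdcCBiBCdpHCWMK2kq
Lamports per signature: 5000
Last valid slot: 93874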
@@ -1,12 +1,11 @@
 use crate::{
     cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
     cli_output::*,
-    display::println_name_value,
+    display::{new_spinner_progress_bar, println_name_value},
     spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
 };
 use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
 use console::{style, Emoji};
-use indicatif::{ProgressBar, ProgressStyle};
 use solana_clap_utils::{
     commitment::{commitment_arg, COMMITMENT_ARG},
     input_parsers::*,
@@ -29,7 +28,11 @@ use solana_sdk::{
     native_token::lamports_to_sol,
     pubkey::{self, Pubkey},
     system_instruction, system_program,
-    sysvar::{self, Sysvar},
+    sysvar::{
+        self,
+        stake_history::{self, StakeHistory},
+        Sysvar,
+    },
     transaction::Transaction,
 };
 use std::{
@@ -463,15 +466,6 @@ pub fn parse_transaction_history(
     })
 }

-/// Creates a new process bar for processing that will take an unknown amount of time
-fn new_spinner_progress_bar() -> ProgressBar {
-    let progress_bar = ProgressBar::new(42);
-    progress_bar
-        .set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
-    progress_bar.enable_steady_tick(100);
-    progress_bar
-}
-
 pub fn process_catchup(
     rpc_client: &RpcClient,
     node_pubkey: &Pubkey,
@@ -597,13 +591,16 @@ pub fn process_cluster_version(rpc_client: &RpcClient) -> ProcessResult {
     Ok(remote_version.solana_core)
 }

-pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
-    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
-
-    Ok(format!(
-        "blockhash: {}\nlamports per signature: {}",
-        recent_blockhash, fee_calculator.lamports_per_signature
-    ))
+pub fn process_fees(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult {
+    let result = rpc_client.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
+    let (recent_blockhash, fee_calculator, last_valid_slot) = result.value;
+    let fees = CliFees {
+        slot: result.context.slot,
+        blockhash: recent_blockhash.to_string(),
+        lamports_per_signature: fee_calculator.lamports_per_signature,
+        last_valid_slot,
+    };
+    Ok(config.output_format.formatted_string(&fees))
 }

 pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
@@ -1181,8 +1178,13 @@ pub fn process_show_stakes(
     let progress_bar = new_spinner_progress_bar();
     progress_bar.set_message("Fetching stake accounts...");
     let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
+    let stake_history_account = rpc_client.get_account(&stake_history::id())?;
     progress_bar.finish_and_clear();

+    let stake_history = StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
+        CliError::RpcRequestError("Failed to deserialize stake history".to_string())
+    })?;
+
     let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
     for (stake_pubkey, stake_account) in all_stake_accounts {
         if let Ok(stake_state) = stake_account.state() {
@@ -1195,6 +1197,7 @@ pub fn process_show_stakes(
                     stake_account.lamports,
                     &stake_state,
                     use_lamports_unit,
+                    &stake_history,
                 ),
             });
         }
@@ -1211,6 +1214,7 @@ pub fn process_show_stakes(
                     stake_account.lamports,
                     &stake_state,
                     use_lamports_unit,
+                    &stake_history,
                 ),
             });
         }
@@ -1,5 +1,6 @@
 use crate::cli::SettingType;
 use console::style;
+use indicatif::{ProgressBar, ProgressStyle};
 use solana_sdk::{
     hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
     transaction::Transaction,
@@ -200,3 +201,12 @@ pub fn println_transaction(
         }
     }
 }
+
+/// Creates a new process bar for processing that will take an unknown amount of time
+pub fn new_spinner_progress_bar() -> ProgressBar {
+    let progress_bar = ProgressBar::new(42);
+    progress_bar
+        .set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
+    progress_bar.enable_steady_tick(100);
+    progress_bar
+}
305  cli/src/stake.rs
@@ -12,7 +12,7 @@ use crate::{
 };
 use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand};
 use solana_clap_utils::{input_parsers::*, input_validators::*, offline::*, ArgConstant};
-use solana_client::rpc_client::RpcClient;
+use solana_client::{rpc_client::RpcClient, rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE};
 use solana_remote_wallet::remote_wallet::RemoteWalletManager;
 use solana_sdk::{
     account_utils::StateMut,
@@ -264,6 +264,29 @@ impl StakeSubCommands for App<'_, '_> {
                 .arg(nonce_authority_arg())
                 .arg(fee_payer_arg())
         )
+        .subcommand(
+            SubCommand::with_name("merge-stake")
+                .about("Merges one stake account into another")
+                .arg(
+                    pubkey!(Arg::with_name("stake_account_pubkey")
+                        .index(1)
+                        .value_name("STAKE_ACCOUNT_ADDRESS")
+                        .required(true),
+                        "Stake account to merge into")
+                )
+                .arg(
+                    pubkey!(Arg::with_name("source_stake_account_pubkey")
+                        .index(2)
+                        .value_name("SOURCE_STAKE_ACCOUNT_ADDRESS")
+                        .required(true),
+                        "Source stake account for the merge. If successful, this stake account will no longer exist after the merge")
+                )
+                .arg(stake_authority_arg())
+                .offline_args()
+                .arg(nonce_arg())
+                .arg(nonce_authority_arg())
+                .arg(fee_payer_arg())
+        )
         .subcommand(
             SubCommand::with_name("withdraw-stake")
                 .about("Withdraw the unstaked SOL from the stake account")
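With the parser and processor added below, this registers a complete new top-level subcommand; going by the two positional arguments defined above, an invocation would look like `solana merge-stake <STAKE_ACCOUNT_ADDRESS> <SOURCE_STAKE_ACCOUNT_ADDRESS>`, plus the usual `--stake-authority`, nonce, and offline signing flags.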
@@ -606,6 +629,47 @@ pub fn parse_split_stake(
     })
 }

+pub fn parse_merge_stake(
+    matches: &ArgMatches<'_>,
+    default_signer_path: &str,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let stake_account_pubkey =
+        pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
+
+    let source_stake_account_pubkey = pubkey_of(matches, "source_stake_account_pubkey").unwrap();
+
+    let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
+    let blockhash_query = BlockhashQuery::new_from_matches(matches);
+    let nonce_account = pubkey_of(matches, NONCE_ARG.name);
+    let (stake_authority, stake_authority_pubkey) =
+        signer_of(matches, STAKE_AUTHORITY_ARG.name, wallet_manager)?;
+    let (nonce_authority, nonce_authority_pubkey) =
+        signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
+    let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
+
+    let mut bulk_signers = vec![stake_authority, fee_payer];
+    if nonce_account.is_some() {
+        bulk_signers.push(nonce_authority);
+    }
+    let signer_info =
+        generate_unique_signers(bulk_signers, matches, default_signer_path, wallet_manager)?;
+
+    Ok(CliCommandInfo {
+        command: CliCommand::MergeStake {
+            stake_account_pubkey,
+            source_stake_account_pubkey,
+            stake_authority: signer_info.index_of(stake_authority_pubkey).unwrap(),
+            sign_only,
+            blockhash_query,
+            nonce_account,
+            nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
+            fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
+        },
+        signers: signer_info.signers,
+    })
+}
+
 pub fn parse_stake_deactivate_stake(
     matches: &ArgMatches<'_>,
     default_signer_path: &str,
@@ -1199,6 +1263,99 @@ pub fn process_split_stake(
     }
 }

+#[allow(clippy::too_many_arguments)]
+pub fn process_merge_stake(
+    rpc_client: &RpcClient,
+    config: &CliConfig,
+    stake_account_pubkey: &Pubkey,
+    source_stake_account_pubkey: &Pubkey,
+    stake_authority: SignerIndex,
+    sign_only: bool,
+    blockhash_query: &BlockhashQuery,
+    nonce_account: Option<Pubkey>,
+    nonce_authority: SignerIndex,
+    fee_payer: SignerIndex,
+) -> ProcessResult {
+    let fee_payer = config.signers[fee_payer];
+
+    check_unique_pubkeys(
+        (&fee_payer.pubkey(), "fee-payer keypair".to_string()),
+        (&stake_account_pubkey, "stake_account".to_string()),
+    )?;
+    check_unique_pubkeys(
+        (&fee_payer.pubkey(), "fee-payer keypair".to_string()),
+        (
+            &source_stake_account_pubkey,
+            "source_stake_account".to_string(),
+        ),
+    )?;
+    check_unique_pubkeys(
+        (&stake_account_pubkey, "stake_account".to_string()),
+        (
+            &source_stake_account_pubkey,
+            "source_stake_account".to_string(),
+        ),
+    )?;
+
+    let stake_authority = config.signers[stake_authority];
+
+    if !sign_only {
+        for stake_account_address in &[stake_account_pubkey, source_stake_account_pubkey] {
+            if let Ok(stake_account) = rpc_client.get_account(stake_account_address) {
+                if stake_account.owner != solana_stake_program::id() {
+                    return Err(CliError::BadParameter(format!(
+                        "Account {} is not a stake account",
+                        stake_account_address
+                    ))
+                    .into());
+                }
+            }
+        }
+    }
+
+    let (recent_blockhash, fee_calculator) =
+        blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
+
+    let ixs = stake_instruction::merge(
+        &stake_account_pubkey,
+        &source_stake_account_pubkey,
+        &stake_authority.pubkey(),
+    );
+
+    let nonce_authority = config.signers[nonce_authority];
+
+    let message = if let Some(nonce_account) = &nonce_account {
+        Message::new_with_nonce(
+            ixs,
+            Some(&fee_payer.pubkey()),
+            nonce_account,
+            &nonce_authority.pubkey(),
+        )
+    } else {
+        Message::new_with_payer(&ixs, Some(&fee_payer.pubkey()))
+    };
+    let mut tx = Transaction::new_unsigned(message);
+
+    if sign_only {
+        tx.try_partial_sign(&config.signers, recent_blockhash)?;
+        return_signers(&tx, &config)
+    } else {
+        tx.try_sign(&config.signers, recent_blockhash)?;
+        if let Some(nonce_account) = &nonce_account {
+            let nonce_account = rpc_client.get_account(nonce_account)?;
+            check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?;
+        }
+        check_account_for_fee(
+            rpc_client,
+            &tx.message.account_keys[0],
+            &fee_calculator,
+            &tx.message,
+        )?;
+        let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
+        log_instruction_custom_error::<StakeError>(result, &config)
+    }
+}
+
 #[allow(clippy::too_many_arguments)]
 pub fn process_stake_set_lockup(
     rpc_client: &RpcClient,
@@ -1256,53 +1413,85 @@ pub fn process_stake_set_lockup(
     }
 }

+fn u64_some_if_not_zero(n: u64) -> Option<u64> {
+    if n > 0 {
+        Some(n)
+    } else {
+        None
+    }
+}
+
 pub fn build_stake_state(
-    stake_lamports: u64,
+    account_balance: u64,
     stake_state: &StakeState,
     use_lamports_unit: bool,
+    stake_history: &StakeHistory,
 ) -> CliStakeState {
     match stake_state {
         StakeState::Stake(
             Meta {
-                authorized, lockup, ..
+                rent_exempt_reserve,
+                authorized,
+                lockup,
             },
             stake,
-        ) => CliStakeState {
-            stake_type: CliStakeType::Stake,
-            total_stake: stake_lamports,
-            delegated_stake: Some(stake.delegation.stake),
-            delegated_vote_account_address: if stake.delegation.voter_pubkey != Pubkey::default() {
-                Some(stake.delegation.voter_pubkey.to_string())
-            } else {
-                None
-            },
-            activation_epoch: Some(if stake.delegation.activation_epoch < std::u64::MAX {
-                stake.delegation.activation_epoch
-            } else {
-                0
-            }),
-            deactivation_epoch: if stake.delegation.deactivation_epoch < std::u64::MAX {
-                Some(stake.delegation.deactivation_epoch)
-            } else {
-                None
-            },
-            authorized: Some(authorized.into()),
-            lockup: Some(lockup.into()),
-            use_lamports_unit,
-        },
+        ) => {
+            // The first entry in stake history is the previous epoch, so +1 for current
+            let current_epoch = stake_history.iter().next().unwrap().0 + 1;
+            let (active_stake, activating_stake, deactivating_stake) = stake
+                .delegation
+                .stake_activating_and_deactivating(current_epoch, Some(stake_history));
+            CliStakeState {
+                stake_type: CliStakeType::Stake,
+                account_balance,
+                delegated_stake: Some(stake.delegation.stake),
+                delegated_vote_account_address: if stake.delegation.voter_pubkey
+                    != Pubkey::default()
+                {
+                    Some(stake.delegation.voter_pubkey.to_string())
+                } else {
+                    None
+                },
+                activation_epoch: Some(if stake.delegation.activation_epoch < std::u64::MAX {
+                    stake.delegation.activation_epoch
+                } else {
+                    0
+                }),
+                deactivation_epoch: if stake.delegation.deactivation_epoch < std::u64::MAX {
+                    Some(stake.delegation.deactivation_epoch)
+                } else {
+                    None
+                },
+                authorized: Some(authorized.into()),
+                lockup: Some(lockup.into()),
+                use_lamports_unit,
+                current_epoch,
+                rent_exempt_reserve: Some(*rent_exempt_reserve),
+                active_stake: u64_some_if_not_zero(active_stake),
+                activating_stake: u64_some_if_not_zero(activating_stake),
+                deactivating_stake: u64_some_if_not_zero(deactivating_stake),
+            }
+        }
         StakeState::RewardsPool => CliStakeState {
             stake_type: CliStakeType::RewardsPool,
+            account_balance,
+            ..CliStakeState::default()
+        },
+        StakeState::Uninitialized => CliStakeState {
+            account_balance,
             ..CliStakeState::default()
         },
-        StakeState::Uninitialized => CliStakeState::default(),
         StakeState::Initialized(Meta {
-            authorized, lockup, ..
+            rent_exempt_reserve,
+            authorized,
+            lockup,
         }) => CliStakeState {
             stake_type: CliStakeType::Initialized,
-            total_stake: stake_lamports,
+            account_balance,
             authorized: Some(authorized.into()),
             lockup: Some(lockup.into()),
             use_lamports_unit,
+            rent_exempt_reserve: Some(*rent_exempt_reserve),
             ..CliStakeState::default()
         },
     }
 }
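One detail worth calling out from the new arm: the current epoch is not fetched over RPC but derived from the stake-history sysvar, whose first entry describes the previous epoch (hence the +1 in the code above). A toy sketch of that derivation, with the sysvar type reduced to a bare vector:

// Simplified stand-in for the StakeHistory sysvar: newest entry first,
// keyed by epoch. The real type lives in solana-sdk's sysvar module.
struct StakeHistory(Vec<(u64, ())>);

impl StakeHistory {
    fn iter(&self) -> std::slice::Iter<'_, (u64, ())> {
        self.0.iter()
    }
}

fn current_epoch(stake_history: &StakeHistory) -> u64 {
    // First entry = previous epoch, so current = that epoch + 1.
    stake_history.iter().next().unwrap().0 + 1
}

fn main() {
    let history = StakeHistory(vec![(41, ()), (40, ())]);
    assert_eq!(current_epoch(&history), 42);
}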
@@ -1324,7 +1513,18 @@ pub fn process_show_stake_account(
     }
     match stake_account.state() {
         Ok(stake_state) => {
-            let state = build_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
+            let stake_history_account = rpc_client.get_account(&stake_history::id())?;
+            let stake_history =
+                StakeHistory::from_account(&stake_history_account).ok_or_else(|| {
+                    CliError::RpcRequestError("Failed to deserialize stake history".to_string())
+                })?;
+
+            let state = build_stake_state(
+                stake_account.lamports,
+                &stake_state,
+                use_lamports_unit,
+                &stake_history,
+            );
             Ok(config.output_format.formatted_string(&state))
         }
         Err(err) => Err(CliError::RpcRequestError(format!(
@@ -1399,13 +1599,15 @@ pub fn process_delegate_stake(
                 "Unable to delegate. Vote account has no root slot".to_string(),
             )),
             Some(root_slot) => {
-                let slot = rpc_client.get_slot()?;
-                if root_slot + solana_sdk::clock::DEFAULT_SLOTS_PER_TURN < slot {
-                    Err(CliError::BadParameter(
-                        format!(
-                            "Unable to delegate. Vote account root slot ({}) is too old, the current slot is {}", root_slot, slot
-                        )
-                    ))
+                let min_root_slot = rpc_client
+                    .get_slot()?
+                    .saturating_sub(DELINQUENT_VALIDATOR_SLOT_DISTANCE);
+                if root_slot < min_root_slot {
+                    Err(CliError::DynamicProgramError(format!(
+                        "Unable to delegate. Vote account appears delinquent \
+                         because its current root slot, {}, is less than {}",
+                        root_slot, min_root_slot
+                    )))
                 } else {
                     Ok(())
                 }
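The replacement check uses saturating arithmetic so a young cluster (current slot below the threshold) cannot underflow. A standalone sketch of the same guard, with the constant value taken from the DELINQUENT_VALIDATOR_SLOT_DISTANCE definition added later in this change set:

// Mirrors the delegation guard above as a free function; values are
// illustrative. DELINQUENT_VALIDATOR_SLOT_DISTANCE is defined as 128 in
// client/src/rpc_request.rs further down in this diff.
const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;

fn vote_account_looks_delinquent(current_slot: u64, root_slot: u64) -> bool {
    // saturating_sub keeps the threshold at 0 while the cluster is younger
    // than 128 slots, so fresh vote accounts are never flagged.
    let min_root_slot = current_slot.saturating_sub(DELINQUENT_VALIDATOR_SLOT_DISTANCE);
    root_slot < min_root_slot
}

fn main() {
    assert!(!vote_account_looks_delinquent(100, 0)); // young cluster: ok
    assert!(vote_account_looks_delinquent(1_000, 500)); // 500 < 872: delinquent
    assert!(!vote_account_looks_delinquent(1_000, 900)); // 900 >= 872: ok
}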
@@ -2878,5 +3080,34 @@ mod tests {
                 ],
             }
         );
+
+        // Test MergeStake SubCommand
+        let (keypair_file, mut tmp_file) = make_tmp_file();
+        let stake_account_keypair = Keypair::new();
+        write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
+
+        let source_stake_account_pubkey = Pubkey::new_rand();
+        let test_merge_stake_account = test_commands.clone().get_matches_from(vec![
+            "test",
+            "merge-stake",
+            &keypair_file,
+            &source_stake_account_pubkey.to_string(),
+        ]);
+        assert_eq!(
+            parse_command(&test_merge_stake_account, &default_keypair_file, &mut None).unwrap(),
+            CliCommandInfo {
+                command: CliCommand::MergeStake {
+                    stake_account_pubkey: stake_account_keypair.pubkey(),
+                    source_stake_account_pubkey,
+                    stake_authority: 0,
+                    sign_only: false,
+                    blockhash_query: BlockhashQuery::default(),
+                    nonce_account: None,
+                    nonce_authority: 0,
+                    fee_payer: 0,
+                },
+                signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),],
+            }
+        );
     }
 }
cli/src/vote.rs

@@ -161,6 +161,35 @@ impl VoteSubCommands for App<'_, '_> {
                         .help("Authorized withdrawer keypair"),
                 )
         )
+        .subcommand(
+            SubCommand::with_name("vote-update-commission")
+                .about("Update the vote account's commission")
+                .arg(
+                    pubkey!(Arg::with_name("vote_account_pubkey")
+                        .index(1)
+                        .value_name("VOTE_ACCOUNT_ADDRESS")
+                        .required(true),
+                        "Vote account to update. "),
+                )
+                .arg(
+                    Arg::with_name("commission")
+                        .index(2)
+                        .value_name("PERCENTAGE")
+                        .takes_value(true)
+                        .required(true)
+                        .validator(is_valid_percentage)
+                        .help("The new commission")
+                )
+                .arg(
+                    Arg::with_name("authorized_withdrawer")
+                        .index(3)
+                        .value_name("AUTHORIZED_KEYPAIR")
+                        .takes_value(true)
+                        .required(true)
+                        .validator(is_valid_signer)
+                        .help("Authorized withdrawer keypair"),
+                )
+        )
         .subcommand(
             SubCommand::with_name("vote-account")
                 .about("Show the contents of a vote account")
@@ -309,6 +338,33 @@ pub fn parse_vote_update_validator(
     })
 }

+pub fn parse_vote_update_commission(
+    matches: &ArgMatches<'_>,
+    default_signer_path: &str,
+    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
+) -> Result<CliCommandInfo, CliError> {
+    let vote_account_pubkey =
+        pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
+    let (authorized_withdrawer, _) = signer_of(matches, "authorized_withdrawer", wallet_manager)?;
+    let commission = value_t_or_exit!(matches, "commission", u8);
+
+    let payer_provided = None;
+    let signer_info = generate_unique_signers(
+        vec![payer_provided, authorized_withdrawer],
+        matches,
+        default_signer_path,
+        wallet_manager,
+    )?;
+
+    Ok(CliCommandInfo {
+        command: CliCommand::VoteUpdateCommission {
+            vote_account_pubkey,
+            commission,
+        },
+        signers: signer_info.signers,
+    })
+}
+
 pub fn parse_vote_get_account_command(
     matches: &ArgMatches<'_>,
     wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
@@ -521,6 +577,33 @@ pub fn process_vote_update_validator(
     log_instruction_custom_error::<VoteError>(result, &config)
 }

+pub fn process_vote_update_commission(
+    rpc_client: &RpcClient,
+    config: &CliConfig,
+    vote_account_pubkey: &Pubkey,
+    commission: u8,
+) -> ProcessResult {
+    let authorized_withdrawer = config.signers[1];
+    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
+    let ixs = vec![vote_instruction::update_commission(
+        vote_account_pubkey,
+        &authorized_withdrawer.pubkey(),
+        commission,
+    )];
+
+    let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
+    let mut tx = Transaction::new_unsigned(message);
+    tx.try_sign(&config.signers, recent_blockhash)?;
+    check_account_for_fee(
+        rpc_client,
+        &config.signers[0].pubkey(),
+        &fee_calculator,
+        &tx.message,
+    )?;
+    let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
+    log_instruction_custom_error::<VoteError>(result, &config)
+}
+
 fn get_vote_account(
     rpc_client: &RpcClient,
     vote_account_pubkey: &Pubkey,
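The processor assumes the signer layout produced by parse_vote_update_commission above: index 0 is the default signer (fee payer) and index 1 the authorized withdrawer. A hedged sketch of building the same transaction from plain SDK types (names and values are placeholders, not part of this diff):

use solana_sdk::{
    hash::Hash,
    message::Message,
    pubkey::Pubkey,
    signature::{Keypair, Signer},
    transaction::Transaction,
};
use solana_vote_program::vote_instruction;

// Sketch: the payer signs first, the authorized withdrawer co-signs,
// mirroring the signer order assembled by parse_vote_update_commission.
fn build_update_commission_tx(
    vote_account: &Pubkey,
    payer: &Keypair,
    withdrawer: &Keypair,
    commission: u8,
    recent_blockhash: Hash,
) -> Transaction {
    let ixs = vec![vote_instruction::update_commission(
        vote_account,
        &withdrawer.pubkey(),
        commission,
    )];
    let message = Message::new_with_payer(&ixs, Some(&payer.pubkey()));
    let mut tx = Transaction::new_unsigned(message);
    tx.try_sign(&[payer, withdrawer], recent_blockhash).unwrap();
    tx
}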
@@ -842,6 +925,27 @@ mod tests {
             }
         );
+
+        let test_update_commission = test_commands.clone().get_matches_from(vec![
+            "test",
+            "vote-update-commission",
+            &pubkey_string,
+            "42",
+            &keypair_file,
+        ]);
+        assert_eq!(
+            parse_command(&test_update_commission, &default_keypair_file, &mut None).unwrap(),
+            CliCommandInfo {
+                command: CliCommand::VoteUpdateCommission {
+                    vote_account_pubkey: pubkey,
+                    commission: 42,
+                },
+                signers: vec![
+                    read_keypair_file(&default_keypair_file).unwrap().into(),
+                    Box::new(read_keypair_file(&keypair_file).unwrap()),
+                ],
+            }
+        );

         // Test WithdrawFromVoteAccount subcommand
         let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![
             "test",
client/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "1.2.0"
+version = "1.2.2"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"] }
 serde = "1.0.110"
 serde_derive = "1.0.103"
 serde_json = "1.0.53"
-solana-transaction-status = { path = "../transaction-status", version = "1.2.0" }
-solana-net-utils = { path = "../net-utils", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
-solana-vote-program = { path = "../programs/vote", version = "1.2.0" }
+solana-transaction-status = { path = "../transaction-status", version = "1.2.2" }
+solana-net-utils = { path = "../net-utils", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
+solana-vote-program = { path = "../programs/vote", version = "1.2.2" }
 thiserror = "1.0"
 tungstenite = "0.10.1"
 url = "2.1.1"
@@ -31,7 +31,7 @@ url = "2.1.1"
 assert_matches = "1.3.0"
 jsonrpc-core = "14.1.0"
 jsonrpc-http-server = "14.1.0"
-solana-logger = { path = "../logger", version = "1.2.0" }
+solana-logger = { path = "../logger", version = "1.2.2" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
client/src/rpc_client.rs

@@ -2,7 +2,7 @@ use crate::{
     client_error::{ClientError, ClientErrorKind, Result as ClientResult},
     http_sender::HttpSender,
     mock_sender::{MockSender, Mocks},
-    rpc_config::RpcLargestAccountsConfig,
+    rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig},
     rpc_request::{RpcError, RpcRequest},
     rpc_response::*,
     rpc_sender::RpcSender,
@@ -22,7 +22,6 @@ use solana_sdk::{
     epoch_schedule::EpochSchedule,
     fee_calculator::{FeeCalculator, FeeRateGovernor},
     hash::Hash,
-    inflation::Inflation,
     pubkey::Pubkey,
     signature::Signature,
     signers::Signers,
@@ -33,7 +32,6 @@ use solana_transaction_status::{
 };
 use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
 use std::{
-    error,
     net::SocketAddr,
     thread::sleep,
     time::{Duration, Instant},
@@ -95,10 +93,20 @@ impl RpcClient {
     }

     pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
+        self.send_transaction_with_config(transaction, RpcSendTransactionConfig::default())
+    }
+
+    pub fn send_transaction_with_config(
+        &self,
+        transaction: &Transaction,
+        config: RpcSendTransactionConfig,
+    ) -> ClientResult<Signature> {
         let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();

-        let signature_base58_str: String =
-            self.send(RpcRequest::SendTransaction, json!([serialized_encoded]))?;
+        let signature_base58_str: String = self.send(
+            RpcRequest::SendTransaction,
+            json!([serialized_encoded, config]),
+        )?;

         let signature = signature_base58_str
             .parse::<Signature>()
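With the config parameter threaded through, callers can now opt out of the preflight simulation step. A hedged usage sketch (the function name is illustrative; only the RpcClient methods shown in this diff are assumed):

use solana_client::{rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig};
use solana_sdk::{signature::Signature, transaction::Transaction};

// Sketch: submit `tx` while skipping the simulate-before-send check.
fn send_skipping_preflight(
    rpc_client: &RpcClient,
    tx: &Transaction,
) -> solana_client::client_error::Result<Signature> {
    rpc_client.send_transaction_with_config(
        tx,
        RpcSendTransactionConfig { skip_preflight: true },
    )
}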
@@ -122,7 +130,7 @@ impl RpcClient {
         &self,
         transaction: &Transaction,
         sig_verify: bool,
-    ) -> RpcResult<TransactionStatus> {
+    ) -> RpcResult<RpcSimulateTransactionResult> {
         let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
         self.send(
             RpcRequest::SimulateTransaction,
@@ -346,8 +354,12 @@ impl RpcClient {
         })
     }

-    pub fn get_inflation(&self) -> ClientResult<Inflation> {
-        self.send(RpcRequest::GetInflation, Value::Null)
+    pub fn get_inflation_governor(&self) -> ClientResult<RpcInflationGovernor> {
+        self.send(RpcRequest::GetInflationGovernor, Value::Null)
+    }
+
+    pub fn get_inflation_rate(&self) -> ClientResult<RpcInflationRate> {
+        self.send(RpcRequest::GetInflationRate, Value::Null)
     }

     pub fn get_version(&self) -> ClientResult<RpcVersionInfo> {
@@ -403,96 +415,6 @@ impl RpcClient {
         }
     }

-    pub fn send_and_confirm_transactions_with_spinner<T: Signers>(
-        &self,
-        mut transactions: Vec<Transaction>,
-        signer_keys: &T,
-    ) -> Result<(), Box<dyn error::Error>> {
-        let progress_bar = new_spinner_progress_bar();
-        let mut send_retries = 5;
-        loop {
-            let mut status_retries = 15;
-
-            // Send all transactions
-            let mut transactions_signatures = vec![];
-            let num_transactions = transactions.len();
-            for transaction in transactions {
-                if cfg!(not(test)) {
-                    // Delay ~1 tick between write transactions in an attempt to reduce AccountInUse errors
-                    // when all the write transactions modify the same program account (eg, deploying a
-                    // new program)
-                    sleep(Duration::from_millis(1000 / DEFAULT_TICKS_PER_SECOND));
-                }
-
-                let signature = self.send_transaction(&transaction).ok();
-                transactions_signatures.push((transaction, signature));
-
-                progress_bar.set_message(&format!(
-                    "[{}/{}] Transactions sent",
-                    transactions_signatures.len(),
-                    num_transactions
-                ));
-            }
-
-            // Collect statuses for all the transactions, drop those that are confirmed
-            while status_retries > 0 {
-                status_retries -= 1;
-
-                progress_bar.set_message(&format!(
-                    "[{}/{}] Transactions confirmed",
-                    num_transactions - transactions_signatures.len(),
-                    num_transactions
-                ));
-
-                if cfg!(not(test)) {
-                    // Retry twice a second
-                    sleep(Duration::from_millis(500));
-                }
-
-                transactions_signatures = transactions_signatures
-                    .into_iter()
-                    .filter(|(_transaction, signature)| {
-                        if let Some(signature) = signature {
-                            if let Ok(status) = self.get_signature_status(&signature) {
-                                if self
-                                    .get_num_blocks_since_signature_confirmation(&signature)
-                                    .unwrap_or(0)
-                                    > 1
-                                {
-                                    return false;
-                                } else {
-                                    return match status {
-                                        None => true,
-                                        Some(result) => result.is_err(),
-                                    };
-                                }
-                            }
-                        }
-                        true
-                    })
-                    .collect();
-
-                if transactions_signatures.is_empty() {
-                    return Ok(());
-                }
-            }
-
-            if send_retries == 0 {
-                return Err(RpcError::ForUser("Transactions failed".to_string()).into());
-            }
-            send_retries -= 1;
-
-            // Re-sign any failed transactions with a new blockhash and retry
-            let (blockhash, _fee_calculator) =
-                self.get_new_blockhash(&transactions_signatures[0].0.message().recent_blockhash)?;
-            transactions = vec![];
-            for (mut transaction, _) in transactions_signatures.into_iter() {
-                transaction.try_sign(signer_keys, blockhash)?;
-                transactions.push(transaction);
-            }
-        }
-    }
-
     pub fn resign_transaction<T: Signers>(
         &self,
         tx: &mut Transaction,
@@ -614,26 +536,46 @@ impl RpcClient {
     }

     pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
-        Ok(self
+        let (blockhash, fee_calculator, _last_valid_slot) = self
             .get_recent_blockhash_with_commitment(CommitmentConfig::default())?
-            .value)
+            .value;
+        Ok((blockhash, fee_calculator))
     }

     pub fn get_recent_blockhash_with_commitment(
         &self,
         commitment_config: CommitmentConfig,
-    ) -> RpcResult<(Hash, FeeCalculator)> {
-        let Response {
-            context,
-            value:
-                RpcBlockhashFeeCalculator {
-                    blockhash,
-                    fee_calculator,
-                },
-        } = self.send::<Response<RpcBlockhashFeeCalculator>>(
-            RpcRequest::GetRecentBlockhash,
-            json!([commitment_config]),
-        )?;
+    ) -> RpcResult<(Hash, FeeCalculator, Slot)> {
+        let (context, blockhash, fee_calculator, last_valid_slot) = if let Ok(Response {
+            context,
+            value:
+                RpcFees {
+                    blockhash,
+                    fee_calculator,
+                    last_valid_slot,
+                },
+        }) =
+            self.send::<Response<RpcFees>>(RpcRequest::GetFees, json!([commitment_config]))
+        {
+            (context, blockhash, fee_calculator, last_valid_slot)
+        } else if let Ok(Response {
+            context,
+            value:
+                RpcBlockhashFeeCalculator {
+                    blockhash,
+                    fee_calculator,
+                },
+        }) = self.send::<Response<RpcBlockhashFeeCalculator>>(
+            RpcRequest::GetRecentBlockhash,
+            json!([commitment_config]),
+        ) {
+            (context, blockhash, fee_calculator, 0)
+        } else {
+            return Err(ClientError::new_with_request(
+                RpcError::ParseError("RpcBlockhashFeeCalculator or RpcFees".to_string()).into(),
+                RpcRequest::GetRecentBlockhash,
+            ));
+        };

         let blockhash = blockhash.parse().map_err(|_| {
             ClientError::new_with_request(
@@ -643,7 +585,7 @@ impl RpcClient {
         })?;
         Ok(Response {
             context,
-            value: (blockhash, fee_calculator),
+            value: (blockhash, fee_calculator, last_valid_slot),
         })
     }

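The commitment-aware variant now also reports the last slot at which the blockhash will be accepted, falling back to 0 when the node only answers the older getRecentBlockhash request. A hedged usage sketch (the function name is illustrative):

use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

// Sketch: destructure the widened tuple; last_valid_slot is 0 when the
// RpcFees fallback path was taken.
fn fetch_blockhash_window(rpc_client: &RpcClient) -> Result<(), Box<dyn std::error::Error>> {
    let (blockhash, fee_calculator, last_valid_slot) = rpc_client
        .get_recent_blockhash_with_commitment(CommitmentConfig::default())?
        .value;
    println!(
        "blockhash {} costs {} lamports/sig, valid through slot {}",
        blockhash, fee_calculator.lamports_per_signature, last_valid_slot
    );
    Ok(())
}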
@@ -651,12 +593,28 @@ impl RpcClient {
         &self,
         blockhash: &Hash,
     ) -> ClientResult<Option<FeeCalculator>> {
-        let Response { value, .. } = self.send::<Response<Option<RpcFeeCalculator>>>(
-            RpcRequest::GetFeeCalculatorForBlockhash,
-            json!([blockhash.to_string()]),
-        )?;
-
-        Ok(value.map(|rf| rf.fee_calculator))
+        Ok(self
+            .get_fee_calculator_for_blockhash_with_commitment(
+                blockhash,
+                CommitmentConfig::default(),
+            )?
+            .value)
+    }
+
+    pub fn get_fee_calculator_for_blockhash_with_commitment(
+        &self,
+        blockhash: &Hash,
+        commitment_config: CommitmentConfig,
+    ) -> RpcResult<Option<FeeCalculator>> {
+        let Response { context, value } = self.send::<Response<Option<RpcFeeCalculator>>>(
+            RpcRequest::GetFeeCalculatorForBlockhash,
+            json!([blockhash.to_string(), commitment_config]),
+        )?;
+
+        Ok(Response {
+            context,
+            value: value.map(|rf| rf.fee_calculator),
+        })
     }

     pub fn get_fee_rate_governor(&self) -> RpcResult<FeeRateGovernor> {
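A None value from this call means the cluster no longer recognizes the blockhash, so it doubles as a liveness check for a pending transaction's blockhash. A hedged sketch (the helper name is illustrative):

use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, hash::Hash};

// Sketch: true while the blockhash is still accepted at "recent" commitment.
fn blockhash_still_valid(rpc_client: &RpcClient, blockhash: &Hash) -> bool {
    rpc_client
        .get_fee_calculator_for_blockhash_with_commitment(blockhash, CommitmentConfig::recent())
        .map(|response| response.value.is_some())
        .unwrap_or(false)
}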
@@ -913,6 +871,17 @@ impl RpcClient {
     pub fn send_and_confirm_transaction_with_spinner(
         &self,
         transaction: &Transaction,
     ) -> ClientResult<Signature> {
+        self.send_and_confirm_transaction_with_spinner_and_config(
+            transaction,
+            RpcSendTransactionConfig::default(),
+        )
+    }
+
+    pub fn send_and_confirm_transaction_with_spinner_and_config(
+        &self,
+        transaction: &Transaction,
+        config: RpcSendTransactionConfig,
+    ) -> ClientResult<Signature> {
         let mut confirmations = 0;

@@ -928,7 +897,7 @@ impl RpcClient {
         ));
         let mut status_retries = 15;
         let (signature, status) = loop {
-            let signature = self.send_transaction(transaction)?;
+            let signature = self.send_transaction_with_config(transaction, config.clone())?;

             // Get recent commitment in order to count confirmations for successful transactions
             let status = self
client/src/rpc_config.rs

@@ -1,4 +1,4 @@
-use solana_sdk::commitment_config::CommitmentConfig;
+use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};

 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
@@ -6,7 +6,13 @@ pub struct RpcSignatureStatusConfig {
     pub search_transaction_history: bool,
 }

-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcSendTransactionConfig {
     pub skip_preflight: bool,
 }
+
+#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcSimulateTransactionConfig {
+    pub sig_verify: bool,
@@ -26,3 +32,11 @@ pub struct RpcLargestAccountsConfig {
     pub commitment: Option<CommitmentConfig>,
     pub filter: Option<RpcLargestAccountsFilter>,
 }
+
+#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcInflationConfig {
+    pub epoch: Option<Epoch>,
+    #[serde(flatten)]
+    pub commitment: Option<CommitmentConfig>,
+}
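Because of the camelCase rename, the wire format of the new send config is {"skipPreflight": ...}, and the added Default derive makes preflight opt-out rather than opt-in. A small sketch (assumes serde_json, which is already a dependency of this crate):

use solana_client::rpc_config::RpcSendTransactionConfig;

fn main() {
    let config = RpcSendTransactionConfig { skip_preflight: true };
    // Serializes with the camelCase key the JSON-RPC server expects.
    let json = serde_json::to_string(&config).unwrap();
    assert_eq!(json, r#"{"skipPreflight":true}"#);
    // Default keeps preflight enabled.
    assert!(!RpcSendTransactionConfig::default().skip_preflight);
}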
client/src/rpc_request.rs

@@ -16,15 +16,18 @@ pub enum RpcRequest {
     GetConfirmedTransaction,
     GetEpochInfo,
     GetEpochSchedule,
-    GetGenesisHash,
-    GetIdentity,
-    GetInflation,
-    GetLargestAccounts,
-    GetLeaderSchedule,
-    GetProgramAccounts,
-    GetRecentBlockhash,
     GetFeeCalculatorForBlockhash,
     GetFeeRateGovernor,
+    GetFees,
+    GetGenesisHash,
+    GetIdentity,
+    GetInflationGovernor,
+    GetInflationRate,
+    GetLargestAccounts,
+    GetLeaderSchedule,
+    GetMinimumBalanceForRentExemption,
+    GetProgramAccounts,
+    GetRecentBlockhash,
     GetSignatureStatuses,
     GetSlot,
     GetSlotLeader,
@@ -37,13 +40,12 @@ pub enum RpcRequest {
     GetTransactionCount,
     GetVersion,
     GetVoteAccounts,
-    MinimumLedgerSlot,
     RegisterNode,
     RequestAirdrop,
     SendTransaction,
     SimulateTransaction,
     SignVote,
-    GetMinimumBalanceForRentExemption,
+    MinimumLedgerSlot,
 }

 impl fmt::Display for RpcRequest {
@@ -61,15 +63,18 @@ impl fmt::Display for RpcRequest {
             RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
             RpcRequest::GetEpochInfo => "getEpochInfo",
             RpcRequest::GetEpochSchedule => "getEpochSchedule",
-            RpcRequest::GetGenesisHash => "getGenesisHash",
-            RpcRequest::GetIdentity => "getIdentity",
-            RpcRequest::GetInflation => "getInflation",
-            RpcRequest::GetLargestAccounts => "getLargestAccounts",
-            RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
-            RpcRequest::GetProgramAccounts => "getProgramAccounts",
-            RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
             RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
             RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
+            RpcRequest::GetFees => "getFees",
+            RpcRequest::GetGenesisHash => "getGenesisHash",
+            RpcRequest::GetIdentity => "getIdentity",
+            RpcRequest::GetInflationGovernor => "getInflationGovernor",
+            RpcRequest::GetInflationRate => "getInflationRate",
+            RpcRequest::GetLargestAccounts => "getLargestAccounts",
+            RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
+            RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
+            RpcRequest::GetProgramAccounts => "getProgramAccounts",
+            RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
             RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
             RpcRequest::GetSlot => "getSlot",
             RpcRequest::GetSlotLeader => "getSlotLeader",
@@ -82,13 +87,12 @@ impl fmt::Display for RpcRequest {
             RpcRequest::GetTransactionCount => "getTransactionCount",
             RpcRequest::GetVersion => "getVersion",
             RpcRequest::GetVoteAccounts => "getVoteAccounts",
-            RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
             RpcRequest::RegisterNode => "registerNode",
             RpcRequest::RequestAirdrop => "requestAirdrop",
             RpcRequest::SendTransaction => "sendTransaction",
             RpcRequest::SimulateTransaction => "simulateTransaction",
             RpcRequest::SignVote => "signVote",
-            RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
+            RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
         };

         write!(f, "{}", method)
@@ -99,6 +103,9 @@ pub const NUM_LARGEST_ACCOUNTS: usize = 20;
 pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
 pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;

+// Validators that are this number of slots behind are considered delinquent
+pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;
+
 impl RpcRequest {
     pub(crate) fn build_request_json(self, id: u64, params: Value) -> Value {
         let jsonrpc = "2.0";
@@ -144,10 +151,6 @@ mod tests {
         let request = test_request.build_request_json(1, Value::Null);
         assert_eq!(request["method"], "getEpochInfo");

-        let test_request = RpcRequest::GetInflation;
-        let request = test_request.build_request_json(1, Value::Null);
-        assert_eq!(request["method"], "getInflation");
-
         let test_request = RpcRequest::GetRecentBlockhash;
         let request = test_request.build_request_json(1, Value::Null);
         assert_eq!(request["method"], "getRecentBlockhash");
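In the style of the module's own tests, the added variants serialize to the camelCase method names the JSON-RPC server expects; a sketch that would belong inside this module's test fn (build_request_json is crate-private):

let request = RpcRequest::GetFees.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getFees");

let request = RpcRequest::GetInflationGovernor.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getInflationGovernor");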
client/src/rpc_response.rs

@@ -3,6 +3,7 @@ use solana_sdk::{
     account::Account,
     clock::{Epoch, Slot},
     fee_calculator::{FeeCalculator, FeeRateGovernor},
+    inflation::Inflation,
     pubkey::Pubkey,
     transaction::{Result, TransactionError},
 };
@@ -35,6 +36,14 @@ pub struct RpcBlockhashFeeCalculator {
     pub fee_calculator: FeeCalculator,
 }

+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcFees {
+    pub blockhash: String,
+    pub fee_calculator: FeeCalculator,
+    pub last_valid_slot: Slot,
+}
+
 #[derive(Serialize, Deserialize, Clone, Debug)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcFeeCalculator {
@@ -47,6 +56,37 @@ pub struct RpcFeeRateGovernor {
     pub fee_rate_governor: FeeRateGovernor,
 }

+#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcInflationGovernor {
+    pub initial: f64,
+    pub terminal: f64,
+    pub taper: f64,
+    pub foundation: f64,
+    pub foundation_term: f64,
+}
+
+impl From<Inflation> for RpcInflationGovernor {
+    fn from(inflation: Inflation) -> Self {
+        Self {
+            initial: inflation.initial,
+            terminal: inflation.terminal,
+            taper: inflation.taper,
+            foundation: inflation.foundation,
+            foundation_term: inflation.foundation_term,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcInflationRate {
+    pub total: f64,
+    pub validator: f64,
+    pub foundation: f64,
+    pub epoch: Epoch,
+}
+
 #[derive(Serialize, Deserialize, Clone, Debug)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcKeyedAccount {
@@ -171,6 +211,13 @@ pub struct RpcSignatureConfirmation {
     pub status: Result<()>,
 }

+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcSimulateTransactionResult {
+    pub err: Option<TransactionError>,
+    pub logs: Option<Vec<String>>,
+}
+
 #[derive(Serialize, Deserialize, Clone, Debug)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcStorageTurn {
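A hedged sketch of the JSON a getFees response carries and how RpcFees deserializes it; it assumes FeeCalculator also serializes in camelCase (so the nested key is lamportsPerSignature), and the blockhash value is a placeholder:

use solana_client::rpc_response::RpcFees;

fn main() {
    let raw = r#"{
        "blockhash": "4sGjMW1sUnHzSxGspuhpqLDx6wiyjNtZAMdL4VZHirAn",
        "feeCalculator": { "lamportsPerSignature": 5000 },
        "lastValidSlot": 1234
    }"#;
    let fees: RpcFees = serde_json::from_str(raw).unwrap();
    assert_eq!(fees.fee_calculator.lamports_per_signature, 5000);
    assert_eq!(fees.last_valid_slot, 1234);
}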
client/src/thin_client.rs

@@ -9,7 +9,7 @@ use log::*;
 use solana_sdk::{
     account::Account,
     client::{AsyncClient, Client, SyncClient},
-    clock::MAX_PROCESSING_AGE,
+    clock::{Slot, MAX_PROCESSING_AGE},
     commitment_config::CommitmentConfig,
     epoch_info::EpochInfo,
     fee_calculator::{FeeCalculator, FeeRateGovernor},
@@ -427,13 +427,15 @@ impl SyncClient for ThinClient {
     }

     fn get_recent_blockhash(&self) -> TransportResult<(Hash, FeeCalculator)> {
-        self.get_recent_blockhash_with_commitment(CommitmentConfig::default())
+        let (blockhash, fee_calculator, _last_valid_slot) =
+            self.get_recent_blockhash_with_commitment(CommitmentConfig::default())?;
+        Ok((blockhash, fee_calculator))
     }

     fn get_recent_blockhash_with_commitment(
         &self,
         commitment_config: CommitmentConfig,
-    ) -> TransportResult<(Hash, FeeCalculator)> {
+    ) -> TransportResult<(Hash, FeeCalculator, Slot)> {
         let index = self.optimizer.experiment();
         let now = Instant::now();
         let recent_blockhash =
@@ -441,7 +443,7 @@ impl SyncClient for ThinClient {
         match recent_blockhash {
             Ok(Response { value, .. }) => {
                 self.optimizer.report(index, duration_as_ms(&now.elapsed()));
-                Ok(value)
+                Ok((value.0, value.1, value.2))
             }
             Err(e) => {
                 self.optimizer.report(index, std::u64::MAX);
core/Cargo.toml

@@ -1,7 +1,7 @@
 [package]
 name = "solana-core"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.2.0"
+version = "1.2.2"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -21,6 +21,7 @@ byteorder = "1.3.4"
 chrono = { version = "0.4.11", features = ["serde"] }
 core_affinity = "0.5.10"
 crossbeam-channel = "0.4"
+ed25519-dalek = "=1.0.0-pre.3"
 fs_extra = "1.1.0"
 flate2 = "1.0"
 indexmap = "1.3"
@@ -41,35 +42,35 @@ regex = "1.3.7"
 serde = "1.0.110"
 serde_derive = "1.0.103"
 serde_json = "1.0.53"
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.0" }
-solana-budget-program = { path = "../programs/budget", version = "1.2.0" }
-solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
-solana-client = { path = "../client", version = "1.2.0" }
-solana-transaction-status = { path = "../transaction-status", version = "1.2.0" }
-solana-faucet = { path = "../faucet", version = "1.2.0" }
-ed25519-dalek = "=1.0.0-pre.3"
-solana-ledger = { path = "../ledger", version = "1.2.0" }
-solana-logger = { path = "../logger", version = "1.2.0" }
-solana-merkle-tree = { path = "../merkle-tree", version = "1.2.0" }
-solana-metrics = { path = "../metrics", version = "1.2.0" }
-solana-measure = { path = "../measure", version = "1.2.0" }
-solana-net-utils = { path = "../net-utils", version = "1.2.0" }
-solana-perf = { path = "../perf", version = "1.2.0" }
-solana-runtime = { path = "../runtime", version = "1.2.0" }
-solana-sdk = { path = "../sdk", version = "1.2.0" }
-solana-stake-program = { path = "../programs/stake", version = "1.2.0" }
-solana-streamer = { path = "../streamer", version = "1.2.0" }
-solana-version = { path = "../version", version = "1.2.0" }
-solana-vote-program = { path = "../programs/vote", version = "1.2.0" }
-solana-vote-signer = { path = "../vote-signer", version = "1.2.0" }
-solana-sys-tuner = { path = "../sys-tuner", version = "1.2.0" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.2" }
+solana-budget-program = { path = "../programs/budget", version = "1.2.2" }
+solana-clap-utils = { path = "../clap-utils", version = "1.2.2" }
+solana-client = { path = "../client", version = "1.2.2" }
+solana-transaction-status = { path = "../transaction-status", version = "1.2.2" }
+solana-faucet = { path = "../faucet", version = "1.2.2" }
+solana-genesis-programs = { path = "../genesis-programs", version = "1.2.2" }
+solana-ledger = { path = "../ledger", version = "1.2.2" }
+solana-logger = { path = "../logger", version = "1.2.2" }
+solana-merkle-tree = { path = "../merkle-tree", version = "1.2.2" }
+solana-metrics = { path = "../metrics", version = "1.2.2" }
+solana-measure = { path = "../measure", version = "1.2.2" }
+solana-net-utils = { path = "../net-utils", version = "1.2.2" }
+solana-perf = { path = "../perf", version = "1.2.2" }
+solana-runtime = { path = "../runtime", version = "1.2.2" }
+solana-sdk = { path = "../sdk", version = "1.2.2" }
+solana-stake-program = { path = "../programs/stake", version = "1.2.2" }
+solana-streamer = { path = "../streamer", version = "1.2.2" }
+solana-version = { path = "../version", version = "1.2.2" }
+solana-vote-program = { path = "../programs/vote", version = "1.2.2" }
+solana-vote-signer = { path = "../vote-signer", version = "1.2.2" }
+solana-sys-tuner = { path = "../sys-tuner", version = "1.2.2" }
 tempfile = "3.1.0"
 thiserror = "1.0"
 tokio = "0.1"
 tokio-codec = "0.1"
 tokio-fs = "0.1"
 tokio-io = "0.1"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.0" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.2" }
 trees = "0.2.1"

 [dev-dependencies]
@@ -3,6 +3,7 @@
 extern crate test;

 use rand::{thread_rng, Rng};
+use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
 use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
 use solana_core::cluster_info::{ClusterInfo, Node};
 use solana_core::contact_info::ContactInfo;
@@ -47,7 +48,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
             &peers_and_stakes,
             &peers,
             &last_datapoint,
-            &mut 0,
+            &mut TransmitShredsStats::default(),
         )
         .unwrap();
     });
@@ -49,7 +49,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {

     let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
     let bank0 = Bank::new(&genesis_config);
-    let bank_forks = BankForks::new(0, bank0);
+    let bank_forks = BankForks::new(bank0);
     let bank = bank_forks.working_bank();
     let bank_forks = Arc::new(RwLock::new(bank_forks));
     let (packet_sender, packet_receiver) = channel();
core/src/accounts_background_service.rs

@@ -15,11 +15,15 @@ pub struct AccountsBackgroundService {
 }

 const INTERVAL_MS: u64 = 100;
+const SHRUNKEN_ACCOUNT_PER_SEC: usize = 250;
+const SHRUNKEN_ACCOUNT_PER_INTERVAL: usize =
+    SHRUNKEN_ACCOUNT_PER_SEC / (1000 / INTERVAL_MS as usize);

 impl AccountsBackgroundService {
     pub fn new(bank_forks: Arc<RwLock<BankForks>>, exit: &Arc<AtomicBool>) -> Self {
         info!("AccountsBackgroundService active");
         let exit = exit.clone();
+        let mut consumed_budget = 0;
         let t_background = Builder::new()
             .name("solana-accounts-background".to_string())
             .spawn(move || loop {
@@ -30,8 +34,8 @@ impl AccountsBackgroundService {

                 bank.process_dead_slots();

-                // Currently, given INTERVAL_MS, we process 1 slot/100 ms
-                bank.process_stale_slot();
+                consumed_budget = bank
+                    .process_stale_slot_with_budget(consumed_budget, SHRUNKEN_ACCOUNT_PER_INTERVAL);

                 sleep(Duration::from_millis(INTERVAL_MS));
             })
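The budget arithmetic in isolation: a 100 ms wakeup interval means ten wakeups per second, so a target of 250 shrunken accounts per second becomes 25 per wakeup. A self-contained check of the same expression:

// Same constants and expression as the diff above.
const INTERVAL_MS: u64 = 100;
const SHRUNKEN_ACCOUNT_PER_SEC: usize = 250;
const SHRUNKEN_ACCOUNT_PER_INTERVAL: usize =
    SHRUNKEN_ACCOUNT_PER_SEC / (1000 / INTERVAL_MS as usize);

fn main() {
    // 250 accounts/sec spread over 10 wakeups/sec = 25 accounts per wakeup.
    assert_eq!(SHRUNKEN_ACCOUNT_PER_INTERVAL, 25);
}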
core/src/bank_weight_fork_choice.rs (new file)

@@ -0,0 +1,152 @@
use crate::{
    consensus::{ComputedBankState, Tower},
    fork_choice::ForkChoice,
    progress_map::{ForkStats, ProgressMap},
};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
use solana_sdk::timing;
use std::time::Instant;
use std::{
    collections::{HashMap, HashSet},
    sync::{Arc, RwLock},
};

#[derive(Default)]
pub struct BankWeightForkChoice {}

impl ForkChoice for BankWeightForkChoice {
    fn compute_bank_stats(
        &mut self,
        bank: &Bank,
        _tower: &Tower,
        progress: &mut ProgressMap,
        computed_bank_stats: &ComputedBankState,
    ) {
        let bank_slot = bank.slot();
        // Only time progress map should be missing a bank slot
        // is if this node was the leader for this slot as those banks
        // are not replayed in replay_active_banks()
        let parent_weight = bank
            .parent()
            .and_then(|b| progress.get(&b.slot()))
            .map(|x| x.fork_stats.fork_weight)
            .unwrap_or(0);

        let stats = progress
            .get_fork_stats_mut(bank_slot)
            .expect("All frozen banks must exist in the Progress map");

        let ComputedBankState { bank_weight, .. } = computed_bank_stats;
        stats.weight = *bank_weight;
        stats.fork_weight = stats.weight + parent_weight;
    }

    // Returns:
    // 1) The heaviest overall bank
    // 2) The heaviest bank on the same fork as the last vote (doesn't require a
    // switching proof to vote for)
    fn select_forks(
        &self,
        frozen_banks: &[Arc<Bank>],
        tower: &Tower,
        progress: &ProgressMap,
        ancestors: &HashMap<u64, HashSet<u64>>,
        _bank_forks: &RwLock<BankForks>,
    ) -> (Arc<Bank>, Option<Arc<Bank>>) {
        let tower_start = Instant::now();
        assert!(!frozen_banks.is_empty());
        let num_frozen_banks = frozen_banks.len();

        trace!("frozen_banks {}", frozen_banks.len());
        let num_old_banks = frozen_banks
            .iter()
            .filter(|b| b.slot() < tower.root().unwrap_or(0))
            .count();

        let last_vote = tower.last_vote().slots.last().cloned();
        let mut heaviest_bank_on_same_fork = None;
        let mut heaviest_same_fork_weight = 0;
        let stats: Vec<&ForkStats> = frozen_banks
            .iter()
            .map(|bank| {
                // Only time progress map should be missing a bank slot
                // is if this node was the leader for this slot as those banks
                // are not replayed in replay_active_banks()
                let stats = progress
                    .get_fork_stats(bank.slot())
                    .expect("All frozen banks must exist in the Progress map");

                if let Some(last_vote) = last_vote {
                    if ancestors
                        .get(&bank.slot())
                        .expect("Entry in frozen banks must exist in ancestors")
                        .contains(&last_vote)
                    {
                        // Descendant of last vote cannot be locked out
                        assert!(!stats.is_locked_out);

                        // ancestors(slot) should not contain the slot itself,
                        // so we should never get the same bank as the last vote
                        assert_ne!(bank.slot(), last_vote);
                        // highest weight, lowest slot first. frozen_banks is sorted
                        // from least slot to greatest slot, so if two banks have
                        // the same fork weight, the lower slot will be picked
                        if stats.fork_weight > heaviest_same_fork_weight {
                            heaviest_bank_on_same_fork = Some(bank.clone());
                            heaviest_same_fork_weight = stats.fork_weight;
                        }
                    }
                }

                stats
            })
            .collect();
        let num_not_recent = stats.iter().filter(|s| !s.is_recent).count();
        let num_has_voted = stats.iter().filter(|s| s.has_voted).count();
        let num_empty = stats.iter().filter(|s| s.is_empty).count();
        let num_threshold_failure = stats.iter().filter(|s| !s.vote_threshold).count();
        let num_votable_threshold_failure = stats
            .iter()
            .filter(|s| s.is_recent && !s.has_voted && !s.vote_threshold)
            .count();

        let mut candidates: Vec<_> = frozen_banks.iter().zip(stats.iter()).collect();

        //highest weight, lowest slot first
        candidates.sort_by_key(|b| (b.1.fork_weight, 0i64 - b.0.slot() as i64));
        let rv = candidates
            .last()
            .expect("frozen banks was nonempty so candidates must also be nonempty");
        let ms = timing::duration_as_ms(&tower_start.elapsed());
        let weights: Vec<(u128, u64, u64)> = candidates
            .iter()
            .map(|x| (x.1.weight, x.0.slot(), x.1.block_height))
            .collect();
        debug!(
            "@{:?} tower duration: {:?} len: {}/{} weights: {:?}",
            timing::timestamp(),
            ms,
            candidates.len(),
            stats.iter().filter(|s| !s.has_voted).count(),
            weights,
        );
        datapoint_debug!(
            "replay_stage-select_forks",
            ("frozen_banks", num_frozen_banks as i64, i64),
            ("not_recent", num_not_recent as i64, i64),
            ("has_voted", num_has_voted as i64, i64),
            ("old_banks", num_old_banks as i64, i64),
            ("empty_banks", num_empty as i64, i64),
            ("threshold_failure", num_threshold_failure as i64, i64),
            (
                "votable_threshold_failure",
                num_votable_threshold_failure as i64,
                i64
            ),
            ("tower_duration", ms as i64, i64),
        );

        (rv.0.clone(), heaviest_bank_on_same_fork)
    }
}
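The select_forks ordering trick in isolation: sorting by the key (fork_weight, -slot) and taking the last element yields the heaviest fork, with ties broken toward the lower (older) slot. A self-contained demonstration with illustrative numbers:

// (weight, slot) pairs; the sort key negates the slot so that, among equal
// weights, the lower slot sorts later and is picked by .last().
fn main() {
    let mut candidates: Vec<(u128, u64)> = vec![(10, 7), (30, 5), (30, 3), (20, 9)];
    candidates.sort_by_key(|&(weight, slot)| (weight, 0i64 - slot as i64));
    let best = *candidates.last().unwrap();
    assert_eq!(best, (30, 3)); // weight 30 wins; slot 3 beats slot 5
}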
core/src/banking_stage.rs

@@ -51,7 +51,7 @@ type PacketsAndOffsets = (Packets, Vec<usize>);
 pub type UnprocessedPackets = Vec<PacketsAndOffsets>;

 /// Transaction forwarding
-pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 4;
+pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 1;

 // Fixed thread size seems to be fastest on GCP setup
 pub const NUM_THREADS: u32 = 4;
@@ -514,7 +514,7 @@ impl BankingStage {
             vec![]
         };
         let (mut loaded_accounts, results, mut retryable_txs, tx_count, signature_count) =
-            bank.load_and_execute_transactions(batch, MAX_PROCESSING_AGE);
+            bank.load_and_execute_transactions(batch, MAX_PROCESSING_AGE, None);
         load_execute_time.stop();

         let freeze_lock = bank.freeze_lock();
core/src/broadcast_stage.rs

@@ -35,7 +35,7 @@ use std::{
 };

 mod broadcast_fake_shreds_run;
-pub(crate) mod broadcast_metrics;
+pub mod broadcast_metrics;
 pub(crate) mod broadcast_utils;
 mod fail_entry_verification_broadcast_run;
 mod standard_broadcast_run;
@@ -374,13 +374,14 @@ pub fn broadcast_shreds(
     peers_and_stakes: &[(u64, usize)],
     peers: &[ContactInfo],
     last_datapoint_submit: &Arc<AtomicU64>,
-    send_mmsg_total: &mut u64,
+    transmit_stats: &mut TransmitShredsStats,
 ) -> Result<()> {
     let broadcast_len = peers_and_stakes.len();
     if broadcast_len == 0 {
         update_peer_stats(1, 1, last_datapoint_submit);
         return Ok(());
     }
+    let mut shred_select = Measure::start("shred_select");
     let packets: Vec<_> = shreds
         .iter()
         .map(|shred| {
@@ -389,6 +390,8 @@ pub fn broadcast_shreds(
             (&shred.payload, &peers[broadcast_index].tvu)
         })
         .collect();
+    shred_select.stop();
+    transmit_stats.shred_select += shred_select.as_us();

     let mut sent = 0;
     let mut send_mmsg_time = Measure::start("send_mmsg");
@@ -401,7 +404,7 @@ pub fn broadcast_shreds(
         }
     }
     send_mmsg_time.stop();
-    *send_mmsg_total += send_mmsg_time.as_us();
+    transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();

     let num_live_peers = num_live_peers(&peers);
     update_peer_stats(
core/src/broadcast_stage/broadcast_metrics.rs

@@ -29,11 +29,12 @@ impl ProcessShredsStats {
 }

 #[derive(Default, Clone)]
-pub(crate) struct TransmitShredsStats {
-    pub(crate) transmit_elapsed: u64,
-    pub(crate) send_mmsg_elapsed: u64,
-    pub(crate) get_peers_elapsed: u64,
-    pub(crate) num_shreds: usize,
+pub struct TransmitShredsStats {
+    pub transmit_elapsed: u64,
+    pub send_mmsg_elapsed: u64,
+    pub get_peers_elapsed: u64,
+    pub shred_select: u64,
+    pub num_shreds: usize,
 }

 impl BroadcastStats for TransmitShredsStats {
@@ -42,6 +43,7 @@ impl BroadcastStats for TransmitShredsStats {
         self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
         self.get_peers_elapsed += new_stats.get_peers_elapsed;
         self.num_shreds += new_stats.num_shreds;
+        self.shred_select += new_stats.shred_select;
     }
     fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
         datapoint_info!(
@@ -58,6 +60,7 @@ impl BroadcastStats for TransmitShredsStats {
             ("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
             ("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
             ("num_shreds", self.num_shreds as i64, i64),
+            ("shred_select", self.shred_select as i64, i64),
         );
     }
 }
@@ -176,15 +179,16 @@ mod test {
     }

     #[test]
-    fn test_update() {
+    fn test_update_broadcast() {
         let start = Instant::now();
         let mut slot_broadcast_stats = SlotBroadcastStats::default();
         slot_broadcast_stats.update(
             &TransmitShredsStats {
                 transmit_elapsed: 1,
-                get_peers_elapsed: 1,
-                send_mmsg_elapsed: 1,
-                num_shreds: 1,
+                get_peers_elapsed: 2,
+                send_mmsg_elapsed: 3,
+                shred_select: 4,
+                num_shreds: 5,
             },
             &Some(BroadcastShredBatchInfo {
                 slot: 0,
@@ -198,16 +202,18 @@ mod test {
         assert_eq!(slot_0_stats.num_batches, 1);
         assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
         assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);

         slot_broadcast_stats.update(
             &TransmitShredsStats {
-                transmit_elapsed: 1,
-                get_peers_elapsed: 1,
-                send_mmsg_elapsed: 1,
-                num_shreds: 1,
+                transmit_elapsed: 7,
+                get_peers_elapsed: 8,
+                send_mmsg_elapsed: 9,
+                shred_select: 10,
+                num_shreds: 11,
             },
             &None,
         );
@@ -217,9 +223,10 @@ mod test {
         assert_eq!(slot_0_stats.num_batches, 1);
         assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
         assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
-        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 2);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 3);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.shred_select, 4);
+        assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 5);

         // If another batch is given, then total number of batches == num_expected_batches == 2,
         // so the batch should be purged from the HashMap
@@ -228,6 +235,7 @@ mod test {
                 transmit_elapsed: 1,
                 get_peers_elapsed: 1,
                 send_mmsg_elapsed: 1,
+                shred_select: 1,
                 num_shreds: 1,
             },
             &Some(BroadcastShredBatchInfo {
core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs

@@ -137,14 +137,13 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         // Broadcast data
         let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);

-        let mut send_mmsg_total = 0;
         broadcast_shreds(
             sock,
             &shreds,
             &peers_and_stakes,
             &peers,
             &Arc::new(AtomicU64::new(0)),
-            &mut send_mmsg_total,
+            &mut TransmitShredsStats::default(),
         )?;

         Ok(())
core/src/broadcast_stage/standard_broadcast_run.rs

@@ -9,6 +9,7 @@ use solana_ledger::{
 };
 use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
 use std::collections::HashMap;
+use std::sync::RwLock;
 use std::time::Duration;

 #[derive(Clone)]
@@ -23,6 +24,14 @@ pub struct StandardBroadcastRun {
     shred_version: u16,
     last_datapoint_submit: Arc<AtomicU64>,
     num_batches: usize,
+    broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>,
+    last_peer_update: Arc<AtomicU64>,
+}
+
+#[derive(Default)]
+struct BroadcastPeerCache {
+    peers: Vec<ContactInfo>,
+    peers_and_stakes: Vec<(u64, usize)>,
 }

 impl StandardBroadcastRun {
@@ -38,6 +47,8 @@ impl StandardBroadcastRun {
             shred_version,
             last_datapoint_submit: Arc::new(AtomicU64::new(0)),
             num_batches: 0,
+            broadcast_peer_cache: Arc::new(RwLock::new(BroadcastPeerCache::default())),
+            last_peer_update: Arc::new(AtomicU64::new(0)),
         }
     }

@@ -293,33 +304,46 @@ impl StandardBroadcastRun {
         shreds: Arc<Vec<Shred>>,
         broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
     ) -> Result<()> {
+        const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;
         trace!("Broadcasting {:?} shreds", shreds.len());
-        // Get the list of peers to broadcast to
-        let get_peers_start = Instant::now();
-        let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
-        let get_peers_elapsed = get_peers_start.elapsed();
+        let mut get_peers_time = Measure::start("broadcast::get_peers");
+        let now = timestamp();
+        let last = self.last_peer_update.load(Ordering::Relaxed);
+        if now - last > BROADCAST_PEER_UPDATE_INTERVAL_MS
+            && self
+                .last_peer_update
+                .compare_and_swap(now, last, Ordering::Relaxed)
+                == last
+        {
+            let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap();
+            let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
+            w_broadcast_peer_cache.peers = peers;
+            w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes;
+        }
+        get_peers_time.stop();
+        let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap();

+        let mut transmit_stats = TransmitShredsStats::default();
         // Broadcast the shreds
-        let transmit_start = Instant::now();
-        let mut send_mmsg_total = 0;
+        let mut transmit_time = Measure::start("broadcast_shreds");
         broadcast_shreds(
             sock,
             &shreds,
-            &peers_and_stakes,
-            &peers,
+            &r_broadcast_peer_cache.peers_and_stakes,
+            &r_broadcast_peer_cache.peers,
             &self.last_datapoint_submit,
-            &mut send_mmsg_total,
+            &mut transmit_stats,
         )?;
-        let transmit_elapsed = transmit_start.elapsed();
-        let new_transmit_shreds_stats = TransmitShredsStats {
-            transmit_elapsed: duration_as_us(&transmit_elapsed),
-            get_peers_elapsed: duration_as_us(&get_peers_elapsed),
-            send_mmsg_elapsed: send_mmsg_total,
-            num_shreds: shreds.len(),
-        };
+        drop(r_broadcast_peer_cache);
+        transmit_time.stop();

+        transmit_stats.transmit_elapsed = transmit_time.as_us();
+        transmit_stats.get_peers_elapsed = get_peers_time.as_us();
+        transmit_stats.num_shreds = shreds.len();

         // Process metrics
-        self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
+        self.update_transmit_metrics(&transmit_stats, &broadcast_shred_batch_info);
         Ok(())
     }

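One thing worth flagging in the rate-limited refresh above: AtomicU64::compare_and_swap(current, new, ordering) compares against its first argument, so the call as written, compare_and_swap(now, last, ..), tests the cell against `now` and would store `last`. The conventional once-per-interval latch reads the other way around; a hedged sketch of that form (an editor's illustration, not part of this diff):

use std::sync::atomic::{AtomicU64, Ordering};

const BROADCAST_PEER_UPDATE_INTERVAL_MS: u64 = 1000;

// Sketch of the usual latch shape: only the thread that successfully swaps
// `last` -> `now` wins the race and refreshes the cache; others skip.
fn should_refresh(last_peer_update: &AtomicU64, now: u64) -> bool {
    let last = last_peer_update.load(Ordering::Relaxed);
    now.saturating_sub(last) > BROADCAST_PEER_UPDATE_INTERVAL_MS
        && last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
}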
File diff suppressed because it is too large
core/src/cluster_info_vote_listener.rs

@@ -1,6 +1,6 @@
 use crate::{
     cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
-    consensus::VOTE_THRESHOLD_SIZE,
+    commitment::VOTE_THRESHOLD_SIZE,
     crds_value::CrdsValueLabel,
     poh_recorder::PohRecorder,
     pubkey_references::LockedPubkeyReferences,
@@ -948,7 +948,7 @@ mod tests {
         );
         let bank = Bank::new(&genesis_config);
         let exit = Arc::new(AtomicBool::new(false));
-        let bank_forks = BankForks::new(0, bank);
+        let bank_forks = BankForks::new(bank);
         let bank = bank_forks.get(0).unwrap().clone();
         let vote_tracker = VoteTracker::new(&bank);
         let ledger_path = get_tmp_ledger_path!();
@@ -1064,7 +1064,7 @@ mod tests {
         let bank = Bank::new(&genesis_config);
         let vote_tracker = VoteTracker::new(&bank);
         let exit = Arc::new(AtomicBool::new(false));
-        let bank_forks = BankForks::new(0, bank);
+        let bank_forks = BankForks::new(bank);
         let bank = bank_forks.get(0).unwrap().clone();
         let ledger_path = get_tmp_ledger_path!();
         let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
core/src/commitment.rs

@@ -1,26 +1,10 @@
-use crate::{consensus::VOTE_THRESHOLD_SIZE, rpc_subscriptions::RpcSubscriptions};
 use solana_ledger::blockstore::Blockstore;
-use solana_measure::measure::Measure;
-use solana_metrics::datapoint_info;
 use solana_runtime::bank::Bank;
 use solana_sdk::clock::Slot;
-use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
-use std::{
-    collections::HashMap,
-    sync::atomic::{AtomicBool, Ordering},
-    sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
-    sync::{Arc, RwLock},
-    thread::{self, Builder, JoinHandle},
-    time::Duration,
-};
+use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
+use std::{collections::HashMap, sync::Arc};

-#[derive(Default)]
-pub struct CacheSlotInfo {
-    pub current_slot: Slot,
-    pub node_root: Slot,
-    pub largest_confirmed_root: Slot,
-    pub highest_confirmed_slot: Slot,
-}
+pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;

 pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];

@@ -59,9 +43,9 @@ pub struct BlockCommitmentCache {
     largest_confirmed_root: Slot,
     total_stake: u64,
     bank: Arc<Bank>,
-    blockstore: Arc<Blockstore>,
+    pub blockstore: Arc<Blockstore>,
     root: Slot,
-    highest_confirmed_slot: Slot,
+    pub highest_confirmed_slot: Slot,
 }

 impl std::fmt::Debug for BlockCommitmentCache {
@@ -151,7 +135,7 @@ impl BlockCommitmentCache {
         self.root
     }

-    fn calculate_highest_confirmed_slot(&self) -> Slot {
+    pub fn calculate_highest_confirmed_slot(&self) -> Slot {
         self.highest_slot_with_confirmation_count(1)
     }

@@ -219,222 +203,11 @@ impl BlockCommitmentCache {
     }
 }

-pub struct CommitmentAggregationData {
-    bank: Arc<Bank>,
-    root: Slot,
-    total_staked: u64,
-}
-
-impl CommitmentAggregationData {
-    pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
-        Self {
-            bank,
-            root,
-            total_staked,
-        }
-    }
-}
-
-fn get_largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
-    rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
-    let mut stake_sum = 0;
-    for (root, stake) in rooted_stake {
-        stake_sum += stake;
-        if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
-            return root;
-        }
-    }
-    0
-}

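get_largest_confirmed_root, removed from this module along with the rest of the aggregation service, walks roots from highest to lowest and returns the first one backed by more than two-thirds of the stake. A standalone illustration of the same logic with small numbers:

pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;

// Same algorithm as the removed function, with Slot spelled as u64.
fn get_largest_confirmed_root(mut rooted_stake: Vec<(u64, u64)>, total_stake: u64) -> u64 {
    // Highest root first; accumulate stake until the supermajority threshold.
    rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
    let mut stake_sum = 0;
    for (root, stake) in rooted_stake {
        stake_sum += stake;
        if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
            return root;
        }
    }
    0
}

fn main() {
    // Roots 8, 6, 5 held by 40/35/25 of 100 staked lamports: root 8 alone is
    // only 40%, but by root 6 the running sum is 75% > 2/3, so 6 is returned.
    assert_eq!(get_largest_confirmed_root(vec![(5, 25), (8, 40), (6, 35)], 100), 6);
}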
-pub struct AggregateCommitmentService {
-    t_commitment: JoinHandle<()>,
-}
-
-impl AggregateCommitmentService {
-    pub fn new(
-        exit: &Arc<AtomicBool>,
-        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
-        subscriptions: Arc<RpcSubscriptions>,
-    ) -> (Sender<CommitmentAggregationData>, Self) {
-        let (sender, receiver): (
-            Sender<CommitmentAggregationData>,
-            Receiver<CommitmentAggregationData>,
-        ) = channel();
-        let exit_ = exit.clone();
-        (
-            sender,
-            Self {
-                t_commitment: Builder::new()
-                    .name("solana-aggregate-stake-lockouts".to_string())
-                    .spawn(move || loop {
-                        if exit_.load(Ordering::Relaxed) {
-                            break;
-                        }
-
-                        if let Err(RecvTimeoutError::Disconnected) =
-                            Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
-                        {
-                            break;
-                        }
-                    })
-                    .unwrap(),
-            },
-        )
-    }
-
-    fn run(
-        receiver: &Receiver<CommitmentAggregationData>,
-        block_commitment_cache: &RwLock<BlockCommitmentCache>,
-        subscriptions: &Arc<RpcSubscriptions>,
-        exit: &Arc<AtomicBool>,
-    ) -> Result<(), RecvTimeoutError> {
-        loop {
-            if exit.load(Ordering::Relaxed) {
-                return Ok(());
-            }
-
-            let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
-
-            while let Ok(new_data) = receiver.try_recv() {
-                aggregation_data = new_data;
-            }
-
-            let ancestors = aggregation_data.bank.status_cache_ancestors();
-            if ancestors.is_empty() {
-                continue;
-            }
-
-            let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
-            let (block_commitment, rooted_stake) =
-                Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
-
-            let largest_confirmed_root =
-                get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
-
-            let mut new_block_commitment = BlockCommitmentCache::new(
-                block_commitment,
-                largest_confirmed_root,
-                aggregation_data.total_staked,
-                aggregation_data.bank,
-                block_commitment_cache.read().unwrap().blockstore.clone(),
-                aggregation_data.root,
-                aggregation_data.root,
-            );
-            new_block_commitment.highest_confirmed_slot =
-                new_block_commitment.calculate_highest_confirmed_slot();
-
-            let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
-
-            std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
-            aggregate_commitment_time.stop();
-            datapoint_info!(
-                "block-commitment-cache",
-                (
-                    "aggregate-commitment-ms",
-                    aggregate_commitment_time.as_ms() as i64,
-                    i64
-                )
-            );
-
-            subscriptions.notify_subscribers(CacheSlotInfo {
-                current_slot: w_block_commitment_cache.slot(),
-                node_root: w_block_commitment_cache.root(),
-                largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
-                highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
-            });
-        }
-    }
-
-    pub fn aggregate_commitment(
-        ancestors: &[Slot],
-        bank: &Bank,
-    ) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
-        assert!(!ancestors.is_empty());
-
-        // Check ancestors is sorted
-        for a in ancestors.windows(2) {
-            assert!(a[0] < a[1]);
-        }
-
-        let mut commitment = HashMap::new();
-        let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
-        for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
-            if lamports == 0 {
-                continue;
-            }
-            let vote_state = VoteState::from(&account);
-            if vote_state.is_none() {
-                continue;
-            }
-
-            let vote_state = vote_state.unwrap();
-            Self::aggregate_commitment_for_vote_account(
-                &mut commitment,
-                &mut rooted_stake,
-                &vote_state,
-                ancestors,
-                lamports,
-            );
-        }
-
-        (commitment, rooted_stake)
-    }
-
-    fn aggregate_commitment_for_vote_account(
-        commitment: &mut HashMap<Slot, BlockCommitment>,
-        rooted_stake: &mut Vec<(Slot, u64)>,
-        vote_state: &VoteState,
-        ancestors: &[Slot],
-        lamports: u64,
-    ) {
-        assert!(!ancestors.is_empty());
-        let mut ancestors_index = 0;
-        if let Some(root) = vote_state.root_slot {
-            for (i, a) in ancestors.iter().enumerate() {
-                if *a <= root {
-                    commitment
-                        .entry(*a)
-                        .or_insert_with(BlockCommitment::default)
-                        .increase_rooted_stake(lamports);
-                } else {
-                    ancestors_index = i;
-                    break;
-                }
-            }
-            rooted_stake.push((root, lamports));
-        }
-
-        for vote in &vote_state.votes {
-            while ancestors[ancestors_index] <= vote.slot {
-                commitment
-                    .entry(ancestors[ancestors_index])
-                    .or_insert_with(BlockCommitment::default)
-                    .increase_confirmation_stake(vote.confirmation_count as usize, lamports);
-                ancestors_index += 1;
-
-                if ancestors_index == ancestors.len() {
-                    return;
-                }
-            }
-        }
-    }
-
-    pub fn join(self) -> thread::Result<()> {
-        self.t_commitment.join()
-    }
-}

 #[cfg(test)]
 mod tests {
     use super::*;
-    use solana_ledger::{
-        genesis_utils::{create_genesis_config, GenesisConfigInfo},
-        get_tmp_ledger_path,
-    };
+    use solana_ledger::get_tmp_ledger_path;
     use solana_sdk::{genesis_config::GenesisConfig, pubkey::Pubkey};
     use solana_stake_program::stake_state;
     use solana_vote_program::vote_state::{self, VoteStateVersions};

     #[test]
     fn test_block_commitment() {
@@ -512,21 +285,6 @@ mod tests {
         assert!(!block_commitment_cache.is_confirmed_rooted(3));
     }

-    #[test]
-    fn test_get_largest_confirmed_root() {
-        assert_eq!(get_largest_confirmed_root(vec![], 10), 0);
-        let mut rooted_stake = vec![];
-        rooted_stake.push((0, 5));
|
||||
rooted_stake.push((1, 5));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 0);
|
||||
let mut rooted_stake = vec![];
|
||||
rooted_stake.push((1, 5));
|
||||
rooted_stake.push((0, 10));
|
||||
rooted_stake.push((2, 5));
|
||||
rooted_stake.push((1, 4));
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_highest_confirmed_slot() {
|
||||
let bank = Arc::new(Bank::new(&GenesisConfig::default()));
|
||||
@ -634,211 +392,4 @@ mod tests {
|
||||
|
||||
assert_eq!(block_commitment_cache.calculate_highest_confirmed_slot(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_1() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = *ancestors.last().unwrap();
|
||||
vote_state.root_slot = Some(root);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for a in ancestors {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_2() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = ancestors[2];
|
||||
vote_state.root_slot = Some(root);
|
||||
vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for a in ancestors {
|
||||
if a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_for_vote_account_3() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
|
||||
let mut commitment = HashMap::new();
|
||||
let mut rooted_stake = vec![];
|
||||
let lamports = 5;
|
||||
let mut vote_state = VoteState::default();
|
||||
|
||||
let root = ancestors[2];
|
||||
vote_state.root_slot = Some(root);
|
||||
assert!(ancestors[4] + 2 >= ancestors[6]);
|
||||
vote_state.process_slot_vote_unchecked(ancestors[4]);
|
||||
vote_state.process_slot_vote_unchecked(ancestors[6]);
|
||||
AggregateCommitmentService::aggregate_commitment_for_vote_account(
|
||||
&mut commitment,
|
||||
&mut rooted_stake,
|
||||
&vote_state,
|
||||
&ancestors,
|
||||
lamports,
|
||||
);
|
||||
|
||||
for (i, a) in ancestors.iter().enumerate() {
|
||||
if *a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 4 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 6 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake[0], (root, lamports));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aggregate_commitment_validity() {
|
||||
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config, ..
|
||||
} = create_genesis_config(10_000);
|
||||
|
||||
let rooted_stake_amount = 40;
|
||||
|
||||
let sk1 = Pubkey::new_rand();
|
||||
let pk1 = Pubkey::new_rand();
|
||||
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
|
||||
let stake_account1 =
|
||||
stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
|
||||
let sk2 = Pubkey::new_rand();
|
||||
let pk2 = Pubkey::new_rand();
|
||||
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
|
||||
let stake_account2 =
|
||||
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
|
||||
let sk3 = Pubkey::new_rand();
|
||||
let pk3 = Pubkey::new_rand();
|
||||
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account3 = stake_state::create_account(
|
||||
&sk3,
|
||||
&pk3,
|
||||
&vote_account3,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
let sk4 = Pubkey::new_rand();
|
||||
let pk4 = Pubkey::new_rand();
|
||||
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
|
||||
let stake_account4 = stake_state::create_account(
|
||||
&sk4,
|
||||
&pk4,
|
||||
&vote_account4,
|
||||
&genesis_config.rent,
|
||||
rooted_stake_amount,
|
||||
);
|
||||
|
||||
genesis_config.accounts.extend(vec![
|
||||
(pk1, vote_account1.clone()),
|
||||
(sk1, stake_account1),
|
||||
(pk2, vote_account2.clone()),
|
||||
(sk2, stake_account2),
|
||||
(pk3, vote_account3.clone()),
|
||||
(sk3, stake_account3),
|
||||
(pk4, vote_account4.clone()),
|
||||
(sk4, stake_account4),
|
||||
]);
|
||||
|
||||
// Create bank
|
||||
let bank = Arc::new(Bank::new(&genesis_config));
|
||||
|
||||
let mut vote_state1 = VoteState::from(&vote_account1).unwrap();
|
||||
vote_state1.process_slot_vote_unchecked(3);
|
||||
vote_state1.process_slot_vote_unchecked(5);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state1));
|
||||
VoteState::to(&versioned, &mut vote_account1).unwrap();
|
||||
bank.store_account(&pk1, &vote_account1);
|
||||
|
||||
let mut vote_state2 = VoteState::from(&vote_account2).unwrap();
|
||||
vote_state2.process_slot_vote_unchecked(9);
|
||||
vote_state2.process_slot_vote_unchecked(10);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state2));
|
||||
VoteState::to(&versioned, &mut vote_account2).unwrap();
|
||||
bank.store_account(&pk2, &vote_account2);
|
||||
|
||||
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
|
||||
vote_state3.root_slot = Some(1);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
|
||||
VoteState::to(&versioned, &mut vote_account3).unwrap();
|
||||
bank.store_account(&pk3, &vote_account3);
|
||||
|
||||
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
|
||||
vote_state4.root_slot = Some(2);
|
||||
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
|
||||
VoteState::to(&versioned, &mut vote_account4).unwrap();
|
||||
bank.store_account(&pk4, &vote_account4);
|
||||
|
||||
let (commitment, rooted_stake) =
|
||||
AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
|
||||
|
||||
for a in ancestors {
|
||||
if a <= 3 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, 150);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 5 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, 100);
|
||||
expected.increase_confirmation_stake(2, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 9 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(2, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if a <= 10 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(1, 50);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
assert!(commitment.get(&a).is_none());
|
||||
}
|
||||
}
|
||||
assert_eq!(rooted_stake.len(), 2);
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
|
||||
}
|
||||
}
|
||||
|
454 core/src/commitment_service.rs Normal file
@@ -0,0 +1,454 @@
use crate::{
    commitment::{BlockCommitment, BlockCommitmentCache, VOTE_THRESHOLD_SIZE},
    rpc_subscriptions::{CacheSlotInfo, RpcSubscriptions},
};
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_vote_program::vote_state::VoteState;
use std::{
    collections::HashMap,
    sync::atomic::{AtomicBool, Ordering},
    sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
    sync::{Arc, RwLock},
    thread::{self, Builder, JoinHandle},
    time::Duration,
};

pub struct CommitmentAggregationData {
    bank: Arc<Bank>,
    root: Slot,
    total_staked: u64,
}

impl CommitmentAggregationData {
    pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
        Self {
            bank,
            root,
            total_staked,
        }
    }
}

fn get_largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
    rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
    let mut stake_sum = 0;
    for (root, stake) in rooted_stake {
        stake_sum += stake;
        if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
            return root;
        }
    }
    0
}

pub struct AggregateCommitmentService {
    t_commitment: JoinHandle<()>,
}

impl AggregateCommitmentService {
    pub fn new(
        exit: &Arc<AtomicBool>,
        block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
        subscriptions: Arc<RpcSubscriptions>,
    ) -> (Sender<CommitmentAggregationData>, Self) {
        let (sender, receiver): (
            Sender<CommitmentAggregationData>,
            Receiver<CommitmentAggregationData>,
        ) = channel();
        let exit_ = exit.clone();
        (
            sender,
            Self {
                t_commitment: Builder::new()
                    .name("solana-aggregate-stake-lockouts".to_string())
                    .spawn(move || loop {
                        if exit_.load(Ordering::Relaxed) {
                            break;
                        }

                        if let Err(RecvTimeoutError::Disconnected) =
                            Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
                        {
                            break;
                        }
                    })
                    .unwrap(),
            },
        )
    }

    fn run(
        receiver: &Receiver<CommitmentAggregationData>,
        block_commitment_cache: &RwLock<BlockCommitmentCache>,
        subscriptions: &Arc<RpcSubscriptions>,
        exit: &Arc<AtomicBool>,
    ) -> Result<(), RecvTimeoutError> {
        loop {
            if exit.load(Ordering::Relaxed) {
                return Ok(());
            }

            let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;

            while let Ok(new_data) = receiver.try_recv() {
                aggregation_data = new_data;
            }

            let ancestors = aggregation_data.bank.status_cache_ancestors();
            if ancestors.is_empty() {
                continue;
            }

            let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
            let (block_commitment, rooted_stake) =
                Self::aggregate_commitment(&ancestors, &aggregation_data.bank);

            let largest_confirmed_root =
                get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);

            let mut new_block_commitment = BlockCommitmentCache::new(
                block_commitment,
                largest_confirmed_root,
                aggregation_data.total_staked,
                aggregation_data.bank,
                block_commitment_cache.read().unwrap().blockstore.clone(),
                aggregation_data.root,
                aggregation_data.root,
            );
            new_block_commitment.highest_confirmed_slot =
                new_block_commitment.calculate_highest_confirmed_slot();

            let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();

            std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
            aggregate_commitment_time.stop();
            datapoint_info!(
                "block-commitment-cache",
                (
                    "aggregate-commitment-ms",
                    aggregate_commitment_time.as_ms() as i64,
                    i64
                )
            );

            subscriptions.notify_subscribers(CacheSlotInfo {
                current_slot: w_block_commitment_cache.slot(),
                node_root: w_block_commitment_cache.root(),
                largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
                highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
            });
        }
    }

    pub fn aggregate_commitment(
        ancestors: &[Slot],
        bank: &Bank,
    ) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
        assert!(!ancestors.is_empty());

        // Check ancestors is sorted
        for a in ancestors.windows(2) {
            assert!(a[0] < a[1]);
        }

        let mut commitment = HashMap::new();
        let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
        for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
            if lamports == 0 {
                continue;
            }
            let vote_state = VoteState::from(&account);
            if vote_state.is_none() {
                continue;
            }

            let vote_state = vote_state.unwrap();
            Self::aggregate_commitment_for_vote_account(
                &mut commitment,
                &mut rooted_stake,
                &vote_state,
                ancestors,
                lamports,
            );
        }

        (commitment, rooted_stake)
    }

    fn aggregate_commitment_for_vote_account(
        commitment: &mut HashMap<Slot, BlockCommitment>,
        rooted_stake: &mut Vec<(Slot, u64)>,
        vote_state: &VoteState,
        ancestors: &[Slot],
        lamports: u64,
    ) {
        assert!(!ancestors.is_empty());
        let mut ancestors_index = 0;
        if let Some(root) = vote_state.root_slot {
            for (i, a) in ancestors.iter().enumerate() {
                if *a <= root {
                    commitment
                        .entry(*a)
                        .or_insert_with(BlockCommitment::default)
                        .increase_rooted_stake(lamports);
                } else {
                    ancestors_index = i;
                    break;
                }
            }
            rooted_stake.push((root, lamports));
        }

        for vote in &vote_state.votes {
            while ancestors[ancestors_index] <= vote.slot {
                commitment
                    .entry(ancestors[ancestors_index])
                    .or_insert_with(BlockCommitment::default)
                    .increase_confirmation_stake(vote.confirmation_count as usize, lamports);
                ancestors_index += 1;

                if ancestors_index == ancestors.len() {
                    return;
                }
            }
        }
    }

    pub fn join(self) -> thread::Result<()> {
        self.t_commitment.join()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
    use solana_sdk::pubkey::Pubkey;
    use solana_stake_program::stake_state;
    use solana_vote_program::vote_state::{self, VoteStateVersions};

    #[test]
    fn test_get_largest_confirmed_root() {
        assert_eq!(get_largest_confirmed_root(vec![], 10), 0);
        let mut rooted_stake = vec![];
        rooted_stake.push((0, 5));
        rooted_stake.push((1, 5));
        assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 0);
        let mut rooted_stake = vec![];
        rooted_stake.push((1, 5));
        rooted_stake.push((0, 10));
        rooted_stake.push((2, 5));
        rooted_stake.push((1, 4));
        assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
    }

    #[test]
    fn test_aggregate_commitment_for_vote_account_1() {
        let ancestors = vec![3, 4, 5, 7, 9, 11];
        let mut commitment = HashMap::new();
        let mut rooted_stake = vec![];
        let lamports = 5;
        let mut vote_state = VoteState::default();

        let root = *ancestors.last().unwrap();
        vote_state.root_slot = Some(root);
        AggregateCommitmentService::aggregate_commitment_for_vote_account(
            &mut commitment,
            &mut rooted_stake,
            &vote_state,
            &ancestors,
            lamports,
        );

        for a in ancestors {
            let mut expected = BlockCommitment::default();
            expected.increase_rooted_stake(lamports);
            assert_eq!(*commitment.get(&a).unwrap(), expected);
        }
        assert_eq!(rooted_stake[0], (root, lamports));
    }

    #[test]
    fn test_aggregate_commitment_for_vote_account_2() {
        let ancestors = vec![3, 4, 5, 7, 9, 11];
        let mut commitment = HashMap::new();
        let mut rooted_stake = vec![];
        let lamports = 5;
        let mut vote_state = VoteState::default();

        let root = ancestors[2];
        vote_state.root_slot = Some(root);
        vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
        AggregateCommitmentService::aggregate_commitment_for_vote_account(
            &mut commitment,
            &mut rooted_stake,
            &vote_state,
            &ancestors,
            lamports,
        );

        for a in ancestors {
            if a <= root {
                let mut expected = BlockCommitment::default();
                expected.increase_rooted_stake(lamports);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            } else {
                let mut expected = BlockCommitment::default();
                expected.increase_confirmation_stake(1, lamports);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            }
        }
        assert_eq!(rooted_stake[0], (root, lamports));
    }

    #[test]
    fn test_aggregate_commitment_for_vote_account_3() {
        let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
        let mut commitment = HashMap::new();
        let mut rooted_stake = vec![];
        let lamports = 5;
        let mut vote_state = VoteState::default();

        let root = ancestors[2];
        vote_state.root_slot = Some(root);
        assert!(ancestors[4] + 2 >= ancestors[6]);
        vote_state.process_slot_vote_unchecked(ancestors[4]);
        vote_state.process_slot_vote_unchecked(ancestors[6]);
        AggregateCommitmentService::aggregate_commitment_for_vote_account(
            &mut commitment,
            &mut rooted_stake,
            &vote_state,
            &ancestors,
            lamports,
        );

        for (i, a) in ancestors.iter().enumerate() {
            if *a <= root {
                let mut expected = BlockCommitment::default();
                expected.increase_rooted_stake(lamports);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            } else if i <= 4 {
                let mut expected = BlockCommitment::default();
                expected.increase_confirmation_stake(2, lamports);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            } else if i <= 6 {
                let mut expected = BlockCommitment::default();
                expected.increase_confirmation_stake(1, lamports);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            }
        }
        assert_eq!(rooted_stake[0], (root, lamports));
    }

    #[test]
    fn test_aggregate_commitment_validity() {
        let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(10_000);

        let rooted_stake_amount = 40;

        let sk1 = Pubkey::new_rand();
        let pk1 = Pubkey::new_rand();
        let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
        let stake_account1 =
            stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
        let sk2 = Pubkey::new_rand();
        let pk2 = Pubkey::new_rand();
        let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
        let stake_account2 =
            stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
        let sk3 = Pubkey::new_rand();
        let pk3 = Pubkey::new_rand();
        let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
        let stake_account3 = stake_state::create_account(
            &sk3,
            &pk3,
            &vote_account3,
            &genesis_config.rent,
            rooted_stake_amount,
        );
        let sk4 = Pubkey::new_rand();
        let pk4 = Pubkey::new_rand();
        let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
        let stake_account4 = stake_state::create_account(
            &sk4,
            &pk4,
            &vote_account4,
            &genesis_config.rent,
            rooted_stake_amount,
        );

        genesis_config.accounts.extend(vec![
            (pk1, vote_account1.clone()),
            (sk1, stake_account1),
            (pk2, vote_account2.clone()),
            (sk2, stake_account2),
            (pk3, vote_account3.clone()),
            (sk3, stake_account3),
            (pk4, vote_account4.clone()),
            (sk4, stake_account4),
        ]);

        // Create bank
        let bank = Arc::new(Bank::new(&genesis_config));

        let mut vote_state1 = VoteState::from(&vote_account1).unwrap();
        vote_state1.process_slot_vote_unchecked(3);
        vote_state1.process_slot_vote_unchecked(5);
        let versioned = VoteStateVersions::Current(Box::new(vote_state1));
        VoteState::to(&versioned, &mut vote_account1).unwrap();
        bank.store_account(&pk1, &vote_account1);

        let mut vote_state2 = VoteState::from(&vote_account2).unwrap();
        vote_state2.process_slot_vote_unchecked(9);
        vote_state2.process_slot_vote_unchecked(10);
        let versioned = VoteStateVersions::Current(Box::new(vote_state2));
        VoteState::to(&versioned, &mut vote_account2).unwrap();
        bank.store_account(&pk2, &vote_account2);

        let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
        vote_state3.root_slot = Some(1);
        let versioned = VoteStateVersions::Current(Box::new(vote_state3));
        VoteState::to(&versioned, &mut vote_account3).unwrap();
        bank.store_account(&pk3, &vote_account3);

        let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
        vote_state4.root_slot = Some(2);
        let versioned = VoteStateVersions::Current(Box::new(vote_state4));
        VoteState::to(&versioned, &mut vote_account4).unwrap();
        bank.store_account(&pk4, &vote_account4);

        let (commitment, rooted_stake) =
            AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);

        for a in ancestors {
            if a <= 3 {
                let mut expected = BlockCommitment::default();
                expected.increase_confirmation_stake(2, 150);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            } else if a <= 5 {
                let mut expected = BlockCommitment::default();
                expected.increase_confirmation_stake(1, 100);
                expected.increase_confirmation_stake(2, 50);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            } else if a <= 9 {
                let mut expected = BlockCommitment::default();
                expected.increase_confirmation_stake(2, 50);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            } else if a <= 10 {
                let mut expected = BlockCommitment::default();
                expected.increase_confirmation_stake(1, 50);
                assert_eq!(*commitment.get(&a).unwrap(), expected);
            } else {
                assert!(commitment.get(&a).is_none());
            }
        }
        assert_eq!(rooted_stake.len(), 2);
        assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
    }
}
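The `get_largest_confirmed_root` logic added above picks the highest slot that a supermajority of stake has rooted: roots are walked from highest to lowest, and the first one at which the cumulative rooted stake exceeds `VOTE_THRESHOLD_SIZE` (2/3) wins. The following is a minimal standalone sketch of that rule, assuming only `Slot = u64` and the 2/3 threshold; names outside the diff are illustrative, not part of the crate:

// Minimal sketch of the supermajority-root rule; mirrors the
// get_largest_confirmed_root function in the diff above.
type Slot = u64;
const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;

fn largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
    // Walk roots from highest to lowest, accumulating stake until >2/3 is reached.
    rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
    let mut stake_sum = 0;
    for (root, stake) in rooted_stake {
        stake_sum += stake;
        if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
            return root;
        }
    }
    0
}

fn main() {
    // Same vector as test_get_largest_confirmed_root above; entries may repeat
    // a root, and total stake is passed in separately (10 here).
    let rooted_stake = vec![(1, 5), (0, 10), (2, 5), (1, 4)];
    // Descending by root: root 2 contributes 5/10 (not enough); adding the
    // first entry for root 1 reaches 10/10 > 2/3, so 1 is returned, not 0.
    assert_eq!(largest_confirmed_root(rooted_stake, 10), 1);
}

Sorting in descending order means the returned slot is the largest root that still clears the threshold, which is why the duplicate-root test vector resolves to 1 rather than 0.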
|
@ -1,4 +1,5 @@
|
||||
use crate::{
|
||||
commitment::VOTE_THRESHOLD_SIZE,
|
||||
progress_map::{LockoutIntervals, ProgressMap},
|
||||
pubkey_references::PubkeyReferences,
|
||||
};
|
||||
@ -9,19 +10,55 @@ use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Slot, UnixTimestamp},
|
||||
hash::Hash,
|
||||
instruction::Instruction,
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
use solana_vote_program::vote_state::{
|
||||
BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY, TIMESTAMP_SLOT_INTERVAL,
|
||||
use solana_vote_program::{
|
||||
vote_instruction,
|
||||
vote_state::{
|
||||
BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY, TIMESTAMP_SLOT_INTERVAL,
|
||||
},
|
||||
};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap, HashSet},
|
||||
ops::Bound::{Included, Unbounded},
|
||||
sync::Arc,
|
||||
sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
#[derive(PartialEq, Clone, Debug)]
|
||||
pub enum SwitchForkDecision {
|
||||
SwitchProof(Hash),
|
||||
NoSwitch,
|
||||
FailedSwitchThreshold,
|
||||
}
|
||||
|
||||
impl SwitchForkDecision {
|
||||
pub fn to_vote_instruction(
|
||||
&self,
|
||||
vote: Vote,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
authorized_voter_pubkey: &Pubkey,
|
||||
) -> Option<Instruction> {
|
||||
match self {
|
||||
SwitchForkDecision::FailedSwitchThreshold => None,
|
||||
SwitchForkDecision::NoSwitch => Some(vote_instruction::vote(
|
||||
vote_account_pubkey,
|
||||
authorized_voter_pubkey,
|
||||
vote,
|
||||
)),
|
||||
SwitchForkDecision::SwitchProof(switch_proof_hash) => {
|
||||
Some(vote_instruction::vote_switch(
|
||||
vote_account_pubkey,
|
||||
authorized_voter_pubkey,
|
||||
vote,
|
||||
*switch_proof_hash,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
|
||||
pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;
|
||||
pub const SWITCH_FORK_THRESHOLD: f64 = 0.38;
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
@ -42,6 +79,14 @@ impl StakeLockout {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ComputedBankState {
|
||||
pub stake_lockouts: HashMap<Slot, StakeLockout>,
|
||||
pub total_staked: u64,
|
||||
pub bank_weight: u128,
|
||||
pub lockout_intervals: LockoutIntervals,
|
||||
pub pubkey_votes: Vec<(Pubkey, Slot)>,
|
||||
}
|
||||
|
||||
pub struct Tower {
|
||||
node_pubkey: Pubkey,
|
||||
threshold_depth: usize,
|
||||
@ -65,10 +110,14 @@ impl Default for Tower {
|
||||
}
|
||||
|
||||
impl Tower {
|
||||
pub fn new(node_pubkey: &Pubkey, vote_account_pubkey: &Pubkey, bank_forks: &BankForks) -> Self {
|
||||
pub fn new(
|
||||
node_pubkey: &Pubkey,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
root: Slot,
|
||||
heaviest_bank: &Bank,
|
||||
) -> Self {
|
||||
let mut tower = Self::new_with_key(node_pubkey);
|
||||
|
||||
tower.initialize_lockouts_from_bank_forks(&bank_forks, vote_account_pubkey);
|
||||
tower.initialize_lockouts_from_bank_forks(vote_account_pubkey, root, heaviest_bank);
|
||||
|
||||
tower
|
||||
}
|
||||
@ -89,27 +138,28 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn collect_vote_lockouts<F>(
|
||||
&self,
|
||||
pub(crate) fn collect_vote_lockouts<F>(
|
||||
node_pubkey: &Pubkey,
|
||||
bank_slot: u64,
|
||||
vote_accounts: F,
|
||||
ancestors: &HashMap<Slot, HashSet<u64>>,
|
||||
all_pubkeys: &mut PubkeyReferences,
|
||||
) -> (HashMap<Slot, StakeLockout>, u64, u128, LockoutIntervals)
|
||||
) -> ComputedBankState
|
||||
where
|
||||
F: Iterator<Item = (Pubkey, (u64, Account))>,
|
||||
{
|
||||
let mut stake_lockouts = HashMap::new();
|
||||
let mut total_stake = 0;
|
||||
let mut total_weight = 0;
|
||||
let mut total_staked = 0;
|
||||
let mut bank_weight = 0;
|
||||
// Tree of intervals of lockouts of the form [slot, slot + slot.lockout],
|
||||
// keyed by end of the range
|
||||
let mut lockout_intervals = BTreeMap::new();
|
||||
let mut pubkey_votes = vec![];
|
||||
for (key, (lamports, account)) in vote_accounts {
|
||||
if lamports == 0 {
|
||||
continue;
|
||||
}
|
||||
trace!("{} {} with stake {}", self.node_pubkey, key, lamports);
|
||||
trace!("{} {} with stake {}", node_pubkey, key, lamports);
|
||||
let vote_state = VoteState::from(&account);
|
||||
if vote_state.is_none() {
|
||||
datapoint_warn!(
|
||||
@ -128,11 +178,11 @@ impl Tower {
|
||||
let key = all_pubkeys.get_or_insert(&key);
|
||||
lockout_intervals
|
||||
.entry(vote.expiration_slot())
|
||||
.or_insert_with(|| vec![])
|
||||
.or_insert_with(Vec::new)
|
||||
.push((vote.slot, key));
|
||||
}
|
||||
|
||||
if key == self.node_pubkey || vote_state.node_pubkey == self.node_pubkey {
|
||||
if key == *node_pubkey || vote_state.node_pubkey == *node_pubkey {
|
||||
debug!("vote state {:?}", vote_state);
|
||||
debug!(
|
||||
"observed slot {}",
|
||||
@ -151,10 +201,15 @@ impl Tower {
|
||||
}
|
||||
let start_root = vote_state.root_slot;
|
||||
|
||||
// Add the latest vote to update the `heaviest_subtree_fork_choice`
|
||||
if let Some(latest_vote) = vote_state.votes.back() {
|
||||
pubkey_votes.push((key, latest_vote.slot));
|
||||
}
|
||||
|
||||
vote_state.process_slot_vote_unchecked(bank_slot);
|
||||
|
||||
for vote in &vote_state.votes {
|
||||
total_weight += vote.lockout() as u128 * lamports as u128;
|
||||
bank_weight += vote.lockout() as u128 * lamports as u128;
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
|
||||
@ -165,7 +220,7 @@ impl Tower {
|
||||
slot: root,
|
||||
};
|
||||
trace!("ROOT: {}", vote.slot);
|
||||
total_weight += vote.lockout() as u128 * lamports as u128;
|
||||
bank_weight += vote.lockout() as u128 * lamports as u128;
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
}
|
||||
@ -174,7 +229,7 @@ impl Tower {
|
||||
confirmation_count: MAX_LOCKOUT_HISTORY as u32,
|
||||
slot: root,
|
||||
};
|
||||
total_weight += vote.lockout() as u128 * lamports as u128;
|
||||
bank_weight += vote.lockout() as u128 * lamports as u128;
|
||||
Self::update_ancestor_lockouts(&mut stake_lockouts, &vote, ancestors);
|
||||
}
|
||||
|
||||
@ -195,9 +250,16 @@ impl Tower {
|
||||
// Update all the parents of this last vote with the stake of this vote account
|
||||
Self::update_ancestor_stakes(&mut stake_lockouts, vote.slot, lamports, ancestors);
|
||||
}
|
||||
total_stake += lamports;
|
||||
total_staked += lamports;
|
||||
}
|
||||
|
||||
ComputedBankState {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
bank_weight,
|
||||
lockout_intervals,
|
||||
pubkey_votes,
|
||||
}
|
||||
(stake_lockouts, total_stake, total_weight, lockout_intervals)
|
||||
}
|
||||
|
||||
pub fn is_slot_confirmed(
|
||||
@ -345,7 +407,7 @@ impl Tower {
|
||||
progress: &ProgressMap,
|
||||
total_stake: u64,
|
||||
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
|
||||
) -> bool {
|
||||
) -> SwitchForkDecision {
|
||||
self.last_vote()
|
||||
.slots
|
||||
.last()
|
||||
@ -355,14 +417,18 @@ impl Tower {
|
||||
|
||||
if switch_slot == *last_vote || switch_slot_ancestors.contains(last_vote) {
|
||||
// If the `switch_slot is a descendant of the last vote,
|
||||
// no switching proof is neceessary
|
||||
return true;
|
||||
// no switching proof is necessary
|
||||
return SwitchForkDecision::NoSwitch;
|
||||
}
|
||||
|
||||
// Should never consider switching to an ancestor
|
||||
// of your last vote
|
||||
assert!(!last_vote_ancestors.contains(&switch_slot));
|
||||
|
||||
// By this point, we know the `switch_slot` is on a different fork
|
||||
// (is neither an ancestor nor descendant of `last_vote`), so a
|
||||
// switching proof is necessary
|
||||
let switch_proof = Hash::default();
|
||||
let mut locked_out_stake = 0;
|
||||
let mut locked_out_vote_accounts = HashSet::new();
|
||||
for (candidate_slot, descendants) in descendants.iter() {
|
||||
@ -423,9 +489,14 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
}
|
||||
(locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD
|
||||
|
||||
if (locked_out_stake as f64 / total_stake as f64) > SWITCH_FORK_THRESHOLD {
|
||||
SwitchForkDecision::SwitchProof(switch_proof)
|
||||
} else {
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
}
|
||||
})
|
||||
.unwrap_or(true)
|
||||
.unwrap_or(SwitchForkDecision::NoSwitch)
|
||||
}
|
||||
|
||||
pub fn check_vote_stake_threshold(
|
||||
@ -463,7 +534,7 @@ impl Tower {
|
||||
}
|
||||
|
||||
/// Update lockouts for all the ancestors
|
||||
fn update_ancestor_lockouts(
|
||||
pub(crate) fn update_ancestor_lockouts(
|
||||
stake_lockouts: &mut HashMap<Slot, StakeLockout>,
|
||||
vote: &Lockout,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
@ -483,6 +554,28 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn find_heaviest_bank(
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
node_pubkey: &Pubkey,
|
||||
) -> Option<Arc<Bank>> {
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
let mut bank_weights: Vec<_> = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.map(|b| {
|
||||
(
|
||||
Self::bank_weight(node_pubkey, b, &ancestors),
|
||||
b.parents().len(),
|
||||
b.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
bank_weights.sort_by_key(|b| (b.0, b.1));
|
||||
bank_weights.pop().map(|b| b.2)
|
||||
}
|
||||
|
||||
/// Update stake for all the ancestors.
|
||||
/// Note, stake is the same for all the ancestor.
|
||||
fn update_ancestor_stakes(
|
||||
@ -491,9 +584,8 @@ impl Tower {
|
||||
lamports: u64,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
) {
|
||||
// If there's no ancestors, that means this slot must be from before the current root,
|
||||
// in which case the lockouts won't be calculated in bank_weight anyways, so ignore
|
||||
// this slot
|
||||
// If there's no ancestors, that means this slot must be from
|
||||
// before the current root, so ignore this slot
|
||||
let vote_slot_ancestors = ancestors.get(&slot);
|
||||
if vote_slot_ancestors.is_none() {
|
||||
return;
|
||||
@ -506,8 +598,13 @@ impl Tower {
|
||||
}
|
||||
}
|
||||
|
||||
fn bank_weight(&self, bank: &Bank, ancestors: &HashMap<Slot, HashSet<Slot>>) -> u128 {
|
||||
let (_, _, bank_weight, _) = self.collect_vote_lockouts(
|
||||
fn bank_weight(
|
||||
node_pubkey: &Pubkey,
|
||||
bank: &Bank,
|
||||
ancestors: &HashMap<Slot, HashSet<Slot>>,
|
||||
) -> u128 {
|
||||
let ComputedBankState { bank_weight, .. } = Self::collect_vote_lockouts(
|
||||
node_pubkey,
|
||||
bank.slot(),
|
||||
bank.vote_accounts().into_iter(),
|
||||
ancestors,
|
||||
@ -516,47 +613,28 @@ impl Tower {
|
||||
bank_weight
|
||||
}
|
||||
|
||||
fn find_heaviest_bank(&self, bank_forks: &BankForks) -> Option<Arc<Bank>> {
|
||||
let ancestors = bank_forks.ancestors();
|
||||
let mut bank_weights: Vec<_> = bank_forks
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.map(|b| {
|
||||
(
|
||||
self.bank_weight(b, &ancestors),
|
||||
b.parents().len(),
|
||||
b.clone(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
bank_weights.sort_by_key(|b| (b.0, b.1));
|
||||
bank_weights.pop().map(|b| b.2)
|
||||
}
|
||||
|
||||
fn initialize_lockouts_from_bank_forks(
|
||||
&mut self,
|
||||
bank_forks: &BankForks,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
root: Slot,
|
||||
heaviest_bank: &Bank,
|
||||
) {
|
||||
if let Some(bank) = self.find_heaviest_bank(bank_forks) {
|
||||
let root = bank_forks.root();
|
||||
if let Some((_stake, vote_account)) = bank.vote_accounts().get(vote_account_pubkey) {
|
||||
let mut vote_state = VoteState::deserialize(&vote_account.data)
|
||||
.expect("vote_account isn't a VoteState?");
|
||||
vote_state.root_slot = Some(root);
|
||||
vote_state.votes.retain(|v| v.slot > root);
|
||||
trace!(
|
||||
"{} lockouts initialized to {:?}",
|
||||
self.node_pubkey,
|
||||
vote_state
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
vote_state.node_pubkey, self.node_pubkey,
|
||||
"vote account's node_pubkey doesn't match",
|
||||
);
|
||||
self.lockouts = vote_state;
|
||||
}
|
||||
if let Some((_stake, vote_account)) = heaviest_bank.vote_accounts().get(vote_account_pubkey)
|
||||
{
|
||||
let mut vote_state = VoteState::deserialize(&vote_account.data)
|
||||
.expect("vote_account isn't a VoteState?");
|
||||
vote_state.root_slot = Some(root);
|
||||
vote_state.votes.retain(|v| v.slot > root);
|
||||
trace!(
|
||||
"{} lockouts initialized to {:?}",
|
||||
self.node_pubkey,
|
||||
vote_state
|
||||
);
|
||||
assert_eq!(
|
||||
vote_state.node_pubkey, self.node_pubkey,
|
||||
"vote account's node_pubkey doesn't match",
|
||||
);
|
||||
self.lockouts = vote_state;
|
||||
}
|
||||
}
|
||||
|
||||
@ -580,8 +658,11 @@ impl Tower {
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use crate::{
|
||||
bank_weight_fork_choice::BankWeightForkChoice,
|
||||
cluster_info_vote_listener::VoteTracker,
|
||||
cluster_slots::ClusterSlots,
|
||||
fork_choice::SelectVoteAndResetForkResult,
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
|
||||
progress_map::ForkProgress,
|
||||
replay_stage::{HeaviestForkFailures, ReplayStage},
|
||||
};
|
||||
@ -599,7 +680,7 @@ pub mod test {
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use solana_vote_program::{
|
||||
vote_state::{Vote, VoteStateVersions},
|
||||
vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY},
|
||||
vote_transaction,
|
||||
};
|
||||
use std::{
|
||||
@ -616,18 +697,26 @@ pub mod test {
|
||||
pub vote_pubkeys: Vec<Pubkey>,
|
||||
pub bank_forks: RwLock<BankForks>,
|
||||
pub progress: ProgressMap,
|
||||
pub heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice,
|
||||
}
|
||||
|
||||
impl VoteSimulator {
|
||||
pub(crate) fn new(num_keypairs: usize) -> Self {
|
||||
let (validator_keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress) =
|
||||
Self::init_state(num_keypairs);
|
||||
let (
|
||||
validator_keypairs,
|
||||
node_pubkeys,
|
||||
vote_pubkeys,
|
||||
bank_forks,
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
) = Self::init_state(num_keypairs);
|
||||
Self {
|
||||
validator_keypairs,
|
||||
node_pubkeys,
|
||||
vote_pubkeys,
|
||||
bank_forks: RwLock::new(bank_forks),
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
}
|
||||
}
|
||||
pub(crate) fn fill_bank_forks(
|
||||
@ -670,6 +759,8 @@ pub mod test {
|
||||
}
|
||||
}
|
||||
new_bank.freeze();
|
||||
self.heaviest_subtree_fork_choice
|
||||
.add_new_leaf_slot(new_bank.slot(), Some(new_bank.parent_slot()));
|
||||
self.bank_forks.write().unwrap().insert(new_bank);
|
||||
walk.forward();
|
||||
}
|
||||
@ -704,6 +795,8 @@ pub mod test {
|
||||
&ClusterSlots::default(),
|
||||
&self.bank_forks,
|
||||
&mut PubkeyReferences::default(),
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
&mut BankWeightForkChoice::default(),
|
||||
);
|
||||
|
||||
let vote_bank = self
|
||||
@ -716,8 +809,11 @@ pub mod test {
|
||||
|
||||
// Try to vote on the given slot
|
||||
let descendants = self.bank_forks.read().unwrap().descendants();
|
||||
let (_, _, failure_reasons) = ReplayStage::select_vote_and_reset_forks(
|
||||
&Some(vote_bank.clone()),
|
||||
let SelectVoteAndResetForkResult {
|
||||
heaviest_fork_failures,
|
||||
..
|
||||
} = ReplayStage::select_vote_and_reset_forks(
|
||||
&vote_bank,
|
||||
&None,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
@ -727,8 +823,8 @@ pub mod test {
|
||||
|
||||
// Make sure this slot isn't locked out or failing threshold
|
||||
info!("Checking vote: {}", vote_bank.slot());
|
||||
if !failure_reasons.is_empty() {
|
||||
return failure_reasons;
|
||||
if !heaviest_fork_failures.is_empty() {
|
||||
return heaviest_fork_failures;
|
||||
}
|
||||
let vote = tower.new_vote_from_bank(&vote_bank, &my_vote_pubkey).0;
|
||||
if let Some(new_root) = tower.record_bank_vote(vote) {
|
||||
@ -746,6 +842,7 @@ pub mod test {
|
||||
&None,
|
||||
&mut PubkeyReferences::default(),
|
||||
None,
|
||||
&mut self.heaviest_subtree_fork_choice,
|
||||
)
|
||||
}
|
||||
|
||||
@ -831,6 +928,7 @@ pub mod test {
|
||||
Vec<Pubkey>,
|
||||
BankForks,
|
||||
ProgressMap,
|
||||
HeaviestSubtreeForkChoice,
|
||||
) {
|
||||
let keypairs: HashMap<_, _> = std::iter::repeat_with(|| {
|
||||
let node_keypair = Keypair::new();
|
||||
@ -853,8 +951,16 @@ pub mod test {
|
||||
.map(|keys| keys.vote_keypair.pubkey())
|
||||
.collect();
|
||||
|
||||
let (bank_forks, progress) = initialize_state(&keypairs, 10_000);
|
||||
(keypairs, node_pubkeys, vote_pubkeys, bank_forks, progress)
|
||||
let (bank_forks, progress, heaviest_subtree_fork_choice) =
|
||||
initialize_state(&keypairs, 10_000);
|
||||
(
|
||||
keypairs,
|
||||
node_pubkeys,
|
||||
vote_pubkeys,
|
||||
bank_forks,
|
||||
progress,
|
||||
heaviest_subtree_fork_choice,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -862,7 +968,7 @@ pub mod test {
|
||||
pub(crate) fn initialize_state(
|
||||
validator_keypairs_map: &HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
stake: u64,
|
||||
) -> (BankForks, ProgressMap) {
|
||||
) -> (BankForks, ProgressMap, HeaviestSubtreeForkChoice) {
|
||||
let validator_keypairs: Vec<_> = validator_keypairs_map.values().collect();
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@ -882,7 +988,10 @@ pub mod test {
|
||||
0,
|
||||
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
|
||||
);
|
||||
(BankForks::new(0, bank0), progress)
|
||||
let bank_forks = BankForks::new(bank0);
|
||||
let heaviest_subtree_fork_choice =
|
||||
HeaviestSubtreeForkChoice::new_from_bank_forks(&bank_forks);
|
||||
(bank_forks, progress, heaviest_subtree_fork_choice)
|
||||
}
|
||||
|
||||
fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, Account))> {
|
||||
@ -905,6 +1014,34 @@ pub mod test {
|
||||
stakes
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_vote_instruction() {
|
||||
let vote = Vote::default();
|
||||
let mut decision = SwitchForkDecision::FailedSwitchThreshold;
|
||||
assert!(decision
|
||||
.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default())
|
||||
.is_none());
|
||||
decision = SwitchForkDecision::NoSwitch;
|
||||
assert_eq!(
|
||||
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
|
||||
Some(vote_instruction::vote(
|
||||
&Pubkey::default(),
|
||||
&Pubkey::default(),
|
||||
vote.clone(),
|
||||
))
|
||||
);
|
||||
decision = SwitchForkDecision::SwitchProof(Hash::default());
|
||||
assert_eq!(
|
||||
decision.to_vote_instruction(vote.clone(), &Pubkey::default(), &Pubkey::default()),
|
||||
Some(vote_instruction::vote_switch(
|
||||
&Pubkey::default(),
|
||||
&Pubkey::default(),
|
||||
vote,
|
||||
Hash::default()
|
||||
))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_votes() {
|
||||
// Init state
|
||||
@ -975,85 +1112,106 @@ pub mod test {
|
||||
tower.record_vote(47, Hash::default());
|
||||
|
||||
// Trying to switch to a descendant of last vote should always work
|
||||
assert!(tower.check_switch_threshold(
|
||||
48,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
48,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::NoSwitch
|
||||
);
|
||||
|
||||
// Trying to switch to another fork at 110 should fail
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on a descendant of last vote should
|
||||
// not count toward the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(50, (49, 100), &other_vote_account);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on an ancestor of last vote should
|
||||
// not count toward the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(50, (45, 100), &other_vote_account);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on a different fork, but the lockout
|
||||
// doesn't cover the last vote, should not satisfy the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(14, (12, 46), &other_vote_account);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
|
||||
// Adding another validator lockout on a different fork, and the lockout
|
||||
// covers the last vote, should satisfy the switch threshold
|
||||
vote_simulator.simulate_lockout_interval(14, (12, 47), &other_vote_account);
|
||||
assert!(tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&ancestors,
|
||||
&descendants,
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
||||
// If we set a root, then any lockout intervals below the root shouldn't
|
||||
// count toward the switch threshold. This means the other validator's
|
||||
// vote lockout no longer counts
|
||||
vote_simulator.set_root(43);
|
||||
assert!(!tower.check_switch_threshold(
|
||||
110,
|
||||
&vote_simulator.bank_forks.read().unwrap().ancestors(),
|
||||
&vote_simulator.bank_forks.read().unwrap().descendants(),
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
));
|
||||
assert_eq!(
|
||||
tower.check_switch_threshold(
|
||||
110,
|
||||
&vote_simulator.bank_forks.read().unwrap().ancestors(),
|
||||
&vote_simulator.bank_forks.read().unwrap().descendants(),
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1193,20 +1351,32 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_collect_vote_lockouts_sums() {
|
||||
//two accounts voting for slot 0 with 1 token staked
|
||||
let accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
|
||||
let tower = Tower::new_for_tests(0, 0.67);
|
||||
let mut accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
|
||||
let account_latest_votes: Vec<(Pubkey, Slot)> =
|
||||
accounts.iter().map(|(pubkey, _)| (*pubkey, 0)).collect();
|
||||
|
||||
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let (staked_lockouts, total_staked, bank_weight, _) = tower.collect_vote_lockouts(
|
||||
let ComputedBankState {
|
||||
stake_lockouts,
|
||||
total_staked,
|
||||
bank_weight,
|
||||
mut pubkey_votes,
|
||||
..
|
||||
} = Tower::collect_vote_lockouts(
|
||||
&Pubkey::default(),
|
||||
1,
|
||||
accounts.into_iter(),
|
||||
&ancestors,
|
||||
&mut PubkeyReferences::default(),
|
||||
);
|
||||
assert_eq!(staked_lockouts[&0].stake, 2);
|
||||
assert_eq!(staked_lockouts[&0].lockout, 2 + 2 + 4 + 4);
|
||||
assert_eq!(stake_lockouts[&0].stake, 2);
|
||||
assert_eq!(stake_lockouts[&0].lockout, 2 + 2 + 4 + 4);
|
||||
assert_eq!(total_staked, 2);
|
||||
pubkey_votes.sort();
|
||||
assert_eq!(pubkey_votes, account_latest_votes);
|
||||
|
||||
// Each acccount has 1 vote in it. After simulating a vote in collect_vote_lockouts,
|
||||
// the account will have 2 votes, with lockout 2 + 4 = 6. So expected weight for
|
||||
@ -1218,7 +1388,12 @@ pub mod test {
|
||||
fn test_collect_vote_lockouts_root() {
|
||||
let votes: Vec<u64> = (0..MAX_LOCKOUT_HISTORY as u64).collect();
|
||||
//two accounts voting for slots 0..MAX_LOCKOUT_HISTORY with 1 token staked
|
||||
let accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
|
||||
let mut accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
|
||||
accounts.sort_by_key(|(pk, _)| *pk);
let account_latest_votes: Vec<(Pubkey, Slot)> = accounts
.iter()
.map(|(pubkey, _)| (*pubkey, (MAX_LOCKOUT_HISTORY - 1) as Slot))
.collect();
let mut tower = Tower::new_for_tests(0, 0.67);
let mut ancestors = HashMap::new();
for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
@ -1239,18 +1414,27 @@ pub mod test {
+ root_weight;
let expected_bank_weight = 2 * vote_account_expected_weight;
assert_eq!(tower.lockouts.root_slot, Some(0));
let (staked_lockouts, _total_staked, bank_weight, _) = tower.collect_vote_lockouts(
let ComputedBankState {
stake_lockouts,
bank_weight,
mut pubkey_votes,
..
} = Tower::collect_vote_lockouts(
&Pubkey::default(),
MAX_LOCKOUT_HISTORY as u64,
accounts.into_iter(),
&ancestors,
&mut PubkeyReferences::default(),
);
for i in 0..MAX_LOCKOUT_HISTORY {
assert_eq!(staked_lockouts[&(i as u64)].stake, 2);
assert_eq!(stake_lockouts[&(i as u64)].stake, 2);
}

// should be the sum of all the weights for root
assert!(staked_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
assert!(stake_lockouts[&0].lockout > (2 * (1 << MAX_LOCKOUT_HISTORY)));
assert_eq!(bank_weight, expected_bank_weight);
pubkey_votes.sort();
assert_eq!(pubkey_votes, account_latest_votes);
}

#[test]
@ -1283,7 +1467,7 @@ pub mod test {
);
tower.record_vote(i, Hash::default());
}
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2));
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2,));
}

#[test]
@ -1511,48 +1695,7 @@ pub mod test {
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes, 2));
}

#[test]
fn test_lockout_is_updated_for_entire_branch() {
let mut stake_lockouts = HashMap::new();
let vote = Lockout {
slot: 2,
confirmation_count: 1,
};
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
let mut ancestors = HashMap::new();
ancestors.insert(2, set);
let set: HashSet<u64> = vec![0u64].into_iter().collect();
ancestors.insert(1, set);
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
assert_eq!(stake_lockouts[&0].lockout, 2);
assert_eq!(stake_lockouts[&1].lockout, 2);
assert_eq!(stake_lockouts[&2].lockout, 2);
}

#[test]
fn test_lockout_is_updated_for_slot_or_lower() {
let mut stake_lockouts = HashMap::new();
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
let mut ancestors = HashMap::new();
ancestors.insert(2, set);
let set: HashSet<u64> = vec![0u64].into_iter().collect();
ancestors.insert(1, set);
let vote = Lockout {
slot: 2,
confirmation_count: 1,
};
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
let vote = Lockout {
slot: 1,
confirmation_count: 2,
};
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
assert_eq!(stake_lockouts[&0].lockout, 2 + 4);
assert_eq!(stake_lockouts[&1].lockout, 2 + 4);
assert_eq!(stake_lockouts[&2].lockout, 2);
assert!(tower.check_vote_stake_threshold(6, &stakes, 2,));
}

#[test]
@ -1631,7 +1774,7 @@ pub mod test {
let total_stake = 4;
let threshold_size = 0.67;
let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
let tower_votes: Vec<u64> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
let tower_votes: Vec<Slot> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
let accounts = gen_stakes(&[
(threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
(total_stake - threshold_stake, &tower_votes[..]),
@ -1648,29 +1791,35 @@ pub mod test {
for vote in &tower_votes {
tower.record_vote(*vote, Hash::default());
}
let (staked_lockouts, total_staked, _, _) = tower.collect_vote_lockouts(
let ComputedBankState {
stake_lockouts,
total_staked,
..
} = Tower::collect_vote_lockouts(
&Pubkey::default(),
vote_to_evaluate,
accounts.clone().into_iter(),
&ancestors,
&mut PubkeyReferences::default(),
);
assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &staked_lockouts, total_staked));
assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &stake_lockouts, total_staked,));

// CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
// will expire the vote in one of the vote accounts, so we should have insufficient
// stake to pass the threshold
let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
let (staked_lockouts, total_staked, _, _) = tower.collect_vote_lockouts(
let ComputedBankState {
stake_lockouts,
total_staked,
..
} = Tower::collect_vote_lockouts(
&Pubkey::default(),
vote_to_evaluate,
accounts.into_iter(),
&ancestors,
&mut PubkeyReferences::default(),
);
assert!(!tower.check_vote_stake_threshold(
vote_to_evaluate,
&staked_lockouts,
total_staked
));
assert!(!tower.check_vote_stake_threshold(vote_to_evaluate, &stake_lockouts, total_staked,));
}

fn vote_and_check_recent(num_votes: usize) {
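
These consensus.rs test updates track the new shape of `Tower::collect_vote_lockouts`: it is now called as an associated function and returns a `ComputedBankState` struct instead of a positional tuple, so tests destructure only the fields they assert on. A minimal sketch of the calling pattern, using only names visible in this diff (`bank_slot` and `vote_accounts` are placeholder names, and the commented types are assumptions):

// Sketch only: destructure just the fields a caller needs.
let ComputedBankState {
    stake_lockouts, // per-slot stake locked behind votes (assumed HashMap<Slot, StakeLockout>)
    total_staked,   // total stake observed across vote accounts (assumed u64)
    ..
} = Tower::collect_vote_lockouts(
    &Pubkey::default(),
    bank_slot,
    vote_accounts.into_iter(),
    &ancestors,
    &mut PubkeyReferences::default(),
);
assert!(tower.check_vote_stake_threshold(bank_slot, &stake_lockouts, total_staked));
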
@ -36,6 +36,7 @@ use std::collections::HashMap;
pub struct Crds {
/// Stores the map of labels and values
pub table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
pub num_inserts: usize,
}

#[derive(PartialEq, Debug)]
@ -84,6 +85,7 @@ impl Default for Crds {
fn default() -> Self {
Crds {
table: IndexMap::new(),
num_inserts: 0,
}
}
}
@ -93,6 +95,24 @@ impl Crds {
pub fn new_versioned(&self, local_timestamp: u64, value: CrdsValue) -> VersionedCrdsValue {
VersionedCrdsValue::new(local_timestamp, value)
}
pub fn would_insert(
&self,
value: CrdsValue,
local_timestamp: u64,
) -> Option<VersionedCrdsValue> {
let new_value = self.new_versioned(local_timestamp, value);
let label = new_value.value.label();
let would_insert = self
.table
.get(&label)
.map(|current| new_value > *current)
.unwrap_or(true);
if would_insert {
Some(new_value)
} else {
None
}
}
/// insert the new value, returns the old value if insert succeeds
pub fn insert_versioned(
&mut self,
@ -107,6 +127,7 @@ impl Crds {
.unwrap_or(true);
if do_insert {
let old = self.table.insert(label, new_value);
self.num_inserts += 1;
Ok(old)
} else {
trace!("INSERT FAILED data: {} new.wallclock: {}", label, wallclock,);
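
The new `Crds::would_insert` above is a read-only dry run of the ordering check that `insert_versioned` performs: it returns the prepared `VersionedCrdsValue` only when that value would win against the current table entry. A minimal sketch of the intended two-step usage, assuming the signatures shown in this diff:

// Sketch: check without &mut access first, then commit the prepared value.
if let Some(new_value) = crds.would_insert(value, now) {
    // new_value compared greater than any existing entry for its label,
    // so this insert is expected to succeed.
    let _old = crds.insert_versioned(new_value);
}
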
@ -6,7 +6,7 @@
use crate::{
crds::{Crds, VersionedCrdsValue},
crds_gossip_error::CrdsGossipError,
crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
crds_value::{CrdsValue, CrdsValueLabel},
};
@ -76,17 +76,10 @@ impl CrdsGossip {
stakes: &HashMap<Pubkey, u64>,
) -> HashMap<Pubkey, HashSet<Pubkey>> {
let id = &self.id;
let crds = &self.crds;
let push = &mut self.push;
let versioned = labels
.into_iter()
.filter_map(|label| crds.lookup_versioned(&label));

let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
for val in versioned {
let origin = val.value.pubkey();
let hash = val.value_hash;
let peers = push.prune_received_cache(id, &origin, hash, stakes);
for origin in labels.iter().map(|k| k.pubkey()) {
let peers = push.prune_received_cache(id, &origin, stakes);
for from in peers {
prune_map.entry(from).or_default().insert(origin);
}
@ -113,7 +106,7 @@ impl CrdsGossip {
return Err(CrdsGossipError::PruneMessageTimeout);
}
if self.id == *destination {
self.push.process_prune_msg(peer, origin);
self.push.process_prune_msg(&self.id, peer, origin);
Ok(())
} else {
Err(CrdsGossipError::BadPruneDestination)
@ -158,24 +151,47 @@ impl CrdsGossip {
self.pull.mark_pull_request_creation_time(from, now)
}
/// process a pull request and create a response
pub fn process_pull_requests(
&mut self,
filters: Vec<(CrdsValue, CrdsFilter)>,
now: u64,
) -> Vec<Vec<CrdsValue>> {
pub fn process_pull_requests(&mut self, filters: Vec<(CrdsValue, CrdsFilter)>, now: u64) {
self.pull
.process_pull_requests(&mut self.crds, filters, now)
.process_pull_requests(&mut self.crds, filters, now);
}
/// process a pull response
pub fn process_pull_response(
&mut self,
from: &Pubkey,

pub fn generate_pull_responses(
&self,
filters: &[(CrdsValue, CrdsFilter)],
) -> Vec<Vec<CrdsValue>> {
self.pull.generate_pull_responses(&self.crds, filters)
}

pub fn filter_pull_responses(
&self,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
process_pull_stats: &mut ProcessPullStats,
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
self.pull
.process_pull_response(&mut self.crds, from, timeouts, response, now)
.filter_pull_responses(&self.crds, timeouts, response, now, process_pull_stats)
}

/// process a pull response
pub fn process_pull_responses(
&mut self,
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
now: u64,
process_pull_stats: &mut ProcessPullStats,
) {
let success = self.pull.process_pull_responses(
&mut self.crds,
from,
responses,
responses_expired_timeout,
now,
process_pull_stats,
);
self.push.push_pull_responses(success, now);
}

pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {
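
The CrdsGossip changes above split the old request/response pair into phases: responses are generated from an immutable table, incoming responses are filtered read-only, and only then are they applied mutably, with successes fed back to the push layer. Presumably this lets callers hold a read lock for the cheap steps. A sketch of the resulting call order, using the method names from this diff:

// Sketch: the split pull flow at the CrdsGossip level.
// Serving side: build responses without mutating, then record the requests.
let responses = gossip.generate_pull_responses(&filters);
gossip.process_pull_requests(filters, now);

// Receiving side: classify first (no mutation needed)...
let mut stats = ProcessPullStats::default();
let (fresh, expired) = gossip.filter_pull_responses(&timeouts, response_values, now, &mut stats);
// ...then take the mutable path once to apply both groups.
gossip.process_pull_responses(&from, fresh, expired, now, &mut stats);
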
@ -2,7 +2,6 @@
pub enum CrdsGossipError {
NoPeers,
PushMessageTimeout,
PushMessageAlreadyReceived,
PushMessageOldVersion,
BadPruneDestination,
PruneMessageTimeout,
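
Note that `PushMessageAlreadyReceived` is removed: with the reworked received cache below, a repeated push now surfaces as `PushMessageOldVersion`. The crds_gossip_pull.rs diff that follows also threads a `ProcessPullStats` value through the pull path so outcomes are tallied rather than returned as a bare failure count. A sketch of how the counters accumulate across the two phases (the `info!` reporting line is illustrative, not from the source):

// Sketch: tally pull-response outcomes for one batch.
let mut stats = ProcessPullStats::default();
let (fresh, expired) = pull.filter_pull_responses(&crds, &timeouts, responses, now, &mut stats);
let _successes = pull.process_pull_responses(&mut crds, &from, fresh, expired, now, &mut stats);
info!(
    "pull: success={} failed_insert={} failed_timeout={} timeouts={}",
    stats.success, stats.failed_insert, stats.failed_timeout, stats.timeout_count
);
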
@ -10,7 +10,7 @@
//! of false positives.

use crate::contact_info::ContactInfo;
use crate::crds::Crds;
use crate::crds::{Crds, VersionedCrdsValue};
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
@ -20,8 +20,8 @@ use solana_runtime::bloom::Bloom;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::cmp;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::collections::{HashMap, HashSet};

pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
@ -118,6 +118,14 @@ impl CrdsFilter {
}
}

#[derive(Default)]
pub struct ProcessPullStats {
pub success: usize,
pub failed_insert: usize,
pub failed_timeout: usize,
pub timeout_count: usize,
}

#[derive(Clone)]
pub struct CrdsGossipPull {
/// timestamp of last request
@ -126,6 +134,7 @@ pub struct CrdsGossipPull {
purged_values: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
pub num_pulls: usize,
}

impl Default for CrdsGossipPull {
@ -135,6 +144,7 @@ impl Default for CrdsGossipPull {
pull_request_time: HashMap::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
num_pulls: 0,
}
}
}
@ -204,14 +214,13 @@ impl CrdsGossipPull {
self.purged_values.push_back((hash, timestamp))
}

/// process a pull request and create a response
/// process a pull request
pub fn process_pull_requests(
&mut self,
crds: &mut Crds,
requests: Vec<(CrdsValue, CrdsFilter)>,
now: u64,
) -> Vec<Vec<CrdsValue>> {
let rv = self.filter_crds_values(crds, &requests);
) {
requests.into_iter().for_each(|(caller, _)| {
let key = caller.label().pubkey();
let old = crds.insert(caller, now);
@ -221,19 +230,33 @@ impl CrdsGossipPull {
}
crds.update_record_timestamp(&key, now);
});
rv
}
/// process a pull response
pub fn process_pull_response(
&mut self,
crds: &mut Crds,
from: &Pubkey,

/// Create gossip responses to pull requests
pub fn generate_pull_responses(
&self,
crds: &Crds,
requests: &[(CrdsValue, CrdsFilter)],
) -> Vec<Vec<CrdsValue>> {
self.filter_crds_values(crds, requests)
}

// Checks if responses should be inserted and
// returns those responses converted to VersionedCrdsValue
// Separated in two vecs as:
// .0 => responses that update the owner timestamp
// .1 => responses that do not update the owner timestamp
pub fn filter_pull_responses(
&self,
crds: &Crds,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
responses: Vec<CrdsValue>,
now: u64,
) -> usize {
let mut failed = 0;
for r in response {
stats: &mut ProcessPullStats,
) -> (Vec<VersionedCrdsValue>, Vec<VersionedCrdsValue>) {
let mut versioned = vec![];
let mut versioned_expired_timestamp = vec![];
for r in responses {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
if now
@ -252,11 +275,8 @@ impl CrdsGossipPull {
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
stats.timeout_count += 1;
stats.failed_timeout += 1;
continue;
}
}
@ -264,32 +284,69 @@ impl CrdsGossipPull {
// Before discarding this value, check if a ContactInfo for the owner
// exists in the table. If it doesn't, that implies that this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
stats.timeout_count += 1;
stats.failed_timeout += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
failed += crds.insert(r, now).is_err() as usize;
match crds.would_insert(r, now) {
Some(resp) => versioned_expired_timestamp.push(resp),
None => stats.failed_insert += 1,
}
continue;
}
}
}
}
let old = crds.insert(r, now);
failed += old.is_err() as usize;
match crds.would_insert(r, now) {
Some(resp) => versioned.push(resp),
None => stats.failed_insert += 1,
}
}
(versioned, versioned_expired_timestamp)
}

/// process a vec of pull responses
pub fn process_pull_responses(
&mut self,
crds: &mut Crds,
from: &Pubkey,
responses: Vec<VersionedCrdsValue>,
responses_expired_timeout: Vec<VersionedCrdsValue>,
now: u64,
stats: &mut ProcessPullStats,
) -> Vec<(CrdsValueLabel, Hash, u64)> {
let mut success = vec![];
let mut owners = HashSet::new();
for r in responses_expired_timeout {
stats.failed_insert += crds.insert_versioned(r).is_err() as usize;
}
for r in responses {
let owner = r.value.label().pubkey();
let label = r.value.label();
let wc = r.value.wallclock();
let hash = r.value_hash;
let old = crds.insert_versioned(r);
if old.is_err() {
stats.failed_insert += 1;
} else {
stats.success += 1;
self.num_pulls += 1;
success.push((label, hash, wc));
}
old.ok().map(|opt| {
crds.update_record_timestamp(&owner, now);
owners.insert(owner);
opt.map(|val| {
self.purged_values
.push_back((val.value_hash, val.local_timestamp))
})
});
}
crds.update_record_timestamp(from, now);
failed
owners.insert(*from);
for owner in owners {
crds.update_record_timestamp(&owner, now);
}
success
}
// build a set of filters of the current crds table
// num_filters - used to increase the likelihood of a value in crds being added to some filter
@ -379,6 +436,34 @@ impl CrdsGossipPull {
.count();
self.purged_values.drain(..cnt);
}

/// For legacy tests
#[cfg(test)]
pub fn process_pull_response(
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> (usize, usize, usize) {
let mut stats = ProcessPullStats::default();
let (versioned, versioned_expired_timeout) =
self.filter_pull_responses(crds, timeouts, response, now, &mut stats);
self.process_pull_responses(
crds,
from,
versioned,
versioned_expired_timeout,
now,
&mut stats,
);
(
stats.failed_timeout + stats.failed_insert,
stats.timeout_count,
stats.success,
)
}
}
#[cfg(test)]
mod test {
@ -578,8 +663,9 @@ mod test {
let mut dest_crds = Crds::default();
let mut dest = CrdsGossipPull::default();
let (_, filters, caller) = req.unwrap();
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.process_pull_requests(&mut dest_crds, filters, 1);
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let rsp = dest.generate_pull_responses(&dest_crds, &filters);
dest.process_pull_requests(&mut dest_crds, filters, 1);
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
assert!(dest_crds.lookup(&caller.label()).is_some());
assert_eq!(
@ -648,8 +734,9 @@ mod test {
PACKET_DATA_SIZE,
);
let (_, filters, caller) = req.unwrap();
let filters = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.process_pull_requests(&mut dest_crds, filters, 0);
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
dest.process_pull_requests(&mut dest_crds, filters, 0);
// if there is a false positive this is empty
// prob should be around 0.1 per iteration
if rsp.is_empty() {
@ -660,13 +747,15 @@ mod test {
continue;
}
assert_eq!(rsp.len(), 1);
let failed = node.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
);
let failed = node
.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
)
.0;
assert_eq!(failed, 0);
assert_eq!(
node_crds
@ -827,7 +916,8 @@ mod test {
&timeouts,
vec![peer_entry.clone()],
1,
),
)
.0,
0
);

@ -843,7 +933,8 @@ mod test {
&timeouts,
vec![peer_entry.clone(), unstaked_peer_entry],
node.msg_timeout + 100,
),
)
.0,
2
);

@ -856,7 +947,8 @@ mod test {
&timeouts,
vec![peer_entry],
node.msg_timeout + 1,
),
)
.0,
0
);

@ -872,7 +964,8 @@ mod test {
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
)
.0,
0
);

@ -885,7 +978,8 @@ mod test {
&timeouts,
vec![peer_vote],
node.msg_timeout + 1,
),
)
.0,
1
);
}

@ -5,7 +5,7 @@
//!
//! Main differences are:
//! 1. There is no `max hop`. Messages are signed with a local wallclock. If they are outside of
//! the local nodes wallclock window they are drooped silently.
//! the local nodes wallclock window they are dropped silently.
//! 2. The prune set is stored in a Bloom filter.

use crate::{
@ -35,6 +35,7 @@ pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
pub const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2;

#[derive(Clone)]
pub struct CrdsGossipPush {
@ -44,12 +45,18 @@ pub struct CrdsGossipPush {
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
/// push message queue
push_messages: HashMap<CrdsValueLabel, Hash>,
/// cache that tracks which validators a message was received from
received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
/// Cache that tracks which validators a message was received from
/// bool indicates it has been pruned.
/// This cache represents a lagging view of which validators
/// currently have this node in their `active_set`
received_cache: HashMap<Pubkey, HashMap<Pubkey, (bool, u64)>>,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
pub prune_timeout: u64,
pub num_total: usize,
pub num_old: usize,
pub num_pushes: usize,
}

impl Default for CrdsGossipPush {
@ -64,6 +71,9 @@ impl Default for CrdsGossipPush {
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
prune_timeout: CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS,
num_total: 0,
num_old: 0,
num_pushes: 0,
}
}
}
@ -81,18 +91,21 @@ impl CrdsGossipPush {
&mut self,
self_pubkey: &Pubkey,
origin: &Pubkey,
hash: Hash,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<Pubkey> {
let origin_stake = stakes.get(origin).unwrap_or(&0);
let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
let cache = self.received_cache.get(&hash);
let cache = self.received_cache.get(origin);
if cache.is_none() {
return Vec::new();
}
let peers = cache.unwrap();

let peers = &cache.unwrap().1;
let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
let peer_stake_total: u64 = peers
.iter()
.filter(|v| !(v.1).0)
.map(|v| stakes.get(v.0).unwrap_or(&0))
.sum();
let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
if peer_stake_total < prune_stake_threshold {
return Vec::new();
@ -100,7 +113,8 @@ impl CrdsGossipPush {

let staked_peers: Vec<(Pubkey, u64)> = peers
.iter()
.filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
.filter(|v| !(v.1).0)
.filter_map(|p| stakes.get(p.0).map(|s| (*p.0, *s)))
.filter(|(_, s)| *s > 0)
.collect();

@ -117,16 +131,27 @@ impl CrdsGossipPush {
let (next_peer, next_stake) = staked_peers[next];
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold {
if peer_stake_sum >= prune_stake_threshold
&& keep.len() >= CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES
{
break;
}
}

peers
.iter()
let pruned_peers: Vec<Pubkey> = peers
.keys()
.filter(|p| !keep.contains(p))
.cloned()
.collect()
.collect();
pruned_peers.iter().for_each(|p| {
self.received_cache
.get_mut(origin)
.unwrap()
.get_mut(p)
.unwrap()
.0 = true;
});
pruned_peers
}

/// process a push message to the network
@ -137,6 +162,7 @@ impl CrdsGossipPush {
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
self.num_total += 1;
if now
> value
.wallclock()
@ -149,21 +175,32 @@ impl CrdsGossipPush {
return Err(CrdsGossipError::PushMessageTimeout);
}
let label = value.label();
let origin = label.pubkey();
let new_value = crds.new_versioned(now, value);
let value_hash = new_value.value_hash;
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
received_set.insert(from.clone());
return Err(CrdsGossipError::PushMessageAlreadyReceived);
}
let received_set = self
.received_cache
.entry(origin)
.or_insert_with(HashMap::new);
received_set.entry(*from).or_insert((false, 0)).1 = now;

let old = crds.insert_versioned(new_value);
if old.is_err() {
self.num_old += 1;
return Err(CrdsGossipError::PushMessageOldVersion);
}
let mut received_set = HashSet::new();
received_set.insert(from.clone());
self.push_messages.insert(label, value_hash);
self.received_cache.insert(value_hash, (now, received_set));
Ok(old.ok().and_then(|opt| opt))
Ok(old.unwrap())
}

/// push pull responses
pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
for (label, value_hash, wc) in values {
if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
continue;
}
self.push_messages.insert(label, value_hash);
}
}

/// New push message to broadcast to peers.
@ -172,18 +209,10 @@ impl CrdsGossipPush {
/// The list of push messages is created such that all the randomly selected peers have not
/// pruned the source addresses.
pub fn new_push_messages(&mut self, crds: &Crds, now: u64) -> HashMap<Pubkey, Vec<CrdsValue>> {
let max = self.active_set.len();
let mut nodes: Vec<_> = (0..max).collect();
nodes.shuffle(&mut rand::thread_rng());
let peers: Vec<Pubkey> = nodes
.into_iter()
.filter_map(|n| self.active_set.get_index(n))
.take(self.push_fanout)
.map(|n| *n.0)
.collect();
let mut total_bytes: usize = 0;
let mut values = vec![];
let mut push_messages: HashMap<Pubkey, Vec<CrdsValue>> = HashMap::new();
trace!("new_push_messages {}", self.push_messages.len());
for (label, hash) in &self.push_messages {
let res = crds.lookup_versioned(label);
if res.is_none() {
@ -203,21 +232,37 @@ impl CrdsGossipPush {
}
values.push(value.clone());
}
trace!(
"new_push_messages {} {}",
values.len(),
self.active_set.len()
);
for v in values {
for p in peers.iter() {
let filter = self.active_set.get_mut(p);
if filter.is_some() && !filter.unwrap().contains(&v.label().pubkey()) {
push_messages.entry(*p).or_default().push(v.clone());
//use a consistent index for the same origin so
//the active set learns the MST for that origin
let start = v.label().pubkey().as_ref()[0] as usize;
let max = self.push_fanout.min(self.active_set.len());
for i in start..(start + max) {
let ix = i % self.active_set.len();
if let Some((p, filter)) = self.active_set.get_index(ix) {
if !filter.contains(&v.label().pubkey()) {
trace!("new_push_messages insert {} {:?}", *p, v);
push_messages.entry(*p).or_default().push(v.clone());
self.num_pushes += 1;
}
}
self.push_messages.remove(&v.label());
}
self.push_messages.remove(&v.label());
}
push_messages
}

/// add the `from` to the peer's filter of nodes
pub fn process_prune_msg(&mut self, peer: &Pubkey, origins: &[Pubkey]) {
pub fn process_prune_msg(&mut self, self_pubkey: &Pubkey, peer: &Pubkey, origins: &[Pubkey]) {
for origin in origins {
if origin == self_pubkey {
continue;
}
if let Some(p) = self.active_set.get_mut(peer) {
p.add(origin)
}
@ -339,15 +384,11 @@ impl CrdsGossipPush {

/// purge received push message cache
pub fn purge_old_received_cache(&mut self, min_time: u64) {
let old_msgs: Vec<Hash> = self
.received_cache
.iter()
.filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
.cloned()
.collect();
for k in old_msgs {
self.received_cache.remove(&k);
}
self.received_cache
.iter_mut()
.for_each(|v| v.1.retain(|_, v| v.1 > min_time));

self.received_cache.retain(|_, v| !v.is_empty());
}
}

@ -371,7 +412,6 @@ mod test {
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0,
)));
let label = value.label();
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
@ -380,11 +420,7 @@ mod test {
stakes.insert(p, 1);
});

let versioned = crds
.lookup_versioned(&label)
.expect("versioned value should exist");
let hash = versioned.value_hash;
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.is_empty(),
"should not prune if min threshold has not been reached"
@ -395,7 +431,7 @@ mod test {
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value, 0);

let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
let pruned = push.prune_received_cache(&self_id, &origin, &stakes);
assert!(
pruned.len() < low_staked_set.len() + 1,
"should not prune all peers"
@ -409,7 +445,7 @@ mod test {
}

#[test]
fn test_process_push() {
fn test_process_push_one() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@ -425,9 +461,9 @@ mod test {
assert_eq!(crds.lookup(&label), Some(&value));

// push it again
assert_eq!(
assert_matches!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
Err(CrdsGossipError::PushMessageOldVersion)
);
}
#[test]
@ -690,6 +726,7 @@ mod test {
#[test]
fn test_process_prune() {
let mut crds = Crds::default();
let self_id = Pubkey::new_rand();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@ -707,7 +744,11 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
Ok(None)
);
push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
push.process_prune_msg(
&self_id,
&peer.label().pubkey(),
&[new_msg.label().pubkey()],
);
assert_eq!(push.new_push_messages(&crds, 0), expected);
}
#[test]
@ -749,9 +790,9 @@ mod test {
assert_eq!(crds.lookup(&label), Some(&value));

// push it again
assert_eq!(
assert_matches!(
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
Err(CrdsGossipError::PushMessageOldVersion)
);

// purge the old pushed
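
Two mechanisms from the crds_gossip_push.rs diff above are worth spelling out. First, `prune_received_cache` now keys the cache by origin and keeps accumulating stake-heavy ingress peers until both the stake threshold and `CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES` are satisfied; everything else is marked pruned. A toy standalone sketch of that keep-set rule (not the actual implementation):

use std::collections::HashSet;

// Toy model: retain ingress peers until cumulative stake reaches the
// threshold AND a minimum number of ingress nodes has been kept.
fn keep_set(
    shuffled_peers: &[([u8; 32], u64)], // (peer id, stake), pre-shuffled by stake weight
    prune_stake_threshold: u64,
    min_ingress_nodes: usize,
) -> HashSet<[u8; 32]> {
    let mut keep = HashSet::new();
    let mut stake_sum: u64 = 0;
    for (peer, stake) in shuffled_peers {
        keep.insert(*peer);
        stake_sum += *stake;
        if stake_sum >= prune_stake_threshold && keep.len() >= min_ingress_nodes {
            break;
        }
    }
    keep // peers outside this set become eligible for pruning
}

Second, `new_push_messages` replaces the random peer shuffle with a fanout window that starts at an index derived from the origin's first pubkey byte, so pushes for the same origin consistently target the same active-set peers and the network can converge on a spanning tree for that origin.
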
40 core/src/fork_choice.rs Normal file
@ -0,0 +1,40 @@
use crate::{
consensus::{ComputedBankState, SwitchForkDecision, Tower},
progress_map::ProgressMap,
replay_stage::HeaviestForkFailures,
};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
use std::{
collections::{HashMap, HashSet},
sync::{Arc, RwLock},
};

pub(crate) struct SelectVoteAndResetForkResult {
pub vote_bank: Option<(Arc<Bank>, SwitchForkDecision)>,
pub reset_bank: Option<Arc<Bank>>,
pub heaviest_fork_failures: Vec<HeaviestForkFailures>,
}

pub(crate) trait ForkChoice {
fn compute_bank_stats(
&mut self,
bank: &Bank,
tower: &Tower,
progress: &mut ProgressMap,
computed_bank_stats: &ComputedBankState,
);

// Returns:
// 1) The heaviest overall bank
// 2) The heaviest bank on the same fork as the last vote (doesn't require a
// switching proof to vote for)
fn select_forks(
&self,
frozen_banks: &[Arc<Bank>],
tower: &Tower,
progress: &ProgressMap,
ancestors: &HashMap<u64, HashSet<u64>>,
bank_forks: &RwLock<BankForks>,
) -> (Arc<Bank>, Option<Arc<Bank>>);
}
72 core/src/heaviest_subtree_fork_choice/fork_choice.rs Normal file
@ -0,0 +1,72 @@
use crate::{
consensus::{ComputedBankState, Tower},
fork_choice::ForkChoice,
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
progress_map::ProgressMap,
};
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
use std::{
collections::{HashMap, HashSet},
sync::{Arc, RwLock},
};

impl ForkChoice for HeaviestSubtreeForkChoice {
fn compute_bank_stats(
&mut self,
bank: &Bank,
_tower: &Tower,
_progress: &mut ProgressMap,
computed_bank_stats: &ComputedBankState,
) {
let ComputedBankState { pubkey_votes, .. } = computed_bank_stats;

// Update `heaviest_subtree_fork_choice` to find the best fork to build on
let best_overall_slot = self.add_votes(
&pubkey_votes,
bank.epoch_stakes_map(),
bank.epoch_schedule(),
);

datapoint_info!(
"best_slot",
("slot", bank.slot(), i64),
("best_slot", best_overall_slot, i64),
);
}

// Returns:
// 1) The heaviest overall bank
// 2) The heaviest bank on the same fork as the last vote (doesn't require a
// switching proof to vote for)
fn select_forks(
&self,
_frozen_banks: &[Arc<Bank>],
tower: &Tower,
_progress: &ProgressMap,
_ancestors: &HashMap<u64, HashSet<u64>>,
bank_forks: &RwLock<BankForks>,
) -> (Arc<Bank>, Option<Arc<Bank>>) {
let last_vote = tower.last_vote().slots.last().cloned();
let heaviest_slot_on_same_voted_fork = last_vote.map(|last_vote| {
let heaviest_slot_on_same_voted_fork =
self.best_slot(last_vote).expect("last_vote is a frozen bank so must have been added to heaviest_subtree_fork_choice at time of freezing");
if heaviest_slot_on_same_voted_fork == last_vote {
None
} else {
Some(heaviest_slot_on_same_voted_fork)
}
}).unwrap_or(None);
let heaviest_slot = self.best_overall_slot();
let r_bank_forks = bank_forks.read().unwrap();
(
r_bank_forks.get(heaviest_slot).unwrap().clone(),
heaviest_slot_on_same_voted_fork.map(|heaviest_slot_on_same_voted_fork| {
r_bank_forks
.get(heaviest_slot_on_same_voted_fork)
.unwrap()
.clone()
}),
)
}
}
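
The `select_forks` contract above returns two things: the heaviest bank overall, and, when the validator has voted, the heaviest bank that extends its last voted fork (voting there needs no switching proof). A compact sketch of that selection shape, with `best_slot` treated as returning an Option as it effectively does here:

// Sketch of the two-slot selection contract in select_forks.
let heaviest_slot = fork_choice.best_overall_slot();
let heaviest_on_voted_fork = last_vote_slot.and_then(|last_vote| {
    let best = fork_choice.best_slot(last_vote)?;
    // If the best descendant of the last vote is the vote itself,
    // there is nothing newer to reset to on that fork.
    if best == last_vote { None } else { Some(best) }
});
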
1040 core/src/heaviest_subtree_fork_choice/mod.rs Normal file
File diff suppressed because it is too large
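
A note on the compaction cadence introduced in ledger_cleanup_service.rs below: DEFAULT_COMPACTION_SLOT_INTERVAL is defined as TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT, which, assuming the v1.2-era SDK defaults of 160 ticks per second and 64 ticks per slot, works out to roughly one compaction per day's worth of slots. The arithmetic, with the assumed constants restated for illustration:

// Restating the assumed SDK defaults; illustration only.
const DEFAULT_TICKS_PER_SECOND: u64 = 160;
const SECONDS_PER_DAY: u64 = 24 * 60 * 60; // 86_400
const TICKS_PER_DAY: u64 = DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY; // 13_824_000
const DEFAULT_TICKS_PER_SLOT: u64 = 64;
// 13_824_000 / 64 = 216_000 slots between compactions, about one day.
const DEFAULT_COMPACTION_SLOT_INTERVAL: u64 = TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;
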
@ -1,9 +1,9 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage

use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore::{Blockstore, PurgeType};
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_sdk::clock::Slot;
use solana_sdk::clock::{Slot, DEFAULT_TICKS_PER_SLOT, TICKS_PER_DAY};
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
@ -32,6 +32,10 @@ pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Delay between purges to cooperate with other blockstore users
pub const DEFAULT_DELAY_BETWEEN_PURGES: Duration = Duration::from_millis(500);

// Compacting at a slower interval than purging helps keep IOPS down.
// Once a day should be ample
const DEFAULT_COMPACTION_SLOT_INTERVAL: u64 = TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;

pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
}
@ -49,6 +53,8 @@ impl LedgerCleanupService {
);
let exit = exit.clone();
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;

let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
@ -62,6 +68,8 @@ impl LedgerCleanupService {
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
Some(DEFAULT_DELAY_BETWEEN_PURGES),
&mut last_compaction_slot,
DEFAULT_COMPACTION_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
@ -116,7 +124,7 @@ impl LedgerCleanupService {
}
}

(true, lowest_cleanup_slot, first_slot, total_shreds)
(true, first_slot, lowest_cleanup_slot, total_shreds)
}

fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
@ -135,6 +143,8 @@ impl LedgerCleanupService {
last_purge_slot: &mut u64,
purge_interval: u64,
delay_between_purges: Option<Duration>,
last_compaction_slot: &mut u64,
compaction_interval: u64,
) -> Result<(), RecvTimeoutError> {
let root = Self::receive_new_roots(new_root_receiver)?;
if root - *last_purge_slot <= purge_interval {
@ -143,19 +153,20 @@ impl LedgerCleanupService {

let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: last_root={}, last_purge_slot={}, purge_interval={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
"purge: last_root={}, last_purge_slot={}, purge_interval={}, last_compaction_slot={}, disk_utilization={:?}",
root, last_purge_slot, purge_interval, last_compaction_slot, disk_utilization_pre
);
*last_purge_slot = root;

let (slots_to_clean, lowest_cleanup_slot, first_slot, total_shreds) =
let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);

if slots_to_clean {
info!(
"purging data from slots {} to {}",
first_slot, lowest_cleanup_slot
);
let mut compact_first_slot = std::u64::MAX;
if lowest_cleanup_slot.saturating_sub(*last_compaction_slot) > compaction_interval {
compact_first_slot = *last_compaction_slot;
*last_compaction_slot = lowest_cleanup_slot;
}

let purge_complete = Arc::new(AtomicBool::new(false));
let blockstore = blockstore.clone();
@ -167,14 +178,37 @@ impl LedgerCleanupService {
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
slot_update_time.stop();

info!(
"purging data from slots {} to {}",
purge_first_slot, lowest_cleanup_slot
);

let mut purge_time = Measure::start("purge_slots_with_delay");
blockstore.purge_slots_with_delay(
first_slot,
purge_first_slot,
lowest_cleanup_slot,
delay_between_purges,
PurgeType::PrimaryIndex,
);
purge_time.stop();
info!("{}", purge_time);

if compact_first_slot < lowest_cleanup_slot {
info!(
"compacting data from slots {} to {}",
compact_first_slot, lowest_cleanup_slot
);
if let Err(err) =
blockstore.compact_storage(compact_first_slot, lowest_cleanup_slot)
{
// This error is not fatal and indicates an internal error?
error!(
"Error: {:?}; Couldn't compact storage from {:?} to {:?}",
err, compact_first_slot, lowest_cleanup_slot
);
}
}

purge_complete1.store(true, Ordering::Relaxed);
})
.unwrap();
@ -233,6 +267,7 @@ mod tests {

//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
@ -241,6 +276,8 @@ mod tests {
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();

@ -272,6 +309,7 @@ mod tests {
info!("{}", first_insert);

let mut last_purge_slot = 0;
let mut last_compaction_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
@ -296,6 +334,8 @@ mod tests {
&mut last_purge_slot,
10,
None,
&mut last_compaction_slot,
10,
)
.unwrap();
time.stop();

@ -11,10 +11,12 @@ pub mod banking_stage;
pub mod broadcast_stage;
pub mod cluster_info_vote_listener;
pub mod commitment;
pub mod commitment_service;
mod deprecated;
pub mod shred_fetch_stage;
#[macro_use]
pub mod contact_info;
pub mod bank_weight_fork_choice;
pub mod cluster_info;
pub mod cluster_slots;
pub mod consensus;
@ -26,8 +28,10 @@ pub mod crds_gossip_push;
pub mod crds_value;
pub mod epoch_slots;
pub mod fetch_stage;
pub mod fork_choice;
pub mod gen_keys;
pub mod gossip_service;
pub mod heaviest_subtree_fork_choice;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod non_circulating_supply;
@ -43,10 +47,12 @@ pub mod retransmit_stage;
pub mod rewards_recorder_service;
pub mod rpc;
pub mod rpc_error;
pub mod rpc_health;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod sigverify;

@ -57,8 +57,8 @@ solana_sdk::pubkeys!(
[
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
"CWeRmXme7LmbaUWTZWFLt6FMnpzLCHaQLuR2TdgFn4Lq",
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
"25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
@ -85,6 +85,7 @@ solana_sdk::pubkeys!(
[
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
"FdGYQdiRky8NZzN9wZtczTBcWLYYRXrJ3LMDhqDPn5rM",
]
);

@ -354,7 +354,7 @@ impl PohRecorder {
pub fn tick(&mut self) {
let now = Instant::now();
let poh_entry = self.poh.lock().unwrap().tick();
inc_new_counter_warn!(
inc_new_counter_info!(
"poh_recorder-tick_lock_contention",
timing::duration_as_us(&now.elapsed()) as usize
);
@ -364,7 +364,7 @@ impl PohRecorder {
trace!("tick_height {}", self.tick_height);

if self.leader_first_tick_height.is_none() {
inc_new_counter_warn!(
inc_new_counter_info!(
"poh_recorder-tick_overhead",
timing::duration_as_us(&now.elapsed()) as usize
);
@ -380,7 +380,7 @@ impl PohRecorder {
self.tick_cache.push((entry, self.tick_height));
let _ = self.flush_cache(true);
}
inc_new_counter_warn!(
inc_new_counter_info!(
"poh_recorder-tick_overhead",
timing::duration_as_us(&now.elapsed()) as usize
);

@ -89,18 +89,22 @@ impl PohService {
let poh = poh_recorder.lock().unwrap().poh.clone();
let mut now = Instant::now();
let mut num_ticks = 0;
let mut num_hashes = 0;
loop {
num_hashes += NUM_HASHES_PER_BATCH;
if poh.lock().unwrap().hash(NUM_HASHES_PER_BATCH) {
// Lock PohRecorder only for the final hash...
poh_recorder.lock().unwrap().tick();
num_ticks += 1;
if num_ticks >= DEFAULT_TICKS_PER_SLOT * 2 {
datapoint_debug!(
datapoint_info!(
"poh-service",
("ticks", num_ticks as i64, i64),
("hashes", num_hashes as i64, i64),
("elapsed_ms", now.elapsed().as_millis() as i64, i64),
);
num_ticks = 0;
num_hashes = 0;
now = Instant::now();
}
if poh_exit.load(Ordering::Relaxed) {

@ -3,7 +3,7 @@
use crate::{
cluster_info::ClusterInfo,
cluster_slots::ClusterSlots,
consensus::VOTE_THRESHOLD_SIZE,
commitment::VOTE_THRESHOLD_SIZE,
result::Result,
serve_repair::{RepairType, ServeRepair, DEFAULT_NONCE},
};
File diff suppressed because it is too large
@ -3,6 +3,7 @@
use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
repair_service::DuplicateSlotsResetSender,
repair_service::RepairInfo,
result::{Error, Result},
@ -18,8 +19,9 @@ use solana_ledger::{
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_perf::packet::Packets;
use solana_sdk::clock::Slot;
use solana_sdk::clock::{Epoch, Slot};
use solana_sdk::epoch_schedule::EpochSchedule;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use solana_streamer::streamer::PacketReceiver;
use std::{
@ -44,6 +46,8 @@ struct RetransmitStats {
total_packets: AtomicU64,
total_batches: AtomicU64,
total_time: AtomicU64,
epoch_fetch: AtomicU64,
epoch_cache_update: AtomicU64,
repair_total: AtomicU64,
discard_total: AtomicU64,
retransmit_total: AtomicU64,
@ -65,6 +69,8 @@ fn update_retransmit_stats(
peers_len: usize,
packets_by_slot: HashMap<Slot, usize>,
packets_by_source: HashMap<String, usize>,
epoch_fetch: u64,
epoch_cach_update: u64,
) {
stats.total_time.fetch_add(total_time, Ordering::Relaxed);
stats
@ -83,6 +89,10 @@ fn update_retransmit_stats(
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers_total, Ordering::Relaxed);
stats.total_batches.fetch_add(1, Ordering::Relaxed);
stats.epoch_fetch.fetch_add(epoch_fetch, Ordering::Relaxed);
stats
.epoch_cache_update
.fetch_add(epoch_cach_update, Ordering::Relaxed);
{
let mut stats_packets_by_slot = stats.packets_by_slot.lock().unwrap();
for (slot, count) in packets_by_slot {
@ -107,6 +117,16 @@ fn update_retransmit_stats(
stats.total_time.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_fetch",
stats.epoch_fetch.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"epoch_cache_update",
stats.epoch_cache_update.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_batches",
stats.total_batches.swap(0, Ordering::Relaxed) as i64,
@ -148,6 +168,14 @@ fn update_retransmit_stats(
}
}

#[derive(Default)]
struct EpochStakesCache {
epoch: Epoch,
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
peers: Vec<ContactInfo>,
stakes_and_index: Vec<(u64, usize)>,
}

fn retransmit(
bank_forks: &Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
@ -156,6 +184,8 @@ fn retransmit(
sock: &UdpSocket,
id: u32,
stats: &Arc<RetransmitStats>,
epoch_stakes_cache: &Arc<RwLock<EpochStakesCache>>,
last_peer_update: &Arc<AtomicU64>,
) -> Result<()> {
let timer = Duration::new(1, 0);
let r_lock = r.lock().unwrap();
@ -172,12 +202,42 @@ fn retransmit(
}
drop(r_lock);

let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let r_bank = bank_forks.read().unwrap().working_bank();
let bank_epoch = r_bank.get_leader_schedule_epoch(r_bank.slot());
epoch_fetch.stop();

let mut epoch_cache_update = Measure::start("retransmit_epoch_cach_update");
let mut r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
if r_epoch_stakes_cache.epoch != bank_epoch {
drop(r_epoch_stakes_cache);
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
if w_epoch_stakes_cache.epoch != bank_epoch {
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
w_epoch_stakes_cache.stakes = stakes;
w_epoch_stakes_cache.epoch = bank_epoch;
}
drop(w_epoch_stakes_cache);
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
}

let now = timestamp();
let last = last_peer_update.load(Ordering::Relaxed);
if now - last > 1000 && last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
{
drop(r_epoch_stakes_cache);
let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
let (peers, stakes_and_index) =
cluster_info.sorted_retransmit_peers_and_stakes(w_epoch_stakes_cache.stakes.clone());
w_epoch_stakes_cache.peers = peers;
w_epoch_stakes_cache.stakes_and_index = stakes_and_index;
drop(w_epoch_stakes_cache);
r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
}
let mut peers_len = 0;
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
let stakes = stakes.map(Arc::new);
let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
epoch_cache_update.stop();

let my_id = cluster_info.id();
let mut discard_total = 0;
let mut repair_total = 0;
@ -202,8 +262,8 @@ fn retransmit(
let mut compute_turbine_peers = Measure::start("turbine_start");
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
&my_id,
&peers,
&stakes_and_index,
&r_epoch_stakes_cache.peers,
&r_epoch_stakes_cache.stakes_and_index,
packet.meta.seed,
);
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
@ -216,8 +276,14 @@ fn retransmit(

let (neighbors, children) =
compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, indexes);
let neighbors: Vec<_> = neighbors.into_iter().map(|index| &peers[index]).collect();
let children: Vec<_> = children.into_iter().map(|index| &peers[index]).collect();
let neighbors: Vec<_> = neighbors
.into_iter()
.map(|index| &r_epoch_stakes_cache.peers[index])
.collect();
let children: Vec<_> = children
.into_iter()
.map(|index| &r_epoch_stakes_cache.peers[index])
.collect();
compute_turbine_peers.stop();
compute_turbine_peers_total += compute_turbine_peers.as_us();

@ -258,6 +324,8 @@ fn retransmit(
peers_len,
packets_by_slot,
packets_by_source,
epoch_fetch.as_us(),
epoch_cache_update.as_us(),
);

Ok(())
@ -287,6 +355,8 @@ pub fn retransmitter(
let r = r.clone();
let cluster_info = cluster_info.clone();
let stats = stats.clone();
let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default()));
let last_peer_update = Arc::new(AtomicU64::new(0));

Builder::new()
.name("solana-retransmitter".to_string())
@ -301,6 +371,8 @@ pub fn retransmitter(
&sockets[s],
s as u32,
&stats,
&epoch_stakes_cache,
&last_peer_update,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
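
The EpochStakesCache added above is refreshed with a classic double-checked pattern: take the read lock, and only if the cached epoch is stale drop it, take the write lock, re-check, and recompute, so concurrent retransmit threads do not repeat the work. A separate timestamped compare_and_swap gate limits the sorted-peer refresh to roughly once per second across threads. A stripped-down sketch of the double-checked shape (types simplified; not the actual code):

use std::sync::RwLock;

// Minimal double-checked refresh, mirroring the shape used in retransmit().
fn cached_value(cache: &RwLock<(u64, String)>, epoch: u64) -> String {
    let mut guard = cache.read().unwrap();
    if guard.0 != epoch {
        drop(guard);                   // release the read lock first
        let mut w = cache.write().unwrap();
        if w.0 != epoch {              // re-check: another thread may have won
            *w = (epoch, format!("stakes for epoch {}", epoch));
        }
        drop(w);
        guard = cache.read().unwrap(); // fall back to a read view
    }
    guard.1.clone()
}
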
866 core/src/rpc.rs
File diff suppressed because it is too large
@ -3,6 +3,7 @@ use solana_sdk::clock::Slot;

const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;

pub enum RpcCustomError {
NonexistentClusterRoot {
@ -13,6 +14,9 @@ pub enum RpcCustomError {
slot: Slot,
first_available_block: Slot,
},
SendTransactionPreflightFailure {
message: String,
},
}

impl From<RpcCustomError> for Error {
@ -40,6 +44,11 @@ impl From<RpcCustomError> for Error {
),
data: None,
},
RpcCustomError::SendTransactionPreflightFailure { message } => Self {
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_2),
message,
data: None,
},
}
}
}
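
The new rpc_health.rs below reduces health to one comparison: the node is Ok if its newest accounts-hash slot is within `health_check_slot_distance` of the best slot reported by any trusted validator. A tiny standalone sketch of the predicate with a worked example (the function name is mine, not from the source):

// The core predicate, extracted for illustration.
fn is_healthy(local_slot: u64, trusted_slot: u64, distance: u64) -> bool {
    local_slot > 0 && trusted_slot > 0 && local_slot > trusted_slot.saturating_sub(distance)
}

fn main() {
    assert!(is_healthy(901, 1_000, 100));  // 901 > 900: within distance, Ok
    assert!(!is_healthy(900, 1_000, 100)); // 900 > 900 fails: Behind
}
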
118
core/src/rpc_health.rs
Normal file
118
core/src/rpc_health.rs
Normal file
@ -0,0 +1,118 @@
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
#[derive(PartialEq, Clone, Copy)]
|
||||
pub enum RpcHealthStatus {
|
||||
Ok,
|
||||
Behind, // Validator is behind its trusted validators
|
||||
}
|
||||
|
||||
pub struct RpcHealth {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
health_check_slot_distance: u64,
|
||||
override_health_check: Arc<AtomicBool>,
|
||||
#[cfg(test)]
|
||||
stub_health_status: std::sync::RwLock<Option<RpcHealthStatus>>,
|
||||
}
|
||||
|
||||
impl RpcHealth {
|
||||
pub fn new(
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
health_check_slot_distance: u64,
|
||||
override_health_check: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
Self {
|
||||
cluster_info,
|
||||
trusted_validators,
|
||||
health_check_slot_distance,
|
||||
override_health_check,
|
||||
#[cfg(test)]
|
||||
stub_health_status: std::sync::RwLock::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check(&self) -> RpcHealthStatus {
|
||||
#[cfg(test)]
|
||||
{
|
||||
if let Some(stub_health_status) = *self.stub_health_status.read().unwrap() {
|
||||
return stub_health_status;
|
||||
}
|
||||
}
|
||||
|
||||
if self.override_health_check.load(Ordering::Relaxed) {
|
||||
RpcHealthStatus::Ok
|
||||
} else if let Some(trusted_validators) = &self.trusted_validators {
|
||||
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
|
||||
(
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0),
|
||||
trusted_validators
|
||||
.iter()
|
||||
.map(|trusted_validator| {
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0),
|
||||
)
|
||||
};
|
||||
|
||||
// This validator is considered healthy if its latest account hash slot is within
|
||||
// `health_check_slot_distance` of the latest trusted validator's account hash slot
|
||||
if latest_account_hash_slot > 0
|
||||
&& latest_trusted_validator_account_hash_slot > 0
|
||||
&& latest_account_hash_slot
|
||||
> latest_trusted_validator_account_hash_slot
|
||||
.saturating_sub(self.health_check_slot_distance)
|
||||
{
|
||||
RpcHealthStatus::Ok
|
||||
} else {
|
||||
warn!(
|
||||
"health check: me={}, latest trusted_validator={}",
|
||||
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
|
||||
);
|
||||
RpcHealthStatus::Behind
|
||||
}
|
||||
} else {
|
||||
// No trusted validator point of reference available, so this validator is healthy
|
||||
// because it's running
|
||||
RpcHealthStatus::Ok
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn stub() -> Arc<Self> {
|
||||
Arc::new(Self::new(
|
||||
Arc::new(ClusterInfo::default()),
|
||||
None,
|
||||
42,
|
||||
Arc::new(AtomicBool::new(false)),
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn stub_set_health_status(&self, stub_health_status: Option<RpcHealthStatus>) {
|
||||
*self.stub_health_status.write().unwrap() = stub_health_status;
|
||||
}
|
||||
}
|
@ -355,8 +355,8 @@ mod tests {
    use super::*;
    use crate::{
        cluster_info_vote_listener::{ClusterInfoVoteListener, VoteTracker},
        commitment::{BlockCommitmentCache, CacheSlotInfo},
        rpc_subscriptions::tests::robust_poll_or_panic,
        commitment::BlockCommitmentCache,
        rpc_subscriptions::{tests::robust_poll_or_panic, CacheSlotInfo},
    };
    use crossbeam_channel::unbounded;
    use jsonrpc_core::{futures::sync::mpsc, Response};
@ -420,7 +420,7 @@ mod tests {
        let bob_pubkey = bob.pubkey();
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let rpc = RpcSolPubSubImpl {
@ -471,7 +471,7 @@ mod tests {
        let bob_pubkey = Pubkey::new_rand();
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

@ -528,7 +528,7 @@ mod tests {
        let budget_program_id = solana_budget_program::id();
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
        let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
        bank_forks.write().unwrap().insert(bank1);
@ -638,7 +638,7 @@ mod tests {
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, Bank::new(&genesis_config))));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::new(&genesis_config))));

        let mut io = PubSubHandler::default();
        let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
@ -680,7 +680,7 @@ mod tests {
        } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(1, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let bob = Keypair::new();
@ -731,7 +731,7 @@ mod tests {
        } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
        let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
        bank_forks.write().unwrap().insert(bank1);
@ -807,7 +807,7 @@ mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
        let session = create_session();
        let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
@ -837,7 +837,7 @@ mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
        let session = create_session();
        let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
@ -884,7 +884,7 @@ mod tests {
            create_genesis_config_with_vote_accounts(10_000, &validator_voting_keypairs, 100);
        let exit = Arc::new(AtomicBool::new(false));
        let bank = Bank::new(&genesis_config);
        let bank_forks = BankForks::new(0, bank);
        let bank_forks = BankForks::new(bank);
        let bank = bank_forks.get(0).unwrap().clone();
        let bank_forks = Arc::new(RwLock::new(bank_forks));

@ -944,7 +944,7 @@ mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let rpc = RpcSolPubSubImpl::default_with_blockstore_bank_forks(blockstore, bank_forks);
        let session = create_session();
        let (subscriber, _id_receiver, _) = Subscriber::new_test("voteNotification");

@ -93,7 +93,7 @@ mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let subscriptions = Arc::new(RpcSubscriptions::new(
            &exit,
            bank_forks,
@ -1,7 +1,8 @@
//! The `rpc_service` module implements the Solana JSON RPC service.

use crate::{
    cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, validator::ValidatorExit,
    cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, rpc_health::*,
    send_transaction_service::SendTransactionService, validator::ValidatorExit,
};
use jsonrpc_core::MetaIoHandler;
use jsonrpc_http_server::{
@ -19,20 +20,17 @@ use std::{
    collections::HashSet,
    net::SocketAddr,
    path::{Path, PathBuf},
    sync::atomic::{AtomicBool, Ordering},
    sync::{mpsc::channel, Arc, RwLock},
    thread::{self, Builder, JoinHandle},
};
use tokio::prelude::Future;

// If trusted validators are specified, consider this validator healthy if its latest account hash
// is no further behind than this distance from the latest trusted validator account hash
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;

pub struct JsonRpcService {
    thread_hdl: JoinHandle<()>,

    #[cfg(test)]
    pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>, // Used only by test_rpc_new()...
    pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...

    close_handle: Option<CloseHandle>,
}
@ -41,18 +39,16 @@ struct RpcRequestMiddleware {
    ledger_path: PathBuf,
    snapshot_archive_path_regex: Regex,
    snapshot_config: Option<SnapshotConfig>,
    cluster_info: Arc<ClusterInfo>,
    trusted_validators: Option<HashSet<Pubkey>>,
    bank_forks: Arc<RwLock<BankForks>>,
    health: Arc<RpcHealth>,
}

impl RpcRequestMiddleware {
    pub fn new(
        ledger_path: PathBuf,
        snapshot_config: Option<SnapshotConfig>,
        cluster_info: Arc<ClusterInfo>,
        trusted_validators: Option<HashSet<Pubkey>>,
        bank_forks: Arc<RwLock<BankForks>>,
        health: Arc<RpcHealth>,
    ) -> Self {
        Self {
            ledger_path,
@ -61,9 +57,8 @@ impl RpcRequestMiddleware {
            )
            .unwrap(),
            snapshot_config,
            cluster_info,
            trusted_validators,
            bank_forks,
            health,
        }
    }

@ -134,58 +129,10 @@ impl RpcRequestMiddleware {
    }

    fn health_check(&self) -> &'static str {
        let response = if let Some(trusted_validators) = &self.trusted_validators {
            let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
                (
                    self.cluster_info
                        .get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
                            hashes
                                .iter()
                                .max_by(|a, b| a.0.cmp(&b.0))
                                .map(|slot_hash| slot_hash.0)
                        })
                        .flatten()
                        .unwrap_or(0),
                    trusted_validators
                        .iter()
                        .map(|trusted_validator| {
                            self.cluster_info
                                .get_accounts_hash_for_node(&trusted_validator, |hashes| {
                                    hashes
                                        .iter()
                                        .max_by(|a, b| a.0.cmp(&b.0))
                                        .map(|slot_hash| slot_hash.0)
                                })
                                .flatten()
                                .unwrap_or(0)
                        })
                        .max()
                        .unwrap_or(0),
                )
            };

            // This validator is considered healthy if its latest account hash slot is within
            // `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
            if latest_account_hash_slot > 0
                && latest_trusted_validator_account_hash_slot > 0
                && latest_account_hash_slot
                    > latest_trusted_validator_account_hash_slot
                        .saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
            {
                "ok"
            } else {
                warn!(
                    "health check: me={}, latest trusted_validator={}",
                    latest_account_hash_slot, latest_trusted_validator_account_hash_slot
                );
                "behind"
            }
        } else {
            // No trusted validator point of reference available, so this validator is healthy
            // because it's running
            "ok"
        let response = match self.health.check() {
            RpcHealthStatus::Ok => "ok",
            RpcHealthStatus::Behind => "behind",
        };

        info!("health check: {}", response);
        response
    }
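Operationally this means a node's health can be probed with a plain HTTP GET against the RPC port. A self-contained sketch using only the standard library, assuming a local validator with its RPC port on the default 8899:

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // The middleware above answers GET /health with a plain-text body.
    let mut stream = TcpStream::connect("127.0.0.1:8899")?;
    stream.write_all(b"GET /health HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")?;
    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    // The last line of the raw HTTP response is the body: "ok" or "behind".
    println!("health: {}", response.lines().last().unwrap_or(""));
    Ok(())
}
```

A load balancer can key off this body (or the HTTP status) to rotate lagging nodes out of service, which is exactly what the override flag introduced in this change is designed to work with.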
@ -290,16 +237,36 @@ impl JsonRpcService {
        ledger_path: &Path,
        validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
        trusted_validators: Option<HashSet<Pubkey>>,
        override_health_check: Arc<AtomicBool>,
    ) -> Self {
        info!("rpc bound to {:?}", rpc_addr);
        info!("rpc configuration: {:?}", config);
        let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(

        let health = Arc::new(RpcHealth::new(
            cluster_info.clone(),
            trusted_validators,
            config.health_check_slot_distance,
            override_health_check,
        ));

        let exit_send_transaction_service = Arc::new(AtomicBool::new(false));
        let send_transaction_service = Arc::new(SendTransactionService::new(
            &cluster_info,
            &bank_forks,
            &exit_send_transaction_service,
        ));

        let request_processor = JsonRpcRequestProcessor::new(
            config,
            bank_forks.clone(),
            block_commitment_cache,
            blockstore,
            validator_exit.clone(),
        )));
            health.clone(),
            cluster_info,
            genesis_hash,
            send_transaction_service,
        );

        #[cfg(test)]
        let test_request_processor = request_processor.clone();
@ -317,17 +284,12 @@ impl JsonRpcService {
        let request_middleware = RpcRequestMiddleware::new(
            ledger_path,
            snapshot_config,
            cluster_info.clone(),
            trusted_validators,
            bank_forks.clone(),
            health.clone(),
        );
        let server = ServerBuilder::with_meta_extractor(
            io,
            move |_req: &hyper::Request<hyper::Body>| Meta {
                request_processor: request_processor.clone(),
                cluster_info: cluster_info.clone(),
                genesis_hash,
            },
            move |_req: &hyper::Request<hyper::Body>| request_processor.clone(),
        )
        .threads(num_cpus::get())
        .cors(DomainsValidation::AllowOnly(vec![
@ -350,6 +312,7 @@ impl JsonRpcService {
            let server = server.unwrap();
            close_handle_sender.send(server.close_handle()).unwrap();
            server.wait();
            exit_send_transaction_service.store(true, Ordering::Relaxed);
        })
        .unwrap();

@ -383,7 +346,6 @@ impl JsonRpcService {
mod tests {
    use super::*;
    use crate::{
        contact_info::ContactInfo,
        crds_value::{CrdsData, CrdsValue, SnapshotHash},
        rpc::tests::create_validator_exit,
    };
@ -394,8 +356,7 @@ mod tests {
    };
    use solana_runtime::bank::Bank;
    use solana_sdk::signature::Signer;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
    use std::sync::atomic::AtomicBool;
    use std::net::{IpAddr, Ipv4Addr};

    #[test]
    fn test_rpc_new() {
@ -407,13 +368,13 @@ mod tests {
        let exit = Arc::new(AtomicBool::new(false));
        let validator_exit = create_validator_exit(&exit);
        let bank = Bank::new(&genesis_config);
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
        let cluster_info = Arc::new(ClusterInfo::default());
        let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
        let rpc_addr = SocketAddr::new(
            ip_addr,
            solana_net_utils::find_available_port_in_range(ip_addr, (10000, 65535)).unwrap(),
        );
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let block_commitment_cache = Arc::new(RwLock::new(
@ -431,6 +392,7 @@ mod tests {
            &PathBuf::from("farf"),
            validator_exit,
            None,
            Arc::new(AtomicBool::new(false)),
        );
        let thread = rpc_service.thread_hdl.thread();
        assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
@ -439,8 +401,6 @@ mod tests {
            10_000,
            rpc_service
                .request_processor
                .read()
                .unwrap()
                .get_balance(Ok(mint_keypair.pubkey()), None)
                .unwrap()
                .value
@ -452,7 +412,7 @@ mod tests {
    fn create_bank_forks() -> Arc<RwLock<BankForks>> {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)))
        Arc::new(RwLock::new(BankForks::new(bank)))
    }

    #[test]
@ -472,15 +432,12 @@ mod tests {

    #[test]
    fn test_is_file_get_path() {
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
        let bank_forks = create_bank_forks();

        let rrm = RpcRequestMiddleware::new(
            PathBuf::from("/"),
            None,
            cluster_info.clone(),
            None,
            bank_forks.clone(),
            RpcHealth::stub(),
        );
        let rrm_with_snapshot_config = RpcRequestMiddleware::new(
            PathBuf::from("/"),
@ -490,9 +447,8 @@ mod tests {
                snapshot_path: PathBuf::from("/"),
                compression: CompressionType::Bzip2,
            }),
            cluster_info,
            None,
            bank_forks,
            RpcHealth::stub(),
        );

        assert!(rrm.is_file_get_path("/genesis.tar.bz2"));
@ -518,30 +474,30 @@ mod tests {

    #[test]
    fn test_health_check_with_no_trusted_validators() {
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));

        let rm = RpcRequestMiddleware::new(
            PathBuf::from("/"),
            None,
            cluster_info,
            None,
            create_bank_forks(),
            RpcHealth::stub(),
        );
        assert_eq!(rm.health_check(), "ok");
    }

    #[test]
    fn test_health_check_with_trusted_validators() {
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));

        let cluster_info = Arc::new(ClusterInfo::default());
        let health_check_slot_distance = 123;
        let override_health_check = Arc::new(AtomicBool::new(false));
        let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
        let rm = RpcRequestMiddleware::new(
            PathBuf::from("/"),
            None,

        let health = Arc::new(RpcHealth::new(
            cluster_info.clone(),
            Some(trusted_validators.clone().into_iter().collect()),
            create_bank_forks(),
        );
            health_check_slot_distance,
            override_health_check.clone(),
        ));

        let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, create_bank_forks(), health);

        // No account hashes for this node or any trusted validators == "behind"
        assert_eq!(rm.health_check(), "behind");
@ -549,6 +505,9 @@ mod tests {
        // No account hashes for any trusted validators == "behind"
        cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
        assert_eq!(rm.health_check(), "behind");
        override_health_check.store(true, Ordering::Relaxed);
        assert_eq!(rm.health_check(), "ok");
        override_health_check.store(false, Ordering::Relaxed);

        // This node is ahead of the trusted validators == "ok"
        cluster_info
@ -579,7 +538,7 @@ mod tests {
            .insert(
                CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
                    trusted_validators[1],
                    vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
                    vec![(1000 + health_check_slot_distance - 1, Hash::default())],
                ))),
                1,
            )
@ -595,7 +554,7 @@ mod tests {
            .insert(
                CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
                    trusted_validators[2],
                    vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
                    vec![(1000 + health_check_slot_distance, Hash::default())],
                ))),
                1,
            )
@ -1,6 +1,6 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request

use crate::commitment::{BlockCommitmentCache, CacheSlotInfo};
use crate::commitment::BlockCommitmentCache;
use core::hash::Hash;
use jsonrpc_core::futures::Future;
use jsonrpc_pubsub::{
@ -44,6 +44,14 @@ pub struct SlotInfo {
    pub root: Slot,
}

#[derive(Default)]
pub struct CacheSlotInfo {
    pub current_slot: Slot,
    pub node_root: Slot,
    pub largest_confirmed_root: Slot,
    pub highest_confirmed_slot: Slot,
}

// A more human-friendly version of Vote, with the bank state signature base58 encoded.
#[derive(Serialize, Deserialize, Debug)]
pub struct RpcVote {
@ -880,7 +888,7 @@ pub(crate) mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
        let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
        bank_forks.write().unwrap().insert(bank1);
@ -974,7 +982,7 @@ pub(crate) mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let alice = Keypair::new();
        let tx = system_transaction::create_account(
            &mint_keypair,
@ -1062,7 +1070,7 @@ pub(crate) mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let mut bank_forks = BankForks::new(0, bank);
        let mut bank_forks = BankForks::new(bank);
        let alice = Keypair::new();

        let past_bank_tx =
@ -1218,7 +1226,7 @@ pub(crate) mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let subscriptions = RpcSubscriptions::new(
            &exit,
            bank_forks,
@ -1270,7 +1278,7 @@ pub(crate) mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let bank = Bank::new(&genesis_config);
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let subscriptions = RpcSubscriptions::new(
            &exit,
            bank_forks,
@ -1368,7 +1376,7 @@ pub(crate) mod tests {
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let bank = Bank::new(&genesis_config);
        let blockhash = bank.last_blockhash();
        let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
        let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
        let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
        bank_forks.write().unwrap().insert(bank1);
377
core/src/send_transaction_service.rs
Normal file
@ -0,0 +1,377 @@
use crate::cluster_info::ClusterInfo;
use solana_ledger::bank_forks::BankForks;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, signature::Signature};
use std::{
    collections::HashMap,
    net::{SocketAddr, UdpSocket},
    sync::{
        atomic::{AtomicBool, Ordering},
        mpsc::{channel, Receiver, Sender},
        Arc, Mutex, RwLock,
    },
    thread::{self, Builder, JoinHandle},
    time::{Duration, Instant},
};

/// Maximum size of the transaction queue
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day

pub struct SendTransactionService {
    thread: JoinHandle<()>,
    sender: Mutex<Sender<TransactionInfo>>,
    send_socket: UdpSocket,
    tpu_address: SocketAddr,
}

struct TransactionInfo {
    signature: Signature,
    wire_transaction: Vec<u8>,
    last_valid_slot: Slot,
}

#[derive(Default, Debug, PartialEq)]
struct ProcessTransactionsResult {
    rooted: u64,
    expired: u64,
    retried: u64,
    failed: u64,
    retained: u64,
}

impl SendTransactionService {
    pub fn new(
        cluster_info: &Arc<ClusterInfo>,
        bank_forks: &Arc<RwLock<BankForks>>,
        exit: &Arc<AtomicBool>,
    ) -> Self {
        let (sender, receiver) = channel::<TransactionInfo>();
        let tpu_address = cluster_info.my_contact_info().tpu;

        let thread = Self::retry_thread(receiver, bank_forks.clone(), tpu_address, exit.clone());
        Self {
            thread,
            sender: Mutex::new(sender),
            send_socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
            tpu_address,
        }
    }

    fn retry_thread(
        receiver: Receiver<TransactionInfo>,
        bank_forks: Arc<RwLock<BankForks>>,
        tpu_address: SocketAddr,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        let mut last_status_check = Instant::now();
        let mut transactions = HashMap::new();
        let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

        Builder::new()
            .name("send-tx-svc".to_string())
            .spawn(move || loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                }

                if let Ok(transaction_info) = receiver.recv_timeout(Duration::from_secs(1)) {
                    if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
                        transactions.insert(transaction_info.signature, transaction_info);
                    } else {
                        datapoint_warn!("send_transaction_service-queue-overflow");
                    }
                }

                if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
                    if !transactions.is_empty() {
                        datapoint_info!(
                            "send_transaction_service-queue-size",
                            ("len", transactions.len(), i64)
                        );
                        let bank_forks = bank_forks.read().unwrap();
                        let root_bank = bank_forks.root_bank();
                        let working_bank = bank_forks.working_bank();

                        let _result = Self::process_transactions(
                            &working_bank,
                            &root_bank,
                            &send_socket,
                            &tpu_address,
                            &mut transactions,
                        );
                    }
                    last_status_check = Instant::now();
                }
            })
            .unwrap()
    }

    fn process_transactions(
        working_bank: &Arc<Bank>,
        root_bank: &Arc<Bank>,
        send_socket: &UdpSocket,
        tpu_address: &SocketAddr,
        transactions: &mut HashMap<Signature, TransactionInfo>,
    ) -> ProcessTransactionsResult {
        let mut result = ProcessTransactionsResult::default();

        transactions.retain(|signature, transaction_info| {
            if root_bank.has_signature(signature) {
                info!("Transaction is rooted: {}", signature);
                result.rooted += 1;
                inc_new_counter_info!("send_transaction_service-rooted", 1);
                false
            } else if transaction_info.last_valid_slot < root_bank.slot() {
                info!("Dropping expired transaction: {}", signature);
                result.expired += 1;
                inc_new_counter_info!("send_transaction_service-expired", 1);
                false
            } else {
                match working_bank.get_signature_status_slot(signature) {
                    None => {
                        // Transaction is unknown to the working bank, it might have been
                        // dropped or landed in another fork. Re-send it
                        info!("Retrying transaction: {}", signature);
                        result.retried += 1;
                        inc_new_counter_info!("send_transaction_service-retry", 1);
                        Self::send_transaction(
                            &send_socket,
                            &tpu_address,
                            &transaction_info.wire_transaction,
                        );
                        true
                    }
                    Some((_slot, status)) => {
                        if status.is_err() {
                            info!("Dropping failed transaction: {}", signature);
                            result.failed += 1;
                            inc_new_counter_info!("send_transaction_service-failed", 1);
                            false
                        } else {
                            result.retained += 1;
                            true
                        }
                    }
                }
            }
        });

        result
    }

    fn send_transaction(
        send_socket: &UdpSocket,
        tpu_address: &SocketAddr,
        wire_transaction: &[u8],
    ) {
        if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
            warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
        }
    }

    pub fn send(&self, signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) {
        inc_new_counter_info!("send_transaction_service-enqueue", 1, 1);
        Self::send_transaction(&self.send_socket, &self.tpu_address, &wire_transaction);

        self.sender
            .lock()
            .unwrap()
            .send(TransactionInfo {
                signature,
                wire_transaction,
                last_valid_slot,
            })
            .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err));
    }

    pub fn join(self) -> thread::Result<()> {
        self.thread.join()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::rpc::tests::new_bank_forks;
    use solana_sdk::{pubkey::Pubkey, signature::Signer};

    #[test]
    fn service_exit() {
        let cluster_info = Arc::new(ClusterInfo::default());
        let bank_forks = new_bank_forks().0;
        let exit = Arc::new(AtomicBool::new(false));

        let send_transaction_service =
            SendTransactionService::new(&cluster_info, &bank_forks, &exit);

        exit.store(true, Ordering::Relaxed);
        send_transaction_service.join().unwrap();
    }

    #[test]
    fn process_transactions() {
        solana_logger::setup();

        let (bank_forks, mint_keypair, _voting_keypair) = new_bank_forks();
        let cluster_info = ClusterInfo::default();
        let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let tpu_address = cluster_info.my_contact_info().tpu;

        let root_bank = Arc::new(Bank::new_from_parent(
            &bank_forks.read().unwrap().working_bank(),
            &Pubkey::default(),
            1,
        ));
        let rooted_signature = root_bank
            .transfer(1, &mint_keypair, &mint_keypair.pubkey())
            .unwrap();

        let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));

        let non_rooted_signature = working_bank
            .transfer(2, &mint_keypair, &mint_keypair.pubkey())
            .unwrap();

        let failed_signature = {
            let blockhash = working_bank.last_blockhash();
            let transaction = solana_sdk::system_transaction::transfer(
                &mint_keypair,
                &Pubkey::default(),
                1,
                blockhash,
            );
            let signature = transaction.signatures[0];
            working_bank.process_transaction(&transaction).unwrap_err();
            signature
        };

        let mut transactions = HashMap::new();

        info!("Expired transactions are dropped...");
        transactions.insert(
            Signature::default(),
            TransactionInfo {
                signature: Signature::default(),
                wire_transaction: vec![],
                last_valid_slot: root_bank.slot() - 1,
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                expired: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Rooted transactions are dropped...");
        transactions.insert(
            rooted_signature,
            TransactionInfo {
                signature: rooted_signature,
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                rooted: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Failed transactions are dropped...");
        transactions.insert(
            failed_signature,
            TransactionInfo {
                signature: failed_signature,
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert!(transactions.is_empty());
        assert_eq!(
            result,
            ProcessTransactionsResult {
                failed: 1,
                ..ProcessTransactionsResult::default()
            }
        );

        info!("Non-rooted transactions are kept...");
        transactions.insert(
            non_rooted_signature,
            TransactionInfo {
                signature: non_rooted_signature,
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert_eq!(transactions.len(), 1);
        assert_eq!(
            result,
            ProcessTransactionsResult {
                retained: 1,
                ..ProcessTransactionsResult::default()
            }
        );
        transactions.clear();

        info!("Unknown transactions are retried...");
        transactions.insert(
            Signature::default(),
            TransactionInfo {
                signature: Signature::default(),
                wire_transaction: vec![],
                last_valid_slot: working_bank.slot(),
            },
        );
        let result = SendTransactionService::process_transactions(
            &working_bank,
            &root_bank,
            &send_socket,
            &tpu_address,
            &mut transactions,
        );
        assert_eq!(transactions.len(), 1);
        assert_eq!(
            result,
            ProcessTransactionsResult {
                retried: 1,
                ..ProcessTransactionsResult::default()
            }
        );
    }
}
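A sketch of the intended call pattern for the new service, modeled on the `service_exit` test above. Construction of `cluster_info` and `bank_forks` is elided, and the signature, payload, and slot values are placeholders:

```rust
use crate::{cluster_info::ClusterInfo, send_transaction_service::SendTransactionService};
use solana_ledger::bank_forks::BankForks;
use solana_sdk::signature::Signature;
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc, RwLock,
};

fn run(cluster_info: Arc<ClusterInfo>, bank_forks: Arc<RwLock<BankForks>>) {
    let exit = Arc::new(AtomicBool::new(false));
    let service = SendTransactionService::new(&cluster_info, &bank_forks, &exit);

    // Fire-and-retry: send() transmits immediately, then the background thread
    // re-sends every ~5s until the signature is rooted, fails, or expires.
    let signature = Signature::default();   // placeholder
    let wire_transaction: Vec<u8> = vec![]; // serialized transaction bytes
    let last_valid_slot = 300;              // e.g. the slot at which the blockhash expires
    service.send(signature, wire_transaction, last_valid_slot);

    exit.store(true, Ordering::Relaxed);
    service.join().unwrap();
}
```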
@ -532,6 +532,8 @@ impl ServeRepair {
            );
            if let Some(packet) = packet {
                res.packets.push(packet);
            } else {
                break;
            }
            if meta.is_parent_set() && res.packets.len() <= max_responses {
                slot = meta.parent_slot;
@ -864,4 +866,64 @@ mod tests {

        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }

    #[test]
    fn run_orphan_corrupted_shred_size() {
        solana_logger::setup();
        let recycler = PacketsRecycler::default();
        let ledger_path = get_tmp_ledger_path!();
        {
            let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
            // Create slots [1, 2] with 1 shred apiece
            let (mut shreds, _) = make_many_slot_entries(1, 2, 1);

            // Make shred for slot 1 too large
            assert_eq!(shreds[0].slot(), 1);
            assert_eq!(shreds[0].index(), 0);
            shreds[0].payload.push(10);
            blockstore
                .insert_shreds(shreds, None, false)
                .expect("Expect successful ledger write");
            let nonce = 42;
            // Make sure repair response is corrupted
            assert!(repair_response::repair_response_packet(
                &blockstore,
                1,
                0,
                &socketaddr_any!(),
                nonce,
            )
            .is_none());

            // Orphan request for slot 2 should only return slot 2, since
            // calling `repair_response_packet` on slot 1's corrupted shred
            // fails and stops the walk up the fork
            let rv: Vec<_> = ServeRepair::run_orphan(
                &recycler,
                &socketaddr_any!(),
                Some(&blockstore),
                2,
                5,
                nonce,
            )
            .expect("run_orphan packets")
            .packets
            .iter()
            .cloned()
            .collect();

            // Verify responses
            let expected = vec![repair_response::repair_response_packet(
                &blockstore,
                2,
                0,
                &socketaddr_any!(),
                nonce,
            )
            .unwrap()];
            assert_eq!(rv, expected);
        }

        Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
    }
}
@ -127,7 +127,7 @@ pub mod tests {
        let bank =
            Bank::new(&create_genesis_config_with_leader(100, &leader_pubkey, 10).genesis_config);
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
        let bf = Arc::new(RwLock::new(BankForks::new(0, bank)));
        let bf = Arc::new(RwLock::new(BankForks::new(bank)));
        let verifier = ShredSigVerifier::new(bf, cache);

        let mut batch = vec![Packets::default()];

@ -256,7 +256,7 @@ pub mod tests {
        let starting_balance = 10_000;
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(starting_balance);

        let bank_forks = BankForks::new(0, Bank::new(&genesis_config));
        let bank_forks = BankForks::new(Bank::new(&genesis_config));

        //start cluster_info1
        let cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
@ -234,6 +234,7 @@ impl Validator {
            block_commitment_cache.clone(),
        ));

        let rpc_override_health_check = Arc::new(AtomicBool::new(false));
        let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
            if ContactInfo::is_valid_address(&node.info.rpc) {
                assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
@ -255,6 +256,7 @@ impl Validator {
                    ledger_path,
                    validator_exit.clone(),
                    config.trusted_validators.clone(),
                    rpc_override_health_check.clone(),
                ),
                PubSubService::new(
                    &subscriptions,
@ -374,7 +376,7 @@ impl Validator {
            (None, None)
        };

        wait_for_supermajority(config, &bank, &cluster_info);
        wait_for_supermajority(config, &bank, &cluster_info, rpc_override_health_check);

        let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
        assert_eq!(
@ -620,7 +622,12 @@ fn new_banks_from_blockstore(
    )
}

fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &ClusterInfo) {
fn wait_for_supermajority(
    config: &ValidatorConfig,
    bank: &Bank,
    cluster_info: &ClusterInfo,
    rpc_override_health_check: Arc<AtomicBool>,
) {
    if config.wait_for_supermajority != Some(bank.slot()) {
        return;
    }
@ -635,8 +642,13 @@ fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &
        if gossip_stake_percent >= 80 {
            break;
        }
        // The normal RPC health checks don't apply as the node is waiting, so feign health to
        // prevent load balancers from removing the node from their list of candidates during a
        // manual restart.
        rpc_override_health_check.store(true, Ordering::Relaxed);
        sleep(Duration::new(1, 0));
    }
    rpc_override_health_check.store(false, Ordering::Relaxed);
}

pub struct TestValidator {
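The handshake above boils down to one shared flag between the startup loop and the RPC health check. A standalone sketch of the pattern (nothing here is validator-specific):

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

fn main() {
    let rpc_override_health_check = Arc::new(AtomicBool::new(false));
    let for_startup = rpc_override_health_check.clone();

    // While waiting for supermajority, force RpcHealth::check() to report Ok...
    for_startup.store(true, Ordering::Relaxed);
    assert!(rpc_override_health_check.load(Ordering::Relaxed));

    // ...and restore normal health reporting once the wait is over.
    for_startup.store(false, Ordering::Relaxed);
}
```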
@ -49,7 +49,7 @@ mod tests {
            &[],
        );
        bank0.freeze();
        let mut bank_forks = BankForks::new(0, bank0);
        let mut bank_forks = BankForks::new(bank0);
        bank_forks.accounts_hash_interval_slots = snapshot_interval_slots;

        let snapshot_config = SnapshotConfig {

@ -96,7 +96,7 @@ fn test_slot_subscription() {
    let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
    let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
    let bank = Bank::new(&genesis_config);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
    let subscriptions = Arc::new(RpcSubscriptions::new(
        &exit,
        bank_forks,
@ -5,7 +5,7 @@ use solana_core::cluster_info;
use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_gossip_pull::{ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS};
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
@ -426,36 +426,35 @@ fn network_run_pull(
            .map(|f| f.filter.bits.len() as usize / 8)
            .sum::<usize>();
        bytes += serialized_size(&caller_info).unwrap() as usize;
        let filters = filters
        let filters: Vec<_> = filters
            .into_iter()
            .map(|f| (caller_info.clone(), f))
            .collect();
        let rsp = network
        let rsp: Vec<_> = network
            .get(&to)
            .map(|node| {
                let mut rsp = vec![];
                rsp.append(
                    &mut node
                        .lock()
                        .unwrap()
                        .process_pull_requests(filters, now)
                        .into_iter()
                        .flatten()
                        .collect(),
                );
                let rsp = node
                    .lock()
                    .unwrap()
                    .generate_pull_responses(&filters)
                    .into_iter()
                    .flatten()
                    .collect();
                node.lock().unwrap().process_pull_requests(filters, now);
                rsp
            })
            .unwrap();
        bytes += serialized_size(&rsp).unwrap() as usize;
        msgs += rsp.len();
        if let Some(node) = network.get(&from) {
            node.lock()
                .unwrap()
                .mark_pull_request_creation_time(&from, now);
            overhead += node
                .lock()
                .unwrap()
                .process_pull_response(&from, &timeouts, rsp, now);
            let mut node = node.lock().unwrap();
            node.mark_pull_request_creation_time(&from, now);
            let mut stats = ProcessPullStats::default();
            let (vers, vers_expired_timeout) =
                node.filter_pull_responses(&timeouts, rsp, now, &mut stats);
            node.process_pull_responses(&from, vers, vers_expired_timeout, now, &mut stats);
            overhead += stats.failed_insert;
            overhead += stats.failed_timeout;
        }
        (bytes, msgs, overhead)
    })
@ -4,13 +4,14 @@ extern crate log;
use rayon::iter::*;
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::gossip_service::GossipService;
use solana_ledger::bank_forks::BankForks;

use solana_perf::packet::Packet;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::timing::timestamp;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;

@ -27,6 +28,28 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSoc
    )
}

fn test_node_with_bank(
    node_keypair: Keypair,
    exit: &Arc<AtomicBool>,
    bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
    let keypair = Arc::new(node_keypair);
    let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
    let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), keypair));
    let gossip_service = GossipService::new(
        &cluster_info,
        Some(bank_forks),
        test_node.sockets.gossip,
        exit,
    );
    let _ = cluster_info.my_contact_info();
    (
        cluster_info,
        gossip_service,
        test_node.sockets.tvu.pop().unwrap(),
    )
}

/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
@ -181,3 +204,120 @@ pub fn cluster_info_retransmit() {
    dr2.join().unwrap();
    dr3.join().unwrap();
}

#[test]
#[ignore]
pub fn cluster_info_scale() {
    use solana_measure::measure::Measure;
    use solana_perf::test_tx::test_tx;
    use solana_runtime::bank::Bank;
    use solana_runtime::genesis_utils::{
        create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
    };
    solana_logger::setup();
    let exit = Arc::new(AtomicBool::new(false));
    let num_nodes: usize = std::env::var("NUM_NODES")
        .unwrap_or_else(|_| "10".to_string())
        .parse()
        .expect("could not parse NUM_NODES as a number");

    let vote_keypairs: Vec<_> = (0..num_nodes)
        .map(|_| ValidatorVoteKeypairs::new_rand())
        .collect();
    let genesis_config_info = create_genesis_config_with_vote_accounts(10_000, &vote_keypairs, 100);
    let bank0 = Bank::new(&genesis_config_info.genesis_config);
    let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));

    let nodes: Vec<_> = vote_keypairs
        .into_iter()
        .map(|keypairs| test_node_with_bank(keypairs.node_keypair, &exit, bank_forks.clone()))
        .collect();
    let ci0 = nodes[0].0.my_contact_info();
    for node in &nodes[1..] {
        node.0.insert_info(ci0.clone());
    }

    let mut time = Measure::start("time");
    let mut done;
    let mut success = false;
    for _ in 0..30 {
        done = true;
        for (i, node) in nodes.iter().enumerate() {
            warn!("node {} peers: {}", i, node.0.gossip_peers().len());
            if node.0.gossip_peers().len() != num_nodes - 1 {
                done = false;
                break;
            }
        }
        if done {
            success = true;
            break;
        }
        sleep(Duration::from_secs(1));
    }
    time.stop();
    warn!("found {} nodes in {} success: {}", num_nodes, time, success);

    for num_votes in 1..1000 {
        let mut time = Measure::start("votes");
        let tx = test_tx();
        warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
        nodes[0].0.push_vote(0, tx.clone());
        let mut success = false;
        for _ in 0..(30 * 5) {
            let mut not_done = 0;
            let mut num_old = 0;
            let mut num_push_total = 0;
            let mut num_pushes = 0;
            let mut num_pulls = 0;
            let mut num_inserts = 0;
            for node in nodes.iter() {
                //if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
                let has_tx = node
                    .0
                    .get_votes(0)
                    .1
                    .iter()
                    .filter(|v| v.message.account_keys == tx.message.account_keys)
                    .count();
                num_old += node.0.gossip.read().unwrap().push.num_old;
                num_push_total += node.0.gossip.read().unwrap().push.num_total;
                num_pushes += node.0.gossip.read().unwrap().push.num_pushes;
                num_pulls += node.0.gossip.read().unwrap().pull.num_pulls;
                num_inserts += node.0.gossip.read().unwrap().crds.num_inserts;
                if has_tx == 0 {
                    not_done += 1;
                }
            }
            warn!("not_done: {}/{}", not_done, nodes.len());
            warn!("num_old: {}", num_old);
            warn!("num_push_total: {}", num_push_total);
            warn!("num_pushes: {}", num_pushes);
            warn!("num_pulls: {}", num_pulls);
            warn!("num_inserts: {}", num_inserts);
            success = not_done < (nodes.len() / 20);
            if success {
                break;
            }
            sleep(Duration::from_millis(200));
        }
        time.stop();
        warn!(
            "propagated vote {} in {} success: {}",
            num_votes, time, success
        );
        sleep(Duration::from_millis(200));
        for node in nodes.iter() {
            node.0.gossip.write().unwrap().push.num_old = 0;
            node.0.gossip.write().unwrap().push.num_total = 0;
            node.0.gossip.write().unwrap().push.num_pushes = 0;
            node.0.gossip.write().unwrap().pull.num_pulls = 0;
            node.0.gossip.write().unwrap().crds.num_inserts = 0;
        }
    }

    exit.store(true, Ordering::Relaxed);
    for node in nodes {
        node.1.join().unwrap();
    }
}
@ -374,14 +374,17 @@ mod tests {
        // send signal to cleanup slots
        let (sender, receiver) = channel();
        sender.send(n).unwrap();
        let mut next_purge_batch = 0;
        let mut last_purge_slot = 0;
        let mut last_compaction_slot = 0;
        LedgerCleanupService::cleanup_ledger(
            &receiver,
            &blockstore,
            max_ledger_shreds,
            &mut next_purge_batch,
            &mut last_purge_slot,
            10,
            None,
            &mut last_compaction_slot,
            10,
        )
        .unwrap();
@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "1.2.0"
version = "1.2.2"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -1,20 +1,20 @@

                      .----------------------------------------.
                      |  Solana Runtime                        |
                      |                                        |
                      |   .------------.     .------------.    |
                      |   |            |     |            |    |
              .------>|   |  Verifier  +---->|  Accounts  |    |
              |       |   |            |     |            |    |
.----------.  |       |   `------------`     `------------`    |
|          +--`       |                           ^            |
|  Client  |          |          LoadAccounts  |               |
|          +--.       |          .--------------`              |
`----------`  |       |          |                             |
              |       |   .------+-----.     .-------------.   |
              |       |   |            |     |             |   |
              `------>|   |   Loader   +---->| Interpreter |   |
                      |   |            |     |             |   |
                      |   `------------`     `-------------`   |
                      |                                        |
                      `----------------------------------------`

                   .-------------------------------------.
                   |  Solana Runtime                     |
                   |                                     |
.----------.       |  .------------.   .------------.    |
| Program  |       |  |    BPF     |   | Executable |    |
| Author   +------>|  |  Bytecode  +-->|  Account   |    |
|          |       |  |  Verifier  |   |            |    |
`----------`       |  `------------`   `------------`    |
                   |                         |           |
                   |        .----------------`           |
                   |        | LoadAccounts               |
                   |        V                            |
.----------.       |  .------------.   .-------------.   |
|          |       |  |    BPF     |   |     BPF     |   |
|  Client  +------>|  |   Loader   +-->| Interpreter |   |
|          |       |  |            |   |             |   |
`----------`       |  `------------`   `-------------`   |
                   |                                     |
                   `-------------------------------------`
@ -13,6 +13,11 @@ else
  )
fi

if [[ -z "$LATEST_SOLANA_RELEASE_VERSION" ]]; then
  echo Error: release version not defined
  exit 1
fi

set -x
find html/ -name \*.html -exec sed -i "s/LATEST_SOLANA_RELEASE_VERSION/$LATEST_SOLANA_RELEASE_VERSION/g" {} \;
if [[ -n $CI ]]; then
@ -24,12 +24,16 @@
* [Command-line Reference](cli/usage.md)
* [Solana Clusters](clusters.md)
* [Develop Applications](apps/README.md)
  * [Accounts and Rent](apps/rent.md)
  * [Example: Web Wallet](apps/webwallet.md)
  * [Example: Tic-Tac-Toe](apps/tictactoe.md)
  * [Drones](apps/drones.md)
  * [Anatomy of a Transaction](transaction.md)
  * [JSON RPC API](apps/jsonrpc-api.md)
  * [JavaScript API](apps/javascript-api.md)
  * [Builtin Programs](apps/builtins/README.md)
* [Integration Guides](integrations/README.md)
  * [Exchange](integrations/exchange.md)
* [Run a Validator](running-validator/README.md)
  * [Validator Requirements](running-validator/validator-reqs.md)
  * [Start a Validator](running-validator/validator-start.md)
@ -95,14 +99,15 @@
  * [Validator Timestamp Oracle](implemented-proposals/validator-timestamp-oracle.md)
  * [Commitment](implemented-proposals/commitment.md)
  * [Snapshot Verification](implemented-proposals/snapshot-verification.md)
  * [Cross-Program Invocation](implemented-proposals/cross-program-invocation.md)
  * [Program Derived Addresses](implemented-proposals/program-derived-addresses.md)
* [Accepted Design Proposals](proposals/README.md)
  * [Ledger Replication](proposals/ledger-replication-to-implement.md)
  * [Optimistic Confirmation and Slashing](proposals/optimistic-confirmation-and-slashing.md)
  * [Secure Vote Signing](proposals/vote-signing-to-implement.md)
  * [Cluster Test Framework](proposals/cluster-test-framework.md)
  * [Validator](proposals/validator-proposal.md)
  * [Simple Payment and State Verification](proposals/simple-payment-and-state-verification.md)
  * [Cross-Program Invocation](proposals/cross-program-invocation.md)
  * [Program Keys and Signatures](proposals/program-keys-and-signatures.md)
  * [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md)
  * [Snapshot Verification](proposals/snapshot-verification.md)
  * [Bankless Leader](proposals/bankless-leader.md)
@ -1,6 +1,6 @@
# Programming Model

An _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to user-contributed _programs_. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transferred. Instructions are executed sequentially and atomically. If any instruction is invalid, any changes made within the transaction are discarded.
An _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to _programs_ deployed by app developers beforehand. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transferred. Instructions are executed sequentially and atomically for each transaction. If any instruction is invalid, all account changes in the transaction are discarded.

### Accounts and Signatures

@ -10,7 +10,7 @@ The transaction also marks some accounts as _read-only accounts_. The runtime pe

### Recent Blockhash

A transaction includes a recent blockhash to prevent duplication and to give transactions lifetimes. Any transaction that is completely identical to a previous one is rejected, so adding a newer blockhash allows multiple transactions to repeat the exact same action. Transactions also have lifetimes that are defined by the blockhash, as any transaction whose blockhash is too old will be rejected.
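A small illustration of the lifetime rule, reusing the 1.2-era `system_transaction::transfer` helper that appears in the tests earlier in this diff; only the blockhash distinguishes two otherwise-identical transfers:

```rust
use solana_sdk::{
    hash::Hash, pubkey::Pubkey, signature::Keypair, system_transaction,
    transaction::Transaction,
};

fn repeatable_transfer(payer: &Keypair, to: &Pubkey, recent_blockhash: Hash) -> Transaction {
    // Submitting this twice with the same blockhash is rejected as a duplicate;
    // fetching a fresh blockhash makes the repeated transfer a new transaction.
    system_transaction::transfer(payer, to, 1, recent_blockhash)
}
```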
### Instructions

@ -20,25 +20,32 @@ Each instruction specifies a single program account \(which must be marked execu



As shown in the diagram above a client creates a program and compiles it to an ELF shared object containing BPF bytecode and sends it to the Solana cluster. The cluster stores the program locally and makes it available to clients via a _program ID_. The program ID is a _public key_ generated by the client and is used to reference the program in subsequent transactions.
As shown in the diagram above, a program author creates a program, compiles it to an ELF shared object containing BPF bytecode, and uploads it to the Solana cluster with a special _deploy_ transaction. The cluster makes it available to clients via a _program ID_. The program ID is an _address_ specified when deploying and is used to reference the program in subsequent transactions.

A program may be written in any programming language that can target the Berkeley Packet Filter \(BPF\) safe execution environment. The Solana SDK offers the best support for C programs, which are compiled to BPF using the [LLVM compiler infrastructure](https://llvm.org).
A program may be written in any programming language that can target the Berkeley Packet Filter \(BPF\) safe execution environment. The Solana SDK offers the best support for C/C++ and Rust programs, which are compiled to BPF using the [LLVM compiler infrastructure](https://llvm.org).
## Storing State between Transactions

If the program needs to store state between transactions, it does so using _accounts_. Accounts are similar to files in operating systems such as Linux. Like a file, an account may hold arbitrary data and that data persists beyond the lifetime of a program. Also like a file, an account includes metadata that tells the runtime who is allowed to access the data and how. Unlike a file, the account includes metadata for the lifetime of the file. That lifetime is expressed in "tokens", which is a number of fractional native tokens, called _lamports_. Accounts are held in validator memory and pay "rent" to stay there. Each validator periodically scans all accounts and collects rent. Any account that drops to zero lamports is purged.
If the program needs to store state between transactions, it does so using _accounts_. Accounts are similar to files in operating systems such as Linux. Like a file, an account may hold arbitrary data and that data persists beyond the lifetime of a program. Also like a file, an account includes metadata that tells the runtime who is allowed to access the data and how.

If an account is marked "executable", it will only be used by a _loader_ to run programs. For example, a BPF-compiled program is marked executable and loaded by the BPF loader. No program is allowed to modify the contents of an executable account.

Unlike a file, the account includes metadata for the lifetime of the file. That lifetime is expressed in "tokens", which is a number of fractional native tokens, called _lamports_. Accounts are held in validator memory and pay ["rent"](rent.md) to stay there. Each validator periodically scans all accounts and collects rent. Any account that drops to zero lamports is purged.
An account also includes "owner" metadata. The owner is a program ID. The runtime grants the program write access to the account if its ID matches the owner. If an account is not owned by a program, the program is permitted to read its data and credit the account.
|
||||
In the same way that a Linux user uses a path to look up a file, a Solana client uses an _address_ to look up an account. The address is usually a 256-bit public key. To create an account with a public-key address, the client generates a _keypair_ and registers its public key using the `CreateAccount` instruction with preallocated fixed storage size in bytes. In fact, the account address can be an arbitrary 32 bytes, and there is a mechanism for advanced users to create derived addresses (`CreateAccountWithSeed`). Addresses are presented in Base58 encoding on user interfaces.
|
||||
|
||||
In the same way that a Linux user uses a path to look up a file, a Solana client uses public keys to look up accounts. To create an account, the client generates a _keypair_ and registers its public key using the `CreateAccount` instruction. The account created by `CreateAccount` is called a _system account_ and is owned by a built-in program called the System program. The System program allows clients to transfer lamports and assign account ownership.
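A minimal client-side sketch of `CreateAccount`, assuming the Rust SDK; `payer`, `lamports`, `owner_program_id`, and `client` are hypothetical placeholders:

```rust,ignore
// Fund a new 64-byte account at `new_account`'s address, owned by
// `owner_program_id`. Both the payer and the new account sign.
let new_account = Keypair::new();
let instruction = system_instruction::create_account(
    &payer.pubkey(),       // funding account
    &new_account.pubkey(), // address of the account being created
    lamports,              // initial balance
    64,                    // preallocated data size in bytes
    &owner_program_id,     // program that will own the account
);
let message = Message::new(vec![instruction]);
client.send_message(&[&payer, &new_account], &message);
```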
## Ownership of Accounts and Assignment to Programs
The runtime only permits the owner to debit the account or modify its data. The program then defines additional rules for whether the client can modify accounts it owns. In the case of the System program, it allows users to transfer lamports by recognizing transaction signatures. If it sees the client signed the transaction using the keypair's _private key_, it knows the client authorized the token transfer.
The created account is initialized to be _owned_ by a built-in program called the System program and is aptly called a _system account_. An account includes "owner" metadata. The owner is a program ID. The runtime grants the program write access to the account if its ID matches the owner. For the case of the System program, the runtime allows clients to transfer lamports and, importantly, _assign_ account ownership, meaning changing the owner to a different program ID. If an account is not owned by a program, the program is only permitted to read its data and credit the account.
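For illustration, a rough sketch of funding a system account and handing it to a program in one transaction, with hypothetical pubkeys, keypairs, and program ID (note that `assign` requires the assigned account's own signature):

```rust,ignore
let instructions = vec![
    // Move lamports from Alice into the new system account.
    system_instruction::transfer(&alice_pubkey, &new_account_pubkey, lamports),
    // Change the account's owner from the System program to `my_program_id`.
    system_instruction::assign(&new_account_pubkey, &my_program_id),
];
let message = Message::new(instructions);
client.send_message(&[&alice_keypair, &new_account_keypair], &message);
```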
Also, if an account is marked "executable" in metadata, it will only be used by a _loader_ to run programs. For example, a BPF-compiled program is marked executable and loaded by the BPF loader when executing its transactions. No program is allowed to modify the contents of an executable account once deployed.

## Runtime Capability of Programs on Accounts

The runtime only permits the owner program to debit the account or modify its data. The program then defines additional rules for whether the client can modify accounts it owns. In the case of the System program, it allows users to transfer lamports by recognizing transaction signatures. If it sees the client signed the transaction using the keypair's _private key_, it knows the client authorized the token transfer.

In other words, the entire set of accounts owned by a given program can be regarded as a key-value store, where a key is the account address and the value is program-specific arbitrary binary data. A program author decides how to manage the program's whole state, possibly across many accounts.

After the runtime executes each of the transaction's instructions, it uses the account metadata to verify that none of the access rules were violated. If a program violates an access rule, the runtime discards all account changes made by all instructions and marks the transaction as failed.

## Smart Contracts

Programs don't always require transaction signatures, as the System program does. Instead, the program may manage _smart contracts_. A smart contract is a set of constraints that, once satisfied, signal to a program that a token transfer or account update is permitted. For example, one could use the Budget program to create a smart contract that authorizes a token transfer only after some date. Once presented with evidence that the date has passed, the contract progresses, and the token transfer completes.
61 docs/src/apps/builtins/README.md Normal file
@ -0,0 +1,61 @@
# Builtin Programs

Solana contains a small handful of builtin programs, which are required to run validator nodes. Unlike third-party programs, the builtin programs are part of the validator implementation and can be upgraded as part of cluster upgrades. Upgrades may occur to add features, fix bugs, or improve performance. Interface changes to individual instructions should rarely, if ever, occur. Instead, when change is needed, new instructions are added and previous ones are marked deprecated. Apps can upgrade on their own timeline without concern of breakages across upgrades.

The builtin programs include the System, Config, Stake, Vote, and BPFLoader programs. For each, we provide the program ID and describe each supported instruction. A transaction can mix and match instructions from different programs, as well as include instructions from third-party programs, as sketched below.
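A hedged sketch of such a mixed transaction, assuming the usual Rust SDK crates and hypothetical keypairs and pubkeys:

```rust,ignore
use solana_sdk::{message::Message, system_instruction};
use solana_stake_program::stake_instruction;

// One transaction carrying instructions for two different builtin programs.
let instructions = vec![
    // System program: pay a lamport to Bob.
    system_instruction::transfer(&payer_pubkey, &bob_pubkey, 1),
    // Stake program: deactivate a stake account we are authorized over.
    stake_instruction::deactivate_stake(&stake_pubkey, &stake_authority_pubkey),
];
let message = Message::new(instructions);
client.send_message(&[&payer_keypair, &stake_authority_keypair], &message);
```

Both instructions succeed or fail together, since a transaction is atomic.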
## System Program

Create accounts and transfer lamports between them

* Program ID: `11111111111111111111111111111111`
* Instructions: [SystemInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/system_instruction/enum.SystemInstruction.html)

## Config Program

Add configuration data to the chain and the list of public keys that are permitted to modify it

* Program ID: `Config1111111111111111111111111111111111111`
* Instructions: [config_instruction](https://docs.rs/solana-config-program/LATEST_SOLANA_RELEASE_VERSION/solana_config_program/config_instruction/index.html)

Unlike the other programs, the Config program does not define any individual instructions. It has just one implicit instruction, a "store" instruction. Its instruction data is a set of keys that gate access to the account, and the data to store in it.

## Stake Program

Create stake accounts and delegate them to validators

* Program ID: `Stake11111111111111111111111111111111111111`
* Instructions: [StakeInstruction](https://docs.rs/solana-stake-program/LATEST_SOLANA_RELEASE_VERSION/solana_stake_program/stake_instruction/enum.StakeInstruction.html)

## Vote Program

Create vote accounts and vote on blocks

* Program ID: `Vote111111111111111111111111111111111111111`
* Instructions: [VoteInstruction](https://docs.rs/solana-vote-program/LATEST_SOLANA_RELEASE_VERSION/solana_vote_program/vote_instruction/enum.VoteInstruction.html)

## BPF Loader

Add programs to the chain.

* Program ID: `BPFLoader1111111111111111111111111111111111`
* Instructions: [LoaderInstruction](https://docs.rs/solana-sdk/LATEST_SOLANA_RELEASE_VERSION/solana_sdk/loader_instruction/enum.LoaderInstruction.html)

The BPF Loader marks itself as the "owner" of the executable account it creates to store your program. When a user invokes an instruction via a program ID, the Solana runtime will load both your executable account and its owner, the BPF Loader. The runtime then passes your program to the BPF Loader to process the instruction.
@ -27,10 +27,12 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
* [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor)
* [getFees](jsonrpc-api.md#getfees)
* [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getIdentity](jsonrpc-api.md#getidentity)
* [getInflation](jsonrpc-api.md#getinflation)
* [getInflationGovernor](jsonrpc-api.md#getinflationgovernor)
* [getInflationRate](jsonrpc-api.md#getinflationrate)
* [getLargestAccounts](jsonrpc-api.md#getlargestaccounts)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)

@ -39,10 +41,6 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses)
* [getSlot](jsonrpc-api.md#getslot)
* [getSlotLeader](jsonrpc-api.md#getslotleader)
* [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment)
* [getStoragePubkeysForSlot](jsonrpc-api.md#getstoragepubkeysforslot)
* [getStorageTurn](jsonrpc-api.md#getstorageturn)
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
* [getSupply](jsonrpc-api.md#getsupply)
* [getTransactionCount](jsonrpc-api.md#gettransactioncount)
* [getVersion](jsonrpc-api.md#getversion)
@ -494,7 +492,8 @@ Returns the fee calculator associated with the query blockhash, or `null` if the

#### Parameters:

* `blockhash: <string>`, query blockhash as a Base58 encoded string
* `<string>` - query blockhash as a Base58 encoded string
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:
@ -542,6 +541,34 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":{"feeRateGovernor":{"burnPercent":50,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
```

### getFees

Returns a recent block hash from the ledger, a fee schedule that can be used to compute the cost of submitting a transaction using it, and the last slot in which the blockhash will be valid.

#### Parameters:

* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields:

* `blockhash: <string>` - a Hash as base-58 encoded string
* `feeCalculator: <object>` - FeeCalculator object, the fee schedule for this block hash
* `lastValidSlot: <u64>` - last slot in which a blockhash will be valid

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getFees"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"lamportsPerSignature":5000},"lastValidSlot":297}},"id":1}
```

### getFirstAvailableBlock

Returns the slot of the lowest confirmed block that has not been purged from the ledger
@ -609,33 +636,59 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"identity": "2r1F4iWqVcb8M1DbAjQuFpebkQHY9hcVU4WuW2DJBppN"},"id":1}
```

### getInflation
### getInflationGovernor

Returns the inflation configuration of the cluster
Returns the current inflation governor

#### Parameters:

None
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result field will be an Inflation object with the following fields:
The result field will be a JSON object with the following fields:

* `initial: <f64>`, the initial inflation percentage from time 0
* `terminal: <f64>`, terminal inflation percentage
* `taper: <f64>`, rate per year at which inflation is lowered
* `foundation: <f64>`, percentage of total inflation allocated to the foundation
* `foundationTerm: <f64>`, duration of foundation pool inflation in years
* `storage: <f64>`, percentage of total inflation allocated to storage rewards

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflation"}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflationGovernor"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"taper":0.15,"terminal":0.015},"id":1}
```

### getInflationRate

Returns the specific inflation values for a particular epoch

#### Parameters:

* `<u64>` - (optional) Epoch, default is the current epoch

#### Results:

The result field will be a JSON object with the following fields:

* `total: <f64>`, total inflation
* `validator: <f64>`, inflation allocated to validators
* `foundation: <f64>`, inflation allocated to the foundation
* `epoch: <f64>`, epoch for which these values are valid

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflationRate"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"epoch":100,"foundation":0.001,"total":0.149,"validator":0.148},"id":1}
```

### getLargestAccounts
@ -769,7 +822,7 @@ An RpcResponse containing a JSON object consisting of a string blockhash and Fee
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"lamportsPerSignature":5000}}},"id":1}
```

### getSignatureStatuses
@ -862,93 +915,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
```

### getSlotsPerSegment

Returns the current storage segment size in terms of slots

#### Parameters:

* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

* `<u64>` - Number of slots in a storage segment

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":1024,"id":1}
```

### getStoragePubkeysForSlot

Returns the storage Pubkeys for a particular slot

#### Parameters:

None

#### Results:

An array of Pubkeys, as base-58 encoded strings

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStoragePubkeysForSlot","params":[1]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC"],"id":1}
```

### getStorageTurn

Returns the current storage turn's blockhash and slot

#### Parameters:

None

#### Results:

A JSON object consisting of

* `blockhash: <string>` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
* `slot: <u64>` - the current storage turn slot

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "slot": 2048},"id":1}
```

### getStorageTurnRate

Returns the current storage turn rate in terms of slots per turn

#### Parameters:

None

#### Results:

* `<u64>` - Number of slots in storage turn

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":1024,"id":1}
```

### getSupply

Returns information about the current supply.
@ -1017,7 +983,7 @@ The result field will be a JSON object with the following fields:
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"solana-core": "1.2.0"},"id":1}
{"jsonrpc":"2.0","result":{"solana-core": "1.2.2"},"id":1}
```

### getVoteAccounts
@ -1099,11 +1065,20 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m

### sendTransaction

Creates new transaction
Submits a signed transaction to the cluster for processing.

Before submitting, the following preflight checks are performed:

1. The transaction signatures are verified
2. The transaction is simulated against the latest max confirmed bank and on failure an error will be returned

Preflight checks may be disabled if desired.

#### Parameters:

* `<string>` - fully-signed Transaction, as base-58 encoded string
* `<object>` - (optional) Configuration object containing the following field:
  * `skipPreflight: <bool>` - if true, skip the preflight transaction checks (default: false)

#### Results:
@ -1132,6 +1107,10 @@ Simulate sending a transaction
#### Results:

An RpcResponse containing a TransactionStatus object
The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields:

* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
* `logs: <array | null>` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure)

#### Example:
55 docs/src/apps/rent.md Normal file
@ -0,0 +1,55 @@
# Storage Rent for Accounts

Keeping accounts alive on Solana incurs a storage cost called _rent_ because the cluster must actively maintain the data to process any future transactions on it. This is different from Bitcoin and Ethereum, where storing accounts doesn't incur any costs.

Rent is debited from an account's balance by the runtime upon the first access in the current epoch by a transaction (including the initial account creation), or once per epoch if there are no transactions. The fee is currently a fixed rate, measured in bytes-times-epochs. The fee may change in the future.

For the sake of simple rent calculation, rent is always collected for a single, full epoch. Rent is not pro-rated, meaning there are neither fees nor refunds for partial epochs. This means that, on account creation, the first rent collected isn't for the current partial epoch, but collected up front for the next full epoch. Subsequent rent collections are for further future epochs. On the other end, if the balance of an already-rent-collected account drops below another rent fee mid-epoch, the account will continue to exist through the current epoch and be purged immediately at the start of the upcoming epoch.

Accounts can be exempt from paying rent if they maintain a minimum balance. This rent-exemption is described below.

## Calculation of rent

Note: The rent rate can change in the future.

As of writing, the fixed rent fee is 19.055441478439427 lamports per byte-epoch on the testnet and mainnet-beta clusters. An [epoch](../terminology.md#epoch) is targeted to be 2 days. (For devnet, the rent fee is 0.3608183131797095 lamports per byte-epoch with its 54m36s-long epoch.)

This value is calculated to target 0.01 SOL per mebibyte-day (exactly matching 3.56 SOL per mebibyte-year):

```
Rent fee: 19.055441478439427 = 10_000_000 (0.01 SOL) * 365 (approx. days in a year) / (1024 * 1024) (1 MiB) / (365.25/2) (epochs in 1 year)
```

Rent calculation is done with `f64` precision, and the final result is truncated to `u64` in lamports.

The rent calculation includes account metadata (address, owner, lamports, etc) in the size of an account. Therefore the smallest an account can be for rent calculations is 128 bytes.

For example, an account is created with the initial transfer of 10,000 lamports and no additional data. Rent is immediately debited from it on creation, resulting in a balance of 7,561 lamports:

```
Rent: 2,439 = 19.055441478439427 (rent rate) * 128 bytes (minimum account size) * 1 (epoch)
Account Balance: 7,561 = 10,000 (transferred lamports) - 2,439 (this account's rent fee for an epoch)
```

The account balance will be reduced to 5,122 lamports at the next epoch even if there is no activity:

```
Account Balance: 5,122 = 7,561 (current balance) - 2,439 (this account's rent fee for an epoch)
```

Accordingly, a minimum-size account will be immediately removed after creation if the transferred lamports are less than or equal to 2,439.
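The arithmetic above can be written down directly; a minimal sketch using the current fixed rate (the constants and function are illustrative, not an SDK API):

```rust,ignore
/// Illustrative only: the current testnet/mainnet-beta rate quoted above.
const RENT_PER_BYTE_EPOCH: f64 = 19.055441478439427;
/// Account metadata counted toward the rentable size.
const ACCOUNT_METADATA_SIZE: u64 = 128;

fn rent_due(data_len: u64, epochs: u64) -> u64 {
    let size = (ACCOUNT_METADATA_SIZE + data_len) as f64;
    // Computed with f64 precision, then truncated to u64 lamports, as described above.
    (RENT_PER_BYTE_EPOCH * size * epochs as f64) as u64
}

// rent_due(0, 1) == 2_439, matching the worked example.
```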
## Rent exemption

Alternatively, an account can be made entirely exempt from rent collection by depositing at least 2 years' worth of rent. This is checked every time an account's balance is reduced, and rent is immediately debited once the balance goes below the minimum amount.

Program executable accounts are required by the runtime to be rent-exempt to avoid being purged.

Note: Use the [`getMinimumBalanceForRentExemption` RPC endpoint](jsonrpc-api.md#getminimumbalanceforrentexemption) to calculate the minimum balance for a particular account size. The following calculation is illustrative only.

For example, a program executable with a size of 15,000 bytes requires a balance of 105,290,880 lamports (=~ 0.105 SOL) to be rent-exempt:

```
105,290,880 = 19.055441478439427 (fee rate) * (128 + 15_000) (account size including metadata) * ((365.25/2) * 2) (epochs in 2 years)
```
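A sketch of querying that endpoint from Rust, assuming a node listening on localhost and the `solana-client` crate:

```rust,ignore
use solana_client::rpc_client::RpcClient;

let client = RpcClient::new("http://localhost:8899".to_string());
// Ask the cluster for the exact rent-exempt minimum for 15,000 bytes of
// account data, rather than hand-computing it from the current rate.
let min_balance = client.get_minimum_balance_for_rent_exemption(15_000)?;
println!("rent-exempt minimum: {} lamports", min_balance);
```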
@ -44,6 +44,8 @@ $ solana-validator \
--limit-ledger-size
```

The `--trusted-validator`s are operated by Solana

## Testnet
* Testnet is where we stress test recent release features on a live

@ -72,6 +74,8 @@ $ solana-validator \
--identity ~/validator-keypair.json \
--vote-account ~/vote-account-keypair.json \
--trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \
--trusted-validator Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQAD \
--trusted-validator 9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv \
--no-untrusted-rpc \
--ledger ~/validator-ledger \
--rpc-port 8899 \
@ -82,6 +86,11 @@ $ solana-validator \
--limit-ledger-size
```

The identities of the `--trusted-validator`s are:
* `5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on` - testnet.solana.com (Solana)
* `Ft5fbkqNa76vnsjYNwjDZUXoTWpP7VYm3mtsaQckQADN` - Certus One
* `9QxCLckBiJc783jnMvXZubK4wH86Eqqvashtrwvcsgkv` - Algo|Stake

## Mainnet Beta
A permissionless, persistent cluster for early token holders and launch partners.
Currently smart contracts, rewards, and inflation are disabled.

@ -114,6 +123,8 @@ $ solana-validator \
--dynamic-port-range 8000-8010 \
--entrypoint mainnet-beta.solana.com:8001 \
--expected-genesis-hash 5eykt4UsFv8P8NJdTREpY1vzqKqZKvdpKuc147dw2N9d \
--expected-shred-version 54208 \
--expected-shred-version 64864 \
--limit-ledger-size
```

All four `--trusted-validator`s are operated by Solana
95 docs/src/implemented-proposals/cross-program-invocation.md Normal file
@ -0,0 +1,95 @@
# Cross-Program Invocation

## Problem

In today's implementation, a client can create a transaction that modifies two accounts, each owned by a separate on-chain program:

```rust,ignore
let message = Message::new(vec![
    token_instruction::pay(&alice_pubkey),
    acme_instruction::launch_missiles(&bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

However, the current implementation does not allow the `acme` program to conveniently invoke `token` instructions on the client's behalf:

```rust,ignore
let message = Message::new(vec![
    acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

Currently, there is no way to create instruction `pay_and_launch_missiles` that executes `token_instruction::pay` from the `acme` program. A possible workaround is to extend the `acme` program with the implementation of the `token` program and create `token` accounts with `ACME_PROGRAM_ID`, which the `acme` program is permitted to modify. With that workaround, `acme` can modify token-like accounts created by the `acme` program, but not token accounts created by the `token` program.

## Proposed Solution

The goal of this design is to modify Solana's runtime such that an on-chain program can invoke an instruction from another program.

Given two on-chain programs `token` and `acme`, each implementing instructions `pay()` and `launch_missiles()` respectively, we would ideally like to implement the `acme` module with a call to a function defined in the `token` module:

```rust,ignore
mod acme {
    use token;

    fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
        ...
    }

    fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
        token::pay(&keyed_accounts[1..])?;

        launch_missiles(keyed_accounts)
    }
}
```

The above code would require that the `token` crate be dynamically linked so that a custom linker could intercept calls and validate accesses to `keyed_accounts`. Even though the client intends to modify both `token` and `acme` accounts, only the `token` program is permitted to modify the `token` account, and only the `acme` program is allowed to modify the `acme` account.

Backing off from that ideal direct cross-program call, a slightly more verbose solution is to allow `acme` to invoke `token` by issuing a token instruction via the runtime.

```rust,ignore
mod acme {
    use token_instruction;

    fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
        ...
    }

    fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
        let alice_pubkey = keyed_accounts[1].key;
        let instruction = token_instruction::pay(&alice_pubkey);
        invoke(&instruction, accounts)?;

        launch_missiles(keyed_accounts)
    }
}
```

`invoke()` is built into Solana's runtime and is responsible for routing the given instruction to the `token` program via the instruction's `program_id` field.

Before invoking `pay()`, the runtime must ensure that `acme` didn't modify any accounts owned by `token`. It does this by applying the runtime's policy to the current state of the accounts at the time `acme` calls `invoke` vs. the initial state of the accounts at the beginning of `acme`'s instruction. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme` by again applying the runtime's policy, but this time with the `token` program ID. Lastly, after `pay_and_launch_missiles()` completes, the runtime must apply the runtime policy one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume `pay_and_launch_missiles()` as a whole made no invalid account changes, and therefore commit all these account modifications.

### Instructions that require privileges

The runtime uses the privileges granted to the caller program to determine what privileges can be extended to the callee. Privileges in this context refer to signers and writable accounts. For example, if the instruction the caller is processing contains a signer or writable account, then the caller can invoke an instruction that also contains that signer and/or writable account.

This privilege extension relies on the fact that programs are immutable. In the case of the `acme` program, the runtime can safely treat the transaction's signature as a signature of a `token` instruction. When the runtime sees the `token` instruction references `alice_pubkey`, it looks up the key in the `acme` instruction to see if that key corresponds to a signed account. In this case, it does and thereby authorizes the `token` program to modify Alice's account.

### Program signed accounts

Programs can issue instructions that contain signed accounts that were not signed in the original transaction by using [Program derived addresses](program-derived-addresses.md).

To sign an account with program derived addresses, a program may `invoke_signed()`.

```rust,ignore
invoke_signed(
    &instruction,
    accounts,
    &[&["First addresses seed"],
      &["Second addresses first seed", "Second addresses second seed"]],
)?;
```

### Reentrancy

Reentrancy is currently limited to direct self recursion capped at a fixed depth. This restriction prevents situations where a program might invoke another from an intermediary state without the knowledge that it might later be called back into. Direct recursion gives the program full control of its state at the point that it gets called back.
@ -10,7 +10,7 @@ With this approach, accounts with two-years worth of rent deposits secured are e

Method 2: Pay per byte

If an account has less than two-years worth of deposited rent the network charges rent on a per-epoch basis, in credit for the next epoch (but in arrears when necessary). This rent is deducted at a rate specified in genesis, in lamports per kilobyte-year.
If an account has less than two-years worth of deposited rent the network charges rent on a per-epoch basis, in credit for the next epoch. This rent is deducted at a rate specified in genesis, in lamports per kilobyte-year.

For information on the technical implementation details of this design, see the [Rent](../rent.md) section.
154 docs/src/implemented-proposals/program-derived-addresses.md Normal file
@ -0,0 +1,154 @@
# Program Derived Addresses

## Problem

Programs cannot generate signatures when issuing instructions to other programs as defined in the [Cross-Program Invocations](cross-program-invocation.md) design.

The lack of programmatic signature generation limits the kinds of programs that can be implemented in Solana. A program may be given the authority over an account and later want to transfer that authority to another. This is impossible today because the program cannot act as the signer in the transaction that gives authority.

For example, if two users want to make a wager on the outcome of a game in Solana, they must each transfer their wager's assets to some intermediary that will honor their agreement. Currently, there is no way to implement this intermediary as a program in Solana because the intermediary program cannot transfer the assets to the winner.

This capability is necessary for many DeFi applications since they require assets to be transferred to an escrow agent until some event occurs that determines the new owner.

* Decentralized Exchanges that transfer assets between matching bid and ask orders.

* Auctions that transfer assets to the winner.

* Games or prediction markets that collect and redistribute prizes to the winners.

## Proposed Solution

The key to the design is two-fold:

1. Allow programs to control specific addresses, called Program-Addresses, in such a way that no external user can generate valid transactions with signatures for those addresses.

2. Allow programs to programmatically sign for Program-Addresses that are present in instructions invoked via [Cross-Program Invocations](cross-program-invocation.md).

Given the two conditions, users can securely transfer or assign the authority of on-chain assets to Program-Addresses and the program can then assign that authority elsewhere at its discretion.

### Private keys for Program Addresses

A Program-Address has no private key associated with it, and generating a signature for it is impossible. While it has no private key of its own, it can issue an instruction that includes the Program-Address as a signer.

### Hash-based generated Program Addresses

All 256-bit values are valid ed25519 curve points and valid ed25519 public keys. All are equally secure and equally hard to break. Based on this assumption, Program Addresses can be deterministically derived from a base seed using a 256-bit preimage resistant hash function.

Deterministic Program Addresses for programs follow a similar derivation path as Accounts created with `SystemInstruction::CreateAccountWithSeed`, which is implemented with `system_instruction::create_address_with_seed`.

For reference, that implementation is as follows:

```rust,ignore
pub fn create_address_with_seed(
    base: &Pubkey,
    seed: &str,
    program_id: &Pubkey,
) -> Result<Pubkey, SystemError> {
    if seed.len() > MAX_ADDRESS_SEED_LEN {
        return Err(SystemError::MaxSeedLengthExceeded);
    }

    Ok(Pubkey::new(
        hashv(&[base.as_ref(), seed.as_ref(), program_id.as_ref()]).as_ref(),
    ))
}
```

Programs can deterministically derive any number of addresses by using keywords. These keywords can symbolically identify how the addresses are used.

```rust,ignore
//! Generate a derived program address
//!     * seeds, symbolic keywords used to derive the key
//!     * owner, program that the key is derived for
pub fn create_program_address(seeds: &[&str], owner: &Pubkey) -> Result<Pubkey, PubkeyError> {
    let mut hasher = Hasher::default();
    for seed in seeds.iter() {
        if seed.len() > MAX_SEED_LEN {
            return Err(PubkeyError::MaxSeedLengthExceeded);
        }
        hasher.hash(seed.as_ref());
    }
    hasher.hashv(&[owner.as_ref(), "ProgramDerivedAddress".as_ref()]);

    Ok(Pubkey::new(hashv(&[hasher.result().as_ref()]).as_ref()))
}
```

### Using Program Addresses

Clients can use the `create_program_address` function to generate a destination address.

```rust,ignore
// deterministically derive the escrow key
let escrow_pubkey = create_program_address(&["escrow"], &escrow_program_id);
let message = Message::new(vec![
    token_instruction::transfer(&alice_pubkey, &escrow_pubkey, 1),
]);
// transfer 1 token to escrow
client.send_message(&[&alice_keypair], &message);
```

Programs can use the same function to generate the same address. In the function below, the program issues a `token_instruction::transfer` from a Program Address as if it had the private key to sign the transaction.

```rust,ignore
fn transfer_one_token_from_escrow(
    program_id: &Pubkey,
    keyed_accounts: &[KeyedAccount]
) -> Result<()> {
    // User supplies the destination
    let alice_pubkey = keyed_accounts[1].unsigned_key();

    // Deterministically derive the escrow pubkey.
    let escrow_pubkey = create_program_address(&["escrow"], program_id);

    // Create the transfer instruction
    let instruction = token_instruction::transfer(&escrow_pubkey, &alice_pubkey, 1);

    // The runtime deterministically derives the key from the currently
    // executing program ID and the supplied keywords.
    // If the derived key matches a key marked as signed in the instruction
    // then that key is accepted as signed.
    invoke_signed(&instruction, &[&["escrow"]])
}
```

### Instructions that require signers

The addresses generated with `create_program_address` are indistinguishable from any other public key. The only way for the runtime to verify that the address belongs to a program is for the program to supply the keywords used to generate the address.

The runtime will internally call `create_program_address`, and compare the result against the addresses supplied in the instruction.
@ -4,11 +4,17 @@ Accounts on Solana may have owner-controlled state \(`Account::data`\) that's se

## Two-tiered rent regime

Accounts which maintain a minimum balance equivalent to 2 years of rent payments are exempt. Accounts whose balance falls below this threshold are charged rent at a rate specified in genesis, in lamports per kilobyte-year. The network charges rent on a per-epoch basis, in credit for the next epoch \(but in arrears when necessary\), and `Account::rent_epoch` keeps track of the next time rent should be collected from the account.
Accounts which maintain a minimum balance equivalent to 2 years of rent payments are exempt. The _2 years_ is drawn from the fact that hardware costs drop by 50% every 2 years, with the total converging as a geometric series. Accounts whose balance falls below this threshold are charged rent at a rate specified in genesis, in lamports per byte-year. The network charges rent on a per-epoch basis, in credit for the next epoch, and `Account::rent_epoch` keeps track of the next time rent should be collected from the account.

## Collecting rent
Currently, the rent rate is fixed at genesis. However, it is anticipated to become dynamic, reflecting the underlying hardware storage cost at the time, so the price is generally expected to decrease as hardware costs decline with advancing technology.

Rent is due at account creation time for one epoch's worth of time, and the new account has `Account::rent_epoch` of `current_epoch + 1`. After that, the bank deducts rent from accounts during normal transaction processing as part of the load phase.
## Timings of collecting rent

There are two timings for collecting rent from accounts: \(1\) when an account is referenced by a transaction, and \(2\) periodically, once per epoch. \(1\) includes the transaction that creates the new account itself, and it happens during normal transaction processing by the bank as part of the load phase. \(2\) exists to ensure rent is collected from stale accounts that aren't referenced in recent epochs at all; it requires a whole scan of accounts and is spread over an epoch, based on account address prefix, to avoid load spikes due to this rent collection.

## Actual processing of collecting rent

Rent is due for one epoch's worth of time, and accounts always have `Account::rent_epoch` of `current_epoch + 1`.

If the account is in the exempt regime, `Account::rent_epoch` is simply pushed to `current_epoch + 1`.

@ -18,13 +24,19 @@ Accounts whose balance is insufficient to satisfy the rent that would be due sim

A percentage of the rent collected is destroyed. The rest is distributed to validator accounts by stake weight, a la transaction fees, at the end of every slot.

## Read-only accounts
Finally, rent collection happens according to protocol-level account updates like the rent distribution to validators, meaning there is no corresponding transaction for rent deductions. So, rent collection is rather invisible, only implicitly observable through a recent transaction or the predetermined timing given its account address prefix.

Read-only accounts are not being charged rent in current implementation.
## Design considerations

## Design considerations, others considered
### Current design rationale

Under this design, it is possible to have accounts that linger, never get touched, and never have to pay rent. `Noop` instructions that name these accounts can be used to "garbage collect", but it'd also be possible for accounts that never get touched to migrate out of a validator's working set, thereby reducing memory consumption and obviating the need to charge rent.
Under the preceding design, it is NOT possible to have accounts that linger, never get touched, and never have to pay rent. Accounts always pay rent exactly once for each epoch, except for rent-exempt, sysvar, and executable accounts.

This is an intended design choice. Otherwise, it would be possible to trigger unauthorized rent collection with a `Noop` instruction by anyone who might unfairly profit from the rent (a leader at the moment) or save on rent given an anticipated fluctuating rent cost.

As another side effect of this choice, note that this periodic rent collection effectively prevents validators from optimistically moving stale accounts into cold storage to save on storage cost, which would be unfavorable for account owners and might cause transactions on them to stall longer than others. On the flip side, this prevents malicious users from piling up a significant number of garbage accounts and burdening validators.

As the overall consequence of this design, all accounts are stored equally in a validator's working set with the same performance characteristics, directly reflecting the uniform rent pricing structure.

### Ad-hoc collection

@ -45,8 +57,3 @@ Collecting rent via a system instruction was considered, as it would naturally h

* it would have adversely affected network throughput
* it would require special-casing by the runtime, as accounts with non-SystemProgram owners may be debited by this instruction
* someone would have to issue the transactions

### Account scans on every epoch

Scanning the entire Bank for accounts that owe rent at the beginning of each epoch was considered. This would have been an expensive operation, and would require that the entire current state of the network be present on every validator at the beginning of each epoch.
0 docs/src/integrations/README.md Normal file
390 docs/src/integrations/exchange.md Normal file
@ -0,0 +1,390 @@
# Add Solana to Your Exchange

This guide describes how to add Solana's native token SOL to your cryptocurrency exchange.

## Node Setup

We highly recommend setting up at least two of your own Solana api nodes to give you a trusted entrypoint to the network, allow you full control over how much data is retained, and ensure you do not miss any data if one node fails.

To run an api node:

1. [Install the Solana command-line tool suite](../cli/install-solana-cli-tools.md)
2. Boot the node with at least the following parameters:
```bash
solana-validator \
--ledger <LEDGER_PATH> \
--entrypoint <CLUSTER_ENTRYPOINT> \
--expected-genesis-hash <EXPECTED_GENESIS_HASH> \
--expected-shred-version <EXPECTED_SHRED_VERSION> \
--rpc-port 8899 \
--no-voting \
--enable-rpc-transaction-history \
--limit-ledger-size <SHRED_COUNT> \
--trusted-validator <VALIDATOR_ADDRESS> \
--no-untrusted-rpc
```

Customize `--ledger` to your desired ledger storage location, and `--rpc-port` to the port you want to expose.

The `--entrypoint`, `--expected-genesis-hash`, and `--expected-shred-version` parameters are all specific to the cluster you are joining. The shred version will change on any hard forks in the cluster, so including `--expected-shred-version` ensures you are receiving current data from the cluster you expect.
[Current parameters for Mainnet Beta](../clusters.md#example-solana-validator-command-line-2)

The `--limit-ledger-size` parameter allows you to specify how many ledger [shreds](../terminology.md#shred) your node retains on disk. If you do not include this parameter, the node will keep the entire ledger until it runs out of disk space. A larger value like `--limit-ledger-size 250000000000` is good for a couple of days of ledger history.

Specifying one or more `--trusted-validator` parameters can protect you from booting from a malicious snapshot. [More on the value of booting with trusted validators](../running-validator/validator-start.md#trusted-validators)

Optional parameters to consider:
- `--private-rpc` prevents your RPC port from being published for use by other nodes
- `--rpc-bind-address` allows you to specify a different IP address to bind the RPC port

### Automatic Restarts

We recommend configuring each of your nodes to restart automatically on exit, to ensure you miss as little data as possible. Running the solana software as a systemd service is one great option.

### Ledger Continuity

By default, each of your nodes will boot from a snapshot provided by one of your trusted validators. This snapshot reflects the current state of the chain, but does not contain the complete historical ledger. If one of your nodes exits and boots from a new snapshot, there may be a gap in the ledger on that node. In order to prevent this issue, add the `--no-snapshot-fetch` parameter to your `solana-validator` command to receive historical ledger data instead of a snapshot.

If you pass the `--no-snapshot-fetch` parameter on your initial boot, it will take your node a very long time to catch up. We recommend booting from a snapshot first, and then using the `--no-snapshot-fetch` parameter for reboots.

It is important to note that the amount of historical ledger available to your nodes is limited to what your trusted validators retain. You will need to ensure your nodes do not experience downtimes longer than this span, if ledger continuity is crucial for you.

## Setting up Deposit Accounts

Solana accounts do not require any on-chain initialization; once they contain some SOL, they exist. To set up a deposit account for your exchange, simply generate a Solana keypair using any of our [wallet tools](../wallet-guide/cli.md).

We recommend using a unique deposit account for each of your users.

Solana accounts are charged [rent](../apps/rent.md) on creation and once per epoch, but they can be made rent-exempt if they contain 2-years worth of rent in SOL. In order to find the minimum rent-exempt balance for your deposit accounts, query the [`getMinimumBalanceForRentExemption` endpoint](../apps/jsonrpc-api.md#getminimumbalanceforrentexemption):

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getMinimumBalanceForRentExemption","params":[0]}' localhost:8899

{"jsonrpc":"2.0","result":890880,"id":1}
```

### Offline Accounts

You may wish to keep the keys for one or more collection accounts offline for greater security. If so, you will need to move SOL to hot accounts using our [offline methods](../offline-signing/README.md).

## Listening for Deposits

When a user wants to deposit SOL into your exchange, instruct them to send a transfer to the appropriate deposit address.

### Poll for Blocks

The easiest way to track all the deposit accounts for your exchange is to poll for each confirmed block and inspect for addresses of interest, using the JSON-RPC service of your Solana api node.

* To identify which blocks are available, send a [`getConfirmedBlocks` request](../apps/jsonrpc-api.md#getconfirmedblocks), passing the last block you have already processed as the start-slot parameter:

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5]}' localhost:8899

{"jsonrpc":"2.0","result":[5,6,8,9,11],"id":1}
```
Not every slot produces a block, so there may be gaps in the sequence of integers.

* For each block, request its contents with a [`getConfirmedBlock` request](../apps/jsonrpc-api.md#getconfirmedblock):

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[5, "json"]}' localhost:8899

{
  "jsonrpc": "2.0",
  "result": {
    "blockhash": "2WcrsKSVANoe6xQHKtCcqNdUpCQPQ3vb6QTgi1dcE2oL",
    "parentSlot": 4,
    "previousBlockhash": "7ZDoGW83nXgP14vnn9XhGSaGjbuLdLWkQAoUQ7pg6qDZ",
    "rewards": [],
    "transactions": [
      {
        "meta": {
          "err": null,
          "fee": 5000,
          "postBalances": [
            2033973061360,
            218099990000,
            42000000003
          ],
          "preBalances": [
            2044973066360,
            207099990000,
            42000000003
          ],
          "status": {
            "Ok": null
          }
        },
        "transaction": {
          "message": {
            "accountKeys": [
              "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX",
              "47Sbuv6jL7CViK9F2NMW51aQGhfdpUu7WNvKyH645Rfi",
              "11111111111111111111111111111111"
            ],
            "header": {
              "numReadonlySignedAccounts": 0,
              "numReadonlyUnsignedAccounts": 1,
              "numRequiredSignatures": 1
            },
            "instructions": [
              {
                "accounts": [
                  0,
                  1
                ],
                "data": "3Bxs3zyH82bhpB8j",
                "programIdIndex": 2
              }
            ],
            "recentBlockhash": "7GytRgrWXncJWKhzovVoP9kjfLwoiuDb3cWjpXGnmxWh"
          },
          "signatures": [
            "dhjhJp2V2ybQGVfELWM1aZy98guVVsxRCB5KhNiXFjCBMK5KEyzV8smhkVvs3xwkAug31KnpzJpiNPtcD5bG1t6"
          ]
        }
      }
    ]
  },
  "id": 1
}
```

The `preBalances` and `postBalances` fields allow you to track the balance changes in every account without having to parse the entire transaction. They list the starting and ending balances of each account in [lamports](../terminology.md#lamport), indexed to the `accountKeys` list. For example, if the deposit address of interest is `47Sbuv6jL7CViK9F2NMW51aQGhfdpUu7WNvKyH645Rfi`, this transaction represents a transfer of 218099990000 - 207099990000 = 11000000000 lamports = 11 SOL.
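A minimal sketch of that bookkeeping, using the balance arrays from the example response above (the deposit address sits at index 1 of `accountKeys`):

```rust,ignore
// Balance arrays from the `getConfirmedBlock` response above.
let pre_balances: Vec<u64> = vec![2_044_973_066_360, 207_099_990_000, 42_000_000_003];
let post_balances: Vec<u64> = vec![2_033_973_061_360, 218_099_990_000, 42_000_000_003];

let deposit_index = 1; // position of the deposit address in `accountKeys`
let deposited = post_balances[deposit_index] - pre_balances[deposit_index];
assert_eq!(deposited, 11_000_000_000); // 11 SOL, at 1_000_000_000 lamports per SOL
```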
|
||||
|
||||
If you need more information about the transaction type or other specifics, you
can request the block from RPC in binary format, and parse it using either our
[Rust SDK](https://github.com/solana-labs/solana) or
[Javascript SDK](https://github.com/solana-labs/solana-web3.js).

### Address History

You can also query the transaction history of a specific address.

* Send a [`getConfirmedSignaturesForAddress`](../apps/jsonrpc-api.md#getconfirmedsignaturesforaddress)
  request to the api node, specifying a range of recent slots:

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress","params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", 0, 10]}' localhost:8899

{
  "jsonrpc": "2.0",
  "result": [
    "35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby",
    "4bJdGN8Tt2kLWZ3Fa1dpwPSEkXWWTSszPSf1rRVsCwNjxbbUdwTeiWtmi8soA26YmwnKD4aAxNp8ci1Gjpdv4gsr",
    "dhjhJp2V2ybQGVfELWM1aZy98guVVsxRCB5KhNiXFjCBMK5KEyzV8smhkVvs3xwkAug31KnpzJpiNPtcD5bG1t6"
  ],
  "id": 1
}
```

* For each signature returned, get the transaction details by sending a
  [`getConfirmedTransaction`](../apps/jsonrpc-api.md#getconfirmedtransaction) request:

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["dhjhJp2V2ybQGVfELWM1aZy98guVVsxRCB5KhNiXFjCBMK5KEyzV8smhkVvs3xwkAug31KnpzJpiNPtcD5bG1t6", "json"]}' localhost:8899

// Result
{
  "jsonrpc": "2.0",
  "result": {
    "slot": 5,
    "transaction": {
      "message": {
        "accountKeys": [
          "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX",
          "47Sbuv6jL7CViK9F2NMW51aQGhfdpUu7WNvKyH645Rfi",
          "11111111111111111111111111111111"
        ],
        "header": {
          "numReadonlySignedAccounts": 0,
          "numReadonlyUnsignedAccounts": 1,
          "numRequiredSignatures": 1
        },
        "instructions": [
          {
            "accounts": [
              0,
              1
            ],
            "data": "3Bxs3zyH82bhpB8j",
            "programIdIndex": 2
          }
        ],
        "recentBlockhash": "7GytRgrWXncJWKhzovVoP9kjfLwoiuDb3cWjpXGnmxWh"
      },
      "signatures": [
        "dhjhJp2V2ybQGVfELWM1aZy98guVVsxRCB5KhNiXFjCBMK5KEyzV8smhkVvs3xwkAug31KnpzJpiNPtcD5bG1t6"
      ]
    },
    "meta": {
      "err": null,
      "fee": 5000,
      "postBalances": [
        2033973061360,
        218099990000,
        42000000003
      ],
      "preBalances": [
        2044973066360,
        207099990000,
        42000000003
      ],
      "status": {
        "Ok": null
      }
    }
  },
  "id": 1
}
```

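Taken together, the two calls above can be chained into a small history walker.
This is a sketch, not a production poller: it assumes `jq`, reuses the example
address and the local api node, and omits error handling:

```bash
# For each signature in the address's recent history, fetch the transaction
# and print its metadata (fee, pre/post balances, status).
ADDRESS=6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC
for SIG in $(curl -s -X POST -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","id":1,"method":"getConfirmedSignaturesForAddress","params":["'"$ADDRESS"'", 0, 10]}' \
    localhost:8899 | jq -r '.result[]'); do
  curl -s -X POST -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","id":1,"method":"getConfirmedTransaction","params":["'"$SIG"'", "json"]}' \
    localhost:8899 | jq '.result.meta'
done
```
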
## Sending Withdrawals

To accommodate a user's request to withdraw SOL, you must generate a Solana
transfer transaction, and send it to the api node to be forwarded to your
cluster.

### Synchronous

Sending a synchronous transfer to the Solana cluster allows you to easily ensure
that a transfer is successful and finalized by the cluster.

Solana's command-line tool offers a simple command, `solana transfer`, to
generate, submit, and confirm transfer transactions. By default, this method
will wait and track progress on stderr until the transaction has been finalized
by the cluster. If the transaction fails, it will report any transaction errors.

```bash
solana transfer <USER_ADDRESS> <AMOUNT> --keypair <KEYPAIR> --url http://localhost:8899
```

The [Solana Javascript SDK](https://github.com/solana-labs/solana-web3.js)
offers a similar approach for the JS ecosystem. Use the `SystemProgram` to build
a transfer transaction, and submit it using the `sendAndConfirmTransaction`
method.

### Asynchronous

For greater flexibility, you can submit withdrawal transfers asynchronously. In
these cases, it is your responsibility to verify that the transaction succeeded
and was finalized by the cluster.

**Note:** Each transaction contains a [recent blockhash](../transaction.md#blockhash-format)
to indicate its liveness. It is **critical** to wait until this blockhash
expires before retrying a withdrawal transfer that does not appear to have been
confirmed or finalized by the cluster. Otherwise, you risk a double spend. See
more on [blockhash expiration](#blockhash-expiration) below.

First, get a recent blockhash using the [`getFees` endpoint](../apps/jsonrpc-api.md#getfees)
or the CLI command:

```bash
solana fees --url http://localhost:8899
```

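The same information is available over raw JSON-RPC. A sketch of the
equivalent `getFees` call against the local api node follows; the `blockhash`
and `lastValidSlot` fields of the result are what you will need below:

```bash
# Request a recent blockhash; the response's "value" carries the blockhash,
# the fee calculator, and lastValidSlot (the last slot the blockhash is valid).
curl -s -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"getFees"}' \
  localhost:8899
```
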
In the command-line tool, pass the `--no-wait` argument to send a transfer
asynchronously, and include your recent blockhash with the `--blockhash` argument:

```bash
solana transfer <USER_ADDRESS> <AMOUNT> --no-wait --blockhash <RECENT_BLOCKHASH> --keypair <KEYPAIR> --url http://localhost:8899
```

You can also build, sign, and serialize the transaction manually, and fire it off to
the cluster using the JSON-RPC [`sendTransaction` endpoint](../apps/jsonrpc-api.md#sendtransaction).

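A sketch of that last path, assuming you already hold a signed transaction
serialized as base58 (`<SIGNED_SERIALIZED_TX>` is a placeholder, not a real
transaction):

```bash
# Submit a pre-signed, base58-encoded transaction; the node replies with the
# transaction signature.
curl -s -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["<SIGNED_SERIALIZED_TX>"]}' \
  localhost:8899
```
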
#### Transaction Confirmations & Finality

Get the status of a batch of transactions using the
[`getSignatureStatuses` JSON-RPC endpoint](../apps/jsonrpc-api.md#getsignaturestatuses).
The `confirmations` field reports how many
[confirmed blocks](../terminology.md#confirmed-block) have elapsed since the
transaction was processed. If `confirmations: null`, it is [finalized](../terminology.md#finality).

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatuses", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"]]}' http://localhost:8899

{
  "jsonrpc": "2.0",
  "result": {
    "context": {
      "slot": 82
    },
    "value": [
      {
        "slot": 72,
        "confirmations": 10,
        "err": null,
        "status": {
          "Ok": null
        }
      },
      {
        "slot": 48,
        "confirmations": null,
        "err": null,
        "status": {
          "Ok": null
        }
      }
    ]
  },
  "id": 1
}
```

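As a sketch of how an integration might poll for finality (assuming `jq`, and
a signature the cluster has already seen; `<TRANSACTION_SIGNATURE>` is a
placeholder):

```bash
# Poll a signature until the cluster reports it finalized
# (confirmations == null). Note: value[0] is also null when the signature is
# unknown to the node, so a real integration must distinguish that case.
SIG="<TRANSACTION_SIGNATURE>"
while true; do
  CONFS=$(curl -s -X POST -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["'"$SIG"'"]]}' \
    localhost:8899 | jq '.result.value[0].confirmations')
  [ "$CONFS" = "null" ] && echo "finalized" && break
  echo "confirmations so far: $CONFS"
  sleep 1
done
```
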
#### Blockhash Expiration

When you request a recent blockhash for your withdrawal transaction using the
[`getFees` endpoint](../apps/jsonrpc-api.md#getfees) or `solana fees`, the
response will include the `lastValidSlot`, the last slot in which the blockhash
will be valid. You can check the cluster slot with a
[`getSlot` query](../apps/jsonrpc-api.md#getslot); once the cluster slot is
greater than `lastValidSlot`, the withdrawal transaction using that blockhash
should never succeed.

You can also double-check whether a particular blockhash is still valid by sending a
[`getFeeCalculatorForBlockhash`](../apps/jsonrpc-api.md#getfeecalculatorforblockhash)
request with the blockhash as a parameter. If the response value is null, the
blockhash is expired, and the withdrawal transaction should never succeed.

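For example, a sketch against the local api node, using the placeholder
blockhash from the transfer example above:

```bash
# "value": null in the response means the blockhash has expired; a transaction
# built on it can then be safely rebuilt with a fresh blockhash and retried.
curl -s -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["<RECENT_BLOCKHASH>"]}' \
  localhost:8899
```
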
## Testing the Integration

Be sure to test your complete workflow on Solana devnet and testnet
[clusters](../clusters.md) before moving to production on mainnet-beta. Devnet
is the most open and flexible, and ideal for initial development, while testnet
offers more realistic cluster configuration. Devnet features a token faucet, but
you will need to request some testnet SOL to get going on testnet.

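For instance, the CLI can request funds from the devnet faucet. This is a
sketch; the devnet URL and the exact airdrop arguments may vary by release, so
treat both as assumptions to verify against your CLI version:

```bash
# Fund a test address from the devnet faucet, then check the balance.
solana airdrop <AMOUNT> <TEST_ADDRESS> --url http://devnet.solana.com
solana balance <TEST_ADDRESS> --url http://devnet.solana.com
```
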
@ -1,71 +0,0 @@

# Cross-Program Invocation

## Problem

In today's implementation a client can create a transaction that modifies two accounts, each owned by a separate on-chain program:

```text
let message = Message::new(vec![
    token_instruction::pay(&alice_pubkey),
    acme_instruction::launch_missiles(&bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

The current implementation does not, however, allow the `acme` program to conveniently invoke `token` instructions on the client's behalf:

```text
let message = Message::new(vec![
    acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

Currently, there is no way to create an instruction `pay_and_launch_missiles` that executes `token_instruction::pay` from the `acme` program. The workaround is to extend the `acme` program with the implementation of the `token` program, and create `token` accounts with `ACME_PROGRAM_ID`, which the `acme` program is permitted to modify. With that workaround, `acme` can modify token-like accounts created by the `acme` program, but not token accounts created by the `token` program.

## Proposed Solution

The goal of this design is to modify Solana's runtime such that an on-chain program can invoke an instruction from another program.

Given two on-chain programs `token` and `acme`, each implementing instructions `pay()` and `launch_missiles()` respectively, we would ideally like to implement the `acme` module with a call to a function defined in the `token` module:

```text
use token;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    token::pay(&keyed_accounts[1..])?;

    launch_missiles(keyed_accounts)?;
}
```

The above code would require that the `token` crate be dynamically linked, so that a custom linker could intercept calls and validate accesses to `keyed_accounts`. That is, even though the client intends to modify both `token` and `acme` accounts, only the `token` program is permitted to modify the `token` account, and only the `acme` program is permitted to modify the `acme` account.

Backing off from that ideal cross-program call, a slightly more verbose solution is to expose the `token` program's existing `process_instruction()` entrypoint to the `acme` program:

```text
use token_instruction;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    let alice_pubkey = keyed_accounts[1].key;
    let instruction = token_instruction::pay(&alice_pubkey);
    process_instruction(&instruction)?;

    launch_missiles(keyed_accounts)?;
}
```

where `process_instruction()` is built into Solana's runtime and responsible for routing the given instruction to the `token` program via the instruction's `program_id` field. Before invoking `pay()`, the runtime must also ensure that `acme` didn't modify any accounts owned by `token`. It does this by calling `runtime::verify_account_changes()` and then afterward updating all the `pre_*` variables to tentatively commit `acme`'s account modifications. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme`. It should call `verify_account_changes()` again, but this time with the `token` program ID. Lastly, after `pay_and_launch_missiles()` completes, the runtime must call `verify_account_changes()` one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume `pay_and_launch_missiles()` as a whole made no invalid account changes, and therefore commit all account modifications.

### Setting `KeyedAccount.is_signer`

When `process_instruction()` is invoked, the runtime must create a new `KeyedAccounts` parameter using the signatures from the _original_ transaction data. Since the `token` program is immutable and existed on-chain prior to the `acme` program, the runtime can safely treat the transaction signature as a signature of a transaction with a `token` instruction. When the runtime sees the given instruction references `alice_pubkey`, it looks up the key in the transaction to see if that key corresponds to a transaction signature. In this case it does, and so it sets `KeyedAccount.is_signer`, thereby authorizing the `token` program to modify Alice's account.

docs/src/proposals/ledger-replication-to-implement.md (new file, 415 lines)
@ -0,0 +1,415 @@

# Ledger Replication

Note: this ledger replication solution was partially implemented, but not
completed. The partial implementation was removed by
https://github.com/solana-labs/solana/pull/9992 in order to prevent the security
risk of unused code. The first part of this design document reflects the
once-implemented parts of ledger replication. The
[second part of this document](#ledger-replication-not-implemented) describes the
parts of the solution never implemented.

## Proof of Replication

At full capacity on a 1gbps network solana will generate 4 petabytes of data per year. To prevent the network from centralizing around validators that have to store the full data set, this protocol proposes a way for mining nodes to provide storage capacity for pieces of the data.

The basic idea of Proof of Replication is encrypting a dataset with a public symmetric key using CBC encryption, then hashing the encrypted dataset. The main problem with the naive approach is that a dishonest storage node can stream the encryption and delete the data as it's hashed. The simple solution is to periodically regenerate the hash based on a signed PoH value. This ensures that all the data is present during the generation of the proof, and it also requires validators to have the entirety of the encrypted data present for verification of every proof of every identity. So the space required to validate is `number_of_proofs * data_size`.

## Optimization with PoH

Our improvement on this approach is to randomly sample the encrypted segments faster than it takes to encrypt, and record the hash of those samples into the PoH ledger. Thus the segments stay in the exact same order for every PoRep and verification can stream the data and verify all the proofs in a single batch. This way we can verify multiple proofs concurrently, each one on its own CUDA core. The total space required for verification is `1_ledger_segment + 2_cbc_blocks * number_of_identities` with core count equal to `number_of_identities`. We use a 64-byte chacha CBC block size.

## Network

Validators for PoRep are the same validators that are verifying transactions. If an archiver can prove that a validator verified a fake PoRep, then the validator will not receive a reward for that storage epoch.

Archivers are specialized _light clients_. They download a part of the ledger \(a.k.a Segment\) and store it, and provide PoReps of storing the ledger. For each verified PoRep archivers earn a reward of sol from the mining pool.

## Constraints

We have the following constraints:

* Verification requires generating the CBC blocks. That requires space of 2 blocks per identity, and 1 CUDA core per identity for the same dataset. So as many identities as possible should be batched at once, with as many proofs for those identities verified concurrently for the same dataset.
* Validators will randomly sample the set of storage proofs down to the set that they can handle, and only the creators of those chosen proofs will be rewarded. The validator can run a benchmark whenever its hardware configuration changes to determine what rate it can validate storage proofs.

## Validation and Replication Protocol

### Constants

1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The unit of storage for an archiver.
2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which archivers regenerate their encryption keys and select a new dataset to store.
3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof claim to be successfully rewarded.
4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage mining proof claim has to contain to be valid for a reward.
5. NUM\_STORAGE\_SAMPLES: Number of samples required for a storage mining proof.
6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate encrypted state.
7. NUM\_SLOTS\_PER\_TURN: Number of slots that define a single storage epoch or a "turn" of the PoRep game.

### Validator behavior

1. Validators join the network and begin looking for archiver accounts at each storage epoch/turn boundary.
2. Every turn, Validators sign the PoH value at the boundary and use that signature to randomly pick proofs to verify from each storage account found in the turn boundary. This signed value is also submitted to the validator's storage account and will be used by archivers at a later stage to cross-verify.
3. Every `NUM_SLOTS_PER_TURN` slots the validator advertises the PoH value. This value is also served to Archivers via RPC interfaces.
4. For a given turn N, all validations get locked out until turn N+3 \(a gap of 2 turns/epochs\). At that point all validations during that turn are available for reward collection.
5. Any incorrect validations will be marked during the turn in between.

### Archiver behavior

1. Since an archiver is somewhat of a light client and not downloading all the ledger data, they have to rely on other validators and archivers for information. Any given validator may or may not be malicious and give incorrect information, although there are not any obvious attack vectors that this could accomplish besides having the archiver do extra wasted work. For many of the operations there are a number of options depending on how paranoid an archiver is:
   * \(a\) archiver can ask a validator
   * \(b\) archiver can ask multiple validators
   * \(c\) archiver can ask other archivers
   * \(d\) archiver can subscribe to the full transaction stream and generate the information itself \(assuming the slot is recent enough\)
   * \(e\) archiver can subscribe to an abbreviated transaction stream to generate the information itself \(assuming the slot is recent enough\)
2. An archiver obtains the PoH hash corresponding to the last turn with its slot.
3. The archiver signs the PoH hash with its keypair. That signature is the seed used to pick the segment to replicate and also the encryption key. The archiver mods the signature with the slot to get which segment to replicate.
4. The archiver retrieves the ledger by asking peer validators and archivers. See 6.5.
5. The archiver then encrypts that segment with the key with the chacha algorithm in CBC mode with `NUM_CHACHA_ROUNDS` of encryption.
6. The archiver initializes a chacha rng with a signed recent PoH value as the seed.
7. The archiver generates `NUM_STORAGE_SAMPLES` samples in the range of the entry size and samples the encrypted segment with sha256 for 32-bytes at each offset value. Sampling the state should be faster than generating the encrypted segment.
8. The archiver sends a PoRep proof transaction which contains its sha state at the end of the sampling operation, its seed and the samples it used to the current leader and it is put onto the ledger.
9. During a given turn the archiver should submit many proofs for the same segment and based on the `RATIO_OF_FAKE_PROOFS` some of those proofs must be fake.
10. As the PoRep game enters the next turn, the archiver must submit a transaction with the mask of which proofs were fake during the last turn. This transaction will define the rewards for both archivers and validators.
11. Finally for a turn N, as the PoRep game enters turn N + 3, an archiver's proofs for turn N will be counted towards their rewards.

### The PoRep Game

The Proof of Replication game has 4 primary stages. For each "turn" multiple PoRep games can be in progress but each in a different stage.

The 4 stages of the PoRep Game are as follows:

1. Proof submission stage
   * Archivers: submit as many proofs as possible during this stage
   * Validators: No-op
2. Proof verification stage
   * Archivers: No-op
   * Validators: Select archivers and verify their proofs from the previous turn
3. Proof challenge stage
   * Archivers: Submit the proof mask with justifications \(for fake proofs submitted 2 turns ago\)
   * Validators: No-op
4. Reward collection stage
   * Archivers: Collect rewards for 3 turns ago
   * Validators: Collect rewards for 3 turns ago

For each turn of the PoRep game, both Validators and Archivers evaluate each stage. The stages are run as separate transactions on the storage program.

### Finding who has a given block of ledger

1. Validators monitor the turns in the PoRep game and look at the rooted bank at turn boundaries for any proofs.
2. Validators maintain a map of ledger segments and corresponding archiver public keys. The map is updated when a Validator processes an archiver's proofs for a segment. The validator provides an RPC interface to access this map. Using this API, clients can map a segment to an archiver's network address \(correlating it via the cluster\_info table\). The clients can then send repair requests to the archiver to retrieve segments.
3. Validators would need to invalidate this list every N turns.

## Sybil attacks

For any random seed, we force everyone to use a signature that is derived from a PoH hash at the turn boundary. Everyone uses the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity.

Since there are many more client identities than encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to avoid a single human entity from storing all the replications of a single chunk of the ledger.

Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore of the PoRep identities and blocks. Thus archivers need to store the first block for free, and the network can reward long-lived client identities more than new ones.

## Validator attacks

* If a validator approves fake proofs, an archiver can easily out them by showing the initial state for the hash.
* If a validator marks real proofs as fake, no on-chain computation can be done to distinguish who is correct. Rewards would have to rely on the results from multiple validators to catch bad actors and keep archivers from being denied rewards.
* A validator could try to steal mining proof results for itself. The proofs are derived from a signature from an archiver; since the validator does not know the private key used to generate the encryption key, it cannot be the generator of the proof.

## Reward incentives

Fake proofs are easy to generate but difficult to verify. For this reason, PoRep proof transactions generated by archivers may require a higher fee than a normal transaction to represent the computational cost required by validators.

Some percentage of fake proofs are also necessary to receive a reward from storage mining.

## Notes

* We can reduce the costs of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset.
* We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed.
* The game between validators and archivers is over random blocks and random encryption identities and random data samples. The goal of randomization is to prevent colluding groups from having overlap on data or validation.
* Archiver clients fish for lazy validators by submitting fake proofs that they can prove are fake.
* To defend against Sybil client identities that try to store the same block, we force the clients to store for multiple rounds before receiving a reward.
* Validators should also get rewarded for validating submitted storage proofs as incentive for storing the ledger. They can only validate proofs if they are storing that slice of the ledger.

# Ledger Replication Not Implemented

Replication behavior yet to be implemented.

## Storage epoch

The storage epoch should be the number of slots which results in around 100GB-1TB of ledger to be generated for archivers to store. Archivers will start storing ledger when a given fork has a high probability of not being rolled back.

## Validator behavior

1. Every NUM\_KEY\_ROTATION\_TICKS it also validates samples received from archivers. It signs the PoH hash at that point and uses the following algorithm with the signature as the input:
   * The low 5 bits of the first byte of the signature create an index into another starting byte of the signature.
   * The validator then looks at the set of storage proofs where the byte of the proof's sha state vector starting from the low byte matches exactly with the chosen byte\(s\) of the signature.
   * If the set of proofs is larger than the validator can handle, then it increases to matching 2 bytes in the signature.
   * Validator continues to increase the number of matching bytes until a workable set is found.
   * It then creates a mask of valid proofs and fake proofs and sends it to the leader. This is a storage proof confirmation transaction.
2. After a lockout period of NUM\_SECONDS\_STORAGE\_LOCKOUT seconds, the validator then submits a storage proof claim transaction, which then causes the distribution of the storage reward, if no challenges were seen for the proof, to the validators and archivers party to the proofs.

## Archiver behavior

1. The archiver then generates another set of offsets for which it submits a fake proof with an incorrect sha state. It can be proven to be fake by providing the seed for the hash result.
   * A fake proof should consist of an archiver hash of a signature of a PoH value. That way when the archiver reveals the fake proof, it can be verified on chain.
2. The archiver monitors the ledger; if it sees a fake proof integrated, it creates a challenge transaction and submits it to the current leader. The transaction proves the validator incorrectly validated a fake storage proof. The archiver is rewarded and the validator's staking balance is slashed or frozen.

## Storage proof contract logic

Each archiver and validator will have their own storage account. The validator's account would be separate from their gossip id, similar to their vote account. These should be implemented as two programs, one which handles the validator as the keysigner and one for the archiver. In that way when the programs reference other accounts, they can check the program id to ensure it is a validator or archiver account they are referencing.

### SubmitMiningProof

```text
SubmitMiningProof {
    slot: u64,
    sha_state: Hash,
    signature: Signature,
};
keys = [archiver_keypair]
```

Archivers create these after mining their stored ledger data for a certain hash value. The slot is the end slot of the segment of ledger they are storing, and the sha\_state is the result of the archiver using the hash function to sample their encrypted ledger segment. The signature is the signature that was created when they signed a PoH value for the current storage epoch. The list of proofs from the current storage epoch should be saved in the account state, and then transferred to a list of proofs for the previous epoch when the epoch passes. In a given storage epoch a given archiver should only submit proofs for one segment.

The program should have a list of slots which are valid storage mining slots. This list should be maintained by keeping track of slots which are rooted slots in which a significant portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT number of slots would be added to this set. The program should check that the slot is in this set. The set can be maintained by receiving an AdvertiseStorageRecentBlockhash and checking with its bank/Tower BFT state.

The program should do a signature verify check on the signature, public key from the transaction submitter and the message of the previous storage epoch PoH value.

### ProofValidation

```text
ProofValidation {
    proof_mask: Vec<ProofStatus>,
}
keys = [validator_keypair, archiver_keypair(s) (unsigned)]
```

A validator will submit this transaction to indicate that a set of proofs for a given segment are valid/not-valid or skipped where the validator did not look at it. The keypairs for the archivers that it looked at should be referenced in the keys so the program logic can go to those accounts and see that the proofs are generated in the previous epoch. The sampling of the storage proofs should be verified, ensuring that the correct proofs are skipped by the validator according to the logic outlined in the validator behavior of sampling.

The included archiver keys will indicate the storage samples which are being referenced; the length of the proof\_mask should be verified against the set of storage proofs in the referenced archiver account\(s\), and should match with the number of proofs submitted in the previous storage epoch in the state of said archiver account.

### ClaimStorageReward

```text
ClaimStorageReward {
}
keys = [validator_keypair or archiver_keypair, validator/archiver_keypairs (unsigned)]
```

Archivers and validators will use this transaction to get paid tokens from a program state where SubmitStorageProof, ProofValidation and ChallengeProofValidations are in a state where proofs have been submitted and validated and there are no ChallengeProofValidations referencing those proofs. For a validator, it should reference the archiver keypairs to which it has validated proofs in the relevant epoch. And for an archiver it should reference validator keypairs for which it has validated and wants to be rewarded.

### ChallengeProofValidation

```text
ChallengeProofValidation {
    proof_index: u64,
    hash_seed_value: Vec<u8>,
}
keys = [archiver_keypair, validator_keypair]
```

This transaction is for catching lazy validators who are not doing the work to validate proofs. An archiver will submit this transaction when it sees a validator has approved a fake SubmitMiningProof transaction. Since the archiver is a light client not looking at the full chain, it will have to ask a validator or some set of validators for this information, maybe via RPC call, to obtain all ProofValidations for a certain segment in the previous storage epoch. The program will look in the validator account state, see that a ProofValidation was submitted in the previous storage epoch, hash the hash\_seed\_value, and see that the hash matches the SubmitMiningProof transaction and that the validator marked it as valid. If so, then it will save the challenge to the list of challenges that it has in its state.

### AdvertiseStorageRecentBlockhash

```text
AdvertiseStorageRecentBlockhash {
    hash: Hash,
    slot: u64,
}
```

Validators and archivers will submit this to indicate that a new storage epoch has passed and that the storage proofs which are current proofs should now be for the previous epoch. Other transactions should check to see that the epoch that they are referencing is accurate according to current chain state.

@ -1,169 +0,0 @@

# Program Keys and Signatures

## Problem

Programs cannot generate their own signatures in `process_instruction`
as defined in the [Cross-Program Invocations](cross-program-invocation.md)
design.

Lack of programmatic signature generation limits the kinds of programs
that can be implemented in Solana. For example, a program cannot take
ownership of a TokenAccount and later in a different transaction transfer
the ownership based on the state of another program. If two users want
to make a wager in tokens on the outcome of a game in Solana, they must
transfer tokens to some intermediary that will honor their agreement.
Currently there is no way to implement this intermediary as a program
in Solana.

This capability is necessary for many DeFi applications, since they
require assets to be transferred to an escrow agent until some event
occurs that determines the new owner.

* Decentralized Exchanges that transfer assets between matching bid and
  ask orders.

* Auctions that transfer assets to the winner.

* Games or prediction markets that collect and redistribute prizes to
  the winners.

## Proposed Solution

The key to the design is twofold:

1. Allow programs to control specific addresses, called Program
   Addresses, in such a way that it is impossible for any external
   user to generate valid transactions with signatures for those
   addresses.

2. Allow programs to programmatically control the
   `KeyedAccount::is_signer` value for Program Addresses that are
   present in instructions invoked via `process_instruction()`.

Given the two conditions, users can securely transfer or assign
ownership of on-chain assets to Program Addresses. Once assigned,
the program and only the program can execute instructions that
reference a Program Address with `KeyedAccount::is_signer` set to
true.

### Private keys for Program Addresses

A Program Address has no private key associated with it, and generating
a signature for it is impossible. While it has no private key of
its own, the program can issue an instruction to set the
`KeyedAccount::is_signer` flag for this address.

### Hash-based generated Program Addresses

All 256-bit values are valid ed25519 curve points, and valid ed25519 public
keys. All are equally secure and equally as hard to break.
Based on this assumption, Program Addresses can be deterministically
derived from a base seed using a 256-bit preimage resistant hash function.

Deterministic Program Addresses for programs follow a similar derivation
path as Accounts created with `SystemInstruction::CreateAccountWithSeed`,
which is implemented with `system_instruction::create_address_with_seed`.

For reference, the implementation is as follows:

```rust,ignore
pub fn create_address_with_seed(
    base: &Pubkey,
    seed: &str,
    program_id: &Pubkey,
) -> Result<Pubkey, SystemError> {
    if seed.len() > MAX_ADDRESS_SEED_LEN {
        return Err(SystemError::MaxSeedLengthExceeded);
    }

    Ok(Pubkey::new(
        hashv(&[base.as_ref(), seed.as_ref(), program_id.as_ref()]).as_ref(),
    ))
}
```

Programs can deterministically derive any number of addresses by
using a keyword. The keyword can symbolically identify how this
address is used.

```rust,ignore
//! Generate a derived program address
//! * program_id, the program's id
//! * key_base, can be any public key chosen by the program
//! * keyword, symbolic keyword to identify the key
//!
//! The tuple (`key_base`, `keyword`) is used by programs to create user specific
//! symbolic keys. For example for the staking contract, the program may need:
//! * <user account>/<"withdrawer">
//! * <user account>/<"staker">
//! * <user account>/<"custodian">
//! As generated keys to control a single stake account for each user.
pub fn derive_program_address(
    program_id: &Pubkey,
    key_base: &Pubkey,
    keyword: &str,
) -> Result<Pubkey, SystemError> {
    // Generate a deterministic base for all program addresses that
    // are owned by `program_id`.
    // Hashing twice is recommended to prevent length extension attacks.
    Ok(Pubkey::new(
        hashv(&[hashv(&[
            program_id.as_ref(),
            key_base.as_ref(),
            keyword.as_ref(),
            "ProgramAddress11111111111111111111111111111".as_ref(),
        ])
        .as_ref()])
        .as_ref(),
    ))
}
```

### Using Program Addresses

Clients can use the `derive_program_address` function to generate
a destination address.

```rust,ignore
// Deterministically derive the escrow key.
let escrow_pubkey = derive_program_address(&escrow_program_id, &alice_pubkey, &"escrow");
let message = Message::new(vec![
    token_instruction::transfer(&alice_pubkey, &escrow_pubkey, 1),
]);
// Transfer 1 token to escrow.
client.send_message(&[&alice_keypair], &message);
```

Programs can use the same function to generate the same address.
Below, the program issues a `token_instruction::transfer` from its
own address as if it had a private key to sign the transaction.

```rust,ignore
fn transfer_one_token_from_escrow(
    program_id: &Pubkey,
    keyed_accounts: &[KeyedAccount]
) -> Result<()> {
    // User supplies the destination.
    let alice_pubkey = keyed_accounts[1].key;

    // Deterministically derive the escrow pubkey.
    let escrow_pubkey = derive_program_address(program_id, &alice_pubkey, &"escrow");

    // Create the transfer instruction.
    let instruction = token_instruction::transfer(&escrow_pubkey, &alice_pubkey, 1);

    // The runtime deterministically derives the key from the current
    // program id and the supplied keywords.
    // If the derived key matches a key in the instruction,
    // the `is_signer` flag is set.
    process_signed_instruction(&instruction, &[(&alice_pubkey, &"escrow")])?
}
```

### Setting `KeyedAccount::is_signer`

The addresses generated with `derive_program_address` are blinded
and are indistinguishable from any other pubkey. The only way for
the runtime to verify that the address belongs to a program is for
the program to supply the keyword used to generate the address.

The runtime will internally run `derive_program_address(program_id,
&alice_pubkey, &"escrow")`, and compare the result against the addresses
supplied in the instruction.

@ -7,6 +7,25 @@ experience for most people who are new or experienced with using crypto wallets.
|
||||
currently the easiest and fastest way to get set up with a new wallet on Solana.
|
||||
The app is free and getting your wallet set up only takes a few minutes.
|
||||
|
||||
### Trust Wallet Security
|
||||
|
||||
Tokens held in Trust Wallet are only as secure as the device on which the app is
|
||||
installed. Anyone who is able to unlock your phone or tablet may be able to
|
||||
use the Trust Wallet app and transfer your tokens. To improve security,
|
||||
you can add a passcode to the Trust Wallet application.
|
||||
To add a Trust Wallet passcode, open the app and go to
|
||||
Settings -> Security -> Passcode.
|
||||
|
||||
If someone gains access to your Trust Wallet application, they can access your
|
||||
recovery seed phrase.
|
||||
Anyone who has access to your seed phrase will be able to recreate
|
||||
your Trust Wallet keys on a different device. From there, they could
|
||||
sign transactions from that device rather than on your own phone or tablet.
|
||||
The seed phrase is displayed when a new wallet is created and it can also be
|
||||
viewed at any later time in the app by following these steps:
|
||||
- Go to Setting -> Wallets
|
||||
- Under the Options menu for a particular wallet tap "Show Recovery Phrase"
|
||||
|
||||
{% page-ref page="trust-wallet.md" %}
|
||||
|
||||
## Ledger Live with Ledger Nano S
|
||||
|
@ -59,7 +59,7 @@ some interface for signing transactions.

A hardware wallet, such as the
[Ledger hardware wallet](https://www.ledger.com/), offers a great blend of
security and convenience for cryptocurrencies. It effectively automates the
process of offline signing while retaining nearly all the convenience of an FS
wallet.
process of offline signing while retaining nearly all the convenience of a file
system wallet.

{% page-ref page="../hardware-wallet/README.md" %}

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-dos"
version = "1.2.0"
version = "1.2.2"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@ -13,14 +13,14 @@ clap = "2.33.1"
log = "0.4.8"
rand = "0.7.0"
rayon = "1.3.0"
solana-clap-utils = { path = "../clap-utils", version = "1.2.0" }
solana-core = { path = "../core", version = "1.2.0" }
solana-ledger = { path = "../ledger", version = "1.2.0" }
solana-logger = { path = "../logger", version = "1.2.0" }
solana-net-utils = { path = "../net-utils", version = "1.2.0" }
solana-runtime = { path = "../runtime", version = "1.2.0" }
solana-sdk = { path = "../sdk", version = "1.2.0" }
solana-version = { path = "../version", version = "1.2.0" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.2" }
solana-core = { path = "../core", version = "1.2.2" }
solana-ledger = { path = "../ledger", version = "1.2.2" }
solana-logger = { path = "../logger", version = "1.2.2" }
solana-net-utils = { path = "../net-utils", version = "1.2.2" }
solana-runtime = { path = "../runtime", version = "1.2.2" }
solana-sdk = { path = "../sdk", version = "1.2.2" }
solana-version = { path = "../version", version = "1.2.2" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

Some files were not shown because too many files have changed in this diff.