Compare commits
212 Commits
revert-213...v1.9.5
SHA1
39a4cc95dc
187ed6a387
91bc44931f
35ca3182ba
24345d8e63
bf45f5b88e
2ddb5b27c1
7f10fd6a21
a0a881594a
e9e35fd7bd
66b94b86a9
59f406d78a
dbf9a32883
37e9076db0
f77ea5f324
c9df037dae
2b87d99479
2546ef4ad6
96ae795758
9bddb4e437
4079f12a3e
e121b94524
a7623ad18c
054e475c6c
7a421fe602
2ef0b85829
a6b7a3b7ff
9d69f2b324
4f82a4ba1f
ed0b30efcc
4ee6bc9a93
676c43b9d2
b1d8296498
34984ed16e
f4d1577337
58dcc451a9
f0695ef6d9
41b0d6cca3
ae77a52c97
133314e58c
cb49ae21b4
a9ebba5643
8ce65878da
a4ca18a54d
7cb147fdcd
2d693be9fa
50e716fc80
1f00926874
662c6be51e
9761f5b67f
7b1da62763
2f97fee71a
3ae674dd28
8214bc9db4
1132def37c
7267ebaaf2
4be6e52a4f
e7348243b4
fc0c74d722
687cd4779e
b28d7050ab
6d72acfd6d
840ec0686e
ba0188a36d
05b9a2f203
8578429c4d
87f4a1f4b6
17411f9b4c
fb0e5adc7e
f4ded6fb6b
f89bf7b939
c99aed4abf
edfd8c1717
09dbf069e8
9764d4349b
d84b994451
185f52b712
3b59f67562
7d2589e2ac
77558c315d
464d533da3
f8bf478fde
35fb47d1ce
5bd27dd175
794f28d9ab
d7a673f7f5
b3fa1288aa
3e4e2e9113
fd4754e5a9
0a9460ed8b
478c641cb5
735f000952
264bb903a3
7c5d3e5874
70d5b6aeaf
ca451ea23e
113d261a2c
c6ab915668
d5c0ffc11f
6a2b62de62
4645be3e52
7efd0391e9
6a556c5adb
0cd45400ca
531f36c571
9c9d3e8b6b
74b98c2dd4
9fb67f9b07
401c542d2a
14ed446923
adc584ee22
810ca36eae
16f821ea8c
584e9bfbe7
3ad4c3306c
be0bcd85ed
8708186760
8f3e37c174
7d61935bf1
a70eb098f4
f31593bfbe
8f26c71964
9fbaaa5102
78e7913352
f58b87befe
1a2823b875
75fe0d3ecf
c296a6c9ed
57e5406476
4f57c4a4fe
c4b3b2865d
f58c375b1f
bf41c53f11
e3a4b98432
91657ba8fe
35ee48bec9
02cfa85214
02be3a6568
b20fae5a09
e572678176
f4521002b9
0c5a2bcd5a
c25d16bf0d
301e38044a
bfa6302985
b66e2ae353
3967dc8685
569c83295d
a462c58594
7dba8bb49f
c907d4444d
b4c847557b
de48347078
9f173d3717
dcd76e484f
2246135654
41ea597256
fb955bd4ec
5c3fbb384f
a056fd88cb
2f1816d1db
2cd2f3ba7b
135dfdbf1e
fad4bfdf2a
a9d4728c35
3977bcde63
cf2a9de19c
5e2b12aee5
6c329e2fd3
0376045c7d
c1f54c22ed
0576d133ad
9956afb2bd
01941cf3de
4b63d51e3e
5bf4445ae6
7782d34bbf
2c4765e75a
e71ea19e60
ed0040d555
da9e6826ac
68fc72a7f4
2a6bb2b954
ef51778c78
abecf292a3
a31660815f
539ad4bea6
85f601993f
b0754cc575
effd0b2547
8836069719
2698a5c705
dd157fd47f
8cacf82cb8
8ee5fbc5c0
f2a6b94e5c
ef970bb14a
cabd851904
2d2ef59550
b7b56d5016
18e3a635b4
2b4347d502
87accd16d8
0e969015fc
46935c022e
8a7106bc08
89d2f34a03
b3fa1e4550
58c755e1d4
60085305b4
b4c8e095bd
3e28ffa884
.github/workflows/explorer_preview.yml (vendored, 6 changed lines)
@@ -2,16 +2,14 @@ name : explorer_preview
on:
workflow_run:
workflows: ["Explorer_build&test_on_PR"]
# types:
# - completed
types:
- completed
jobs:
explorer_preview:
runs-on: ubuntu-latest
if: ${{ github.event.workflow_run.conclusion == 'success' }}
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: amondnet/vercel-action@v20
with:
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required

.gitignore (vendored, 1 changed line)
@@ -4,6 +4,7 @@
/solana-metrics/
/solana-metrics.tar.bz2
/target/
/test-ledger/

**/*.rs.bk
.cargo

Cargo.lock (generated, 651 changed lines)
File diff suppressed because it is too large

@@ -12,6 +12,7 @@ members = [
"banks-interface",
"banks-server",
"bucket_map",
"bloom",
"clap-utils",
"cli-config",
"cli-output",
@@ -49,6 +50,7 @@ members = [
"programs/address-lookup-table",
"programs/address-lookup-table-tests",
"programs/bpf_loader",
"programs/bpf_loader/gen-syscall-list",
"programs/compute-budget",
"programs/config",
"programs/stake",

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.10.0"
version = "1.9.5"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -16,12 +16,12 @@ bs58 = "0.4.0"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
serde = "1.0.131"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-config-program = { path = "../programs/config", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
solana-config-program = { path = "../programs/config", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.5" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
thiserror = "1.0"
zstd = "0.9.0"

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-bench"
version = "1.10.0"
version = "1.9.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
[dependencies]
log = "0.4.14"
rayon = "1.5.1"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.9.5" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-measure = { path = "../measure", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-version = { path = "../version", version = "=1.9.5" }
clap = "2.33.1"

[package.metadata.docs.rs]

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accounts-cluster-bench"
version = "1.10.0"
version = "1.9.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-core = { path = "../core", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.5" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.5" }
solana-client = { path = "../client", version = "=1.9.5" }
solana-core = { path = "../core", version = "=1.9.5" }
solana-faucet = { path = "../faucet", version = "=1.9.5" }
solana-gossip = { path = "../gossip", version = "=1.9.5" }
solana-logger = { path = "../logger", version = "=1.9.5" }
solana-measure = { path = "../measure", version = "=1.9.5" }
solana-net-utils = { path = "../net-utils", version = "=1.9.5" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-streamer = { path = "../streamer", version = "=1.9.5" }
solana-test-validator = { path = "../test-validator", version = "=1.9.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
solana-version = { path = "../version", version = "=1.9.5" }
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
solana-local-cluster = { path = "../local-cluster", version = "=1.9.5" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-interface"
description = "The Solana AccountsDb plugin interface."
version = "1.10.0"
version = "1.9.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,8 +12,8 @@ documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
[dependencies]
log = "0.4.11"
thiserror = "1.0.30"
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -3,8 +3,8 @@
/// In addition, the dynamic library must export a "C" function _create_plugin which
/// creates the implementation of the plugin.
use {
solana_sdk::{signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::TransactionStatusMeta,
solana_sdk::{clock::UnixTimestamp, signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::{Reward, TransactionStatusMeta},
std::{any::Any, error, io},
thiserror::Error,
};
@@ -48,18 +48,43 @@ pub enum ReplicaAccountInfoVersions<'a> {
V0_0_1(&'a ReplicaAccountInfo<'a>),
}

/// Information about a transaction
#[derive(Clone, Debug)]
pub struct ReplicaTransactionInfo<'a> {
/// The first signature of the transaction, used for identifying the transaction.
pub signature: &'a Signature,

/// Indicates if the transaction is a simple vote transaction.
pub is_vote: bool,

/// The sanitized transaction.
pub transaction: &'a SanitizedTransaction,

/// Metadata of the transaction status.
pub transaction_status_meta: &'a TransactionStatusMeta,
}

/// A wrapper to future-proof ReplicaTransactionInfo handling.
/// If there were a change to the structure of ReplicaTransactionInfo,
/// there would be new enum entry for the newer version, forcing
/// plugin implementations to handle the change.
pub enum ReplicaTransactionInfoVersions<'a> {
V0_0_1(&'a ReplicaTransactionInfo<'a>),
}

#[derive(Clone, Debug)]
pub struct ReplicaBlockInfo<'a> {
pub slot: u64,
pub blockhash: &'a str,
pub rewards: &'a [Reward],
pub block_time: Option<UnixTimestamp>,
pub block_height: Option<u64>,
}

pub enum ReplicaBlockInfoVersions<'a> {
V0_0_1(&'a ReplicaBlockInfo<'a>),
}

/// Errors returned by plugin calls
#[derive(Error, Debug)]
pub enum AccountsDbPluginError {
@@ -173,6 +198,12 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
Ok(())
}

/// Called when block's metadata is updated.
#[allow(unused_variables)]
fn notify_block_metadata(&mut self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> {
Ok(())
}

/// Check if the plugin is interested in account data
/// Default is true -- if the plugin is not interested in
/// account data, please return false.
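
The interface hunk above adds `ReplicaBlockInfo`, `ReplicaBlockInfoVersions`, and a default-implemented `notify_block_metadata` hook to the `AccountsDbPlugin` trait. As a rough sketch (not part of this change set), a downstream plugin could opt into the new callback like this, assuming the trait's other callbacks keep their default implementations; `LoggingPlugin` is a hypothetical name:

```rust
use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
    AccountsDbPlugin, ReplicaBlockInfoVersions, Result,
};

#[derive(Debug)]
struct LoggingPlugin; // hypothetical plugin, for illustration only

impl AccountsDbPlugin for LoggingPlugin {
    fn name(&self) -> &'static str {
        "logging-plugin"
    }

    // New in this diff: invoked when a block's metadata is updated.
    fn notify_block_metadata(&mut self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> {
        match blockinfo {
            ReplicaBlockInfoVersions::V0_0_1(info) => {
                // ReplicaBlockInfo carries slot, blockhash, rewards,
                // block_time and block_height, as defined above.
                println!(
                    "slot={} blockhash={} height={:?} rewards={}",
                    info.slot,
                    info.blockhash,
                    info.block_height,
                    info.rewards.len()
                );
            }
        }
        Ok(())
    }
}
```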

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.10.0"
version = "1.9.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ bs58 = "0.4.0"
crossbeam-channel = "0.5"
libloading = "0.7.2"
log = "0.4.11"
serde = "1.0.131"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-rpc = { path = "../rpc", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.5" }
solana-logger = { path = "../logger", version = "=1.9.5" }
solana-measure = { path = "../measure", version = "=1.9.5" }
solana-metrics = { path = "../metrics", version = "=1.9.5" }
solana-rpc = { path = "../rpc", version = "=1.9.5" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
thiserror = "1.0.30"

[package.metadata.docs.rs]

@@ -2,6 +2,8 @@ use {
crate::{
accounts_update_notifier::AccountsUpdateNotifierImpl,
accountsdb_plugin_manager::AccountsDbPluginManager,
block_metadata_notifier::BlockMetadataNotifierImpl,
block_metadata_notifier_interface::BlockMetadataNotifierLock,
slot_status_notifier::SlotStatusNotifierImpl, slot_status_observer::SlotStatusObserver,
transaction_notifier::TransactionNotifierImpl,
},
@@ -50,6 +52,7 @@ pub struct AccountsDbPluginService {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
transaction_notifier: Option<TransactionNotifierLock>,
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
}

impl AccountsDbPluginService {
@@ -102,17 +105,24 @@ impl AccountsDbPluginService {
None
};

let slot_status_observer =
if account_data_notifications_enabled || transaction_notifications_enabled {
let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone());
let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier));
let (slot_status_observer, block_metadata_notifier): (
Option<SlotStatusObserver>,
Option<BlockMetadataNotifierLock>,
) = if account_data_notifications_enabled || transaction_notifications_enabled {
let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone());
let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier));
(
Some(SlotStatusObserver::new(
confirmed_bank_receiver,
slot_status_notifier,
))
} else {
None
};
)),
Some(Arc::new(RwLock::new(BlockMetadataNotifierImpl::new(
plugin_manager.clone(),
)))),
)
} else {
(None, None)
};

info!("Started AccountsDbPluginService");
Ok(AccountsDbPluginService {
@@ -120,6 +130,7 @@ impl AccountsDbPluginService {
plugin_manager,
accounts_update_notifier,
transaction_notifier,
block_metadata_notifier,
})
}

@@ -186,6 +197,10 @@ impl AccountsDbPluginService {
self.transaction_notifier.clone()
}

pub fn get_block_metadata_notifier(&self) -> Option<BlockMetadataNotifierLock> {
self.block_metadata_notifier.clone()
}

pub fn join(self) -> thread::Result<()> {
if let Some(mut slot_status_observer) = self.slot_status_observer {
slot_status_observer.join()?;

accountsdb-plugin-manager/src/block_metadata_notifier.rs (new file, 105 lines)
@@ -0,0 +1,105 @@
use {
crate::{
accountsdb_plugin_manager::AccountsDbPluginManager,
block_metadata_notifier_interface::BlockMetadataNotifier,
},
log::*,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
ReplicaBlockInfo, ReplicaBlockInfoVersions,
},
solana_measure::measure::Measure,
solana_metrics::*,
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
solana_transaction_status::{Reward, Rewards},
std::sync::{Arc, RwLock},
};

pub(crate) struct BlockMetadataNotifierImpl {
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
}

impl BlockMetadataNotifier for BlockMetadataNotifierImpl {
/// Notify the block metadata
fn notify_block_metadata(
&self,
slot: u64,
blockhash: &str,
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
) {
let mut plugin_manager = self.plugin_manager.write().unwrap();
if plugin_manager.plugins.is_empty() {
return;
}
let rewards = Self::build_rewards(rewards);

for plugin in plugin_manager.plugins.iter_mut() {
let mut measure = Measure::start("accountsdb-plugin-update-slot");
let block_info =
Self::build_replica_block_info(slot, blockhash, &rewards, block_time, block_height);
let block_info = ReplicaBlockInfoVersions::V0_0_1(&block_info);
match plugin.notify_block_metadata(block_info) {
Err(err) => {
error!(
"Failed to update block metadata at slot {}, error: {} to plugin {}",
slot,
err,
plugin.name()
)
}
Ok(_) => {
trace!(
"Successfully updated block metadata at slot {} to plugin {}",
slot,
plugin.name()
);
}
}
measure.stop();
inc_new_counter_debug!(
"accountsdb-plugin-update-block-metadata-us",
measure.as_us() as usize,
1000,
1000
);
}
}
}

impl BlockMetadataNotifierImpl {
fn build_rewards(rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>) -> Rewards {
let rewards = rewards.read().unwrap();
rewards
.iter()
.map(|(pubkey, reward)| Reward {
pubkey: pubkey.to_string(),
lamports: reward.lamports,
post_balance: reward.post_balance,
reward_type: Some(reward.reward_type),
commission: reward.commission,
})
.collect()
}

fn build_replica_block_info<'a>(
slot: u64,
blockhash: &'a str,
rewards: &'a [Reward],
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
) -> ReplicaBlockInfo<'a> {
ReplicaBlockInfo {
slot,
blockhash,
rewards,
block_time,
block_height,
}
}

pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
Self { plugin_manager }
}
}

@@ -0,0 +1,20 @@
use {
solana_runtime::bank::RewardInfo,
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
std::sync::{Arc, RwLock},
};

/// Interface for notifying block metadata changes
pub trait BlockMetadataNotifier {
/// Notify the block metadata
fn notify_block_metadata(
&self,
slot: u64,
blockhash: &str,
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
block_time: Option<UnixTimestamp>,
block_height: Option<u64>,
);
}

pub type BlockMetadataNotifierLock = Arc<RwLock<dyn BlockMetadataNotifier + Sync + Send>>;
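
For orientation, here is a minimal implementor-side sketch of this new interface (the `NullBlockMetadataNotifier` type is hypothetical and not part of the diff): any type implementing `BlockMetadataNotifier` can be shared through the `BlockMetadataNotifierLock` alias declared above.

```rust
use {
    solana_accountsdb_plugin_manager::block_metadata_notifier_interface::{
        BlockMetadataNotifier, BlockMetadataNotifierLock,
    },
    solana_runtime::bank::RewardInfo,
    solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
    std::sync::{Arc, RwLock},
};

// A no-op notifier, useful as a stand-in or test double.
struct NullBlockMetadataNotifier;

impl BlockMetadataNotifier for NullBlockMetadataNotifier {
    fn notify_block_metadata(
        &self,
        slot: u64,
        blockhash: &str,
        _rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
        _block_time: Option<UnixTimestamp>,
        _block_height: Option<u64>,
    ) {
        println!("block metadata for slot {}: {}", slot, blockhash);
    }
}

// Wrap it in the shared, thread-safe handle the service stores and hands out.
fn make_notifier() -> BlockMetadataNotifierLock {
    Arc::new(RwLock::new(NullBlockMetadataNotifier))
}
```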

@@ -1,6 +1,8 @@
pub mod accounts_update_notifier;
pub mod accountsdb_plugin_manager;
pub mod accountsdb_plugin_service;
pub mod block_metadata_notifier;
pub mod block_metadata_notifier_interface;
pub mod slot_status_notifier;
pub mod slot_status_observer;
pub mod transaction_notifier;

@@ -8,7 +8,6 @@ use {
solana_measure::measure::Measure,
solana_metrics::*,
solana_rpc::transaction_notifier_interface::TransactionNotifier,
solana_runtime::bank,
solana_sdk::{clock::Slot, signature::Signature, transaction::SanitizedTransaction},
solana_transaction_status::TransactionStatusMeta,
std::sync::{Arc, RwLock},
@@ -85,7 +84,7 @@ impl TransactionNotifierImpl {
) -> ReplicaTransactionInfo<'a> {
ReplicaTransactionInfo {
signature,
is_vote: bank::is_simple_vote_transaction(transaction),
is_vote: transaction.is_simple_vote_transaction(),
transaction,
transaction_status_meta,
}

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.10.0"
version = "1.9.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -19,21 +19,21 @@ crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.2", features = ["with-chrono-0_4"] }
postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.131"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.5" }
solana-logger = { path = "../logger", version = "=1.9.5" }
solana-measure = { path = "../measure", version = "=1.9.5" }
solana-metrics = { path = "../metrics", version = "=1.9.5" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"

[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.5" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -47,7 +47,14 @@ Create TYPE "TransactionErrorCode" AS ENUM (
'WouldExceedMaxAccountCostLimit',
'WouldExceedMaxBlockCostLimit',
'UnsupportedVersion',
'InvalidWritableAccount'
'InvalidWritableAccount',
'WouldExceedMaxAccountDataCostLimit',
'TooManyAccountLocks',
'AddressLookupTableNotFound',
'InvalidAddressLookupTableOwner',
'InvalidAddressLookupTableData',
'InvalidAddressLookupTableIndex',
'InvalidRentPayingAccount'
);

CREATE TYPE "TransactionError" AS (
@@ -114,7 +121,7 @@ CREATE TYPE "TransactionMessage" AS (
);

CREATE TYPE "TransactionMessageAddressTableLookup" AS (
account_key: BYTEA[],
account_key BYTEA,
writable_indexes SMALLINT[],
readonly_indexes SMALLINT[]
);
@@ -152,6 +159,16 @@ CREATE TABLE transaction (
CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);

-- The table storing block metadata
CREATE TABLE block (
slot BIGINT PRIMARY KEY,
blockhash VARCHAR(44),
rewards "Reward"[],
block_time BIGINT,
block_height BIGINT,
updated_on TIMESTAMP NOT NULL
);

/**
* The following is for keeping historical data for accounts and is not required for plugin to work.
*/
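
The schema change above adds a `block` table that is populated by the new block-metadata notifications. As a hypothetical read-side sketch (not part of this diff), the row for a given slot could be fetched with the same `postgres` crate the plugin already depends on; the connection parameters below are placeholders:

```rust
use postgres::{Client, NoTls};

fn read_block_metadata(slot: i64) -> Result<(), postgres::Error> {
    // Placeholder connection parameters; adjust for your deployment.
    let mut client = Client::connect("host=localhost user=solana dbname=solana", NoTls)?;
    for row in client.query(
        "SELECT blockhash, block_time, block_height FROM block WHERE slot = $1",
        &[&slot],
    )? {
        let blockhash: String = row.get(0);
        let block_time: Option<i64> = row.get(1);
        let block_height: Option<i64> = row.get(2);
        println!(
            "slot {}: blockhash {}, block_time {:?}, block_height {:?}",
            slot, blockhash, block_time, block_height
        );
    }
    Ok(())
}
```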

@@ -8,6 +8,7 @@ DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
DROP TABLE transaction;
DROP TABLE block;

DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;

@@ -12,7 +12,7 @@ use {
serde_json,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions,
ReplicaTransactionInfoVersions, Result, SlotStatus,
ReplicaBlockInfoVersions, ReplicaTransactionInfoVersions, Result, SlotStatus,
},
solana_metrics::*,
std::{fs::File, io::Read},
@@ -41,6 +41,8 @@ pub struct AccountsDbPluginPostgresConfig {
pub threads: Option<usize>,
pub batch_size: Option<usize>,
pub panic_on_db_errors: Option<bool>,
/// Indicates if to store historical data for accounts
pub store_account_historical_data: Option<bool>,
}

#[derive(Error, Debug)]
@@ -74,7 +76,7 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres {
/// Accounts either satisyfing the accounts condition or owners condition will be selected.
/// When only owners is specified,
/// all accounts belonging to the owners will be streamed.
/// The accounts field support wildcard to select all accounts:
/// The accounts field supports wildcard to select all accounts:
/// "accounts_selector" : {
/// "accounts" : \["*"\],
/// }
@@ -85,6 +87,8 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres {
/// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
/// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
/// `host` and `user` must be given.
/// "store_account_historical_data", optional, set it to 'true', to store historical account data to account_audit
/// table.
/// * "threads" optional, specifies the number of worker threads for the plugin. A thread
/// maintains a PostgreSQL connection to the server. The default is '10'.
/// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
@@ -334,6 +338,31 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres {
Ok(())
}

fn notify_block_metadata(&mut self, block_info: ReplicaBlockInfoVersions) -> Result<()> {
match &mut self.client {
None => {
return Err(AccountsDbPluginError::Custom(Box::new(
AccountsDbPluginPostgresError::DataStoreConnectionError {
msg: "There is no connection to the PostgreSQL database.".to_string(),
},
)));
}
Some(client) => match block_info {
ReplicaBlockInfoVersions::V0_0_1(block_info) => {
let result = client.update_block_metadata(block_info);

if let Err(err) = result {
return Err(AccountsDbPluginError::SlotStatusUpdateError{
msg: format!("Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}", err)
});
}
}
},
}

Ok(())
}

/// Check if the plugin is interested in account data
/// Default is true -- if the plugin is not interested in
/// account data, please return false.
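
Putting the documented knobs together, an illustrative plugin configuration might look like the following. The values are placeholders, and only the keys named in the doc comments and the config struct above are shown; it is written as a Rust string literal so it stays in the document's language.

```rust
// Hypothetical example values; `host`, `user`, `threads`, `batch_size`,
// `panic_on_db_errors`, `store_account_historical_data` and `accounts_selector`
// are the options described in the doc comments above.
const EXAMPLE_PLUGIN_CONFIG: &str = r#"{
    "host": "localhost",
    "user": "solana",
    "threads": 10,
    "batch_size": 10,
    "panic_on_db_errors": true,
    "store_account_historical_data": true,
    "accounts_selector": { "accounts": ["*"] }
}"#;
```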

@@ -1,4 +1,6 @@
#![allow(clippy::integer_arithmetic)]

mod postgres_client_block_metadata;
mod postgres_client_transaction;

/// A concurrent implementation for writing accounts into the PostgreSQL in parallel.
@@ -10,9 +12,10 @@ use {
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
log::*,
postgres::{Client, NoTls, Statement},
postgres_client_block_metadata::DbBlockInfo,
postgres_client_transaction::LogTransactionRequest,
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
AccountsDbPluginError, ReplicaAccountInfo, ReplicaBlockInfo, SlotStatus,
},
solana_measure::measure::Measure,
solana_metrics::*,
@@ -36,6 +39,7 @@ const DEFAULT_THREADS_COUNT: usize = 100;
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
const ACCOUNT_COLUMN_COUNT: usize = 9;
const DEFAULT_PANIC_ON_DB_ERROR: bool = false;
const DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA: bool = false;

struct PostgresSqlClientWrapper {
client: Client,
@@ -44,6 +48,8 @@ struct PostgresSqlClientWrapper {
update_slot_with_parent_stmt: Statement,
update_slot_without_parent_stmt: Statement,
update_transaction_log_stmt: Statement,
update_block_metadata_stmt: Statement,
insert_account_audit_stmt: Option<Statement>,
}

pub struct SimplePostgresClient {
@@ -195,6 +201,11 @@ pub trait PostgresClient {
&mut self,
transaction_log_info: LogTransactionRequest,
) -> Result<(), AccountsDbPluginError>;

fn update_block_metadata(
&mut self,
block_info: UpdateBlockMetadataRequest,
) -> Result<(), AccountsDbPluginError>;
}

impl SimplePostgresClient {
@@ -315,6 +326,28 @@ impl SimplePostgresClient {
}
}

fn build_account_audit_insert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO account_audit (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)";

let stmt = client.prepare(stmt);

match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the account_audit update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}

fn build_slot_upsert_statement_with_parent(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
@@ -361,8 +394,8 @@ impl SimplePostgresClient {
}
}

/// Internal function for updating or inserting a single account
fn upsert_account_internal(
/// Internal function for inserting an account into account_audit table.
fn insert_account_audit(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
@@ -370,7 +403,43 @@ impl SimplePostgresClient {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.query(
let result = client.execute(
statement,
&[
&account.pubkey(),
&account.slot,
&account.owner(),
&lamports,
&account.executable(),
&rent_epoch,
&account.data(),
&account.write_version(),
&updated_on,
],
);

if let Err(err) = result {
let msg = format!(
"Failed to persist the insert of account_audit to the PostgreSQL database. Error: {:?}",
err
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}
Ok(())
}

/// Internal function for updating or inserting a single account
fn upsert_account_internal(
account: &DbAccountInfo,
statement: &Statement,
client: &mut Client,
insert_account_audit_stmt: &Option<Statement>,
) -> Result<(), AccountsDbPluginError> {
let lamports = account.lamports() as i64;
let rent_epoch = account.rent_epoch() as i64;
let updated_on = Utc::now().naive_utc();
let result = client.execute(
statement,
&[
&account.pubkey(),
@@ -392,6 +461,11 @@ impl SimplePostgresClient {
);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
} else if result.unwrap() == 0 && insert_account_audit_stmt.is_some() {
// If no records modified (inserted or updated), it is because the account is updated
// at an older slot, insert the record directly into the account_audit table.
let statement = insert_account_audit_stmt.as_ref().unwrap();
Self::insert_account_audit(account, statement, client)?;
}

Ok(())
@@ -400,9 +474,10 @@ impl SimplePostgresClient {
/// Update or insert a single account
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let insert_account_audit_stmt = &client.insert_account_audit_stmt;
let statement = &client.update_account_stmt;
let client = &mut client.client;
Self::upsert_account_internal(account, statement, client)
Self::upsert_account_internal(account, statement, client, insert_account_audit_stmt)
}

/// Insert accounts in batch to reduce network overhead
@@ -478,11 +553,12 @@ impl SimplePostgresClient {
}

let client = self.client.get_mut().unwrap();
let insert_account_audit_stmt = &client.insert_account_audit_stmt;
let statement = &client.update_account_stmt;
let client = &mut client.client;

for account in self.pending_account_updates.drain(..) {
Self::upsert_account_internal(&account, statement, client)?;
Self::upsert_account_internal(&account, statement, client, insert_account_audit_stmt)?;
}

Ok(())
@@ -501,10 +577,24 @@ impl SimplePostgresClient {
Self::build_slot_upsert_statement_without_parent(&mut client, config)?;
let update_transaction_log_stmt =
Self::build_transaction_info_upsert_statement(&mut client, config)?;
let update_block_metadata_stmt =
Self::build_block_metadata_upsert_statement(&mut client, config)?;

let batch_size = config
.batch_size
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);

let store_account_historical_data = config
.store_account_historical_data
.unwrap_or(DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA);

let insert_account_audit_stmt = if store_account_historical_data {
let stmt = Self::build_account_audit_insert_statement(&mut client, config)?;
Some(stmt)
} else {
None
};

info!("Created SimplePostgresClient.");
Ok(Self {
batch_size,
@@ -516,6 +606,8 @@ impl SimplePostgresClient {
update_slot_with_parent_stmt,
update_slot_without_parent_stmt,
update_transaction_log_stmt,
update_block_metadata_stmt,
insert_account_audit_stmt,
}),
})
}
@@ -591,6 +683,13 @@ impl PostgresClient for SimplePostgresClient {
) -> Result<(), AccountsDbPluginError> {
self.log_transaction_impl(transaction_log_info)
}

fn update_block_metadata(
&mut self,
block_info: UpdateBlockMetadataRequest,
) -> Result<(), AccountsDbPluginError> {
self.update_block_metadata_impl(block_info)
}
}

struct UpdateAccountRequest {
@@ -604,11 +703,16 @@ struct UpdateSlotRequest {
slot_status: SlotStatus,
}

pub struct UpdateBlockMetadataRequest {
pub block_info: DbBlockInfo,
}

#[warn(clippy::large_enum_variant)]
enum DbWorkItem {
UpdateAccount(Box<UpdateAccountRequest>),
UpdateSlot(Box<UpdateSlotRequest>),
LogTransaction(Box<LogTransactionRequest>),
UpdateBlockMetadata(Box<UpdateBlockMetadataRequest>),
}

impl PostgresClientWorker {
@@ -670,8 +774,11 @@ impl PostgresClientWorker {
}
}
DbWorkItem::LogTransaction(transaction_log_info) => {
if let Err(err) = self.client.log_transaction(*transaction_log_info) {
error!("Failed to update transaction: ({})", err);
self.client.log_transaction(*transaction_log_info)?;
}
DbWorkItem::UpdateBlockMetadata(block_info) => {
if let Err(err) = self.client.update_block_metadata(*block_info) {
error!("Failed to update block metadata: ({})", err);
if panic_on_db_errors {
abort();
}
@@ -868,6 +975,25 @@ impl ParallelPostgresClient {
Ok(())
}

pub fn update_block_metadata(
&mut self,
block_info: &ReplicaBlockInfo,
) -> Result<(), AccountsDbPluginError> {
if let Err(err) = self.sender.send(DbWorkItem::UpdateBlockMetadata(Box::new(
UpdateBlockMetadataRequest {
block_info: DbBlockInfo::from(block_info),
},
))) {
return Err(AccountsDbPluginError::SlotStatusUpdateError {
msg: format!(
"Failed to update the block metadata at slot {:?}, error: {:?}",
block_info.slot, err
),
});
}
Ok(())
}

pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
info!("Notifying the end of startup");
// Ensure all items in the queue has been received by the workers
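
The `insert_account_audit` fallback above relies on `Client::execute` returning the number of rows the upsert actually touched: when the update being applied is older than the row already stored, zero rows change and the stale update is preserved in `account_audit` instead. A reduced, self-contained sketch of that pattern against a toy two-column table (the table layout and conflict rule here are illustrative, not the plugin's real statements):

```rust
use postgres::{Client, Error};

fn upsert_or_audit(client: &mut Client, pubkey: &[u8], slot: i64) -> Result<(), Error> {
    // Upsert, but only let newer slots overwrite the stored row.
    let rows_changed = client.execute(
        "INSERT INTO account (pubkey, slot) VALUES ($1, $2) \
         ON CONFLICT (pubkey) DO UPDATE SET slot = excluded.slot \
         WHERE account.slot < excluded.slot",
        &[&pubkey, &slot],
    )?;

    if rows_changed == 0 {
        // The incoming update was for an older slot: keep it as history only.
        client.execute(
            "INSERT INTO account_audit (pubkey, slot) VALUES ($1, $2)",
            &[&pubkey, &slot],
        )?;
    }
    Ok(())
}
```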

@@ -0,0 +1,97 @@
use {
crate::{
accountsdb_plugin_postgres::{
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
},
postgres_client::{
postgres_client_transaction::DbReward, SimplePostgresClient, UpdateBlockMetadataRequest,
},
},
chrono::Utc,
log::*,
postgres::{Client, Statement},
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
AccountsDbPluginError, ReplicaBlockInfo,
},
};

#[derive(Clone, Debug)]
pub struct DbBlockInfo {
pub slot: i64,
pub blockhash: String,
pub rewards: Vec<DbReward>,
pub block_time: Option<i64>,
pub block_height: Option<i64>,
}

impl<'a> From<&ReplicaBlockInfo<'a>> for DbBlockInfo {
fn from(block_info: &ReplicaBlockInfo) -> Self {
Self {
slot: block_info.slot as i64,
blockhash: block_info.blockhash.to_string(),
rewards: block_info.rewards.iter().map(DbReward::from).collect(),
block_time: block_info.block_time,
block_height: block_info
.block_height
.map(|block_height| block_height as i64),
}
}
}

impl SimplePostgresClient {
pub(crate) fn build_block_metadata_upsert_statement(
client: &mut Client,
config: &AccountsDbPluginPostgresConfig,
) -> Result<Statement, AccountsDbPluginError> {
let stmt =
"INSERT INTO block (slot, blockhash, rewards, block_time, block_height, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6)";

let stmt = client.prepare(stmt);

match stmt {
Err(err) => {
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
msg: format!(
"Error in preparing for the block metadata update PostgreSQL database: ({}) host: {:?} user: {:?} config: {:?}",
err, config.host, config.user, config
),
})));
}
Ok(stmt) => Ok(stmt),
}
}

pub(crate) fn update_block_metadata_impl(
&mut self,
block_info: UpdateBlockMetadataRequest,
) -> Result<(), AccountsDbPluginError> {
let client = self.client.get_mut().unwrap();
let statement = &client.update_block_metadata_stmt;
let client = &mut client.client;
let updated_on = Utc::now().naive_utc();

let block_info = block_info.block_info;
let result = client.query(
statement,
&[
&block_info.slot,
&block_info.blockhash,
&block_info.rewards,
&block_info.block_time,
&block_info.block_height,
&updated_on,
],
);

if let Err(err) = result {
let msg = format!(
"Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}",
err);
error!("{}", msg);
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
}

Ok(())
}
}

@@ -330,6 +330,14 @@ pub enum DbTransactionErrorCode {
WouldExceedMaxBlockCostLimit,
UnsupportedVersion,
InvalidWritableAccount,
WouldExceedMaxAccountDataCostLimit,
TooManyAccountLocks,
AddressLookupTableNotFound,
InvalidAddressLookupTableOwner,
InvalidAddressLookupTableData,
InvalidAddressLookupTableIndex,
InvalidRentPayingAccount,
WouldExceedMaxVoteCostLimit,
}

impl From<&TransactionError> for DbTransactionErrorCode {
@@ -356,8 +364,22 @@ impl From<&TransactionError> for DbTransactionErrorCode {
Self::WouldExceedMaxAccountCostLimit
}
TransactionError::WouldExceedMaxBlockCostLimit => Self::WouldExceedMaxBlockCostLimit,
TransactionError::WouldExceedMaxVoteCostLimit => Self::WouldExceedMaxVoteCostLimit,
TransactionError::UnsupportedVersion => Self::UnsupportedVersion,
TransactionError::InvalidWritableAccount => Self::InvalidWritableAccount,
TransactionError::WouldExceedMaxAccountDataCostLimit => {
Self::WouldExceedMaxAccountDataCostLimit
}
TransactionError::TooManyAccountLocks => Self::TooManyAccountLocks,
TransactionError::AddressLookupTableNotFound => Self::AddressLookupTableNotFound,
TransactionError::InvalidAddressLookupTableOwner => {
Self::InvalidAddressLookupTableOwner
}
TransactionError::InvalidAddressLookupTableData => Self::InvalidAddressLookupTableData,
TransactionError::InvalidAddressLookupTableIndex => {
Self::InvalidAddressLookupTableIndex
}
TransactionError::InvalidRentPayingAccount => Self::InvalidRentPayingAccount,
}
}
}
@@ -488,7 +510,15 @@ impl SimplePostgresClient {
) -> Result<Statement, AccountsDbPluginError> {
let stmt = "INSERT INTO transaction AS txn (signature, is_vote, slot, message_type, legacy_message, \
v0_loaded_message, signatures, message_hash, meta, updated_on) \
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)";
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) \
ON CONFLICT (slot, signature) DO UPDATE SET is_vote=excluded.is_vote, \
message_type=excluded.message_type, \
legacy_message=excluded.legacy_message, \
v0_loaded_message=excluded.v0_loaded_message, \
signatures=excluded.signatures, \
message_hash=excluded.message_hash, \
meta=excluded.meta, \
updated_on=excluded.updated_on";

let stmt = client.prepare(stmt);

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-banking-bench"
version = "1.10.0"
version = "1.9.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
solana-core = { path = "../core", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-ledger = { path = "../ledger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-poh = { path = "../poh", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-core = { path = "../core", version = "=1.9.5" }
solana-gossip = { path = "../gossip", version = "=1.9.5" }
solana-ledger = { path = "../ledger", version = "=1.9.5" }
solana-logger = { path = "../logger", version = "=1.9.5" }
solana-measure = { path = "../measure", version = "=1.9.5" }
solana-perf = { path = "../perf", version = "=1.9.5" }
solana-poh = { path = "../poh", version = "=1.9.5" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-streamer = { path = "../streamer", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-version = { path = "../version", version = "=1.9.5" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -1,6 +1,6 @@
[package]
name = "solana-banks-client"
version = "1.10.0"
version = "1.9.5"
description = "Solana banks client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,16 +12,17 @@ edition = "2021"
[dependencies]
borsh = "0.9.1"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
solana-program = { path = "../sdk/program", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.5" }
solana-program = { path = "../sdk/program", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
tarpc = { version = "0.27.2", features = ["full"] }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }

[dev-dependencies]
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-banks-server = { path = "../banks-server", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-banks-server = { path = "../banks-server", version = "=1.9.5" }

[lib]
crate-type = ["lib"]

banks-client/src/error.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
use {
solana_sdk::{transaction::TransactionError, transport::TransportError},
std::io,
tarpc::client::RpcError,
thiserror::Error,
};

/// Errors from BanksClient
#[derive(Error, Debug)]
pub enum BanksClientError {
#[error("client error: {0}")]
ClientError(&'static str),

#[error(transparent)]
Io(#[from] io::Error),

#[error(transparent)]
RpcError(#[from] RpcError),

#[error("transport transaction error: {0}")]
TransactionError(#[from] TransactionError),

#[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")]
SimulationError {
err: TransactionError,
logs: Vec<String>,
units_consumed: u64,
},
}

impl BanksClientError {
pub fn unwrap(&self) -> TransactionError {
match self {
BanksClientError::TransactionError(err)
| BanksClientError::SimulationError { err, .. } => err.clone(),
_ => panic!("unexpected transport error"),
}
}
}

impl From<BanksClientError> for io::Error {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::Io(err) => err,
BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
BanksClientError::TransactionError(err) => {
Self::new(io::ErrorKind::Other, err.to_string())
}
BanksClientError::SimulationError { err, .. } => {
Self::new(io::ErrorKind::Other, err.to_string())
}
}
}
}

impl From<BanksClientError> for TransportError {
fn from(err: BanksClientError) -> Self {
match err {
BanksClientError::ClientError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::Io(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::RpcError(err) => {
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
}
BanksClientError::TransactionError(err) => Self::TransactionError(err),
BanksClientError::SimulationError { err, .. } => Self::TransactionError(err),
}
}
}
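
A brief sketch of how a test might consume the new error type, assuming `BanksClientError` is in scope and `result` came from one of the preflight-enabled calls added later in this change (for example `process_transaction_with_preflight_and_commitment`):

```rust
fn report(result: Result<(), BanksClientError>) {
    match result {
        Ok(()) => println!("transaction committed"),
        Err(BanksClientError::SimulationError { err, logs, units_consumed }) => {
            // Preflight simulation failed: the server reports the error together
            // with the program logs and compute units it observed.
            eprintln!("simulation failed: {:?} ({} units consumed)", err, units_consumed);
            for line in logs {
                eprintln!("  {}", line);
            }
        }
        Err(other) => eprintln!("client/transport error: {}", other),
    }
}
```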
@@ -7,9 +7,10 @@
|
||||
|
||||
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
|
||||
use {
|
||||
crate::error::BanksClientError,
|
||||
borsh::BorshDeserialize,
|
||||
futures::{future::join_all, Future, FutureExt},
|
||||
solana_banks_interface::{BanksRequest, BanksResponse},
|
||||
futures::{future::join_all, Future, FutureExt, TryFutureExt},
|
||||
solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
|
||||
solana_program::{
|
||||
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
|
||||
rent::Rent, sysvar::Sysvar,
|
||||
@@ -22,7 +23,7 @@ use {
|
||||
transaction::{self, Transaction},
|
||||
transport,
|
||||
},
|
||||
std::io::{self, Error, ErrorKind},
|
||||
std::io,
|
||||
tarpc::{
|
||||
client::{self, NewClient, RequestDispatch},
|
||||
context::{self, Context},
|
||||
@@ -33,6 +34,8 @@ use {
|
||||
tokio_serde::formats::Bincode,
|
||||
};
|
||||
|
||||
mod error;
|
||||
|
||||
// This exists only for backward compatibility
|
||||
pub trait BanksClientExt {}
|
||||
|
||||
@@ -58,7 +61,10 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = io::Result<()>> + '_ {
|
||||
self.inner.send_transaction_with_context(ctx, transaction)
|
||||
self.inner
|
||||
.send_transaction_with_context(ctx, transaction)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
@@ -73,6 +79,8 @@ impl BanksClient {
|
||||
#[allow(deprecated)]
|
||||
self.inner
|
||||
.get_fees_with_commitment_and_context(ctx, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_transaction_status_with_context(
|
||||
@@ -82,6 +90,8 @@ impl BanksClient {
|
||||
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
|
||||
self.inner
|
||||
.get_transaction_status_with_context(ctx, signature)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_slot_with_context(
|
||||
@@ -89,7 +99,10 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
self.inner.get_slot_with_context(ctx, commitment)
|
||||
self.inner
|
||||
.get_slot_with_context(ctx, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_block_height_with_context(
|
||||
@@ -97,7 +110,10 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
self.inner.get_block_height_with_context(ctx, commitment)
|
||||
self.inner
|
||||
.get_block_height_with_context(ctx, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn process_transaction_with_commitment_and_context(
|
||||
@@ -108,6 +124,24 @@ impl BanksClient {
|
||||
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
|
||||
self.inner
|
||||
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn process_transaction_with_preflight_and_commitment_and_context(
&mut self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, BanksClientError>> + '_
{
self.inner
.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map_err(Into::into)
}

pub fn get_account_with_commitment_and_context(
@@ -118,6 +152,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
self.inner
.get_account_with_commitment_and_context(ctx, address, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}

/// Send a transaction and return immediately. The server will resend the
@@ -148,9 +184,13 @@ impl BanksClient {
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(T::id()).map(|result| {
let sysvar = result?
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
.ok_or(BanksClientError::ClientError("Sysvar not present"))
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
from_account::<T, _>(&sysvar)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
.ok_or(BanksClientError::ClientError(
"Failed to deserialize sysvar",
))
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
})
}

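For context, a hedged sketch of how `get_sysvar` is typically used from async test code; `banks_client` stands in for an already-connected client obtained elsewhere, and `Clock` is the standard `solana_sdk` sysvar type:

    use solana_banks_client::BanksClient;
    use solana_sdk::sysvar::clock::Clock;

    // Illustrative only: assumes we are inside an async context with a live client.
    async fn current_slot_example(banks_client: &mut BanksClient) -> std::io::Result<u64> {
        // get_sysvar fetches the sysvar account and deserializes it via from_account::<T, _>.
        let clock: Clock = banks_client.get_sysvar().await?;
        Ok(clock.slot)
    }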
@@ -164,7 +204,8 @@ impl BanksClient {
/// method to get both a blockhash and the blockhash's last valid slot.
#[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
self.get_latest_blockhash()
#[allow(deprecated)]
self.get_fees().map(|result| Ok(result?.1))
}

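Callers migrating off the deprecated method would switch to `get_latest_blockhash`, roughly as follows (a sketch; assumes an async context and a connected client):

    use solana_banks_client::BanksClient;
    use solana_sdk::hash::Hash;

    async fn blockhash_example(banks_client: &mut BanksClient) -> std::io::Result<Hash> {
        // Before (deprecated): banks_client.get_recent_blockhash().await
        banks_client.get_latest_blockhash().await
    }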
/// Send a transaction and return after the transaction has been rejected or
@@ -178,11 +219,60 @@ impl BanksClient {
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.map(|result| match result? {
None => {
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
}
None => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
Some(transaction_result) => Ok(transaction_result?),
})
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
}

/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been rejected or reached the given level of commitment.
pub fn process_transaction_with_preflight_and_commitment(
&mut self,
transaction: Transaction,
commitment: CommitmentLevel,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
let mut ctx = context::current();
ctx.deadline += Duration::from_secs(50);
self.process_transaction_with_preflight_and_commitment_and_context(
ctx,
transaction,
commitment,
)
.map(|result| match result? {
BanksTransactionResultWithSimulation {
result: None,
simulation_details: _,
} => Err(BanksClientError::ClientError(
"invalid blockhash or fee-payer",
)),
BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(simulation_details),
} => Err(BanksClientError::SimulationError {
err,
logs: simulation_details.logs,
units_consumed: simulation_details.units_consumed,
}),
BanksTransactionResultWithSimulation {
result: Some(result),
simulation_details: _,
} => result.map_err(Into::into),
})
}

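A hedged sketch of how a caller might consume the new preflight API; the `SimulationError` variant and its fields come from the error module added in this change, and the re-export path from the crate root is assumed:

    use solana_banks_client::{BanksClient, BanksClientError};
    use solana_sdk::transaction::Transaction;

    // Illustrative only: assumes an async context, a prepared `transaction`,
    // and a connected `banks_client`.
    async fn submit_with_preflight(banks_client: &mut BanksClient, transaction: Transaction) {
        match banks_client.process_transaction_with_preflight(transaction).await {
            Ok(()) => println!("transaction committed"),
            Err(BanksClientError::SimulationError { err, logs, units_consumed }) => {
                // Preflight simulation failed before the transaction was submitted.
                eprintln!("simulation failed: {:?} after {} units", err, units_consumed);
                for log in logs {
                    eprintln!("  {}", log);
                }
            }
            Err(other) => eprintln!("client error: {:?}", other),
        }
    }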
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
/// after the transaction has been finalized or rejected.
pub fn process_transaction_with_preflight(
&mut self,
transaction: Transaction,
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
self.process_transaction_with_preflight_and_commitment(
transaction,
CommitmentLevel::default(),
)
}

/// Send a transaction and return until the transaction has been finalized or rejected.
@@ -255,10 +345,12 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
let account = result?
.ok_or(BanksClientError::ClientError("Account not found"))
.map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
T::unpack_from_slice(&account.data)
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
.map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
.map_err(Into::into) // Remove this when return Err type updated to BanksClientError
})
}

@@ -269,9 +361,8 @@ impl BanksClient {
address: Pubkey,
) -> impl Future<Output = io::Result<T>> + '_ {
self.get_account(address).map(|result| {
let account =
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
T::try_from_slice(&account.data)
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
T::try_from_slice(&account.data).map_err(Into::into)
})
}

@@ -330,7 +421,8 @@ impl BanksClient {
.map(|result| {
result?
.map(|x| x.0)
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))
.ok_or(BanksClientError::ClientError("valid blockhash not found"))
.map_err(Into::into)
})
}

@@ -348,6 +440,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
self.inner
.get_latest_blockhash_with_commitment_and_context(ctx, commitment)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}

pub fn get_fee_for_message_with_commitment_and_context(
@@ -358,6 +452,8 @@ impl BanksClient {
) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
self.inner
.get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
.map_err(Into::into)
}
}

@@ -399,7 +495,7 @@ mod tests {
}

#[test]
fn test_banks_server_transfer_via_server() -> io::Result<()> {
fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
// This test shows the preferred way to interact with BanksServer.
// It creates a runtime explicitly (no globals via tokio macros) and calls
// `runtime.block_on()` just once, to run all the async code.
@@ -432,7 +528,7 @@ mod tests {
}

#[test]
fn test_banks_server_transfer_via_client() -> io::Result<()> {
fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
// The caller may not want to hold the connection open until the transaction
// is processed (or blockhash expires). In this test, we verify the
// server-side functionality is available to the client.
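The test comments above describe the preferred pattern: build the Tokio runtime explicitly and call `block_on` once for all async work. A minimal sketch of that shape, independent of the Solana-specific setup:

    use std::io;

    fn main() -> io::Result<()> {
        // One explicitly-constructed runtime, no #[tokio::main] / #[tokio::test] globals.
        let runtime = tokio::runtime::Runtime::new()?;
        // All async work happens inside this single block_on call.
        runtime.block_on(async {
            // ... connect a BanksClient, send transactions, await results ...
            Ok::<(), io::Error>(())
        })
    }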
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-interface"
version = "1.10.0"
version = "1.9.5"
description = "Solana banks RPC interface"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-banks-interface"
edition = "2021"

[dependencies]
serde = { version = "1.0.131", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
tarpc = { version = "0.26.2", features = ["full"] }
serde = { version = "1.0.130", features = ["derive"] }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
tarpc = { version = "0.27.2", features = ["full"] }

[lib]
crate-type = ["lib"]
@@ -30,6 +30,19 @@ pub struct TransactionStatus {
pub confirmation_status: Option<TransactionConfirmationStatus>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransactionSimulationDetails {
pub logs: Vec<String>,
pub units_consumed: u64,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BanksTransactionResultWithSimulation {
pub result: Option<transaction::Result<()>>,
pub simulation_details: Option<TransactionSimulationDetails>,
}

#[tarpc::service]
pub trait Banks {
async fn send_transaction_with_context(transaction: Transaction);
@@ -44,6 +57,10 @@ pub trait Banks {
-> Option<TransactionStatus>;
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
async fn process_transaction_with_preflight_and_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation;
async fn process_transaction_with_commitment_and_context(
transaction: Transaction,
commitment: CommitmentLevel,
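The two `Option` fields of `BanksTransactionResultWithSimulation` encode three outcomes that the client-side match shown earlier relies on. A small sketch of that combination, using simplified, hypothetical stand-ins for `transaction::Result` and `TransactionSimulationDetails` rather than the real types:

    // Simplified stand-ins, used only to show how the two Option fields combine.
    struct SimDetails { logs: Vec<String>, units_consumed: u64 }
    struct ResultWithSim {
        result: Option<Result<(), String>>,
        simulation_details: Option<SimDetails>,
    }

    fn describe(r: &ResultWithSim) -> &'static str {
        match (&r.result, &r.simulation_details) {
            // Never processed: expired blockhash or unknown fee-payer.
            (None, _) => "dropped before processing",
            // Preflight simulation failed; logs and units_consumed say why.
            (Some(Err(_)), Some(_)) => "rejected by preflight simulation",
            // Submitted for real; the inner result is the transaction outcome.
            (Some(_), _) => "processed by the bank",
        }
    }

    fn main() {
        let sim_failure = ResultWithSim {
            result: Some(Err("custom program error".to_string())),
            simulation_details: Some(SimDetails { logs: vec![], units_consumed: 0 }),
        };
        println!("{}", describe(&sim_failure));
    }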
@@ -1,6 +1,6 @@
[package]
name = "solana-banks-server"
version = "1.10.0"
version = "1.9.5"
description = "Solana banks server"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ edition = "2021"
[dependencies]
bincode = "1.3.3"
futures = "0.3"
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
tarpc = { version = "0.26.2", features = ["full"] }
solana-banks-interface = { path = "../banks-interface", version = "=1.9.5" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.5" }
tarpc = { version = "0.27.2", features = ["full"] }
tokio = { version = "1", features = ["full"] }
tokio-serde = { version = "0.8", features = ["bincode"] }
tokio-stream = "0.1"
@@ -2,9 +2,14 @@ use {
bincode::{deserialize, serialize},
futures::{future, prelude::stream::StreamExt},
solana_banks_interface::{
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
},
solana_runtime::{
bank::{Bank, TransactionSimulationResult},
bank_forks::BankForks,
commitment::BlockCommitmentCache,
},
solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache},
solana_sdk::{
account::Account,
clock::Slot,
@@ -15,7 +20,7 @@ use {
message::{Message, SanitizedMessage},
pubkey::Pubkey,
signature::Signature,
transaction::{self, Transaction},
transaction::{self, SanitizedTransaction, Transaction},
},
solana_send_transaction_service::{
send_transaction_service::{SendTransactionService, TransactionInfo},
@@ -35,7 +40,7 @@ use {
tarpc::{
context::Context,
serde_transport::tcp,
server::{self, Channel, Incoming},
server::{self, incoming::Incoming, Channel},
transport::{self, channel::UnboundedChannel},
ClientMessage, Response,
},
@@ -242,6 +247,47 @@ impl Banks for BanksServer {
self.bank(commitment).block_height()
}

async fn process_transaction_with_preflight_and_commitment_and_context(
self,
ctx: Context,
transaction: Transaction,
commitment: CommitmentLevel,
) -> BanksTransactionResultWithSimulation {
let sanitized_transaction =
match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) {
Err(err) => {
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: None,
};
}
Ok(tx) => tx,
};
if let TransactionSimulationResult {
result: Err(err),
logs,
post_simulation_accounts: _,
units_consumed,
} = self
.bank(commitment)
.simulate_transaction_unchecked(sanitized_transaction)
{
return BanksTransactionResultWithSimulation {
result: Some(Err(err)),
simulation_details: Some(TransactionSimulationDetails {
logs,
units_consumed,
}),
};
}
BanksTransactionResultWithSimulation {
result: self
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
.await,
simulation_details: None,
}
}

async fn process_transaction_with_commitment_and_context(
self,
_: Context,
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -10,11 +10,11 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.9.5" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.5" }
|
||||
solana-version = { path = "../version", version = "=1.9.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -14,23 +14,23 @@ log = "0.4.14"
|
||||
rayon = "1.5.1"
|
||||
serde_json = "1.0.72"
|
||||
serde_yaml = "0.8.21"
|
||||
solana-core = { path = "../core", version = "=1.10.0" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.10.0" }
|
||||
solana-client = { path = "../client", version = "=1.10.0" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.10.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
solana-core = { path = "../core", version = "=1.9.5" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.9.5" }
|
||||
solana-client = { path = "../client", version = "=1.9.5" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.9.5" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.9.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.5" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.5" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.9.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.5" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.5" }
|
||||
solana-version = { path = "../version", version = "=1.9.5" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.5.1"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.9.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
bloom/Cargo.toml (new file)
@@ -0,0 +1,32 @@
[package]
name = "solana-bloom"
version = "1.9.5"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2021"

[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
rand = "0.7.0"
serde = { version = "1.0.133", features = ["rc"] }
rayon = "1.5.1"
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.5" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
log = "0.4.14"

[lib]
crate-type = ["lib"]
name = "solana_bloom"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[build-dependencies]
rustc_version = "0.4"
@@ -5,7 +5,7 @@ use {
bv::BitVec,
fnv::FnvHasher,
rand::Rng,
solana_runtime::bloom::{AtomicBloom, Bloom, BloomHashIndex},
solana_bloom::bloom::{AtomicBloom, Bloom, BloomHashIndex},
solana_sdk::{
hash::{hash, Hash},
signature::Signature,
bloom/build.rs (new symbolic link)
@@ -0,0 +1 @@
../frozen-abi/build.rs
@@ -101,7 +101,7 @@ impl<T: BloomHashIndex> Bloom<T> {
}
}
fn pos(&self, key: &T, k: u64) -> u64 {
key.hash_at_index(k) % self.bits.len()
key.hash_at_index(k).wrapping_rem(self.bits.len())
}
pub fn clear(&mut self) {
self.bits = BitVec::new_fill(false, self.bits.len());
@@ -111,7 +111,7 @@ impl<T: BloomHashIndex> Bloom<T> {
for k in &self.keys {
let pos = self.pos(key, *k);
if !self.bits.get(pos) {
self.num_bits_set += 1;
self.num_bits_set = self.num_bits_set.saturating_add(1);
self.bits.set(pos, true);
}
}
@@ -164,21 +164,26 @@ impl<T: BloomHashIndex> From<Bloom<T>> for AtomicBloom<T> {

impl<T: BloomHashIndex> AtomicBloom<T> {
fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) {
let pos = key.hash_at_index(hash_index) % self.num_bits;
let pos = key.hash_at_index(hash_index).wrapping_rem(self.num_bits);
// Divide by 64 to figure out which of the
// AtomicU64 bit chunks we need to modify.
let index = pos >> 6;
let index = pos.wrapping_shr(6);
// (pos & 63) is equivalent to mod 64 so that we can find
// the index of the bit within the AtomicU64 to modify.
let mask = 1u64 << (pos & 63);
let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap());
(index as usize, mask)
}

pub fn add(&self, key: &T) {
/// Adds an item to the bloom filter and returns true if the item
/// was not in the filter before.
pub fn add(&self, key: &T) -> bool {
let mut added = false;
for k in &self.keys {
let (index, mask) = self.pos(key, *k);
self.bits[index].fetch_or(mask, Ordering::Relaxed);
let prev_val = self.bits[index].fetch_or(mask, Ordering::Relaxed);
added = added || prev_val & mask == 0u64;
}
added
}

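To make the index/mask arithmetic above concrete, a quick worked example: for bit position 130, the word index is 130 >> 6 = 2 and the mask selects bit 130 & 63 = 2 inside that AtomicU64.

    fn main() {
        let pos: u64 = 130;
        // Which AtomicU64 chunk holds this bit (divide by 64).
        let index = pos.wrapping_shr(6);
        // Which bit inside that chunk (pos mod 64), expressed as a mask.
        let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap());
        assert_eq!(index, 2);
        assert_eq!(mask, 0b100); // bit 2 set
        println!("index={}, mask={:#x}", index, mask);
    }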
pub fn contains(&self, key: &T) -> bool {
@@ -189,6 +194,12 @@ impl<T: BloomHashIndex> AtomicBloom<T> {
})
}

pub fn clear_for_tests(&mut self) {
self.bits.iter().for_each(|bit| {
bit.store(0u64, Ordering::Relaxed);
});
}

// Only for tests and simulations.
pub fn mock_clone(&self) -> Self {
Self {
@@ -320,7 +331,9 @@ mod test {
assert_eq!(bloom.keys.len(), 3);
assert_eq!(bloom.num_bits, 6168);
assert_eq!(bloom.bits.len(), 97);
hash_values.par_iter().for_each(|v| bloom.add(v));
hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
let bloom: Bloom<Hash> = bloom.into();
assert_eq!(bloom.keys.len(), 3);
assert_eq!(bloom.bits.len(), 6168);
@@ -362,7 +375,9 @@ mod test {
}
// Round trip, re-inserting the same hash values.
let bloom: AtomicBloom<_> = bloom.into();
hash_values.par_iter().for_each(|v| bloom.add(v));
hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
for hash_value in &hash_values {
assert!(bloom.contains(hash_value));
}
@@ -380,7 +395,9 @@ mod test {
let bloom: AtomicBloom<_> = bloom.into();
assert_eq!(bloom.num_bits, 9731);
assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
more_hash_values.par_iter().for_each(|v| bloom.add(v));
more_hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
for hash_value in &hash_values {
assert!(bloom.contains(hash_value));
}
bloom/src/lib.rs (new file)
@@ -0,0 +1,5 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;

#[macro_use]
extern crate solana_frozen_abi_macro;
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-bucket-map"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
description = "solana-bucket-map"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-bucket-map"
|
||||
@@ -12,11 +12,11 @@ edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
rayon = "1.5.0"
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.5" }
|
||||
memmap2 = "0.5.0"
|
||||
log = { version = "0.4.11" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.5" }
|
||||
rand = "0.7.0"
|
||||
fs_extra = "1.2.0"
|
||||
tempfile = "3.2.0"
|
||||
|
@@ -9,5 +9,8 @@ for a in "$@"; do
|
||||
fi
|
||||
done
|
||||
|
||||
set -x
|
||||
set -ex
|
||||
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
|
||||
"$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
|
||||
fi
|
||||
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"
|
||||
|
@@ -256,7 +256,15 @@ EOF
|
||||
|
||||
command_step "local-cluster" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
|
||||
50
|
||||
40
|
||||
|
||||
command_step "local-cluster-flakey" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
|
||||
10
|
||||
|
||||
command_step "local-cluster-slow" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
|
||||
30
|
||||
}
|
||||
|
||||
pull_or_push_steps() {
|
||||
|
@@ -19,3 +19,8 @@ steps:
|
||||
timeout_in_minutes: 240
|
||||
name: "publish crate"
|
||||
branches: "!master"
|
||||
- command: "ci/publish-tarball.sh"
|
||||
agents:
|
||||
- "queue=release-build-aarch64-apple-darwin"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball (aarch64-apple-darwin)"
|
||||
|
ci/env.sh
@@ -23,6 +23,9 @@ if [[ -n $CI ]]; then
|
||||
elif [[ -n $BUILDKITE ]]; then
|
||||
export CI_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
|
||||
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
|
||||
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
|
||||
fi
|
||||
export CI_COMMIT=$BUILDKITE_COMMIT
|
||||
export CI_JOB_ID=$BUILDKITE_JOB_ID
|
||||
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
|
||||
@@ -35,7 +38,18 @@ if [[ -n $CI ]]; then
|
||||
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_PULL_REQUEST=
|
||||
fi
|
||||
export CI_OS_NAME=linux
|
||||
|
||||
case "$(uname -s)" in
|
||||
Linux)
|
||||
export CI_OS_NAME=linux
|
||||
;;
|
||||
Darwin)
|
||||
export CI_OS_NAME=osx
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
|
||||
# The solana-secondary pipeline should use the slug of the pipeline that
|
||||
# triggered it
|
||||
|
@@ -39,7 +39,11 @@ fi
|
||||
|
||||
case "$CI_OS_NAME" in
|
||||
osx)
|
||||
TARGET=x86_64-apple-darwin
|
||||
_cputype="$(uname -m)"
|
||||
if [[ $_cputype = arm64 ]]; then
|
||||
_cputype=aarch64
|
||||
fi
|
||||
TARGET=${_cputype}-apple-darwin
|
||||
;;
|
||||
linux)
|
||||
TARGET=x86_64-unknown-linux-gnu
|
||||
|
@@ -27,6 +27,8 @@ steps+=(test-stable-perf)
|
||||
steps+=(test-downstream-builds)
|
||||
steps+=(test-bench)
|
||||
steps+=(test-local-cluster)
|
||||
steps+=(test-local-cluster-flakey)
|
||||
steps+=(test-local-cluster-slow)
|
||||
|
||||
step_index=0
|
||||
if [[ -n "$1" ]]; then
|
||||
|
ci/test-local-cluster-flakey.sh (new symbolic link)
@@ -0,0 +1 @@
test-stable.sh
ci/test-local-cluster-slow.sh (new symbolic link)
@@ -0,0 +1 @@
test-stable.sh
@@ -100,7 +100,17 @@ test-stable-perf)
|
||||
;;
|
||||
test-local-cluster)
|
||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
exit 0
|
||||
;;
|
||||
test-local-cluster-flakey)
|
||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
exit 0
|
||||
;;
|
||||
test-local-cluster-slow)
|
||||
_ "$cargo" stable build --release --bins ${V:+--verbose}
|
||||
_ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow ${V:+--verbose} -- --nocapture --test-threads=1
|
||||
exit 0
|
||||
;;
|
||||
test-wasm)
|
||||
|
@@ -19,13 +19,24 @@ upload-ci-artifact() {
|
||||
upload-s3-artifact() {
|
||||
echo "--- artifact: $1 to $2"
|
||||
(
|
||||
set -x
|
||||
docker run \
|
||||
--rm \
|
||||
--env AWS_ACCESS_KEY_ID \
|
||||
--env AWS_SECRET_ACCESS_KEY \
|
||||
--volume "$PWD:/solana" \
|
||||
eremite/aws-cli:2018.12.18 \
|
||||
args=(
|
||||
--rm
|
||||
--env AWS_ACCESS_KEY_ID
|
||||
--env AWS_SECRET_ACCESS_KEY
|
||||
--volume "$PWD:/solana"
|
||||
|
||||
)
|
||||
if [[ $(uname -m) = arm64 ]]; then
|
||||
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
|
||||
args+=(
|
||||
--platform linux/amd64
|
||||
)
|
||||
fi
|
||||
args+=(
|
||||
eremite/aws-cli:2018.12.18
|
||||
/usr/bin/s3cmd --acl-public put "$1" "$2"
|
||||
)
|
||||
set -x
|
||||
docker run "${args[@]}"
|
||||
)
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,9 +12,9 @@ edition = "2021"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "5.0"
|
||||
solana-perf = { path = "../perf", version = "=1.10.0" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.5" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.5" }
|
||||
thiserror = "1.0.30"
|
||||
tiny-bip39 = "0.8.2"
|
||||
uriparse = "0.6.3"
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -12,7 +12,7 @@ documentation = "https://docs.rs/solana-cli-config"
|
||||
[dependencies]
|
||||
dirs-next = "2.0.0"
|
||||
lazy_static = "1.4.0"
|
||||
serde = "1.0.131"
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
serde_yaml = "0.8.21"
|
||||
url = "2.2.2"
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-cli-output"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -17,14 +17,14 @@ console = "0.15.0"
|
||||
humantime = "2.0.1"
|
||||
Inflector = "0.11.4"
|
||||
indicatif = "0.16.2"
|
||||
serde = "1.0.131"
|
||||
serde = "1.0.130"
|
||||
serde_json = "1.0.72"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
|
||||
solana-client = { path = "../client", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.9.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.9.5" }
|
||||
solana-client = { path = "../client", version = "=1.9.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.9.5" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
|
@@ -99,7 +99,7 @@ impl OutputFormat {
|
||||
pub struct CliAccount {
|
||||
#[serde(flatten)]
|
||||
pub keyed_account: RpcKeyedAccount,
|
||||
#[serde(skip_serializing)]
|
||||
#[serde(skip_serializing, skip_deserializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
|
||||
|
@@ -139,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
|
||||
} else {
|
||||
"-"
|
||||
},
|
||||
if message.is_writable(index, /*demote_program_write_locks=*/ true) {
|
||||
if message.is_writable(index) {
|
||||
"w" // comment for consistent rust fmt (no joking; lol)
|
||||
} else {
|
||||
"-"
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -23,32 +23,32 @@ num-traits = "0.2"
|
||||
pretty-hex = "0.2.1"
|
||||
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
semver = "1.0.4"
|
||||
serde = "1.0.131"
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.72"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.10.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.10.0" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.10.0" }
|
||||
solana-client = { path = "../client", version = "=1.10.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.10.0" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
|
||||
solana_rbpf = "=0.2.18"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.9.5" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.9.5" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.9.5" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.9.5" }
|
||||
solana-client = { path = "../client", version = "=1.9.5" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.9.5" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.9.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.5" }
|
||||
solana-program-runtime = { path = "../program-runtime", version = "=1.9.5" }
|
||||
solana_rbpf = "=0.2.21"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
|
||||
solana-version = { path = "../version", version = "=1.9.5" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.9.5" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0.30"
|
||||
tiny-bip39 = "0.8.2"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.5" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.9.5" }
|
||||
tempfile = "3.2.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -98,10 +98,7 @@ pub fn get_fee_for_messages(
|
||||
) -> Result<u64, CliError> {
|
||||
Ok(messages
|
||||
.iter()
|
||||
.map(|message| {
|
||||
println!("msg {:?}", message.recent_blockhash);
|
||||
rpc_client.get_fee_for_message(message)
|
||||
})
|
||||
.map(|message| rpc_client.get_fee_for_message(message))
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.iter()
|
||||
.sum())
|
||||
|
@@ -5,7 +5,7 @@ use {
|
||||
},
|
||||
clap::{App, AppSettings, Arg, ArgMatches, SubCommand},
|
||||
console::style,
|
||||
serde::{Deserialize, Serialize},
|
||||
serde::{Deserialize, Deserializer, Serialize, Serializer},
|
||||
solana_clap_utils::{input_parsers::*, input_validators::*, keypair::*},
|
||||
solana_cli_output::{QuietDisplay, VerboseDisplay},
|
||||
solana_client::{client_error::ClientError, rpc_client::RpcClient},
|
||||
@@ -23,6 +23,7 @@ use {
|
||||
cmp::Ordering,
|
||||
collections::{HashMap, HashSet},
|
||||
fmt,
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
},
|
||||
};
|
||||
@@ -45,7 +46,7 @@ pub enum FeatureCliCommand {
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")]
|
||||
pub enum CliFeatureStatus {
|
||||
Inactive,
|
||||
@@ -53,7 +54,29 @@ pub enum CliFeatureStatus {
Active(Slot),
}

#[derive(Serialize, Deserialize)]
impl PartialOrd for CliFeatureStatus {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}

impl Ord for CliFeatureStatus {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(Self::Inactive, Self::Inactive) => Ordering::Equal,
(Self::Inactive, _) => Ordering::Greater,
(_, Self::Inactive) => Ordering::Less,
(Self::Pending, Self::Pending) => Ordering::Equal,
(Self::Pending, _) => Ordering::Greater,
(_, Self::Pending) => Ordering::Less,
(Self::Active(self_active_slot), Self::Active(other_active_slot)) => {
self_active_slot.cmp(other_active_slot)
}
}
}
}

#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct CliFeature {
pub id: String,
@@ -62,11 +85,28 @@ pub struct CliFeature {
pub status: CliFeatureStatus,
}

impl PartialOrd for CliFeature {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}

impl Ord for CliFeature {
fn cmp(&self, other: &Self) -> Ordering {
match self.status.cmp(&other.status) {
Ordering::Equal => self.id.cmp(&other.id),
ordering => ordering,
}
}
}

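Under the `Ord` implementation above, active features sort before pending ones and inactive features sort last, which is what the later `features.sort_unstable()` call relies on. A hedged illustration of that ordering, re-declaring a trimmed local copy of the enum rather than importing the CLI crate:

    use std::cmp::Ordering;

    // Local stand-in for CliFeatureStatus with the same ordering rules as above.
    #[derive(Debug, PartialEq, Eq)]
    enum Status { Inactive, Pending, Active(u64) }

    impl PartialOrd for Status {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
    }

    impl Ord for Status {
        fn cmp(&self, other: &Self) -> Ordering {
            match (self, other) {
                (Self::Inactive, Self::Inactive) => Ordering::Equal,
                (Self::Inactive, _) => Ordering::Greater,
                (_, Self::Inactive) => Ordering::Less,
                (Self::Pending, Self::Pending) => Ordering::Equal,
                (Self::Pending, _) => Ordering::Greater,
                (_, Self::Pending) => Ordering::Less,
                (Self::Active(a), Self::Active(b)) => a.cmp(b),
            }
        }
    }

    fn main() {
        let mut statuses = vec![Status::Inactive, Status::Active(42), Status::Pending, Status::Active(7)];
        statuses.sort_unstable();
        // Active features (ordered by slot) come first, then pending, then inactive.
        assert_eq!(
            statuses,
            vec![Status::Active(7), Status::Active(42), Status::Pending, Status::Inactive]
        );
    }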
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliFeatures {
|
||||
pub features: Vec<CliFeature>,
|
||||
pub feature_activation_allowed: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub cluster_feature_sets: Option<CliClusterFeatureSets>,
|
||||
#[serde(skip)]
|
||||
pub inactive: bool,
|
||||
}
|
||||
@@ -93,11 +133,16 @@ impl fmt::Display for CliFeatures {
|
||||
CliFeatureStatus::Inactive => style("inactive".to_string()).red(),
|
||||
CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(),
|
||||
CliFeatureStatus::Active(activation_slot) =>
|
||||
style(format!("active since slot {}", activation_slot)).green(),
|
||||
style(format!("active since slot {:>9}", activation_slot)).green(),
|
||||
},
|
||||
feature.description,
|
||||
)?;
|
||||
}
|
||||
|
||||
if let Some(feature_sets) = &self.cluster_feature_sets {
|
||||
write!(f, "{}", feature_sets)?;
|
||||
}
|
||||
|
||||
if self.inactive && !self.feature_activation_allowed {
|
||||
writeln!(
|
||||
f,
|
||||
@@ -114,6 +159,191 @@ impl fmt::Display for CliFeatures {
|
||||
impl QuietDisplay for CliFeatures {}
|
||||
impl VerboseDisplay for CliFeatures {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliClusterFeatureSets {
|
||||
pub tool_feature_set: u32,
|
||||
pub feature_sets: Vec<CliFeatureSet>,
|
||||
#[serde(skip)]
|
||||
pub stake_allowed: bool,
|
||||
#[serde(skip)]
|
||||
pub rpc_allowed: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliClusterFeatureSets {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let mut tool_feature_set_matches_cluster = false;
|
||||
|
||||
let software_versions_title = "Software Version";
|
||||
let feature_set_title = "Feature Set";
|
||||
let stake_percent_title = "Stake";
|
||||
let rpc_percent_title = "RPC";
|
||||
let mut max_software_versions_len = software_versions_title.len();
|
||||
let mut max_feature_set_len = feature_set_title.len();
|
||||
let mut max_stake_percent_len = stake_percent_title.len();
|
||||
let mut max_rpc_percent_len = rpc_percent_title.len();
|
||||
|
||||
let feature_sets: Vec<_> = self
|
||||
.feature_sets
|
||||
.iter()
|
||||
.map(|feature_set_info| {
|
||||
let me = if self.tool_feature_set == feature_set_info.feature_set {
|
||||
tool_feature_set_matches_cluster = true;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
};
|
||||
let software_versions: Vec<_> = feature_set_info
|
||||
.software_versions
|
||||
.iter()
|
||||
.map(ToString::to_string)
|
||||
.collect();
|
||||
let software_versions = software_versions.join(", ");
|
||||
let feature_set = if feature_set_info.feature_set == 0 {
|
||||
"unknown".to_string()
|
||||
} else {
|
||||
feature_set_info.feature_set.to_string()
|
||||
};
|
||||
let stake_percent = format!("{:.2}%", feature_set_info.stake_percent);
|
||||
let rpc_percent = format!("{:.2}%", feature_set_info.rpc_percent);
|
||||
|
||||
max_software_versions_len = max_software_versions_len.max(software_versions.len());
|
||||
max_feature_set_len = max_feature_set_len.max(feature_set.len());
|
||||
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
|
||||
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
|
||||
|
||||
(
|
||||
software_versions,
|
||||
feature_set,
|
||||
stake_percent,
|
||||
rpc_percent,
|
||||
me,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
if !tool_feature_set_matches_cluster {
|
||||
writeln!(
|
||||
f,
|
||||
"\n{}",
|
||||
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
|
||||
.bold())?;
|
||||
} else {
|
||||
if !self.stake_allowed {
|
||||
write!(
|
||||
f,
|
||||
"\n{}",
|
||||
style("To activate features the stake must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
)?;
|
||||
}
|
||||
if !self.rpc_allowed {
|
||||
write!(
|
||||
f,
|
||||
"\n{}",
|
||||
style("To activate features the RPC nodes must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
)?;
|
||||
}
|
||||
}
|
||||
writeln!(
|
||||
f,
|
||||
"\n\n{}",
|
||||
style(format!("Tool Feature Set: {}", self.tool_feature_set)).bold()
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(
|
||||
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
|
||||
max_software_versions_len,
|
||||
software_versions_title,
|
||||
max_feature_set_len,
|
||||
feature_set_title,
|
||||
max_stake_percent_len,
|
||||
stake_percent_title,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent_title,
|
||||
))
|
||||
.bold(),
|
||||
)?;
|
||||
for (software_versions, feature_set, stake_percent, rpc_percent, me) in feature_sets {
|
||||
writeln!(
|
||||
f,
|
||||
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
|
||||
max_software_versions_len,
|
||||
software_versions,
|
||||
max_feature_set_len,
|
||||
feature_set,
|
||||
max_stake_percent_len,
|
||||
stake_percent,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent,
|
||||
if me { "<-- me" } else { "" },
|
||||
)?;
|
||||
}
|
||||
writeln!(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliClusterFeatureSets {}
|
||||
impl VerboseDisplay for CliClusterFeatureSets {}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliFeatureSet {
|
||||
software_versions: Vec<CliVersion>,
|
||||
feature_set: u32,
|
||||
stake_percent: f64,
|
||||
rpc_percent: f32,
|
||||
}
|
||||
|
||||
#[derive(Eq, PartialEq, Ord, PartialOrd)]
|
||||
struct CliVersion(Option<semver::Version>);
|
||||
|
||||
impl fmt::Display for CliVersion {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let s = match &self.0 {
|
||||
None => "unknown".to_string(),
|
||||
Some(version) => version.to_string(),
|
||||
};
|
||||
write!(f, "{}", s)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for CliVersion {
|
||||
type Err = semver::Error;
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let version_option = if s == "unknown" {
|
||||
None
|
||||
} else {
|
||||
Some(semver::Version::from_str(s)?)
|
||||
};
|
||||
Ok(CliVersion(version_option))
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for CliVersion {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_str(&self.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for CliVersion {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let s: &str = Deserialize::deserialize(deserializer)?;
|
||||
CliVersion::from_str(s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait FeatureSubCommands {
|
||||
fn feature_subcommands(self) -> Self;
|
||||
}
|
||||
@@ -330,7 +560,10 @@ fn feature_set_stats(rpc_client: &RpcClient) -> Result<FeatureSetStats, ClientEr
|
||||
}
|
||||
|
||||
// Feature activation is only allowed when 95% of the active stake is on the current feature set
|
||||
fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<bool, ClientError> {
|
||||
fn feature_activation_allowed(
|
||||
rpc_client: &RpcClient,
|
||||
quiet: bool,
|
||||
) -> Result<(bool, Option<CliClusterFeatureSets>), ClientError> {
|
||||
let my_feature_set = solana_version::Version::default().feature_set;
|
||||
|
||||
let feature_set_stats = feature_set_stats(rpc_client)?;
|
||||
@@ -346,54 +579,43 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
|
||||
)
|
||||
.unwrap_or((false, false));
|
||||
|
||||
if !quiet {
|
||||
if feature_set_stats.get(&my_feature_set).is_none() {
|
||||
println!(
|
||||
"{}",
|
||||
style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster")
|
||||
.bold());
|
||||
} else {
|
||||
if !stake_allowed {
|
||||
print!(
|
||||
"\n{}",
|
||||
style("To activate features the stake must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
);
|
||||
}
|
||||
if !rpc_allowed {
|
||||
print!(
|
||||
"\n{}",
|
||||
style("To activate features the RPC nodes must be >= 95%")
|
||||
.bold()
|
||||
.red()
|
||||
);
|
||||
}
|
||||
}
|
||||
println!(
|
||||
"\n\n{}",
|
||||
style(format!("Tool Feature Set: {}", my_feature_set)).bold()
|
||||
);
|
||||
|
||||
let mut feature_set_stats = feature_set_stats.into_iter().collect::<Vec<_>>();
|
||||
feature_set_stats.sort_by(|l, r| {
|
||||
match l.1.software_versions[0]
|
||||
.cmp(&r.1.software_versions[0])
|
||||
let cluster_feature_sets = if quiet {
|
||||
None
|
||||
} else {
|
||||
let mut feature_sets = feature_set_stats
|
||||
.into_iter()
|
||||
.map(
|
||||
|(
|
||||
feature_set,
|
||||
FeatureSetStatsEntry {
|
||||
stake_percent,
|
||||
rpc_nodes_percent: rpc_percent,
|
||||
software_versions,
|
||||
},
|
||||
)| {
|
||||
CliFeatureSet {
|
||||
software_versions: software_versions.into_iter().map(CliVersion).collect(),
|
||||
feature_set,
|
||||
stake_percent,
|
||||
rpc_percent,
|
||||
}
|
||||
},
|
||||
)
|
||||
.collect::<Vec<_>>();
|
||||
feature_sets.sort_by(|l, r| {
|
||||
match l.software_versions[0]
|
||||
.cmp(&r.software_versions[0])
|
||||
.reverse()
|
||||
{
|
||||
Ordering::Equal => {
|
||||
match l
|
||||
.1
|
||||
.stake_percent
|
||||
.partial_cmp(&r.1.stake_percent)
|
||||
.partial_cmp(&r.stake_percent)
|
||||
.unwrap()
|
||||
.reverse()
|
||||
{
|
||||
Ordering::Equal => {
|
||||
l.1.rpc_nodes_percent
|
||||
.partial_cmp(&r.1.rpc_nodes_percent)
|
||||
.unwrap()
|
||||
.reverse()
|
||||
l.rpc_percent.partial_cmp(&r.rpc_percent).unwrap().reverse()
|
||||
}
|
||||
o => o,
|
||||
}
|
||||
@@ -401,96 +623,15 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
|
||||
o => o,
|
||||
}
|
||||
});
|
||||
Some(CliClusterFeatureSets {
|
||||
tool_feature_set: my_feature_set,
|
||||
feature_sets,
|
||||
stake_allowed,
|
||||
rpc_allowed,
|
||||
})
|
||||
};
|
||||
|
||||
let software_versions_title = "Software Version";
|
||||
let feature_set_title = "Feature Set";
|
||||
let stake_percent_title = "Stake";
|
||||
let rpc_percent_title = "RPC";
|
||||
let mut stats_output = Vec::new();
|
||||
let mut max_software_versions_len = software_versions_title.len();
|
||||
let mut max_feature_set_len = feature_set_title.len();
|
||||
let mut max_stake_percent_len = stake_percent_title.len();
|
||||
let mut max_rpc_percent_len = rpc_percent_title.len();
|
||||
for (
|
||||
feature_set,
|
||||
FeatureSetStatsEntry {
|
||||
stake_percent,
|
||||
rpc_nodes_percent,
|
||||
software_versions,
|
||||
},
|
||||
) in feature_set_stats.into_iter()
|
||||
{
|
||||
let me = feature_set == my_feature_set;
|
||||
let feature_set = if feature_set == 0 {
|
||||
"unknown".to_string()
|
||||
} else {
|
||||
feature_set.to_string()
|
||||
};
|
||||
let stake_percent = format!("{:.2}%", stake_percent);
|
||||
let rpc_percent = format!("{:.2}%", rpc_nodes_percent);
|
||||
|
||||
let mut has_unknown = false;
|
||||
let mut software_versions = software_versions
|
||||
.iter()
|
||||
.filter_map(|v| {
|
||||
if v.is_none() {
|
||||
has_unknown = true;
|
||||
}
|
||||
v.as_ref()
|
||||
})
|
||||
.map(ToString::to_string)
|
||||
.collect::<Vec<_>>();
|
||||
if has_unknown {
|
||||
software_versions.push("unknown".to_string());
|
||||
}
|
||||
let software_versions = software_versions.join(", ");
|
||||
max_software_versions_len = max_software_versions_len.max(software_versions.len());
|
||||
|
||||
max_feature_set_len = max_feature_set_len.max(feature_set.len());
|
||||
max_stake_percent_len = max_stake_percent_len.max(stake_percent.len());
|
||||
max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len());
|
||||
|
||||
stats_output.push((
|
||||
software_versions,
|
||||
feature_set,
|
||||
stake_percent,
|
||||
rpc_percent,
|
||||
me,
|
||||
));
|
||||
}
|
||||
println!(
|
||||
"{}",
|
||||
style(format!(
|
||||
"{1:<0$} {3:<2$} {5:<4$} {7:<6$}",
|
||||
max_software_versions_len,
|
||||
software_versions_title,
|
||||
max_feature_set_len,
|
||||
feature_set_title,
|
||||
max_stake_percent_len,
|
||||
stake_percent_title,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent_title,
|
||||
))
|
||||
.bold(),
|
||||
);
|
||||
for (software_versions, feature_set, stake_percent, rpc_percent, me) in stats_output {
|
||||
println!(
|
||||
"{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}",
|
||||
max_software_versions_len,
|
||||
software_versions,
|
||||
max_feature_set_len,
|
||||
feature_set,
|
||||
max_stake_percent_len,
|
||||
stake_percent,
|
||||
max_rpc_percent_len,
|
||||
rpc_percent,
|
||||
if me { "<-- me" } else { "" },
|
||||
);
|
||||
}
|
||||
println!();
|
||||
}
|
||||
|
||||
Ok(stake_allowed && rpc_allowed)
|
||||
Ok((stake_allowed && rpc_allowed, cluster_feature_sets))
|
||||
}
|
||||
|
||||
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
|
||||
@@ -550,10 +691,14 @@ fn process_status(
|
||||
});
|
||||
}
|
||||
|
||||
let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?;
|
||||
features.sort_unstable();
|
||||
|
||||
let (feature_activation_allowed, cluster_feature_sets) =
|
||||
feature_activation_allowed(rpc_client, features.len() <= 1)?;
|
||||
let feature_set = CliFeatures {
|
||||
features,
|
||||
feature_activation_allowed,
|
||||
cluster_feature_sets,
|
||||
inactive,
|
||||
};
|
||||
Ok(config.output_format.formatted_string(&feature_set))
|
||||
@@ -577,7 +722,7 @@ fn process_activate(
|
||||
}
|
||||
}
|
||||
|
||||
if !feature_activation_allowed(rpc_client, false)? {
|
||||
if !feature_activation_allowed(rpc_client, false)?.0 {
|
||||
match force {
|
||||
ForceActivation::Almost =>
|
||||
return Err("Add force argument once more to override the sanity check to force feature activation ".into()),
|
||||
|
@@ -16,6 +16,7 @@ use {
|
||||
pub enum SpendAmount {
|
||||
All,
|
||||
Some(u64),
|
||||
RentExempt,
|
||||
}
|
||||
|
||||
impl Default for SpendAmount {
|
||||
@@ -90,6 +91,7 @@ where
|
||||
0,
|
||||
from_pubkey,
|
||||
fee_pubkey,
|
||||
0,
|
||||
build_message,
|
||||
)?;
|
||||
Ok((message, spend))
|
||||
@@ -97,6 +99,12 @@ where
|
||||
let from_balance = rpc_client
|
||||
.get_balance_with_commitment(from_pubkey, commitment)?
|
||||
.value;
|
||||
let from_rent_exempt_minimum = if amount == SpendAmount::RentExempt {
|
||||
let data = rpc_client.get_account_data(from_pubkey)?;
|
||||
rpc_client.get_minimum_balance_for_rent_exemption(data.len())?
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
|
||||
rpc_client,
|
||||
amount,
|
||||
@@ -104,6 +112,7 @@ where
|
||||
from_balance,
|
||||
from_pubkey,
|
||||
fee_pubkey,
|
||||
from_rent_exempt_minimum,
|
||||
build_message,
|
||||
)?;
|
||||
if from_pubkey == fee_pubkey {
|
||||
@@ -140,6 +149,7 @@ fn resolve_spend_message<F>(
|
||||
from_balance: u64,
|
||||
from_pubkey: &Pubkey,
|
||||
fee_pubkey: &Pubkey,
|
||||
from_rent_exempt_minimum: u64,
|
||||
build_message: F,
|
||||
) -> Result<(Message, SpendAndFee), CliError>
|
||||
where
|
||||
@@ -176,5 +186,20 @@ where
|
||||
},
|
||||
))
|
||||
}
|
||||
SpendAmount::RentExempt => {
|
||||
let mut lamports = if from_pubkey == fee_pubkey {
|
||||
from_balance.saturating_sub(fee)
|
||||
} else {
|
||||
from_balance
|
||||
};
|
||||
lamports = lamports.saturating_sub(from_rent_exempt_minimum);
|
||||
Ok((
|
||||
build_message(lamports),
|
||||
SpendAndFee {
|
||||
spend: lamports,
|
||||
fee,
|
||||
},
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,23 +1,29 @@
use {
solana_client::rpc_client::RpcClient,
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig, pubkey::Pubkey},
solana_sdk::{clock::DEFAULT_MS_PER_SLOT, commitment_config::CommitmentConfig},
std::{thread::sleep, time::Duration},
};

pub fn check_recent_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
(0..5).for_each(|tries| {
let balance = client
.get_balance_with_commitment(pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, expected_balance);
}
sleep(Duration::from_millis(500));
});
#[macro_export]
macro_rules! check_balance {
($expected_balance:expr, $client:expr, $pubkey:expr) => {
(0..5).for_each(|tries| {
let balance = $client
.get_balance_with_commitment($pubkey, CommitmentConfig::processed())
.unwrap()
.value;
if balance == $expected_balance {
return;
}
if tries == 4 {
assert_eq!(balance, $expected_balance);
}
std::thread::sleep(std::time::Duration::from_millis(500));
});
};
($expected_balance:expr, $client:expr, $pubkey:expr,) => {
check_balance!($expected_balance, $client, $pubkey)
};
}

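A hedged sketch of how the new macro is invoked from CLI integration tests; `rpc_client` and `pubkey` are placeholders for values the surrounding test already has in scope, and the expected balance is a made-up number:

    // Illustrative call site (not from the diff): the macro retries for about 2.5s
    // before asserting, so tests tolerate a short propagation delay after an airdrop.
    use solana_cli::check_balance;
    use solana_client::rpc_client::RpcClient;
    use solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey};

    fn assert_airdrop_landed(rpc_client: &RpcClient, pubkey: &Pubkey) {
        check_balance!(5_000_000_000, rpc_client, pubkey);
    }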
pub fn check_ready(rpc_client: &RpcClient) {
@@ -359,7 +359,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_amount_or_all)
|
||||
.help("The amount to withdraw, in SOL; accepts keyword ALL"),
|
||||
.help("The amount to withdraw, in SOL; accepts keyword ALL, which for this command means account balance minus rent-exempt minimum"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_withdrawer")
|
||||
@@ -653,7 +653,13 @@ pub fn parse_withdraw_from_vote_account(
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let destination_account_pubkey =
|
||||
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
|
||||
let withdraw_amount = SpendAmount::new_from_matches(matches, "amount");
|
||||
let mut withdraw_amount = SpendAmount::new_from_matches(matches, "amount");
|
||||
// As a safeguard for the vote accounts of running validators, `ALL` withdraws only the amount in
|
||||
// excess of the rent-exempt minimum. In order to close the account with this subcommand, a
|
||||
// validator must specify the withdrawal amount precisely.
|
||||
if withdraw_amount == SpendAmount::All {
|
||||
withdraw_amount = SpendAmount::RentExempt;
|
||||
}
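A small sketch of the remap added above, with a hypothetical `SpendAmount` stand-in for the CLI's enum: `ALL` parsed from the command line is downgraded to `RentExempt` before the withdrawal is built, so the account can only be fully closed by naming the exact amount.

```rust
// Hypothetical stand-in for the CLI's SpendAmount; only the variants this
// hunk touches are modeled.
#[derive(Debug, PartialEq)]
enum SpendAmount {
    All,
    RentExempt,
    Some(u64),
}

// Mirror of the parse-time safeguard above.
fn safeguard_vote_withdrawal(mut amount: SpendAmount) -> SpendAmount {
    if amount == SpendAmount::All {
        amount = SpendAmount::RentExempt;
    }
    amount
}

fn main() {
    assert_eq!(safeguard_vote_withdrawal(SpendAmount::All), SpendAmount::RentExempt);
    assert_eq!(safeguard_vote_withdrawal(SpendAmount::Some(7)), SpendAmount::Some(7));
}
```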
|
||||
|
||||
let (withdraw_authority, withdraw_authority_pubkey) =
|
||||
signer_of(matches, "authorized_withdrawer", wallet_manager)?;
|
||||
@@ -1990,7 +1996,7 @@ mod tests {
|
||||
vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
destination_account_pubkey: pubkey,
|
||||
withdraw_authority: 0,
|
||||
withdraw_amount: SpendAmount::All,
|
||||
withdraw_amount: SpendAmount::RentExempt,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
|
@@ -462,18 +462,27 @@ pub fn process_show_account(
|
||||
|
||||
let mut account_string = config.output_format.formatted_string(&cli_account);
|
||||
|
||||
if config.output_format == OutputFormat::Display
|
||||
|| config.output_format == OutputFormat::DisplayVerbose
|
||||
{
|
||||
if let Some(output_file) = output_file {
|
||||
let mut f = File::create(output_file)?;
|
||||
f.write_all(&data)?;
|
||||
writeln!(&mut account_string)?;
|
||||
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
|
||||
} else if !data.is_empty() {
|
||||
use pretty_hex::*;
|
||||
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
|
||||
match config.output_format {
|
||||
OutputFormat::Json | OutputFormat::JsonCompact => {
|
||||
if let Some(output_file) = output_file {
|
||||
let mut f = File::create(output_file)?;
|
||||
f.write_all(account_string.as_bytes())?;
|
||||
writeln!(&mut account_string)?;
|
||||
writeln!(&mut account_string, "Wrote account to {}", output_file)?;
|
||||
}
|
||||
}
|
||||
OutputFormat::Display | OutputFormat::DisplayVerbose => {
|
||||
if let Some(output_file) = output_file {
|
||||
let mut f = File::create(output_file)?;
|
||||
f.write_all(&data)?;
|
||||
writeln!(&mut account_string)?;
|
||||
writeln!(&mut account_string, "Wrote account data to {}", output_file)?;
|
||||
} else if !data.is_empty() {
|
||||
use pretty_hex::*;
|
||||
writeln!(&mut account_string, "{:?}", data.hex_dump())?;
|
||||
}
|
||||
}
|
||||
OutputFormat::DisplayQuiet => (),
|
||||
}
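The rewritten `process_show_account` branch above writes raw account bytes (or a hex dump) only for the Display formats and writes the formatted JSON string for the JSON formats. A minimal standalone use of the same `pretty_hex` call, assuming the `pretty_hex` crate is on the dependency list as the hunk implies:

```rust
use pretty_hex::*; // provides .hex_dump() on byte slices, as used above
use std::fmt::Write;

fn main() {
    let data: Vec<u8> = (0u8..32).collect();
    let mut account_string = String::new();
    // Same call shape as the Display / DisplayVerbose arm above.
    writeln!(&mut account_string, "{:?}", data.hex_dump()).unwrap();
    print!("{}", account_string);
}
```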
|
||||
|
||||
Ok(account_string)
@@ -1,8 +1,10 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
test_utils::{check_ready, check_recent_balance},
|
||||
test_utils::check_ready,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
@@ -14,6 +16,7 @@ use {
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
hash::Hash,
|
||||
native_token::sol_to_lamports,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, Signer},
|
||||
system_program,
|
||||
@@ -73,10 +76,14 @@ fn full_battery_tests(
|
||||
&rpc_client,
|
||||
&config_payer,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
2000,
|
||||
sol_to_lamports(2000.0),
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(2000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_balance!(
|
||||
sol_to_lamports(2000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
|
||||
let mut config_nonce = CliConfig::recent_for_tests();
|
||||
config_nonce.json_rpc_url = json_rpc_url;
|
||||
@@ -108,12 +115,16 @@ fn full_battery_tests(
|
||||
seed,
|
||||
nonce_authority: optional_authority,
|
||||
memo: None,
|
||||
amount: SpendAmount::Some(1000),
|
||||
amount: SpendAmount::Some(sol_to_lamports(1000.0)),
|
||||
};
|
||||
|
||||
process_command(&config_payer).unwrap();
|
||||
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_recent_balance(1000, &rpc_client, &nonce_account);
|
||||
check_balance!(
|
||||
sol_to_lamports(1000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(1000.0), &rpc_client, &nonce_account);
|
||||
|
||||
// Get nonce
|
||||
config_payer.signers.pop();
|
||||
@@ -161,12 +172,16 @@ fn full_battery_tests(
|
||||
nonce_authority: index,
|
||||
memo: None,
|
||||
destination_account_pubkey: payee_pubkey,
|
||||
lamports: 100,
|
||||
lamports: sol_to_lamports(100.0),
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_recent_balance(900, &rpc_client, &nonce_account);
|
||||
check_recent_balance(100, &rpc_client, &payee_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(1000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(900.0), &rpc_client, &nonce_account);
|
||||
check_balance!(sol_to_lamports(100.0), &rpc_client, &payee_pubkey);
|
||||
|
||||
// Show nonce account
|
||||
config_payer.command = CliCommand::ShowNonceAccount {
|
||||
@@ -208,12 +223,16 @@ fn full_battery_tests(
|
||||
nonce_authority: 1,
|
||||
memo: None,
|
||||
destination_account_pubkey: payee_pubkey,
|
||||
lamports: 100,
|
||||
lamports: sol_to_lamports(100.0),
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
check_recent_balance(1000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_recent_balance(800, &rpc_client, &nonce_account);
|
||||
check_recent_balance(200, &rpc_client, &payee_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(1000.0),
|
||||
&rpc_client,
|
||||
&config_payer.signers[0].pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(800.0), &rpc_client, &nonce_account);
|
||||
check_balance!(sol_to_lamports(200.0), &rpc_client, &payee_pubkey);
|
||||
}
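The assertions in this test trace a simple ledger: the nonce account is funded with 1000 SOL and two 100 SOL withdrawals move funds to the payee, while the payer stays at exactly 1000 SOL (the figures imply fees are effectively zero in this test configuration). A quick integer restatement:

```rust
// Integer restatement of the nonce-account ledger asserted above.
const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn main() {
    let mut nonce = 1000 * LAMPORTS_PER_SOL;
    let mut payee = 0u64;
    for _ in 0..2 {
        nonce -= 100 * LAMPORTS_PER_SOL; // withdraw 100 SOL from the nonce account
        payee += 100 * LAMPORTS_PER_SOL;
    }
    assert_eq!(nonce, 800 * LAMPORTS_PER_SOL);
    assert_eq!(payee, 200 * LAMPORTS_PER_SOL);
}
```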
|
||||
|
||||
#[test]
|
||||
@@ -241,19 +260,27 @@ fn test_create_account_with_seed() {
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
42,
|
||||
sol_to_lamports(42.0),
|
||||
)
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
4242,
|
||||
sol_to_lamports(4242.0),
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_recent_balance(4242, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_recent_balance(0, &rpc_client, &to_address);
|
||||
check_balance!(
|
||||
sol_to_lamports(42.0),
|
||||
&rpc_client,
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(4242.0),
|
||||
&rpc_client,
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
);
|
||||
check_balance!(0, &rpc_client, &to_address);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -263,7 +290,7 @@ fn test_create_account_with_seed() {
|
||||
let seed = authority_pubkey.to_string()[0..32].to_string();
|
||||
let nonce_address =
|
||||
Pubkey::create_with_seed(&creator_pubkey, &seed, &system_program::id()).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &nonce_address);
|
||||
check_balance!(0, &rpc_client, &nonce_address);
|
||||
|
||||
let mut creator_config = CliConfig::recent_for_tests();
|
||||
creator_config.json_rpc_url = test_validator.rpc_url();
|
||||
@@ -273,13 +300,21 @@ fn test_create_account_with_seed() {
|
||||
seed: Some(seed),
|
||||
nonce_authority: Some(authority_pubkey),
|
||||
memo: None,
|
||||
amount: SpendAmount::Some(241),
|
||||
amount: SpendAmount::Some(sol_to_lamports(241.0)),
|
||||
};
|
||||
process_command(&creator_config).unwrap();
|
||||
check_recent_balance(241, &rpc_client, &nonce_address);
|
||||
check_recent_balance(42, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_recent_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_recent_balance(0, &rpc_client, &to_address);
|
||||
check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address);
|
||||
check_balance!(
|
||||
sol_to_lamports(42.0),
|
||||
&rpc_client,
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(4000.999999999),
|
||||
&rpc_client,
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
);
|
||||
check_balance!(0, &rpc_client, &to_address);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -299,7 +334,7 @@ fn test_create_account_with_seed() {
|
||||
authority_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&authority_config).unwrap_err();
|
||||
authority_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(10.0)),
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
@@ -325,7 +360,7 @@ fn test_create_account_with_seed() {
|
||||
submit_config.json_rpc_url = test_validator.rpc_url();
|
||||
submit_config.signers = vec![&authority_presigner];
|
||||
submit_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(10.0)),
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -344,8 +379,16 @@ fn test_create_account_with_seed() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&submit_config).unwrap();
|
||||
check_recent_balance(241, &rpc_client, &nonce_address);
|
||||
check_recent_balance(31, &rpc_client, &offline_nonce_authority_signer.pubkey());
|
||||
check_recent_balance(4000, &rpc_client, &online_nonce_creator_signer.pubkey());
|
||||
check_recent_balance(10, &rpc_client, &to_address);
|
||||
check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address);
|
||||
check_balance!(
|
||||
sol_to_lamports(31.999999999),
|
||||
&rpc_client,
|
||||
&offline_nonce_authority_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(4000.999999999),
|
||||
&rpc_client,
|
||||
&online_nonce_creator_signer.pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(10.0), &rpc_client, &to_address);
|
||||
}
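A hedged reading of the fractional balances asserted above: the online creator was airdropped 4242 SOL and moved 241 SOL into the seeded nonce account, and the leftover odd lamport suggests this test configuration charges a 1-lamport fee per paying transaction; likewise the offline authority ends at 42 minus 10 SOL minus one lamport. A quick arithmetic check, with LAMPORTS_PER_SOL reproduced as a literal:

```rust
// Arithmetic behind the 4000.999999999 and 31.999999999 SOL assertions,
// assuming the 1-lamport-per-transaction fee the figures imply.
const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn main() {
    let creator = 4242 * LAMPORTS_PER_SOL - 241 * LAMPORTS_PER_SOL - 1;
    assert_eq!(creator, 4_000_999_999_999); // 4000.999999999 SOL

    let authority = 42 * LAMPORTS_PER_SOL - 10 * LAMPORTS_PER_SOL - 1;
    assert_eq!(authority, 31_999_999_999); // 31.999999999 SOL
}
```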
@@ -1,3 +1,4 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
serde_json::Value,
|
||||
solana_cli::{
|
||||
|
@@ -1,9 +1,11 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
solana_cli::cli::{process_command, CliCommand, CliConfig},
|
||||
solana_client::rpc_client::RpcClient,
|
||||
solana_faucet::faucet::run_local_faucet,
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
native_token::sol_to_lamports,
|
||||
signature::{Keypair, Signer},
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
@@ -22,7 +24,7 @@ fn test_cli_request_airdrop() {
|
||||
bob_config.json_rpc_url = test_validator.rpc_url();
|
||||
bob_config.command = CliCommand::Airdrop {
|
||||
pubkey: None,
|
||||
lamports: 50,
|
||||
lamports: sol_to_lamports(50.0),
|
||||
};
|
||||
let keypair = Keypair::new();
|
||||
bob_config.signers = vec![&keypair];
|
||||
@@ -36,5 +38,5 @@ fn test_cli_request_airdrop() {
|
||||
let balance = rpc_client
|
||||
.get_balance(&bob_config.signers[0].pubkey())
|
||||
.unwrap();
|
||||
assert_eq!(balance, 50);
|
||||
assert_eq!(balance, sol_to_lamports(50.0));
|
||||
}
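These test updates consistently replace raw lamport literals with SOL-denominated amounts via `sol_to_lamports`. For reference, the conversion is a fixed scale of 1 SOL = 1,000,000,000 lamports; a standalone restatement (the constant is reproduced as a literal so the snippet needs no sdk import):

```rust
// 1 SOL == 1_000_000_000 lamports; sol_to_lamports scales and truncates.
const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn sol_to_lamports(sol: f64) -> u64 {
    (sol * LAMPORTS_PER_SOL as f64) as u64
}

fn main() {
    assert_eq!(sol_to_lamports(50.0), 50_000_000_000);
    assert_eq!(sol_to_lamports(0.5), 500_000_000);
}
```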
@@ -1,10 +1,12 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
#![allow(clippy::redundant_closure)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
stake::StakeAuthorizationIndexed,
|
||||
test_utils::{check_ready, check_recent_balance},
|
||||
test_utils::check_ready,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
@@ -150,7 +152,7 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
|
||||
let stake_address = Pubkey::create_with_seed(
|
||||
&config_validator.signers[0].pubkey(),
|
||||
@@ -239,7 +241,7 @@ fn test_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
|
||||
// Create stake account
|
||||
config_validator.signers.push(&stake_keypair);
|
||||
@@ -333,7 +335,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_validator.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
@@ -342,7 +344,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_offline.signers[0].pubkey());
|
||||
|
||||
// Create stake account
|
||||
config_validator.signers.push(&stake_keypair);
|
||||
@@ -911,13 +913,13 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
process_command(&config_offline).unwrap_err();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &default_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_payer, &payer_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &payer_pubkey);
|
||||
check_balance!(100_000, &rpc_client, &payer_pubkey);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
check_balance!(100_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -944,7 +946,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config` balance should be 50,000 - 1 stake account sig - 1 fee sig
|
||||
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
check_balance!(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
|
||||
// Assign authority with separate fee payer
|
||||
config.signers = vec![&default_signer, &payer_keypair];
|
||||
@@ -968,10 +970,10 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config` balance has not changed, despite submitting the TX
|
||||
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
check_balance!(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
// `config_payer` however has paid `config`'s authority sig
|
||||
// and `config_payer`'s fee sig
|
||||
check_recent_balance(100_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey);
|
||||
check_balance!(100_000 - SIG_FEE - SIG_FEE, &rpc_client, &payer_pubkey);
|
||||
|
||||
// Assign authority with offline fee payer
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
@@ -1019,10 +1021,10 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
// `config`'s balance again has not changed
|
||||
check_recent_balance(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
check_balance!(50_000 - SIG_FEE - SIG_FEE, &rpc_client, &default_pubkey);
|
||||
// `config_offline` however has paid 1 sig due to being both authority
|
||||
// and fee payer
|
||||
check_recent_balance(100_000 - SIG_FEE, &rpc_client, &offline_pubkey);
|
||||
check_balance!(100_000 - SIG_FEE, &rpc_client, &offline_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1058,10 +1060,10 @@ fn test_stake_split() {
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
|
||||
.unwrap();
|
||||
check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey());
|
||||
check_balance!(500_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
check_balance!(100_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create stake account, identity is authority
|
||||
let minimum_stake_balance = rpc_client
|
||||
@@ -1088,7 +1090,7 @@ fn test_stake_split() {
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
10 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
@@ -1108,7 +1110,7 @@ fn test_stake_split() {
|
||||
amount: SpendAmount::Some(minimum_nonce_balance),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
|
||||
check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account.pubkey());
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1122,7 +1124,7 @@ fn test_stake_split() {
|
||||
|
||||
// Nonced offline split
|
||||
let split_account = keypair_from_seed(&[2u8; 32]).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &split_account.pubkey());
|
||||
check_balance!(0, &rpc_client, &split_account.pubkey());
|
||||
config_offline.signers.push(&split_account);
|
||||
config_offline.command = CliCommand::SplitStake {
|
||||
stake_account_pubkey,
|
||||
@@ -1162,12 +1164,12 @@ fn test_stake_split() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
8 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
);
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
2 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&split_account.pubkey(),
|
||||
@@ -1207,10 +1209,10 @@ fn test_stake_set_lockup() {
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 500_000)
|
||||
.unwrap();
|
||||
check_recent_balance(500_000, &rpc_client, &config.signers[0].pubkey());
|
||||
check_balance!(500_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
check_balance!(100_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create stake account, identity is authority
|
||||
let minimum_stake_balance = rpc_client
|
||||
@@ -1244,7 +1246,12 @@ fn test_stake_set_lockup() {
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(
|
||||
check_balance!(
|
||||
10 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
);
|
||||
check_balance!(
|
||||
10 * minimum_stake_balance,
|
||||
&rpc_client,
|
||||
&stake_account_pubkey,
|
||||
@@ -1377,7 +1384,7 @@ fn test_stake_set_lockup() {
|
||||
amount: SpendAmount::Some(minimum_nonce_balance),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
|
||||
check_balance!(minimum_nonce_balance, &rpc_client, &nonce_account_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1473,10 +1480,10 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 200_000)
|
||||
.unwrap();
|
||||
check_recent_balance(200_000, &rpc_client, &config.signers[0].pubkey());
|
||||
check_balance!(200_000, &rpc_client, &config.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config_offline, &offline_pubkey, 100_000).unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &offline_pubkey);
|
||||
check_balance!(100_000, &rpc_client, &offline_pubkey);
|
||||
|
||||
// Create nonce account
|
||||
let minimum_nonce_balance = rpc_client
|
||||
@@ -1553,7 +1560,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &stake_pubkey);
|
||||
check_balance!(50_000, &rpc_client, &stake_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1572,7 +1579,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
config_offline.command = CliCommand::WithdrawStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
destination_account_pubkey: recipient_pubkey,
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: true,
|
||||
@@ -1591,7 +1598,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
config.command = CliCommand::WithdrawStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
destination_account_pubkey: recipient_pubkey,
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: false,
|
||||
@@ -1607,7 +1614,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(42, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(50_000, &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -1667,7 +1674,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
process_command(&config).unwrap();
|
||||
let seed_address =
|
||||
Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &seed_address);
|
||||
check_balance!(50_000, &rpc_client, &seed_address);
|
||||
}
|
||||
|
||||
#[test]
@@ -1,9 +1,11 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
#![allow(clippy::redundant_closure)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
test_utils::{check_ready, check_recent_balance},
|
||||
test_utils::check_ready,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
@@ -14,6 +16,7 @@ use {
|
||||
solana_faucet::faucet::run_local_faucet,
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
native_token::sol_to_lamports,
|
||||
nonce::State as NonceState,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
|
||||
@@ -49,15 +52,16 @@ fn test_transfer() {
|
||||
let sender_pubkey = config.signers[0].pubkey();
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(5.0))
|
||||
.unwrap();
|
||||
check_balance!(sol_to_lamports(5.0), &rpc_client, &sender_pubkey);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
// Plain ole transfer
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(1.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -73,12 +77,12 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_989, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(10, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(sol_to_lamports(4.0) - 1, &rpc_client, &sender_pubkey);
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Plain ole transfer, failure due to InsufficientFundsForSpendAndFee
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(49_989),
|
||||
amount: SpendAmount::Some(sol_to_lamports(4.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -94,8 +98,8 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
check_recent_balance(49_989, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(10, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(sol_to_lamports(4.0) - 1, &rpc_client, &sender_pubkey);
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey);
|
||||
|
||||
let mut offline = CliConfig::recent_for_tests();
|
||||
offline.json_rpc_url = String::default();
|
||||
@@ -105,13 +109,14 @@ fn test_transfer() {
|
||||
process_command(&offline).unwrap_err();
|
||||
|
||||
let offline_pubkey = offline.signers[0].pubkey();
|
||||
request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, 50).unwrap();
|
||||
check_recent_balance(50, &rpc_client, &offline_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, sol_to_lamports(1.0))
|
||||
.unwrap();
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &offline_pubkey);
|
||||
|
||||
// Offline transfer
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
offline.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.5)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
@@ -133,7 +138,7 @@ fn test_transfer() {
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.5)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -149,8 +154,8 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(39, &rpc_client, &offline_pubkey);
|
||||
check_recent_balance(20, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(sol_to_lamports(0.5) - 1, &rpc_client, &offline_pubkey);
|
||||
check_balance!(sol_to_lamports(1.5), &rpc_client, &recipient_pubkey);
|
||||
|
||||
// Create nonce account
|
||||
let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap();
|
||||
@@ -166,7 +171,11 @@ fn test_transfer() {
|
||||
amount: SpendAmount::Some(minimum_nonce_balance),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_987 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(4.0) - 3 - minimum_nonce_balance,
|
||||
&rpc_client,
|
||||
&sender_pubkey,
|
||||
);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -181,7 +190,7 @@ fn test_transfer() {
|
||||
// Nonced transfer
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(1.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -200,8 +209,12 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_976 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(30, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(3.0) - 4 - minimum_nonce_balance,
|
||||
&rpc_client,
|
||||
&sender_pubkey,
|
||||
);
|
||||
check_balance!(sol_to_lamports(2.5), &rpc_client, &recipient_pubkey);
|
||||
let new_nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
&rpc_client,
|
||||
&nonce_account.pubkey(),
|
||||
@@ -221,7 +234,11 @@ fn test_transfer() {
|
||||
new_authority: offline_pubkey,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(49_975 - minimum_nonce_balance, &rpc_client, &sender_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(3.0) - 5 - minimum_nonce_balance,
|
||||
&rpc_client,
|
||||
&sender_pubkey,
|
||||
);
|
||||
|
||||
// Fetch nonce hash
|
||||
let nonce_hash = nonce_utils::get_account_with_commitment(
|
||||
@@ -236,7 +253,7 @@ fn test_transfer() {
|
||||
// Offline, nonced transfer
|
||||
offline.signers = vec![&default_offline_signer];
|
||||
offline.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.4)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
@@ -257,7 +274,7 @@ fn test_transfer() {
|
||||
let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap();
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(10),
|
||||
amount: SpendAmount::Some(sol_to_lamports(0.4)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -276,8 +293,8 @@ fn test_transfer() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(28, &rpc_client, &offline_pubkey);
|
||||
check_recent_balance(40, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(sol_to_lamports(0.1) - 2, &rpc_client, &offline_pubkey);
|
||||
check_balance!(sol_to_lamports(2.9), &rpc_client, &recipient_pubkey);
|
||||
}
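The recipient's final 2.9 SOL in this test is just the sum of the four transfers exercised along the way (plain, offline, nonced, and offline-plus-nonced); a quick check:

```rust
// Recipient-side ledger for the assertions above:
// 1.0 + 0.5 + 1.0 + 0.4 SOL == 2.9 SOL.
fn main() {
    let received: u64 = [1_000_000_000u64, 500_000_000, 1_000_000_000, 400_000_000]
        .iter()
        .sum();
    assert_eq!(received, 2_900_000_000); // == sol_to_lamports(2.9)
}
```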
|
||||
|
||||
#[test]
|
||||
@@ -305,19 +322,27 @@ fn test_transfer_multisession_signing() {
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&offline_from_signer.pubkey(),
|
||||
43,
|
||||
sol_to_lamports(43.0),
|
||||
)
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&CliConfig::recent_for_tests(),
|
||||
&offline_fee_payer_signer.pubkey(),
|
||||
3,
|
||||
sol_to_lamports(1.0) + 3,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(43, &rpc_client, &offline_from_signer.pubkey());
|
||||
check_recent_balance(3, &rpc_client, &offline_fee_payer_signer.pubkey());
|
||||
check_recent_balance(0, &rpc_client, &to_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(43.0),
|
||||
&rpc_client,
|
||||
&offline_from_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(1.0) + 3,
|
||||
&rpc_client,
|
||||
&offline_fee_payer_signer.pubkey(),
|
||||
);
|
||||
check_balance!(0, &rpc_client, &to_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -331,7 +356,7 @@ fn test_transfer_multisession_signing() {
|
||||
fee_payer_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&fee_payer_config).unwrap_err();
|
||||
fee_payer_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(sol_to_lamports(42.0)),
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
@@ -362,7 +387,7 @@ fn test_transfer_multisession_signing() {
|
||||
from_config.command = CliCommand::ClusterVersion;
|
||||
process_command(&from_config).unwrap_err();
|
||||
from_config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(sol_to_lamports(42.0)),
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
@@ -390,7 +415,7 @@ fn test_transfer_multisession_signing() {
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.signers = vec![&fee_payer_presigner, &from_presigner];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(42),
|
||||
amount: SpendAmount::Some(sol_to_lamports(42.0)),
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: false,
|
||||
@@ -407,9 +432,17 @@ fn test_transfer_multisession_signing() {
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
check_recent_balance(1, &rpc_client, &offline_from_signer.pubkey());
|
||||
check_recent_balance(1, &rpc_client, &offline_fee_payer_signer.pubkey());
|
||||
check_recent_balance(42, &rpc_client, &to_pubkey);
|
||||
check_balance!(
|
||||
sol_to_lamports(1.0),
|
||||
&rpc_client,
|
||||
&offline_from_signer.pubkey(),
|
||||
);
|
||||
check_balance!(
|
||||
sol_to_lamports(1.0) + 1,
|
||||
&rpc_client,
|
||||
&offline_fee_payer_signer.pubkey(),
|
||||
);
|
||||
check_balance!(sol_to_lamports(42.0), &rpc_client, &to_pubkey);
|
||||
}
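The final assertions above work out once the fee schedule the figures imply (one lamport per signature in this test setup) is applied: the offline sender parts with exactly 42 of its 43 SOL, and the offline fee payer covers two signatures out of its 1 SOL plus 3 lamports. A hedged accounting:

```rust
// Hedged accounting for the multisession-signing balances, assuming the
// 1-lamport-per-signature fee the assertions imply.
const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn main() {
    let from = 43 * LAMPORTS_PER_SOL - 42 * LAMPORTS_PER_SOL; // sender pays no fee
    assert_eq!(from, LAMPORTS_PER_SOL);

    let fee_payer = (LAMPORTS_PER_SOL + 3) - 2; // two signatures, 1 lamport each
    assert_eq!(fee_payer, LAMPORTS_PER_SOL + 1);
}
```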
|
||||
|
||||
#[test]
|
||||
@@ -438,8 +471,8 @@ fn test_transfer_all() {
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(50_000, &rpc_client, &sender_pubkey);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -461,8 +494,8 @@ fn test_transfer_all() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(49_999, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(0, &rpc_client, &sender_pubkey);
|
||||
check_balance!(49_999, &rpc_client, &recipient_pubkey);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -491,8 +524,8 @@ fn test_transfer_unfunded_recipient() {
|
||||
let recipient_pubkey = Pubkey::new(&[1u8; 32]);
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 50_000).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
check_balance!(50_000, &rpc_client, &sender_pubkey);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
@@ -551,17 +584,19 @@ fn test_transfer_with_seed() {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 1).unwrap();
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &derived_address, 50_000).unwrap();
|
||||
check_recent_balance(1, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(50_000, &rpc_client, &derived_address);
|
||||
check_recent_balance(0, &rpc_client, &recipient_pubkey);
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(1.0))
|
||||
.unwrap();
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &derived_address, sol_to_lamports(5.0))
|
||||
.unwrap();
|
||||
check_balance!(sol_to_lamports(1.0), &rpc_client, &sender_pubkey);
|
||||
check_balance!(sol_to_lamports(5.0), &rpc_client, &derived_address);
|
||||
check_balance!(0, &rpc_client, &recipient_pubkey);
|
||||
|
||||
check_ready(&rpc_client);
|
||||
|
||||
// Transfer with seed
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(50_000),
|
||||
amount: SpendAmount::Some(sol_to_lamports(5.0)),
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -577,7 +612,7 @@ fn test_transfer_with_seed() {
|
||||
derived_address_program_id: Some(derived_address_program_id),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &sender_pubkey);
|
||||
check_recent_balance(50_000, &rpc_client, &recipient_pubkey);
|
||||
check_recent_balance(0, &rpc_client, &derived_address);
|
||||
check_balance!(sol_to_lamports(1.0) - 1, &rpc_client, &sender_pubkey);
|
||||
check_balance!(sol_to_lamports(5.0), &rpc_client, &recipient_pubkey);
|
||||
check_balance!(0, &rpc_client, &derived_address);
|
||||
}
@@ -1,8 +1,9 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use {
|
||||
solana_cli::{
|
||||
check_balance,
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
test_utils::check_recent_balance,
|
||||
},
|
||||
solana_cli_output::{parse_sign_only_reply_string, OutputFormat},
|
||||
solana_client::{
|
||||
@@ -69,12 +70,12 @@ fn test_vote_authorize_and_withdraw() {
|
||||
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
|
||||
.unwrap()
|
||||
.max(1);
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Transfer in some more SOL
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(1_000),
|
||||
amount: SpendAmount::Some(10_000),
|
||||
to: vote_account_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -90,8 +91,8 @@ fn test_vote_authorize_and_withdraw() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let expected_balance = expected_balance + 1_000;
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
let expected_balance = expected_balance + 10_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Authorize vote account withdrawal to another signer
|
||||
let first_withdraw_authority = Keypair::new();
|
||||
@@ -169,7 +170,7 @@ fn test_vote_authorize_and_withdraw() {
|
||||
config.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(100),
|
||||
withdraw_amount: SpendAmount::Some(1_000),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@@ -180,9 +181,9 @@ fn test_vote_authorize_and_withdraw() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let expected_balance = expected_balance - 100;
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_recent_balance(100, &rpc_client, &destination_account);
|
||||
let expected_balance = expected_balance - 1_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(1_000, &rpc_client, &destination_account);
|
||||
|
||||
// Re-assign validator identity
|
||||
let new_identity_keypair = Keypair::new();
|
||||
@@ -212,8 +213,8 @@ fn test_vote_authorize_and_withdraw() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
|
||||
check_recent_balance(expected_balance, &rpc_client, &destination_account);
|
||||
check_balance!(0, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(expected_balance, &rpc_client, &destination_account);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -247,7 +248,7 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_payer.signers[0].pubkey());
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
@@ -256,7 +257,7 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey());
|
||||
check_balance!(100_000, &rpc_client, &config_offline.signers[0].pubkey());
|
||||
|
||||
// Create vote account with specific withdrawer
|
||||
let vote_account_keypair = Keypair::new();
|
||||
@@ -288,12 +289,12 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
.get_minimum_balance_for_rent_exemption(VoteState::size_of())
|
||||
.unwrap()
|
||||
.max(1);
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Transfer in some more SOL
|
||||
config_payer.signers = vec![&default_signer];
|
||||
config_payer.command = CliCommand::Transfer {
|
||||
amount: SpendAmount::Some(1_000),
|
||||
amount: SpendAmount::Some(10_000),
|
||||
to: vote_account_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
@@ -309,8 +310,8 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
derived_address_program_id: None,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
let expected_balance = expected_balance + 1_000;
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
let expected_balance = expected_balance + 10_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Authorize vote account withdrawal to another signer, offline
|
||||
let withdraw_authority = Keypair::new();
|
||||
@@ -367,7 +368,7 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
config_offline.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(100),
|
||||
withdraw_amount: SpendAmount::Some(1_000),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
@@ -387,7 +388,7 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
config_payer.command = CliCommand::WithdrawFromVoteAccount {
|
||||
vote_account_pubkey,
|
||||
withdraw_authority: 1,
|
||||
withdraw_amount: SpendAmount::Some(100),
|
||||
withdraw_amount: SpendAmount::Some(1_000),
|
||||
destination_account_pubkey: destination_account,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@@ -398,9 +399,9 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
let expected_balance = expected_balance - 100;
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_recent_balance(100, &rpc_client, &destination_account);
|
||||
let expected_balance = expected_balance - 1_000;
|
||||
check_balance!(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(1_000, &rpc_client, &destination_account);
|
||||
|
||||
// Re-assign validator identity offline
|
||||
let blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
@@ -483,9 +484,7 @@ fn test_offline_vote_authorize_and_withdraw() {
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
let result = process_command(&config_payer).unwrap();
|
||||
println!("{:?}", result);
|
||||
check_recent_balance(0, &rpc_client, &vote_account_pubkey);
|
||||
println!("what");
|
||||
check_recent_balance(expected_balance, &rpc_client, &destination_account);
|
||||
process_command(&config_payer).unwrap();
|
||||
check_balance!(0, &rpc_client, &vote_account_pubkey);
|
||||
check_balance!(expected_balance, &rpc_client, &destination_account);
|
||||
}
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client-test"
|
||||
version = "1.10.0"
|
||||
version = "1.9.5"
|
||||
description = "Solana RPC Test"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,22 +12,24 @@ edition = "2021"
|
||||
[dependencies]
|
||||
serde_json = "1.0.72"
|
||||
serial_test = "0.5.1"
|
||||
solana-client = { path = "../client", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.10.0" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.10.0" }
|
||||
solana-perf = { path = "../perf", version = "=1.10.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.0" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
solana-client = { path = "../client", version = "=1.9.5" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.9.5" }
|
||||
solana-measure = { path = "../measure", version = "=1.9.5" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.5" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.9.5" }
|
||||
solana-perf = { path = "../perf", version = "=1.9.5" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.5" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.9.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.9.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.9.5" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.9.5" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.9.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
|
||||
solana-version = { path = "../version", version = "=1.9.5" }
|
||||
systemstat = "0.1.10"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.9.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
@@ -4,11 +4,16 @@ use {
|
||||
solana_client::{
|
||||
pubsub_client::PubsubClient,
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_response::SlotInfo,
|
||||
rpc_config::{
|
||||
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
|
||||
RpcProgramAccountsConfig,
|
||||
},
|
||||
rpc_response::{RpcBlockUpdate, SlotInfo},
|
||||
},
|
||||
solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path},
|
||||
solana_rpc::{
|
||||
optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank,
|
||||
rpc::create_test_transactions_and_populate_blockstore,
|
||||
rpc_pubsub_service::{PubSubConfig, PubSubService},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
},
|
||||
@@ -20,7 +25,7 @@ use {
|
||||
},
|
||||
solana_sdk::{
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentConfig,
|
||||
commitment_config::{CommitmentConfig, CommitmentLevel},
|
||||
native_token::sol_to_lamports,
|
||||
pubkey::Pubkey,
|
||||
rpc_port,
|
||||
@@ -29,11 +34,12 @@ use {
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
solana_test_validator::TestValidator,
|
||||
solana_transaction_status::{TransactionDetails, UiTransactionEncoding},
|
||||
std::{
|
||||
collections::HashSet,
|
||||
net::{IpAddr, SocketAddr},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
atomic::{AtomicBool, AtomicU64, Ordering},
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::sleep,
|
||||
@@ -119,9 +125,10 @@ fn test_account_subscription() {
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let bob = Keypair::new();
|
||||
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -194,6 +201,112 @@ fn test_account_subscription() {
|
||||
assert_eq!(errors, [].to_vec());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_block_subscription() {
|
||||
// setup BankForks
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair: alice,
|
||||
..
|
||||
} = create_genesis_config(10_000);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
|
||||
// setup Blockstore
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
let blockstore = Arc::new(blockstore);
|
||||
|
||||
// populate ledger with test txs
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let keypair1 = Keypair::new();
|
||||
let keypair2 = Keypair::new();
|
||||
let keypair3 = Keypair::new();
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root()));
|
||||
let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
|
||||
vec![&alice, &keypair1, &keypair2, &keypair3],
|
||||
0,
|
||||
bank,
|
||||
blockstore.clone(),
|
||||
max_complete_transaction_status_slot,
|
||||
);
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
// setup RpcSubscriptions && PubSubService
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
blockstore.clone(),
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
));
|
||||
let pubsub_addr = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
|
||||
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
|
||||
);
|
||||
let pub_cfg = PubSubConfig {
|
||||
enable_block_subscription: true,
|
||||
..PubSubConfig::default()
|
||||
};
|
||||
let (trigger, pubsub_service) = PubSubService::new(pub_cfg, &subscriptions, pubsub_addr);
|
||||
|
||||
std::thread::sleep(Duration::from_millis(400));
|
||||
|
||||
// setup PubsubClient
|
||||
let (mut client, receiver) = PubsubClient::block_subscribe(
|
||||
&format!("ws://0.0.0.0:{}/", pubsub_addr.port()),
|
||||
RpcBlockSubscribeFilter::All,
|
||||
Some(RpcBlockSubscribeConfig {
|
||||
commitment: Some(CommitmentConfig {
|
||||
commitment: CommitmentLevel::Confirmed,
|
||||
}),
|
||||
encoding: Some(UiTransactionEncoding::Json),
|
||||
transaction_details: Some(TransactionDetails::Signatures),
|
||||
show_rewards: None,
|
||||
}),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// trigger Gossip notification
|
||||
let slot = bank_forks.read().unwrap().highest_slot();
|
||||
subscriptions.notify_gossip_subscribers(slot);
|
||||
let maybe_actual = receiver.recv_timeout(Duration::from_millis(400));
|
||||
match maybe_actual {
|
||||
Ok(actual) => {
|
||||
let complete_block = blockstore.get_complete_block(slot, false).unwrap();
|
||||
let block = complete_block.clone().configure(
|
||||
UiTransactionEncoding::Json,
|
||||
TransactionDetails::Signatures,
|
||||
false,
|
||||
);
|
||||
let expected = RpcBlockUpdate {
|
||||
slot,
|
||||
block: Some(block),
|
||||
err: None,
|
||||
};
|
||||
let block = complete_block.configure(
|
||||
UiTransactionEncoding::Json,
|
||||
TransactionDetails::Signatures,
|
||||
false,
|
||||
);
|
||||
assert_eq!(actual.value.slot, expected.slot);
|
||||
assert!(block.eq(&actual.value.block.unwrap()));
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("unexpected websocket receive timeout");
|
||||
assert_eq!(Some(e), None);
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
trigger.cancel();
|
||||
client.shutdown().unwrap();
|
||||
pubsub_service.close().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_program_subscription() {
|
||||
@@ -215,9 +328,10 @@ fn test_program_subscription() {
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let bob = Keypair::new();
|
||||
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -300,9 +414,10 @@ fn test_root_subscription() {
|
||||
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
|
||||
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -350,8 +465,10 @@ fn test_slot_subscription() {
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
let optimistically_confirmed_bank =
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
optimistically_confirmed_bank,
@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.10.0"
version = "1.9.5"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
@@ -20,18 +20,18 @@ log = "0.4.14"
rayon = "1.5.1"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
semver = "1.0.4"
serde = "1.0.131"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
solana-faucet = { path = "../faucet", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
solana-account-decoder = { path = "../account-decoder", version = "=1.9.5" }
solana-clap-utils = { path = "../clap-utils", version = "=1.9.5" }
solana-faucet = { path = "../faucet", version = "=1.9.5" }
solana-net-utils = { path = "../net-utils", version = "=1.9.5" }
solana-measure = { path = "../measure", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
solana-version = { path = "../version", version = "=1.9.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.5" }
thiserror = "1.0"
tokio = { version = "1", features = ["full"] }
tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] }
@@ -40,7 +40,7 @@ url = "2.2.2"
[dev-dependencies]
assert_matches = "1.5.0"
jsonrpc-http-server = "18.0.0"
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.9.5" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,12 +1,13 @@
use {
crate::{
rpc_config::{
RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig,
RpcTransactionLogsConfig, RpcTransactionLogsFilter,
RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter,
RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig,
RpcTransactionLogsFilter,
},
rpc_response::{
Response as RpcResponse, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult,
SlotInfo, SlotUpdate,
Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse,
RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate,
},
},
log::*,
@@ -173,6 +174,12 @@ pub type SignatureSubscription = (
Receiver<RpcResponse<RpcSignatureResult>>,
);

pub type PubsubBlockClientSubscription = PubsubClientSubscription<RpcResponse<RpcBlockUpdate>>;
pub type BlockSubscription = (
PubsubBlockClientSubscription,
Receiver<RpcResponse<RpcBlockUpdate>>,
);

pub type PubsubProgramClientSubscription = PubsubClientSubscription<RpcResponse<RpcKeyedAccount>>;
pub type ProgramSubscription = (
PubsubProgramClientSubscription,
@@ -185,6 +192,9 @@ pub type AccountSubscription = (
Receiver<RpcResponse<UiAccount>>,
);

pub type PubsubVoteClientSubscription = PubsubClientSubscription<RpcVote>;
pub type VoteSubscription = (PubsubVoteClientSubscription, Receiver<RpcVote>);

pub type PubsubRootClientSubscription = PubsubClientSubscription<Slot>;
pub type RootSubscription = (PubsubRootClientSubscription, Receiver<Slot>);

@@ -266,6 +276,45 @@ impl PubsubClient {
Ok((result, receiver))
}

pub fn block_subscribe(
url: &str,
filter: RpcBlockSubscribeFilter,
config: Option<RpcBlockSubscribeConfig>,
) -> Result<BlockSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
let (sender, receiver) = channel();

let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":"blockSubscribe",
"params":[filter, config]
})
.to_string();

let subscription_id = PubsubBlockClientSubscription::send_subscribe(&socket_clone, body)?;

let t_cleanup = std::thread::spawn(move || {
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
});

let result = PubsubClientSubscription {
message_type: PhantomData,
operation: "block",
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};

Ok((result, receiver))
}

pub fn logs_subscribe(
url: &str,
filter: RpcTransactionLogsFilter,
@@ -346,6 +395,39 @@ impl PubsubClient {
Ok((result, receiver))
}

pub fn vote_subscribe(url: &str) -> Result<VoteSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
let (sender, receiver) = channel();

let socket = Arc::new(RwLock::new(socket));
let socket_clone = socket.clone();
let exit = Arc::new(AtomicBool::new(false));
let exit_clone = exit.clone();
let body = json!({
"jsonrpc":"2.0",
"id":1,
"method":"voteSubscribe",
})
.to_string();
let subscription_id = PubsubVoteClientSubscription::send_subscribe(&socket_clone, body)?;

let t_cleanup = std::thread::spawn(move || {
Self::cleanup_with_sender(exit_clone, &socket_clone, sender)
});

let result = PubsubClientSubscription {
message_type: PhantomData,
operation: "vote",
socket,
subscription_id,
t_cleanup: Some(t_cleanup),
exit,
};

Ok((result, receiver))
}

pub fn root_subscribe(url: &str) -> Result<RootSubscription, PubsubClientError> {
let url = Url::parse(url)?;
let socket = connect_with_retry(url)?;
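For reference, a minimal client-side sketch of driving the blockSubscribe API added above; the websocket endpoint, the three-update cutoff, and the logging are illustrative assumptions, not part of this diff.

use solana_client::{
    pubsub_client::PubsubClient,
    rpc_config::{RpcBlockSubscribeConfig, RpcBlockSubscribeFilter},
};

fn watch_blocks() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed local validator websocket endpoint.
    let (_block_subscription, receiver) = PubsubClient::block_subscribe(
        "ws://127.0.0.1:8900",
        RpcBlockSubscribeFilter::All,
        Some(RpcBlockSubscribeConfig::default()),
    )?;
    // Each message is an RpcResponse<RpcBlockUpdate>; `block` is None when an error is reported.
    for response in receiver.iter().take(3) {
        println!(
            "slot {}: block received: {}",
            response.value.slot,
            response.value.block.is_some()
        );
    }
    // Unsubscribe/shutdown handling is omitted from this sketch.
    Ok(())
}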
@@ -182,6 +182,23 @@ pub struct RpcSignatureSubscribeConfig {
pub enable_received_notification: Option<bool>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcBlockSubscribeFilter {
All,
MentionsAccountOrProgram(String),
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockSubscribeConfig {
#[serde(flatten)]
pub commitment: Option<CommitmentConfig>,
pub encoding: Option<UiTransactionEncoding>,
pub transaction_details: Option<TransactionDetails>,
pub show_rewards: Option<bool>,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignaturesForAddressConfig {
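A minimal sketch of filling in the new subscription config; every field value below is an illustrative choice (the types come from solana-sdk and solana-transaction-status), not a default imposed by this change.

use solana_client::rpc_config::{RpcBlockSubscribeConfig, RpcBlockSubscribeFilter};
use solana_sdk::commitment_config::CommitmentConfig;
use solana_transaction_status::{TransactionDetails, UiTransactionEncoding};

fn example_block_subscribe_args() -> (RpcBlockSubscribeFilter, RpcBlockSubscribeConfig) {
    // Only notify for blocks that mention this account or program id (example key).
    let filter = RpcBlockSubscribeFilter::MentionsAccountOrProgram(
        "Vote111111111111111111111111111111111111111".to_string(),
    );
    let config = RpcBlockSubscribeConfig {
        commitment: Some(CommitmentConfig::confirmed()),
        encoding: Some(UiTransactionEncoding::Base64),
        transaction_details: Some(TransactionDetails::Signatures),
        show_rewards: Some(false),
    };
    (filter, config)
}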
@@ -9,9 +9,10 @@ use {
transaction::{Result, TransactionError},
},
solana_transaction_status::{
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus,
ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock,
},
std::{collections::HashMap, fmt, net::SocketAddr},
thiserror::Error,
};

pub type RpcResult<T> = client_error::Result<Response<T>>;
@@ -424,6 +425,20 @@ pub struct RpcInflationReward {
pub commission: Option<u8>, // Vote account commission when the reward was credited
}

#[derive(Clone, Deserialize, Serialize, Debug, Error, Eq, PartialEq)]
pub enum RpcBlockUpdateError {
#[error("block store error")]
BlockStoreError,
}

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcBlockUpdate {
pub slot: Slot,
pub block: Option<UiConfirmedBlock>,
pub err: Option<RpcBlockUpdateError>,
}

impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
let ConfirmedTransactionStatusWithSignature {
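A small sketch of consuming the new RpcBlockUpdate payload; the helper name and log messages are assumptions for illustration only.

use solana_client::rpc_response::{RpcBlockUpdate, RpcBlockUpdateError};

fn summarize_block_update(update: &RpcBlockUpdate) {
    match (&update.block, &update.err) {
        // A populated UiConfirmedBlock carries the usual confirmed-block fields.
        (Some(block), _) => println!("slot {}: blockhash {}", update.slot, block.blockhash),
        (None, Some(RpcBlockUpdateError::BlockStoreError)) => {
            eprintln!("slot {}: block could not be read from the block store", update.slot)
        }
        (None, None) => eprintln!("slot {}: notification carried no block", update.slot),
    }
}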
@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "1.10.0"
version = "1.9.5"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-core"
readme = "../README.md"
@@ -24,40 +24,42 @@ dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] }
etcd-client = { version = "0.8.1", features = ["tls"]}
fs_extra = "1.2.0"
histogram = "0.6.9"
itertools = "0.10.3"
itertools = "0.10.1"
log = "0.4.14"
lru = "0.7.0"
lru = "0.7.1"
rand = "0.7.0"
rand_chacha = "0.2.2"
raptorq = "1.6.4"
rayon = "1.5.1"
retain_mut = "0.1.5"
serde = "1.0.131"
serde = "1.0.130"
serde_derive = "1.0.103"
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.10.0" }
solana-client = { path = "../client", version = "=1.10.0" }
solana-entry = { path = "../entry", version = "=1.10.0" }
solana-gossip = { path = "../gossip", version = "=1.10.0" }
solana-ledger = { path = "../ledger", version = "=1.10.0" }
solana-logger = { path = "../logger", version = "=1.10.0" }
solana-measure = { path = "../measure", version = "=1.10.0" }
solana-metrics = { path = "../metrics", version = "=1.10.0" }
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
solana-perf = { path = "../perf", version = "=1.10.0" }
solana-poh = { path = "../poh", version = "=1.10.0" }
solana-rpc = { path = "../rpc", version = "=1.10.0" }
solana-replica-lib = { path = "../replica-lib", version = "=1.10.0" }
solana-runtime = { path = "../runtime", version = "=1.10.0" }
solana-sdk = { path = "../sdk", version = "=1.10.0" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
solana-streamer = { path = "../streamer", version = "=1.10.0" }
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.5" }
solana-bloom = { path = "../bloom", version = "=1.9.5" }
solana-client = { path = "../client", version = "=1.9.5" }
solana-entry = { path = "../entry", version = "=1.9.5" }
solana-gossip = { path = "../gossip", version = "=1.9.5" }
solana-ledger = { path = "../ledger", version = "=1.9.5" }
solana-logger = { path = "../logger", version = "=1.9.5" }
solana-measure = { path = "../measure", version = "=1.9.5" }
solana-metrics = { path = "../metrics", version = "=1.9.5" }
solana-net-utils = { path = "../net-utils", version = "=1.9.5" }
solana-perf = { path = "../perf", version = "=1.9.5" }
solana-poh = { path = "../poh", version = "=1.9.5" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.5" }
solana-rpc = { path = "../rpc", version = "=1.9.5" }
solana-replica-lib = { path = "../replica-lib", version = "=1.9.5" }
solana-runtime = { path = "../runtime", version = "=1.9.5" }
solana-sdk = { path = "../sdk", version = "=1.9.5" }
solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.5" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.5" }
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.5" }
solana-streamer = { path = "../streamer", version = "=1.9.5" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.5" }
solana-vote-program = { path = "../programs/vote", version = "=1.9.5" }
tempfile = "3.2.0"
thiserror = "1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.10.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.5" }
sys-info = "0.9.1"
tokio = { version = "1", features = ["full"] }
trees = "0.4.2"
@@ -71,9 +73,9 @@ matches = "0.1.9"
reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] }
serde_json = "1.0.72"
serial_test = "0.5.1"
solana-program-runtime = { path = "../program-runtime", version = "=1.10.0" }
solana-stake-program = { path = "../programs/stake", version = "=1.10.0" }
solana-version = { path = "../version", version = "=1.10.0" }
solana-program-runtime = { path = "../program-runtime", version = "=1.9.5" }
solana-stake-program = { path = "../programs/stake", version = "=1.9.5" }
solana-version = { path = "../version", version = "=1.9.5" }
static_assertions = "1.1.0"
systemstat = "0.1.10"
@@ -174,7 +174,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
// set cost tracker limits to MAX so it will not filter out TXs
bank.write_cost_tracker()
.unwrap()
.set_limits(std::u64::MAX, std::u64::MAX);
.set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);

debug!("threads: {} txs: {}", num_threads, txes);
@@ -100,7 +100,11 @@ fn bench_retransmitter(bencher: &mut Bencher) {
let slot = 0;
let parent = 0;
let shredder = Shredder::new(slot, parent, 0, 0).unwrap();
let mut data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
let (mut data_shreds, _) = shredder.entries_to_shreds(
&keypair, &entries, true, // is_last_in_slot
0, // next_shred_index
0, // next_code_index
);

let num_packets = data_shreds.len();
@@ -40,16 +40,14 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
);
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
let data_shreds = shredder
.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
)
.0;
let data_shreds = shredder.entries_to_data_shreds(
&Keypair::new(),
&entries,
true, // is_last_in_slot
0, // next_shred_index
0, // fec_set_offset
&mut ProcessShredsStats::default(),
);
assert!(data_shreds.len() >= num_shreds);
data_shreds
}
@@ -76,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
let entries = create_ticks(num_ticks, 0, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
shredder.entries_to_shreds(&kp, &entries, true, 0);
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
})
}

@@ -95,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
// 1Mb
bencher.iter(|| {
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
shredder.entries_to_shreds(&kp, &entries, true, 0);
shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
})
}

@@ -108,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
let entries = create_ticks(num_ticks, 0, Hash::default());
let shredder = Shredder::new(1, 0, 0, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&kp, &entries, true, 0).0;
let (data_shreds, _) = shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
bencher.iter(|| {
let raw = &mut Shredder::deshred(&data_shreds).unwrap();
assert_ne!(raw.len(), 0);
@@ -135,6 +133,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
Shredder::generate_coding_shreds(
&data_shreds[..symbol_count],
true, // is_last_in_slot
0, // next_code_index
)
.len();
})
@@ -147,6 +146,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
let coding_shreds = Shredder::generate_coding_shreds(
&data_shreds[..symbol_count],
true, // is_last_in_slot
0, // next_code_index
);
bencher.iter(|| {
Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap();
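The bench changes above all follow the updated shredder call shape: entries_to_shreds now takes an explicit next_code_index and returns the coding shreds alongside the data shreds. A minimal sketch, assuming the solana-ledger and solana-entry crate paths used elsewhere in this diff:

use solana_entry::entry::Entry;
use solana_ledger::shred::Shredder;
use solana_sdk::signature::Keypair;

fn shred_count(entries: &[Entry]) -> usize {
    let keypair = Keypair::new();
    // Slot, parent, reference tick and shred version are illustrative values.
    let shredder = Shredder::new(1, 0, 0, 0).unwrap();
    let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
        &keypair, entries, true, // is_last_in_slot
        0, // next_shred_index
        0, // next_code_index
    );
    data_shreds.len() + coding_shreds.len()
}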
@@ -1,4 +1,5 @@
#![feature(test)]
#![allow(clippy::integer_arithmetic)]

extern crate solana_core;
extern crate test;
@@ -22,8 +23,7 @@ use {
test::Bencher,
};

#[bench]
fn bench_packet_discard(bencher: &mut Bencher) {
fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) {
solana_logger::setup();
let len = 30 * 1000;
let chunk_size = 1024;
@@ -32,12 +32,12 @@ fn bench_packet_discard(bencher: &mut Bencher) {

let mut total = 0;

let ips: Vec<_> = (0..10_000)
let ips: Vec<_> = (0..num_ips)
.into_iter()
.map(|_| {
let mut addr = [0u16; 8];
thread_rng().fill(&mut addr);
addr
std::net::IpAddr::from(addr)
})
.collect();

@@ -52,11 +52,63 @@ fn bench_packet_discard(bencher: &mut Bencher) {

bencher.iter(move || {
SigVerifyStage::discard_excess_packets(&mut batches, 10_000);
let mut num_packets = 0;
for batch in batches.iter_mut() {
for p in batch.packets.iter_mut() {
p.meta.discard = false;
if !p.meta.discard() {
num_packets += 1;
}
p.meta.set_discard(false);
}
}
assert_eq!(num_packets, 10_000);
});
}

#[bench]
fn bench_packet_discard_many_senders(bencher: &mut Bencher) {
run_bench_packet_discard(1000, bencher);
}

#[bench]
fn bench_packet_discard_single_sender(bencher: &mut Bencher) {
run_bench_packet_discard(1, bencher);
}

#[bench]
fn bench_packet_discard_mixed_senders(bencher: &mut Bencher) {
const SIZE: usize = 30 * 1000;
const CHUNK_SIZE: usize = 1024;
fn new_rand_addr<R: Rng>(rng: &mut R) -> std::net::IpAddr {
let mut addr = [0u16; 8];
rng.fill(&mut addr);
std::net::IpAddr::from(addr)
}
let mut rng = thread_rng();
let mut batches = to_packet_batches(&vec![test_tx(); SIZE], CHUNK_SIZE);
let spam_addr = new_rand_addr(&mut rng);
for batch in batches.iter_mut() {
for packet in batch.packets.iter_mut() {
// One spam address, ~1000 unique addresses.
packet.meta.addr = if rng.gen_ratio(1, 30) {
new_rand_addr(&mut rng)
} else {
spam_addr
}
}
}
bencher.iter(move || {
SigVerifyStage::discard_excess_packets(&mut batches, 10_000);
let mut num_packets = 0;
for batch in batches.iter_mut() {
for packet in batch.packets.iter_mut() {
if !packet.meta.discard() {
num_packets += 1;
}
packet.meta.set_discard(false);
}
}
assert_eq!(num_packets, 10_000);
});
}
@@ -328,7 +328,7 @@ impl AncestorHashesService {
blockstore: &Blockstore,
) -> Option<(Slot, DuplicateAncestorDecision)> {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size - SIZE_OF_NONCE])
limited_deserialize(&packet.data[..packet.meta.size.saturating_sub(SIZE_OF_NONCE)])
.ok()
.and_then(|ancestor_hashes_response| {
// Verify the response
@@ -1033,15 +1033,6 @@ mod test {
is_frozen,
);

/*{
let w_bank_forks = bank_forks.write().unwrap();
assert!(w_bank_forks.get(dead_slot).is_none());
let parent = w_bank_forks.get(dead_slot - 1).unwrap().clone();
let dead_bank = Bank::new_from_parent(&parent, &Pubkey::default(), dead_slot);
bank_forks.insert(dead_bank);

}*/

// Create slots [slot, slot + num_ancestors) with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(dead_slot, dead_slot, 5);
blockstore
@@ -1369,6 +1360,34 @@ mod test {
assert!(ancestor_hashes_request_statuses.is_empty());
}

#[test]
fn test_verify_and_process_ancestor_responses_invalid_packet() {
let bank0 = Bank::default_for_tests();
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));

let ManageAncestorHashesState {
ancestor_hashes_request_statuses,
outstanding_requests,
..
} = ManageAncestorHashesState::new(bank_forks);

let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();

// Create invalid packet with fewer bytes than the size of the nonce
let mut packet = Packet::default();
packet.meta.size = 0;

assert!(AncestorHashesService::verify_and_process_ancestor_response(
&packet,
&ancestor_hashes_request_statuses,
&mut AncestorHashesResponsesStats::default(),
&outstanding_requests,
&blockstore,
)
.is_none());
}

#[test]
fn test_ancestor_hashes_service_manage_ancestor_hashes_after_replay_dump() {
let dead_slot = MAX_ANCESTOR_RESPONSES as Slot;
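The saturating_sub change above guards against a malformed response shorter than the nonce: with the old expression, meta.size - SIZE_OF_NONCE underflows (and panics in debug builds) when size is 0, which is exactly what the new test exercises. A standalone illustration with an assumed nonce size:

const SIZE_OF_NONCE: usize = 4; // illustrative; the real constant lives in the repair code

fn response_payload_len(packet_size: usize) -> usize {
    // Clamps at zero instead of underflowing.
    packet_size.saturating_sub(SIZE_OF_NONCE)
}

fn main() {
    assert_eq!(response_payload_len(0), 0); // previously `0 - 4` would panic
    assert_eq!(response_payload_len(10), 6);
}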
File diff suppressed because it is too large
@@ -496,6 +496,7 @@ pub mod test {
&keypair,
&data_shreds[0..],
true, // is_last_in_slot
0, // next_code_index
&mut ProcessShredsStats::default(),
)
.unwrap();
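The broadcast changes below keep a running next_code_index per slot and advance it past the highest coding-shred index produced by each batch. A minimal sketch of that bookkeeping, using the existing Shred::index accessor (the free function itself is an assumption for illustration):

use solana_ledger::shred::Shred;

fn advance_next_code_index(next_code_index: &mut u32, coding_shreds: &[Shred]) {
    if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
        // Never move the index backwards if a later batch produced fewer shreds.
        *next_code_index = (*next_code_index).max(index + 1);
    }
}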
@@ -28,6 +28,7 @@ pub(super) struct BroadcastDuplicatesRun {
|
||||
config: BroadcastDuplicatesConfig,
|
||||
current_slot: Slot,
|
||||
next_shred_index: u32,
|
||||
next_code_index: u32,
|
||||
shred_version: u16,
|
||||
recent_blockhash: Option<Hash>,
|
||||
prev_entry_hash: Option<Hash>,
|
||||
@@ -46,6 +47,7 @@ impl BroadcastDuplicatesRun {
|
||||
Self {
|
||||
config,
|
||||
next_shred_index: u32::MAX,
|
||||
next_code_index: 0,
|
||||
shred_version,
|
||||
current_slot: 0,
|
||||
recent_blockhash: None,
|
||||
@@ -74,6 +76,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
|
||||
|
||||
if bank.slot() != self.current_slot {
|
||||
self.next_shred_index = 0;
|
||||
self.next_code_index = 0;
|
||||
self.current_slot = bank.slot();
|
||||
self.prev_entry_hash = None;
|
||||
self.num_slots_broadcasted += 1;
|
||||
@@ -154,22 +157,26 @@ impl BroadcastRun for BroadcastDuplicatesRun {
|
||||
)
|
||||
.expect("Expected to create a new shredder");
|
||||
|
||||
let (data_shreds, _, _) = shredder.entries_to_shreds(
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
keypair,
|
||||
&receive_results.entries,
|
||||
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
|
||||
self.next_shred_index,
|
||||
self.next_code_index,
|
||||
);
|
||||
|
||||
self.next_shred_index += data_shreds.len() as u32;
|
||||
if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
|
||||
self.next_code_index = index + 1;
|
||||
}
|
||||
let last_shreds = last_entries.map(|(original_last_entry, duplicate_extra_last_entries)| {
|
||||
let (original_last_data_shred, _, _) =
|
||||
shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index);
|
||||
let (original_last_data_shred, _) =
|
||||
shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index, self.next_code_index);
|
||||
|
||||
let (partition_last_data_shred, _, _) =
|
||||
let (partition_last_data_shred, _) =
|
||||
// Don't mark the last shred as last so that validators won't know that
|
||||
// they've gotten all the shreds, and will continue trying to repair
|
||||
shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index);
|
||||
shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index, self.next_code_index);
|
||||
|
||||
let sigs: Vec<_> = partition_last_data_shred.iter().map(|s| (s.signature(), s.index())).collect();
|
||||
info!(
|
||||
|
@@ -10,6 +10,7 @@ pub(super) struct BroadcastFakeShredsRun {
|
||||
last_blockhash: Hash,
|
||||
partition: usize,
|
||||
shred_version: u16,
|
||||
next_code_index: u32,
|
||||
}
|
||||
|
||||
impl BroadcastFakeShredsRun {
|
||||
@@ -18,6 +19,7 @@ impl BroadcastFakeShredsRun {
|
||||
last_blockhash: Hash::default(),
|
||||
partition,
|
||||
shred_version,
|
||||
next_code_index: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -52,11 +54,12 @@ impl BroadcastRun for BroadcastFakeShredsRun {
|
||||
)
|
||||
.expect("Expected to create a new shredder");
|
||||
|
||||
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
keypair,
|
||||
&receive_results.entries,
|
||||
last_tick_height == bank.max_tick_height(),
|
||||
next_shred_index,
|
||||
self.next_code_index,
|
||||
);
|
||||
|
||||
// If the last blockhash is default, a new block is being created
|
||||
@@ -69,13 +72,23 @@ impl BroadcastRun for BroadcastFakeShredsRun {
|
||||
.map(|_| Entry::new(&self.last_blockhash, 0, vec![]))
|
||||
.collect();
|
||||
|
||||
let (fake_data_shreds, fake_coding_shreds, _) = shredder.entries_to_shreds(
|
||||
let (fake_data_shreds, fake_coding_shreds) = shredder.entries_to_shreds(
|
||||
keypair,
|
||||
&fake_entries,
|
||||
last_tick_height == bank.max_tick_height(),
|
||||
next_shred_index,
|
||||
self.next_code_index,
|
||||
);
|
||||
|
||||
if let Some(index) = coding_shreds
|
||||
.iter()
|
||||
.chain(&fake_coding_shreds)
|
||||
.map(Shred::index)
|
||||
.max()
|
||||
{
|
||||
self.next_code_index = index + 1;
|
||||
}
|
||||
|
||||
// If it's the last tick, reset the last block hash to default
|
||||
// this will cause next run to grab last bank's blockhash
|
||||
if last_tick_height == bank.max_tick_height() {
|
||||
|
@@ -21,6 +21,7 @@ pub(super) struct ReceiveResults {
|
||||
#[derive(Clone)]
|
||||
pub struct UnfinishedSlotInfo {
|
||||
pub next_shred_index: u32,
|
||||
pub(crate) next_code_index: u32,
|
||||
pub slot: Slot,
|
||||
pub parent: Slot,
|
||||
// Data shreds buffered to make a batch of size
|
||||
|
@@ -15,6 +15,7 @@ pub(super) struct FailEntryVerificationBroadcastRun {
|
||||
good_shreds: Vec<Shred>,
|
||||
current_slot: Slot,
|
||||
next_shred_index: u32,
|
||||
next_code_index: u32,
|
||||
cluster_nodes_cache: Arc<ClusterNodesCache<BroadcastStage>>,
|
||||
}
|
||||
|
||||
@@ -29,6 +30,7 @@ impl FailEntryVerificationBroadcastRun {
|
||||
good_shreds: vec![],
|
||||
current_slot: 0,
|
||||
next_shred_index: 0,
|
||||
next_code_index: 0,
|
||||
cluster_nodes_cache,
|
||||
}
|
||||
}
|
||||
@@ -50,6 +52,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
|
||||
if bank.slot() != self.current_slot {
|
||||
self.next_shred_index = 0;
|
||||
self.next_code_index = 0;
|
||||
self.current_slot = bank.slot();
|
||||
}
|
||||
|
||||
@@ -83,22 +86,26 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
)
|
||||
.expect("Expected to create a new shredder");
|
||||
|
||||
let (data_shreds, _, _) = shredder.entries_to_shreds(
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
keypair,
|
||||
&receive_results.entries,
|
||||
last_tick_height == bank.max_tick_height() && last_entries.is_none(),
|
||||
self.next_shred_index,
|
||||
self.next_code_index,
|
||||
);
|
||||
|
||||
self.next_shred_index += data_shreds.len() as u32;
|
||||
if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
|
||||
self.next_code_index = index + 1;
|
||||
}
|
||||
let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| {
|
||||
let (good_last_data_shred, _, _) =
|
||||
shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index);
|
||||
let (good_last_data_shred, _) =
|
||||
shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index, self.next_code_index);
|
||||
|
||||
let (bad_last_data_shred, _, _) =
|
||||
let (bad_last_data_shred, _) =
|
||||
// Don't mark the last shred as last so that validators won't know that
|
||||
// they've gotten all the shreds, and will continue trying to repair
|
||||
shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index);
|
||||
shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index, self.next_code_index);
|
||||
|
||||
self.next_shred_index += 1;
|
||||
(good_last_data_shred, bad_last_data_shred)
|
||||
|
@@ -119,17 +119,16 @@ impl StandardBroadcastRun {
|
||||
None => (0, 0),
|
||||
},
|
||||
};
|
||||
let (data_shreds, next_shred_index) =
|
||||
Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
|
||||
.unwrap()
|
||||
.entries_to_data_shreds(
|
||||
keypair,
|
||||
entries,
|
||||
is_slot_end,
|
||||
next_shred_index,
|
||||
fec_set_offset,
|
||||
process_stats,
|
||||
);
|
||||
let data_shreds = Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
|
||||
.unwrap()
|
||||
.entries_to_data_shreds(
|
||||
keypair,
|
||||
entries,
|
||||
is_slot_end,
|
||||
next_shred_index,
|
||||
fec_set_offset,
|
||||
process_stats,
|
||||
);
|
||||
let mut data_shreds_buffer = match &mut self.unfinished_slot {
|
||||
Some(state) => {
|
||||
assert_eq!(state.slot, slot);
|
||||
@@ -138,8 +137,17 @@ impl StandardBroadcastRun {
|
||||
None => Vec::default(),
|
||||
};
|
||||
data_shreds_buffer.extend(data_shreds.clone());
|
||||
let next_shred_index = match data_shreds.iter().map(Shred::index).max() {
|
||||
Some(index) => index + 1,
|
||||
None => next_shred_index,
|
||||
};
|
||||
let next_code_index = match &self.unfinished_slot {
|
||||
Some(state) => state.next_code_index,
|
||||
None => 0,
|
||||
};
|
||||
self.unfinished_slot = Some(UnfinishedSlotInfo {
|
||||
next_shred_index,
|
||||
next_code_index,
|
||||
slot,
|
||||
parent: parent_slot,
|
||||
data_shreds_buffer,
|
||||
@@ -446,23 +454,40 @@ fn make_coding_shreds(
|
||||
is_slot_end: bool,
|
||||
stats: &mut ProcessShredsStats,
|
||||
) -> Vec<Shred> {
|
||||
let data_shreds = match unfinished_slot {
|
||||
None => Vec::default(),
|
||||
Some(unfinished_slot) => {
|
||||
let size = unfinished_slot.data_shreds_buffer.len();
|
||||
// Consume a multiple of 32, unless this is the slot end.
|
||||
let offset = if is_slot_end {
|
||||
0
|
||||
} else {
|
||||
size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize
|
||||
};
|
||||
unfinished_slot
|
||||
.data_shreds_buffer
|
||||
.drain(0..size - offset)
|
||||
.collect()
|
||||
}
|
||||
let unfinished_slot = match unfinished_slot {
|
||||
None => return Vec::default(),
|
||||
Some(state) => state,
|
||||
};
|
||||
Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, is_slot_end, stats).unwrap()
|
||||
let data_shreds: Vec<_> = {
|
||||
let size = unfinished_slot.data_shreds_buffer.len();
|
||||
// Consume a multiple of 32, unless this is the slot end.
|
||||
let offset = if is_slot_end {
|
||||
0
|
||||
} else {
|
||||
size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize
|
||||
};
|
||||
unfinished_slot
|
||||
.data_shreds_buffer
|
||||
.drain(0..size - offset)
|
||||
.collect()
|
||||
};
|
||||
let shreds = Shredder::data_shreds_to_coding_shreds(
|
||||
keypair,
|
||||
&data_shreds,
|
||||
is_slot_end,
|
||||
unfinished_slot.next_code_index,
|
||||
stats,
|
||||
)
|
||||
.unwrap();
|
||||
if let Some(index) = shreds
|
||||
.iter()
|
||||
.filter(|shred| shred.is_code())
|
||||
.map(Shred::index)
|
||||
.max()
|
||||
{
|
||||
unfinished_slot.next_code_index = unfinished_slot.next_code_index.max(index + 1);
|
||||
}
|
||||
shreds
|
||||
}
|
||||
|
||||
impl BroadcastRun for StandardBroadcastRun {
|
||||
@@ -579,6 +604,7 @@ mod test {
|
||||
let parent = 0;
|
||||
run.unfinished_slot = Some(UnfinishedSlotInfo {
|
||||
next_shred_index,
|
||||
next_code_index: 17,
|
||||
slot,
|
||||
parent,
|
||||
data_shreds_buffer: Vec::default(),
|
||||
|
@@ -13,7 +13,6 @@ use {
|
||||
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Select,
|
||||
Sender as CrossbeamSender,
|
||||
},
|
||||
itertools::izip,
|
||||
log::*,
|
||||
solana_gossip::{
|
||||
cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS},
|
||||
@@ -32,24 +31,22 @@ use {
|
||||
bank::Bank,
|
||||
bank_forks::BankForks,
|
||||
commitment::VOTE_THRESHOLD_SIZE,
|
||||
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
|
||||
vote_sender_types::ReplayVoteReceiver,
|
||||
epoch_stakes::EpochStakes,
|
||||
vote_parser,
|
||||
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
|
||||
},
|
||||
solana_sdk::{
|
||||
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
|
||||
epoch_schedule::EpochSchedule,
|
||||
clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
slot_hashes,
|
||||
transaction::Transaction,
|
||||
},
|
||||
solana_vote_program::{
|
||||
vote_state::VoteTransaction,
|
||||
vote_transaction::{self, ParsedVote},
|
||||
},
|
||||
solana_vote_program::vote_state::Vote,
|
||||
std::{
|
||||
collections::{HashMap, HashSet},
|
||||
iter::repeat,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, Mutex, RwLock,
|
||||
@@ -61,7 +58,6 @@ use {
|
||||
|
||||
// Map from a vote account to the authorized voter for an epoch
|
||||
pub type ThresholdConfirmedSlots = Vec<(Slot, Hash)>;
|
||||
pub type VotedHashUpdates = HashMap<Hash, Vec<Pubkey>>;
|
||||
pub type VerifiedLabelVotePacketsSender = CrossbeamSender<Vec<VerifiedVoteMetadata>>;
|
||||
pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver<Vec<VerifiedVoteMetadata>>;
|
||||
pub type VerifiedVoteTransactionsSender = CrossbeamSender<Vec<Transaction>>;
|
||||
@@ -88,14 +84,14 @@ pub struct SlotVoteTracker {
|
||||
}
|
||||
|
||||
impl SlotVoteTracker {
|
||||
pub fn get_voted_slot_updates(&mut self) -> Option<Vec<Pubkey>> {
|
||||
pub(crate) fn get_voted_slot_updates(&mut self) -> Option<Vec<Pubkey>> {
|
||||
self.voted_slot_updates.take()
|
||||
}
|
||||
|
||||
pub fn get_or_insert_optimistic_votes_tracker(&mut self, hash: Hash) -> &mut VoteStakeTracker {
|
||||
fn get_or_insert_optimistic_votes_tracker(&mut self, hash: Hash) -> &mut VoteStakeTracker {
|
||||
self.optimistic_votes_tracker.entry(hash).or_default()
|
||||
}
|
||||
pub fn optimistic_votes_tracker(&self, hash: &Hash) -> Option<&VoteStakeTracker> {
|
||||
pub(crate) fn optimistic_votes_tracker(&self, hash: &Hash) -> Option<&VoteStakeTracker> {
|
||||
self.optimistic_votes_tracker.get(hash)
|
||||
}
|
||||
}
|
||||
@@ -104,82 +100,29 @@ impl SlotVoteTracker {
|
||||
pub struct VoteTracker {
|
||||
// Map from a slot to a set of validators who have voted for that slot
|
||||
slot_vote_trackers: RwLock<HashMap<Slot, Arc<RwLock<SlotVoteTracker>>>>,
|
||||
// Don't track votes from people who are not staked, acts as a spam filter
|
||||
epoch_authorized_voters: RwLock<HashMap<Epoch, Arc<EpochAuthorizedVoters>>>,
|
||||
leader_schedule_epoch: RwLock<Epoch>,
|
||||
current_epoch: RwLock<Epoch>,
|
||||
epoch_schedule: EpochSchedule,
|
||||
}
|
||||
|
||||
impl VoteTracker {
|
||||
pub fn new(root_bank: &Bank) -> Self {
|
||||
let current_epoch = root_bank.epoch();
|
||||
let vote_tracker = Self {
|
||||
leader_schedule_epoch: RwLock::new(current_epoch),
|
||||
current_epoch: RwLock::new(current_epoch),
|
||||
epoch_schedule: *root_bank.epoch_schedule(),
|
||||
..VoteTracker::default()
|
||||
};
|
||||
pub(crate) fn new(root_bank: &Bank) -> Self {
|
||||
let vote_tracker = VoteTracker::default();
|
||||
vote_tracker.progress_with_new_root_bank(root_bank);
|
||||
assert_eq!(
|
||||
*vote_tracker.leader_schedule_epoch.read().unwrap(),
|
||||
root_bank.get_leader_schedule_epoch(root_bank.slot())
|
||||
);
|
||||
assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch,);
|
||||
vote_tracker
|
||||
}
|
||||
|
||||
pub fn get_or_insert_slot_tracker(&self, slot: Slot) -> Arc<RwLock<SlotVoteTracker>> {
|
||||
let mut slot_tracker = self.slot_vote_trackers.read().unwrap().get(&slot).cloned();
|
||||
|
||||
if slot_tracker.is_none() {
|
||||
let new_slot_tracker = Arc::new(RwLock::new(SlotVoteTracker {
|
||||
voted: HashMap::new(),
|
||||
optimistic_votes_tracker: HashMap::default(),
|
||||
voted_slot_updates: None,
|
||||
gossip_only_stake: 0,
|
||||
}));
|
||||
self.slot_vote_trackers
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(slot, new_slot_tracker.clone());
|
||||
slot_tracker = Some(new_slot_tracker);
|
||||
fn get_or_insert_slot_tracker(&self, slot: Slot) -> Arc<RwLock<SlotVoteTracker>> {
|
||||
if let Some(slot_vote_tracker) = self.slot_vote_trackers.read().unwrap().get(&slot) {
|
||||
return slot_vote_tracker.clone();
|
||||
}
|
||||
|
||||
slot_tracker.unwrap()
|
||||
let mut slot_vote_trackers = self.slot_vote_trackers.write().unwrap();
|
||||
slot_vote_trackers.entry(slot).or_default().clone()
|
||||
}
|
||||
|
||||
pub fn get_slot_vote_tracker(&self, slot: Slot) -> Option<Arc<RwLock<SlotVoteTracker>>> {
|
||||
pub(crate) fn get_slot_vote_tracker(&self, slot: Slot) -> Option<Arc<RwLock<SlotVoteTracker>>> {
|
||||
self.slot_vote_trackers.read().unwrap().get(&slot).cloned()
|
||||
}
|
||||
|
||||
pub fn get_authorized_voter(&self, pubkey: &Pubkey, slot: Slot) -> Option<Pubkey> {
|
||||
let epoch = self.epoch_schedule.get_epoch(slot);
|
||||
self.epoch_authorized_voters
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&epoch)
|
||||
.map(|epoch_authorized_voters| epoch_authorized_voters.get(pubkey))
|
||||
.unwrap_or(None)
|
||||
.cloned()
|
||||
}
|
||||
|
||||
pub fn vote_contains_authorized_voter(
|
||||
vote_tx: &Transaction,
|
||||
authorized_voter: &Pubkey,
|
||||
) -> bool {
|
||||
let message = &vote_tx.message;
|
||||
for (i, key) in message.account_keys.iter().enumerate() {
|
||||
if message.is_signer(i) && key == authorized_voter {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn insert_vote(&self, slot: Slot, pubkey: Pubkey) {
|
||||
pub(crate) fn insert_vote(&self, slot: Slot, pubkey: Pubkey) {
|
||||
let mut w_slot_vote_trackers = self.slot_vote_trackers.write().unwrap();
|
||||
|
||||
let slot_vote_tracker = w_slot_vote_trackers.entry(slot).or_default();
|
||||
@@ -194,59 +137,16 @@ impl VoteTracker {
|
||||
}
|
||||
}
|
||||
|
||||
fn progress_leader_schedule_epoch(&self, root_bank: &Bank) {
|
||||
// Update with any newly calculated epoch state about future epochs
|
||||
let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap();
|
||||
let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch;
|
||||
for leader_schedule_epoch in
|
||||
start_leader_schedule_epoch..=root_bank.get_leader_schedule_epoch(root_bank.slot())
|
||||
{
|
||||
let exists = self
|
||||
.epoch_authorized_voters
|
||||
.read()
|
||||
.unwrap()
|
||||
.contains_key(&leader_schedule_epoch);
|
||||
if !exists {
|
||||
let epoch_authorized_voters = root_bank
|
||||
.epoch_stakes(leader_schedule_epoch)
|
||||
.unwrap()
|
||||
.epoch_authorized_voters()
|
||||
.clone();
|
||||
self.epoch_authorized_voters
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(leader_schedule_epoch, epoch_authorized_voters);
|
||||
greatest_leader_schedule_epoch = leader_schedule_epoch;
|
||||
}
|
||||
}
|
||||
|
||||
if greatest_leader_schedule_epoch != start_leader_schedule_epoch {
|
||||
*self.leader_schedule_epoch.write().unwrap() = greatest_leader_schedule_epoch;
|
||||
}
|
||||
}
|
||||
|
||||
fn purge_stale_state(&self, root_bank: &Bank) {
|
||||
// Purge any outdated slot data
|
||||
let new_root = root_bank.slot();
|
||||
let root_epoch = root_bank.epoch();
|
||||
self.slot_vote_trackers
|
||||
.write()
|
||||
.unwrap()
|
||||
.retain(|slot, _| *slot >= new_root);
|
||||
|
||||
let current_epoch = *self.current_epoch.read().unwrap();
|
||||
if root_epoch != current_epoch {
|
||||
// If root moved to a new epoch, purge outdated state
|
||||
self.epoch_authorized_voters
|
||||
.write()
|
||||
.unwrap()
|
||||
.retain(|epoch, _| *epoch >= root_epoch);
|
||||
*self.current_epoch.write().unwrap() = root_epoch;
|
||||
}
|
||||
}
|
||||
|
||||
fn progress_with_new_root_bank(&self, root_bank: &Bank) {
|
||||
self.progress_leader_schedule_epoch(root_bank);
|
||||
self.purge_stale_state(root_bank);
|
||||
}
|
||||
}
|
||||
@@ -297,10 +197,10 @@ pub struct ClusterInfoVoteListener {
|
||||
impl ClusterInfoVoteListener {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
exit: Arc<AtomicBool>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
verified_packets_sender: CrossbeamSender<Vec<PacketBatch>>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
poh_recorder: Arc<Mutex<PohRecorder>>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
@@ -311,25 +211,26 @@ impl ClusterInfoVoteListener {
|
||||
bank_notification_sender: Option<BankNotificationSender>,
|
||||
cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
|
||||
) -> Self {
|
||||
let exit_ = exit.clone();
|
||||
|
||||
let (verified_vote_label_packets_sender, verified_vote_label_packets_receiver) =
|
||||
unbounded();
|
||||
let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded();
|
||||
let listen_thread = Builder::new()
|
||||
.name("solana-cluster_info_vote_listener".to_string())
|
||||
.spawn(move || {
|
||||
let _ = Self::recv_loop(
|
||||
exit_,
|
||||
&cluster_info,
|
||||
verified_vote_label_packets_sender,
|
||||
verified_vote_transactions_sender,
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let listen_thread = {
|
||||
let exit = exit.clone();
|
||||
let bank_forks = bank_forks.clone();
|
||||
Builder::new()
|
||||
.name("solana-cluster_info_vote_listener".to_string())
|
||||
.spawn(move || {
|
||||
let _ = Self::recv_loop(
|
||||
exit,
|
||||
&cluster_info,
|
||||
&bank_forks,
|
||||
verified_vote_label_packets_sender,
|
||||
verified_vote_transactions_sender,
|
||||
);
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
let exit_ = exit.clone();
|
||||
let poh_recorder = poh_recorder.clone();
|
||||
let bank_send_thread = Builder::new()
|
||||
.name("solana-cluster_info_bank_send".to_string())
|
||||
.spawn(move || {
|
||||
@@ -342,12 +243,11 @@ impl ClusterInfoVoteListener {
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let exit_ = exit.clone();
|
||||
let send_thread = Builder::new()
|
||||
.name("solana-cluster_info_process_votes".to_string())
|
||||
.spawn(move || {
|
||||
let _ = Self::process_votes_loop(
|
||||
exit_,
|
||||
exit,
|
||||
verified_vote_transactions_receiver,
|
||||
vote_tracker,
|
||||
bank_forks,
|
||||
@@ -367,16 +267,14 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
for thread_hdl in self.thread_hdls {
|
||||
thread_hdl.join()?;
|
||||
}
|
||||
Ok(())
|
||||
pub(crate) fn join(self) -> thread::Result<()> {
|
||||
self.thread_hdls.into_iter().try_for_each(JoinHandle::join)
|
||||
}
|
||||
|
||||
fn recv_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
cluster_info: &ClusterInfo,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender,
|
||||
verified_vote_transactions_sender: VerifiedVoteTransactionsSender,
|
||||
) -> Result<()> {
|
||||
@@ -385,7 +283,7 @@ impl ClusterInfoVoteListener {
|
||||
let votes = cluster_info.get_votes(&mut cursor);
|
||||
inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len());
|
||||
if !votes.is_empty() {
|
||||
let (vote_txs, packets) = Self::verify_votes(votes);
|
||||
let (vote_txs, packets) = Self::verify_votes(votes, bank_forks);
|
||||
verified_vote_transactions_sender.send(vote_txs)?;
|
||||
verified_vote_label_packets_sender.send(packets)?;
|
||||
}
|
||||
@@ -395,43 +293,45 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn verify_votes(votes: Vec<Transaction>) -> (Vec<Transaction>, Vec<VerifiedVoteMetadata>) {
|
||||
fn verify_votes(
|
||||
votes: Vec<Transaction>,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
) -> (Vec<Transaction>, Vec<VerifiedVoteMetadata>) {
|
||||
let mut packet_batches = packet::to_packet_batches(&votes, 1);
|
||||
|
||||
// Votes should already be filtered by this point.
|
||||
let reject_non_vote = false;
|
||||
sigverify::ed25519_verify_cpu(&mut packet_batches, reject_non_vote);
|
||||
|
||||
let (vote_txs, vote_metadata) = izip!(votes.into_iter(), packet_batches)
|
||||
.filter_map(|(vote_tx, packet_batch)| {
|
||||
let (vote, vote_account_key) = vote_transaction::parse_vote_transaction(&vote_tx)
|
||||
.and_then(|(vote_account_key, vote, _)| {
|
||||
if vote.slots().is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some((vote, vote_account_key))
|
||||
}
|
||||
})?;
|
||||
|
||||
sigverify::ed25519_verify_cpu(&mut packet_batches, /*reject_non_vote=*/ false);
|
||||
let root_bank = bank_forks.read().unwrap().root_bank();
|
||||
let epoch_schedule = root_bank.epoch_schedule();
|
||||
votes
|
||||
.into_iter()
|
||||
.zip(packet_batches)
|
||||
.filter(|(_, packet_batch)| {
|
||||
// to_packet_batches() above splits into 1 packet long batches
|
||||
assert_eq!(packet_batch.packets.len(), 1);
|
||||
if !packet_batch.packets[0].meta.discard {
|
||||
if let Some(signature) = vote_tx.signatures.first().cloned() {
|
||||
return Some((
|
||||
vote_tx,
|
||||
VerifiedVoteMetadata {
|
||||
vote_account_key,
|
||||
vote,
|
||||
packet_batch,
|
||||
signature,
|
||||
},
|
||||
));
|
||||
}
|
||||
}
|
||||
None
|
||||
!packet_batch.packets[0].meta.discard()
|
||||
})
|
||||
.unzip();
|
||||
(vote_txs, vote_metadata)
|
||||
.filter_map(|(tx, packet_batch)| {
|
||||
let (vote_account_key, vote, _) = vote_parser::parse_vote_transaction(&tx)?;
|
||||
let slot = vote.last_voted_slot()?;
|
||||
let epoch = epoch_schedule.get_epoch(slot);
|
||||
let authorized_voter = root_bank
|
||||
.epoch_stakes(epoch)?
|
||||
.epoch_authorized_voters()
|
||||
.get(&vote_account_key)?;
|
||||
let mut keys = tx.message.account_keys.iter().enumerate();
|
||||
if !keys.any(|(i, key)| tx.message.is_signer(i) && key == authorized_voter) {
|
||||
return None;
|
||||
}
|
||||
let verified_vote_metadata = VerifiedVoteMetadata {
|
||||
vote_account_key,
|
||||
vote,
|
||||
packet_batch,
|
||||
signature: *tx.signatures.first()?,
|
||||
};
|
||||
Some((tx, verified_vote_metadata))
|
||||
})
|
||||
.unzip()
|
||||
}
|
||||
|
||||
fn bank_send_loop(
|
||||
@@ -460,7 +360,7 @@ impl ClusterInfoVoteListener {
|
||||
) {
|
||||
match e {
|
||||
Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected)
|
||||
| Error::ReadyTimeout => (),
|
||||
| Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => (),
|
||||
_ => {
|
||||
error!("thread {:?} error {:?}", thread::current().name(), e);
|
||||
}
|
||||
@@ -558,7 +458,7 @@ impl ClusterInfoVoteListener {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
let root_bank = bank_forks.read().unwrap().root_bank();
|
||||
if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 {
|
||||
let unrooted_optimistic_slots = confirmation_verifier
|
||||
.verify_for_unrooted_optimistic_slots(&root_bank, &blockstore);
|
||||
@@ -677,7 +577,7 @@ impl ClusterInfoVoteListener {
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn track_new_votes_and_notify_confirmations(
|
||||
vote: Box<dyn VoteTransaction>,
|
||||
vote: Vote,
|
||||
vote_pubkey: &Pubkey,
|
||||
vote_tracker: &VoteTracker,
|
||||
root_bank: &Bank,
|
||||
@@ -690,17 +590,17 @@ impl ClusterInfoVoteListener {
|
||||
bank_notification_sender: &Option<BankNotificationSender>,
|
||||
cluster_confirmed_slot_sender: &Option<GossipDuplicateConfirmedSlotsSender>,
|
||||
) {
|
||||
if vote.is_empty() {
|
||||
if vote.slots.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let (last_vote_slot, last_vote_hash) = vote.last_voted_slot_hash().unwrap();
|
||||
let last_vote_slot = *vote.slots.last().unwrap();
|
||||
let last_vote_hash = vote.hash;
|
||||
|
||||
let root = root_bank.slot();
|
||||
let mut is_new_vote = false;
|
||||
let vote_slots = vote.slots();
|
||||
// If slot is before the root, ignore it
|
||||
for slot in vote_slots.iter().filter(|slot| **slot > root).rev() {
|
||||
for slot in vote.slots.iter().filter(|slot| **slot > root).rev() {
|
||||
let slot = *slot;
|
||||
|
||||
// if we don't have stake information, ignore it
|
||||
@@ -784,48 +684,15 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
|
||||
if is_new_vote {
|
||||
subscriptions.notify_vote(vote);
|
||||
let _ = verified_vote_sender.send((*vote_pubkey, vote_slots));
|
||||
subscriptions.notify_vote(&vote);
|
||||
let _ = verified_vote_sender.send((*vote_pubkey, vote.slots));
|
||||
}
|
||||
}
|
||||
|
||||
fn filter_gossip_votes(
|
||||
vote_tracker: &VoteTracker,
|
||||
vote_pubkey: &Pubkey,
|
||||
vote: &dyn VoteTransaction,
|
||||
gossip_tx: &Transaction,
|
||||
) -> bool {
|
||||
if vote.is_empty() {
|
||||
return false;
|
||||
}
|
||||
let last_vote_slot = vote.last_voted_slot().unwrap();
|
||||
// Votes from gossip need to be verified as they have not been
|
||||
// verified by the replay pipeline. Determine the authorized voter
|
||||
// based on the last vote slot. This will drop votes from authorized
|
||||
// voters trying to make votes for slots earlier than the epoch for
|
||||
// which they are authorized
|
||||
let actual_authorized_voter =
|
||||
vote_tracker.get_authorized_voter(vote_pubkey, last_vote_slot);
|
||||
|
||||
if actual_authorized_voter.is_none() {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Voting without the correct authorized pubkey, dump the vote
|
||||
if !VoteTracker::vote_contains_authorized_voter(
|
||||
gossip_tx,
|
||||
&actual_authorized_voter.unwrap(),
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
fn filter_and_confirm_with_new_votes(
|
||||
vote_tracker: &VoteTracker,
|
||||
gossip_vote_txs: Vec<Transaction>,
|
||||
replayed_votes: Vec<ParsedVote>,
|
||||
replayed_votes: Vec<ReplayedVote>,
|
||||
root_bank: &Bank,
|
||||
subscriptions: &RpcSubscriptions,
|
||||
gossip_verified_vote_hash_sender: &GossipVerifiedVoteHashSender,
|
||||
@@ -837,17 +704,12 @@ impl ClusterInfoVoteListener {
|
||||
let mut new_optimistic_confirmed_slots = vec![];
|
||||
|
||||
// Process votes from gossip and ReplayStage
|
||||
for (is_gossip, (vote_pubkey, vote, _)) in gossip_vote_txs
|
||||
let votes = gossip_vote_txs
|
||||
.iter()
|
||||
.filter_map(|gossip_tx| {
|
||||
vote_transaction::parse_vote_transaction(gossip_tx)
|
||||
.filter(|(vote_pubkey, vote, _)| {
|
||||
Self::filter_gossip_votes(vote_tracker, vote_pubkey, &**vote, gossip_tx)
|
||||
})
|
||||
.map(|v| (true, v))
|
||||
})
|
||||
.chain(replayed_votes.into_iter().map(|v| (false, v)))
|
||||
{
|
||||
.filter_map(vote_parser::parse_vote_transaction)
|
||||
.zip(repeat(/*is_gossip:*/ true))
|
||||
.chain(replayed_votes.into_iter().zip(repeat(/*is_gossip:*/ false)));
|
||||
for ((vote_pubkey, vote, _), is_gossip) in votes {
|
||||
Self::track_new_votes_and_notify_confirmations(
|
||||
vote,
|
||||
&vote_pubkey,
|
||||
@@ -962,8 +824,12 @@ mod tests {
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signature, Signer},
|
||||
},
|
||||
solana_vote_program::vote_state::Vote,
|
||||
std::{collections::BTreeSet, sync::Arc},
|
||||
solana_vote_program::{vote_state::Vote, vote_transaction},
|
||||
std::{
|
||||
collections::BTreeSet,
|
||||
iter::repeat_with,
|
||||
sync::{atomic::AtomicU64, Arc},
|
||||
},
|
||||
};
|
||||
|
||||
#[test]
|
||||
@@ -991,73 +857,6 @@ mod tests {
|
||||
assert_eq!(packet_batches.len(), 1);
|
||||
}
|
||||
|
||||
fn run_vote_contains_authorized_voter(hash: Option<Hash>) {
|
||||
let node_keypair = Keypair::new();
|
||||
let vote_keypair = Keypair::new();
|
||||
let authorized_voter = Keypair::new();
|
||||
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
vec![0],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&authorized_voter,
|
||||
hash,
|
||||
);
|
||||
|
||||
// Check that the two signing keys pass the check
|
||||
assert!(VoteTracker::vote_contains_authorized_voter(
|
||||
&vote_tx,
|
||||
&node_keypair.pubkey()
|
||||
));
|
||||
|
||||
assert!(VoteTracker::vote_contains_authorized_voter(
|
||||
&vote_tx,
|
||||
&authorized_voter.pubkey()
|
||||
));
|
||||
|
||||
// Non signing key shouldn't pass the check
|
||||
assert!(!VoteTracker::vote_contains_authorized_voter(
|
||||
&vote_tx,
|
||||
&vote_keypair.pubkey()
|
||||
));
|
||||
|
||||
// Set the authorized voter == vote keypair
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
vec![0],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&vote_keypair,
|
||||
hash,
|
||||
);
|
||||
|
||||
// Check that the node_keypair and vote keypair pass the authorized voter check
|
||||
assert!(VoteTracker::vote_contains_authorized_voter(
|
||||
&vote_tx,
|
||||
&node_keypair.pubkey()
|
||||
));
|
||||
|
||||
assert!(VoteTracker::vote_contains_authorized_voter(
|
||||
&vote_tx,
|
||||
&vote_keypair.pubkey()
|
||||
));
|
||||
|
||||
// The other keypair should not pass the check
|
||||
assert!(!VoteTracker::vote_contains_authorized_voter(
|
||||
&vote_tx,
|
||||
&authorized_voter.pubkey()
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vote_contains_authorized_voter() {
|
||||
run_vote_contains_authorized_voter(None);
|
||||
run_vote_contains_authorized_voter(Some(Hash::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_new_root() {
|
||||
let (vote_tracker, bank, _, _) = setup();
|
||||
@@ -1091,15 +890,11 @@ mod tests {
|
||||
.get_first_slot_in_epoch(current_epoch + 1),
|
||||
);
|
||||
vote_tracker.progress_with_new_root_bank(&new_epoch_bank);
|
||||
assert_eq!(
|
||||
*vote_tracker.current_epoch.read().unwrap(),
|
||||
current_epoch + 1
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_new_leader_schedule_epoch() {
|
||||
let (vote_tracker, bank, _, _) = setup();
|
||||
let (_, bank, _, _) = setup();
|
||||
|
||||
// Check outdated slots are purged with new root
|
||||
let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
@@ -1117,25 +912,6 @@ mod tests {
|
||||
bank.get_leader_schedule_epoch(next_leader_schedule_computed),
|
||||
next_leader_schedule_epoch
|
||||
);
|
||||
let next_leader_schedule_bank =
|
||||
Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed);
|
||||
vote_tracker.progress_leader_schedule_epoch(&next_leader_schedule_bank);
|
||||
assert_eq!(
|
||||
*vote_tracker.leader_schedule_epoch.read().unwrap(),
|
||||
next_leader_schedule_epoch
|
||||
);
|
||||
assert_eq!(
|
||||
vote_tracker
|
||||
.epoch_authorized_voters
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&next_leader_schedule_epoch)
|
||||
.unwrap(),
|
||||
next_leader_schedule_bank
|
||||
.epoch_stakes(next_leader_schedule_epoch)
|
||||
.unwrap()
|
||||
.epoch_authorized_voters()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1246,7 +1022,7 @@ mod tests {
|
||||
replay_votes_sender
|
||||
.send((
|
||||
vote_keypair.pubkey(),
|
||||
Box::new(replay_vote.clone()),
|
||||
replay_vote.clone(),
|
||||
switch_proof_hash,
|
||||
))
|
||||
.unwrap();
|
||||
@@ -1493,8 +1269,7 @@ mod tests {
|
||||
let (votes_sender, votes_receiver) = unbounded();
|
||||
let (verified_vote_sender, _verified_vote_receiver) = unbounded();
|
||||
let (gossip_verified_vote_hash_sender, _gossip_verified_vote_hash_receiver) = unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver): (ReplayVoteSender, ReplayVoteReceiver) =
|
||||
unbounded();
|
||||
let (replay_votes_sender, replay_votes_receiver) = unbounded();
|
||||
|
||||
let vote_slot = 1;
|
||||
let vote_bank_hash = Hash::default();
|
||||
@@ -1534,7 +1309,7 @@ mod tests {
|
||||
replay_votes_sender
|
||||
.send((
|
||||
vote_keypair.pubkey(),
|
||||
Box::new(Vote::new(vec![vote_slot], Hash::default())),
|
||||
Vote::new(vec![vote_slot], Hash::default()),
|
||||
switch_proof_hash,
|
||||
))
|
||||
.unwrap();
|
||||
@@ -1578,59 +1353,6 @@ mod tests {
|
||||
run_test_process_votes3(Some(Hash::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_voters_by_epoch() {
|
||||
// Create some voters at genesis
|
||||
let (vote_tracker, bank, validator_voting_keypairs, _) = setup();
|
||||
let last_known_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let last_known_slot = bank
|
||||
.epoch_schedule()
|
||||
.get_last_slot_in_epoch(last_known_epoch);
|
||||
|
||||
// Check we can get the authorized voters
|
||||
for keypairs in &validator_voting_keypairs {
|
||||
assert!(vote_tracker
|
||||
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), last_known_slot)
|
||||
.is_some());
|
||||
assert!(vote_tracker
|
||||
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), last_known_slot + 1)
|
||||
.is_none());
|
||||
}
|
||||
|
||||
// Create the set of relevant voters for the next epoch
|
||||
let new_epoch = last_known_epoch + 1;
|
||||
let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch);
|
||||
let new_keypairs: Vec<_> = (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
|
||||
let new_epoch_authorized_voters: HashMap<_, _> = new_keypairs
|
||||
.iter()
|
||||
.chain(validator_voting_keypairs[0..5].iter())
|
||||
.map(|keypair| (keypair.vote_keypair.pubkey(), keypair.vote_keypair.pubkey()))
|
||||
.collect();
|
||||
|
||||
vote_tracker
|
||||
.epoch_authorized_voters
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(new_epoch, Arc::new(new_epoch_authorized_voters));
|
||||
|
||||
// These keypairs made it into the new epoch
|
||||
for keypairs in new_keypairs
|
||||
.iter()
|
||||
.chain(validator_voting_keypairs[0..5].iter())
|
||||
{
|
||||
assert!(vote_tracker
|
||||
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), first_slot_in_new_epoch)
|
||||
.is_some());
|
||||
}
|
||||
|
||||
// These keypairs were not refreshed in new epoch
|
||||
for keypairs in validator_voting_keypairs[5..10].iter() {
|
||||
assert!(vote_tracker
|
||||
.get_authorized_voter(&keypairs.vote_keypair.pubkey(), first_slot_in_new_epoch)
|
||||
.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_vote_tracker_references() {
|
||||
// Create some voters at genesis
|
||||
@@ -1650,8 +1372,10 @@ mod tests {
|
||||
let vote_tracker = VoteTracker::new(&bank);
|
||||
let optimistically_confirmed_bank =
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
optimistically_confirmed_bank,
|
||||
@@ -1680,7 +1404,7 @@ mod tests {
|
||||
// Add gossip vote for same slot, should not affect outcome
|
||||
vec![(
|
||||
validator0_keypairs.vote_keypair.pubkey(),
|
||||
Box::new(Vote::new(vec![voted_slot], Hash::default())),
|
||||
Vote::new(vec![voted_slot], Hash::default()),
|
||||
None,
|
||||
)],
|
||||
&bank,
|
||||
@@ -1694,17 +1418,6 @@ mod tests {
|
||||
// Setup next epoch
|
||||
let old_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let new_epoch = old_epoch + 1;
|
||||
let new_epoch_vote_accounts: HashMap<_, _> = vec![(
|
||||
validator0_keypairs.vote_keypair.pubkey(),
|
||||
validator0_keypairs.vote_keypair.pubkey(),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
vote_tracker
|
||||
.epoch_authorized_voters
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(new_epoch, Arc::new(new_epoch_vote_accounts));
|
||||
|
||||
// Test with votes across two epochs
|
||||
let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch);
|
||||
@@ -1736,7 +1449,7 @@ mod tests {
|
||||
vote_txs,
|
||||
vec![(
|
||||
validator_keypairs[1].vote_keypair.pubkey(),
|
||||
Box::new(Vote::new(vec![first_slot_in_new_epoch], Hash::default())),
|
||||
Vote::new(vec![first_slot_in_new_epoch], Hash::default()),
|
||||
None,
|
||||
)],
|
||||
&new_root_bank,
|
||||
@@ -1769,36 +1482,15 @@ mod tests {
|
||||
let bank = bank_forks.read().unwrap().get(0).unwrap().clone();
|
||||
let optimistically_confirmed_bank =
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
optimistically_confirmed_bank,
|
||||
));
|
||||
|
||||
// Integrity Checks
|
||||
let current_epoch = bank.epoch();
|
||||
let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
|
||||
// Check the vote tracker has all the known epoch state on construction
|
||||
for epoch in current_epoch..=leader_schedule_epoch {
|
||||
assert_eq!(
|
||||
vote_tracker
|
||||
.epoch_authorized_voters
|
||||
.read()
|
||||
.unwrap()
|
||||
.get(&epoch)
|
||||
.unwrap(),
|
||||
bank.epoch_stakes(epoch).unwrap().epoch_authorized_voters()
|
||||
);
|
||||
}
|
||||
|
||||
// Check the epoch state is correct
|
||||
assert_eq!(
|
||||
*vote_tracker.leader_schedule_epoch.read().unwrap(),
|
||||
leader_schedule_epoch,
|
||||
);
|
||||
assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch);
|
||||
(
|
||||
Arc::new(vote_tracker),
|
||||
bank,
|
||||
@@ -1810,8 +1502,11 @@ mod tests {
|
||||
#[test]
|
||||
fn test_verify_votes_empty() {
|
||||
solana_logger::setup();
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let bank_forks = RwLock::new(BankForks::new(bank));
|
||||
let votes = vec![];
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes);
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks);
|
||||
assert!(vote_txs.is_empty());
|
||||
assert!(packets.is_empty());
|
||||
}
|
||||
@@ -1824,25 +1519,40 @@ mod tests {
|
||||
assert_eq!(num_packets, ref_value);
|
||||
}
|
||||
|
||||
fn test_vote_tx(hash: Option<Hash>) -> Transaction {
|
||||
let node_keypair = Keypair::new();
|
||||
let vote_keypair = Keypair::new();
|
||||
let auth_voter_keypair = Keypair::new();
|
||||
fn test_vote_tx(
|
||||
validator_vote_keypairs: Option<&ValidatorVoteKeypairs>,
|
||||
hash: Option<Hash>,
|
||||
) -> Transaction {
|
||||
let other = ValidatorVoteKeypairs::new_rand();
|
||||
let validator_vote_keypair = validator_vote_keypairs.unwrap_or(&other);
|
||||
// TODO authorized_voter_keypair should be different from vote-keypair
|
||||
// but that is what create_genesis_... currently generates.
|
||||
vote_transaction::new_vote_transaction(
|
||||
vec![0],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&auth_voter_keypair,
|
||||
&validator_vote_keypair.node_keypair,
|
||||
&validator_vote_keypair.vote_keypair,
|
||||
&validator_vote_keypair.vote_keypair, // authorized_voter_keypair
|
||||
hash,
|
||||
)
|
||||
}
|
||||
|
||||
fn run_test_verify_votes_1_pass(hash: Option<Hash>) {
|
||||
let vote_tx = test_vote_tx(hash);
|
||||
let voting_keypairs: Vec<_> = repeat_with(ValidatorVoteKeypairs::new_rand)
|
||||
.take(10)
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000, // mint_lamports
|
||||
&voting_keypairs,
|
||||
vec![100; voting_keypairs.len()], // stakes
|
||||
);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let bank_forks = RwLock::new(BankForks::new(bank));
|
||||
let vote_tx = test_vote_tx(voting_keypairs.first(), hash);
|
||||
let votes = vec![vote_tx];
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes);
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks);
|
||||
assert_eq!(vote_txs.len(), 1);
|
||||
verify_packets_len(&packets, 1);
|
||||
}
|
||||
@@ -1854,11 +1564,22 @@ mod tests {
|
||||
}
|
||||
|
||||
fn run_test_bad_vote(hash: Option<Hash>) {
|
||||
let vote_tx = test_vote_tx(hash);
|
||||
let voting_keypairs: Vec<_> = repeat_with(ValidatorVoteKeypairs::new_rand)
|
||||
.take(10)
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000, // mint_lamports
|
||||
&voting_keypairs,
|
||||
vec![100; voting_keypairs.len()], // stakes
|
||||
);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let bank_forks = RwLock::new(BankForks::new(bank));
|
||||
let vote_tx = test_vote_tx(voting_keypairs.first(), hash);
|
||||
let mut bad_vote = vote_tx.clone();
|
||||
bad_vote.signatures[0] = Signature::default();
|
||||
let votes = vec![vote_tx.clone(), bad_vote, vote_tx];
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes);
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks);
|
||||
assert_eq!(vote_txs.len(), 2);
|
||||
verify_packets_len(&packets, 2);
|
||||
}
@@ -97,11 +97,8 @@ impl AggregateCommitmentService {
             return Ok(());
         }

-        let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
-
-        while let Ok(new_data) = receiver.try_recv() {
-            aggregation_data = new_data;
-        }
+        let aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
+        let aggregation_data = receiver.try_iter().last().unwrap_or(aggregation_data);

         let ancestors = aggregation_data.bank.status_cache_ancestors();
         if ancestors.is_empty() {
@@ -506,11 +503,7 @@ mod tests {

         let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand();
         let validator_keypairs = vec![&validator_vote_keypairs];
-        let GenesisConfigInfo {
-            genesis_config,
-            mint_keypair: _,
-            voting_keypair: _,
-        } = create_genesis_config_with_vote_accounts(
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts(
             1_000_000_000,
             &validator_keypairs,
             vec![100; 1],
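Aside: the hunks above (and the matching LedgerCleanupService hunk further down) replace a hand-written drain loop with recv_timeout() followed by try_iter().last(). A minimal, self-contained sketch of that idiom, assuming the crossbeam-channel crate these services appear to use for their unbounded channels; the helper name is illustrative:

use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError};
use std::time::Duration;

// Wait up to one second for a first value, then drain whatever else is already
// queued and keep only the most recent item.
fn latest<T>(receiver: &Receiver<T>) -> Result<T, RecvTimeoutError> {
    let first = receiver.recv_timeout(Duration::from_secs(1))?;
    // try_iter() never blocks; last() keeps the newest value, falling back to `first`.
    Ok(receiver.try_iter().last().unwrap_or(first))
}

fn main() {
    let (sender, receiver) = unbounded();
    for slot in [1u64, 2, 3] {
        sender.send(slot).unwrap();
    }
    assert_eq!(latest(&receiver), Ok(3));
}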
@@ -21,9 +21,7 @@ use {
|
||||
},
|
||||
solana_vote_program::{
|
||||
vote_instruction,
|
||||
vote_state::{
|
||||
BlockTimestamp, Lockout, Vote, VoteState, VoteTransaction, MAX_LOCKOUT_HISTORY,
|
||||
},
|
||||
vote_state::{BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY},
|
||||
},
|
||||
std::{
|
||||
cmp::Ordering,
|
||||
@@ -369,16 +367,22 @@ impl Tower {
|
||||
) -> Vote {
|
||||
let vote = Vote::new(vec![slot], hash);
|
||||
local_vote_state.process_vote_unchecked(&vote);
|
||||
let slots = if let Some(last_voted_slot) = last_voted_slot_in_bank {
|
||||
let slots = if let Some(last_voted_slot_in_bank) = last_voted_slot_in_bank {
|
||||
local_vote_state
|
||||
.votes
|
||||
.iter()
|
||||
.map(|v| v.slot)
|
||||
.skip_while(|s| *s <= last_voted_slot)
|
||||
.skip_while(|s| *s <= last_voted_slot_in_bank)
|
||||
.collect()
|
||||
} else {
|
||||
local_vote_state.votes.iter().map(|v| v.slot).collect()
|
||||
};
|
||||
trace!(
|
||||
"new vote with {:?} {:?} {:?}",
|
||||
last_voted_slot_in_bank,
|
||||
slots,
|
||||
local_vote_state.votes
|
||||
);
|
||||
Vote::new(slots, hash)
|
||||
}
|
||||
|
||||
@@ -411,7 +415,7 @@ impl Tower {
|
||||
last_voted_slot_in_bank,
|
||||
);
|
||||
|
||||
new_vote.set_timestamp(self.maybe_timestamp(self.last_vote.last_voted_slot().unwrap_or(0)));
|
||||
new_vote.timestamp = self.maybe_timestamp(self.last_vote.last_voted_slot().unwrap_or(0));
|
||||
self.last_vote = new_vote;
|
||||
|
||||
let new_root = self.root();
|
||||
@@ -2248,7 +2252,7 @@ pub mod test {
|
||||
let mut local = VoteState::default();
|
||||
let vote = Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), None);
|
||||
assert_eq!(local.votes.len(), 1);
|
||||
assert_eq!(vote.slots(), vec![0]);
|
||||
assert_eq!(vote.slots, vec![0]);
|
||||
assert_eq!(local.tower(), vec![0]);
|
||||
}
|
||||
|
||||
@@ -2259,7 +2263,7 @@ pub mod test {
|
||||
// another vote for slot 0 should return an empty vote as the diff.
|
||||
let vote =
|
||||
Tower::apply_vote_and_generate_vote_diff(&mut local, 0, Hash::default(), Some(0));
|
||||
assert!(vote.is_empty());
|
||||
assert!(vote.slots.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -2274,7 +2278,7 @@ pub mod test {
|
||||
assert_eq!(local.votes.len(), 1);
|
||||
let vote =
|
||||
Tower::apply_vote_and_generate_vote_diff(&mut local, 1, Hash::default(), Some(0));
|
||||
assert_eq!(vote.slots(), vec![1]);
|
||||
assert_eq!(vote.slots, vec![1]);
|
||||
assert_eq!(local.tower(), vec![0, 1]);
|
||||
}
|
||||
|
||||
@@ -2294,7 +2298,7 @@ pub mod test {
|
||||
// observable in any of the results.
|
||||
let vote =
|
||||
Tower::apply_vote_and_generate_vote_diff(&mut local, 3, Hash::default(), Some(0));
|
||||
assert_eq!(vote.slots(), vec![3]);
|
||||
assert_eq!(vote.slots, vec![3]);
|
||||
assert_eq!(local.tower(), vec![3]);
|
||||
}
|
||||
|
||||
@@ -2376,7 +2380,7 @@ pub mod test {
|
||||
tower.record_vote(i as u64, Hash::default());
|
||||
}
|
||||
|
||||
expected.timestamp = tower.last_vote.timestamp();
|
||||
expected.timestamp = tower.last_vote.timestamp;
|
||||
assert_eq!(expected, tower.last_vote)
|
||||
}
|
||||
|
||||
|
@@ -6,10 +6,8 @@
 use {
     solana_ledger::blockstore::Blockstore,
     solana_measure::measure::Measure,
-    solana_runtime::{
-        bank::{Bank, ExecuteTimings},
-        cost_model::CostModel,
-    },
+    solana_program_runtime::timings::ExecuteTimings,
+    solana_runtime::{bank::Bank, cost_model::CostModel},
     solana_sdk::timing::timestamp,
     std::{
         sync::{
@@ -71,8 +69,12 @@ impl CostUpdateServiceTiming {
 }

 pub enum CostUpdate {
-    FrozenBank { bank: Arc<Bank> },
-    ExecuteTiming { execute_timings: ExecuteTimings },
+    FrozenBank {
+        bank: Arc<Bank>,
+    },
+    ExecuteTiming {
+        execute_timings: Box<ExecuteTimings>,
+    },
 }

 pub type CostUpdateReceiver = Receiver<CostUpdate>;
@@ -127,8 +129,10 @@ impl CostUpdateService {
                 CostUpdate::FrozenBank { bank } => {
                     bank.read_cost_tracker().unwrap().report_stats(bank.slot());
                 }
-                CostUpdate::ExecuteTiming { execute_timings } => {
-                    dirty |= Self::update_cost_model(&cost_model, &execute_timings);
+                CostUpdate::ExecuteTiming {
+                    mut execute_timings,
+                } => {
+                    dirty |= Self::update_cost_model(&cost_model, &mut execute_timings);
                     update_count += 1;
                 }
             }
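Aside: the CostUpdate change above boxes the ExecuteTiming payload. A standalone sketch of why, using stand-in types rather than the real solana definitions: a Rust enum is as large as its largest variant, so boxing the bulky timings keeps every queued CostUpdate message small.

use std::mem::size_of;
use std::sync::Arc;

#[allow(dead_code)]
struct BigTimings {
    per_program: [u64; 64], // placeholder for the real per-program timing tables
}

#[allow(dead_code)]
enum Unboxed {
    FrozenBank { bank: Arc<()> },
    ExecuteTiming { timings: BigTimings },
}

#[allow(dead_code)]
enum Boxed {
    FrozenBank { bank: Arc<()> },
    ExecuteTiming { timings: Box<BigTimings> },
}

fn main() {
    // Every variant pays for the largest one, so the unboxed enum carries the
    // full timings size even for FrozenBank; the boxed version is pointer-sized.
    println!("unboxed: {} bytes", size_of::<Unboxed>());
    println!("boxed:   {} bytes", size_of::<Boxed>());
    assert!(size_of::<Boxed>() < size_of::<Unboxed>());
}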
|
||||
@@ -151,16 +155,27 @@ impl CostUpdateService {
|
||||
}
|
||||
}
|
||||
|
||||
fn update_cost_model(cost_model: &RwLock<CostModel>, execute_timings: &ExecuteTimings) -> bool {
|
||||
fn update_cost_model(
|
||||
cost_model: &RwLock<CostModel>,
|
||||
execute_timings: &mut ExecuteTimings,
|
||||
) -> bool {
|
||||
let mut dirty = false;
|
||||
{
|
||||
let mut cost_model_mutable = cost_model.write().unwrap();
|
||||
for (program_id, timing) in &execute_timings.details.per_program_timings {
|
||||
if timing.count < 1 {
|
||||
for (program_id, program_timings) in &mut execute_timings.details.per_program_timings {
|
||||
let current_estimated_program_cost =
|
||||
cost_model.read().unwrap().find_instruction_cost(program_id);
|
||||
program_timings.coalesce_error_timings(current_estimated_program_cost);
|
||||
|
||||
if program_timings.count < 1 {
|
||||
continue;
|
||||
}
|
||||
let units = timing.accumulated_units / timing.count as u64;
|
||||
match cost_model_mutable.upsert_instruction_cost(program_id, units) {
|
||||
|
||||
let units = program_timings.accumulated_units / program_timings.count as u64;
|
||||
match cost_model
|
||||
.write()
|
||||
.unwrap()
|
||||
.upsert_instruction_cost(program_id, units)
|
||||
{
|
||||
Ok(c) => {
|
||||
debug!(
|
||||
"after replayed into bank, instruction {:?} has averaged cost {}",
|
||||
@@ -213,8 +228,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_update_cost_model_with_empty_execute_timings() {
|
||||
let cost_model = Arc::new(RwLock::new(CostModel::default()));
|
||||
let empty_execute_timings = ExecuteTimings::default();
|
||||
CostUpdateService::update_cost_model(&cost_model, &empty_execute_timings);
|
||||
let mut empty_execute_timings = ExecuteTimings::default();
|
||||
CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings);
|
||||
|
||||
assert_eq!(
|
||||
0,
|
||||
@@ -238,6 +253,7 @@ mod tests {
|
||||
{
|
||||
let accumulated_us: u64 = 1000;
|
||||
let accumulated_units: u64 = 100;
|
||||
let total_errored_units = 0;
|
||||
let count: u32 = 10;
|
||||
expected_cost = accumulated_units / count as u64;
|
||||
|
||||
@@ -247,9 +263,11 @@ mod tests {
|
||||
accumulated_us,
|
||||
accumulated_units,
|
||||
count,
|
||||
errored_txs_compute_consumed: vec![],
|
||||
total_errored_units,
|
||||
},
|
||||
);
|
||||
CostUpdateService::update_cost_model(&cost_model, &execute_timings);
|
||||
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
|
||||
assert_eq!(
|
||||
1,
|
||||
cost_model
|
||||
@@ -282,9 +300,11 @@ mod tests {
|
||||
accumulated_us,
|
||||
accumulated_units,
|
||||
count,
|
||||
errored_txs_compute_consumed: vec![],
|
||||
total_errored_units: 0,
|
||||
},
|
||||
);
|
||||
CostUpdateService::update_cost_model(&cost_model, &execute_timings);
|
||||
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
|
||||
assert_eq!(
|
||||
1,
|
||||
cost_model
|
||||
@@ -303,4 +323,106 @@ mod tests {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_cost_model_with_error_execute_timings() {
|
||||
let cost_model = Arc::new(RwLock::new(CostModel::default()));
|
||||
let mut execute_timings = ExecuteTimings::default();
|
||||
let program_key_1 = Pubkey::new_unique();
|
||||
|
||||
// Test updating cost model with a `ProgramTiming` with no compute units accumulated, i.e.
|
||||
// `accumulated_units` == 0
|
||||
{
|
||||
execute_timings.details.per_program_timings.insert(
|
||||
program_key_1,
|
||||
ProgramTiming {
|
||||
accumulated_us: 1000,
|
||||
accumulated_units: 0,
|
||||
count: 0,
|
||||
errored_txs_compute_consumed: vec![],
|
||||
total_errored_units: 0,
|
||||
},
|
||||
);
|
||||
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
|
||||
// If both the `errored_txs_compute_consumed` is empty and `count == 0`, then
|
||||
// nothing should be inserted into the cost model
|
||||
assert!(cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_instruction_cost_table()
|
||||
.is_empty());
|
||||
}
|
||||
|
||||
// Test updating cost model with only erroring compute costs where the `cost_per_error` is
|
||||
// greater than the current instruction cost for the program. Should update with the
|
||||
// new erroring compute costs
|
||||
let cost_per_error = 1000;
|
||||
{
|
||||
let errored_txs_compute_consumed = vec![cost_per_error; 3];
|
||||
let total_errored_units = errored_txs_compute_consumed.iter().sum();
|
||||
execute_timings.details.per_program_timings.insert(
|
||||
program_key_1,
|
||||
ProgramTiming {
|
||||
accumulated_us: 1000,
|
||||
accumulated_units: 0,
|
||||
count: 0,
|
||||
errored_txs_compute_consumed,
|
||||
total_errored_units,
|
||||
},
|
||||
);
|
||||
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
|
||||
assert_eq!(
|
||||
1,
|
||||
cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_instruction_cost_table()
|
||||
.len()
|
||||
);
|
||||
assert_eq!(
|
||||
Some(&cost_per_error),
|
||||
cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_instruction_cost_table()
|
||||
.get(&program_key_1)
|
||||
);
|
||||
}
|
||||
|
||||
// Test updating cost model with only erroring compute costs where the error cost is
|
||||
// `smaller_cost_per_error`, less than the current instruction cost for the program.
|
||||
// The cost should not decrease for these new lesser errors
|
||||
let smaller_cost_per_error = cost_per_error - 10;
|
||||
{
|
||||
let errored_txs_compute_consumed = vec![smaller_cost_per_error; 3];
|
||||
let total_errored_units = errored_txs_compute_consumed.iter().sum();
|
||||
execute_timings.details.per_program_timings.insert(
|
||||
program_key_1,
|
||||
ProgramTiming {
|
||||
accumulated_us: 1000,
|
||||
accumulated_units: 0,
|
||||
count: 0,
|
||||
errored_txs_compute_consumed,
|
||||
total_errored_units,
|
||||
},
|
||||
);
|
||||
CostUpdateService::update_cost_model(&cost_model, &mut execute_timings);
|
||||
assert_eq!(
|
||||
1,
|
||||
cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_instruction_cost_table()
|
||||
.len()
|
||||
);
|
||||
assert_eq!(
|
||||
Some(&cost_per_error),
|
||||
cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_instruction_cost_table()
|
||||
.get(&program_key_1)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -8,7 +8,10 @@ use {
|
||||
solana_metrics::{inc_new_counter_debug, inc_new_counter_info},
|
||||
solana_perf::{packet::PacketBatchRecycler, recycler::Recycler},
|
||||
solana_poh::poh_recorder::PohRecorder,
|
||||
solana_sdk::clock::DEFAULT_TICKS_PER_SLOT,
|
||||
solana_sdk::{
|
||||
clock::DEFAULT_TICKS_PER_SLOT,
|
||||
packet::{Packet, PacketFlags},
|
||||
},
|
||||
solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender},
|
||||
std::{
|
||||
net::UdpSocket,
|
||||
@@ -83,10 +86,16 @@ impl FetchStage {
|
||||
sendr: &PacketBatchSender,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
) -> Result<()> {
|
||||
let packet_batch = recvr.recv()?;
|
||||
let mark_forwarded = |packet: &mut Packet| {
|
||||
packet.meta.flags |= PacketFlags::FORWARDED;
|
||||
};
|
||||
|
||||
let mut packet_batch = recvr.recv()?;
|
||||
let mut num_packets = packet_batch.packets.len();
|
||||
packet_batch.packets.iter_mut().for_each(mark_forwarded);
|
||||
let mut packet_batches = vec![packet_batch];
|
||||
while let Ok(packet_batch) = recvr.try_recv() {
|
||||
while let Ok(mut packet_batch) = recvr.try_recv() {
|
||||
packet_batch.packets.iter_mut().for_each(mark_forwarded);
|
||||
num_packets += packet_batch.packets.len();
|
||||
packet_batches.push(packet_batch);
|
||||
// Read at most 1K transactions in a loop
|
||||
@@ -115,7 +124,7 @@ impl FetchStage {
|
||||
}
|
||||
|
||||
fn new_multi_socket(
|
||||
sockets: Vec<Arc<UdpSocket>>,
|
||||
tpu_sockets: Vec<Arc<UdpSocket>>,
|
||||
tpu_forwards_sockets: Vec<Arc<UdpSocket>>,
|
||||
tpu_vote_sockets: Vec<Arc<UdpSocket>>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
@@ -126,7 +135,7 @@ impl FetchStage {
|
||||
) -> Self {
|
||||
let recycler: PacketBatchRecycler = Recycler::warmed(1000, 1024);
|
||||
|
||||
let tpu_threads = sockets.into_iter().map(|socket| {
|
||||
let tpu_threads = tpu_sockets.into_iter().map(|socket| {
|
||||
streamer::receiver(
|
||||
socket,
|
||||
exit,
|
||||
@@ -164,12 +164,9 @@ impl LedgerCleanupService {
     }

     fn receive_new_roots(new_root_receiver: &Receiver<Slot>) -> Result<Slot, RecvTimeoutError> {
-        let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
+        let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
         // Get the newest root
-        while let Ok(new_root) = new_root_receiver.try_recv() {
-            root = new_root;
-        }
-        Ok(root)
+        Ok(new_root_receiver.try_iter().last().unwrap_or(root))
     }

     pub fn cleanup_ledger(
|
@@ -81,45 +81,133 @@ impl ReplaySlotStats {
|
||||
i64
|
||||
),
|
||||
(
|
||||
"serialize_us",
|
||||
"execute_details_serialize_us",
|
||||
self.execute_timings.details.serialize_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"create_vm_us",
|
||||
"execute_details_create_vm_us",
|
||||
self.execute_timings.details.create_vm_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_inner_us",
|
||||
"execute_details_execute_inner_us",
|
||||
self.execute_timings.details.execute_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"deserialize_us",
|
||||
"execute_details_deserialize_us",
|
||||
self.execute_timings.details.deserialize_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"changed_account_count",
|
||||
"execute_details_get_or_create_executor_us",
|
||||
self.execute_timings.details.get_or_create_executor_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_details_changed_account_count",
|
||||
self.execute_timings.details.changed_account_count,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"total_account_count",
|
||||
"execute_details_total_account_count",
|
||||
self.execute_timings.details.total_account_count,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"total_data_size",
|
||||
"execute_details_total_data_size",
|
||||
self.execute_timings.details.total_data_size,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"data_size_changed",
|
||||
"execute_details_data_size_changed",
|
||||
self.execute_timings.details.data_size_changed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_details_create_executor_register_syscalls_us",
|
||||
self.execute_timings
|
||||
.details
|
||||
.create_executor_register_syscalls_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_details_create_executor_load_elf_us",
|
||||
self.execute_timings.details.create_executor_load_elf_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_details_create_executor_verify_code_us",
|
||||
self.execute_timings.details.create_executor_verify_code_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_details_create_executor_jit_compile_us",
|
||||
self.execute_timings.details.create_executor_jit_compile_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_feature_set_clone_us",
|
||||
self.execute_timings
|
||||
.execute_accessories
|
||||
.feature_set_clone_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_compute_budget_process_transaction_us",
|
||||
self.execute_timings
|
||||
.execute_accessories
|
||||
.compute_budget_process_transaction_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_get_executors_us",
|
||||
self.execute_timings.execute_accessories.get_executors_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_process_message_us",
|
||||
self.execute_timings.execute_accessories.process_message_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_update_executors_us",
|
||||
self.execute_timings.execute_accessories.update_executors_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_process_instructions_total_us",
|
||||
self.execute_timings
|
||||
.execute_accessories
|
||||
.process_instructions
|
||||
.total_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_process_instructions_verify_caller_us",
|
||||
self.execute_timings
|
||||
.execute_accessories
|
||||
.process_instructions
|
||||
.verify_caller_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_process_instructions_process_executable_chain_us",
|
||||
self.execute_timings
|
||||
.execute_accessories
|
||||
.process_instructions
|
||||
.process_executable_chain_us,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"execute_accessories_process_instructions_verify_callee_us",
|
||||
self.execute_timings
|
||||
.execute_accessories
|
||||
.process_instructions
|
||||
.verify_callee_us,
|
||||
i64
|
||||
),
|
||||
);
|
||||
|
||||
let mut per_pubkey_timings: Vec<_> = self
|
||||
@@ -129,25 +217,34 @@ impl ReplaySlotStats {
|
||||
.iter()
|
||||
.collect();
|
||||
per_pubkey_timings.sort_by(|a, b| b.1.accumulated_us.cmp(&a.1.accumulated_us));
|
||||
let (total_us, total_units, total_count) =
|
||||
per_pubkey_timings
|
||||
.iter()
|
||||
.fold((0, 0, 0), |(sum_us, sum_units, sum_count), a| {
|
||||
let (total_us, total_units, total_count, total_errored_units, total_errored_count) =
|
||||
per_pubkey_timings.iter().fold(
|
||||
(0, 0, 0, 0, 0),
|
||||
|(sum_us, sum_units, sum_count, sum_errored_units, sum_errored_count), a| {
|
||||
(
|
||||
sum_us + a.1.accumulated_us,
|
||||
sum_units + a.1.accumulated_units,
|
||||
sum_count + a.1.count,
|
||||
sum_errored_units + a.1.total_errored_units,
|
||||
sum_errored_count + a.1.errored_txs_compute_consumed.len(),
|
||||
)
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
for (pubkey, time) in per_pubkey_timings.iter().take(5) {
|
||||
datapoint_info!(
|
||||
datapoint_trace!(
|
||||
"per_program_timings",
|
||||
("slot", slot as i64, i64),
|
||||
("pubkey", pubkey.to_string(), String),
|
||||
("execute_us", time.accumulated_us, i64),
|
||||
("accumulated_units", time.accumulated_units, i64),
|
||||
("count", time.count, i64)
|
||||
("errored_units", time.total_errored_units, i64),
|
||||
("count", time.count, i64),
|
||||
(
|
||||
"errored_count",
|
||||
time.errored_txs_compute_consumed.len(),
|
||||
i64
|
||||
),
|
||||
);
|
||||
}
|
||||
datapoint_info!(
|
||||
@@ -156,7 +253,9 @@ impl ReplaySlotStats {
|
||||
("pubkey", "all", String),
|
||||
("execute_us", total_us, i64),
|
||||
("accumulated_units", total_units, i64),
|
||||
("count", total_count, i64)
|
||||
("count", total_count, i64),
|
||||
("errored_units", total_errored_units, i64),
|
||||
("errored_count", total_errored_count, i64)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -3,6 +3,7 @@
|
||||
//! how transactions are included in blocks, and optimize those blocks.
|
||||
//!
|
||||
use {
|
||||
crate::banking_stage::BatchedTransactionCostDetails,
|
||||
solana_measure::measure::Measure,
|
||||
solana_runtime::{
|
||||
bank::Bank,
|
||||
@@ -78,13 +79,12 @@ impl QosService {
|
||||
pub fn compute_transaction_costs<'a>(
|
||||
&self,
|
||||
transactions: impl Iterator<Item = &'a SanitizedTransaction>,
|
||||
demote_program_write_locks: bool,
|
||||
) -> Vec<TransactionCost> {
|
||||
let mut compute_cost_time = Measure::start("compute_cost_time");
|
||||
let cost_model = self.cost_model.read().unwrap();
|
||||
let txs_costs: Vec<_> = transactions
|
||||
.map(|tx| {
|
||||
let cost = cost_model.calculate_cost(tx, demote_program_write_locks);
|
||||
let cost = cost_model.calculate_cost(tx);
|
||||
debug!(
|
||||
"transaction {:?}, cost {:?}, cost sum {}",
|
||||
tx,
|
||||
@@ -129,6 +129,10 @@ impl QosService {
|
||||
self.metrics.retried_txs_per_block_limit_count.fetch_add(1, Ordering::Relaxed);
|
||||
Err(TransactionError::WouldExceedMaxBlockCostLimit)
|
||||
}
|
||||
CostTrackerError::WouldExceedVoteMaxLimit => {
|
||||
self.metrics.retried_txs_per_vote_limit_count.fetch_add(1, Ordering::Relaxed);
|
||||
Err(TransactionError::WouldExceedMaxVoteCostLimit)
|
||||
}
|
||||
CostTrackerError::WouldExceedAccountMaxLimit => {
|
||||
self.metrics.retried_txs_per_account_limit_count.fetch_add(1, Ordering::Relaxed);
|
||||
Err(TransactionError::WouldExceedMaxAccountCostLimit)
|
||||
@@ -144,6 +148,36 @@ impl QosService {
|
||||
select_results
|
||||
}
|
||||
|
||||
pub fn accumulate_estimated_transaction_costs(
|
||||
&self,
|
||||
cost_details: &BatchedTransactionCostDetails,
|
||||
) {
|
||||
self.metrics
|
||||
.estimated_signature_cu
|
||||
.fetch_add(cost_details.batched_signature_cost, Ordering::Relaxed);
|
||||
self.metrics
|
||||
.estimated_write_lock_cu
|
||||
.fetch_add(cost_details.batched_write_lock_cost, Ordering::Relaxed);
|
||||
self.metrics
|
||||
.estimated_data_bytes_cu
|
||||
.fetch_add(cost_details.batched_data_bytes_cost, Ordering::Relaxed);
|
||||
self.metrics
|
||||
.estimated_execute_cu
|
||||
.fetch_add(cost_details.batched_execute_cost, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn accumulate_actual_execute_cu(&self, units: u64) {
|
||||
self.metrics
|
||||
.actual_execute_cu
|
||||
.fetch_add(units, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn accumulate_actual_execute_time(&self, micro_sec: u64) {
|
||||
self.metrics
|
||||
.actual_execute_time_us
|
||||
.fetch_add(micro_sec, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
fn reporting_loop(
|
||||
running_flag: Arc<AtomicBool>,
|
||||
metrics: Arc<QosServiceMetrics>,
|
||||
@@ -164,7 +198,26 @@ struct QosServiceMetrics {
|
||||
cost_tracking_time: AtomicU64,
|
||||
selected_txs_count: AtomicU64,
|
||||
retried_txs_per_block_limit_count: AtomicU64,
|
||||
retried_txs_per_vote_limit_count: AtomicU64,
|
||||
retried_txs_per_account_limit_count: AtomicU64,
|
||||
|
||||
// accumulated estimated signature Compute Units to be packed into block
|
||||
estimated_signature_cu: AtomicU64,
|
||||
|
||||
// accumulated estimated write locks Compute Units to be packed into block
|
||||
estimated_write_lock_cu: AtomicU64,
|
||||
|
||||
// accumulated estimated instruction data Compute Units to be packed into block
|
||||
estimated_data_bytes_cu: AtomicU64,
|
||||
|
||||
// accumulated estimated program Compute Units to be packed into block
|
||||
estimated_execute_cu: AtomicU64,
|
||||
|
||||
// accumulated actual program Compute Units that have been packed into block
|
||||
actual_execute_cu: AtomicU64,
|
||||
|
||||
// accumulated actual program execute micro-sec that have been packed into block
|
||||
actual_execute_time_us: AtomicU64,
|
||||
}
|
||||
|
||||
impl QosServiceMetrics {
|
||||
@@ -198,12 +251,48 @@ impl QosServiceMetrics {
|
||||
.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"retried_txs_per_vote_limit_count",
|
||||
self.retried_txs_per_vote_limit_count
|
||||
.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"retried_txs_per_account_limit_count",
|
||||
self.retried_txs_per_account_limit_count
|
||||
.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"estimated_signature_cu",
|
||||
self.estimated_signature_cu.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"estimated_write_lock_cu",
|
||||
self.estimated_write_lock_cu.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"estimated_data_bytes_cu",
|
||||
self.estimated_data_bytes_cu.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"estimated_execute_cu",
|
||||
self.estimated_execute_cu.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"actual_execute_cu",
|
||||
self.actual_execute_cu.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"actual_execute_time_us",
|
||||
self.actual_execute_time_us.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -250,7 +339,7 @@ mod tests {
|
||||
|
||||
let cost_model = Arc::new(RwLock::new(CostModel::default()));
|
||||
let qos_service = QosService::new(cost_model.clone());
|
||||
let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false);
|
||||
let txs_costs = qos_service.compute_transaction_costs(txs.iter());
|
||||
|
||||
// verify the size of txs_costs and its contents
|
||||
assert_eq!(txs_costs.len(), txs.len());
|
||||
@@ -260,11 +349,7 @@ mod tests {
|
||||
.map(|(index, cost)| {
|
||||
assert_eq!(
|
||||
cost.sum(),
|
||||
cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.calculate_cost(&txs[index], false)
|
||||
.sum()
|
||||
cost_model.read().unwrap().calculate_cost(&txs[index]).sum()
|
||||
);
|
||||
})
|
||||
.collect_vec();
|
||||
@@ -295,28 +380,29 @@ mod tests {
|
||||
let transfer_tx_cost = cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.calculate_cost(&transfer_tx, false)
|
||||
.calculate_cost(&transfer_tx)
|
||||
.sum();
|
||||
let vote_tx_cost = cost_model.read().unwrap().calculate_cost(&vote_tx).sum();
|
||||
|
||||
// make a vec of txs
|
||||
let txs = vec![transfer_tx.clone(), vote_tx.clone(), transfer_tx, vote_tx];
|
||||
|
||||
let qos_service = QosService::new(cost_model);
|
||||
let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false);
|
||||
let txs_costs = qos_service.compute_transaction_costs(txs.iter());
|
||||
|
||||
// set cost tracker limit to fit 1 transfer tx, vote tx bypasses limit check
|
||||
let cost_limit = transfer_tx_cost;
|
||||
// set cost tracker limit to fit 1 transfer tx and 1 vote tx
|
||||
let cost_limit = transfer_tx_cost + vote_tx_cost;
|
||||
bank.write_cost_tracker()
|
||||
.unwrap()
|
||||
.set_limits(cost_limit, cost_limit);
|
||||
.set_limits(cost_limit, cost_limit, cost_limit);
|
||||
let results = qos_service.select_transactions_per_cost(txs.iter(), txs_costs.iter(), &bank);
|
||||
|
||||
// verify that first transfer tx and all votes are allowed
|
||||
// verify that first transfer tx and first votes are allowed
|
||||
assert_eq!(results.len(), txs.len());
|
||||
assert!(results[0].is_ok());
|
||||
assert!(results[1].is_ok());
|
||||
assert!(results[2].is_err());
|
||||
assert!(results[3].is_ok());
|
||||
assert!(results[3].is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -348,7 +434,7 @@ mod tests {
|
||||
.name("test-producer-1".to_string())
|
||||
.spawn(move || {
|
||||
debug!("thread 1 starts with {} txs", txs_1.len());
|
||||
let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter(), false);
|
||||
let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter());
|
||||
assert_eq!(txs_count, tx_costs.len());
|
||||
debug!(
|
||||
"thread 1 done, generated {} count, see service count as {}",
|
||||
@@ -365,7 +451,7 @@ mod tests {
|
||||
.name("test-producer-2".to_string())
|
||||
.spawn(move || {
|
||||
debug!("thread 2 starts with {} txs", txs_2.len());
|
||||
let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter(), false);
|
||||
let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter());
|
||||
assert_eq!(txs_count, tx_costs.len());
|
||||
debug!(
|
||||
"thread 2 done, generated {} count, see service count as {}",
|
||||
@@ -86,17 +86,17 @@ fn get_unrepaired_path(
 ) -> Vec<Slot> {
     let mut path = Vec::new();
     let mut slot = start_slot;
-    while !visited.contains(&slot) {
-        visited.insert(slot);
+    while visited.insert(slot) {
         let slot_meta = slot_meta_cache
             .entry(slot)
             .or_insert_with(|| blockstore.meta(slot).unwrap());
         if let Some(slot_meta) = slot_meta {
-            if slot_meta.is_full() {
-                break;
+            if !slot_meta.is_full() {
+                path.push(slot);
+                if let Some(parent_slot) = slot_meta.parent_slot {
+                    slot = parent_slot
+                }
             }
-            path.push(slot);
-            slot = slot_meta.parent_slot;
         }
     }
     path.reverse();
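Aside: the rewritten loop above relies on HashSet::insert returning false for a value that was already present, so one call both records the visit and stops the walk on the first repeat. A simplified stand-in (not the real get_unrepaired_path, which also consults slot completeness in the blockstore):

use std::collections::HashSet;

// Walk from `start` toward the root via a caller-supplied parent lookup,
// stopping on a missing parent or a previously visited slot.
fn walk(start: u64, parent_of: impl Fn(u64) -> Option<u64>) -> Vec<u64> {
    let mut visited = HashSet::new();
    let mut path = Vec::new();
    let mut slot = start;
    while visited.insert(slot) {
        path.push(slot);
        match parent_of(slot) {
            Some(parent) => slot = parent,
            None => break,
        }
    }
    path.reverse();
    path
}

fn main() {
    // 5 -> 3 -> 1 -> 0, and 0 has no parent.
    let parents = |s: u64| match s {
        5 => Some(3),
        3 => Some(1),
        1 => Some(0),
        _ => None,
    };
    assert_eq!(walk(5, parents), vec![0, 1, 3, 5]);
}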
|
||||
|
@@ -56,7 +56,10 @@ mod test {
|
||||
shred::{Shred, Shredder},
|
||||
sigverify_shreds::verify_shred_cpu,
|
||||
},
|
||||
solana_sdk::signature::{Keypair, Signer},
|
||||
solana_sdk::{
|
||||
packet::PacketFlags,
|
||||
signature::{Keypair, Signer},
|
||||
},
|
||||
std::{
|
||||
collections::HashMap,
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
@@ -87,7 +90,7 @@ mod test {
|
||||
nonce,
|
||||
)
|
||||
.unwrap();
|
||||
packet.meta.repair = true;
|
||||
packet.meta.flags |= PacketFlags::REPAIR;
|
||||
|
||||
let leader_slots = [(slot, keypair.pubkey().to_bytes())]
|
||||
.iter()
|
||||
|
@@ -201,6 +201,7 @@ impl RepairService {
|
||||
blockstore: Arc<Blockstore>,
|
||||
exit: Arc<AtomicBool>,
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
ancestor_hashes_socket: Arc<UdpSocket>,
|
||||
repair_info: RepairInfo,
|
||||
verified_vote_receiver: VerifiedVoteReceiver,
|
||||
outstanding_requests: Arc<RwLock<OutstandingShredRepairs>>,
|
||||
@@ -225,11 +226,10 @@ impl RepairService {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let ancestor_hashes_request_socket = Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap());
|
||||
let ancestor_hashes_service = AncestorHashesService::new(
|
||||
exit,
|
||||
blockstore,
|
||||
ancestor_hashes_request_socket,
|
||||
ancestor_hashes_socket,
|
||||
repair_info,
|
||||
ancestor_hashes_replay_update_receiver,
|
||||
);
|
||||
|
@@ -26,6 +26,7 @@ use {
|
||||
voting_service::VoteOp,
|
||||
window_service::DuplicateSlotReceiver,
|
||||
},
|
||||
solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
|
||||
solana_client::rpc_response::SlotUpdate,
|
||||
solana_entry::entry::VerifyRecyclers,
|
||||
solana_gossip::cluster_info::ClusterInfo,
|
||||
@@ -38,15 +39,17 @@ use {
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::inc_new_counter_info,
|
||||
solana_poh::poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
|
||||
solana_program_runtime::timings::ExecuteTimings,
|
||||
solana_rpc::{
|
||||
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
},
|
||||
solana_runtime::{
|
||||
accounts_background_service::AbsRequestSender,
|
||||
bank::{Bank, ExecuteTimings, NewBankOptions},
|
||||
bank::{Bank, NewBankOptions},
|
||||
bank_forks::BankForks,
|
||||
commitment::BlockCommitmentCache,
|
||||
transaction_cost_metrics_sender::TransactionCostMetricsSender,
|
||||
vote_sender_types::ReplayVoteSender,
|
||||
},
|
||||
solana_sdk::{
|
||||
@@ -54,6 +57,7 @@ use {
|
||||
genesis_config::ClusterType,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
saturating_add_assign,
|
||||
signature::{Keypair, Signature, Signer},
|
||||
timing::timestamp,
|
||||
transaction::Transaction,
|
||||
@@ -160,6 +164,10 @@ pub struct ReplayTiming {
|
||||
process_duplicate_slots_elapsed: u64,
|
||||
process_unfrozen_gossip_verified_vote_hashes_elapsed: u64,
|
||||
repair_correct_slots_elapsed: u64,
|
||||
generate_new_bank_forks_read_lock_us: u64,
|
||||
generate_new_bank_forks_get_slots_since_us: u64,
|
||||
generate_new_bank_forks_loop_us: u64,
|
||||
generate_new_bank_forks_write_lock_us: u64,
|
||||
}
|
||||
impl ReplayTiming {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
@@ -291,7 +299,27 @@ impl ReplayTiming {
|
||||
"repair_correct_slots_elapsed",
|
||||
self.repair_correct_slots_elapsed as i64,
|
||||
i64
|
||||
)
|
||||
),
|
||||
(
|
||||
"generate_new_bank_forks_read_lock_us",
|
||||
self.generate_new_bank_forks_read_lock_us as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"generate_new_bank_forks_get_slots_since_us",
|
||||
self.generate_new_bank_forks_get_slots_since_us as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"generate_new_bank_forks_loop_us",
|
||||
self.generate_new_bank_forks_loop_us as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"generate_new_bank_forks_write_lock_us",
|
||||
self.generate_new_bank_forks_write_lock_us as i64,
|
||||
i64
|
||||
),
|
||||
);
|
||||
|
||||
*self = ReplayTiming::default();
|
||||
@@ -327,6 +355,8 @@ impl ReplayStage {
|
||||
cost_update_sender: Sender<CostUpdate>,
|
||||
voting_sender: Sender<VoteOp>,
|
||||
drop_bank_sender: Sender<Vec<Arc<Bank>>>,
|
||||
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
|
||||
transaction_cost_metrics_sender: Option<TransactionCostMetricsSender>,
|
||||
) -> Self {
|
||||
let ReplayStageConfig {
|
||||
vote_account,
|
||||
@@ -401,6 +431,7 @@ impl ReplayStage {
|
||||
&leader_schedule_cache,
|
||||
&rpc_subscriptions,
|
||||
&mut progress,
|
||||
&mut replay_timing,
|
||||
);
|
||||
generate_new_bank_forks_time.stop();
|
||||
|
||||
@@ -432,6 +463,8 @@ impl ReplayStage {
|
||||
&cost_update_sender,
|
||||
&mut duplicate_slots_to_repair,
|
||||
&ancestor_hashes_replay_update_sender,
|
||||
block_metadata_notifier.clone(),
|
||||
transaction_cost_metrics_sender.as_ref(),
|
||||
);
|
||||
replay_active_banks_time.stop();
|
||||
|
||||
@@ -1459,7 +1492,7 @@ impl ReplayStage {
|
||||
);
|
||||
|
||||
let root_distance = poh_slot - root_slot;
|
||||
const MAX_ROOT_DISTANCE_FOR_VOTE_ONLY: Slot = 500;
|
||||
const MAX_ROOT_DISTANCE_FOR_VOTE_ONLY: Slot = 400;
|
||||
let vote_only_bank = if root_distance > MAX_ROOT_DISTANCE_FOR_VOTE_ONLY {
|
||||
datapoint_info!("vote-only-bank", ("slot", poh_slot, i64));
|
||||
true
|
||||
@@ -1489,6 +1522,7 @@ impl ReplayStage {
|
||||
bank_progress: &mut ForkProgress,
|
||||
transaction_status_sender: Option<&TransactionStatusSender>,
|
||||
replay_vote_sender: &ReplayVoteSender,
|
||||
transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>,
|
||||
verify_recyclers: &VerifyRecyclers,
|
||||
) -> result::Result<usize, BlockstoreProcessorError> {
|
||||
let tx_count_before = bank_progress.replay_progress.num_txs;
|
||||
@@ -1500,6 +1534,7 @@ impl ReplayStage {
|
||||
false,
|
||||
transaction_status_sender,
|
||||
Some(replay_vote_sender),
|
||||
transaction_cost_metrics_sender,
|
||||
None,
|
||||
verify_recyclers,
|
||||
false,
|
||||
@@ -1988,6 +2023,8 @@ impl ReplayStage {
|
||||
cost_update_sender: &Sender<CostUpdate>,
|
||||
duplicate_slots_to_repair: &mut DuplicateSlotsToRepair,
|
||||
ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender,
|
||||
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
|
||||
transaction_cost_metrics_sender: Option<&TransactionCostMetricsSender>,
|
||||
) -> bool {
|
||||
let mut did_complete_bank = false;
|
||||
let mut tx_count = 0;
|
||||
@@ -2037,6 +2074,7 @@ impl ReplayStage {
|
||||
bank_progress,
|
||||
transaction_status_sender,
|
||||
replay_vote_sender,
|
||||
transaction_cost_metrics_sender,
|
||||
verify_recyclers,
|
||||
);
|
||||
match replay_result {
|
||||
@@ -2143,6 +2181,16 @@ impl ReplayStage {
|
||||
}
|
||||
}
|
||||
Self::record_rewards(&bank, rewards_recorder_sender);
|
||||
if let Some(ref block_metadata_notifier) = block_metadata_notifier {
|
||||
let block_metadata_notifier = block_metadata_notifier.read().unwrap();
|
||||
block_metadata_notifier.notify_block_metadata(
|
||||
bank.slot(),
|
||||
&bank.last_blockhash().to_string(),
|
||||
&bank.rewards,
|
||||
Some(bank.clock().unix_timestamp),
|
||||
Some(bank.block_height()),
|
||||
)
|
||||
}
|
||||
} else {
|
||||
trace!(
|
||||
"bank {} not completed tick_height: {}, max_tick_height: {}",
|
||||
@@ -2156,7 +2204,9 @@ impl ReplayStage {
|
||||
// send accumulated execute-timings to cost_update_service
|
||||
if !execute_timings.details.per_program_timings.is_empty() {
|
||||
cost_update_sender
|
||||
.send(CostUpdate::ExecuteTiming { execute_timings })
|
||||
.send(CostUpdate::ExecuteTiming {
|
||||
execute_timings: Box::new(execute_timings),
|
||||
})
|
||||
.unwrap_or_else(|err| warn!("cost_update_sender failed: {:?}", err));
|
||||
}
|
||||
|
||||
@@ -2769,24 +2819,34 @@ impl ReplayStage {
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
rpc_subscriptions: &Arc<RpcSubscriptions>,
|
||||
progress: &mut ProgressMap,
|
||||
replay_timing: &mut ReplayTiming,
|
||||
) {
|
||||
// Find the next slot that chains to the old slot
|
||||
let mut generate_new_bank_forks_read_lock =
|
||||
Measure::start("generate_new_bank_forks_read_lock");
|
||||
let forks = bank_forks.read().unwrap();
|
||||
generate_new_bank_forks_read_lock.stop();
|
||||
|
||||
let frozen_banks = forks.frozen_banks();
|
||||
let frozen_bank_slots: Vec<u64> = frozen_banks
|
||||
.keys()
|
||||
.cloned()
|
||||
.filter(|s| *s >= forks.root())
|
||||
.collect();
|
||||
let mut generate_new_bank_forks_get_slots_since =
|
||||
Measure::start("generate_new_bank_forks_get_slots_since");
|
||||
let next_slots = blockstore
|
||||
.get_slots_since(&frozen_bank_slots)
|
||||
.expect("Db error");
|
||||
generate_new_bank_forks_get_slots_since.stop();
|
||||
|
||||
// Filter out what we've already seen
|
||||
trace!("generate new forks {:?}", {
|
||||
let mut next_slots = next_slots.iter().collect::<Vec<_>>();
|
||||
next_slots.sort();
|
||||
next_slots
|
||||
});
|
||||
let mut generate_new_bank_forks_loop = Measure::start("generate_new_bank_forks_loop");
|
||||
let mut new_banks = HashMap::new();
|
||||
for (parent_slot, children) in next_slots {
|
||||
let parent_bank = frozen_banks
|
||||
@@ -2827,11 +2887,31 @@ impl ReplayStage {
|
||||
}
|
||||
}
|
||||
drop(forks);
|
||||
generate_new_bank_forks_loop.stop();
|
||||
|
||||
let mut generate_new_bank_forks_write_lock =
|
||||
Measure::start("generate_new_bank_forks_write_lock");
|
||||
let mut forks = bank_forks.write().unwrap();
|
||||
for (_, bank) in new_banks {
|
||||
forks.insert(bank);
|
||||
}
|
||||
generate_new_bank_forks_write_lock.stop();
|
||||
saturating_add_assign!(
|
||||
replay_timing.generate_new_bank_forks_read_lock_us,
|
||||
generate_new_bank_forks_read_lock.as_us()
|
||||
);
|
||||
saturating_add_assign!(
|
||||
replay_timing.generate_new_bank_forks_get_slots_since_us,
|
||||
generate_new_bank_forks_get_slots_since.as_us()
|
||||
);
|
||||
saturating_add_assign!(
|
||||
replay_timing.generate_new_bank_forks_loop_us,
|
||||
generate_new_bank_forks_loop.as_us()
|
||||
);
|
||||
saturating_add_assign!(
|
||||
replay_timing.generate_new_bank_forks_write_lock_us,
|
||||
generate_new_bank_forks_write_lock.as_us()
|
||||
);
|
||||
}
|
||||
|
||||
fn new_bank_from_parent_with_notify(
|
||||
@@ -3033,8 +3113,10 @@ pub mod tests {
|
||||
let optimistically_confirmed_bank =
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(bank_forks);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
optimistically_confirmed_bank,
|
||||
@@ -3103,12 +3185,14 @@ pub mod tests {
|
||||
.unwrap()
|
||||
.get(NUM_CONSECUTIVE_LEADER_SLOTS)
|
||||
.is_none());
|
||||
let mut replay_timing = ReplayTiming::default();
|
||||
ReplayStage::generate_new_bank_forks(
|
||||
&blockstore,
|
||||
&bank_forks,
|
||||
&leader_schedule_cache,
|
||||
&rpc_subscriptions,
|
||||
&mut progress,
|
||||
&mut replay_timing,
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
@@ -3131,6 +3215,7 @@ pub mod tests {
|
||||
&leader_schedule_cache,
|
||||
&rpc_subscriptions,
|
||||
&mut progress,
|
||||
&mut replay_timing,
|
||||
);
|
||||
assert!(bank_forks
|
||||
.read()
|
||||
@@ -3566,10 +3651,13 @@ pub mod tests {
|
||||
bank1_progress,
|
||||
None,
|
||||
&replay_vote_sender,
|
||||
None,
|
||||
&VerifyRecyclers::default(),
|
||||
);
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache,
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -3636,8 +3724,10 @@ pub mod tests {
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let max_complete_transaction_status_slot = Arc::new(AtomicU64::default());
|
||||
let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests(
|
||||
&exit,
|
||||
max_complete_transaction_status_slot,
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache.clone(),
|
||||
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks),
|
||||
@@ -3731,10 +3821,12 @@ pub mod tests {
|
||||
#[test]
|
||||
fn test_write_persist_transaction_status() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mut genesis_config,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_config(1000);
|
||||
} = create_genesis_config(solana_sdk::native_token::sol_to_lamports(1000.0));
|
||||
genesis_config.rent.lamports_per_byte_year = 50;
|
||||
genesis_config.rent.exemption_threshold = 2.0;
|
||||
let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config);
|
||||
{
|
||||
let blockstore = Blockstore::open(&ledger_path)
|
||||
@@ -3747,7 +3839,11 @@ pub mod tests {
|
||||
|
||||
let bank0 = Arc::new(Bank::new_for_tests(&genesis_config));
|
||||
bank0
|
||||
.transfer(4, &mint_keypair, &keypair2.pubkey())
|
||||
.transfer(
|
||||
bank0.get_minimum_balance_for_rent_exemption(0),
|
||||
&mint_keypair,
|
||||
&keypair2.pubkey(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
|
||||
|
@@ -24,7 +24,7 @@ use {
|
||||
solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
shred::{Shred, ShredType},
|
||||
shred::{Shred, ShredId},
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_perf::packet::PacketBatch,
|
||||
@@ -145,13 +145,13 @@ impl RetransmitStats {
|
||||
}
|
||||
|
||||
// Map of shred (slot, index, type) => list of hash values seen for that key.
|
||||
type ShredFilter = LruCache<(Slot, u32, ShredType), Vec<u64>>;
|
||||
type ShredFilter = LruCache<ShredId, Vec<u64>>;
|
||||
|
||||
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
|
||||
|
||||
// Returns true if shred is already received and should skip retransmit.
|
||||
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
|
||||
let key = (shred.slot(), shred.index(), shred.shred_type());
|
||||
let key = shred.id();
|
||||
let mut shreds_received = shreds_received.lock().unwrap();
|
||||
let (cache, hasher) = shreds_received.deref_mut();
|
||||
match cache.get_mut(&key) {
|
||||
@@ -433,6 +433,7 @@ impl RetransmitStage {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
retransmit_sockets: Arc<Vec<UdpSocket>>,
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
ancestor_hashes_socket: Arc<UdpSocket>,
|
||||
verified_receiver: Receiver<Vec<PacketBatch>>,
|
||||
exit: Arc<AtomicBool>,
|
||||
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
|
||||
@@ -486,6 +487,7 @@ impl RetransmitStage {
|
||||
verified_receiver,
|
||||
retransmit_sender,
|
||||
repair_socket,
|
||||
ancestor_hashes_socket,
|
||||
exit,
|
||||
repair_info,
|
||||
leader_schedule_cache,
|
||||
@@ -613,7 +615,7 @@ mod tests {
|
||||
let mut packet_batch = PacketBatch::new(vec![]);
|
||||
solana_streamer::packet::recv_from(&mut packet_batch, &me_retransmit, 1).unwrap();
|
||||
assert_eq!(packet_batch.packets.len(), 1);
|
||||
assert!(!packet_batch.packets[0].meta.repair);
|
||||
assert!(!packet_batch.packets[0].meta.repair());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -706,8 +706,8 @@ impl ServeRepair {
             } else {
                 break;
             }
-            if meta.is_parent_set() && res.packets.len() <= max_responses {
-                slot = meta.parent_slot;
+            if meta.parent_slot.is_some() && res.packets.len() <= max_responses {
+                slot = meta.parent_slot.unwrap();
             } else {
                 break;
             }
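Aside: this hunk follows SlotMeta::parent_slot becoming an Option<Slot>, so the sentinel check via is_parent_set() collapses into ordinary Option handling. A tiny illustrative sketch with stand-in types (not the real SlotMeta):

type Slot = u64;

struct SlotMeta {
    parent_slot: Option<Slot>,
}

// "No parent" is now expressed in the type instead of a separate flag check.
fn next_slot_to_serve(meta: &SlotMeta, have_room: bool) -> Option<Slot> {
    if have_room {
        meta.parent_slot
    } else {
        None
    }
}

fn main() {
    let orphan = SlotMeta { parent_slot: None };
    let child = SlotMeta { parent_slot: Some(42) };
    assert_eq!(next_slot_to_serve(&orphan, true), None);
    assert_eq!(next_slot_to_serve(&child, true), Some(42));
    assert_eq!(next_slot_to_serve(&child, false), None);
}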
||||
|
@@ -6,7 +6,7 @@ use {
|
||||
solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats},
|
||||
solana_perf::{
|
||||
cuda_runtime::PinnedVec,
|
||||
packet::{Packet, PacketBatchRecycler},
|
||||
packet::{Packet, PacketBatchRecycler, PacketFlags},
|
||||
recycler::Recycler,
|
||||
},
|
||||
solana_runtime::bank_forks::BankForks,
|
||||
@@ -40,7 +40,7 @@ impl ShredFetchStage {
|
||||
) where
|
||||
F: Fn(&mut Packet),
|
||||
{
|
||||
p.meta.discard = true;
|
||||
p.meta.set_discard(true);
|
||||
if let Some((slot, _index, _shred_type)) = get_shred_slot_index_type(p, stats) {
|
||||
// Seems reasonable to limit shreds to 2 epochs away
|
||||
if slot > last_root && slot < (last_slot + 2 * slots_per_epoch) {
|
||||
@@ -50,7 +50,7 @@ impl ShredFetchStage {
|
||||
|
||||
if shreds_received.get(&hash).is_none() {
|
||||
shreds_received.put(hash, ());
|
||||
p.meta.discard = false;
|
||||
p.meta.set_discard(false);
|
||||
modify(p);
|
||||
} else {
|
||||
stats.duplicate_shred += 1;
|
||||
@@ -192,7 +192,7 @@ impl ShredFetchStage {
|
||||
recycler.clone(),
|
||||
bank_forks.clone(),
|
||||
"shred_fetch_tvu_forwards",
|
||||
|p| p.meta.forward = true,
|
||||
|p| p.meta.flags.insert(PacketFlags::FORWARDED),
|
||||
);
|
||||
|
||||
let (repair_receiver, repair_handler) = Self::packet_modifier(
|
||||
@@ -202,7 +202,7 @@ impl ShredFetchStage {
|
||||
recycler,
|
||||
bank_forks,
|
||||
"shred_fetch_repair",
|
||||
|p| p.meta.repair = true,
|
||||
|p| p.meta.flags.insert(PacketFlags::REPAIR),
|
||||
);
|
||||
|
||||
tvu_threads.extend(tvu_forwards_threads.into_iter());
|
||||
@@ -266,10 +266,11 @@ mod tests {
|
||||
&|_p| {},
|
||||
&hasher,
|
||||
);
|
||||
assert!(!packet.meta.discard);
|
||||
assert!(!packet.meta.discard());
|
||||
let coding = solana_ledger::shred::Shredder::generate_coding_shreds(
|
||||
&[shred],
|
||||
false, // is_last_in_slot
|
||||
3, // next_code_index
|
||||
);
|
||||
coding[0].copy_to_packet(&mut packet);
|
||||
ShredFetchStage::process_packet(
|
||||
@@ -282,7 +283,7 @@ mod tests {
|
||||
&|_p| {},
|
||||
&hasher,
|
||||
);
|
||||
assert!(!packet.meta.discard);
|
||||
assert!(!packet.meta.discard());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -309,7 +310,7 @@ mod tests {
|
||||
&hasher,
|
||||
);
|
||||
assert_eq!(stats.index_overrun, 1);
|
||||
assert!(packet.meta.discard);
|
||||
assert!(packet.meta.discard());
|
||||
let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
|
||||
shred.copy_to_packet(&mut packet);
|
||||
|
||||
@@ -324,7 +325,7 @@ mod tests {
|
||||
&|_p| {},
|
||||
&hasher,
|
||||
);
|
||||
assert!(packet.meta.discard);
|
||||
assert!(packet.meta.discard());
|
||||
|
||||
// Accepted for 1,3
|
||||
ShredFetchStage::process_packet(
|
||||
@@ -337,7 +338,7 @@ mod tests {
|
||||
&|_p| {},
|
||||
&hasher,
|
||||
);
|
||||
assert!(!packet.meta.discard);
|
||||
assert!(!packet.meta.discard());
|
||||
|
||||
// shreds_received should filter duplicate
|
||||
ShredFetchStage::process_packet(
|
||||
@@ -350,7 +351,7 @@ mod tests {
|
||||
&|_p| {},
|
||||
&hasher,
|
||||
);
|
||||
assert!(packet.meta.discard);
|
||||
assert!(packet.meta.discard());
|
||||
|
||||
let shred = Shred::new_from_data(1_000_000, 3, 0, None, true, true, 0, 0, 0);
|
||||
shred.copy_to_packet(&mut packet);
|
||||
@@ -366,7 +367,7 @@ mod tests {
|
||||
&|_p| {},
|
||||
&hasher,
|
||||
);
|
||||
assert!(packet.meta.discard);
|
||||
assert!(packet.meta.discard());
|
||||
|
||||
let index = MAX_DATA_SHREDS_PER_SLOT as u32;
|
||||
let shred = Shred::new_from_data(5, index, 0, None, true, true, 0, 0, 0);
|
||||
@@ -381,6 +382,6 @@ mod tests {
|
||||
&|_p| {},
|
||||
&hasher,
|
||||
);
|
||||
assert!(packet.meta.discard);
|
||||
assert!(packet.meta.discard());
|
||||
}
|
||||
}
|
||||
|
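Aside: several hunks above fold per-packet booleans (discard, forward, repair) into a single PacketFlags bitfield with accessor methods such as discard() and set_discard(). A hypothetical sketch of that pattern using the bitflags crate; the type and flag names are illustrative, not the actual solana_sdk definitions:

use bitflags::bitflags;

bitflags! {
    pub struct Flags: u8 {
        const DISCARD   = 0b001;
        const FORWARDED = 0b010;
        const REPAIR    = 0b100;
    }
}

struct Meta {
    flags: Flags,
}

impl Meta {
    fn new() -> Self {
        Self { flags: Flags::empty() }
    }
    // Accessors mirror the style of the diff: a setter taking a bool and a getter.
    fn set_discard(&mut self, discard: bool) {
        self.flags.set(Flags::DISCARD, discard);
    }
    fn discard(&self) -> bool {
        self.flags.contains(Flags::DISCARD)
    }
}

fn main() {
    let mut meta = Meta::new();
    meta.flags.insert(Flags::REPAIR); // analogous to meta.flags.insert(PacketFlags::REPAIR)
    meta.set_discard(true);
    assert!(meta.discard() && meta.flags.contains(Flags::REPAIR));
    meta.set_discard(false);
    assert!(!meta.discard());
}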