Compare commits
34 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
35ab022dbe | ||
|
e6bd6dd260 | ||
|
b216e3c9f7 | ||
|
5f86735382 | ||
|
ea61142c3e | ||
|
b4735893bd | ||
|
e3e1396c1d | ||
|
4453e2937f | ||
|
5013828326 | ||
|
c597a6c828 | ||
|
d890aa93a5 | ||
|
1d7f6a2c66 | ||
|
a483a7da47 | ||
|
e333cd48e8 | ||
|
b3be0b7b44 | ||
|
4ec275b5cd | ||
|
a6e6bc6dc0 | ||
|
91e56bdb5e | ||
|
3fd0f45e91 | ||
|
84644ce994 | ||
|
353cb8efa5 | ||
|
779f36a1bd | ||
|
7b216d67ce | ||
|
ca271892c8 | ||
|
598e2934ba | ||
|
9ff9234280 | ||
|
8f07b46a69 | ||
|
b48bfc19c1 | ||
|
33c24ec3ae | ||
|
4892eb4e1a | ||
|
2d930052dc | ||
|
70c9ca061f | ||
|
e4aecd9320 | ||
|
1dd6dc3709 |
@@ -123,7 +123,7 @@ jobs:
|
||||
if: type IN (push, pull_request) OR tag IS present
|
||||
language: node_js
|
||||
node_js:
|
||||
- "lts/*"
|
||||
- "node"
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
739
Cargo.lock
generated
739
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,8 +1,5 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"accountsdb-plugin-interface",
|
||||
"accountsdb-plugin-manager",
|
||||
"accountsdb-plugin-postgres",
|
||||
"accounts-cluster-bench",
|
||||
"bench-exchange",
|
||||
"bench-streamer",
|
||||
@@ -46,10 +43,8 @@ members = [
|
||||
"poh-bench",
|
||||
"program-test",
|
||||
"programs/bpf_loader",
|
||||
"programs/compute-budget",
|
||||
"programs/config",
|
||||
"programs/exchange",
|
||||
"programs/ed25519",
|
||||
"programs/secp256k1",
|
||||
"programs/stake",
|
||||
"programs/vote",
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-account-decoder"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana account decoder"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -19,9 +19,9 @@ lazy_static = "1.4.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-config-program = { path = "../programs/config", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.2" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.17" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0"
|
||||
zstd = "0.5.1"
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accounts-bench"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -11,11 +11,11 @@ publish = false
|
||||
[dependencies]
|
||||
log = "0.4.11"
|
||||
rayon = "1.5.0"
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
rand = "0.7.0"
|
||||
clap = "2.33.1"
|
||||
crossbeam-channel = "0.4"
|
||||
|
@@ -66,7 +66,6 @@ fn main() {
|
||||
AccountSecondaryIndexes::default(),
|
||||
false,
|
||||
AccountShrinkThreshold::default(),
|
||||
None,
|
||||
);
|
||||
println!("Creating {} accounts", num_accounts);
|
||||
let mut create_time = Measure::start("create accounts");
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accounts-cluster-bench"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -13,24 +13,24 @@ clap = "2.33.1"
|
||||
log = "0.4.11"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.4.1"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-core = { path = "../core", version = "=1.8.2" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.8.2" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
solana-core = { path = "../core", version = "=1.7.17" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.17" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.8.2" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -1,17 +0,0 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accountsdb-plugin-interface"
|
||||
description = "The Solana AccountsDb plugin interface."
|
||||
version = "1.8.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-validator"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.11"
|
||||
thiserror = "1.0.29"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
@@ -1,20 +0,0 @@
|
||||
<p align="center">
|
||||
<a href="https://solana.com">
|
||||
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
# Solana AccountsDb Plugin Interface
|
||||
|
||||
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
|
||||
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see the detail of the `accountsdb_plugin_interface.rs` for the interface definition.
|
||||
|
||||
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
|
||||
instantiates the implementation of the interface.
|
||||
|
||||
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
|
||||
external PostgreSQL databases.
|
||||
|
||||
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
|
||||
|
||||
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
|
@@ -1,99 +0,0 @@
|
||||
/// The interface for AccountsDb plugins. A plugin must implement
|
||||
/// the AccountsDbPlugin trait to work with the runtime.
|
||||
/// In addition, the dynamic library must export a "C" function _create_plugin which
|
||||
/// creates the implementation of the plugin.
|
||||
use {
|
||||
std::{any::Any, error, io},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
impl Eq for ReplicaAccountInfo<'_> {}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
pub struct ReplicaAccountInfo<'a> {
|
||||
pub pubkey: &'a [u8],
|
||||
pub lamports: u64,
|
||||
pub owner: &'a [u8],
|
||||
pub executable: bool,
|
||||
pub rent_epoch: u64,
|
||||
pub data: &'a [u8],
|
||||
pub write_version: u64,
|
||||
}
|
||||
|
||||
pub enum ReplicaAccountInfoVersions<'a> {
|
||||
V0_0_1(&'a ReplicaAccountInfo<'a>),
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsDbPluginError {
|
||||
#[error("Error opening config file. Error detail: ({0}).")]
|
||||
ConfigFileOpenError(#[from] io::Error),
|
||||
|
||||
#[error("Error reading config file. Error message: ({msg})")]
|
||||
ConfigFileReadError { msg: String },
|
||||
|
||||
#[error("Error updating account. Error message: ({msg})")]
|
||||
AccountsUpdateError { msg: String },
|
||||
|
||||
#[error("Error updating slot status. Error message: ({msg})")]
|
||||
SlotStatusUpdateError { msg: String },
|
||||
|
||||
#[error("Plugin-defined custom error. Error message: ({0})")]
|
||||
Custom(Box<dyn error::Error + Send + Sync>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum SlotStatus {
|
||||
Processed,
|
||||
Rooted,
|
||||
Confirmed,
|
||||
}
|
||||
|
||||
impl SlotStatus {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
SlotStatus::Confirmed => "confirmed",
|
||||
SlotStatus::Processed => "processed",
|
||||
SlotStatus::Rooted => "rooted",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
|
||||
|
||||
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
|
||||
fn name(&self) -> &'static str;
|
||||
|
||||
/// The callback called when a plugin is loaded by the system,
|
||||
/// used for doing whatever initialization is required by the plugin.
|
||||
/// The _config_file contains the name of the
|
||||
/// of the config file. The config must be in JSON format and
|
||||
/// include a field "libpath" indicating the full path
|
||||
/// name of the shared library implementing this interface.
|
||||
fn on_load(&mut self, _config_file: &str) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// The callback called right before a plugin is unloaded by the system
|
||||
/// Used for doing cleanup before unload.
|
||||
fn on_unload(&mut self) {}
|
||||
|
||||
/// Called when an account is updated at a slot.
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: ReplicaAccountInfoVersions,
|
||||
slot: u64,
|
||||
is_startup: bool,
|
||||
) -> Result<()>;
|
||||
|
||||
/// Called when all accounts are notified of during startup.
|
||||
fn notify_end_of_startup(&mut self) -> Result<()>;
|
||||
|
||||
/// Called when a slot status is updated
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<()>;
|
||||
}
|
@@ -1 +0,0 @@
|
||||
pub mod accountsdb_plugin_interface;
|
@@ -1,30 +0,0 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accountsdb-plugin-manager"
|
||||
description = "The Solana AccountsDb plugin manager."
|
||||
version = "1.8.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-validator"
|
||||
|
||||
[dependencies]
|
||||
bs58 = "0.4.0"
|
||||
crossbeam-channel = "0.4"
|
||||
libloading = "0.7.0"
|
||||
log = "0.4.11"
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.67"
|
||||
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
thiserror = "1.0.21"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
@@ -1,227 +0,0 @@
|
||||
/// Module responsible for notifying plugins of account updates
|
||||
use {
|
||||
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
ReplicaAccountInfo, ReplicaAccountInfoVersions, SlotStatus,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_runtime::{
|
||||
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
|
||||
append_vec::{StoredAccountMeta, StoredMeta},
|
||||
},
|
||||
solana_sdk::{
|
||||
account::{AccountSharedData, ReadableAccount},
|
||||
clock::Slot,
|
||||
},
|
||||
std::sync::{Arc, RwLock},
|
||||
};
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct AccountsUpdateNotifierImpl {
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
}
|
||||
|
||||
impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
|
||||
fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData) {
|
||||
if let Some(account_info) = self.accountinfo_from_shared_account_data(meta, account) {
|
||||
self.notify_plugins_of_account_update(account_info, slot, false);
|
||||
}
|
||||
}
|
||||
|
||||
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
|
||||
let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
|
||||
let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");
|
||||
|
||||
let account = self.accountinfo_from_stored_account_meta(account);
|
||||
measure_copy.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-copy-stored-account-info-us",
|
||||
measure_copy.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
if let Some(account_info) = account {
|
||||
self.notify_plugins_of_account_update(account_info, slot, true);
|
||||
}
|
||||
measure_all.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-notify-account-restore-all-us",
|
||||
measure_all.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
|
||||
fn notify_end_of_restore_from_snapshot(&self) {
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
|
||||
match plugin.notify_end_of_startup() {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to notify the end of restore from snapshot, error: {} to plugin {}",
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully notified the end of restore from snapshot to plugin {}",
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-end-of-restore-from-snapshot",
|
||||
measure.as_us() as usize
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Confirmed);
|
||||
}
|
||||
|
||||
fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Processed);
|
||||
}
|
||||
|
||||
fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Rooted);
|
||||
}
|
||||
}
|
||||
|
||||
impl AccountsUpdateNotifierImpl {
|
||||
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
|
||||
AccountsUpdateNotifierImpl { plugin_manager }
|
||||
}
|
||||
|
||||
fn accountinfo_from_shared_account_data<'a>(
|
||||
&self,
|
||||
meta: &'a StoredMeta,
|
||||
account: &'a AccountSharedData,
|
||||
) -> Option<ReplicaAccountInfo<'a>> {
|
||||
Some(ReplicaAccountInfo {
|
||||
pubkey: meta.pubkey.as_ref(),
|
||||
lamports: account.lamports(),
|
||||
owner: account.owner().as_ref(),
|
||||
executable: account.executable(),
|
||||
rent_epoch: account.rent_epoch(),
|
||||
data: account.data(),
|
||||
write_version: meta.write_version,
|
||||
})
|
||||
}
|
||||
|
||||
fn accountinfo_from_stored_account_meta<'a>(
|
||||
&self,
|
||||
stored_account_meta: &'a StoredAccountMeta,
|
||||
) -> Option<ReplicaAccountInfo<'a>> {
|
||||
Some(ReplicaAccountInfo {
|
||||
pubkey: stored_account_meta.meta.pubkey.as_ref(),
|
||||
lamports: stored_account_meta.account_meta.lamports,
|
||||
owner: stored_account_meta.account_meta.owner.as_ref(),
|
||||
executable: stored_account_meta.account_meta.executable,
|
||||
rent_epoch: stored_account_meta.account_meta.rent_epoch,
|
||||
data: stored_account_meta.data,
|
||||
write_version: stored_account_meta.meta.write_version,
|
||||
})
|
||||
}
|
||||
|
||||
fn notify_plugins_of_account_update(
|
||||
&self,
|
||||
account: ReplicaAccountInfo,
|
||||
slot: Slot,
|
||||
is_startup: bool,
|
||||
) {
|
||||
let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-update-account");
|
||||
match plugin.update_account(
|
||||
ReplicaAccountInfoVersions::V0_0_1(&account),
|
||||
slot,
|
||||
is_startup,
|
||||
) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to update account {} at slot {}, error: {} to plugin {}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
slot,
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully updated account {} at slot {} to plugin {}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
slot,
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-update-account-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
measure2.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-notify_plugins_of_account_update-us",
|
||||
measure2.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
|
||||
pub fn notify_slot_status(&self, slot: Slot, parent: Option<Slot>, slot_status: SlotStatus) {
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-update-slot");
|
||||
match plugin.update_slot_status(slot, parent, slot_status.clone()) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to update slot status at slot {}, error: {} to plugin {}",
|
||||
slot,
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully updated slot status at slot {} to plugin {}",
|
||||
slot,
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-update-slot-us",
|
||||
measure.as_us() as usize,
|
||||
1000,
|
||||
1000
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,55 +0,0 @@
|
||||
/// Managing the AccountsDb plugins
|
||||
use {
|
||||
libloading::{Library, Symbol},
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin,
|
||||
std::error::Error,
|
||||
};
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct AccountsDbPluginManager {
|
||||
pub plugins: Vec<Box<dyn AccountsDbPlugin>>,
|
||||
libs: Vec<Library>,
|
||||
}
|
||||
|
||||
impl AccountsDbPluginManager {
|
||||
pub fn new() -> Self {
|
||||
AccountsDbPluginManager {
|
||||
plugins: Vec::default(),
|
||||
libs: Vec::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function loads the dynamically linked library specified in the path. The library
|
||||
/// must do necessary initializations.
|
||||
pub unsafe fn load_plugin(
|
||||
&mut self,
|
||||
libpath: &str,
|
||||
config_file: &str,
|
||||
) -> Result<(), Box<dyn Error>> {
|
||||
type PluginConstructor = unsafe fn() -> *mut dyn AccountsDbPlugin;
|
||||
let lib = Library::new(libpath)?;
|
||||
let constructor: Symbol<PluginConstructor> = lib.get(b"_create_plugin")?;
|
||||
let plugin_raw = constructor();
|
||||
let mut plugin = Box::from_raw(plugin_raw);
|
||||
plugin.on_load(config_file)?;
|
||||
self.plugins.push(plugin);
|
||||
self.libs.push(lib);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Unload all plugins and loaded plugin libraries, making sure to fire
|
||||
/// their `on_plugin_unload()` methods so they can do any necessary cleanup.
|
||||
pub fn unload(&mut self) {
|
||||
for mut plugin in self.plugins.drain(..) {
|
||||
info!("Unloading plugin for {:?}", plugin.name());
|
||||
plugin.on_unload();
|
||||
}
|
||||
|
||||
for lib in self.libs.drain(..) {
|
||||
drop(lib);
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,157 +0,0 @@
|
||||
use {
|
||||
crate::{
|
||||
accounts_update_notifier::AccountsUpdateNotifierImpl,
|
||||
accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
slot_status_observer::SlotStatusObserver,
|
||||
},
|
||||
crossbeam_channel::Receiver,
|
||||
log::*,
|
||||
serde_json,
|
||||
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
|
||||
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
|
||||
std::{
|
||||
fs::File,
|
||||
io::Read,
|
||||
path::{Path, PathBuf},
|
||||
sync::{Arc, RwLock},
|
||||
thread,
|
||||
},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsdbPluginServiceError {
|
||||
#[error("Cannot open the the plugin config file")]
|
||||
CannotOpenConfigFile(String),
|
||||
|
||||
#[error("Cannot read the the plugin config file")]
|
||||
CannotReadConfigFile(String),
|
||||
|
||||
#[error("The config file is not in a valid Json format")]
|
||||
InvalidConfigFileFormat(String),
|
||||
|
||||
#[error("Plugin library path is not specified in the config file")]
|
||||
LibPathNotSet,
|
||||
|
||||
#[error("Invalid plugin path")]
|
||||
InvalidPluginPath,
|
||||
|
||||
#[error("Cannot load plugin shared library")]
|
||||
PluginLoadError(String),
|
||||
}
|
||||
|
||||
/// The service managing the AccountsDb plugin workflow.
|
||||
pub struct AccountsDbPluginService {
|
||||
slot_status_observer: SlotStatusObserver,
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
accounts_update_notifier: AccountsUpdateNotifier,
|
||||
}
|
||||
|
||||
impl AccountsDbPluginService {
|
||||
/// Creates and returns the AccountsDbPluginService.
|
||||
/// # Arguments
|
||||
/// * `confirmed_bank_receiver` - The receiver for confirmed bank notification
|
||||
/// * `accountsdb_plugin_config_file` - The config file path for the plugin. The
|
||||
/// config file controls the plugin responsible
|
||||
/// for transporting the data to external data stores. It is defined in JSON format.
|
||||
/// The `libpath` field should be pointed to the full path of the dynamic shared library
|
||||
/// (.so file) to be loaded. The shared library must implement the `AccountsDbPlugin`
|
||||
/// trait. And the shared library shall export a `C` function `_create_plugin` which
|
||||
/// shall create the implementation of `AccountsDbPlugin` and returns to the caller.
|
||||
/// The rest of the JSON fields' definition is up to to the concrete plugin implementation
|
||||
/// It is usually used to configure the connection information for the external data store.
|
||||
|
||||
pub fn new(
|
||||
confirmed_bank_receiver: Receiver<BankNotification>,
|
||||
accountsdb_plugin_config_files: &[PathBuf],
|
||||
) -> Result<Self, AccountsdbPluginServiceError> {
|
||||
info!(
|
||||
"Starting AccountsDbPluginService from config files: {:?}",
|
||||
accountsdb_plugin_config_files
|
||||
);
|
||||
let mut plugin_manager = AccountsDbPluginManager::new();
|
||||
|
||||
for accountsdb_plugin_config_file in accountsdb_plugin_config_files {
|
||||
Self::load_plugin(&mut plugin_manager, accountsdb_plugin_config_file)?;
|
||||
}
|
||||
|
||||
let plugin_manager = Arc::new(RwLock::new(plugin_manager));
|
||||
let accounts_update_notifier = Arc::new(RwLock::new(AccountsUpdateNotifierImpl::new(
|
||||
plugin_manager.clone(),
|
||||
)));
|
||||
let slot_status_observer =
|
||||
SlotStatusObserver::new(confirmed_bank_receiver, accounts_update_notifier.clone());
|
||||
|
||||
info!("Started AccountsDbPluginService");
|
||||
Ok(AccountsDbPluginService {
|
||||
slot_status_observer,
|
||||
plugin_manager,
|
||||
accounts_update_notifier,
|
||||
})
|
||||
}
|
||||
|
||||
fn load_plugin(
|
||||
plugin_manager: &mut AccountsDbPluginManager,
|
||||
accountsdb_plugin_config_file: &Path,
|
||||
) -> Result<(), AccountsdbPluginServiceError> {
|
||||
let mut file = match File::open(accountsdb_plugin_config_file) {
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
return Err(AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
|
||||
"Failed to open the plugin config file {:?}, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut contents = String::new();
|
||||
if let Err(err) = file.read_to_string(&mut contents) {
|
||||
return Err(AccountsdbPluginServiceError::CannotReadConfigFile(format!(
|
||||
"Failed to read the plugin config file {:?}, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
)));
|
||||
}
|
||||
|
||||
let result: serde_json::Value = match serde_json::from_str(&contents) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
|
||||
format!(
|
||||
"The config file {:?} is not in a valid Json format, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let libpath = result["libpath"]
|
||||
.as_str()
|
||||
.ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
|
||||
let config_file = accountsdb_plugin_config_file
|
||||
.as_os_str()
|
||||
.to_str()
|
||||
.ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
|
||||
|
||||
unsafe {
|
||||
let result = plugin_manager.load_plugin(libpath, config_file);
|
||||
if let Err(err) = result {
|
||||
let msg = format!(
|
||||
"Failed to load the plugin library: {:?}, error: {:?}",
|
||||
libpath, err
|
||||
);
|
||||
return Err(AccountsdbPluginServiceError::PluginLoadError(msg));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_accounts_update_notifier(&self) -> AccountsUpdateNotifier {
|
||||
self.accounts_update_notifier.clone()
|
||||
}
|
||||
|
||||
pub fn join(mut self) -> thread::Result<()> {
|
||||
self.slot_status_observer.join()?;
|
||||
self.plugin_manager.write().unwrap().unload();
|
||||
Ok(())
|
||||
}
|
||||
}
|
@@ -1,4 +0,0 @@
|
||||
pub mod accounts_update_notifier;
|
||||
pub mod accountsdb_plugin_manager;
|
||||
pub mod accountsdb_plugin_service;
|
||||
pub mod slot_status_observer;
|
@@ -1,80 +0,0 @@
|
||||
use {
|
||||
crossbeam_channel::Receiver,
|
||||
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
|
||||
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
|
||||
std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SlotStatusObserver {
|
||||
bank_notification_receiver_service: Option<JoinHandle<()>>,
|
||||
exit_updated_slot_server: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl SlotStatusObserver {
|
||||
pub fn new(
|
||||
bank_notification_receiver: Receiver<BankNotification>,
|
||||
accounts_update_notifier: AccountsUpdateNotifier,
|
||||
) -> Self {
|
||||
let exit_updated_slot_server = Arc::new(AtomicBool::new(false));
|
||||
|
||||
Self {
|
||||
bank_notification_receiver_service: Some(Self::run_bank_notification_receiver(
|
||||
bank_notification_receiver,
|
||||
exit_updated_slot_server.clone(),
|
||||
accounts_update_notifier,
|
||||
)),
|
||||
exit_updated_slot_server,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(&mut self) -> thread::Result<()> {
|
||||
self.exit_updated_slot_server.store(true, Ordering::Relaxed);
|
||||
self.bank_notification_receiver_service
|
||||
.take()
|
||||
.map(JoinHandle::join)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn run_bank_notification_receiver(
|
||||
bank_notification_receiver: Receiver<BankNotification>,
|
||||
exit: Arc<AtomicBool>,
|
||||
accounts_update_notifier: AccountsUpdateNotifier,
|
||||
) -> JoinHandle<()> {
|
||||
Builder::new()
|
||||
.name("bank_notification_receiver".to_string())
|
||||
.spawn(move || {
|
||||
while !exit.load(Ordering::Relaxed) {
|
||||
if let Ok(slot) = bank_notification_receiver.recv() {
|
||||
match slot {
|
||||
BankNotification::OptimisticallyConfirmed(slot) => {
|
||||
accounts_update_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_confirmed(slot, None);
|
||||
}
|
||||
BankNotification::Frozen(bank) => {
|
||||
accounts_update_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_processed(bank.slot(), Some(bank.parent_slot()));
|
||||
}
|
||||
BankNotification::Root(bank) => {
|
||||
accounts_update_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_rooted(bank.slot(), Some(bank.parent_slot()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
}
|
@@ -1,33 +0,0 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accountsdb-plugin-postgres"
|
||||
description = "The Solana AccountsDb plugin for PostgreSQL database."
|
||||
version = "1.8.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-validator"
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib", "rlib"]
|
||||
|
||||
[dependencies]
|
||||
bs58 = "0.4.0"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
crossbeam-channel = "0.5"
|
||||
log = "0.4.14"
|
||||
postgres = { version = "0.19.1", features = ["with-chrono-0_4"] }
|
||||
serde = "1.0.130"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.67"
|
||||
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
thiserror = "1.0.21"
|
||||
tokio-postgres = "0.7.3"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
@@ -1,5 +0,0 @@
|
||||
This is an example implementing the AccountsDb plugin for PostgreSQL database.
|
||||
Please see the `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.
|
||||
|
||||
To create the schema objects for the database, please use `scripts/create_schema.sql`.
|
||||
`scripts/drop_schema.sql` can be used to tear down the schema objects.
|
@@ -1,54 +0,0 @@
|
||||
/**
|
||||
* This plugin implementation for PostgreSQL requires the following tables
|
||||
*/
|
||||
-- The table storing accounts
|
||||
|
||||
|
||||
CREATE TABLE account (
|
||||
pubkey BYTEA PRIMARY KEY,
|
||||
owner BYTEA,
|
||||
lamports BIGINT NOT NULL,
|
||||
slot BIGINT NOT NULL,
|
||||
executable BOOL NOT NULL,
|
||||
rent_epoch BIGINT NOT NULL,
|
||||
data BYTEA,
|
||||
write_version BIGINT NOT NULL,
|
||||
updated_on TIMESTAMP NOT NULL
|
||||
);
|
||||
|
||||
-- The table storing slot information
|
||||
CREATE TABLE slot (
|
||||
slot BIGINT PRIMARY KEY,
|
||||
parent BIGINT,
|
||||
status varchar(16) NOT NULL,
|
||||
updated_on TIMESTAMP NOT NULL
|
||||
);
|
||||
|
||||
/**
|
||||
* The following is for keeping historical data for accounts and is not required for plugin to work.
|
||||
*/
|
||||
-- The table storing historical data for accounts
|
||||
CREATE TABLE account_audit (
|
||||
pubkey BYTEA,
|
||||
owner BYTEA,
|
||||
lamports BIGINT NOT NULL,
|
||||
slot BIGINT NOT NULL,
|
||||
executable BOOL NOT NULL,
|
||||
rent_epoch BIGINT NOT NULL,
|
||||
data BYTEA,
|
||||
write_version BIGINT NOT NULL,
|
||||
updated_on TIMESTAMP NOT NULL
|
||||
);
|
||||
|
||||
CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
|
||||
BEGIN
|
||||
INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
|
||||
VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
|
||||
OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
|
||||
RETURN NEW;
|
||||
END;
|
||||
|
||||
$audit_account_update$ LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
|
||||
FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
|
@@ -1,9 +0,0 @@
|
||||
/**
|
||||
* Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
|
||||
*/
|
||||
|
||||
DROP TRIGGER account_update_trigger;
|
||||
DROP FUNCTION audit_account_update;
|
||||
DROP TABLE account_audit;
|
||||
DROP TABLE account;
|
||||
DROP TABLE slot;
|
@@ -1,69 +0,0 @@
|
||||
use {log::*, std::collections::HashSet};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct AccountsSelector {
|
||||
pub accounts: HashSet<Vec<u8>>,
|
||||
pub owners: HashSet<Vec<u8>>,
|
||||
pub select_all_accounts: bool,
|
||||
}
|
||||
|
||||
impl AccountsSelector {
|
||||
pub fn default() -> Self {
|
||||
AccountsSelector {
|
||||
accounts: HashSet::default(),
|
||||
owners: HashSet::default(),
|
||||
select_all_accounts: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(accounts: &[String], owners: &[String]) -> Self {
|
||||
info!(
|
||||
"Creating AccountsSelector from accounts: {:?}, owners: {:?}",
|
||||
accounts, owners
|
||||
);
|
||||
|
||||
let select_all_accounts = accounts.iter().any(|key| key == "*");
|
||||
if select_all_accounts {
|
||||
return AccountsSelector {
|
||||
accounts: HashSet::default(),
|
||||
owners: HashSet::default(),
|
||||
select_all_accounts,
|
||||
};
|
||||
}
|
||||
let accounts = accounts
|
||||
.iter()
|
||||
.map(|key| bs58::decode(key).into_vec().unwrap())
|
||||
.collect();
|
||||
let owners = owners
|
||||
.iter()
|
||||
.map(|key| bs58::decode(key).into_vec().unwrap())
|
||||
.collect();
|
||||
AccountsSelector {
|
||||
accounts,
|
||||
owners,
|
||||
select_all_accounts,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
|
||||
self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_create_accounts_selector() {
|
||||
AccountsSelector::new(
|
||||
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
|
||||
&[],
|
||||
);
|
||||
|
||||
AccountsSelector::new(
|
||||
&[],
|
||||
&["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
|
||||
);
|
||||
}
|
||||
}
|
@@ -1,333 +0,0 @@
|
||||
use solana_measure::measure::Measure;
|
||||
|
||||
/// Main entry for the PostgreSQL plugin
|
||||
use {
|
||||
crate::{
|
||||
accounts_selector::AccountsSelector,
|
||||
postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
|
||||
},
|
||||
bs58,
|
||||
log::*,
|
||||
serde_derive::{Deserialize, Serialize},
|
||||
serde_json,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions, Result, SlotStatus,
|
||||
},
|
||||
solana_metrics::*,
|
||||
std::{fs::File, io::Read},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AccountsDbPluginPostgres {
|
||||
client: Option<ParallelPostgresClient>,
|
||||
accounts_selector: Option<AccountsSelector>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for AccountsDbPluginPostgres {
|
||||
fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct AccountsDbPluginPostgresConfig {
|
||||
pub host: String,
|
||||
pub user: String,
|
||||
pub threads: Option<usize>,
|
||||
pub port: Option<u16>,
|
||||
pub batch_size: Option<usize>,
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsDbPluginPostgresError {
|
||||
#[error("Error connecting to the backend data store. Error message: ({msg})")]
|
||||
DataStoreConnectionError { msg: String },
|
||||
|
||||
#[error("Error preparing data store schema. Error message: ({msg})")]
|
||||
DataSchemaError { msg: String },
|
||||
}
|
||||
|
||||
impl AccountsDbPlugin for AccountsDbPluginPostgres {
|
||||
fn name(&self) -> &'static str {
|
||||
"AccountsDbPluginPostgres"
|
||||
}
|
||||
|
||||
/// Do initialization for the PostgreSQL plugin.
|
||||
/// # Arguments
|
||||
///
|
||||
/// Format of the config file:
|
||||
/// The `accounts_selector` section allows the user to controls accounts selections.
|
||||
/// "accounts_selector" : {
|
||||
/// "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
|
||||
/// }
|
||||
/// or:
|
||||
/// "accounts_selector" = {
|
||||
/// "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
|
||||
/// }
|
||||
/// Accounts either satisyfing the accounts condition or owners condition will be selected.
|
||||
/// When only owners is specified,
|
||||
/// all accounts belonging to the owners will be streamed.
|
||||
/// The accounts field support wildcard to select all accounts:
|
||||
/// "accounts_selector" : {
|
||||
/// "accounts" : \["*"\],
|
||||
/// }
|
||||
/// "host" specifies the PostgreSQL server.
|
||||
/// "user" specifies the PostgreSQL user.
|
||||
/// "threads" optional, specifies the number of worker threads for the plugin. A thread
|
||||
/// maintains a PostgreSQL connection to the server. The default is 10.
|
||||
/// "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created
|
||||
/// from restoring a snapshot. The default is "10".
|
||||
/// # Examples
|
||||
/// {
|
||||
/// "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
|
||||
/// "host": "host_foo",
|
||||
/// "user": "solana",
|
||||
/// "threads": 10,
|
||||
/// "accounts_selector" : {
|
||||
/// "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
|
||||
/// }
|
||||
|
||||
fn on_load(&mut self, config_file: &str) -> Result<()> {
|
||||
solana_logger::setup_with_default("info");
|
||||
info!(
|
||||
"Loading plugin {:?} from config_file {:?}",
|
||||
self.name(),
|
||||
config_file
|
||||
);
|
||||
let mut file = File::open(config_file)?;
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)?;
|
||||
|
||||
let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
|
||||
self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
|
||||
|
||||
let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
|
||||
serde_json::from_str(&contents);
|
||||
match result {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::ConfigFileReadError {
|
||||
msg: format!(
|
||||
"The config file is not in the JSON format expected: {:?}",
|
||||
err
|
||||
),
|
||||
})
|
||||
}
|
||||
Ok(config) => {
|
||||
let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
|
||||
self.client = Some(client);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn on_unload(&mut self) {
|
||||
info!("Unloading plugin: {:?}", self.name());
|
||||
|
||||
match &mut self.client {
|
||||
None => {}
|
||||
Some(client) => {
|
||||
client.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: ReplicaAccountInfoVersions,
|
||||
slot: u64,
|
||||
is_startup: bool,
|
||||
) -> Result<()> {
|
||||
let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
|
||||
match account {
|
||||
ReplicaAccountInfoVersions::V0_0_1(account) => {
|
||||
let mut measure_select =
|
||||
Measure::start("accountsdb-plugin-postgres-update-account-select");
|
||||
if let Some(accounts_selector) = &self.accounts_selector {
|
||||
if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
|
||||
return Ok(());
|
||||
}
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
measure_select.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-select-us",
|
||||
measure_select.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
debug!(
|
||||
"Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
bs58::encode(account.owner).into_string(),
|
||||
slot,
|
||||
self.accounts_selector.as_ref().unwrap()
|
||||
);
|
||||
|
||||
match &mut self.client {
|
||||
None => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
||||
msg: "There is no connection to the PostgreSQL database."
|
||||
.to_string(),
|
||||
},
|
||||
)));
|
||||
}
|
||||
Some(client) => {
|
||||
let mut measure_update =
|
||||
Measure::start("accountsdb-plugin-postgres-update-account-client");
|
||||
let result = { client.update_account(account, slot, is_startup) };
|
||||
measure_update.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-client-us",
|
||||
measure_update.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
if let Err(err) = result {
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError {
|
||||
msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
measure_all.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-main-us",
|
||||
measure_all.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<()> {
|
||||
info!("Updating slot {:?} at with status {:?}", slot, status);
|
||||
|
||||
match &mut self.client {
|
||||
None => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
||||
msg: "There is no connection to the PostgreSQL database.".to_string(),
|
||||
},
|
||||
)));
|
||||
}
|
||||
Some(client) => {
|
||||
let result = client.update_slot_status(slot, parent, status);
|
||||
|
||||
if let Err(err) = result {
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError{
|
||||
msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn notify_end_of_startup(&mut self) -> Result<()> {
|
||||
info!("Notifying the end of startup for accounts notifications");
|
||||
match &mut self.client {
|
||||
None => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError {
|
||||
msg: "There is no connection to the PostgreSQL database.".to_string(),
|
||||
},
|
||||
)));
|
||||
}
|
||||
Some(client) => {
|
||||
let result = client.notify_end_of_startup();
|
||||
|
||||
if let Err(err) = result {
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError{
|
||||
msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AccountsDbPluginPostgres {
|
||||
fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
|
||||
let accounts_selector = &config["accounts_selector"];
|
||||
|
||||
if accounts_selector.is_null() {
|
||||
AccountsSelector::default()
|
||||
} else {
|
||||
let accounts = &accounts_selector["accounts"];
|
||||
let accounts: Vec<String> = if accounts.is_array() {
|
||||
accounts
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|val| val.as_str().unwrap().to_string())
|
||||
.collect()
|
||||
} else {
|
||||
Vec::default()
|
||||
};
|
||||
let owners = &accounts_selector["owners"];
|
||||
let owners: Vec<String> = if owners.is_array() {
|
||||
owners
|
||||
.as_array()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|val| val.as_str().unwrap().to_string())
|
||||
.collect()
|
||||
} else {
|
||||
Vec::default()
|
||||
};
|
||||
AccountsSelector::new(&accounts, &owners)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
AccountsDbPluginPostgres {
|
||||
client: None,
|
||||
accounts_selector: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
#[allow(improper_ctypes_definitions)]
|
||||
/// # Safety
|
||||
///
|
||||
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
|
||||
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
|
||||
let plugin = AccountsDbPluginPostgres::new();
|
||||
let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
|
||||
Box::into_raw(plugin)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use {super::*, serde_json};
|
||||
|
||||
#[test]
|
||||
fn test_accounts_selector_from_config() {
|
||||
let config = "{\"accounts_selector\" : { \
|
||||
\"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
|
||||
}}";
|
||||
|
||||
let config: serde_json::Value = serde_json::from_str(config).unwrap();
|
||||
AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
|
||||
}
|
||||
}
|
@@ -1,3 +0,0 @@
|
||||
pub mod accounts_selector;
|
||||
pub mod accountsdb_plugin_postgres;
|
||||
pub mod postgres_client;
|
@@ -1,776 +0,0 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
|
||||
/// A concurrent implementation for writing accounts into the PostgreSQL in parallel.
|
||||
use {
|
||||
crate::accountsdb_plugin_postgres::{
|
||||
AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
|
||||
},
|
||||
chrono::Utc,
|
||||
crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender},
|
||||
log::*,
|
||||
postgres::{Client, NoTls, Statement},
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
AccountsDbPluginError, ReplicaAccountInfo, SlotStatus,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_sdk::timing::AtomicInterval,
|
||||
std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
||||
Arc, Mutex,
|
||||
},
|
||||
thread::{self, sleep, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
},
|
||||
tokio_postgres::types::ToSql,
|
||||
};
|
||||
|
||||
/// The maximum asynchronous requests allowed in the channel to avoid excessive
|
||||
/// memory usage. The downside -- calls after this threshold is reached can get blocked.
|
||||
const MAX_ASYNC_REQUESTS: usize = 40960;
|
||||
const DEFAULT_POSTGRES_PORT: u16 = 5432;
|
||||
const DEFAULT_THREADS_COUNT: usize = 100;
|
||||
const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10;
|
||||
const ACCOUNT_COLUMN_COUNT: usize = 9;
|
||||
|
||||
struct PostgresSqlClientWrapper {
|
||||
client: Client,
|
||||
update_account_stmt: Statement,
|
||||
bulk_account_insert_stmt: Statement,
|
||||
}
|
||||
|
||||
pub struct SimplePostgresClient {
|
||||
batch_size: usize,
|
||||
pending_account_updates: Vec<DbAccountInfo>,
|
||||
client: Mutex<PostgresSqlClientWrapper>,
|
||||
}
|
||||
|
||||
struct PostgresClientWorker {
|
||||
client: SimplePostgresClient,
|
||||
/// Indicating if accounts notification during startup is done.
|
||||
is_startup_done: bool,
|
||||
}
|
||||
|
||||
impl Eq for DbAccountInfo {}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
pub struct DbAccountInfo {
|
||||
pub pubkey: Vec<u8>,
|
||||
pub lamports: i64,
|
||||
pub owner: Vec<u8>,
|
||||
pub executable: bool,
|
||||
pub rent_epoch: i64,
|
||||
pub data: Vec<u8>,
|
||||
pub slot: i64,
|
||||
pub write_version: i64,
|
||||
}
|
||||
|
||||
impl DbAccountInfo {
|
||||
fn new<T: ReadableAccountInfo>(account: &T, slot: u64) -> DbAccountInfo {
|
||||
let data = account.data().to_vec();
|
||||
Self {
|
||||
pubkey: account.pubkey().to_vec(),
|
||||
lamports: account.lamports() as i64,
|
||||
owner: account.owner().to_vec(),
|
||||
executable: account.executable(),
|
||||
rent_epoch: account.rent_epoch() as i64,
|
||||
data,
|
||||
slot: slot as i64,
|
||||
write_version: account.write_version(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ReadableAccountInfo: Sized {
|
||||
fn pubkey(&self) -> &[u8];
|
||||
fn owner(&self) -> &[u8];
|
||||
fn lamports(&self) -> i64;
|
||||
fn executable(&self) -> bool;
|
||||
fn rent_epoch(&self) -> i64;
|
||||
fn data(&self) -> &[u8];
|
||||
fn write_version(&self) -> i64;
|
||||
}
|
||||
|
||||
impl ReadableAccountInfo for DbAccountInfo {
|
||||
fn pubkey(&self) -> &[u8] {
|
||||
&self.pubkey
|
||||
}
|
||||
|
||||
fn owner(&self) -> &[u8] {
|
||||
&self.owner
|
||||
}
|
||||
|
||||
fn lamports(&self) -> i64 {
|
||||
self.lamports
|
||||
}
|
||||
|
||||
fn executable(&self) -> bool {
|
||||
self.executable
|
||||
}
|
||||
|
||||
fn rent_epoch(&self) -> i64 {
|
||||
self.rent_epoch
|
||||
}
|
||||
|
||||
fn data(&self) -> &[u8] {
|
||||
&self.data
|
||||
}
|
||||
|
||||
fn write_version(&self) -> i64 {
|
||||
self.write_version
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ReadableAccountInfo for ReplicaAccountInfo<'a> {
|
||||
fn pubkey(&self) -> &[u8] {
|
||||
self.pubkey
|
||||
}
|
||||
|
||||
fn owner(&self) -> &[u8] {
|
||||
self.owner
|
||||
}
|
||||
|
||||
fn lamports(&self) -> i64 {
|
||||
self.lamports as i64
|
||||
}
|
||||
|
||||
fn executable(&self) -> bool {
|
||||
self.executable
|
||||
}
|
||||
|
||||
fn rent_epoch(&self) -> i64 {
|
||||
self.rent_epoch as i64
|
||||
}
|
||||
|
||||
fn data(&self) -> &[u8] {
|
||||
self.data
|
||||
}
|
||||
|
||||
fn write_version(&self) -> i64 {
|
||||
self.write_version as i64
|
||||
}
|
||||
}
|
||||
|
||||
pub trait PostgresClient {
|
||||
fn join(&mut self) -> thread::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: DbAccountInfo,
|
||||
is_startup: bool,
|
||||
) -> Result<(), AccountsDbPluginError>;
|
||||
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<(), AccountsDbPluginError>;
|
||||
|
||||
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError>;
|
||||
}
|
||||
|
||||
impl SimplePostgresClient {
|
||||
fn connect_to_db(
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Client, AccountsDbPluginError> {
|
||||
let port = config.port.unwrap_or(DEFAULT_POSTGRES_PORT);
|
||||
|
||||
let connection_str = format!("host={} user={} port={}", config.host, config.user, port);
|
||||
|
||||
match Client::connect(&connection_str, NoTls) {
|
||||
Err(err) => {
|
||||
let msg = format!(
|
||||
"Error in connecting to the PostgreSQL database: {:?} host: {:?} user: {:?} config: {:?}",
|
||||
err, config.host, config.user, connection_str);
|
||||
error!("{}", msg);
|
||||
Err(AccountsDbPluginError::Custom(Box::new(
|
||||
AccountsDbPluginPostgresError::DataStoreConnectionError { msg },
|
||||
)))
|
||||
}
|
||||
Ok(client) => Ok(client),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_bulk_account_insert_statement(
|
||||
client: &mut Client,
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Statement, AccountsDbPluginError> {
|
||||
let batch_size = config
|
||||
.batch_size
|
||||
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
|
||||
let mut stmt = String::from("INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) VALUES");
|
||||
for j in 0..batch_size {
|
||||
let row = j * ACCOUNT_COLUMN_COUNT;
|
||||
let val_str = format!(
|
||||
"(${}, ${}, ${}, ${}, ${}, ${}, ${}, ${}, ${})",
|
||||
row + 1,
|
||||
row + 2,
|
||||
row + 3,
|
||||
row + 4,
|
||||
row + 5,
|
||||
row + 6,
|
||||
row + 7,
|
||||
row + 8,
|
||||
row + 9,
|
||||
);
|
||||
|
||||
if j == 0 {
|
||||
stmt = format!("{} {}", &stmt, val_str);
|
||||
} else {
|
||||
stmt = format!("{}, {}", &stmt, val_str);
|
||||
}
|
||||
}
|
||||
|
||||
let handle_conflict = "ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
|
||||
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
|
||||
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
|
||||
|
||||
stmt = format!("{} {}", stmt, handle_conflict);
|
||||
|
||||
info!("{}", stmt);
|
||||
let bulk_stmt = client.prepare(&stmt);
|
||||
|
||||
match bulk_stmt {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
|
||||
msg: format!(
|
||||
"Error in preparing for the accounts update PostgreSQL database: {} host: {} user: {} config: {:?}",
|
||||
err, config.host, config.user, config
|
||||
),
|
||||
})));
|
||||
}
|
||||
Ok(update_account_stmt) => Ok(update_account_stmt),
|
||||
}
|
||||
}
|
||||
|
||||
fn build_single_account_upsert_statement(
|
||||
client: &mut Client,
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<Statement, AccountsDbPluginError> {
|
||||
let stmt = "INSERT INTO account AS acct (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) \
|
||||
ON CONFLICT (pubkey) DO UPDATE SET slot=excluded.slot, owner=excluded.owner, lamports=excluded.lamports, executable=excluded.executable, rent_epoch=excluded.rent_epoch, \
|
||||
data=excluded.data, write_version=excluded.write_version, updated_on=excluded.updated_on WHERE acct.slot < excluded.slot OR (\
|
||||
acct.slot = excluded.slot AND acct.write_version < excluded.write_version)";
|
||||
|
||||
let stmt = client.prepare(stmt);
|
||||
|
||||
match stmt {
|
||||
Err(err) => {
|
||||
return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
|
||||
msg: format!(
|
||||
"Error in preparing for the accounts update PostgreSQL database: {} host: {} user: {} config: {:?}",
|
||||
err, config.host, config.user, config
|
||||
),
|
||||
})));
|
||||
}
|
||||
Ok(update_account_stmt) => Ok(update_account_stmt),
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal function for updating or inserting a single account
|
||||
fn upsert_account_internal(
|
||||
account: &DbAccountInfo,
|
||||
statement: &Statement,
|
||||
client: &mut Client,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
let lamports = account.lamports() as i64;
|
||||
let rent_epoch = account.rent_epoch() as i64;
|
||||
let updated_on = Utc::now().naive_utc();
|
||||
let result = client.query(
|
||||
statement,
|
||||
&[
|
||||
&account.pubkey(),
|
||||
&account.slot,
|
||||
&account.owner(),
|
||||
&lamports,
|
||||
&account.executable(),
|
||||
&rent_epoch,
|
||||
&account.data(),
|
||||
&account.write_version(),
|
||||
&updated_on,
|
||||
],
|
||||
);
|
||||
|
||||
if let Err(err) = result {
|
||||
let msg = format!(
|
||||
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
|
||||
err
|
||||
);
|
||||
error!("{}", msg);
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update or insert a single account
|
||||
fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> {
|
||||
let client = self.client.get_mut().unwrap();
|
||||
let statement = &client.update_account_stmt;
|
||||
let client = &mut client.client;
|
||||
Self::upsert_account_internal(account, statement, client)
|
||||
}
|
||||
|
||||
/// Insert accounts in batch to reduce network overhead
|
||||
fn insert_accounts_in_batch(
|
||||
&mut self,
|
||||
account: DbAccountInfo,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
self.pending_account_updates.push(account);
|
||||
|
||||
if self.pending_account_updates.len() == self.batch_size {
|
||||
let mut measure = Measure::start("accountsdb-plugin-postgres-prepare-values");
|
||||
|
||||
let mut values: Vec<&(dyn ToSql + Sync)> =
|
||||
Vec::with_capacity(self.batch_size * ACCOUNT_COLUMN_COUNT);
|
||||
let updated_on = Utc::now().naive_utc();
|
||||
for j in 0..self.batch_size {
|
||||
let account = &self.pending_account_updates[j];
|
||||
|
||||
values.push(&account.pubkey);
|
||||
values.push(&account.slot);
|
||||
values.push(&account.owner);
|
||||
values.push(&account.lamports);
|
||||
values.push(&account.executable);
|
||||
values.push(&account.rent_epoch);
|
||||
values.push(&account.data);
|
||||
values.push(&account.write_version);
|
||||
values.push(&updated_on);
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-prepare-values-us",
|
||||
measure.as_us() as usize,
|
||||
10000,
|
||||
10000
|
||||
);
|
||||
|
||||
let mut measure = Measure::start("accountsdb-plugin-postgres-update-account");
|
||||
let client = self.client.get_mut().unwrap();
|
||||
let result = client
|
||||
.client
|
||||
.query(&client.bulk_account_insert_stmt, &values);
|
||||
|
||||
self.pending_account_updates.clear();
|
||||
if let Err(err) = result {
|
||||
let msg = format!(
|
||||
"Failed to persist the update of account to the PostgreSQL database. Error: {:?}",
|
||||
err
|
||||
);
|
||||
error!("{}", msg);
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError { msg });
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-us",
|
||||
measure.as_us() as usize,
|
||||
10000,
|
||||
10000
|
||||
);
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-update-account-count",
|
||||
self.batch_size,
|
||||
10000,
|
||||
10000
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Flush any left over accounts in batch which are not processed in the last batch
|
||||
fn flush_buffered_writes(&mut self) -> Result<(), AccountsDbPluginError> {
|
||||
if self.pending_account_updates.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let client = self.client.get_mut().unwrap();
|
||||
let statement = &client.update_account_stmt;
|
||||
let client = &mut client.client;
|
||||
|
||||
for account in self.pending_account_updates.drain(..) {
|
||||
Self::upsert_account_internal(&account, statement, client)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
|
||||
info!("Creating SimplePostgresClient...");
|
||||
let mut client = Self::connect_to_db(config)?;
|
||||
let bulk_account_insert_stmt =
|
||||
Self::build_bulk_account_insert_statement(&mut client, config)?;
|
||||
let update_account_stmt = Self::build_single_account_upsert_statement(&mut client, config)?;
|
||||
|
||||
let batch_size = config
|
||||
.batch_size
|
||||
.unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE);
|
||||
info!("Created SimplePostgresClient.");
|
||||
Ok(Self {
|
||||
batch_size,
|
||||
pending_account_updates: Vec::with_capacity(batch_size),
|
||||
client: Mutex::new(PostgresSqlClientWrapper {
|
||||
client,
|
||||
update_account_stmt,
|
||||
bulk_account_insert_stmt,
|
||||
}),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl PostgresClient for SimplePostgresClient {
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: DbAccountInfo,
|
||||
is_startup: bool,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
trace!(
|
||||
"Updating account {} with owner {} at slot {}",
|
||||
bs58::encode(account.pubkey()).into_string(),
|
||||
bs58::encode(account.owner()).into_string(),
|
||||
account.slot,
|
||||
);
|
||||
if !is_startup {
|
||||
return self.upsert_account(&account);
|
||||
}
|
||||
self.insert_accounts_in_batch(account)
|
||||
}
|
||||
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
info!("Updating slot {:?} at with status {:?}", slot, status);
|
||||
|
||||
let slot = slot as i64; // postgres only supports i64
|
||||
let parent = parent.map(|parent| parent as i64);
|
||||
let updated_on = Utc::now().naive_utc();
|
||||
let status_str = status.as_str();
|
||||
let client = self.client.get_mut().unwrap();
|
||||
|
||||
let result = match parent {
|
||||
Some(parent) => {
|
||||
client.client.execute(
|
||||
"INSERT INTO slot (slot, parent, status, updated_on) \
|
||||
VALUES ($1, $2, $3, $4) \
|
||||
ON CONFLICT (slot) DO UPDATE SET parent=$2, status=$3, updated_on=$4",
|
||||
&[
|
||||
&slot,
|
||||
&parent,
|
||||
&status_str,
|
||||
&updated_on,
|
||||
],
|
||||
)
|
||||
}
|
||||
None => {
|
||||
client.client.execute(
|
||||
"INSERT INTO slot (slot, status, updated_on) \
|
||||
VALUES ($1, $2, $3) \
|
||||
ON CONFLICT (slot) DO UPDATE SET status=$2, updated_on=$3",
|
||||
&[
|
||||
&slot,
|
||||
&status_str,
|
||||
&updated_on,
|
||||
],
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
match result {
|
||||
Err(err) => {
|
||||
let msg = format!(
|
||||
"Failed to persist the update of slot to the PostgreSQL database. Error: {:?}",
|
||||
err
|
||||
);
|
||||
error!("{:?}", msg);
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError { msg });
|
||||
}
|
||||
Ok(rows) => {
|
||||
assert_eq!(1, rows, "Expected one rows to be updated a time");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
|
||||
self.flush_buffered_writes()
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateAccountRequest {
|
||||
account: DbAccountInfo,
|
||||
is_startup: bool,
|
||||
}
|
||||
|
||||
struct UpdateSlotRequest {
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
slot_status: SlotStatus,
|
||||
}
|
||||
|
||||
enum DbWorkItem {
|
||||
UpdateAccount(UpdateAccountRequest),
|
||||
UpdateSlot(UpdateSlotRequest),
|
||||
}
|
||||
|
||||
impl PostgresClientWorker {
|
||||
fn new(config: AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
|
||||
let result = SimplePostgresClient::new(&config);
|
||||
match result {
|
||||
Ok(client) => Ok(PostgresClientWorker {
|
||||
client,
|
||||
is_startup_done: false,
|
||||
}),
|
||||
Err(err) => {
|
||||
error!("Error in creating SimplePostgresClient: {}", err);
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn do_work(
|
||||
&mut self,
|
||||
receiver: Receiver<DbWorkItem>,
|
||||
exit_worker: Arc<AtomicBool>,
|
||||
is_startup_done: Arc<AtomicBool>,
|
||||
startup_done_count: Arc<AtomicUsize>,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
while !exit_worker.load(Ordering::Relaxed) {
|
||||
let mut measure = Measure::start("accountsdb-plugin-postgres-worker-recv");
|
||||
let work = receiver.recv_timeout(Duration::from_millis(500));
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-postgres-worker-recv-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
match work {
|
||||
Ok(work) => match work {
|
||||
DbWorkItem::UpdateAccount(request) => {
|
||||
self.client
|
||||
.update_account(request.account, request.is_startup)?;
|
||||
}
|
||||
DbWorkItem::UpdateSlot(request) => {
|
||||
self.client.update_slot_status(
|
||||
request.slot,
|
||||
request.parent,
|
||||
request.slot_status,
|
||||
)?;
|
||||
}
|
||||
},
|
||||
Err(err) => match err {
|
||||
RecvTimeoutError::Timeout => {
|
||||
if !self.is_startup_done && is_startup_done.load(Ordering::Relaxed) {
|
||||
self.client.notify_end_of_startup()?;
|
||||
self.is_startup_done = true;
|
||||
startup_done_count.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
_ => {
|
||||
error!("Error in receiving the item {:?}", err);
|
||||
break;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
pub struct ParallelPostgresClient {
|
||||
workers: Vec<JoinHandle<Result<(), AccountsDbPluginError>>>,
|
||||
exit_worker: Arc<AtomicBool>,
|
||||
is_startup_done: Arc<AtomicBool>,
|
||||
startup_done_count: Arc<AtomicUsize>,
|
||||
initialized_worker_count: Arc<AtomicUsize>,
|
||||
sender: Sender<DbWorkItem>,
|
||||
last_report: AtomicInterval,
|
||||
}
|
||||
|
||||
impl ParallelPostgresClient {
|
||||
pub fn new(config: &AccountsDbPluginPostgresConfig) -> Result<Self, AccountsDbPluginError> {
|
||||
info!("Creating ParallelPostgresClient...");
|
||||
let (sender, receiver) = bounded(MAX_ASYNC_REQUESTS);
|
||||
let exit_worker = Arc::new(AtomicBool::new(false));
|
||||
let mut workers = Vec::default();
|
||||
let is_startup_done = Arc::new(AtomicBool::new(false));
|
||||
let startup_done_count = Arc::new(AtomicUsize::new(0));
|
||||
let worker_count = config.threads.unwrap_or(DEFAULT_THREADS_COUNT);
|
||||
let initialized_worker_count = Arc::new(AtomicUsize::new(0));
|
||||
for i in 0..worker_count {
|
||||
let cloned_receiver = receiver.clone();
|
||||
let exit_clone = exit_worker.clone();
|
||||
let is_startup_done_clone = is_startup_done.clone();
|
||||
let startup_done_count_clone = startup_done_count.clone();
|
||||
let initialized_worker_count_clone = initialized_worker_count.clone();
|
||||
let config = config.clone();
|
||||
let worker = Builder::new()
|
||||
.name(format!("worker-{}", i))
|
||||
.spawn(move || -> Result<(), AccountsDbPluginError> {
|
||||
let result = PostgresClientWorker::new(config);
|
||||
|
||||
match result {
|
||||
Ok(mut worker) => {
|
||||
initialized_worker_count_clone.fetch_add(1, Ordering::Relaxed);
|
||||
worker.do_work(
|
||||
cloned_receiver,
|
||||
exit_clone,
|
||||
is_startup_done_clone,
|
||||
startup_done_count_clone,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
workers.push(worker);
|
||||
}
|
||||
|
||||
info!("Created ParallelPostgresClient.");
|
||||
Ok(Self {
|
||||
last_report: AtomicInterval::default(),
|
||||
workers,
|
||||
exit_worker,
|
||||
is_startup_done,
|
||||
startup_done_count,
|
||||
initialized_worker_count,
|
||||
sender,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn join(&mut self) -> thread::Result<()> {
|
||||
self.exit_worker.store(true, Ordering::Relaxed);
|
||||
while !self.workers.is_empty() {
|
||||
let worker = self.workers.pop();
|
||||
if worker.is_none() {
|
||||
break;
|
||||
}
|
||||
let worker = worker.unwrap();
|
||||
let result = worker.join().unwrap();
|
||||
if result.is_err() {
|
||||
error!("The worker thread has failed: {:?}", result);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_account(
|
||||
&mut self,
|
||||
account: &ReplicaAccountInfo,
|
||||
slot: u64,
|
||||
is_startup: bool,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
if self.last_report.should_update(30000) {
|
||||
datapoint_debug!(
|
||||
"postgres-plugin-stats",
|
||||
("message-queue-length", self.sender.len() as i64, i64),
|
||||
);
|
||||
}
|
||||
let mut measure = Measure::start("accountsdb-plugin-posgres-create-work-item");
|
||||
let wrk_item = DbWorkItem::UpdateAccount(UpdateAccountRequest {
|
||||
account: DbAccountInfo::new(account, slot),
|
||||
is_startup,
|
||||
});
|
||||
|
||||
measure.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-posgres-create-work-item-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
let mut measure = Measure::start("accountsdb-plugin-posgres-send-msg");
|
||||
|
||||
if let Err(err) = self.sender.send(wrk_item) {
|
||||
return Err(AccountsDbPluginError::AccountsUpdateError {
|
||||
msg: format!(
|
||||
"Failed to update the account {:?}, error: {:?}",
|
||||
bs58::encode(account.pubkey()).into_string(),
|
||||
err
|
||||
),
|
||||
});
|
||||
}
|
||||
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-posgres-send-msg-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<(), AccountsDbPluginError> {
|
||||
if let Err(err) = self.sender.send(DbWorkItem::UpdateSlot(UpdateSlotRequest {
|
||||
slot,
|
||||
parent,
|
||||
slot_status: status,
|
||||
})) {
|
||||
return Err(AccountsDbPluginError::SlotStatusUpdateError {
|
||||
msg: format!("Failed to update the slot {:?}, error: {:?}", slot, err),
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> {
|
||||
info!("Notifying the end of startup");
|
||||
// Ensure all items in the queue has been received by the workers
|
||||
while !self.sender.is_empty() {
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
self.is_startup_done.store(true, Ordering::Relaxed);
|
||||
|
||||
// Wait for all worker threads to be done with flushing
|
||||
while self.startup_done_count.load(Ordering::Relaxed)
|
||||
!= self.initialized_worker_count.load(Ordering::Relaxed)
|
||||
{
|
||||
info!(
|
||||
"Startup done count: {}, good worker thread count: {}",
|
||||
self.startup_done_count.load(Ordering::Relaxed),
|
||||
self.initialized_worker_count.load(Ordering::Relaxed)
|
||||
);
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
|
||||
info!("Done with notifying the end of startup");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PostgresClientBuilder {}
|
||||
|
||||
impl PostgresClientBuilder {
|
||||
pub fn build_pararallel_postgres_client(
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<ParallelPostgresClient, AccountsDbPluginError> {
|
||||
ParallelPostgresClient::new(config)
|
||||
}
|
||||
|
||||
pub fn build_simple_postgres_client(
|
||||
config: &AccountsDbPluginPostgresConfig,
|
||||
) -> Result<SimplePostgresClient, AccountsDbPluginError> {
|
||||
SimplePostgresClient::new(config)
|
||||
}
|
||||
}
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -14,18 +14,18 @@ crossbeam-channel = "0.4"
|
||||
log = "0.4.11"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
solana-core = { path = "../core", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.8.2" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-perf = { path = "../perf", version = "=1.8.2" }
|
||||
solana-poh = { path = "../poh", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-core = { path = "../core", version = "=1.7.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.17" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.17" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.17" }
|
||||
solana-poh = { path = "../poh", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -16,7 +16,6 @@ use solana_perf::packet::to_packets_chunked;
|
||||
use solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry};
|
||||
use solana_runtime::{
|
||||
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
|
||||
cost_model::CostModel,
|
||||
};
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
@@ -28,7 +27,7 @@ use solana_sdk::{
|
||||
};
|
||||
use solana_streamer::socket::SocketAddrSpace;
|
||||
use std::{
|
||||
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
|
||||
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
@@ -232,7 +231,6 @@ fn main() {
|
||||
vote_receiver,
|
||||
None,
|
||||
replay_vote_sender,
|
||||
Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-client"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana banks client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -15,16 +15,16 @@ borsh = "0.9.0"
|
||||
borsh-derive = "0.9.0"
|
||||
futures = "0.3"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.8.2" }
|
||||
solana-program = { path = "../sdk/program", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.7.17" }
|
||||
solana-program = { path = "../sdk/program", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.7.17" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -385,9 +385,7 @@ mod tests {
|
||||
let message = Message::new(&[instruction], Some(&mint_pubkey));
|
||||
|
||||
Runtime::new()?.block_on(async {
|
||||
let client_transport =
|
||||
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
|
||||
.await;
|
||||
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
|
||||
let mut banks_client = start_client(client_transport).await?;
|
||||
|
||||
let recent_blockhash = banks_client.get_recent_blockhash().await?;
|
||||
@@ -418,9 +416,7 @@ mod tests {
|
||||
let message = Message::new(&[instruction], Some(mint_pubkey));
|
||||
|
||||
Runtime::new()?.block_on(async {
|
||||
let client_transport =
|
||||
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
|
||||
.await;
|
||||
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
|
||||
let mut banks_client = start_client(client_transport).await?;
|
||||
let (_, recent_blockhash, last_valid_block_height) = banks_client.get_fees().await?;
|
||||
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-interface"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana banks RPC interface"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,7 +12,7 @@ edition = "2018"
|
||||
[dependencies]
|
||||
mio = "0.7.6"
|
||||
serde = { version = "1.0.122", features = ["derive"] }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-server"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana banks server"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -14,10 +14,10 @@ bincode = "1.3.1"
|
||||
futures = "0.3"
|
||||
log = "0.4.11"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.17" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
|
@@ -12,7 +12,6 @@ use solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentLevel,
|
||||
feature_set::FeatureSet,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
@@ -44,7 +43,6 @@ struct BanksServer {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
transaction_sender: Sender<TransactionInfo>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
}
|
||||
|
||||
impl BanksServer {
|
||||
@@ -56,13 +54,11 @@ impl BanksServer {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
transaction_sender: Sender<TransactionInfo>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
) -> Self {
|
||||
Self {
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
transaction_sender,
|
||||
poll_signature_status_sleep_duration,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,7 +81,6 @@ impl BanksServer {
|
||||
fn new_loopback(
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
) -> Self {
|
||||
let (transaction_sender, transaction_receiver) = channel();
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
@@ -100,12 +95,7 @@ impl BanksServer {
|
||||
.name("solana-bank-forks-client".to_string())
|
||||
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
|
||||
.unwrap();
|
||||
Self::new(
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
transaction_sender,
|
||||
poll_signature_status_sleep_duration,
|
||||
)
|
||||
Self::new(bank_forks, block_commitment_cache, transaction_sender)
|
||||
}
|
||||
|
||||
fn slot(&self, commitment: CommitmentLevel) -> Slot {
|
||||
@@ -130,7 +120,7 @@ impl BanksServer {
|
||||
.bank(commitment)
|
||||
.get_signature_status_with_blockhash(signature, blockhash);
|
||||
while status.is_none() {
|
||||
sleep(self.poll_signature_status_sleep_duration).await;
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
let bank = self.bank(commitment);
|
||||
if bank.block_height() > last_valid_block_height {
|
||||
break;
|
||||
@@ -143,11 +133,11 @@ impl BanksServer {
|
||||
|
||||
fn verify_transaction(
|
||||
transaction: &Transaction,
|
||||
feature_set: &Arc<FeatureSet>,
|
||||
libsecp256k1_0_5_upgrade_enabled: bool,
|
||||
) -> transaction::Result<()> {
|
||||
if let Err(err) = transaction.verify() {
|
||||
Err(err)
|
||||
} else if let Err(err) = transaction.verify_precompiles(feature_set) {
|
||||
} else if let Err(err) = transaction.verify_precompiles(libsecp256k1_0_5_upgrade_enabled) {
|
||||
Err(err)
|
||||
} else {
|
||||
Ok(())
|
||||
@@ -237,13 +227,19 @@ impl Banks for BanksServer {
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<transaction::Result<()>> {
|
||||
if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
|
||||
if let Err(err) = verify_transaction(
|
||||
&transaction,
|
||||
self.bank(commitment).libsecp256k1_0_5_upgrade_enabled(),
|
||||
) {
|
||||
return Some(Err(err));
|
||||
}
|
||||
|
||||
let blockhash = &transaction.message.recent_blockhash;
|
||||
let last_valid_block_height = self
|
||||
.bank(commitment)
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.root_bank()
|
||||
.get_blockhash_last_valid_block_height(blockhash)
|
||||
.unwrap();
|
||||
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
||||
@@ -271,13 +267,8 @@ impl Banks for BanksServer {
|
||||
pub async fn start_local_server(
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
|
||||
let banks_server = BanksServer::new_loopback(
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
poll_signature_status_sleep_duration,
|
||||
);
|
||||
let banks_server = BanksServer::new_loopback(bank_forks, block_commitment_cache);
|
||||
let (client_transport, server_transport) = transport::channel::unbounded();
|
||||
let server = server::new(server::Config::default())
|
||||
.incoming(stream::once(future::ready(server_transport)))
|
||||
@@ -312,12 +303,8 @@ pub async fn start_tcp_server(
|
||||
|
||||
SendTransactionService::new(tpu_addr, &bank_forks, receiver);
|
||||
|
||||
let server = BanksServer::new(
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache.clone(),
|
||||
sender,
|
||||
Duration::from_millis(200),
|
||||
);
|
||||
let server =
|
||||
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
|
||||
chan.respond_with(server.serve()).execute()
|
||||
})
|
||||
// Max 10 channels.
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -18,23 +18,23 @@ rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-core = { path = "../core", version = "=1.8.2" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "=1.8.2" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.8.2" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-core = { path = "../core", version = "=1.7.17" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "=1.7.17" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.17" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.8.2" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -10,11 +10,11 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -15,24 +15,24 @@ log = "0.4.11"
|
||||
rayon = "1.5.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-core = { path = "../core", version = "=1.8.2" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.8.2" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-core = { path = "../core", version = "=1.7.17" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.17" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.4.0"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.8.2" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -12,8 +12,8 @@ edition = "2018"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
thiserror = "1.0.21"
|
||||
tiny-bip39 = "0.8.1"
|
||||
uriparse = "0.6.3"
|
||||
|
@@ -1,14 +1,3 @@
|
||||
//! Loading signers and keypairs from the command line.
|
||||
//!
|
||||
//! This module contains utilities for loading [Signer]s and [Keypair]s from
|
||||
//! standard signing sources, from the command line, as in the Solana CLI.
|
||||
//!
|
||||
//! The key function here is [`signer_from_path`], which loads a `Signer` from
|
||||
//! one of several possible sources by interpreting a "path" command line
|
||||
//! argument. Its documentation includes a description of all possible signing
|
||||
//! sources supported by the Solana CLI. Many other functions here are
|
||||
//! variations on, or delegate to, `signer_from_path`.
|
||||
|
||||
use {
|
||||
crate::{
|
||||
input_parsers::{pubkeys_sigs_of, STDOUT_OUTFILE_TOKEN},
|
||||
@@ -103,56 +92,14 @@ impl CliSignerInfo {
|
||||
}
|
||||
}
|
||||
|
||||
/// A command line argument that loads a default signer in absence of other signers.
|
||||
///
|
||||
/// This type manages a default signing source which may be overridden by other
|
||||
/// signing sources via its [`generate_unique_signers`] method.
|
||||
///
|
||||
/// [`generate_unique_signers`]: DefaultSigner::generate_unique_signers
|
||||
///
|
||||
/// `path` is a signing source as documented by [`signer_from_path`], and
|
||||
/// `arg_name` is the name of its [clap] command line argument, which is passed
|
||||
/// to `signer_from_path` as its `keypair_name` argument.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct DefaultSigner {
|
||||
/// The name of the signers command line argument.
|
||||
pub arg_name: String,
|
||||
/// The signing source.
|
||||
pub path: String,
|
||||
is_path_checked: RefCell<bool>,
|
||||
}
|
||||
|
||||
impl DefaultSigner {
|
||||
/// Create a new `DefaultSigner`.
|
||||
///
|
||||
/// `path` is a signing source as documented by [`signer_from_path`], and
|
||||
/// `arg_name` is the name of its [clap] command line argument, which is
|
||||
/// passed to `signer_from_path` as its `keypair_name` argument.
|
||||
///
|
||||
/// [clap]: https://docs.rs/clap
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::DefaultSigner;
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
///
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// # assert!(default_signer.arg_name.len() > 0);
|
||||
/// assert_eq!(default_signer.path, keypair_str);
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn new<AN: AsRef<str>, P: AsRef<str>>(arg_name: AN, path: P) -> Self {
|
||||
let arg_name = arg_name.as_ref().to_string();
|
||||
let path = path.as_ref().to_string();
|
||||
@@ -187,57 +134,6 @@ impl DefaultSigner {
|
||||
Ok(&self.path)
|
||||
}
|
||||
|
||||
/// Generate a unique set of signers, possibly excluding this default signer.
|
||||
///
|
||||
/// This function allows a command line application to have a default
|
||||
/// signer, perhaps representing a default wallet, but to override that
|
||||
/// signer and instead sign with one or more other signers.
|
||||
///
|
||||
/// `bulk_signers` is a vector of signers, all of which are optional. If any
|
||||
/// of those signers is `None`, then the default signer will be loaded; if
|
||||
/// all of those signers are `Some`, then the default signer will not be
|
||||
/// loaded.
|
||||
///
|
||||
/// The returned value includes all of the `bulk_signers` that were not
|
||||
/// `None`, and maybe the default signer, if it was loaded.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::{DefaultSigner, signer_from_path};
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
/// use solana_sdk::signer::Signer;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .arg(Arg::with_name("payer")
|
||||
/// .long("payer")
|
||||
/// .help("The account paying for the transaction"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let maybe_payer = clap_matches.value_of("payer");
|
||||
///
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// let maybe_payer_signer = maybe_payer.map(|payer| {
|
||||
/// signer_from_path(&clap_matches, payer, "payer", &mut wallet_manager)
|
||||
/// }).transpose()?;
|
||||
/// let bulk_signers = vec![maybe_payer_signer];
|
||||
///
|
||||
/// let unique_signers = default_signer.generate_unique_signers(
|
||||
/// bulk_signers,
|
||||
/// &clap_matches,
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn generate_unique_signers(
|
||||
&self,
|
||||
bulk_signers: Vec<Option<Box<dyn Signer>>>,
|
||||
@@ -262,45 +158,6 @@ impl DefaultSigner {
|
||||
})
|
||||
}
|
||||
|
||||
/// Loads the default [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as
|
||||
/// various types of _signing source_, depending on its format, one of which
|
||||
/// is a path to a keypair file. Some sources may require user interaction
|
||||
/// in the course of calling this function.
|
||||
///
|
||||
/// This simply delegates to the [`signer_from_path`] free function, passing
|
||||
/// it the `DefaultSigner`s `path` and `arg_name` fields as the `path` and
|
||||
/// `keypair_name` arguments.
|
||||
///
|
||||
/// See the [`signer_from_path`] free function for full documentation of how
|
||||
/// this function interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::DefaultSigner;
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// let signer = default_signer.signer_from_path(
|
||||
/// &clap_matches,
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn signer_from_path(
|
||||
&self,
|
||||
matches: &ArgMatches,
|
||||
@@ -309,51 +166,6 @@ impl DefaultSigner {
|
||||
signer_from_path(matches, self.path()?, &self.arg_name, wallet_manager)
|
||||
}
|
||||
|
||||
/// Loads the default [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as
|
||||
/// various types of _signing source_, depending on its format, one of which
|
||||
/// is a path to a keypair file. Some sources may require user interaction
|
||||
/// in the course of calling this function.
|
||||
///
|
||||
/// This simply delegates to the [`signer_from_path_with_config`] free
|
||||
/// function, passing it the `DefaultSigner`s `path` and `arg_name` fields
|
||||
/// as the `path` and `keypair_name` arguments.
|
||||
///
|
||||
/// See the [`signer_from_path`] free function for full documentation of how
|
||||
/// this function interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::{SignerFromPathConfig, DefaultSigner};
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let default_signer = DefaultSigner::new("keypair", &keypair_str);
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// // Allow pubkey signers without accompanying signatures
|
||||
/// let config = SignerFromPathConfig {
|
||||
/// allow_null_signer: true,
|
||||
/// };
|
||||
///
|
||||
/// let signer = default_signer.signer_from_path_with_config(
|
||||
/// &clap_matches,
|
||||
/// &mut wallet_manager,
|
||||
/// &config,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn signer_from_path_with_config(
|
||||
&self,
|
||||
matches: &ArgMatches,
|
||||
@@ -521,167 +333,19 @@ pub fn presigner_from_pubkey_sigs(
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
#[derive(Debug)]
|
||||
pub struct SignerFromPathConfig {
|
||||
pub allow_null_signer: bool,
|
||||
}
|
||||
|
||||
/// Loads a [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
|
||||
///
|
||||
/// The result of this function is a boxed object of the [Signer] trait. To load
|
||||
/// a concrete [Keypair], use the [keypair_from_path] function, though note that
|
||||
/// it does not support all signer sources.
|
||||
///
|
||||
/// The `matches` argument is the same set of parsed [clap] matches from which
|
||||
/// `path` was parsed. It is used to parse various additional command line
|
||||
/// arguments, depending on which signing source is requested, as described
|
||||
/// below in "Signing sources".
|
||||
///
|
||||
/// [clap]: https//docs.rs/clap
|
||||
///
|
||||
/// The `keypair_name` argument is the "name" of the signer, and is typically
|
||||
/// the name of the clap argument from which the `path` argument was parsed,
|
||||
/// like "keypair", "from", or "fee-payer". It is used solely for interactively
|
||||
/// prompting the user, either when entering seed phrases or selecting from
|
||||
/// multiple hardware wallets.
|
||||
///
|
||||
/// The `wallet_manager` is used for establishing connections to a hardware
|
||||
/// device such as Ledger. If `wallet_manager` is a reference to `None`, and a
|
||||
/// hardware signer is requested, then this function will attempt to create a
|
||||
/// wallet manager, assigning it to the mutable `wallet_manager` reference. This
|
||||
/// argument is typically a reference to `None`.
|
||||
///
|
||||
/// # Signing sources
|
||||
///
|
||||
/// The `path` argument can simply be a path to a keypair file, but it may also
|
||||
/// be interpreted in several other ways, in the following order.
|
||||
///
|
||||
/// Firstly, the `path` argument may be interpreted as a [URI], with the URI
|
||||
/// scheme indicating where to load the signer from. If it parses as a URI, then
|
||||
/// the following schemes are supported:
|
||||
///
|
||||
/// - `file:` — Read the keypair from a JSON keypair file. The path portion
|
||||
/// of the URI is the file path.
|
||||
///
|
||||
/// - `stdin:` — Read the keypair from stdin, in the JSON format used by
|
||||
/// the keypair file.
|
||||
///
|
||||
/// Non-scheme parts of the URI are ignored.
|
||||
///
|
||||
/// - `prompt:` — The user will be prompted at the command line
|
||||
/// for their seed phrase and passphrase.
|
||||
///
|
||||
/// In this URI the [query string][qs] may contain zero or one of the
|
||||
/// following key/value pairs that determine the [BIP44 derivation path][dp]
|
||||
/// of the private key from the seed:
|
||||
///
|
||||
/// - `key` — In this case the value is either one or two numerical
|
||||
/// indexes separated by a slash, which represent the "account", and
|
||||
/// "change" components of the BIP44 derivation path. Example: `key=0/0`.
|
||||
///
|
||||
/// - `full-path` — In this case the value is a full derivation path,
|
||||
/// and the user is responsible for ensuring it is correct. Example:
|
||||
/// `full-path=m/44/501/0/0/0`.
|
||||
///
|
||||
/// If neither is provided, then the default derivation path is used.
|
||||
///
|
||||
/// Note that when specifying derivation paths, this routine will convert all
|
||||
/// indexes into ["hardened"] indexes, even if written as "normal" indexes.
|
||||
///
|
||||
/// Other components of the URI besides the scheme and query string are ignored.
|
||||
///
|
||||
/// If the "skip_seed_phrase_validation" argument, as defined in
|
||||
/// [SKIP_SEED_PHRASE_VALIDATION_ARG] is found in `matches`, then the keypair
|
||||
/// seed will be generated directly from the seed phrase, without parsing or
|
||||
/// validating it as a BIP39 seed phrase. This allows the use of non-BIP39 seed
|
||||
/// phrases.
|
||||
///
|
||||
/// - `usb:` — Use a USB hardware device as the signer. In this case, the
|
||||
/// URI host indicates the device type, and is required. The only currently valid host
|
||||
/// value is "ledger".
|
||||
///
|
||||
/// Optionally, the first segment of the URI path indicates the base-58
|
||||
/// encoded pubkey of the wallet, and the "account" and "change" indices of
|
||||
/// the derivation path can be specified with the `key=` query parameter, as
|
||||
/// with the `prompt:` URI.
|
||||
///
|
||||
/// Examples:
|
||||
///
|
||||
/// - `usb://ledger`
|
||||
/// - `usb://ledger?key=0/0`
|
||||
/// - `usb://ledger/9rPVSygg3brqghvdZ6wsL2i5YNQTGhXGdJzF65YxaCQd`
|
||||
/// - `usb://ledger/9rPVSygg3brqghvdZ6wsL2i5YNQTGhXGdJzF65YxaCQd?key=0/0`
|
||||
///
|
||||
/// Next the `path` argument may be one of the following strings:
|
||||
///
|
||||
/// - `-` — Read the keypair from stdin. This is the same as the `stdin:`
|
||||
/// URI scheme.
|
||||
///
|
||||
/// - `ASK` — The user will be prompted at the command line for their seed
|
||||
/// phrase and passphrase. _This uses a legacy key derivation method and should
|
||||
/// usually be avoided in favor of `prompt:`._
|
||||
///
|
||||
/// Next, if the `path` argument parses as a base-58 public key, then the signer
|
||||
/// is created without a private key, but with presigned signatures, each parsed
|
||||
/// from the additional command line arguments, provided by the `matches`
|
||||
/// argument.
|
||||
///
|
||||
/// In this case, the remaining command line arguments are searched for clap
|
||||
/// arguments named "signer", as defined by [SIGNER_ARG], and each is parsed as
|
||||
/// a key-value pair of the form "pubkey=signature", where `pubkey` is the same
|
||||
/// base-58 public key, and `signature` is a serialized signature produced by
|
||||
/// the corresponding keypair. One of the "signer" signatures must be for the
|
||||
/// pubkey specified in `path` or this function will return an error; unless the
|
||||
/// "sign_only" clap argument, as defined by [SIGN_ONLY_ARG], is present in
|
||||
/// `matches`, in which case the signer will be created with no associated
|
||||
/// signatures.
|
||||
///
|
||||
/// Finally, if `path`, interpreted as a file path, represents a file on disk,
|
||||
/// then the signer is created by reading that file as a JSON-serialized
|
||||
/// keypair. This is the same as the `file:` URI scheme.
|
||||
///
|
||||
/// [qs]: https://en.wikipedia.org/wiki/Query_string
|
||||
/// [dp]: https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
|
||||
/// [URI]: https://en.wikipedia.org/wiki/Uniform_Resource_Identifier
|
||||
/// ["hardened"]: https://wiki.trezor.io/Hardened_and_non-hardened_derivation
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// This shows a reasonable way to set up clap to parse all possible signer
|
||||
/// sources. Note the use of the [`OfflineArgs::offline_args`] method to add
|
||||
/// correct clap definitions of the `--signer` and `--sign-only` arguments, as
|
||||
/// required by the base-58 pubkey offline signing method.
|
||||
///
|
||||
/// [`OfflineArgs::offline_args`]: crate::offline::OfflineArgs::offline_args
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::signer_from_path;
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let mut wallet_manager = None;
|
||||
/// let signer = signer_from_path(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
impl Default for SignerFromPathConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
allow_null_signer: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn signer_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -692,63 +356,6 @@ pub fn signer_from_path(
|
||||
signer_from_path_with_config(matches, path, keypair_name, wallet_manager, &config)
|
||||
}
|
||||
|
||||
/// Loads a [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
|
||||
///
|
||||
/// This is the same as [`signer_from_path`] except that it additionaolly
|
||||
/// accepts a [`SignerFromPathConfig`] argument.
|
||||
///
|
||||
/// If the `allow_null_signer` field of `config` is `true`, then pubkey signers
|
||||
/// are allowed to have zero associated signatures via additional "signer"
|
||||
/// command line arguments. It the same effect as if the "sign_only" clap
|
||||
/// argument is present.
|
||||
///
|
||||
/// See [`signer_from_path`] for full documentation of how this function
|
||||
/// interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// This shows a reasonable way to set up clap to parse all possible signer
|
||||
/// sources. Note the use of the [`OfflineArgs::offline_args`] method to add
|
||||
/// correct clap definitions of the `--signer` and `--sign-only` arguments, as
|
||||
/// required by the base-58 pubkey offline signing method.
|
||||
///
|
||||
/// [`OfflineArgs::offline_args`]: crate::offline::OfflineArgs::offline_args
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::{signer_from_path_with_config, SignerFromPathConfig};
|
||||
/// use solana_clap_utils::offline::OfflineArgs;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"))
|
||||
/// .offline_args();
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let mut wallet_manager = None;
|
||||
///
|
||||
/// // Allow pubkey signers without accompanying signatures
|
||||
/// let config = SignerFromPathConfig {
|
||||
/// allow_null_signer: true,
|
||||
/// };
|
||||
///
|
||||
/// let signer = signer_from_path_with_config(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// &mut wallet_manager,
|
||||
/// &config,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn signer_from_path_with_config(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -819,43 +426,6 @@ pub fn signer_from_path_with_config(
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads the pubkey of a [Signer] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
|
||||
///
|
||||
/// The only difference between this function and [`signer_from_path`] is in the
|
||||
/// case of a "pubkey" path: this function does not require that accompanying
|
||||
/// command line arguments contain an offline signature.
|
||||
///
|
||||
/// See [`signer_from_path`] for full documentation of how this function
|
||||
/// interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::pubkey_from_path;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"));
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
/// let mut wallet_manager = None;
|
||||
/// let pubkey = pubkey_from_path(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// &mut wallet_manager,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
pub fn pubkey_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -955,46 +525,7 @@ pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>>
|
||||
Ok(passphrase)
|
||||
}
|
||||
|
||||
/// Loads a [Keypair] from one of several possible sources.
|
||||
///
|
||||
/// The `path` is not strictly a file system path, but is interpreted as various
|
||||
/// types of _signing source_, depending on its format, one of which is a path
|
||||
/// to a keypair file. Some sources may require user interaction in the course
|
||||
/// of calling this function.
|
||||
///
|
||||
/// This is the same as [`signer_from_path`] except that it only supports
|
||||
/// signing sources that can result in a [Keypair]: prompt for seed phrase,
|
||||
/// keypair file, and stdin.
|
||||
///
|
||||
/// If `confirm_pubkey` is `true` then after deriving the pubkey, the user will
|
||||
/// be prompted to confirm that the pubkey is as expected.
|
||||
///
|
||||
/// See [`signer_from_path`] for full documentation of how this function
|
||||
/// interprets its arguments.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use clap::{App, Arg, value_t_or_exit};
|
||||
/// use solana_clap_utils::keypair::keypair_from_path;
|
||||
///
|
||||
/// let clap_app = App::new("my-program")
|
||||
/// // The argument we'll parse as a signer "path"
|
||||
/// .arg(Arg::with_name("keypair")
|
||||
/// .required(true)
|
||||
/// .help("The default signer"));
|
||||
///
|
||||
/// let clap_matches = clap_app.get_matches();
|
||||
/// let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
///
|
||||
/// let signer = keypair_from_path(
|
||||
/// &clap_matches,
|
||||
/// &keypair_str,
|
||||
/// "keypair",
|
||||
/// false,
|
||||
/// )?;
|
||||
/// # Ok::<(), Box<dyn std::error::Error>>(())
|
||||
/// ```
|
||||
/// Parses a path into a SignerSource and returns a Keypair for supporting SignerSourceKinds
|
||||
pub fn keypair_from_path(
|
||||
matches: &ArgMatches,
|
||||
path: &str,
|
||||
@@ -1044,10 +575,9 @@ pub fn keypair_from_path(
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads user input from stdin to retrieve a seed phrase and passphrase for keypair derivation.
|
||||
///
|
||||
/// Optionally skips validation of seed phrase. Optionally confirms recovered
|
||||
/// public key.
|
||||
/// Reads user input from stdin to retrieve a seed phrase and passphrase for keypair derivation
|
||||
/// Optionally skips validation of seed phrase
|
||||
/// Optionally confirms recovered public key
|
||||
pub fn keypair_from_seed_phrase(
|
||||
keypair_name: &str,
|
||||
skip_validation: bool,
|
||||
@@ -1124,13 +654,9 @@ fn sanitize_seed_phrase(seed_phrase: &str) -> String {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::offline::OfflineArgs;
|
||||
use clap::{value_t_or_exit, App, Arg};
|
||||
use solana_remote_wallet::locator::Manufacturer;
|
||||
use solana_remote_wallet::remote_wallet::initialize_wallet_manager;
|
||||
use solana_sdk::signer::keypair::write_keypair_file;
|
||||
use solana_sdk::system_instruction;
|
||||
use tempfile::{NamedTempFile, TempDir};
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
#[test]
|
||||
fn test_sanitize_seed_phrase() {
|
||||
@@ -1289,41 +815,4 @@ mod tests {
|
||||
} if p == relative_path_str)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn signer_from_path_with_file() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let dir = TempDir::new()?;
|
||||
let dir = dir.path();
|
||||
let keypair_path = dir.join("id.json");
|
||||
let keypair_path_str = keypair_path.to_str().expect("utf-8");
|
||||
|
||||
let keypair = Keypair::new();
|
||||
write_keypair_file(&keypair, &keypair_path)?;
|
||||
|
||||
let args = vec!["program", keypair_path_str];
|
||||
|
||||
let clap_app = App::new("my-program")
|
||||
.arg(
|
||||
Arg::with_name("keypair")
|
||||
.required(true)
|
||||
.help("The signing keypair"),
|
||||
)
|
||||
.offline_args();
|
||||
|
||||
let clap_matches = clap_app.get_matches_from(args);
|
||||
let keypair_str = value_t_or_exit!(clap_matches, "keypair", String);
|
||||
|
||||
let wallet_manager = initialize_wallet_manager()?;
|
||||
|
||||
let signer = signer_from_path(
|
||||
&clap_matches,
|
||||
&keypair_str,
|
||||
"signer",
|
||||
&mut Some(wallet_manager),
|
||||
)?;
|
||||
|
||||
assert_eq!(keypair.pubkey(), signer.pubkey());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-output"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -20,12 +20,12 @@ indicatif = "0.15.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.17" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
|
@@ -576,7 +576,7 @@ impl fmt::Display for CliValidators {
|
||||
for (version, info) in self.stake_by_version.iter() {
|
||||
writeln!(
|
||||
f,
|
||||
"{:<8} - {:4} current validators ({:>5.2}%){}",
|
||||
"{:<8} - {:3} current validators ({:>5.2}%){}",
|
||||
version,
|
||||
info.current_validators,
|
||||
100. * info.current_active_stake as f64 / self.total_active_stake as f64,
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -30,30 +30,30 @@ semver = "1.0.4"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.2" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.8.2" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.8.2" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.17" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.7.17" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.17" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana_rbpf = "=0.2.11"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.2" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.17" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0.21"
|
||||
tiny-bip39 = "0.8.1"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-core = { path = "../core", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -12,9 +12,9 @@ use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
|
||||
use solana_bpf_loader_program::{bpf_verifier, BpfError, ThisInstructionMeter};
|
||||
use solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*};
|
||||
use solana_cli_output::{
|
||||
CliProgram, CliProgramAccountType, CliProgramAuthority, CliProgramBuffer, CliProgramId,
|
||||
CliUpgradeableBuffer, CliUpgradeableBuffers, CliUpgradeableProgram,
|
||||
CliUpgradeableProgramClosed, CliUpgradeablePrograms,
|
||||
display::new_spinner_progress_bar, CliProgram, CliProgramAccountType, CliProgramAuthority,
|
||||
CliProgramBuffer, CliProgramId, CliUpgradeableBuffer, CliUpgradeableBuffers,
|
||||
CliUpgradeableProgram, CliUpgradeableProgramClosed, CliUpgradeablePrograms,
|
||||
};
|
||||
use solana_client::{
|
||||
client_error::ClientErrorKind,
|
||||
@@ -22,6 +22,8 @@ use solana_client::{
|
||||
rpc_config::RpcSendTransactionConfig,
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
|
||||
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
|
||||
rpc_response::Fees,
|
||||
tpu_client::{TpuClient, TpuClientConfig},
|
||||
};
|
||||
use solana_rbpf::vm::{Config, Executable};
|
||||
@@ -31,6 +33,7 @@ use solana_sdk::{
|
||||
account_utils::StateMut,
|
||||
bpf_loader, bpf_loader_deprecated,
|
||||
bpf_loader_upgradeable::{self, UpgradeableLoaderState},
|
||||
commitment_config::CommitmentConfig,
|
||||
instruction::Instruction,
|
||||
instruction::InstructionError,
|
||||
loader_instruction,
|
||||
@@ -39,18 +42,24 @@ use solana_sdk::{
|
||||
packet::PACKET_DATA_SIZE,
|
||||
pubkey::Pubkey,
|
||||
signature::{keypair_from_seed, read_keypair_file, Keypair, Signature, Signer},
|
||||
signers::Signers,
|
||||
system_instruction::{self, SystemError},
|
||||
system_program,
|
||||
transaction::Transaction,
|
||||
transaction::TransactionError,
|
||||
};
|
||||
use solana_transaction_status::TransactionConfirmationStatus;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
error,
|
||||
fs::File,
|
||||
io::{Read, Write},
|
||||
mem::size_of,
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@@ -376,7 +385,6 @@ impl ProgramSubCommands for App<'_, '_> {
|
||||
.subcommand(
|
||||
SubCommand::with_name("deploy")
|
||||
.about("Deploy a program")
|
||||
.setting(AppSettings::Hidden)
|
||||
.arg(
|
||||
Arg::with_name("program_location")
|
||||
.index(1)
|
||||
@@ -2107,29 +2115,29 @@ fn send_deploy_messages(
|
||||
if let Some(write_messages) = write_messages {
|
||||
if let Some(write_signer) = write_signer {
|
||||
trace!("Writing program data");
|
||||
let tpu_client = TpuClient::new(
|
||||
let Fees {
|
||||
blockhash,
|
||||
last_valid_block_height,
|
||||
..
|
||||
} = rpc_client
|
||||
.get_fees_with_commitment(config.commitment)?
|
||||
.value;
|
||||
let mut write_transactions = vec![];
|
||||
for message in write_messages.iter() {
|
||||
let mut tx = Transaction::new_unsigned(message.clone());
|
||||
tx.try_sign(&[payer_signer, write_signer], blockhash)?;
|
||||
write_transactions.push(tx);
|
||||
}
|
||||
|
||||
send_and_confirm_transactions_with_spinner(
|
||||
rpc_client.clone(),
|
||||
&config.websocket_url,
|
||||
TpuClientConfig::default(),
|
||||
)?;
|
||||
let transaction_errors = tpu_client
|
||||
.send_and_confirm_messages_with_spinner(
|
||||
write_messages,
|
||||
&[payer_signer, write_signer],
|
||||
)
|
||||
.map_err(|err| format!("Data writes to account failed: {}", err))?
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if !transaction_errors.is_empty() {
|
||||
for transaction_error in &transaction_errors {
|
||||
error!("{:?}", transaction_error);
|
||||
}
|
||||
return Err(
|
||||
format!("{} write transactions failed", transaction_errors.len()).into(),
|
||||
);
|
||||
}
|
||||
write_transactions,
|
||||
&[payer_signer, write_signer],
|
||||
config.commitment,
|
||||
last_valid_block_height,
|
||||
)
|
||||
.map_err(|err| format!("Data writes to account failed: {}", err))?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2181,8 +2189,9 @@ fn report_ephemeral_mnemonic(words: usize, mnemonic: bip39::Mnemonic) {
|
||||
words
|
||||
);
|
||||
eprintln!("{}\n{}\n{}", divider, phrase, divider);
|
||||
eprintln!("To resume a deploy, pass the recovered keypair as the");
|
||||
eprintln!("[BUFFER_SIGNER] to `solana program deploy` or `solana write-buffer'.");
|
||||
eprintln!("To resume a deploy, pass the recovered keypair as");
|
||||
eprintln!("the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy` or");
|
||||
eprintln!("as the [BUFFER_SIGNER] to `solana program deploy` or `solana write-buffer'.");
|
||||
eprintln!("Or to recover the account's lamports, pass it as the");
|
||||
eprintln!(
|
||||
"[BUFFER_ACCOUNT_ADDRESS] argument to `solana program close`.\n{}",
|
||||
@@ -2190,6 +2199,134 @@ fn report_ephemeral_mnemonic(words: usize, mnemonic: bip39::Mnemonic) {
|
||||
);
|
||||
}
|
||||
|
||||
fn send_and_confirm_transactions_with_spinner<T: Signers>(
|
||||
rpc_client: Arc<RpcClient>,
|
||||
websocket_url: &str,
|
||||
mut transactions: Vec<Transaction>,
|
||||
signer_keys: &T,
|
||||
commitment: CommitmentConfig,
|
||||
mut last_valid_block_height: u64,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
let mut send_retries = 5;
|
||||
|
||||
progress_bar.set_message("Finding leader nodes...");
|
||||
let tpu_client = TpuClient::new(
|
||||
rpc_client.clone(),
|
||||
websocket_url,
|
||||
TpuClientConfig::default(),
|
||||
)?;
|
||||
loop {
|
||||
// Send all transactions
|
||||
let mut pending_transactions = HashMap::new();
|
||||
let num_transactions = transactions.len();
|
||||
for transaction in transactions {
|
||||
if !tpu_client.send_transaction(&transaction) {
|
||||
let _result = rpc_client
|
||||
.send_transaction_with_config(
|
||||
&transaction,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
.ok();
|
||||
}
|
||||
pending_transactions.insert(transaction.signatures[0], transaction);
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions sent",
|
||||
pending_transactions.len(),
|
||||
num_transactions
|
||||
));
|
||||
|
||||
// Throttle transactions to about 100 TPS
|
||||
sleep(Duration::from_millis(10));
|
||||
}
|
||||
|
||||
// Collect statuses for all the transactions, drop those that are confirmed
|
||||
loop {
|
||||
let mut block_height = 0;
|
||||
let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>();
|
||||
for pending_signatures_chunk in
|
||||
pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS)
|
||||
{
|
||||
if let Ok(result) = rpc_client.get_signature_statuses(pending_signatures_chunk) {
|
||||
let statuses = result.value;
|
||||
for (signature, status) in
|
||||
pending_signatures_chunk.iter().zip(statuses.into_iter())
|
||||
{
|
||||
if let Some(status) = status {
|
||||
if let Some(confirmation_status) = &status.confirmation_status {
|
||||
if *confirmation_status != TransactionConfirmationStatus::Processed
|
||||
{
|
||||
let _ = pending_transactions.remove(signature);
|
||||
}
|
||||
} else if status.confirmations.is_none()
|
||||
|| status.confirmations.unwrap() > 1
|
||||
{
|
||||
let _ = pending_transactions.remove(signature);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
block_height = rpc_client.get_block_height()?;
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Transactions confirmed. Retrying in {} blocks",
|
||||
num_transactions - pending_transactions.len(),
|
||||
num_transactions,
|
||||
last_valid_block_height.saturating_sub(block_height)
|
||||
));
|
||||
}
|
||||
|
||||
if pending_transactions.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if block_height > last_valid_block_height {
|
||||
break;
|
||||
}
|
||||
|
||||
for transaction in pending_transactions.values() {
|
||||
if !tpu_client.send_transaction(transaction) {
|
||||
let _result = rpc_client
|
||||
.send_transaction_with_config(
|
||||
transaction,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
}
|
||||
|
||||
if send_retries == 0 {
|
||||
return Err("Transactions failed".into());
|
||||
}
|
||||
send_retries -= 1;
|
||||
|
||||
// Re-sign any failed transactions with a new blockhash and retry
|
||||
let Fees {
|
||||
blockhash,
|
||||
last_valid_block_height: new_last_valid_block_height,
|
||||
..
|
||||
} = rpc_client.get_fees_with_commitment(commitment)?.value;
|
||||
last_valid_block_height = new_last_valid_block_height;
|
||||
transactions = vec![];
|
||||
for (_, mut transaction) in pending_transactions.into_iter() {
|
||||
transaction.try_sign(signer_keys, blockhash)?;
|
||||
transactions.push(transaction);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -24,14 +24,14 @@ semver = "0.11.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.17" }
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tungstenite = "0.10.1"
|
||||
@@ -40,7 +40,7 @@ url = "2.1.1"
|
||||
[dev-dependencies]
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-http-server = "18.0.0"
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -18,6 +18,5 @@ pub mod rpc_filter;
|
||||
pub mod rpc_request;
|
||||
pub mod rpc_response;
|
||||
pub mod rpc_sender;
|
||||
pub mod spinner;
|
||||
pub mod thin_client;
|
||||
pub mod tpu_client;
|
||||
|
@@ -146,8 +146,8 @@ impl RpcSender for MockSender {
|
||||
value: serde_json::to_value(RpcFees {
|
||||
blockhash: PUBKEY.to_string(),
|
||||
fee_calculator: FeeCalculator::default(),
|
||||
last_valid_slot: 1234,
|
||||
last_valid_block_height: 1234,
|
||||
last_valid_slot: 42,
|
||||
last_valid_block_height: 42,
|
||||
})
|
||||
.unwrap(),
|
||||
})?,
|
||||
|
@@ -21,9 +21,9 @@ use {
|
||||
rpc_request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter},
|
||||
rpc_response::*,
|
||||
rpc_sender::*,
|
||||
spinner,
|
||||
},
|
||||
bincode::serialize,
|
||||
indicatif::{ProgressBar, ProgressStyle},
|
||||
log::*,
|
||||
serde_json::{json, Value},
|
||||
solana_account_decoder::{
|
||||
@@ -527,50 +527,34 @@ impl RpcClient {
|
||||
Ok(request)
|
||||
}
|
||||
|
||||
/// Submit a transaction and wait for confirmation.
|
||||
/// Check the confirmation status of a transaction.
|
||||
///
|
||||
/// Once this function returns successfully, the given transaction is
|
||||
/// guaranteed to be processed with the configured [commitment level][cl].
|
||||
/// Returns `true` if the given transaction succeeded and has been committed
|
||||
/// with the configured [commitment level][cl], which can be retrieved with
|
||||
/// the [`commitment`](RpcClient::commitment) method.
|
||||
///
|
||||
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
|
||||
///
|
||||
/// After sending the transaction, this method polls in a loop for the
|
||||
/// status of the transaction until it has ben confirmed.
|
||||
/// Note that this method does not wait for a transaction to be confirmed
|
||||
/// — it only checks whether a transaction has been confirmed. To
|
||||
/// submit a transaction and wait for it to confirm, use
|
||||
/// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction].
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If the transaction is not signed then an error with kind [`RpcError`] is
|
||||
/// returned, containing an [`RpcResponseError`] with `code` set to
|
||||
/// [`JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE`].
|
||||
///
|
||||
/// If the preflight transaction simulation fails then an error with kind
|
||||
/// [`RpcError`] is returned, containing an [`RpcResponseError`] with `code`
|
||||
/// set to [`JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE`].
|
||||
///
|
||||
/// If the receiving node is unhealthy, e.g. it is not fully synced to
|
||||
/// the cluster, then an error with kind [`RpcError`] is returned,
|
||||
/// containing an [`RpcResponseError`] with `code` set to
|
||||
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`].
|
||||
///
|
||||
/// [`RpcResponseError`]: RpcError::RpcResponseError
|
||||
/// [`JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE
|
||||
/// [`JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE
|
||||
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY
|
||||
/// _This method returns `false` if the transaction failed, even if it has
|
||||
/// been confirmed._
|
||||
///
|
||||
/// # RPC Reference
|
||||
///
|
||||
/// This method is built on the [`sendTransaction`] RPC method, and the
|
||||
/// [`getLatestBlockhash`] RPC method.
|
||||
/// This method is built on the [`getSignatureStatuses`] RPC method.
|
||||
///
|
||||
/// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction
|
||||
/// [`getLatestBlockhash`]: https://docs.solana.com/developing/clients/jsonrpc-api#getlatestblockhash
|
||||
/// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use solana_client::{
|
||||
/// # rpc_client::RpcClient,
|
||||
/// # client_error::ClientError,
|
||||
/// # rpc_client::RpcClient,
|
||||
/// # };
|
||||
/// # use solana_sdk::{
|
||||
/// # signature::Signer,
|
||||
@@ -579,110 +563,97 @@ impl RpcClient {
|
||||
/// # system_transaction,
|
||||
/// # };
|
||||
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
/// // Transfer lamports from Alice to Bob and wait for confirmation
|
||||
/// # let alice = Keypair::new();
|
||||
/// # let bob = Keypair::new();
|
||||
/// # let lamports = 50;
|
||||
/// # let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?;
|
||||
/// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?;
|
||||
/// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash);
|
||||
/// let signature = rpc_client.send_and_confirm_transaction(&tx)?;
|
||||
/// let signature = rpc_client.send_transaction(&tx)?;
|
||||
///
|
||||
/// loop {
|
||||
/// let confirmed = rpc_client.confirm_transaction(&signature)?;
|
||||
/// if confirmed {
|
||||
/// break;
|
||||
/// }
|
||||
/// }
|
||||
/// # Ok::<(), ClientError>(())
|
||||
/// ```
|
||||
pub fn send_and_confirm_transaction(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
const SEND_RETRIES: usize = 1;
|
||||
const GET_STATUS_RETRIES: usize = usize::MAX;
|
||||
|
||||
'sending: for _ in 0..SEND_RETRIES {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
|
||||
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
|
||||
let (recent_blockhash, ..) = self
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
|
||||
.value;
|
||||
recent_blockhash
|
||||
} else {
|
||||
transaction.message.recent_blockhash
|
||||
};
|
||||
|
||||
for status_retry in 0..GET_STATUS_RETRIES {
|
||||
match self.get_signature_status(&signature)? {
|
||||
Some(Ok(_)) => return Ok(signature),
|
||||
Some(Err(e)) => return Err(e.into()),
|
||||
None => {
|
||||
let fee_calculator = self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
&recent_blockhash,
|
||||
CommitmentConfig::processed(),
|
||||
)?
|
||||
.value;
|
||||
if fee_calculator.is_none() {
|
||||
// Block hash is not found for some reason
|
||||
break 'sending;
|
||||
} else if cfg!(not(test))
|
||||
// Ignore sleep at last step.
|
||||
&& status_retry < GET_STATUS_RETRIES
|
||||
{
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(RpcError::ForUser(
|
||||
"unable to confirm transaction. \
|
||||
This can happen in situations such as transaction expiration \
|
||||
and insufficient fee-payer funds"
|
||||
.to_string(),
|
||||
)
|
||||
.into())
|
||||
pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
|
||||
Ok(self
|
||||
.confirm_transaction_with_commitment(signature, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner(
|
||||
/// Check the confirmation status of a transaction.
|
||||
///
|
||||
/// Returns an [`RpcResult`] with value `true` if the given transaction
|
||||
/// succeeded and has been committed with the given [commitment level][cl].
|
||||
///
|
||||
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
|
||||
///
|
||||
/// Note that this method does not wait for a transaction to be confirmed
|
||||
/// — it only checks whether a transaction has been confirmed. To
|
||||
/// submit a transaction and wait for it to confirm, use
|
||||
/// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction].
|
||||
///
|
||||
/// _This method returns an [`RpcResult`] with value `false` if the
|
||||
/// transaction failed, even if it has been confirmed._
|
||||
///
|
||||
/// # RPC Reference
|
||||
///
|
||||
/// This method is built on the [`getSignatureStatuses`] RPC method.
|
||||
///
|
||||
/// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use solana_client::{
|
||||
/// # client_error::ClientError,
|
||||
/// # rpc_client::RpcClient,
|
||||
/// # };
|
||||
/// # use solana_sdk::{
|
||||
/// # commitment_config::CommitmentConfig,
|
||||
/// # signature::Signer,
|
||||
/// # signature::Signature,
|
||||
/// # signer::keypair::Keypair,
|
||||
/// # system_transaction,
|
||||
/// # };
|
||||
/// # use std::time::Duration;
|
||||
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
/// // Transfer lamports from Alice to Bob and wait for confirmation
|
||||
/// # let alice = Keypair::new();
|
||||
/// # let bob = Keypair::new();
|
||||
/// # let lamports = 50;
|
||||
/// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?;
|
||||
/// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash);
|
||||
/// let signature = rpc_client.send_transaction(&tx)?;
|
||||
///
|
||||
/// loop {
|
||||
/// let commitment_config = CommitmentConfig::processed();
|
||||
/// let confirmed = rpc_client.confirm_transaction_with_commitment(&signature, commitment_config)?;
|
||||
/// if confirmed.value {
|
||||
/// break;
|
||||
/// }
|
||||
/// }
|
||||
/// # Ok::<(), ClientError>(())
|
||||
/// ```
|
||||
pub fn confirm_transaction_with_commitment(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_commitment(
|
||||
transaction,
|
||||
self.commitment(),
|
||||
)
|
||||
}
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<bool> {
|
||||
let Response { context, value } = self.get_signature_statuses(&[*signature])?;
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner_and_commitment(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
commitment: CommitmentConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_config(
|
||||
transaction,
|
||||
commitment,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner_and_config(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
commitment: CommitmentConfig,
|
||||
config: RpcSendTransactionConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
|
||||
self.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
|
||||
.value
|
||||
.0
|
||||
} else {
|
||||
transaction.message.recent_blockhash
|
||||
};
|
||||
let signature = self.send_transaction_with_config(transaction, config)?;
|
||||
self.confirm_transaction_with_spinner(&signature, &recent_blockhash, commitment)?;
|
||||
Ok(signature)
|
||||
Ok(Response {
|
||||
context,
|
||||
value: value[0]
|
||||
.as_ref()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|result| result.status.is_ok())
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Submits a signed transaction to the network.
|
||||
@@ -766,6 +737,14 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
|
||||
if self.get_node_version()? < semver::Version::new(1, 3, 16) {
|
||||
Ok(UiTransactionEncoding::Base58)
|
||||
} else {
|
||||
Ok(UiTransactionEncoding::Base64)
|
||||
}
|
||||
}
|
||||
|
||||
/// Submits a signed transaction to the network.
|
||||
///
|
||||
/// Before a transaction is processed, the receiving node runs a "preflight
|
||||
@@ -911,251 +890,6 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send<T>(&self, request: RpcRequest, params: Value) -> ClientResult<T>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
assert!(params.is_array() || params.is_null());
|
||||
|
||||
let response = self
|
||||
.sender
|
||||
.send(request, params)
|
||||
.map_err(|err| err.into_with_request(request))?;
|
||||
serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_request(err.into(), request))
|
||||
}
|
||||
|
||||
/// Check the confirmation status of a transaction.
|
||||
///
|
||||
/// Returns `true` if the given transaction succeeded and has been committed
|
||||
/// with the configured [commitment level][cl], which can be retrieved with
|
||||
/// the [`commitment`](RpcClient::commitment) method.
|
||||
///
|
||||
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
|
||||
///
|
||||
/// Note that this method does not wait for a transaction to be confirmed
|
||||
/// — it only checks whether a transaction has been confirmed. To
|
||||
/// submit a transaction and wait for it to confirm, use
|
||||
/// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction].
|
||||
///
|
||||
/// _This method returns `false` if the transaction failed, even if it has
|
||||
/// been confirmed._
|
||||
///
|
||||
/// # RPC Reference
|
||||
///
|
||||
/// This method is built on the [`getSignatureStatuses`] RPC method.
|
||||
///
|
||||
/// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use solana_client::{
|
||||
/// # client_error::ClientError,
|
||||
/// # rpc_client::RpcClient,
|
||||
/// # };
|
||||
/// # use solana_sdk::{
|
||||
/// # signature::Signer,
|
||||
/// # signature::Signature,
|
||||
/// # signer::keypair::Keypair,
|
||||
/// # system_transaction,
|
||||
/// # };
|
||||
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
/// // Transfer lamports from Alice to Bob and wait for confirmation
|
||||
/// # let alice = Keypair::new();
|
||||
/// # let bob = Keypair::new();
|
||||
/// # let lamports = 50;
|
||||
/// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?;
|
||||
/// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash);
|
||||
/// let signature = rpc_client.send_transaction(&tx)?;
|
||||
///
|
||||
/// loop {
|
||||
/// let confirmed = rpc_client.confirm_transaction(&signature)?;
|
||||
/// if confirmed {
|
||||
/// break;
|
||||
/// }
|
||||
/// }
|
||||
/// # Ok::<(), ClientError>(())
|
||||
/// ```
|
||||
pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
|
||||
Ok(self
|
||||
.confirm_transaction_with_commitment(signature, self.commitment())?
|
||||
.value)
|
||||
}
|
||||
|
||||
/// Check the confirmation status of a transaction.
|
||||
///
|
||||
/// Returns an [`RpcResult`] with value `true` if the given transaction
|
||||
/// succeeded and has been committed with the given [commitment level][cl].
|
||||
///
|
||||
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
|
||||
///
|
||||
/// Note that this method does not wait for a transaction to be confirmed
|
||||
/// — it only checks whether a transaction has been confirmed. To
|
||||
/// submit a transaction and wait for it to confirm, use
|
||||
/// [`send_and_confirm_transaction`][RpcClient::send_and_confirm_transaction].
|
||||
///
|
||||
/// _This method returns an [`RpcResult`] with value `false` if the
|
||||
/// transaction failed, even if it has been confirmed._
|
||||
///
|
||||
/// # RPC Reference
|
||||
///
|
||||
/// This method is built on the [`getSignatureStatuses`] RPC method.
|
||||
///
|
||||
/// [`getSignatureStatuses`]: https://docs.solana.com/developing/clients/jsonrpc-api#getsignaturestatuses
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use solana_client::{
|
||||
/// # client_error::ClientError,
|
||||
/// # rpc_client::RpcClient,
|
||||
/// # };
|
||||
/// # use solana_sdk::{
|
||||
/// # commitment_config::CommitmentConfig,
|
||||
/// # signature::Signer,
|
||||
/// # signature::Signature,
|
||||
/// # signer::keypair::Keypair,
|
||||
/// # system_transaction,
|
||||
/// # };
|
||||
/// # use std::time::Duration;
|
||||
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
/// // Transfer lamports from Alice to Bob and wait for confirmation
|
||||
/// # let alice = Keypair::new();
|
||||
/// # let bob = Keypair::new();
|
||||
/// # let lamports = 50;
|
||||
/// let (recent_blockhash, _) = rpc_client.get_recent_blockhash()?;
|
||||
/// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash);
|
||||
/// let signature = rpc_client.send_transaction(&tx)?;
|
||||
///
|
||||
/// loop {
|
||||
/// let commitment_config = CommitmentConfig::processed();
|
||||
/// let confirmed = rpc_client.confirm_transaction_with_commitment(&signature, commitment_config)?;
|
||||
/// if confirmed.value {
|
||||
/// break;
|
||||
/// }
|
||||
/// }
|
||||
/// # Ok::<(), ClientError>(())
|
||||
/// ```
|
||||
pub fn confirm_transaction_with_commitment(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<bool> {
|
||||
let Response { context, value } = self.get_signature_statuses(&[*signature])?;
|
||||
|
||||
Ok(Response {
|
||||
context,
|
||||
value: value[0]
|
||||
.as_ref()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|result| result.status.is_ok())
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn confirm_transaction_with_spinner(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
recent_blockhash: &Hash,
|
||||
commitment: CommitmentConfig,
|
||||
) -> ClientResult<()> {
|
||||
let desired_confirmations = if commitment.is_finalized() {
|
||||
MAX_LOCKOUT_HISTORY + 1
|
||||
} else {
|
||||
1
|
||||
};
|
||||
let mut confirmations = 0;
|
||||
|
||||
let progress_bar = spinner::new_progress_bar();
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Finalizing transaction {}",
|
||||
confirmations, desired_confirmations, signature,
|
||||
));
|
||||
|
||||
let now = Instant::now();
|
||||
let confirm_transaction_initial_timeout = self
|
||||
.config
|
||||
.confirm_transaction_initial_timeout
|
||||
.unwrap_or_default();
|
||||
let (signature, status) = loop {
|
||||
// Get recent commitment in order to count confirmations for successful transactions
|
||||
let status = self
|
||||
.get_signature_status_with_commitment(signature, CommitmentConfig::processed())?;
|
||||
if status.is_none() {
|
||||
let blockhash_not_found = self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
recent_blockhash,
|
||||
CommitmentConfig::processed(),
|
||||
)?
|
||||
.value
|
||||
.is_none();
|
||||
if blockhash_not_found && now.elapsed() >= confirm_transaction_initial_timeout {
|
||||
break (signature, status);
|
||||
}
|
||||
} else {
|
||||
break (signature, status);
|
||||
}
|
||||
|
||||
if cfg!(not(test)) {
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
};
|
||||
if let Some(result) = status {
|
||||
if let Err(err) = result {
|
||||
return Err(err.into());
|
||||
}
|
||||
} else {
|
||||
return Err(RpcError::ForUser(
|
||||
"unable to confirm transaction. \
|
||||
This can happen in situations such as transaction expiration \
|
||||
and insufficient fee-payer funds"
|
||||
.to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
let now = Instant::now();
|
||||
loop {
|
||||
// Return when specified commitment is reached
|
||||
// Failed transactions have already been eliminated, `is_some` check is sufficient
|
||||
if self
|
||||
.get_signature_status_with_commitment(signature, commitment)?
|
||||
.is_some()
|
||||
{
|
||||
progress_bar.set_message("Transaction confirmed");
|
||||
progress_bar.finish_and_clear();
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Finalizing transaction {}",
|
||||
min(confirmations + 1, desired_confirmations),
|
||||
desired_confirmations,
|
||||
signature,
|
||||
));
|
||||
sleep(Duration::from_millis(500));
|
||||
confirmations = self
|
||||
.get_num_blocks_since_signature_confirmation(signature)
|
||||
.unwrap_or(confirmations);
|
||||
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
|
||||
return Err(
|
||||
RpcError::ForUser("transaction not finalized. \
|
||||
This can happen when a transaction lands in an abandoned fork. \
|
||||
Please retry.".to_string()).into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_cluster_transaction_encoding(&self) -> Result<UiTransactionEncoding, RpcError> {
|
||||
if self.get_node_version()? < semver::Version::new(1, 3, 16) {
|
||||
Ok(UiTransactionEncoding::Base58)
|
||||
} else {
|
||||
Ok(UiTransactionEncoding::Base64)
|
||||
}
|
||||
}
|
||||
|
||||
/// Simulates sending a transaction.
|
||||
///
|
||||
/// If the transaction fails, then the [`err`] field of the returned
|
||||
@@ -3489,6 +3223,121 @@ impl RpcClient {
|
||||
self.send(RpcRequest::MinimumLedgerSlot, Value::Null)
|
||||
}
|
||||
|
||||
/// Submit a transaction and wait for confirmation.
|
||||
///
|
||||
/// Once this function returns successfully, the given transaction is
|
||||
/// guaranteed to be processed with the configured [commitment level][cl].
|
||||
///
|
||||
/// [cl]: https://docs.solana.com/developing/clients/jsonrpc-api#configuring-state-commitment
|
||||
///
|
||||
/// After sending the transaction, this method polls in a loop for the
|
||||
/// status of the transaction until it has ben confirmed.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If the transaction is not signed then an error with kind [`RpcError`] is
|
||||
/// returned, containing an [`RpcResponseError`] with `code` set to
|
||||
/// [`JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE`].
|
||||
///
|
||||
/// If the preflight transaction simulation fails then an error with kind
|
||||
/// [`RpcError`] is returned, containing an [`RpcResponseError`] with `code`
|
||||
/// set to [`JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE`].
|
||||
///
|
||||
/// If the receiving node is unhealthy, e.g. it is not fully synced to
|
||||
/// the cluster, then an error with kind [`RpcError`] is returned,
|
||||
/// containing an [`RpcResponseError`] with `code` set to
|
||||
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`].
|
||||
///
|
||||
/// [`RpcResponseError`]: RpcError::RpcResponseError
|
||||
/// [`JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE
|
||||
/// [`JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE
|
||||
/// [`JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY`]: crate::rpc_custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY
|
||||
///
|
||||
/// # RPC Reference
|
||||
///
|
||||
/// This method is built on the [`sendTransaction`] RPC method, and the
|
||||
/// [`getLatestBlockhash`] RPC method.
|
||||
///
|
||||
/// [`sendTransaction`]: https://docs.solana.com/developing/clients/jsonrpc-api#sendtransaction
|
||||
/// [`getLatestBlockhash`]: https://docs.solana.com/developing/clients/jsonrpc-api#getlatestblockhash
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use solana_client::{
|
||||
/// # rpc_client::RpcClient,
|
||||
/// # client_error::ClientError,
|
||||
/// # };
|
||||
/// # use solana_sdk::{
|
||||
/// # signature::Signer,
|
||||
/// # signature::Signature,
|
||||
/// # signer::keypair::Keypair,
|
||||
/// # system_transaction,
|
||||
/// # };
|
||||
/// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
/// # let alice = Keypair::new();
|
||||
/// # let bob = Keypair::new();
|
||||
/// # let lamports = 50;
|
||||
/// # let recent_blockhash = rpc_client.get_recent_blockhash()?.0;
|
||||
/// let tx = system_transaction::transfer(&alice, &bob.pubkey(), lamports, recent_blockhash);
|
||||
/// let signature = rpc_client.send_and_confirm_transaction(&tx)?;
|
||||
/// # Ok::<(), ClientError>(())
|
||||
/// ```
|
||||
pub fn send_and_confirm_transaction(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
const SEND_RETRIES: usize = 1;
|
||||
const GET_STATUS_RETRIES: usize = usize::MAX;
|
||||
|
||||
'sending: for _ in 0..SEND_RETRIES {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
|
||||
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
|
||||
let (recent_blockhash, ..) = self
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
|
||||
.value;
|
||||
recent_blockhash
|
||||
} else {
|
||||
transaction.message.recent_blockhash
|
||||
};
|
||||
|
||||
for status_retry in 0..GET_STATUS_RETRIES {
|
||||
match self.get_signature_status(&signature)? {
|
||||
Some(Ok(_)) => return Ok(signature),
|
||||
Some(Err(e)) => return Err(e.into()),
|
||||
None => {
|
||||
let fee_calculator = self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
&recent_blockhash,
|
||||
CommitmentConfig::processed(),
|
||||
)?
|
||||
.value;
|
||||
if fee_calculator.is_none() {
|
||||
// Block hash is not found by some reason
|
||||
break 'sending;
|
||||
} else if cfg!(not(test))
|
||||
// Ignore sleep at last step.
|
||||
&& status_retry < GET_STATUS_RETRIES
|
||||
{
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(RpcError::ForUser(
|
||||
"unable to confirm transaction. \
|
||||
This can happen in situations such as transaction expiration \
|
||||
and insufficient fee-payer funds"
|
||||
.to_string(),
|
||||
)
|
||||
.into())
|
||||
}
|
||||
|
||||
/// Returns all information associated with the account of the provided pubkey.
|
||||
///
|
||||
/// This method uses the configured [commitment level][cl].
|
||||
@@ -4692,6 +4541,157 @@ impl RpcClient {
|
||||
Ok(confirmations)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_commitment(
|
||||
transaction,
|
||||
self.commitment(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner_and_commitment(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
commitment: CommitmentConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
self.send_and_confirm_transaction_with_spinner_and_config(
|
||||
transaction,
|
||||
commitment,
|
||||
RpcSendTransactionConfig {
|
||||
preflight_commitment: Some(commitment.commitment),
|
||||
..RpcSendTransactionConfig::default()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_transaction_with_spinner_and_config(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
commitment: CommitmentConfig,
|
||||
config: RpcSendTransactionConfig,
|
||||
) -> ClientResult<Signature> {
|
||||
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
|
||||
self.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
|
||||
.value
|
||||
.0
|
||||
} else {
|
||||
transaction.message.recent_blockhash
|
||||
};
|
||||
let signature = self.send_transaction_with_config(transaction, config)?;
|
||||
self.confirm_transaction_with_spinner(&signature, &recent_blockhash, commitment)?;
|
||||
Ok(signature)
|
||||
}
|
||||
|
||||
pub fn confirm_transaction_with_spinner(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
recent_blockhash: &Hash,
|
||||
commitment: CommitmentConfig,
|
||||
) -> ClientResult<()> {
|
||||
let desired_confirmations = if commitment.is_finalized() {
|
||||
MAX_LOCKOUT_HISTORY + 1
|
||||
} else {
|
||||
1
|
||||
};
|
||||
let mut confirmations = 0;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Finalizing transaction {}",
|
||||
confirmations, desired_confirmations, signature,
|
||||
));
|
||||
|
||||
let now = Instant::now();
|
||||
let confirm_transaction_initial_timeout = self
|
||||
.config
|
||||
.confirm_transaction_initial_timeout
|
||||
.unwrap_or_default();
|
||||
let (signature, status) = loop {
|
||||
// Get recent commitment in order to count confirmations for successful transactions
|
||||
let status = self
|
||||
.get_signature_status_with_commitment(signature, CommitmentConfig::processed())?;
|
||||
if status.is_none() {
|
||||
let blockhash_not_found = self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
recent_blockhash,
|
||||
CommitmentConfig::processed(),
|
||||
)?
|
||||
.value
|
||||
.is_none();
|
||||
if blockhash_not_found && now.elapsed() >= confirm_transaction_initial_timeout {
|
||||
break (signature, status);
|
||||
}
|
||||
} else {
|
||||
break (signature, status);
|
||||
}
|
||||
|
||||
if cfg!(not(test)) {
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
};
|
||||
if let Some(result) = status {
|
||||
if let Err(err) = result {
|
||||
return Err(err.into());
|
||||
}
|
||||
} else {
|
||||
return Err(RpcError::ForUser(
|
||||
"unable to confirm transaction. \
|
||||
This can happen in situations such as transaction expiration \
|
||||
and insufficient fee-payer funds"
|
||||
.to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
let now = Instant::now();
|
||||
loop {
|
||||
// Return when specified commitment is reached
|
||||
// Failed transactions have already been eliminated, `is_some` check is sufficient
|
||||
if self
|
||||
.get_signature_status_with_commitment(signature, commitment)?
|
||||
.is_some()
|
||||
{
|
||||
progress_bar.set_message("Transaction confirmed");
|
||||
progress_bar.finish_and_clear();
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Finalizing transaction {}",
|
||||
min(confirmations + 1, desired_confirmations),
|
||||
desired_confirmations,
|
||||
signature,
|
||||
));
|
||||
sleep(Duration::from_millis(500));
|
||||
confirmations = self
|
||||
.get_num_blocks_since_signature_confirmation(signature)
|
||||
.unwrap_or(confirmations);
|
||||
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
|
||||
return Err(
|
||||
RpcError::ForUser("transaction not finalized. \
|
||||
This can happen when a transaction lands in an abandoned fork. \
|
||||
Please retry.".to_string()).into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send<T>(&self, request: RpcRequest, params: Value) -> ClientResult<T>
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
assert!(params.is_array() || params.is_null());
|
||||
|
||||
let response = self
|
||||
.sender
|
||||
.send(request, params)
|
||||
.map_err(|err| err.into_with_request(request))?;
|
||||
serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_request(err.into(), request))
|
||||
}
|
||||
|
||||
pub fn get_transport_stats(&self) -> RpcTransportStats {
|
||||
self.sender.get_transport_stats()
|
||||
}
|
||||
@@ -4725,6 +4725,14 @@ pub struct GetConfirmedSignaturesForAddress2Config {
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
|
||||
|
||||
fn new_spinner_progress_bar() -> ProgressBar {
|
||||
let progress_bar = ProgressBar::new(42);
|
||||
progress_bar
|
||||
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
|
||||
progress_bar.enable_steady_tick(100);
|
||||
progress_bar
|
||||
}
|
||||
|
||||
fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
|
||||
if tls {
|
||||
format!("https://{}", rpc_addr)
|
||||
|
@@ -19,7 +19,6 @@ pub const JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX: i64 = -32010;
|
||||
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011;
|
||||
pub const JSON_RPC_SCAN_ERROR: i64 = -32012;
|
||||
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH: i64 = -32013;
|
||||
pub const JSON_RPC_SERVER_ERROR_BLOCK_STATUS_NOT_AVAILABLE_YET: i64 = -32014;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum RpcCustomError {
|
||||
@@ -55,8 +54,6 @@ pub enum RpcCustomError {
|
||||
ScanError { message: String },
|
||||
#[error("TransactionSignatureLenMismatch")]
|
||||
TransactionSignatureLenMismatch,
|
||||
#[error("BlockStatusNotAvailableYet")]
|
||||
BlockStatusNotAvailableYet { slot: Slot },
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@@ -164,11 +161,6 @@ impl From<RpcCustomError> for Error {
|
||||
message: "Transaction signature length mismatch".to_string(),
|
||||
data: None,
|
||||
},
|
||||
RpcCustomError::BlockStatusNotAvailableYet { slot } => Self {
|
||||
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_BLOCK_STATUS_NOT_AVAILABLE_YET),
|
||||
message: format!("Block status not yet available for slot {}", slot),
|
||||
data: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,11 +0,0 @@
|
||||
//! Spinner creator
|
||||
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
|
||||
pub(crate) fn new_progress_bar() -> ProgressBar {
|
||||
let progress_bar = ProgressBar::new(42);
|
||||
progress_bar
|
||||
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
|
||||
progress_bar.enable_steady_tick(100);
|
||||
progress_bar
|
||||
}
|
@@ -1,21 +1,12 @@
|
||||
use crate::{
|
||||
client_error::ClientError,
|
||||
pubsub_client::{PubsubClient, PubsubClientError, PubsubClientSubscription},
|
||||
rpc_client::RpcClient,
|
||||
rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
|
||||
rpc_response::{Fees, SlotUpdate},
|
||||
spinner,
|
||||
rpc_response::SlotUpdate,
|
||||
};
|
||||
use bincode::serialize;
|
||||
use log::*;
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentConfig,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::SignerError,
|
||||
signers::Signers,
|
||||
transaction::{Transaction, TransactionError},
|
||||
clock::Slot, commitment_config::CommitmentConfig, pubkey::Pubkey, transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet, VecDeque},
|
||||
@@ -25,7 +16,7 @@ use std::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::{sleep, JoinHandle},
|
||||
thread::JoinHandle,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use thiserror::Error;
|
||||
@@ -35,13 +26,9 @@ pub enum TpuSenderError {
|
||||
#[error("Pubsub error: {0:?}")]
|
||||
PubsubError(#[from] PubsubClientError),
|
||||
#[error("RPC error: {0:?}")]
|
||||
RpcError(#[from] ClientError),
|
||||
RpcError(#[from] crate::client_error::ClientError),
|
||||
#[error("IO error: {0:?}")]
|
||||
IoError(#[from] std::io::Error),
|
||||
#[error("Signer error: {0:?}")]
|
||||
SignerError(#[from] SignerError),
|
||||
#[error("Custom error: {0}")]
|
||||
Custom(String),
|
||||
}
|
||||
|
||||
type Result<T> = std::result::Result<T, TpuSenderError>;
|
||||
@@ -75,7 +62,6 @@ pub struct TpuClient {
|
||||
fanout_slots: u64,
|
||||
leader_tpu_service: LeaderTpuService,
|
||||
exit: Arc<AtomicBool>,
|
||||
rpc_client: Arc<RpcClient>,
|
||||
}
|
||||
|
||||
impl TpuClient {
|
||||
@@ -110,161 +96,15 @@ impl TpuClient {
|
||||
config: TpuClientConfig,
|
||||
) -> Result<Self> {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let leader_tpu_service =
|
||||
LeaderTpuService::new(rpc_client.clone(), websocket_url, exit.clone())?;
|
||||
let leader_tpu_service = LeaderTpuService::new(rpc_client, websocket_url, exit.clone())?;
|
||||
|
||||
Ok(Self {
|
||||
send_socket: UdpSocket::bind("0.0.0.0:0").unwrap(),
|
||||
fanout_slots: config.fanout_slots.min(MAX_FANOUT_SLOTS).max(1),
|
||||
leader_tpu_service,
|
||||
exit,
|
||||
rpc_client,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn send_and_confirm_messages_with_spinner<T: Signers>(
|
||||
&self,
|
||||
messages: &[Message],
|
||||
signers: &T,
|
||||
) -> Result<Vec<Option<TransactionError>>> {
|
||||
let mut expired_blockhash_retries = 5;
|
||||
/* Send at ~100 TPS */
|
||||
const SEND_TRANSACTION_INTERVAL: Duration = Duration::from_millis(10);
|
||||
/* Retry batch send after 4 seconds */
|
||||
const TRANSACTION_RESEND_INTERVAL: Duration = Duration::from_secs(4);
|
||||
|
||||
let progress_bar = spinner::new_progress_bar();
|
||||
progress_bar.set_message("Setting up...");
|
||||
|
||||
let mut transactions = messages
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, message)| (i, Transaction::new_unsigned(message.clone())))
|
||||
.collect::<Vec<_>>();
|
||||
let num_transactions = transactions.len() as f64;
|
||||
let mut transaction_errors = vec![None; transactions.len()];
|
||||
let set_message = |confirmed_transactions,
|
||||
block_height: Option<u64>,
|
||||
last_valid_block_height: u64,
|
||||
status: &str| {
|
||||
progress_bar.set_message(&format!(
|
||||
"{:>5.1}% | {:<40}{}",
|
||||
confirmed_transactions as f64 * 100. / num_transactions,
|
||||
status,
|
||||
match block_height {
|
||||
Some(block_height) => format!(
|
||||
" [block height {}; re-sign in {} blocks]",
|
||||
block_height,
|
||||
last_valid_block_height.saturating_sub(block_height),
|
||||
),
|
||||
None => String::new(),
|
||||
},
|
||||
));
|
||||
};
|
||||
|
||||
let mut confirmed_transactions = 0;
|
||||
let mut block_height = self.rpc_client.get_block_height()?;
|
||||
while expired_blockhash_retries > 0 {
|
||||
let Fees {
|
||||
blockhash,
|
||||
fee_calculator: _,
|
||||
last_valid_block_height,
|
||||
} = self.rpc_client.get_fees()?;
|
||||
|
||||
let mut pending_transactions = HashMap::new();
|
||||
for (i, mut transaction) in transactions {
|
||||
transaction.try_sign(signers, blockhash)?;
|
||||
pending_transactions.insert(transaction.signatures[0], (i, transaction));
|
||||
}
|
||||
|
||||
let mut last_resend = Instant::now() - TRANSACTION_RESEND_INTERVAL;
|
||||
while block_height <= last_valid_block_height {
|
||||
let num_transactions = pending_transactions.len();
|
||||
|
||||
// Periodically re-send all pending transactions
|
||||
if Instant::now().duration_since(last_resend) > TRANSACTION_RESEND_INTERVAL {
|
||||
for (index, (_i, transaction)) in pending_transactions.values().enumerate() {
|
||||
if !self.send_transaction(transaction) {
|
||||
let _result = self.rpc_client.send_transaction(transaction).ok();
|
||||
}
|
||||
set_message(
|
||||
confirmed_transactions,
|
||||
None, //block_height,
|
||||
last_valid_block_height,
|
||||
&format!("Sending {}/{} transactions", index + 1, num_transactions,),
|
||||
);
|
||||
sleep(SEND_TRANSACTION_INTERVAL);
|
||||
}
|
||||
last_resend = Instant::now();
|
||||
}
|
||||
|
||||
// Wait for the next block before checking for transaction statuses
|
||||
let mut block_height_refreshes = 10;
|
||||
set_message(
|
||||
confirmed_transactions,
|
||||
Some(block_height),
|
||||
last_valid_block_height,
|
||||
&format!("Waiting for next block, {} pending...", num_transactions),
|
||||
);
|
||||
let mut new_block_height = block_height;
|
||||
while block_height == new_block_height && block_height_refreshes > 0 {
|
||||
sleep(Duration::from_millis(500));
|
||||
new_block_height = self.rpc_client.get_block_height()?;
|
||||
block_height_refreshes -= 1;
|
||||
}
|
||||
block_height = new_block_height;
|
||||
|
||||
// Collect statuses for the transactions, drop those that are confirmed
|
||||
let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>();
|
||||
for pending_signatures_chunk in
|
||||
pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS)
|
||||
{
|
||||
if let Ok(result) = self
|
||||
.rpc_client
|
||||
.get_signature_statuses(pending_signatures_chunk)
|
||||
{
|
||||
let statuses = result.value;
|
||||
for (signature, status) in
|
||||
pending_signatures_chunk.iter().zip(statuses.into_iter())
|
||||
{
|
||||
if let Some(status) = status {
|
||||
if status.satisfies_commitment(self.rpc_client.commitment()) {
|
||||
if let Some((i, _)) = pending_transactions.remove(signature) {
|
||||
confirmed_transactions += 1;
|
||||
if status.err.is_some() {
|
||||
progress_bar.println(format!(
|
||||
"Failed transaction: {:?}",
|
||||
status
|
||||
));
|
||||
}
|
||||
transaction_errors[i] = status.err;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
set_message(
|
||||
confirmed_transactions,
|
||||
Some(block_height),
|
||||
last_valid_block_height,
|
||||
"Checking transaction status...",
|
||||
);
|
||||
}
|
||||
|
||||
if pending_transactions.is_empty() {
|
||||
return Ok(transaction_errors);
|
||||
}
|
||||
}
|
||||
|
||||
transactions = pending_transactions.into_iter().map(|(_k, v)| v).collect();
|
||||
progress_bar.println(format!(
|
||||
"Blockhash expired. {} retries remaining",
|
||||
expired_blockhash_retries
|
||||
));
|
||||
expired_blockhash_retries -= 1;
|
||||
}
|
||||
Err(TpuSenderError::Custom("Max retries exceeded".into()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TpuClient {
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-core"
|
||||
readme = "../README.md"
|
||||
@@ -27,14 +27,13 @@ ed25519-dalek = "=1.0.1"
|
||||
fs_extra = "1.2.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = { version = "1.5", features = ["rayon"] }
|
||||
itertools = "0.9.0"
|
||||
libc = "0.2.81"
|
||||
log = "0.4.11"
|
||||
lru = "0.6.1"
|
||||
miow = "0.2.2"
|
||||
net2 = "0.2.37"
|
||||
num-traits = "0.2"
|
||||
histogram = "0.6.9"
|
||||
itertools = "0.10.1"
|
||||
log = "0.4.14"
|
||||
lru = "0.6.6"
|
||||
rand = "0.7.0"
|
||||
rand_chacha = "0.2.2"
|
||||
rand_core = "0.6.2"
|
||||
@@ -44,34 +43,33 @@ retain_mut = "0.1.2"
|
||||
serde = "1.0.122"
|
||||
serde_bytes = "0.11"
|
||||
serde_derive = "1.0.103"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.8.2" }
|
||||
solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.8.2" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.8.2" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-perf = { path = "../perf", version = "=1.8.2" }
|
||||
solana-poh = { path = "../poh", version = "=1.8.2" }
|
||||
solana-program-test = { path = "../program-test", version = "=1.8.2" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.8.2" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.2" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.17" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.7.17" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.17" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.17" }
|
||||
solana-poh = { path = "../poh", version = "=1.7.17" }
|
||||
solana-program-test = { path = "../program-test", version = "=1.7.17" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.17" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.17" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.8.2" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.17" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
@@ -84,8 +82,8 @@ num_cpus = "1.13.0"
|
||||
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde_json = "1.0.56"
|
||||
serial_test = "0.4.0"
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
symlink = "0.1.0"
|
||||
systemstat = "0.1.5"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
|
@@ -18,7 +18,6 @@ use solana_perf::packet::to_packets_chunked;
|
||||
use solana_perf::test_tx::test_tx;
|
||||
use solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::cost_model::CostModel;
|
||||
use solana_sdk::genesis_config::GenesisConfig;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::message::Message;
|
||||
@@ -34,7 +33,7 @@ use solana_streamer::socket::SocketAddrSpace;
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::mpsc::Receiver;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use test::Bencher;
|
||||
|
||||
@@ -93,7 +92,6 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
|
||||
None::<Box<dyn Fn()>>,
|
||||
&BankingStageStats::default(),
|
||||
&recorder,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
});
|
||||
|
||||
@@ -167,11 +165,6 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
|
||||
bank.ns_per_slot = std::u128::MAX;
|
||||
let bank = Arc::new(Bank::new(&genesis_config));
|
||||
|
||||
// set cost tracker limits to MAX so it will not filter out TXs
|
||||
bank.write_cost_tracker()
|
||||
.unwrap()
|
||||
.set_limits(std::u64::MAX, std::u64::MAX);
|
||||
|
||||
debug!("threads: {} txs: {}", num_threads, txes);
|
||||
|
||||
let transactions = match tx_type {
|
||||
@@ -225,7 +218,6 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
|
||||
vote_receiver,
|
||||
None,
|
||||
s,
|
||||
Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||
|
||||
|
@@ -24,8 +24,6 @@ use solana_runtime::{
|
||||
TransactionExecutionResult,
|
||||
},
|
||||
bank_utils,
|
||||
cost_model::CostModel,
|
||||
cost_tracker::CostTracker,
|
||||
hashed_transaction::HashedTransaction,
|
||||
transaction_batch::TransactionBatch,
|
||||
vote_sender_types::ReplayVoteSender,
|
||||
@@ -35,7 +33,6 @@ use solana_sdk::{
|
||||
Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
|
||||
MAX_TRANSACTION_FORWARDING_DELAY_GPU,
|
||||
},
|
||||
feature_set,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
short_vec::decode_shortu16_len,
|
||||
@@ -55,7 +52,7 @@ use std::{
|
||||
net::{SocketAddr, UdpSocket},
|
||||
ops::DerefMut,
|
||||
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
|
||||
sync::{Arc, Mutex, RwLock, RwLockReadGuard},
|
||||
sync::{Arc, Mutex},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
time::Instant,
|
||||
@@ -97,8 +94,6 @@ pub struct BankingStageStats {
|
||||
current_buffered_packet_batches_count: AtomicUsize,
|
||||
rebuffered_packets_count: AtomicUsize,
|
||||
consumed_buffered_packets_count: AtomicUsize,
|
||||
cost_tracker_check_count: AtomicUsize,
|
||||
cost_forced_retry_transactions_count: AtomicUsize,
|
||||
|
||||
// Timing
|
||||
consume_buffered_packets_elapsed: AtomicU64,
|
||||
@@ -107,11 +102,7 @@ pub struct BankingStageStats {
|
||||
filter_pending_packets_elapsed: AtomicU64,
|
||||
packet_duplicate_check_elapsed: AtomicU64,
|
||||
packet_conversion_elapsed: AtomicU64,
|
||||
unprocessed_packet_conversion_elapsed: AtomicU64,
|
||||
transaction_processing_elapsed: AtomicU64,
|
||||
cost_tracker_update_elapsed: AtomicU64,
|
||||
cost_tracker_clone_elapsed: AtomicU64,
|
||||
cost_tracker_check_elapsed: AtomicU64,
|
||||
}
|
||||
|
||||
impl BankingStageStats {
|
||||
@@ -181,17 +172,6 @@ impl BankingStageStats {
|
||||
.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"cost_tracker_check_count",
|
||||
self.cost_tracker_check_count.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"cost_forced_retry_transactions_count",
|
||||
self.cost_forced_retry_transactions_count
|
||||
.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"consume_buffered_packets_elapsed",
|
||||
self.consume_buffered_packets_elapsed
|
||||
@@ -226,33 +206,12 @@ impl BankingStageStats {
|
||||
self.packet_conversion_elapsed.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"unprocessed_packet_conversion_elapsed",
|
||||
self.unprocessed_packet_conversion_elapsed
|
||||
.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"transaction_processing_elapsed",
|
||||
self.transaction_processing_elapsed
|
||||
.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"cost_tracker_update_elapsed",
|
||||
self.cost_tracker_update_elapsed.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"cost_tracker_clone_elapsed",
|
||||
self.cost_tracker_clone_elapsed.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"cost_tracker_check_elapsed",
|
||||
self.cost_tracker_check_elapsed.swap(0, Ordering::Relaxed) as i64,
|
||||
i64
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -289,7 +248,6 @@ impl BankingStage {
|
||||
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
gossip_vote_sender: ReplayVoteSender,
|
||||
cost_model: Arc<RwLock<CostModel>>,
|
||||
) -> Self {
|
||||
Self::new_num_threads(
|
||||
cluster_info,
|
||||
@@ -300,7 +258,6 @@ impl BankingStage {
|
||||
Self::num_threads(),
|
||||
transaction_status_sender,
|
||||
gossip_vote_sender,
|
||||
cost_model,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -313,7 +270,6 @@ impl BankingStage {
|
||||
num_threads: u32,
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
gossip_vote_sender: ReplayVoteSender,
|
||||
cost_model: Arc<RwLock<CostModel>>,
|
||||
) -> Self {
|
||||
let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - 1) as usize * PACKETS_PER_BATCH);
|
||||
// Single thread to generate entries from many banks.
|
||||
@@ -349,7 +305,6 @@ impl BankingStage {
|
||||
let gossip_vote_sender = gossip_vote_sender.clone();
|
||||
let duplicates = duplicates.clone();
|
||||
let data_budget = data_budget.clone();
|
||||
let cost_model = cost_model.clone();
|
||||
Builder::new()
|
||||
.name("solana-banking-stage-tx".to_string())
|
||||
.spawn(move || {
|
||||
@@ -366,7 +321,6 @@ impl BankingStage {
|
||||
gossip_vote_sender,
|
||||
&duplicates,
|
||||
&data_budget,
|
||||
cost_model,
|
||||
);
|
||||
})
|
||||
.unwrap()
|
||||
@@ -424,7 +378,6 @@ impl BankingStage {
|
||||
has_more_unprocessed_transactions
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn consume_buffered_packets(
|
||||
my_pubkey: &Pubkey,
|
||||
max_tx_ingestion_ns: u128,
|
||||
@@ -435,7 +388,6 @@ impl BankingStage {
|
||||
test_fn: Option<impl Fn()>,
|
||||
banking_stage_stats: &BankingStageStats,
|
||||
recorder: &TransactionRecorder,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) {
|
||||
let mut rebuffered_packets_len = 0;
|
||||
let mut new_tx_count = 0;
|
||||
@@ -453,8 +405,6 @@ impl BankingStage {
|
||||
original_unprocessed_indexes,
|
||||
my_pubkey,
|
||||
*next_leader,
|
||||
banking_stage_stats,
|
||||
cost_model,
|
||||
);
|
||||
Self::update_buffered_packets_with_new_unprocessed(
|
||||
original_unprocessed_indexes,
|
||||
@@ -473,7 +423,6 @@ impl BankingStage {
|
||||
transaction_status_sender.clone(),
|
||||
gossip_vote_sender,
|
||||
banking_stage_stats,
|
||||
cost_model,
|
||||
);
|
||||
if processed < verified_txs_len
|
||||
|| !Bank::should_bank_still_be_processing_txs(
|
||||
@@ -577,7 +526,6 @@ impl BankingStage {
|
||||
banking_stage_stats: &BankingStageStats,
|
||||
recorder: &TransactionRecorder,
|
||||
data_budget: &DataBudget,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) -> BufferedPacketsDecision {
|
||||
let bank_start;
|
||||
let (
|
||||
@@ -618,7 +566,6 @@ impl BankingStage {
|
||||
None::<Box<dyn Fn()>>,
|
||||
banking_stage_stats,
|
||||
recorder,
|
||||
cost_model,
|
||||
);
|
||||
}
|
||||
BufferedPacketsDecision::Forward => {
|
||||
@@ -698,7 +645,6 @@ impl BankingStage {
|
||||
gossip_vote_sender: ReplayVoteSender,
|
||||
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
|
||||
data_budget: &DataBudget,
|
||||
cost_model: Arc<RwLock<CostModel>>,
|
||||
) {
|
||||
let recorder = poh_recorder.lock().unwrap().recorder();
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
@@ -718,7 +664,6 @@ impl BankingStage {
|
||||
&banking_stage_stats,
|
||||
&recorder,
|
||||
data_budget,
|
||||
&cost_model,
|
||||
);
|
||||
if matches!(decision, BufferedPacketsDecision::Hold)
|
||||
|| matches!(decision, BufferedPacketsDecision::ForwardAndHold)
|
||||
@@ -753,7 +698,6 @@ impl BankingStage {
|
||||
&banking_stage_stats,
|
||||
duplicates,
|
||||
&recorder,
|
||||
&cost_model,
|
||||
) {
|
||||
Ok(()) | Err(RecvTimeoutError::Timeout) => (),
|
||||
Err(RecvTimeoutError::Disconnected) => break,
|
||||
@@ -857,6 +801,7 @@ impl BankingStage {
|
||||
};
|
||||
|
||||
let mut execute_timings = ExecuteTimings::default();
|
||||
|
||||
let (
|
||||
mut loaded_accounts,
|
||||
results,
|
||||
@@ -997,12 +942,12 @@ impl BankingStage {
|
||||
) -> (usize, Vec<usize>) {
|
||||
let mut chunk_start = 0;
|
||||
let mut unprocessed_txs = vec![];
|
||||
|
||||
while chunk_start != transactions.len() {
|
||||
let chunk_end = std::cmp::min(
|
||||
transactions.len(),
|
||||
chunk_start + MAX_NUM_TRANSACTIONS_PER_BATCH,
|
||||
);
|
||||
|
||||
let (result, retryable_txs_in_chunk) = Self::process_and_record_transactions(
|
||||
bank,
|
||||
&transactions[chunk_start..chunk_end],
|
||||
@@ -1085,21 +1030,13 @@ impl BankingStage {
|
||||
// This function deserializes packets into transactions, computes the blake3 hash of transaction messages,
|
||||
// and verifies secp256k1 instructions. A list of valid transactions are returned with their message hashes
|
||||
// and packet indexes.
|
||||
// Also returned is packet indexes for transaction should be retried due to cost limits.
|
||||
#[allow(clippy::needless_collect)]
|
||||
fn transactions_from_packets(
|
||||
msgs: &Packets,
|
||||
transaction_indexes: &[usize],
|
||||
feature_set: &Arc<feature_set::FeatureSet>,
|
||||
read_cost_tracker: &RwLockReadGuard<CostTracker>,
|
||||
banking_stage_stats: &BankingStageStats,
|
||||
demote_program_write_locks: bool,
|
||||
libsecp256k1_0_5_upgrade_enabled: bool,
|
||||
votes_only: bool,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) -> (Vec<HashedTransaction<'static>>, Vec<usize>, Vec<usize>) {
|
||||
let mut retryable_transaction_packet_indexes: Vec<usize> = vec![];
|
||||
|
||||
let verified_transactions_with_packet_indexes: Vec<_> = transaction_indexes
|
||||
) -> (Vec<HashedTransaction<'static>>, Vec<usize>) {
|
||||
transaction_indexes
|
||||
.iter()
|
||||
.filter_map(|tx_index| {
|
||||
let p = &msgs.packets[*tx_index];
|
||||
@@ -1108,70 +1045,16 @@ impl BankingStage {
|
||||
}
|
||||
|
||||
let tx: Transaction = limited_deserialize(&p.data[0..p.meta.size]).ok()?;
|
||||
tx.verify_precompiles(feature_set).ok()?;
|
||||
|
||||
Some((tx, *tx_index))
|
||||
tx.verify_precompiles(libsecp256k1_0_5_upgrade_enabled)
|
||||
.ok()?;
|
||||
let message_bytes = Self::packet_message(p)?;
|
||||
let message_hash = Message::hash_raw_message(message_bytes);
|
||||
Some((
|
||||
HashedTransaction::new(Cow::Owned(tx), message_hash),
|
||||
tx_index,
|
||||
))
|
||||
})
|
||||
.collect();
|
||||
banking_stage_stats.cost_tracker_check_count.fetch_add(
|
||||
verified_transactions_with_packet_indexes.len(),
|
||||
Ordering::Relaxed,
|
||||
);
|
||||
|
||||
let mut cost_tracker_check_time = Measure::start("cost_tracker_check_time");
|
||||
let filtered_transactions_with_packet_indexes: Vec<_> = {
|
||||
verified_transactions_with_packet_indexes
|
||||
.into_iter()
|
||||
.filter_map(|(tx, tx_index)| {
|
||||
// put transaction into retry queue if it wouldn't fit
|
||||
// into current bank
|
||||
let is_vote = &msgs.packets[tx_index].meta.is_simple_vote_tx;
|
||||
|
||||
// excluding vote TX from cost_model, for now
|
||||
if !is_vote
|
||||
&& read_cost_tracker
|
||||
.would_transaction_fit(
|
||||
&tx,
|
||||
&cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.calculate_cost(&tx, demote_program_write_locks),
|
||||
)
|
||||
.is_err()
|
||||
{
|
||||
debug!("transaction {:?} would exceed limit", tx);
|
||||
retryable_transaction_packet_indexes.push(tx_index);
|
||||
return None;
|
||||
}
|
||||
Some((tx, tx_index))
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
cost_tracker_check_time.stop();
|
||||
|
||||
let (filtered_transactions, filter_transaction_packet_indexes) =
|
||||
filtered_transactions_with_packet_indexes
|
||||
.into_iter()
|
||||
.filter_map(|(tx, tx_index)| {
|
||||
let p = &msgs.packets[tx_index];
|
||||
let message_bytes = Self::packet_message(p)?;
|
||||
let message_hash = Message::hash_raw_message(message_bytes);
|
||||
Some((
|
||||
HashedTransaction::new(Cow::Owned(tx), message_hash),
|
||||
tx_index,
|
||||
))
|
||||
})
|
||||
.unzip();
|
||||
|
||||
banking_stage_stats
|
||||
.cost_tracker_check_elapsed
|
||||
.fetch_add(cost_tracker_check_time.as_us(), Ordering::Relaxed);
|
||||
|
||||
(
|
||||
filtered_transactions,
|
||||
filter_transaction_packet_indexes,
|
||||
retryable_transaction_packet_indexes,
|
||||
)
|
||||
.unzip()
|
||||
}
|
||||
|
||||
/// This function filters pending packets that are still valid
|
||||
@@ -1213,7 +1096,6 @@ impl BankingStage {
|
||||
Self::filter_valid_transaction_indexes(&results, transaction_to_packet_indexes)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn process_packets_transactions(
|
||||
bank: &Arc<Bank>,
|
||||
bank_creation_time: &Instant,
|
||||
@@ -1223,31 +1105,20 @@ impl BankingStage {
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
gossip_vote_sender: &ReplayVoteSender,
|
||||
banking_stage_stats: &BankingStageStats,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) -> (usize, usize, Vec<usize>) {
|
||||
let mut packet_conversion_time = Measure::start("packet_conversion");
|
||||
let (transactions, transaction_to_packet_indexes, retryable_packet_indexes) =
|
||||
Self::transactions_from_packets(
|
||||
msgs,
|
||||
&packet_indexes,
|
||||
&bank.feature_set,
|
||||
&bank.read_cost_tracker().unwrap(),
|
||||
banking_stage_stats,
|
||||
bank.demote_program_write_locks(),
|
||||
bank.vote_only_bank(),
|
||||
cost_model,
|
||||
);
|
||||
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
|
||||
msgs,
|
||||
&packet_indexes,
|
||||
bank.libsecp256k1_0_5_upgrade_enabled(),
|
||||
bank.vote_only_bank(),
|
||||
);
|
||||
packet_conversion_time.stop();
|
||||
inc_new_counter_info!("banking_stage-packet_conversion", 1);
|
||||
|
||||
banking_stage_stats
|
||||
.cost_forced_retry_transactions_count
|
||||
.fetch_add(retryable_packet_indexes.len(), Ordering::Relaxed);
|
||||
debug!(
|
||||
"bank: {} filtered transactions {} cost limited transactions {}",
|
||||
"bank: {} filtered transactions {}",
|
||||
bank.slot(),
|
||||
transactions.len(),
|
||||
retryable_packet_indexes.len()
|
||||
transactions.len()
|
||||
);
|
||||
|
||||
let tx_len = transactions.len();
|
||||
@@ -1262,29 +1133,11 @@ impl BankingStage {
|
||||
gossip_vote_sender,
|
||||
);
|
||||
process_tx_time.stop();
|
||||
let unprocessed_tx_count = unprocessed_tx_indexes.len();
|
||||
inc_new_counter_info!(
|
||||
"banking_stage-unprocessed_transactions",
|
||||
unprocessed_tx_count
|
||||
);
|
||||
|
||||
// applying cost of processed transactions to shared cost_tracker
|
||||
let mut cost_tracking_time = Measure::start("cost_tracking_time");
|
||||
transactions.iter().enumerate().for_each(|(index, tx)| {
|
||||
if unprocessed_tx_indexes.iter().all(|&i| i != index) {
|
||||
bank.write_cost_tracker().unwrap().add_transaction_cost(
|
||||
tx.transaction(),
|
||||
&cost_model
|
||||
.read()
|
||||
.unwrap()
|
||||
.calculate_cost(tx.transaction(), bank.demote_program_write_locks()),
|
||||
);
|
||||
}
|
||||
});
|
||||
cost_tracking_time.stop();
|
||||
let unprocessed_tx_count = unprocessed_tx_indexes.len();
|
||||
|
||||
let mut filter_pending_packets_time = Measure::start("filter_pending_packets_time");
|
||||
let mut filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
|
||||
let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
|
||||
bank,
|
||||
&transactions,
|
||||
&transaction_to_packet_indexes,
|
||||
@@ -1297,19 +1150,12 @@ impl BankingStage {
|
||||
unprocessed_tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
|
||||
);
|
||||
|
||||
// combine cost-related unprocessed transactions with bank determined unprocessed for
|
||||
// buffering
|
||||
filtered_unprocessed_packet_indexes.extend(retryable_packet_indexes);
|
||||
|
||||
banking_stage_stats
|
||||
.packet_conversion_elapsed
|
||||
.fetch_add(packet_conversion_time.as_us(), Ordering::Relaxed);
|
||||
banking_stage_stats
|
||||
.transaction_processing_elapsed
|
||||
.fetch_add(process_tx_time.as_us(), Ordering::Relaxed);
|
||||
banking_stage_stats
|
||||
.cost_tracker_update_elapsed
|
||||
.fetch_add(cost_tracking_time.as_us(), Ordering::Relaxed);
|
||||
banking_stage_stats
|
||||
.filter_pending_packets_elapsed
|
||||
.fetch_add(filter_pending_packets_time.as_us(), Ordering::Relaxed);
|
||||
@@ -1323,8 +1169,6 @@ impl BankingStage {
|
||||
transaction_indexes: &[usize],
|
||||
my_pubkey: &Pubkey,
|
||||
next_leader: Option<Pubkey>,
|
||||
banking_stage_stats: &BankingStageStats,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) -> Vec<usize> {
|
||||
// Check if we are the next leader. If so, let's not filter the packets
|
||||
// as we'll filter it again while processing the packets.
|
||||
@@ -1335,43 +1179,27 @@ impl BankingStage {
|
||||
}
|
||||
}
|
||||
|
||||
let mut unprocessed_packet_conversion_time =
|
||||
Measure::start("unprocessed_packet_conversion");
|
||||
let (transactions, transaction_to_packet_indexes, retry_packet_indexes) =
|
||||
Self::transactions_from_packets(
|
||||
msgs,
|
||||
&transaction_indexes,
|
||||
&bank.feature_set,
|
||||
&bank.read_cost_tracker().unwrap(),
|
||||
banking_stage_stats,
|
||||
bank.demote_program_write_locks(),
|
||||
bank.vote_only_bank(),
|
||||
cost_model,
|
||||
);
|
||||
unprocessed_packet_conversion_time.stop();
|
||||
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
|
||||
msgs,
|
||||
&transaction_indexes,
|
||||
bank.libsecp256k1_0_5_upgrade_enabled(),
|
||||
bank.vote_only_bank(),
|
||||
);
|
||||
|
||||
let tx_count = transaction_to_packet_indexes.len();
|
||||
|
||||
let unprocessed_tx_indexes = (0..transactions.len()).collect_vec();
|
||||
let mut filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
|
||||
let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
|
||||
bank,
|
||||
&transactions,
|
||||
&transaction_to_packet_indexes,
|
||||
&unprocessed_tx_indexes,
|
||||
);
|
||||
|
||||
filtered_unprocessed_packet_indexes.extend(retry_packet_indexes);
|
||||
|
||||
inc_new_counter_info!(
|
||||
"banking_stage-dropped_tx_before_forwarding",
|
||||
tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
|
||||
);
|
||||
banking_stage_stats
|
||||
.unprocessed_packet_conversion_elapsed
|
||||
.fetch_add(
|
||||
unprocessed_packet_conversion_time.as_us(),
|
||||
Ordering::Relaxed,
|
||||
);
|
||||
|
||||
filtered_unprocessed_packet_indexes
|
||||
}
|
||||
@@ -1407,7 +1235,6 @@ impl BankingStage {
|
||||
banking_stage_stats: &BankingStageStats,
|
||||
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
|
||||
recorder: &TransactionRecorder,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) -> Result<(), RecvTimeoutError> {
|
||||
let mut recv_time = Measure::start("process_packets_recv");
|
||||
let mms = verified_receiver.recv_timeout(recv_timeout)?;
|
||||
@@ -1459,7 +1286,6 @@ impl BankingStage {
|
||||
transaction_status_sender.clone(),
|
||||
gossip_vote_sender,
|
||||
banking_stage_stats,
|
||||
cost_model,
|
||||
);
|
||||
|
||||
new_tx_count += processed;
|
||||
@@ -1491,8 +1317,6 @@ impl BankingStage {
|
||||
&packet_indexes,
|
||||
my_pubkey,
|
||||
next_leader,
|
||||
banking_stage_stats,
|
||||
cost_model,
|
||||
);
|
||||
Self::push_unprocessed(
|
||||
buffered_packets,
|
||||
@@ -1673,7 +1497,6 @@ mod tests {
|
||||
poh_service::PohService,
|
||||
};
|
||||
use solana_rpc::transaction_status_service::TransactionStatusService;
|
||||
use solana_runtime::cost_model::CostModel;
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
instruction::InstructionError,
|
||||
@@ -1730,7 +1553,6 @@ mod tests {
|
||||
gossip_verified_vote_receiver,
|
||||
None,
|
||||
vote_forward_sender,
|
||||
Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
drop(verified_sender);
|
||||
drop(gossip_verified_vote_sender);
|
||||
@@ -1779,7 +1601,6 @@ mod tests {
|
||||
verified_gossip_vote_receiver,
|
||||
None,
|
||||
vote_forward_sender,
|
||||
Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
trace!("sending bank");
|
||||
drop(verified_sender);
|
||||
@@ -1852,7 +1673,6 @@ mod tests {
|
||||
gossip_verified_vote_receiver,
|
||||
None,
|
||||
gossip_vote_sender,
|
||||
Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
|
||||
// fund another account so we can send 2 good transactions in a single batch.
|
||||
@@ -2003,7 +1823,6 @@ mod tests {
|
||||
3,
|
||||
None,
|
||||
gossip_vote_sender,
|
||||
Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
|
||||
// wait for banking_stage to eat the packets
|
||||
@@ -2825,7 +2644,6 @@ mod tests {
|
||||
None::<Box<dyn Fn()>>,
|
||||
&BankingStageStats::default(),
|
||||
&recorder,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions);
|
||||
// When the poh recorder has a bank, should process all non conflicting buffered packets.
|
||||
@@ -2842,7 +2660,6 @@ mod tests {
|
||||
None::<Box<dyn Fn()>>,
|
||||
&BankingStageStats::default(),
|
||||
&recorder,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
if num_expected_unprocessed == 0 {
|
||||
assert!(buffered_packets.is_empty())
|
||||
@@ -2908,7 +2725,6 @@ mod tests {
|
||||
test_fn,
|
||||
&BankingStageStats::default(),
|
||||
&recorder,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
|
||||
// Check everything is correct. All indexes after `interrupted_iteration`
|
||||
@@ -3157,32 +2973,22 @@ mod tests {
|
||||
make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes);
|
||||
|
||||
let mut votes_only = false;
|
||||
let (txs, tx_packet_index, _retryable_packet_indexes) =
|
||||
BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
&Arc::new(feature_set::FeatureSet::default()),
|
||||
&RwLock::new(CostTracker::default()).read().unwrap(),
|
||||
&BankingStageStats::default(),
|
||||
false,
|
||||
votes_only,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
false,
|
||||
votes_only,
|
||||
);
|
||||
assert_eq!(2, txs.len());
|
||||
assert_eq!(vec![0, 1], tx_packet_index);
|
||||
|
||||
votes_only = true;
|
||||
let (txs, tx_packet_index, _retryable_packet_indexes) =
|
||||
BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
&Arc::new(feature_set::FeatureSet::default()),
|
||||
&RwLock::new(CostTracker::default()).read().unwrap(),
|
||||
&BankingStageStats::default(),
|
||||
false,
|
||||
votes_only,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
false,
|
||||
votes_only,
|
||||
);
|
||||
assert_eq!(0, txs.len());
|
||||
assert_eq!(0, tx_packet_index.len());
|
||||
}
|
||||
@@ -3196,32 +3002,22 @@ mod tests {
|
||||
);
|
||||
|
||||
let mut votes_only = false;
|
||||
let (txs, tx_packet_index, _retryable_packet_indexes) =
|
||||
BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
&Arc::new(feature_set::FeatureSet::default()),
|
||||
&RwLock::new(CostTracker::default()).read().unwrap(),
|
||||
&BankingStageStats::default(),
|
||||
false,
|
||||
votes_only,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
false,
|
||||
votes_only,
|
||||
);
|
||||
assert_eq!(3, txs.len());
|
||||
assert_eq!(vec![0, 1, 2], tx_packet_index);
|
||||
|
||||
votes_only = true;
|
||||
let (txs, tx_packet_index, _retryable_packet_indexes) =
|
||||
BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
&Arc::new(feature_set::FeatureSet::default()),
|
||||
&RwLock::new(CostTracker::default()).read().unwrap(),
|
||||
&BankingStageStats::default(),
|
||||
false,
|
||||
votes_only,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
false,
|
||||
votes_only,
|
||||
);
|
||||
assert_eq!(2, txs.len());
|
||||
assert_eq!(vec![0, 2], tx_packet_index);
|
||||
}
|
||||
@@ -3235,32 +3031,22 @@ mod tests {
|
||||
);
|
||||
|
||||
let mut votes_only = false;
|
||||
let (txs, tx_packet_index, _retryable_packet_indexes) =
|
||||
BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
&Arc::new(feature_set::FeatureSet::default()),
|
||||
&RwLock::new(CostTracker::default()).read().unwrap(),
|
||||
&BankingStageStats::default(),
|
||||
false,
|
||||
votes_only,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
false,
|
||||
votes_only,
|
||||
);
|
||||
assert_eq!(3, txs.len());
|
||||
assert_eq!(vec![0, 1, 2], tx_packet_index);
|
||||
|
||||
votes_only = true;
|
||||
let (txs, tx_packet_index, _retryable_packet_indexes) =
|
||||
BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
&Arc::new(feature_set::FeatureSet::default()),
|
||||
&RwLock::new(CostTracker::default()).read().unwrap(),
|
||||
&BankingStageStats::default(),
|
||||
false,
|
||||
votes_only,
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
let (txs, tx_packet_index) = BankingStage::transactions_from_packets(
|
||||
&packets,
|
||||
&packet_indexes,
|
||||
false,
|
||||
votes_only,
|
||||
);
|
||||
assert_eq!(3, txs.len());
|
||||
assert_eq!(vec![0, 1, 2], tx_packet_index);
|
||||
}
|
||||
|
@@ -89,7 +89,6 @@ impl BroadcastRun for BroadcastFakeShredsRun {
|
||||
slot,
|
||||
num_expected_batches: None,
|
||||
slot_start_ts: Instant::now(),
|
||||
was_interrupted: false,
|
||||
};
|
||||
// 3) Start broadcast step
|
||||
//some indicates fake shreds
|
||||
|
@@ -2,7 +2,7 @@ use super::*;
|
||||
|
||||
pub(crate) trait BroadcastStats {
|
||||
fn update(&mut self, new_stats: &Self);
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant, was_interrupted: bool);
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant);
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -10,7 +10,6 @@ pub(crate) struct BroadcastShredBatchInfo {
|
||||
pub(crate) slot: Slot,
|
||||
pub(crate) num_expected_batches: Option<usize>,
|
||||
pub(crate) slot_start_ts: Instant,
|
||||
pub(crate) was_interrupted: bool,
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
@@ -34,39 +33,25 @@ impl BroadcastStats for TransmitShredsStats {
|
||||
self.total_packets += new_stats.total_packets;
|
||||
self.dropped_packets += new_stats.dropped_packets;
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant, was_interrupted: bool) {
|
||||
if was_interrupted {
|
||||
datapoint_info!(
|
||||
"broadcast-transmit-shreds-interrupted-stats",
|
||||
("slot", slot as i64, i64),
|
||||
("transmit_elapsed", self.transmit_elapsed as i64, i64),
|
||||
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
|
||||
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
("shred_select", self.shred_select as i64, i64),
|
||||
("total_packets", self.total_packets as i64, i64),
|
||||
("dropped_packets", self.dropped_packets as i64, i64),
|
||||
);
|
||||
} else {
|
||||
datapoint_info!(
|
||||
"broadcast-transmit-shreds-stats",
|
||||
("slot", slot as i64, i64),
|
||||
(
|
||||
"end_to_end_elapsed",
|
||||
// `slot_start` signals when the first batch of shreds was
|
||||
// received, used to measure duration of broadcast
|
||||
slot_start.elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
("transmit_elapsed", self.transmit_elapsed as i64, i64),
|
||||
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
|
||||
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
("shred_select", self.shred_select as i64, i64),
|
||||
("total_packets", self.total_packets as i64, i64),
|
||||
("dropped_packets", self.dropped_packets as i64, i64),
|
||||
);
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
|
||||
datapoint_info!(
|
||||
"broadcast-transmit-shreds-stats",
|
||||
("slot", slot as i64, i64),
|
||||
(
|
||||
"end_to_end_elapsed",
|
||||
// `slot_start` signals when the first batch of shreds was
|
||||
// received, used to measure duration of broadcast
|
||||
slot_start.elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
("transmit_elapsed", self.transmit_elapsed as i64, i64),
|
||||
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
|
||||
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
("shred_select", self.shred_select as i64, i64),
|
||||
("total_packets", self.total_packets as i64, i64),
|
||||
("dropped_packets", self.dropped_packets as i64, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,37 +65,24 @@ impl BroadcastStats for InsertShredsStats {
|
||||
self.insert_shreds_elapsed += new_stats.insert_shreds_elapsed;
|
||||
self.num_shreds += new_stats.num_shreds;
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant, was_interrupted: bool) {
|
||||
if was_interrupted {
|
||||
datapoint_info!(
|
||||
"broadcast-insert-shreds-interrupted-stats",
|
||||
("slot", slot as i64, i64),
|
||||
(
|
||||
"insert_shreds_elapsed",
|
||||
self.insert_shreds_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
);
|
||||
} else {
|
||||
datapoint_info!(
|
||||
"broadcast-insert-shreds-stats",
|
||||
("slot", slot as i64, i64),
|
||||
(
|
||||
"end_to_end_elapsed",
|
||||
// `slot_start` signals when the first batch of shreds was
|
||||
// received, used to measure duration of broadcast
|
||||
slot_start.elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"insert_shreds_elapsed",
|
||||
self.insert_shreds_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
);
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
|
||||
datapoint_info!(
|
||||
"broadcast-insert-shreds-stats",
|
||||
("slot", slot as i64, i64),
|
||||
(
|
||||
"end_to_end_elapsed",
|
||||
// `slot_start` signals when the first batch of shreds was
|
||||
// received, used to measure duration of broadcast
|
||||
slot_start.elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"insert_shreds_elapsed",
|
||||
self.insert_shreds_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,11 +128,9 @@ impl<T: BroadcastStats + Default> SlotBroadcastStats<T> {
|
||||
}
|
||||
if let Some(num_expected_batches) = slot_batch_counter.num_expected_batches {
|
||||
if slot_batch_counter.num_batches == num_expected_batches {
|
||||
slot_batch_counter.broadcast_shred_stats.report_stats(
|
||||
batch_info.slot,
|
||||
batch_info.slot_start_ts,
|
||||
batch_info.was_interrupted,
|
||||
);
|
||||
slot_batch_counter
|
||||
.broadcast_shred_stats
|
||||
.report_stats(batch_info.slot, batch_info.slot_start_ts);
|
||||
should_delete = true;
|
||||
}
|
||||
}
|
||||
@@ -189,7 +159,7 @@ mod test {
|
||||
self.count += new_stats.count;
|
||||
self.sender = new_stats.sender.clone();
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant, _was_interrupted: bool) {
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
|
||||
self.sender
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
@@ -216,7 +186,6 @@ mod test {
|
||||
slot: 0,
|
||||
num_expected_batches: Some(2),
|
||||
slot_start_ts: start,
|
||||
was_interrupted: false,
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -273,7 +242,6 @@ mod test {
|
||||
slot: 0,
|
||||
num_expected_batches: None,
|
||||
slot_start_ts: start,
|
||||
was_interrupted: false,
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -297,7 +265,6 @@ mod test {
|
||||
slot,
|
||||
num_expected_batches: None,
|
||||
slot_start_ts: start,
|
||||
was_interrupted: false,
|
||||
};
|
||||
if i == round % num_threads {
|
||||
broadcast_batch_info.num_expected_batches = Some(num_threads);
|
||||
|
@@ -97,7 +97,7 @@ impl StandardBroadcastRun {
|
||||
stats,
|
||||
);
|
||||
shreds.insert(0, shred);
|
||||
self.report_and_reset_stats(true);
|
||||
self.report_and_reset_stats();
|
||||
self.unfinished_slot = None;
|
||||
shreds
|
||||
}
|
||||
@@ -245,7 +245,6 @@ impl StandardBroadcastRun {
|
||||
"Old broadcast start time for previous slot must exist if the previous slot
|
||||
was interrupted",
|
||||
),
|
||||
was_interrupted: true,
|
||||
});
|
||||
let shreds = Arc::new(prev_slot_shreds);
|
||||
debug_assert!(shreds.iter().all(|shred| shred.slot() == slot));
|
||||
@@ -268,7 +267,6 @@ impl StandardBroadcastRun {
|
||||
slot_start_ts: self
|
||||
.slot_broadcast_start
|
||||
.expect("Start timestamp must exist for a slot if we're broadcasting the slot"),
|
||||
was_interrupted: false,
|
||||
});
|
||||
get_leader_schedule_time.stop();
|
||||
|
||||
@@ -304,7 +302,7 @@ impl StandardBroadcastRun {
|
||||
self.process_shreds_stats.update(&process_stats);
|
||||
|
||||
if last_tick_height == bank.max_tick_height() {
|
||||
self.report_and_reset_stats(false);
|
||||
self.report_and_reset_stats();
|
||||
self.unfinished_slot = None;
|
||||
}
|
||||
|
||||
@@ -387,59 +385,35 @@ impl StandardBroadcastRun {
|
||||
transmit_shreds_stats.update(new_transmit_shreds_stats, broadcast_shred_batch_info);
|
||||
}
|
||||
|
||||
fn report_and_reset_stats(&mut self, was_interrupted: bool) {
|
||||
fn report_and_reset_stats(&mut self) {
|
||||
let stats = &self.process_shreds_stats;
|
||||
let unfinished_slot = self.unfinished_slot.as_ref().unwrap();
|
||||
if was_interrupted {
|
||||
datapoint_info!(
|
||||
"broadcast-process-shreds-interrupted-stats",
|
||||
("slot", unfinished_slot.slot as i64, i64),
|
||||
("shredding_time", stats.shredding_elapsed, i64),
|
||||
("receive_time", stats.receive_elapsed, i64),
|
||||
(
|
||||
"num_data_shreds",
|
||||
unfinished_slot.next_shred_index as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"get_leader_schedule_time",
|
||||
stats.get_leader_schedule_elapsed,
|
||||
i64
|
||||
),
|
||||
("serialize_shreds_time", stats.serialize_elapsed, i64),
|
||||
("gen_data_time", stats.gen_data_elapsed, i64),
|
||||
("gen_coding_time", stats.gen_coding_elapsed, i64),
|
||||
("sign_coding_time", stats.sign_coding_elapsed, i64),
|
||||
("coding_send_time", stats.coding_send_elapsed, i64),
|
||||
);
|
||||
} else {
|
||||
datapoint_info!(
|
||||
"broadcast-process-shreds-stats",
|
||||
("slot", unfinished_slot.slot as i64, i64),
|
||||
("shredding_time", stats.shredding_elapsed, i64),
|
||||
("receive_time", stats.receive_elapsed, i64),
|
||||
(
|
||||
"num_data_shreds",
|
||||
unfinished_slot.next_shred_index as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"slot_broadcast_time",
|
||||
self.slot_broadcast_start.unwrap().elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"get_leader_schedule_time",
|
||||
stats.get_leader_schedule_elapsed,
|
||||
i64
|
||||
),
|
||||
("serialize_shreds_time", stats.serialize_elapsed, i64),
|
||||
("gen_data_time", stats.gen_data_elapsed, i64),
|
||||
("gen_coding_time", stats.gen_coding_elapsed, i64),
|
||||
("sign_coding_time", stats.sign_coding_elapsed, i64),
|
||||
("coding_send_time", stats.coding_send_elapsed, i64),
|
||||
);
|
||||
}
|
||||
datapoint_info!(
|
||||
"broadcast-process-shreds-stats",
|
||||
("slot", unfinished_slot.slot as i64, i64),
|
||||
("shredding_time", stats.shredding_elapsed, i64),
|
||||
("receive_time", stats.receive_elapsed, i64),
|
||||
(
|
||||
"num_data_shreds",
|
||||
unfinished_slot.next_shred_index as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"slot_broadcast_time",
|
||||
self.slot_broadcast_start.unwrap().elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"get_leader_schedule_time",
|
||||
stats.get_leader_schedule_elapsed,
|
||||
i64
|
||||
),
|
||||
("serialize_shreds_time", stats.serialize_elapsed, i64),
|
||||
("gen_data_time", stats.gen_data_elapsed, i64),
|
||||
("gen_coding_time", stats.gen_coding_elapsed, i64),
|
||||
("sign_coding_time", stats.sign_coding_elapsed, i64),
|
||||
("coding_send_time", stats.coding_send_elapsed, i64),
|
||||
);
|
||||
self.process_shreds_stats.reset();
|
||||
}
|
||||
}
|
||||
|
@@ -318,7 +318,6 @@ mod tests {
|
||||
super::*,
|
||||
rand::{seq::SliceRandom, Rng},
|
||||
solana_gossip::{
|
||||
crds::GossipRoute,
|
||||
crds_value::{CrdsData, CrdsValue},
|
||||
deprecated::{
|
||||
shuffle_peers_and_index, sorted_retransmit_peers_and_stakes,
|
||||
@@ -385,10 +384,7 @@ mod tests {
|
||||
for node in nodes.iter().skip(1) {
|
||||
let node = CrdsData::ContactInfo(node.clone());
|
||||
let node = CrdsValue::new_unsigned(node);
|
||||
assert_eq!(
|
||||
gossip.crds.insert(node, now, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(gossip.crds.insert(node, now), Ok(()));
|
||||
}
|
||||
}
|
||||
(nodes, stakes, cluster_info)
|
||||
|
@@ -1,303 +0,0 @@
|
||||
//! this service receives instruction ExecuteTimings from replay_stage,
|
||||
//! update cost_model which is shared with banking_stage to optimize
|
||||
//! packing transactions into block; it also triggers persisting cost
|
||||
//! table to blockstore.
|
||||
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::{bank::Bank, bank::ExecuteTimings, cost_model::CostModel};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::Receiver,
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct CostUpdateServiceTiming {
|
||||
last_print: u64,
|
||||
update_cost_model_count: u64,
|
||||
update_cost_model_elapsed: u64,
|
||||
persist_cost_table_elapsed: u64,
|
||||
}
|
||||
|
||||
impl CostUpdateServiceTiming {
|
||||
fn update(
|
||||
&mut self,
|
||||
update_cost_model_count: u64,
|
||||
update_cost_model_elapsed: u64,
|
||||
persist_cost_table_elapsed: u64,
|
||||
) {
|
||||
self.update_cost_model_count += update_cost_model_count;
|
||||
self.update_cost_model_elapsed += update_cost_model_elapsed;
|
||||
self.persist_cost_table_elapsed += persist_cost_table_elapsed;
|
||||
|
||||
let now = timestamp();
|
||||
let elapsed_ms = now - self.last_print;
|
||||
if elapsed_ms > 1000 {
|
||||
datapoint_info!(
|
||||
"cost-update-service-stats",
|
||||
("total_elapsed_us", elapsed_ms * 1000, i64),
|
||||
(
|
||||
"update_cost_model_count",
|
||||
self.update_cost_model_count as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"update_cost_model_elapsed",
|
||||
self.update_cost_model_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"persist_cost_table_elapsed",
|
||||
self.persist_cost_table_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
);
|
||||
|
||||
*self = CostUpdateServiceTiming::default();
|
||||
self.last_print = now;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub enum CostUpdate {
|
||||
FrozenBank { bank: Arc<Bank> },
|
||||
ExecuteTiming { execute_timings: ExecuteTimings },
|
||||
}
|
||||
|
||||
pub type CostUpdateReceiver = Receiver<CostUpdate>;
|
||||
|
||||
pub struct CostUpdateService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl CostUpdateService {
    /// Spawns the cost-update service thread.
    ///
    /// The thread drains `cost_update_receiver`, folds execution timings into
    /// `cost_model`, and persists the resulting cost table into `blockstore`.
    /// It runs until `exit` is set.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(
        exit: Arc<AtomicBool>,
        blockstore: Arc<Blockstore>,
        cost_model: Arc<RwLock<CostModel>>,
        cost_update_receiver: CostUpdateReceiver,
    ) -> Self {
        let thread_hdl = Builder::new()
            .name("solana-cost-update-service".to_string())
            .spawn(move || {
                Self::service_loop(exit, blockstore, cost_model, cost_update_receiver);
            })
            .unwrap();

        Self { thread_hdl }
    }

    /// Blocks until the service thread terminates (after `exit` is set).
    pub fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }

    /// Main loop: every ~100ms, drain all pending updates, apply them to the
    /// cost model, and persist the table if anything changed.
    fn service_loop(
        exit: Arc<AtomicBool>,
        blockstore: Arc<Blockstore>,
        cost_model: Arc<RwLock<CostModel>>,
        cost_update_receiver: CostUpdateReceiver,
    ) {
        let mut cost_update_service_timing = CostUpdateServiceTiming::default();
        let mut dirty: bool;
        let mut update_count: u64;
        let wait_timer = Duration::from_millis(100);

        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }

            dirty = false;
            update_count = 0_u64;
            let mut update_cost_model_time = Measure::start("update_cost_model_time");
            // try_iter drains whatever is queued without blocking; the pacing
            // comes from the sleep at the bottom of the loop.
            for cost_update in cost_update_receiver.try_iter() {
                match cost_update {
                    CostUpdate::FrozenBank { bank } => {
                        // Frozen banks only trigger stats reporting; they do
                        // not modify the cost model.
                        bank.read_cost_tracker().unwrap().report_stats(bank.slot());
                    }
                    CostUpdate::ExecuteTiming { execute_timings } => {
                        dirty |= Self::update_cost_model(&cost_model, &execute_timings);
                        update_count += 1;
                    }
                }
            }
            update_cost_model_time.stop();

            let mut persist_cost_table_time = Measure::start("persist_cost_table_time");
            // Only hit the blockstore when at least one upsert succeeded.
            if dirty {
                Self::persist_cost_table(&blockstore, &cost_model);
            }
            persist_cost_table_time.stop();

            cost_update_service_timing.update(
                update_count,
                update_cost_model_time.as_us(),
                persist_cost_table_time.as_us(),
            );

            thread::sleep(wait_timer);
        }
    }

    /// Folds one batch of per-program execution timings into the cost model.
    ///
    /// For each program with a nonzero count, upserts the average units per
    /// execution. Returns true if any upsert succeeded (i.e. the table should
    /// be re-persisted).
    fn update_cost_model(cost_model: &RwLock<CostModel>, execute_timings: &ExecuteTimings) -> bool {
        let mut dirty = false;
        {
            // Scope the write lock to the upsert loop; the trailing debug!
            // below re-acquires only a read lock.
            let mut cost_model_mutable = cost_model.write().unwrap();
            for (program_id, timing) in &execute_timings.details.per_program_timings {
                if timing.count < 1 {
                    continue;
                }
                // Average compute units per execution of this program.
                let units = timing.accumulated_units / timing.count as u64;
                match cost_model_mutable.upsert_instruction_cost(program_id, units) {
                    Ok(c) => {
                        debug!(
                            "after replayed into bank, instruction {:?} has averaged cost {}",
                            program_id, c
                        );
                        dirty = true;
                    }
                    Err(err) => {
                        // A failed upsert is logged but does not mark the
                        // table dirty.
                        debug!(
                            "after replayed into bank, instruction {:?} failed to update cost, err: {}",
                            program_id, err
                        );
                    }
                }
            }
        }
        debug!(
           "after replayed into bank, updated cost model instruction cost table, current values: {:?}",
           cost_model.read().unwrap().get_instruction_cost_table()
        );
        dirty
    }

    /// Writes the full instruction cost table to the blockstore, deleting any
    /// previously-persisted program entries that are no longer in the table.
    ///
    /// Holds the cost model's read lock for the entire blockstore round trip;
    /// writers (update_cost_model) are blocked meanwhile.
    fn persist_cost_table(blockstore: &Blockstore, cost_model: &RwLock<CostModel>) {
        let cost_model_read = cost_model.read().unwrap();
        let cost_table = cost_model_read.get_instruction_cost_table();
        let db_records = blockstore.read_program_costs().expect("read programs");

        // delete records from blockstore if they are no longer in cost_table
        db_records.iter().for_each(|(pubkey, _)| {
            if cost_table.get(pubkey).is_none() {
                blockstore
                    .delete_program_cost(pubkey)
                    .expect("delete old program");
            }
        });

        for (key, cost) in cost_table.iter() {
            blockstore
                .write_program_cost(key, cost)
                .expect("persist program costs to blockstore");
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use solana_runtime::message_processor::ProgramTiming;
    use solana_sdk::pubkey::Pubkey;

    /// Number of entries currently in the model's instruction cost table.
    fn table_len(cost_model: &Arc<RwLock<CostModel>>) -> usize {
        cost_model.read().unwrap().get_instruction_cost_table().len()
    }

    /// Recorded cost for `key`, if the table has an entry for it.
    fn cost_of(cost_model: &Arc<RwLock<CostModel>>, key: &Pubkey) -> Option<u64> {
        cost_model
            .read()
            .unwrap()
            .get_instruction_cost_table()
            .get(key)
            .copied()
    }

    #[test]
    fn test_update_cost_model_with_empty_execute_timings() {
        let cost_model = Arc::new(RwLock::new(CostModel::default()));
        let no_timings = ExecuteTimings::default();

        CostUpdateService::update_cost_model(&cost_model, &no_timings);

        // Nothing was observed, so the table must remain empty.
        assert_eq!(table_len(&cost_model), 0);
    }

    #[test]
    fn test_update_cost_model_with_execute_timings() {
        let cost_model = Arc::new(RwLock::new(CostModel::default()));
        let mut execute_timings = ExecuteTimings::default();
        let program_key_1 = Pubkey::new_unique();

        // First observation: a brand-new program is inserted with
        // cost = accumulated_units / count.
        execute_timings.details.per_program_timings.insert(
            program_key_1,
            ProgramTiming {
                accumulated_us: 1000,
                accumulated_units: 100,
                count: 10,
            },
        );
        let mut expected_cost: u64 = 100 / 10;
        CostUpdateService::update_cost_model(&cost_model, &execute_timings);
        assert_eq!(table_len(&cost_model), 1);
        assert_eq!(cost_of(&cost_model, &program_key_1), Some(expected_cost));

        // Second observation: the new sample is averaged with the existing
        // entry, i.e. Average(new_value, existing_value).
        execute_timings.details.per_program_timings.insert(
            program_key_1,
            ProgramTiming {
                accumulated_us: 2000,
                accumulated_units: 200,
                count: 10,
            },
        );
        expected_cost = ((200 / 10) + expected_cost) / 2;
        CostUpdateService::update_cost_model(&cost_model, &execute_timings);
        assert_eq!(table_len(&cost_model), 1);
        assert_eq!(cost_of(&cost_model, &program_key_1), Some(expected_cost));
    }
}
|
@@ -19,7 +19,6 @@ pub mod cluster_slots_service;
|
||||
pub mod commitment_service;
|
||||
pub mod completed_data_sets_service;
|
||||
pub mod consensus;
|
||||
pub mod cost_update_service;
|
||||
pub mod fetch_stage;
|
||||
pub mod fork_choice;
|
||||
pub mod gen_keys;
|
||||
@@ -47,7 +46,6 @@ pub mod sigverify;
|
||||
pub mod sigverify_shreds;
|
||||
pub mod sigverify_stage;
|
||||
pub mod snapshot_packager_service;
|
||||
pub mod system_monitor_service;
|
||||
pub mod test_validator;
|
||||
pub mod tpu;
|
||||
pub mod tree_diff;
|
||||
|
@@ -119,43 +119,6 @@ impl ReplaySlotStats {
|
||||
i64
|
||||
),
|
||||
);
|
||||
|
||||
let mut per_pubkey_timings: Vec<_> = self
|
||||
.execute_timings
|
||||
.details
|
||||
.per_program_timings
|
||||
.iter()
|
||||
.collect();
|
||||
per_pubkey_timings.sort_by(|a, b| b.1.accumulated_us.cmp(&a.1.accumulated_us));
|
||||
let (total_us, total_units, total_count) =
|
||||
per_pubkey_timings
|
||||
.iter()
|
||||
.fold((0, 0, 0), |(sum_us, sum_units, sum_count), a| {
|
||||
(
|
||||
sum_us + a.1.accumulated_us,
|
||||
sum_units + a.1.accumulated_units,
|
||||
sum_count + a.1.count,
|
||||
)
|
||||
});
|
||||
|
||||
for (pubkey, time) in per_pubkey_timings.iter().take(5) {
|
||||
datapoint_info!(
|
||||
"per_program_timings",
|
||||
("slot", slot as i64, i64),
|
||||
("pubkey", pubkey.to_string(), String),
|
||||
("execute_us", time.accumulated_us, i64),
|
||||
("accumulated_units", time.accumulated_units, i64),
|
||||
("count", time.count, i64)
|
||||
);
|
||||
}
|
||||
datapoint_info!(
|
||||
"per_program_timings",
|
||||
("slot", slot as i64, i64),
|
||||
("pubkey", "all", String),
|
||||
("execute_us", total_us, i64),
|
||||
("accumulated_units", total_units, i64),
|
||||
("count", total_count, i64)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -13,12 +13,12 @@ use crate::{
|
||||
consensus::{
|
||||
ComputedBankState, Stake, SwitchForkDecision, Tower, VotedStakes, SWITCH_FORK_THRESHOLD,
|
||||
},
|
||||
cost_update_service::CostUpdate,
|
||||
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
|
||||
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
|
||||
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
|
||||
progress_map::{ForkProgress, ProgressMap, PropagatedStats},
|
||||
repair_service::DuplicateSlotsResetReceiver,
|
||||
result::Result,
|
||||
rewards_recorder_service::RewardsRecorderSender,
|
||||
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
|
||||
voting_service::VoteOp,
|
||||
@@ -42,7 +42,7 @@ use solana_rpc::{
|
||||
};
|
||||
use solana_runtime::{
|
||||
accounts_background_service::AbsRequestSender,
|
||||
bank::{Bank, ExecuteTimings, NewBankOptions},
|
||||
bank::{Bank, NewBankOptions},
|
||||
bank_forks::BankForks,
|
||||
commitment::BlockCommitmentCache,
|
||||
vote_sender_types::ReplayVoteSender,
|
||||
@@ -281,7 +281,7 @@ impl ReplayTiming {
|
||||
"process_duplicate_slots_elapsed",
|
||||
self.process_duplicate_slots_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
)
|
||||
);
|
||||
|
||||
*self = ReplayTiming::default();
|
||||
@@ -291,7 +291,7 @@ impl ReplayTiming {
|
||||
}
|
||||
|
||||
pub struct ReplayStage {
|
||||
t_replay: JoinHandle<()>,
|
||||
t_replay: JoinHandle<Result<()>>,
|
||||
commitment_service: AggregateCommitmentService,
|
||||
}
|
||||
|
||||
@@ -315,7 +315,6 @@ impl ReplayStage {
|
||||
gossip_verified_vote_hash_receiver: GossipVerifiedVoteHashReceiver,
|
||||
cluster_slots_update_sender: ClusterSlotsUpdateSender,
|
||||
voting_sender: Sender<VoteOp>,
|
||||
cost_update_sender: Sender<CostUpdate>,
|
||||
) -> Self {
|
||||
let ReplayStageConfig {
|
||||
my_pubkey,
|
||||
@@ -413,7 +412,6 @@ impl ReplayStage {
|
||||
&mut unfrozen_gossip_verified_vote_hashes,
|
||||
&mut latest_validator_votes_for_frozen_banks,
|
||||
&cluster_slots_update_sender,
|
||||
&cost_update_sender,
|
||||
);
|
||||
replay_active_banks_time.stop();
|
||||
|
||||
@@ -744,6 +742,7 @@ impl ReplayStage {
|
||||
process_duplicate_slots_time.as_us(),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
@@ -1691,11 +1690,9 @@ impl ReplayStage {
|
||||
unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes,
|
||||
latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks,
|
||||
cluster_slots_update_sender: &ClusterSlotsUpdateSender,
|
||||
cost_update_sender: &Sender<CostUpdate>,
|
||||
) -> bool {
|
||||
let mut did_complete_bank = false;
|
||||
let mut tx_count = 0;
|
||||
let mut execute_timings = ExecuteTimings::default();
|
||||
let active_banks = bank_forks.read().unwrap().active_banks();
|
||||
trace!("active banks {:?}", active_banks);
|
||||
|
||||
@@ -1766,12 +1763,6 @@ impl ReplayStage {
|
||||
}
|
||||
assert_eq!(*bank_slot, bank.slot());
|
||||
if bank.is_complete() {
|
||||
execute_timings.accumulate(&bank_progress.replay_stats.execute_timings);
|
||||
debug!("bank {} is completed replay from blockstore, contribute to update cost with {:?}",
|
||||
bank.slot(),
|
||||
bank_progress.replay_stats.execute_timings
|
||||
);
|
||||
|
||||
bank_progress.replay_stats.report_stats(
|
||||
bank.slot(),
|
||||
bank_progress.replay_progress.num_entries,
|
||||
@@ -1784,13 +1775,6 @@ impl ReplayStage {
|
||||
transaction_status_sender.send_transaction_status_freeze_message(&bank);
|
||||
}
|
||||
bank.freeze();
|
||||
// report cost tracker stats
|
||||
cost_update_sender
|
||||
.send(CostUpdate::FrozenBank { bank: bank.clone() })
|
||||
.unwrap_or_else(|err| {
|
||||
warn!("cost_update_sender failed sending bank stats: {:?}", err)
|
||||
});
|
||||
|
||||
let bank_hash = bank.hash();
|
||||
assert_ne!(bank_hash, Hash::default());
|
||||
// Needs to be updated before `check_slot_agrees_with_cluster()` so that
|
||||
@@ -1840,14 +1824,6 @@ impl ReplayStage {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// send accumulated excute-timings to cost_update_service
|
||||
if !execute_timings.details.per_program_timings.is_empty() {
|
||||
cost_update_sender
|
||||
.send(CostUpdate::ExecuteTiming { execute_timings })
|
||||
.unwrap_or_else(|err| warn!("cost_update_sender failed: {:?}", err));
|
||||
}
|
||||
|
||||
inc_new_counter_info!("replay_stage-replay_transactions", tx_count);
|
||||
did_complete_bank
|
||||
}
|
||||
@@ -4953,6 +4929,7 @@ mod tests {
|
||||
);
|
||||
assert_eq!(tower.last_voted_slot().unwrap(), 1);
|
||||
}
|
||||
|
||||
fn run_compute_and_select_forks(
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
progress: &mut ProgressMap,
|
||||
|
@@ -28,9 +28,9 @@ use {
|
||||
solana_runtime::{bank::Bank, bank_forks::BankForks},
|
||||
solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp},
|
||||
std::{
|
||||
collections::{BTreeSet, HashMap, HashSet},
|
||||
collections::{BTreeSet, HashSet},
|
||||
net::UdpSocket,
|
||||
ops::{AddAssign, DerefMut},
|
||||
ops::DerefMut,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
|
||||
mpsc::{self, channel, RecvTimeoutError},
|
||||
@@ -47,25 +47,9 @@ const DEFAULT_LRU_SIZE: usize = 10_000;
|
||||
const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
|
||||
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);
|
||||
|
||||
#[derive(Default)]
|
||||
struct RetransmitSlotStats {
|
||||
num_shreds: usize,
|
||||
num_nodes: usize,
|
||||
}
|
||||
|
||||
impl AddAssign for RetransmitSlotStats {
|
||||
fn add_assign(&mut self, other: Self) {
|
||||
*self = Self {
|
||||
num_shreds: self.num_shreds + other.num_shreds,
|
||||
num_nodes: self.num_nodes + other.num_nodes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct RetransmitStats {
|
||||
since: Option<Instant>,
|
||||
num_nodes: AtomicUsize,
|
||||
num_shreds: usize,
|
||||
num_shreds_skipped: AtomicUsize,
|
||||
total_batches: usize,
|
||||
@@ -74,7 +58,6 @@ struct RetransmitStats {
|
||||
epoch_cache_update: u64,
|
||||
retransmit_total: AtomicU64,
|
||||
compute_turbine_peers_total: AtomicU64,
|
||||
slot_stats: HashMap<Slot, RetransmitSlotStats>,
|
||||
unknown_shred_slot_leader: AtomicUsize,
|
||||
}
|
||||
|
||||
@@ -108,7 +91,6 @@ impl RetransmitStats {
|
||||
("epoch_fetch", stats.epoch_fetch, i64),
|
||||
("epoch_cache_update", stats.epoch_cache_update, i64),
|
||||
("total_batches", stats.total_batches, i64),
|
||||
("num_nodes", stats.num_nodes.into_inner(), i64),
|
||||
("num_shreds", stats.num_shreds, i64),
|
||||
(
|
||||
"num_shreds_skipped",
|
||||
@@ -127,14 +109,6 @@ impl RetransmitStats {
|
||||
i64
|
||||
),
|
||||
);
|
||||
for (slot, stats) in stats.slot_stats {
|
||||
datapoint_info!(
|
||||
"retransmit-stage-slot-stats",
|
||||
("slot", slot, i64),
|
||||
("num_shreds", stats.num_shreds, i64),
|
||||
("num_nodes", stats.num_nodes, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -242,10 +216,10 @@ fn retransmit(
|
||||
|
||||
let my_id = cluster_info.id();
|
||||
let socket_addr_space = cluster_info.socket_addr_space();
|
||||
let retransmit_shred = |shred: &Shred, socket: &UdpSocket| {
|
||||
if should_skip_retransmit(shred, shreds_received) {
|
||||
let retransmit_shred = |shred: Shred, socket: &UdpSocket| {
|
||||
if should_skip_retransmit(&shred, shreds_received) {
|
||||
stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
let shred_slot = shred.slot();
|
||||
max_slots
|
||||
@@ -273,7 +247,7 @@ fn retransmit(
|
||||
stats
|
||||
.unknown_shred_slot_leader
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
};
|
||||
let cluster_nodes =
|
||||
@@ -310,52 +284,17 @@ fn retransmit(
|
||||
socket_addr_space,
|
||||
);
|
||||
retransmit_time.stop();
|
||||
let num_nodes = if anchor_node {
|
||||
neighbors.len() + children.len() - 1
|
||||
} else {
|
||||
children.len()
|
||||
};
|
||||
stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
|
||||
stats
|
||||
.retransmit_total
|
||||
.fetch_add(retransmit_time.as_us(), Ordering::Relaxed);
|
||||
num_nodes
|
||||
};
|
||||
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
|
||||
where
|
||||
K: Eq + std::hash::Hash,
|
||||
V: Default + AddAssign,
|
||||
{
|
||||
if acc.len() < other.len() {
|
||||
return merge(other, acc);
|
||||
}
|
||||
for (key, value) in other {
|
||||
*acc.entry(key).or_default() += value;
|
||||
}
|
||||
acc
|
||||
}
|
||||
let slot_stats = thread_pool.install(|| {
|
||||
shreds
|
||||
.into_par_iter()
|
||||
.with_min_len(4)
|
||||
.map(|shred| {
|
||||
let index = thread_pool.current_thread_index().unwrap();
|
||||
let socket = &sockets[index % sockets.len()];
|
||||
let num_nodes = retransmit_shred(&shred, socket);
|
||||
(shred.slot(), num_nodes)
|
||||
})
|
||||
.fold(
|
||||
HashMap::<Slot, RetransmitSlotStats>::new,
|
||||
|mut acc, (slot, num_nodes)| {
|
||||
let stats = acc.entry(slot).or_default();
|
||||
stats.num_nodes += num_nodes;
|
||||
stats.num_shreds += 1;
|
||||
acc
|
||||
},
|
||||
)
|
||||
.reduce(HashMap::new, merge)
|
||||
thread_pool.install(|| {
|
||||
shreds.into_par_iter().with_min_len(4).for_each(|shred| {
|
||||
let index = thread_pool.current_thread_index().unwrap();
|
||||
let socket = &sockets[index % sockets.len()];
|
||||
retransmit_shred(shred, socket);
|
||||
});
|
||||
});
|
||||
stats.slot_stats = merge(std::mem::take(&mut stats.slot_stats), slot_stats);
|
||||
timer_start.stop();
|
||||
stats.total_time += timer_start.as_us();
|
||||
stats.maybe_submit(&root_bank, &working_bank, cluster_info, cluster_nodes_cache);
|
||||
@@ -556,7 +495,7 @@ mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (bank_forks, cached_leader_schedule) =
|
||||
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None, None).unwrap();
|
||||
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts, None).unwrap();
|
||||
let leader_schedule_cache = Arc::new(cached_leader_schedule);
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
|
||||
|
@@ -8,13 +8,13 @@
|
||||
use crate::sigverify;
|
||||
use crossbeam_channel::{SendError, Sender as CrossbeamSender};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::datapoint_debug;
|
||||
use solana_perf::packet::Packets;
|
||||
use solana_sdk::timing;
|
||||
use solana_streamer::streamer::{self, PacketReceiver, StreamerError};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
||||
use std::thread::{self, Builder, JoinHandle};
|
||||
use std::time::Instant;
|
||||
use thiserror::Error;
|
||||
|
||||
const MAX_SIGVERIFY_BATCH: usize = 10_000;
|
||||
@@ -41,82 +41,6 @@ pub trait SigVerifier {
|
||||
#[derive(Default, Clone)]
|
||||
pub struct DisabledSigVerifier {}
|
||||
|
||||
#[derive(Default)]
|
||||
struct SigVerifierStats {
|
||||
recv_batches_us_hist: histogram::Histogram, // time to call recv_batch
|
||||
verify_batches_pp_us_hist: histogram::Histogram, // per-packet time to call verify_batch
|
||||
batches_hist: histogram::Histogram, // number of Packets structures per verify call
|
||||
packets_hist: histogram::Histogram, // number of packets per verify call
|
||||
total_batches: usize,
|
||||
total_packets: usize,
|
||||
}
|
||||
|
||||
impl SigVerifierStats {
|
||||
fn report(&self) {
|
||||
datapoint_info!(
|
||||
"sigverify_stage-total_verify_time",
|
||||
(
|
||||
"recv_batches_us_90pct",
|
||||
self.recv_batches_us_hist.percentile(90.0).unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"recv_batches_us_min",
|
||||
self.recv_batches_us_hist.minimum().unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"recv_batches_us_max",
|
||||
self.recv_batches_us_hist.maximum().unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"recv_batches_us_mean",
|
||||
self.recv_batches_us_hist.mean().unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"verify_batches_pp_us_90pct",
|
||||
self.verify_batches_pp_us_hist.percentile(90.0).unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"verify_batches_pp_us_min",
|
||||
self.verify_batches_pp_us_hist.minimum().unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"verify_batches_pp_us_max",
|
||||
self.verify_batches_pp_us_hist.maximum().unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"verify_batches_pp_us_mean",
|
||||
self.verify_batches_pp_us_hist.mean().unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"batches_90pct",
|
||||
self.batches_hist.percentile(90.0).unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
("batches_min", self.batches_hist.minimum().unwrap_or(0), i64),
|
||||
("batches_max", self.batches_hist.maximum().unwrap_or(0), i64),
|
||||
("batches_mean", self.batches_hist.mean().unwrap_or(0), i64),
|
||||
(
|
||||
"packets_90pct",
|
||||
self.packets_hist.percentile(90.0).unwrap_or(0),
|
||||
i64
|
||||
),
|
||||
("packets_min", self.packets_hist.minimum().unwrap_or(0), i64),
|
||||
("packets_max", self.packets_hist.maximum().unwrap_or(0), i64),
|
||||
("packets_mean", self.packets_hist.mean().unwrap_or(0), i64),
|
||||
("total_batches", self.total_batches, i64),
|
||||
("total_packets", self.total_packets, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl SigVerifier for DisabledSigVerifier {
|
||||
fn verify_batch(&self, mut batch: Vec<Packets>) -> Vec<Packets> {
|
||||
sigverify::ed25519_verify_disabled(&mut batch);
|
||||
@@ -168,7 +92,6 @@ impl SigVerifyStage {
|
||||
recvr: &PacketReceiver,
|
||||
sendr: &CrossbeamSender<Vec<Packets>>,
|
||||
verifier: &T,
|
||||
stats: &mut SigVerifierStats,
|
||||
) -> Result<()> {
|
||||
let (mut batches, len, recv_time) = streamer::recv_batch(recvr)?;
|
||||
|
||||
@@ -198,19 +121,6 @@ impl SigVerifyStage {
|
||||
("recv_time", recv_time, i64),
|
||||
);
|
||||
|
||||
stats
|
||||
.recv_batches_us_hist
|
||||
.increment(recv_time as u64)
|
||||
.unwrap();
|
||||
stats
|
||||
.verify_batches_pp_us_hist
|
||||
.increment(verify_batch_time.as_us() / (len as u64))
|
||||
.unwrap();
|
||||
stats.batches_hist.increment(batches_len as u64).unwrap();
|
||||
stats.packets_hist.increment(len as u64).unwrap();
|
||||
stats.total_batches += batches_len;
|
||||
stats.total_packets += len;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -220,14 +130,10 @@ impl SigVerifyStage {
|
||||
verifier: &T,
|
||||
) -> JoinHandle<()> {
|
||||
let verifier = verifier.clone();
|
||||
let mut stats = SigVerifierStats::default();
|
||||
let mut last_print = Instant::now();
|
||||
Builder::new()
|
||||
.name("solana-verifier".to_string())
|
||||
.spawn(move || loop {
|
||||
if let Err(e) =
|
||||
Self::verifier(&packet_receiver, &verified_sender, &verifier, &mut stats)
|
||||
{
|
||||
if let Err(e) = Self::verifier(&packet_receiver, &verified_sender, &verifier) {
|
||||
match e {
|
||||
SigVerifyServiceError::Streamer(StreamerError::RecvTimeout(
|
||||
RecvTimeoutError::Disconnected,
|
||||
@@ -241,11 +147,6 @@ impl SigVerifyStage {
|
||||
_ => error!("{:?}", e),
|
||||
}
|
||||
}
|
||||
if last_print.elapsed().as_secs() > 2 {
|
||||
stats.report();
|
||||
stats = SigVerifierStats::default();
|
||||
last_print = Instant::now();
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
@@ -1,227 +0,0 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
io::BufRead,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{self, sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
use std::{fs::File, io::BufReader, path::Path};
|
||||
|
||||
const SAMPLE_INTERVAL: Duration = Duration::from_secs(60);
|
||||
const SLEEP_INTERVAL: Duration = Duration::from_millis(500);
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
const PROC_NET_SNMP_PATH: &str = "/proc/net/snmp";
|
||||
|
||||
/// Background service that periodically samples kernel UDP statistics
/// (on Linux, from /proc/net/snmp) and reports deltas as metrics.
pub struct SystemMonitorService {
    // Handle to the monitoring thread; joined on shutdown via `join`.
    thread_hdl: JoinHandle<()>,
}
|
||||
|
||||
/// Snapshot of the kernel's cumulative UDP counters as read from
/// `/proc/net/snmp`. Field names mirror the column labels of the `Udp:` rows.
#[cfg_attr(not(target_os = "linux"), allow(dead_code))]
struct UdpStats {
    in_datagrams: usize,
    no_ports: usize,
    in_errors: usize,
    out_datagrams: usize,
    rcvbuf_errors: usize,
    sndbuf_errors: usize,
    in_csum_errors: usize,
    ignored_multi: usize,
}

/// Opens `file_path` (normally `/proc/net/snmp`) and parses its UDP rows.
#[cfg(target_os = "linux")]
fn read_udp_stats(file_path: impl AsRef<Path>) -> Result<UdpStats, String> {
    let file = File::open(file_path).map_err(|e| e.to_string())?;
    let mut reader = BufReader::new(file);
    parse_udp_stats(&mut reader)
}

/// Parses the two `Udp:` rows of a `/proc/net/snmp`-formatted stream: the
/// first row carries the column labels, the second the values.
///
/// Columns absent from the header default to 0. Returns `Err` when fewer than
/// two `Udp:` rows are found or when a value fails to parse as an integer
/// (the previous implementation panicked on malformed values via `unwrap`,
/// despite the function's `Result` return type).
#[cfg_attr(not(target_os = "linux"), allow(dead_code))]
fn parse_udp_stats(reader: &mut impl BufRead) -> Result<UdpStats, String> {
    let mut udp_lines = Vec::default();
    for line in reader.lines() {
        let line = line.map_err(|e| e.to_string())?;
        if line.starts_with("Udp:") {
            udp_lines.push(line);
            if udp_lines.len() == 2 {
                // Only the first two Udp: rows matter (labels + values);
                // UdpLite rows do not match the "Udp:" prefix.
                break;
            }
        }
    }
    if udp_lines.len() != 2 {
        return Err(format!(
            "parse error, expected 2 lines, num lines: {}",
            udp_lines.len()
        ));
    }

    // Pair each label from the header row with its value from the data row.
    // The first pair is the ("Udp:", "Udp:") row prefix and is skipped; a
    // malformed numeric value short-circuits into an Err instead of panicking.
    let udp_stats: HashMap<String, usize> = udp_lines[0]
        .split_ascii_whitespace()
        .zip(udp_lines[1].split_ascii_whitespace())
        .skip(1)
        .map(|(label, val)| {
            val.parse::<usize>()
                .map(|val| (label.to_string(), val))
                .map_err(|e| format!("parse error for {}: {}", label, e))
        })
        .collect::<Result<_, _>>()?;

    let stats = UdpStats {
        in_datagrams: *udp_stats.get("InDatagrams").unwrap_or(&0),
        no_ports: *udp_stats.get("NoPorts").unwrap_or(&0),
        in_errors: *udp_stats.get("InErrors").unwrap_or(&0),
        out_datagrams: *udp_stats.get("OutDatagrams").unwrap_or(&0),
        rcvbuf_errors: *udp_stats.get("RcvbufErrors").unwrap_or(&0),
        sndbuf_errors: *udp_stats.get("SndbufErrors").unwrap_or(&0),
        in_csum_errors: *udp_stats.get("InCsumErrors").unwrap_or(&0),
        ignored_multi: *udp_stats.get("IgnoredMulti").unwrap_or(&0),
    };

    Ok(stats)
}
|
||||
|
||||
/// Verifies at startup that /proc/net/snmp exists and parses, so the monitor
/// thread won't silently log warnings forever.
#[cfg(target_os = "linux")]
pub fn verify_udp_stats_access() -> Result<(), String> {
    read_udp_stats(PROC_NET_SNMP_PATH)?;
    Ok(())
}

/// No-op on platforms without /proc/net/snmp.
#[cfg(not(target_os = "linux"))]
pub fn verify_udp_stats_access() -> Result<(), String> {
    Ok(())
}
|
||||
|
||||
impl SystemMonitorService {
    /// Spawns the system-monitor thread; it runs `Self::run` until `exit`
    /// is set.
    pub fn new(exit: Arc<AtomicBool>) -> Self {
        info!("Starting SystemMonitorService");
        let thread_hdl = Builder::new()
            .name("system-monitor".to_string())
            .spawn(move || {
                Self::run(exit);
            })
            .unwrap();

        Self { thread_hdl }
    }

    /// Takes a fresh UDP-counter snapshot; if a previous snapshot exists,
    /// reports the deltas between the two, then stores the new snapshot.
    /// Read failures are logged as warnings and leave the old snapshot intact.
    #[cfg(target_os = "linux")]
    fn process_udp_stats(udp_stats: &mut Option<UdpStats>) {
        match read_udp_stats(PROC_NET_SNMP_PATH) {
            Ok(new_stats) => {
                if let Some(old_stats) = udp_stats {
                    SystemMonitorService::report_udp_stats(old_stats, &new_stats);
                }
                *udp_stats = Some(new_stats);
            }
            Err(e) => warn!("read_udp_stats: {}", e),
        }
    }

    /// No-op on platforms without /proc/net/snmp.
    #[cfg(not(target_os = "linux"))]
    fn process_udp_stats(_udp_stats: &mut Option<UdpStats>) {}

    /// Emits a `net-stats` datapoint with per-interval deltas (plus a few
    /// absolute error counters).
    ///
    /// NOTE(review): the deltas assume the kernel counters are monotonically
    /// increasing between samples; if a counter ever reset, `new - old` would
    /// underflow (panic in debug builds) — consider saturating_sub. TODO
    /// confirm counters cannot go backwards.
    #[cfg(target_os = "linux")]
    fn report_udp_stats(old_stats: &UdpStats, new_stats: &UdpStats) {
        datapoint_info!(
            "net-stats",
            (
                "in_datagrams_delta",
                new_stats.in_datagrams - old_stats.in_datagrams,
                i64
            ),
            (
                "no_ports_delta",
                new_stats.no_ports - old_stats.no_ports,
                i64
            ),
            (
                "in_errors_delta",
                new_stats.in_errors - old_stats.in_errors,
                i64
            ),
            (
                "out_datagrams_delta",
                new_stats.out_datagrams - old_stats.out_datagrams,
                i64
            ),
            (
                "rcvbuf_errors_delta",
                new_stats.rcvbuf_errors - old_stats.rcvbuf_errors,
                i64
            ),
            (
                "sndbuf_errors_delta",
                new_stats.sndbuf_errors - old_stats.sndbuf_errors,
                i64
            ),
            (
                "in_csum_errors_delta",
                new_stats.in_csum_errors - old_stats.in_csum_errors,
                i64
            ),
            (
                "ignored_multi_delta",
                new_stats.ignored_multi - old_stats.ignored_multi,
                i64
            ),
            ("in_errors", new_stats.in_errors, i64),
            ("rcvbuf_errors", new_stats.rcvbuf_errors, i64),
            ("sndbuf_errors", new_stats.sndbuf_errors, i64),
        );
    }

    /// Monitor loop: wakes every SLEEP_INTERVAL to check the exit flag, and
    /// samples UDP stats once per SAMPLE_INTERVAL.
    pub fn run(exit: Arc<AtomicBool>) {
        let mut udp_stats = None;

        let mut now = Instant::now();
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }

            if now.elapsed() >= SAMPLE_INTERVAL {
                now = Instant::now();

                SystemMonitorService::process_udp_stats(&mut udp_stats);
            }

            sleep(SLEEP_INTERVAL);
        }
    }

    /// Blocks until the monitor thread terminates (after `exit` is set).
    pub fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Feeds a realistic /proc/net/snmp capture through parse_udp_stats and
    /// checks a couple of parsed UDP fields; then checks that input with no
    /// Udp: rows is rejected.
    #[test]
    fn test_parse_udp_stats() {
        // Multi-line byte literal: line breaks inside the literal are part of
        // the data, mimicking the real /proc/net/snmp layout.
        let mut mock_snmp =
b"Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates
Ip: 1 64 357 0 2 0 0 0 355 315 0 6 0 0 0 0 0 0 0
Icmp: InMsgs InErrors InCsumErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps
Icmp: 3 0 0 3 0 0 0 0 0 0 0 0 0 0 7 0 7 0 0 0 0 0 0 0 0 0 0
IcmpMsg: InType3 OutType3
IcmpMsg: 3 7
Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors
Tcp: 1 200 120000 -1 29 1 0 0 5 318 279 0 0 4 0
Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti
Udp: 27 7 0 30 0 0 0 0
UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors IgnoredMulti
UdpLite: 0 0 0 0 0 0 0 0" as &[u8];
        let stats = parse_udp_stats(&mut mock_snmp).unwrap();
        assert_eq!(stats.out_datagrams, 30);
        assert_eq!(stats.no_ports, 7);

        // Input with no "Udp:" rows at all must produce a parse error.
        let mut mock_snmp = b"unexpected data" as &[u8];
        let stats = parse_udp_stats(&mut mock_snmp);
        assert!(stats.is_err());
    }
}
|
@@ -22,7 +22,6 @@ use solana_rpc::{
|
||||
};
|
||||
use solana_runtime::{
|
||||
bank_forks::BankForks,
|
||||
cost_model::CostModel,
|
||||
vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender},
|
||||
};
|
||||
use std::{
|
||||
@@ -72,7 +71,6 @@ impl Tpu {
|
||||
bank_notification_sender: Option<BankNotificationSender>,
|
||||
tpu_coalesce_ms: u64,
|
||||
cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) -> Self {
|
||||
let (packet_sender, packet_receiver) = channel();
|
||||
let (vote_packet_sender, vote_packet_receiver) = channel();
|
||||
@@ -130,7 +128,6 @@ impl Tpu {
|
||||
verified_gossip_vote_packets_receiver,
|
||||
transaction_status_sender,
|
||||
replay_vote_sender,
|
||||
cost_model.clone(),
|
||||
);
|
||||
|
||||
let broadcast_stage = broadcast_type.new_broadcast_stage(
|
||||
|
@@ -12,7 +12,6 @@ use crate::{
|
||||
cluster_slots::ClusterSlots,
|
||||
completed_data_sets_service::CompletedDataSetsSender,
|
||||
consensus::Tower,
|
||||
cost_update_service::CostUpdateService,
|
||||
ledger_cleanup_service::LedgerCleanupService,
|
||||
replay_stage::{ReplayStage, ReplayStageConfig},
|
||||
retransmit_stage::RetransmitStage,
|
||||
@@ -41,7 +40,6 @@ use solana_runtime::{
|
||||
accounts_db::AccountShrinkThreshold,
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
commitment::BlockCommitmentCache,
|
||||
cost_model::CostModel,
|
||||
vote_sender_types::ReplayVoteSender,
|
||||
};
|
||||
use solana_sdk::{
|
||||
@@ -69,7 +67,6 @@ pub struct Tvu {
|
||||
accounts_background_service: AccountsBackgroundService,
|
||||
accounts_hash_verifier: AccountsHashVerifier,
|
||||
voting_service: VotingService,
|
||||
cost_update_service: CostUpdateService,
|
||||
}
|
||||
|
||||
pub struct Sockets {
|
||||
@@ -134,7 +131,6 @@ impl Tvu {
|
||||
gossip_confirmed_slots_receiver: GossipDuplicateConfirmedSlotsReceiver,
|
||||
tvu_config: TvuConfig,
|
||||
max_slots: &Arc<MaxSlots>,
|
||||
cost_model: &Arc<RwLock<CostModel>>,
|
||||
) -> Self {
|
||||
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
|
||||
|
||||
@@ -289,14 +285,6 @@ impl Tvu {
|
||||
bank_forks.clone(),
|
||||
);
|
||||
|
||||
let (cost_update_sender, cost_update_receiver) = channel();
|
||||
let cost_update_service = CostUpdateService::new(
|
||||
exit.clone(),
|
||||
blockstore.clone(),
|
||||
cost_model.clone(),
|
||||
cost_update_receiver,
|
||||
);
|
||||
|
||||
let replay_stage = ReplayStage::new(
|
||||
replay_stage_config,
|
||||
blockstore.clone(),
|
||||
@@ -315,7 +303,6 @@ impl Tvu {
|
||||
gossip_verified_vote_hash_receiver,
|
||||
cluster_slots_update_sender,
|
||||
voting_sender,
|
||||
cost_update_sender,
|
||||
);
|
||||
|
||||
let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
|
||||
@@ -347,7 +334,6 @@ impl Tvu {
|
||||
accounts_background_service,
|
||||
accounts_hash_verifier,
|
||||
voting_service,
|
||||
cost_update_service,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -362,7 +348,6 @@ impl Tvu {
|
||||
self.replay_stage.join()?;
|
||||
self.accounts_hash_verifier.join()?;
|
||||
self.voting_service.join()?;
|
||||
self.cost_update_service.join()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -470,7 +455,6 @@ pub mod tests {
|
||||
gossip_confirmed_slots_receiver,
|
||||
TvuConfig::default(),
|
||||
&Arc::new(MaxSlots::default()),
|
||||
&Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
tvu.join().unwrap();
|
||||
|
@@ -14,13 +14,11 @@ use {
|
||||
serve_repair_service::ServeRepairService,
|
||||
sigverify,
|
||||
snapshot_packager_service::{PendingSnapshotPackage, SnapshotPackagerService},
|
||||
system_monitor_service::{verify_udp_stats_access, SystemMonitorService},
|
||||
tpu::{Tpu, DEFAULT_TPU_COALESCE_MS},
|
||||
tvu::{Sockets, Tvu, TvuConfig},
|
||||
},
|
||||
crossbeam_channel::{bounded, unbounded},
|
||||
rand::{thread_rng, Rng},
|
||||
solana_accountsdb_plugin_manager::accountsdb_plugin_service::AccountsDbPluginService,
|
||||
solana_gossip::{
|
||||
cluster_info::{
|
||||
ClusterInfo, Node, DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
|
||||
@@ -45,7 +43,6 @@ use {
|
||||
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
|
||||
poh_service::{self, PohService},
|
||||
},
|
||||
solana_rpc::send_transaction_service,
|
||||
solana_rpc::{
|
||||
max_slots::MaxSlots,
|
||||
optimistically_confirmed_bank_tracker::{
|
||||
@@ -56,16 +53,15 @@ use {
|
||||
rpc_pubsub_service::{PubSubConfig, PubSubService},
|
||||
rpc_service::JsonRpcService,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
send_transaction_service,
|
||||
transaction_status_service::TransactionStatusService,
|
||||
},
|
||||
solana_runtime::{
|
||||
accounts_db::AccountShrinkThreshold,
|
||||
accounts_index::AccountSecondaryIndexes,
|
||||
accounts_update_notifier_interface::AccountsUpdateNotifier,
|
||||
bank::Bank,
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
commitment::BlockCommitmentCache,
|
||||
cost_model::CostModel,
|
||||
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
|
||||
},
|
||||
solana_sdk::{
|
||||
@@ -109,7 +105,6 @@ pub struct ValidatorConfig {
|
||||
pub account_paths: Vec<PathBuf>,
|
||||
pub account_shrink_paths: Option<Vec<PathBuf>>,
|
||||
pub rpc_config: JsonRpcConfig,
|
||||
pub accountsdb_plugin_config_files: Option<Vec<PathBuf>>,
|
||||
pub rpc_addrs: Option<(SocketAddr, SocketAddr)>, // (JsonRpc, JsonRpcPubSub)
|
||||
pub pubsub_config: PubSubConfig,
|
||||
pub snapshot_config: Option<SnapshotConfig>,
|
||||
@@ -168,7 +163,6 @@ impl Default for ValidatorConfig {
|
||||
account_paths: Vec::new(),
|
||||
account_shrink_paths: None,
|
||||
rpc_config: JsonRpcConfig::default(),
|
||||
accountsdb_plugin_config_files: None,
|
||||
rpc_addrs: None,
|
||||
pubsub_config: PubSubConfig::default(),
|
||||
snapshot_config: None,
|
||||
@@ -262,7 +256,6 @@ pub struct Validator {
|
||||
transaction_status_service: Option<TransactionStatusService>,
|
||||
rewards_recorder_service: Option<RewardsRecorderService>,
|
||||
cache_block_meta_service: Option<CacheBlockMetaService>,
|
||||
system_monitor_service: Option<SystemMonitorService>,
|
||||
sample_performance_service: Option<SamplePerformanceService>,
|
||||
gossip_service: GossipService,
|
||||
serve_repair_service: ServeRepairService,
|
||||
@@ -273,7 +266,6 @@ pub struct Validator {
|
||||
tpu: Tpu,
|
||||
tvu: Tvu,
|
||||
ip_echo_server: Option<solana_net_utils::IpEchoServer>,
|
||||
accountsdb_plugin_service: Option<AccountsDbPluginService>,
|
||||
}
|
||||
|
||||
// in the distant future, get rid of ::new()/exit() and use Result properly...
|
||||
@@ -310,27 +302,6 @@ impl Validator {
|
||||
warn!("identity: {}", id);
|
||||
warn!("vote account: {}", vote_account);
|
||||
|
||||
let mut bank_notification_senders = Vec::new();
|
||||
|
||||
let accountsdb_plugin_service =
|
||||
if let Some(accountsdb_plugin_config_files) = &config.accountsdb_plugin_config_files {
|
||||
let (confirmed_bank_sender, confirmed_bank_receiver) = unbounded();
|
||||
bank_notification_senders.push(confirmed_bank_sender);
|
||||
let result = AccountsDbPluginService::new(
|
||||
confirmed_bank_receiver,
|
||||
accountsdb_plugin_config_files,
|
||||
);
|
||||
match result {
|
||||
Ok(accountsdb_plugin_service) => Some(accountsdb_plugin_service),
|
||||
Err(err) => {
|
||||
error!("Failed to load the AccountsDb plugin: {:?}", err);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if config.voting_disabled {
|
||||
warn!("voting disabled");
|
||||
authorized_voter_keypairs.write().unwrap().clear();
|
||||
@@ -424,19 +395,10 @@ impl Validator {
|
||||
config.enforce_ulimit_nofile,
|
||||
&start_progress,
|
||||
config.no_poh_speed_test,
|
||||
accountsdb_plugin_service
|
||||
.as_ref()
|
||||
.map(|plugin_service| plugin_service.get_accounts_update_notifier()),
|
||||
);
|
||||
|
||||
*start_progress.write().unwrap() = ValidatorStartProgress::StartingServices;
|
||||
|
||||
verify_udp_stats_access().unwrap_or_else(|err| {
|
||||
error!("Failed to access UDP stats: {}", err);
|
||||
abort();
|
||||
});
|
||||
let system_monitor_service = Some(SystemMonitorService::new(Arc::clone(&exit)));
|
||||
|
||||
let leader_schedule_cache = Arc::new(leader_schedule_cache);
|
||||
let bank = bank_forks.working_bank();
|
||||
if let Some(ref shrink_paths) = config.account_shrink_paths {
|
||||
@@ -570,11 +532,6 @@ impl Validator {
|
||||
));
|
||||
}
|
||||
let (bank_notification_sender, bank_notification_receiver) = unbounded();
|
||||
let confirmed_bank_subscribers = if !bank_notification_senders.is_empty() {
|
||||
Some(Arc::new(RwLock::new(bank_notification_senders)))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(
|
||||
Some(JsonRpcService::new(
|
||||
rpc_addr,
|
||||
@@ -618,7 +575,6 @@ impl Validator {
|
||||
bank_forks.clone(),
|
||||
optimistically_confirmed_bank,
|
||||
rpc_subscriptions.clone(),
|
||||
confirmed_bank_subscribers,
|
||||
)),
|
||||
Some(bank_notification_sender),
|
||||
)
|
||||
@@ -724,10 +680,6 @@ impl Validator {
|
||||
bank_forks.read().unwrap().root_bank().deref(),
|
||||
));
|
||||
|
||||
let mut cost_model = CostModel::default();
|
||||
cost_model.initialize_cost_table(&blockstore.read_program_costs().unwrap());
|
||||
let cost_model = Arc::new(RwLock::new(cost_model));
|
||||
|
||||
let (retransmit_slots_sender, retransmit_slots_receiver) = unbounded();
|
||||
let (verified_vote_sender, verified_vote_receiver) = unbounded();
|
||||
let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded();
|
||||
@@ -805,7 +757,6 @@ impl Validator {
|
||||
disable_epoch_boundary_optimization: config.disable_epoch_boundary_optimization,
|
||||
},
|
||||
&max_slots,
|
||||
&cost_model,
|
||||
);
|
||||
|
||||
let tpu = Tpu::new(
|
||||
@@ -832,7 +783,6 @@ impl Validator {
|
||||
bank_notification_sender,
|
||||
config.tpu_coalesce_ms,
|
||||
cluster_confirmed_slot_sender,
|
||||
&cost_model,
|
||||
);
|
||||
|
||||
datapoint_info!("validator-new", ("id", id.to_string(), String));
|
||||
@@ -847,7 +797,6 @@ impl Validator {
|
||||
transaction_status_service,
|
||||
rewards_recorder_service,
|
||||
cache_block_meta_service,
|
||||
system_monitor_service,
|
||||
sample_performance_service,
|
||||
snapshot_packager_service,
|
||||
completed_data_sets_service,
|
||||
@@ -857,7 +806,6 @@ impl Validator {
|
||||
poh_recorder,
|
||||
ip_echo_server,
|
||||
validator_exit: config.validator_exit.clone(),
|
||||
accountsdb_plugin_service,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -938,12 +886,6 @@ impl Validator {
|
||||
.expect("cache_block_meta_service");
|
||||
}
|
||||
|
||||
if let Some(system_monitor_service) = self.system_monitor_service {
|
||||
system_monitor_service
|
||||
.join()
|
||||
.expect("system_monitor_service");
|
||||
}
|
||||
|
||||
if let Some(sample_performance_service) = self.sample_performance_service {
|
||||
sample_performance_service
|
||||
.join()
|
||||
@@ -966,12 +908,6 @@ impl Validator {
|
||||
if let Some(ip_echo_server) = self.ip_echo_server {
|
||||
ip_echo_server.shutdown_background();
|
||||
}
|
||||
|
||||
if let Some(accountsdb_plugin_service) = self.accountsdb_plugin_service {
|
||||
accountsdb_plugin_service
|
||||
.join()
|
||||
.expect("accountsdb_plugin_service");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1105,7 +1041,6 @@ fn post_process_restored_tower(
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn new_banks_from_ledger(
|
||||
validator_identity: &Pubkey,
|
||||
vote_account: &Pubkey,
|
||||
@@ -1116,7 +1051,6 @@ fn new_banks_from_ledger(
|
||||
enforce_ulimit_nofile: bool,
|
||||
start_progress: &Arc<RwLock<ValidatorStartProgress>>,
|
||||
no_poh_speed_test: bool,
|
||||
accounts_update_notifier: Option<AccountsUpdateNotifier>,
|
||||
) -> (
|
||||
GenesisConfig,
|
||||
BankForks,
|
||||
@@ -1233,7 +1167,6 @@ fn new_banks_from_ledger(
|
||||
transaction_history_services
|
||||
.cache_block_meta_sender
|
||||
.as_ref(),
|
||||
accounts_update_notifier,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
error!("Failed to load ledger: {:?}", err);
|
||||
|
@@ -50,12 +50,6 @@ struct WindowServiceMetrics {
|
||||
num_shreds_received: u64,
|
||||
shred_receiver_elapsed_us: u64,
|
||||
prune_shreds_elapsed_us: u64,
|
||||
num_shreds_pruned_invalid_repair: usize,
|
||||
num_errors: u64,
|
||||
num_errors_blockstore: u64,
|
||||
num_errors_cross_beam_recv_timeout: u64,
|
||||
num_errors_other: u64,
|
||||
num_errors_try_crossbeam_send: u64,
|
||||
}
|
||||
|
||||
impl WindowServiceMetrics {
|
||||
@@ -74,39 +68,8 @@ impl WindowServiceMetrics {
|
||||
self.prune_shreds_elapsed_us as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"num_shreds_pruned_invalid_repair",
|
||||
self.num_shreds_pruned_invalid_repair,
|
||||
i64
|
||||
),
|
||||
("num_errors", self.num_errors, i64),
|
||||
("num_errors_blockstore", self.num_errors_blockstore, i64),
|
||||
("num_errors_other", self.num_errors_other, i64),
|
||||
(
|
||||
"num_errors_try_crossbeam_send",
|
||||
self.num_errors_try_crossbeam_send,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"num_errors_cross_beam_recv_timeout",
|
||||
self.num_errors_cross_beam_recv_timeout,
|
||||
i64
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
fn record_error(&mut self, err: &Error) {
|
||||
self.num_errors += 1;
|
||||
match err {
|
||||
Error::TryCrossbeamSend => self.num_errors_try_crossbeam_send += 1,
|
||||
Error::CrossbeamRecvTimeout(_) => self.num_errors_cross_beam_recv_timeout += 1,
|
||||
Error::Blockstore(err) => {
|
||||
self.num_errors_blockstore += 1;
|
||||
error!("blockstore error: {}", err);
|
||||
}
|
||||
_ => self.num_errors_other += 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
@@ -303,7 +266,6 @@ fn run_insert<F>(
|
||||
where
|
||||
F: Fn(Shred),
|
||||
{
|
||||
ws_metrics.run_insert_count += 1;
|
||||
let mut shred_receiver_elapsed = Measure::start("shred_receiver_elapsed");
|
||||
let timer = Duration::from_millis(200);
|
||||
let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
|
||||
@@ -312,19 +274,15 @@ where
|
||||
repair_infos.extend(more_repair_infos);
|
||||
}
|
||||
shred_receiver_elapsed.stop();
|
||||
ws_metrics.shred_receiver_elapsed_us += shred_receiver_elapsed.as_us();
|
||||
ws_metrics.num_shreds_received += shreds.len() as u64;
|
||||
|
||||
let mut prune_shreds_elapsed = Measure::start("prune_shreds_elapsed");
|
||||
let num_shreds = shreds.len();
|
||||
prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, outstanding_requests);
|
||||
ws_metrics.num_shreds_pruned_invalid_repair = num_shreds - shreds.len();
|
||||
let repairs: Vec<_> = repair_infos
|
||||
.iter()
|
||||
.map(|repair_info| repair_info.is_some())
|
||||
.collect();
|
||||
prune_shreds_elapsed.stop();
|
||||
ws_metrics.prune_shreds_elapsed_us += prune_shreds_elapsed.as_us();
|
||||
|
||||
let (completed_data_sets, inserted_indices) = blockstore.insert_shreds_handle_duplicate(
|
||||
shreds,
|
||||
@@ -342,6 +300,11 @@ where
|
||||
}
|
||||
|
||||
completed_data_sets_sender.try_send(completed_data_sets)?;
|
||||
|
||||
ws_metrics.run_insert_count += 1;
|
||||
ws_metrics.shred_receiver_elapsed_us += shred_receiver_elapsed.as_us();
|
||||
ws_metrics.prune_shreds_elapsed_us += prune_shreds_elapsed.as_us();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -601,7 +564,6 @@ impl WindowService {
|
||||
&retransmit_sender,
|
||||
&outstanding_requests,
|
||||
) {
|
||||
ws_metrics.record_error(&e);
|
||||
if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) {
|
||||
break;
|
||||
}
|
||||
|
@@ -109,7 +109,6 @@ mod tests {
|
||||
false,
|
||||
accounts_db::AccountShrinkThreshold::default(),
|
||||
false,
|
||||
None,
|
||||
);
|
||||
bank0.freeze();
|
||||
let mut bank_forks = BankForks::new(bank0);
|
||||
@@ -173,7 +172,6 @@ mod tests {
|
||||
accounts_db::AccountShrinkThreshold::default(),
|
||||
check_hash_calculation,
|
||||
false,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@@ -41,6 +41,7 @@ module.exports = {
|
||||
"cli/choose-a-cluster",
|
||||
"cli/transfer-tokens",
|
||||
"cli/delegate-stake",
|
||||
"cli/manage-stake-accounts",
|
||||
"cli/deploy-a-program",
|
||||
"offline-signing",
|
||||
"offline-signing/durable-nonce",
|
||||
@@ -64,7 +65,6 @@ module.exports = {
|
||||
items: [
|
||||
"developing/clients/jsonrpc-api",
|
||||
"developing/clients/javascript-api",
|
||||
"developing/clients/javascript-reference",
|
||||
"developing/clients/rust-api",
|
||||
],
|
||||
},
|
||||
|
@@ -132,7 +132,8 @@ Recover the intermediate account's ephemeral keypair file with
|
||||
valley flat great hockey share token excess clever benefit traffic avocado athlete
|
||||
==================================================================================
|
||||
To resume a deploy, pass the recovered keypair as
|
||||
the [BUFFER_SIGNER] to `solana program deploy` or `solana write-buffer'.
|
||||
the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy` or
|
||||
as the [BUFFER_SIGNER] to `solana program deploy` or `solana write-buffer'.
|
||||
Or to recover the account's lamports, pass it as the
|
||||
[BUFFER_ACCOUNT_ADDRESS] argument to `solana program drain`.
|
||||
==================================================================================
|
||||
@@ -242,6 +243,16 @@ Or anytime after:
|
||||
solana program set-upgrade-authority <PROGRAM_ADDRESS> --final
|
||||
```
|
||||
|
||||
`solana program deploy ...` utilizes Solana's upgradeable loader, but there is
|
||||
another way to deploy immutable programs using the original on-chain loader:
|
||||
|
||||
```bash
|
||||
solana deploy <PROGRAM_FILEPATH>
|
||||
```
|
||||
|
||||
Programs deployed with `solana deploy ...` are not redeployable and are not
|
||||
compatible with the `solana program ...` commands.
|
||||
|
||||
### Dumping a program to a file
|
||||
|
||||
The deployed program may be dumped back to a local file:
|
||||
|
78
docs/src/cli/manage-stake-accounts.md
Normal file
78
docs/src/cli/manage-stake-accounts.md
Normal file
@@ -0,0 +1,78 @@
|
||||
---
|
||||
title: Manage Stake Accounts
|
||||
---
|
||||
|
||||
If you want to delegate stake to many different validators, you will need
|
||||
to create a separate stake account for each. If you follow the convention
|
||||
of creating the first stake account at seed "0", the second at "1", the
|
||||
third at "2", and so on, then the `solana-stake-accounts` tool will allow
|
||||
you to operate on all accounts with single invocations. You can use it to
|
||||
sum up the balances of all accounts, move accounts to a new wallet, or set
|
||||
new authorities.
|
||||
|
||||
## Usage
|
||||
|
||||
### Create a stake account
|
||||
|
||||
Create and fund a derived stake account at the stake authority public key:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts new <FUNDING_KEYPAIR> <BASE_KEYPAIR> <AMOUNT> \
|
||||
--stake-authority <PUBKEY> --withdraw-authority <PUBKEY> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
### Count accounts
|
||||
|
||||
Count the number of derived accounts:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts count <BASE_PUBKEY>
|
||||
```
|
||||
|
||||
### Get stake account balances
|
||||
|
||||
Sum the balance of derived stake accounts:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts balance <BASE_PUBKEY> --num-accounts <NUMBER>
|
||||
```
|
||||
|
||||
### Get stake account addresses
|
||||
|
||||
List the address of each stake account derived from the given public key:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts addresses <BASE_PUBKEY> --num-accounts <NUMBER>
|
||||
```
|
||||
|
||||
### Set new authorities
|
||||
|
||||
Set new authorities on each derived stake account:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts authorize <BASE_PUBKEY> \
|
||||
--stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
|
||||
--new-stake-authority <PUBKEY> --new-withdraw-authority <PUBKEY> \
|
||||
--num-accounts <NUMBER> --fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
### Relocate stake accounts
|
||||
|
||||
Relocate stake accounts:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts rebase <BASE_PUBKEY> <NEW_BASE_KEYPAIR> \
|
||||
--stake-authority <KEYPAIR> --num-accounts <NUMBER> \
|
||||
--fee-payer <KEYPAIR>
|
||||
```
|
||||
|
||||
To atomically rebase and authorize each stake account, use the 'move'
|
||||
command:
|
||||
|
||||
```bash
|
||||
solana-stake-accounts move <BASE_PUBKEY> <NEW_BASE_KEYPAIR> \
|
||||
--stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
|
||||
--new-stake-authority <PUBKEY> --new-withdraw-authority <PUBKEY> \
|
||||
--num-accounts <NUMBER> --fee-payer <KEYPAIR>
|
||||
```
|
@@ -76,7 +76,6 @@ Major releases:
|
||||
- [`solana-program`](https://docs.rs/solana-program/) - Rust SDK for writing programs
|
||||
- [`solana-client`](https://docs.rs/solana-client/) - Rust client for connecting to RPC API
|
||||
- [`solana-cli-config`](https://docs.rs/solana-cli-config/) - Rust client for managing Solana CLI config files
|
||||
- [`solana-accountsdb-plugin-interface`](https://docs.rs/solana-accountsdb-plugin-interface/) - Rust interface for developing Solana AccountsDb plugins.
|
||||
|
||||
Patch releases:
|
||||
|
||||
|
@@ -2,331 +2,4 @@
|
||||
title: Web3 JavaScript API
|
||||
---
|
||||
|
||||
## What is Solana-Web3.js?
|
||||
|
||||
The Solana-Web3.js library aims to provide complete coverage of Solana. The library was built on top of the [Solana JSON RPC API](https://docs.solana.com/developing/clients/jsonrpc-api).
|
||||
|
||||
## Common Terminology
|
||||
|
||||
| Term | Definition |
|
||||
|-------------|------------------------|
|
||||
| Program | Stateless executable code written to interpret instructions. Programs are capable of performing actions based on the instructions provided. |
|
||||
| Instruction | The smallest unit of a program that a client can include in a transaction. Within its processing code, an instruction may contain one or more cross-program invocations. |
|
||||
| Transaction | One or more instructions signed by the client using one or more Keypairs and executed atomically with only two possible outcomes: success or failure. |
|
||||
|
||||
For the full list of terms, see [Solana terminology](https://docs.solana.com/terminology#cross-program-invocation)
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Installation
|
||||
|
||||
#### yarn
|
||||
|
||||
```bash
|
||||
$ yarn add @solana/web3.js
|
||||
```
|
||||
|
||||
#### npm
|
||||
|
||||
```bash
|
||||
$ npm install --save @solana/web3.js
|
||||
```
|
||||
|
||||
#### Bundle
|
||||
|
||||
```html
|
||||
<!-- Development (un-minified) -->
|
||||
<script src="https://unpkg.com/@solana/web3.js@latest/lib/index.iife.js"></script>
|
||||
|
||||
<!-- Production (minified) -->
|
||||
<script src="https://unpkg.com/@solana/web3.js@latest/lib/index.iife.min.js"></script>
|
||||
```
|
||||
|
||||
### Usage
|
||||
|
||||
#### Javascript
|
||||
|
||||
```javascript
|
||||
const solanaWeb3 = require('@solana/web3.js');
|
||||
console.log(solanaWeb3);
|
||||
```
|
||||
|
||||
|
||||
#### ES6
|
||||
|
||||
```javascript
|
||||
import * as solanaWeb3 from '@solana/web3.js';
|
||||
console.log(solanaWeb3);
|
||||
```
|
||||
|
||||
|
||||
#### Browser Bundle
|
||||
|
||||
```javascript
|
||||
// solanaWeb3 is provided in the global namespace by the bundle script
|
||||
console.log(solanaWeb3);
|
||||
```
|
||||
|
||||
## Quickstart
|
||||
|
||||
### Connecting to a Wallet
|
||||
|
||||
To allow users to use your dApp or application on Solana, they will need to get access to their Keypair. A Keypair is a private key with a matching public key, used to sign transactions.
|
||||
|
||||
There are two ways to obtain a Keypair:
|
||||
1. Generate a new Keypair
|
||||
2. Obtain a Keypair using the secret key
|
||||
|
||||
You can obtain a new Keypair with the following:
|
||||
|
||||
```javascript
|
||||
const {Keypair} = require("@solana/web3.js");
|
||||
|
||||
let keypair = Keypair.generate();
|
||||
```
|
||||
|
||||
This will generate a brand new Keypair for a user to fund and use within your application.
|
||||
|
||||
You can allow entry of the secretKey using a textbox, and obtain the Keypair with `Keypair.fromSecretKey(secretKey)`.
|
||||
|
||||
```javascript
|
||||
const {Keypair} = require("@solana/web3.js");
|
||||
|
||||
let secretKey = Uint8Array.from([
|
||||
202, 171, 192, 129, 150, 189, 204, 241, 142, 71, 205,
|
||||
2, 81, 97, 2, 176, 48, 81, 45, 1, 96, 138,
|
||||
220, 132, 231, 131, 120, 77, 66, 40, 97, 172, 91,
|
||||
245, 84, 221, 157, 190, 9, 145, 176, 130, 25, 43,
|
||||
72, 107, 190, 229, 75, 88, 191, 136, 7, 167, 109,
|
||||
91, 170, 164, 186, 15, 142, 36, 12, 23
|
||||
]);
|
||||
|
||||
let keypair = Keypair.fromSecretKey(secretKey);
|
||||
```
|
||||
|
||||
Many wallets today allow users to bring their Keypair using a variety of extensions or web wallets. The general recommendation is to use wallets, not Keypairs, to sign transactions. The wallet creates a layer of separation between the dApp and the Keypair, ensuring that the dApp never has access to the secret key. You can find ways to connect to external wallets with the [wallet-adapter](https://github.com/solana-labs/wallet-adapter) library.
|
||||
|
||||
### Creating and Sending Transactions
|
||||
|
||||
To interact with programs on Solana, you create, sign, and send transactions to the network. Transactions are collections of instructions with signatures. The order that instructions exist in a transaction determines the order they are executed.
|
||||
|
||||
A transaction in Solana-Web3.js is created using the [`Transaction`](javascript-api.md#Transaction) object and adding desired messages, addresses, or instructions.
|
||||
|
||||
Take the example of a transfer transaction:
|
||||
|
||||
```javascript
|
||||
const {Keypair, Transaction, SystemProgram, LAMPORTS_PER_SOL} = require("@solana/web3.js");
|
||||
|
||||
let fromKeypair = Keypair.generate();
|
||||
let toKeypair = Keypair.generate();
|
||||
let transaction = new Transaction();
|
||||
|
||||
transaction.add(
|
||||
SystemProgram.transfer({
|
||||
fromPubkey: fromKeypair.publicKey,
|
||||
toPubkey: toKeypair.publicKey,
|
||||
lamports: LAMPORTS_PER_SOL
|
||||
})
|
||||
);
|
||||
```
|
||||
|
||||
The above code achieves creating a transaction ready to be signed and broadcasted to the network. The `SystemProgram.transfer` instruction was added to the transaction, containing the amount of lamports to send, and the `to` and `from` public keys.
|
||||
|
||||
All that is left is to sign the transaction with keypair and send it over the network. You can accomplish sending a transaction by using `sendAndConfirmTransaction` if you wish to alert the user or do something after a transaction is finished, or use `sendTransaction` if you don't need to wait for the transaction to be confirmed.
|
||||
|
||||
```javascript
|
||||
const {sendAndConfirmTransaction, clusterApiUrl, Connection} = require("@solana/web3.js");
|
||||
|
||||
let keypair = Keypair.generate();
|
||||
let connection = new Connection(clusterApiUrl('testnet'));
|
||||
|
||||
sendAndConfirmTransaction(
|
||||
connection,
|
||||
transaction,
|
||||
[keypair]
|
||||
);
|
||||
```
|
||||
|
||||
The above code takes in a `TransactionInstruction` using `SystemProgram`, creates a `Transaction`, and sends it over the network. You use `Connection` in order to define with Solana network you are connecting to, namely `mainnet-beta`, `testnet`, or `devnet`.
|
||||
|
||||
### Interacting with Custom Programs
|
||||
|
||||
The previous section visits sending basic transactions. In Solana everything you do interacts with different programs, including the previous section's transfer transaction. At the time of writing programs on Solana are either written in Rust or C.
|
||||
|
||||
Let's look at the `SystemProgram`. The method signature for allocating space in your account on Solana in Rust looks like this:
|
||||
|
||||
```rust
|
||||
pub fn allocate(
|
||||
pubkey: &Pubkey,
|
||||
space: u64
|
||||
) -> Instruction
|
||||
```
|
||||
|
||||
In Solana when you want to interact with a program you must first know all the accounts you will be interacting with.
|
||||
|
||||
You must always provide every account that the program will be interacting within the instruction. Not only that, but you must provide whether or not the account is `isSigner` or `isWritable`.
|
||||
|
||||
In the `allocate` method above, a single account `pubkey` is required, as well as an amount of `space` for allocation. We know that the `allocate` method writes to the account by allocating space within it, making the `pubkey` required to be `isWritable`. `isSigner` is required when you are designating the account that is running the instruction. In this case, the signer is the account calling to allocate space within itself.
|
||||
|
||||
Let's look at how to call this instruction using solana-web3.js:
|
||||
|
||||
```javascript
|
||||
let keypair = web3.Keypair.generate();
|
||||
let payer = web3.Keypair.generate();
|
||||
let connection = new web3.Connection(web3.clusterApiUrl('testnet'));
|
||||
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
payer.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
```
|
||||
|
||||
First, we set up the account Keypair and connection so that we have an account to make allocate on the testnet. We also create a payer Keypair and airdrop some sol so we can pay for the allocate transaction.
|
||||
|
||||
```javascript
|
||||
let allocateTransaction = new web3.Transaction({
|
||||
feePayer: payer.publicKey
|
||||
});
|
||||
let keys = [{pubkey: keypair.publicKey, isSigner: true, isWritable: true}];
|
||||
let params = { space: 100 };
|
||||
```
|
||||
|
||||
We create the transaction `allocateTransaction`, keys, and params objects. `feePayer` is an optional field when creating a transaction that specifies who is paying for the transaction, defaulting to the pubkey of the first signer in the transaction. `keys` represents all accounts that the program's `allocate` function will interact with. Since the `allocate` function also required space, we created `params` to be used later when invoking the `allocate` function.
|
||||
|
||||
```javascript
|
||||
let allocateStruct = {
|
||||
index: 8,
|
||||
layout: struct([
|
||||
u32('instruction'),
|
||||
ns64('space'),
|
||||
])
|
||||
};
|
||||
```
|
||||
|
||||
The above is created using using u32 and ns64 from `@solana/buffer-layout` to facilitate the payload creation. The `allocate` function takes in the parameter `space`. To interact with the function we must provide the data as a Buffer format. The `buffer-layout` library helps with allocating the buffer and encoding it correctly for Rust programs on Solana to interpret.
|
||||
|
||||
Let's break down this struct.
|
||||
|
||||
```javascript
|
||||
{
|
||||
index: 8, /* <-- */
|
||||
layout: struct([
|
||||
u32('instruction'),
|
||||
ns64('space'),
|
||||
])
|
||||
}
|
||||
```
|
||||
|
||||
`index` is set to 8 because the function `allocate` is in the 8th position in the instruction enum for `SystemProgram`.
|
||||
|
||||
```rust
|
||||
/* https://github.com/solana-labs/solana/blob/21bc43ed58c63c827ba4db30426965ef3e807180/sdk/program/src/system_instruction.rs#L142-L305 */
|
||||
pub enum SystemInstruction {
|
||||
/** 0 **/CreateAccount {/**/},
|
||||
/** 1 **/Assign {/**/},
|
||||
/** 2 **/Transfer {/**/},
|
||||
/** 3 **/CreateAccountWithSeed {/**/},
|
||||
/** 4 **/AdvanceNonceAccount,
|
||||
/** 5 **/WithdrawNonceAccount(u64),
|
||||
/** 6 **/InitializeNonceAccount(Pubkey),
|
||||
/** 7 **/AuthorizeNonceAccount(Pubkey),
|
||||
/** 8 **/Allocate {/**/},
|
||||
/** 9 **/AllocateWithSeed {/**/},
|
||||
/** 10 **/AssignWithSeed {/**/},
|
||||
/** 11 **/TransferWithSeed {/**/},
|
||||
}
|
||||
```
|
||||
|
||||
Next up is `u32('instruction')`.
|
||||
|
||||
```javascript
|
||||
{
|
||||
index: 8,
|
||||
layout: struct([
|
||||
u32('instruction'), /* <-- */
|
||||
ns64('space'),
|
||||
])
|
||||
}
|
||||
```
|
||||
|
||||
The `layout` in the allocate struct must always have `u32('instruction')` first when you are using it to call an instruction.
|
||||
|
||||
```javascript
|
||||
{
|
||||
index: 8,
|
||||
layout: struct([
|
||||
u32('instruction'),
|
||||
ns64('space'), /* <-- */
|
||||
])
|
||||
}
|
||||
```
|
||||
|
||||
`ns64('space')` is the argument for the `allocate` function. You can see in the original `allocate` function in Rust that space was of the type `u64`. `u64` is an unsigned 64bit integer. Javascript by default only provides up to 53bit integers. `ns64` comes from `@solana/buffer-layout` to help with type conversions between Rust and Javascript. You can find more type conversions between Rust and Javascript at [solana-labs/buffer-layout](https://github.com/solana-labs/buffer-layout).
|
||||
|
||||
```javascript
|
||||
let data = Buffer.alloc(allocateStruct.layout.span);
|
||||
let layoutFields = Object.assign({instruction: allocateStruct.index}, params);
|
||||
allocateStruct.layout.encode(layoutFields, data);
|
||||
```
|
||||
|
||||
Using the previously created bufferLayout, we can allocate a data buffer. We then assign our params `{ space: 100 }` so that it maps correctly to the layout, and encode it to the data buffer. Now the data is ready to be sent to the program.
|
||||
|
||||
```javascript
|
||||
allocateTransaction.add(new web3.TransactionInstruction({
|
||||
keys,
|
||||
programId: web3.SystemProgram.programId,
|
||||
data,
|
||||
}));
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, allocateTransaction, [payer, keypair]);
|
||||
```
|
||||
|
||||
Finally, we add the transaction instruction with all the account keys, payer, data, and programId and broadcast the transaction to the network.
|
||||
|
||||
The full code can be found below.
|
||||
|
||||
```javascript
|
||||
const {struct, u32, ns64} = require("@solana/buffer-layout");
|
||||
const {Buffer} = require('buffer');
|
||||
const web3 = require("@solana/web3.js");
|
||||
|
||||
let keypair = web3.Keypair.generate();
|
||||
let payer = web3.Keypair.generate();
|
||||
|
||||
let connection = new web3.Connection(web3.clusterApiUrl('testnet'));
|
||||
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
payer.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
|
||||
let allocateTransaction = new web3.Transaction({
|
||||
feePayer: payer.publicKey
|
||||
});
|
||||
let keys = [{pubkey: keypair.publicKey, isSigner: true, isWritable: true}];
|
||||
let params = { space: 100 };
|
||||
|
||||
let allocateStruct = {
|
||||
index: 8,
|
||||
layout: struct([
|
||||
u32('instruction'),
|
||||
ns64('space'),
|
||||
])
|
||||
};
|
||||
|
||||
let data = Buffer.alloc(allocateStruct.layout.span);
|
||||
let layoutFields = Object.assign({instruction: allocateStruct.index}, params);
|
||||
allocateStruct.layout.encode(layoutFields, data);
|
||||
|
||||
allocateTransaction.add(new web3.TransactionInstruction({
|
||||
keys,
|
||||
programId: web3.SystemProgram.programId,
|
||||
data,
|
||||
}));
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, allocateTransaction, [payer, keypair]);
|
||||
```
|
||||
See [solana-web3](https://solana-labs.github.io/solana-web3.js/).
|
||||
|
@@ -1,729 +0,0 @@
|
||||
---
|
||||
title: Web3 API Reference
|
||||
---
|
||||
|
||||
## Web3 API Reference Guide
|
||||
|
||||
## General
|
||||
|
||||
### Connection
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Connection.html)
|
||||
|
||||
Connection is used to interact with the [Solana JSON RPC](https://docs.solana.com/developing/clients/jsonrpc-api). You can use Connection to confirm transactions, get account info, and more.
|
||||
|
||||
You create a connection by defining the JSON RPC cluster endpoint and the desired commitment. Once this is complete, you can use this connection object to interact with any of the Solana JSON RPC API.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const web3 = require("@solana/web3.js");
|
||||
|
||||
let connection = new web3.Connection(web3.clusterApiUrl('devnet'), 'confirmed');
|
||||
|
||||
let slot = await connection.getSlot();
|
||||
console.log(slot);
|
||||
// 93186439
|
||||
|
||||
let blockTime = await connection.getBlockTime(slot);
|
||||
console.log(blockTime);
|
||||
// 1630747045
|
||||
|
||||
let block = await connection.getBlock(slot);
|
||||
console.log(block);
|
||||
|
||||
/*
|
||||
{
|
||||
blockHeight: null,
|
||||
blockTime: 1630747045,
|
||||
blockhash: 'AsFv1aV5DGip9YJHHqVjrGg6EKk55xuyxn2HeiN9xQyn',
|
||||
parentSlot: 93186438,
|
||||
previousBlockhash: '11111111111111111111111111111111',
|
||||
rewards: [],
|
||||
transactions: []
|
||||
}
|
||||
*/
|
||||
|
||||
let slotLeader = await connection.getSlotLeader();
|
||||
console.log(slotLeader);
|
||||
//49AqLYbpJYc2DrzGUAH1fhWJy62yxBxpLEkfJwjKy2jr
|
||||
```
|
||||
|
||||
The above example shows only a few of the methods on Connection. Please see the [source generated docs](https://solana-labs.github.io/solana-web3.js/classes/Connection.html) for the full list.
|
||||
|
||||
### Transaction
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Transaction.html)
|
||||
|
||||
A transaction is used to interact with programs on the Solana blockchain. These transactions are constructed with TransactionInstructions, containing all the accounts possible to interact with, as well as any needed data or program addresses. Each TransactionInstruction consists of keys, data, and a programId. You can do multiple instructions in a single transaction, interacting with multiple programs at once.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const web3 = require('@solana/web3.js');
|
||||
const nacl = require('tweetnacl');
|
||||
|
||||
// Airdrop SOL for paying transactions
|
||||
let payer = web3.Keypair.generate();
|
||||
let connection = new web3.Connection(web3.clusterApiUrl('devnet'), 'confirmed');
|
||||
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
payer.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
|
||||
let toAccount = web3.Keypair.generate();
|
||||
|
||||
// Create Simple Transaction
|
||||
let transaction = new web3.Transaction();
|
||||
|
||||
// Add an instruction to execute
|
||||
transaction.add(web3.SystemProgram.transfer({
|
||||
fromPubkey: payer.publicKey,
|
||||
toPubkey: toAccount.publicKey,
|
||||
lamports: 1000,
|
||||
}));
|
||||
|
||||
// Send and confirm transaction
|
||||
// Note: feePayer is by default the first signer, or payer, if the parameter is not set
|
||||
await web3.sendAndConfirmTransaction(connection, transaction, [payer])
|
||||
|
||||
// Alternatively, manually construct the transaction
|
||||
let recentBlockhash = await connection.getRecentBlockhash();
|
||||
let manualTransaction = new web3.Transaction({
|
||||
recentBlockhash: recentBlockhash.blockhash,
|
||||
feePayer: payer.publicKey
|
||||
});
|
||||
manualTransaction.add(web3.SystemProgram.transfer({
|
||||
fromPubkey: payer.publicKey,
|
||||
toPubkey: toAccount.publicKey,
|
||||
lamports: 1000,
|
||||
}));
|
||||
|
||||
let transactionBuffer = manualTransaction.serializeMessage();
|
||||
let signature = nacl.sign.detached(transactionBuffer, payer.secretKey);
|
||||
|
||||
manualTransaction.addSignature(payer.publicKey, signature);
|
||||
|
||||
let isVerifiedSignature = manualTransaction.verifySignatures();
|
||||
console.log(`The signatures were verified: ${isVerifiedSignature}`)
|
||||
|
||||
// The signatures were verified: true
|
||||
|
||||
let rawTransaction = manualTransaction.serialize();
|
||||
|
||||
await web3.sendAndConfirmRawTransaction(connection, rawTransaction);
|
||||
```
|
||||
|
||||
### Keypair
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Keypair.html)
|
||||
|
||||
The keypair is used to create an account with a public key and secret key within Solana. You can either generate, generate from a seed, or create from a secret key.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const {Keypair} = require("@solana/web3.js")
|
||||
|
||||
let account = Keypair.generate();
|
||||
|
||||
console.log(account.publicKey.toBase58());
|
||||
console.log(account.secretKey);
|
||||
|
||||
// 2DVaHtcdTf7cm18Zm9VV8rKK4oSnjmTkKE6MiXe18Qsb
|
||||
// Uint8Array(64) [
|
||||
// 152, 43, 116, 211, 207, 41, 220, 33, 193, 168, 118,
|
||||
// 24, 176, 83, 206, 132, 47, 194, 2, 203, 186, 131,
|
||||
// 197, 228, 156, 170, 154, 41, 56, 76, 159, 124, 18,
|
||||
// 14, 247, 32, 210, 51, 102, 41, 43, 21, 12, 170,
|
||||
// 166, 210, 195, 188, 60, 220, 210, 96, 136, 158, 6,
|
||||
// 205, 189, 165, 112, 32, 200, 116, 164, 234
|
||||
// ]
|
||||
|
||||
|
||||
let seed = Uint8Array.from([70,60,102,100,70,60,102,100,70,60,102,100,70,60,102,100,70,60,102,100,70,60,102,100,70,60,102,100,70,60,102,100]);
|
||||
let accountFromSeed = Keypair.fromSeed(seed);
|
||||
|
||||
console.log(accountFromSeed.publicKey.toBase58());
|
||||
console.log(accountFromSeed.secretKey);
|
||||
|
||||
// 3LDverZtSC9Duw2wyGC1C38atMG49toPNW9jtGJiw9Ar
|
||||
// Uint8Array(64) [
|
||||
// 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102,
|
||||
// 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, 60,
|
||||
// 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 34,
|
||||
// 164, 6, 12, 9, 193, 196, 30, 148, 122, 175, 11,
|
||||
// 28, 243, 209, 82, 240, 184, 30, 31, 56, 223, 236,
|
||||
// 227, 60, 72, 215, 47, 208, 209, 162, 59
|
||||
// ]
|
||||
|
||||
|
||||
let accountFromSecret = Keypair.fromSecretKey(account.secretKey);
|
||||
|
||||
console.log(accountFromSecret.publicKey.toBase58());
|
||||
console.log(accountFromSecret.secretKey);
|
||||
|
||||
// 2DVaHtcdTf7cm18Zm9VV8rKK4oSnjmTkKE6MiXe18Qsb
|
||||
// Uint8Array(64) [
|
||||
// 152, 43, 116, 211, 207, 41, 220, 33, 193, 168, 118,
|
||||
// 24, 176, 83, 206, 132, 47, 194, 2, 203, 186, 131,
|
||||
// 197, 228, 156, 170, 154, 41, 56, 76, 159, 124, 18,
|
||||
// 14, 247, 32, 210, 51, 102, 41, 43, 21, 12, 170,
|
||||
// 166, 210, 195, 188, 60, 220, 210, 96, 136, 158, 6,
|
||||
// 205, 189, 165, 112, 32, 200, 116, 164, 234
|
||||
// ]
|
||||
```
|
||||
|
||||
Using `generate` generates a random Keypair for use as an account on Solana. Using `fromSeed`, you can generate a Keypair using a deterministic constructor. `fromSecret` creates a Keypair from a secret Uint8array. You can see that the publicKey for the `generate` Keypair and `fromSecret` Keypair are the same because the secret from the `generate` Keypair is used in `fromSecret`.
|
||||
|
||||
**Warning**: Do not use `fromSeed` unless you are creating a seed with high entropy. Do not share your seed. Treat the seed like you would a private key.
|
||||
|
||||
### PublicKey
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/PublicKey.html)
|
||||
|
||||
PublicKey is used throughout `@solana/web3.js` in transactions, keypairs, and programs. You need a PublicKey when listing each account in a transaction and as a general identifier on Solana.
|
||||
|
||||
A PublicKey can be created with a base58 encoded string, buffer, Uint8Array, number, and an array of numbers.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const {Buffer} = require('buffer');
|
||||
const web3 = require('@solana/web3.js');
|
||||
const crypto = require('crypto');
|
||||
|
||||
// Create a PublicKey with a base58 encoded string
|
||||
let base58publicKey = new web3.PublicKey('5xot9PVkphiX2adznghwrAuxGs2zeWisNSxMW6hU6Hkj');
|
||||
console.log(base58publicKey.toBase58());
|
||||
|
||||
// 5xot9PVkphiX2adznghwrAuxGs2zeWisNSxMW6hU6Hkj
|
||||
|
||||
// Create a Program Address
|
||||
let highEntropyBuffer = crypto.randomBytes(31);
|
||||
let programAddressFromKey = await web3.PublicKey.createProgramAddress([highEntropyBuffer.slice(0, 31)], base58publicKey);
|
||||
console.log(`Generated Program Address: ${programAddressFromKey.toBase58()}`);
|
||||
|
||||
// Generated Program Address: 3thxPEEz4EDWHNxo1LpEpsAxZryPAHyvNVXJEJWgBgwJ
|
||||
|
||||
// Find Program address given a PublicKey
|
||||
let validProgramAddress = await web3.PublicKey.findProgramAddress([Buffer.from('', 'utf8')], programAddressFromKey);
|
||||
console.log(`Valid Program Address: ${validProgramAddress}`);
|
||||
|
||||
// Valid Program Address: C14Gs3oyeXbASzwUpqSymCKpEyccfEuSe8VRar9vJQRE,253
|
||||
```
|
||||
|
||||
### SystemProgram
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/SystemProgram.html)
|
||||
|
||||
The SystemProgram grants the ability to create accounts, allocate account data, assign an account to programs, work with nonce accounts, and transfer lamports. You can use the SystemInstruction class to help with decoding and reading individual instructions.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const web3 = require("@solana/web3.js");
|
||||
|
||||
// Airdrop SOL for paying transactions
|
||||
let payer = web3.Keypair.generate();
|
||||
let connection = new web3.Connection(web3.clusterApiUrl('devnet'), 'confirmed');
|
||||
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
payer.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
|
||||
// Allocate Account Data
|
||||
let allocatedAccount = web3.Keypair.generate();
|
||||
let allocateInstruction = web3.SystemProgram.allocate({
|
||||
accountPubkey: allocatedAccount.publicKey,
|
||||
space: 100,
|
||||
})
|
||||
let transaction = new web3.Transaction().add(allocateInstruction);
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, transaction, [payer, allocatedAccount])
|
||||
|
||||
// Create Nonce Account
|
||||
let nonceAccount = web3.Keypair.generate();
|
||||
let minimumAmountForNonceAccount = await connection.getMinimumBalanceForRentExemption(
|
||||
web3.NONCE_ACCOUNT_LENGTH,
|
||||
);
|
||||
let createNonceAccountTransaction = new web3.Transaction().add(
|
||||
web3.SystemProgram.createNonceAccount({
|
||||
fromPubkey: payer.publicKey,
|
||||
noncePubkey: nonceAccount.publicKey,
|
||||
authorizedPubkey: payer.publicKey,
|
||||
lamports: minimumAmountForNonceAccount,
|
||||
}),
|
||||
);
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, createNonceAccountTransaction, [payer, nonceAccount])
|
||||
|
||||
// Advance nonce - Used to create transactions as an account custodian
|
||||
let advanceNonceTransaction = new web3.Transaction().add(
|
||||
web3.SystemProgram.nonceAdvance({
|
||||
noncePubkey: nonceAccount.publicKey,
|
||||
authorizedPubkey: payer.publicKey,
|
||||
}),
|
||||
);
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, advanceNonceTransaction, [payer])
|
||||
|
||||
// Transfer lamports between accounts
|
||||
let toAccount = web3.Keypair.generate();
|
||||
|
||||
let transferTransaction = new web3.Transaction().add(
|
||||
web3.SystemProgram.transfer({
|
||||
fromPubkey: payer.publicKey,
|
||||
toPubkey: toAccount.publicKey,
|
||||
lamports: 1000,
|
||||
}),
|
||||
);
|
||||
await web3.sendAndConfirmTransaction(connection, transferTransaction, [payer])
|
||||
|
||||
// Assign a new account to a program
|
||||
let programId = web3.Keypair.generate();
|
||||
let assignedAccount = web3.Keypair.generate();
|
||||
|
||||
let assignTransaction = new web3.Transaction().add(
|
||||
web3.SystemProgram.assign({
|
||||
accountPubkey: assignedAccount.publicKey,
|
||||
programId: programId.publicKey,
|
||||
}),
|
||||
);
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, assignTransaction, [payer, assignedAccount]);
|
||||
```
|
||||
|
||||
### Secp256k1Program
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Secp256k1Program.html)
|
||||
|
||||
The Secp256k1Program is used to verify Secp256k1 signatures, which are used by both Bitcoin and Ethereum.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const {keccak_256} = require('js-sha3');
|
||||
const web3 = require("@solana/web3.js");
|
||||
const secp256k1 = require('secp256k1');
|
||||
|
||||
// Create a Ethereum Address from secp256k1
|
||||
let secp256k1PrivateKey;
|
||||
do {
|
||||
secp256k1PrivateKey = web3.Keypair.generate().secretKey.slice(0, 32);
|
||||
} while (!secp256k1.privateKeyVerify(secp256k1PrivateKey));
|
||||
|
||||
let secp256k1PublicKey = secp256k1.publicKeyCreate(secp256k1PrivateKey, false).slice(1);
|
||||
|
||||
let ethAddress = web3.Secp256k1Program.publicKeyToEthAddress(secp256k1PublicKey);
|
||||
console.log(`Ethereum Address: 0x${ethAddress.toString('hex')}`);
|
||||
|
||||
// Ethereum Address: 0xadbf43eec40694eacf36e34bb5337fba6a2aa8ee
|
||||
|
||||
// Fund a keypair to create instructions
|
||||
let fromPublicKey = web3.Keypair.generate();
|
||||
let connection = new web3.Connection(web3.clusterApiUrl('devnet'), 'confirmed');
|
||||
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
fromPublicKey.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
|
||||
// Sign Message with Ethereum Key
|
||||
let plaintext = Buffer.from('string address');
|
||||
let plaintextHash = Buffer.from(keccak_256.update(plaintext).digest());
|
||||
let {signature, recid: recoveryId} = secp256k1.ecdsaSign(
|
||||
plaintextHash,
|
||||
secp256k1PrivateKey
|
||||
);
|
||||
|
||||
// Create transaction to verify the signature
|
||||
let transaction = new Transaction().add(
|
||||
web3.Secp256k1Program.createInstructionWithEthAddress({
|
||||
ethAddress: ethAddress.toString('hex'),
|
||||
plaintext,
|
||||
signature,
|
||||
recoveryId,
|
||||
}),
|
||||
);
|
||||
|
||||
// Transaction will succeed if the message is verified to be signed by the address
|
||||
await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey]);
|
||||
```
|
||||
|
||||
### Message
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Message.html)
|
||||
|
||||
Message is used as another way to construct transactions. You can construct a message using the accounts, header, instructions, and recentBlockhash that are a part of a transaction. A [Transaction](https://solana-labs.github.io/solana-web3.js/classes/Transaction.html) is a Message plus the list of required signatures required to execute the transaction.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const {Buffer} = require("buffer");
|
||||
const bs58 = require('bs58');
|
||||
const web3 = require('@solana/web3.js');
|
||||
|
||||
let toPublicKey = web3.Keypair.generate().publicKey;
|
||||
let fromPublicKey = web3.Keypair.generate();
|
||||
|
||||
let connection = new web3.Connection(
|
||||
web3.clusterApiUrl('devnet'),
|
||||
'confirmed'
|
||||
);
|
||||
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
fromPublicKey.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
|
||||
let type = web3.SYSTEM_INSTRUCTION_LAYOUTS.Transfer;
|
||||
let data = Buffer.alloc(type.layout.span);
|
||||
let layoutFields = Object.assign({instruction: type.index});
|
||||
type.layout.encode(layoutFields, data);
|
||||
|
||||
let recentBlockhash = await connection.getRecentBlockhash();
|
||||
|
||||
let messageParams = {
|
||||
accountKeys: [
|
||||
fromPublicKey.publicKey.toString(),
|
||||
toPublicKey.toString(),
|
||||
web3.SystemProgram.programId.toString()
|
||||
],
|
||||
header: {
|
||||
numReadonlySignedAccounts: 0,
|
||||
numReadonlyUnsignedAccounts: 1,
|
||||
numRequiredSignatures: 1,
|
||||
},
|
||||
instructions: [
|
||||
{
|
||||
accounts: [0, 1],
|
||||
data: bs58.encode(data),
|
||||
programIdIndex: 2,
|
||||
},
|
||||
],
|
||||
recentBlockhash,
|
||||
};
|
||||
|
||||
let message = new web3.Message(messageParams);
|
||||
|
||||
let transaction = web3.Transaction.populate(
|
||||
message,
|
||||
[fromPublicKey.publicKey.toString()]
|
||||
);
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey])
|
||||
```
|
||||
|
||||
### Struct
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Struct.html)
|
||||
|
||||
The struct class is used to create Rust compatible structs in javascript. This class is only compatible with Borsh encoded Rust structs.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
Struct in Rust:
|
||||
```rust
|
||||
pub struct Fee {
|
||||
pub denominator: u64,
|
||||
pub numerator: u64,
|
||||
}
|
||||
```
|
||||
|
||||
Using web3:
|
||||
```javascript
|
||||
import BN from 'bn.js';
|
||||
import {Struct} from '@solana/web3.js';
|
||||
|
||||
export class Fee extends Struct {
|
||||
denominator: BN;
|
||||
numerator: BN;
|
||||
}
|
||||
```
|
||||
|
||||
### Enum
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Enum.html)
|
||||
|
||||
The Enum class is used to represent a Rust compatible Enum in javascript. The enum will just be a string representation if logged but can be properly encoded/decoded when used in conjunction with [Struct](https://solana-labs.github.io/solana-web3.js/classes/Struct.html). This class is only compatible with Borsh encoded Rust enumerations.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
Rust:
|
||||
```rust
|
||||
pub enum AccountType {
|
||||
Uninitialized,
|
||||
StakePool,
|
||||
ValidatorList,
|
||||
}
|
||||
```
|
||||
|
||||
Web3:
|
||||
```javascript
|
||||
import {Enum} from '@solana/web3.js';
|
||||
|
||||
export class AccountType extends Enum {}
|
||||
```
|
||||
|
||||
### NonceAccount
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/NonceAccount.html)
|
||||
|
||||
Normally a transaction is rejected if a transaction's `recentBlockhash` field is too old. To provide for certain custodial services, Nonce Accounts are used. Transactions which use a `recentBlockhash` captured on-chain by a Nonce Account do not expire as long as the Nonce Account is not advanced.
|
||||
|
||||
You can create a nonce account by first creating a normal account, then using `SystemProgram` to make the account a Nonce Account.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const web3 = require('@solana/web3.js');
|
||||
|
||||
// Create connection
|
||||
let connection = new web3.Connection(
|
||||
web3.clusterApiUrl('devnet'),
|
||||
'confirmed',
|
||||
);
|
||||
|
||||
// Generate accounts
|
||||
let account = web3.Keypair.generate();
|
||||
let nonceAccount = web3.Keypair.generate();
|
||||
|
||||
// Fund account
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
account.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
|
||||
// Get Minimum amount for rent exemption
|
||||
let minimumAmount = await connection.getMinimumBalanceForRentExemption(
|
||||
web3.NONCE_ACCOUNT_LENGTH,
|
||||
);
|
||||
|
||||
// Form CreateNonceAccount transaction
|
||||
let transaction = new web3.Transaction().add(
|
||||
web3.SystemProgram.createNonceAccount({
|
||||
fromPubkey: account.publicKey,
|
||||
noncePubkey: nonceAccount.publicKey,
|
||||
authorizedPubkey: account.publicKey,
|
||||
lamports: minimumAmount,
|
||||
}),
|
||||
);
|
||||
// Create Nonce Account
|
||||
await web3.sendAndConfirmTransaction(
|
||||
connection,
|
||||
transaction,
|
||||
[account, nonceAccount]
|
||||
);
|
||||
|
||||
let nonceAccountData = await connection.getNonce(
|
||||
nonceAccount.publicKey,
|
||||
'confirmed',
|
||||
);
|
||||
|
||||
console.log(nonceAccountData);
|
||||
// NonceAccount {
|
||||
// authorizedPubkey: PublicKey {
|
||||
// _bn: <BN: 919981a5497e8f85c805547439ae59f607ea625b86b1138ea6e41a68ab8ee038>
|
||||
// },
|
||||
// nonce: '93zGZbhMmReyz4YHXjt2gHsvu5tjARsyukxD4xnaWaBq',
|
||||
// feeCalculator: { lamportsPerSignature: 5000 }
|
||||
// }
|
||||
|
||||
let nonceAccountInfo = await connection.getAccountInfo(
|
||||
nonceAccount.publicKey,
|
||||
'confirmed'
|
||||
);
|
||||
|
||||
let nonceAccountFromInfo = web3.NonceAccount.fromAccountData(
|
||||
nonceAccountInfo.data
|
||||
);
|
||||
|
||||
console.log(nonceAccountFromInfo);
|
||||
// NonceAccount {
|
||||
// authorizedPubkey: PublicKey {
|
||||
// _bn: <BN: 919981a5497e8f85c805547439ae59f607ea625b86b1138ea6e41a68ab8ee038>
|
||||
// },
|
||||
// nonce: '93zGZbhMmReyz4YHXjt2gHsvu5tjARsyukxD4xnaWaBq',
|
||||
// feeCalculator: { lamportsPerSignature: 5000 }
|
||||
// }
|
||||
```
|
||||
|
||||
The above example shows both how to create a `NonceAccount` using `SystemProgram.createNonceAccount`, as well as how to retrieve the `NonceAccount` from accountInfo. Using the nonce, you can create transactions offline with the nonce in place of the `recentBlockhash`.
|
||||
|
||||
### VoteAccount
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/VoteAccount.html)
|
||||
|
||||
Vote account is an object that grants the capability of decoding vote accounts from the native vote account program on the network.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const web3 = require('@solana/web3.js');
|
||||
|
||||
let voteAccountInfo = await connection.getProgramAccounts(web3.VOTE_PROGRAM_ID);
|
||||
let voteAccountFromData = web3.VoteAccount.fromAccountData(voteAccountInfo[0].account.data);
|
||||
console.log(voteAccountFromData);
|
||||
/*
|
||||
VoteAccount {
|
||||
nodePubkey: PublicKey {
|
||||
_bn: <BN: 100000096c4e1e61a6393e51937e548aee2c062e23c67cbaa8d04f289d18232>
|
||||
},
|
||||
authorizedVoterPubkey: PublicKey {
|
||||
_bn: <BN: 5862b94396c4e1e61a6393e51937e548aee2c062e23c67cbaa8d04f289d18232>
|
||||
},
|
||||
authorizedWithdrawerPubkey: PublicKey {
|
||||
_bn: <BN: 5862b9430a0800000000000000cb136204000000001f000000cc136204000000>
|
||||
},
|
||||
commission: 0,
|
||||
votes: [
|
||||
{ slot: 124554051584, confirmationCount: 73536462 },
|
||||
{ slot: 120259084288, confirmationCount: 73536463 },
|
||||
{ slot: 115964116992, confirmationCount: 73536464 },
|
||||
{ slot: 111669149696, confirmationCount: 73536465 },
|
||||
{ slot: 107374182400, confirmationCount: 96542804 },
|
||||
{ slot: 4294967296, confirmationCount: 1645464065 },
|
||||
{ slot: 1099511627780, confirmationCount: 0 },
|
||||
{ slot: 57088, confirmationCount: 3787757056 },
|
||||
{ slot: 16516698632215534000, confirmationCount: 3236081224 },
|
||||
{ slot: 328106138455040640, confirmationCount: 2194770418 },
|
||||
{ slot: 290873038898, confirmationCount: 0 },
|
||||
],
|
||||
rootSlot: null,
|
||||
epoch: 0,
|
||||
credits: 0,
|
||||
lastEpochCredits: 0,
|
||||
epochCredits: []
|
||||
}
|
||||
*/
|
||||
```
|
||||
|
||||
## Staking
|
||||
|
||||
### StakeProgram
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/StakeProgram.html)
|
||||
|
||||
The StakeProgram facilitates staking SOL and delegating them to any validators on the network. You can use StakeProgram to create a stake account, stake some SOL, authorize accounts for withdrawal of your stake, deactivate your stake, and withdraw your funds. The StakeInstruction class is used to decode and read more instructions from transactions calling the StakeProgram.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const web3 = require("@solana/web3.js");
|
||||
|
||||
// Fund a key to create transactions
|
||||
let fromPublicKey = web3.Keypair.generate();
|
||||
let connection = new web3.Connection(web3.clusterApiUrl('devnet'), 'confirmed');
|
||||
|
||||
let airdropSignature = await connection.requestAirdrop(
|
||||
fromPublicKey.publicKey,
|
||||
web3.LAMPORTS_PER_SOL,
|
||||
);
|
||||
await connection.confirmTransaction(airdropSignature);
|
||||
|
||||
// Create Account
|
||||
let stakeAccount = web3.Keypair.generate();
|
||||
let authorizedAccount = web3.Keypair.generate();
|
||||
/* Note: This is the minimum amount for a stake account -- Add additional Lamports for staking
|
||||
For example, we add 50 lamports as part of the stake */
|
||||
let lamportsForStakeAccount = (await connection.getMinimumBalanceForRentExemption(web3.StakeProgram.space)) + 50;
|
||||
|
||||
let createAccountTransaction = web3.StakeProgram.createAccount({
|
||||
fromPubkey: fromPublicKey.publicKey,
|
||||
authorized: new web3.Authorized(authorizedAccount.publicKey, authorizedAccount.publicKey),
|
||||
lamports: lamportsForStakeAccount,
|
||||
lockup: new web3.Lockup(0, 0, fromPublicKey.publicKey),
|
||||
stakePubkey: stakeAccount.publicKey
|
||||
});
|
||||
await web3.sendAndConfirmTransaction(connection, createAccountTransaction, [fromPublicKey, stakeAccount]);
|
||||
|
||||
// Check that stake is available
|
||||
let stakeBalance = await connection.getBalance(stakeAccount.publicKey);
|
||||
console.log(`Stake balance: ${stakeBalance}`)
|
||||
// Stake balance: 2282930
|
||||
|
||||
// We can verify the state of our stake. This may take some time to become active
|
||||
let stakeState = await connection.getStakeActivation(stakeAccount.publicKey);
|
||||
console.log(`Stake State: ${stakeState.state}`);
|
||||
// Stake State: inactive
|
||||
|
||||
// To delegate our stake, we get the current vote accounts and choose the first
|
||||
let voteAccounts = await connection.getVoteAccounts();
|
||||
let voteAccount = voteAccounts.current.concat(
|
||||
voteAccounts.delinquent,
|
||||
)[0];
|
||||
let votePubkey = new web3.PublicKey(voteAccount.votePubkey);
|
||||
|
||||
// We can then delegate our stake to the voteAccount
|
||||
let delegateTransaction = web3.StakeProgram.delegate({
|
||||
stakePubkey: stakeAccount.publicKey,
|
||||
authorizedPubkey: authorizedAccount.publicKey,
|
||||
votePubkey: votePubkey,
|
||||
});
|
||||
await web3.sendAndConfirmTransaction(connection, delegateTransaction, [fromPublicKey, authorizedAccount]);
|
||||
|
||||
// To withdraw our funds, we first have to deactivate the stake
|
||||
let deactivateTransaction = web3.StakeProgram.deactivate({
|
||||
stakePubkey: stakeAccount.publicKey,
|
||||
authorizedPubkey: authorizedAccount.publicKey,
|
||||
});
|
||||
await web3.sendAndConfirmTransaction(connection, deactivateTransaction, [fromPublicKey, authorizedAccount]);
|
||||
|
||||
// Once deactivated, we can withdraw our funds
|
||||
let withdrawTransaction = web3.StakeProgram.withdraw({
|
||||
stakePubkey: stakeAccount.publicKey,
|
||||
authorizedPubkey: authorizedAccount.publicKey,
|
||||
toPubkey: fromPublicKey.publicKey,
|
||||
lamports: stakeBalance,
|
||||
});
|
||||
|
||||
await web3.sendAndConfirmTransaction(connection, withdrawTransaction, [fromPublicKey, authorizedAccount]);
|
||||
```
|
||||
|
||||
### Authorized
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Authorized.html)
|
||||
|
||||
Authorized is an object used when creating an authorized account for staking within Solana. You can designate a `staker` and `withdrawer` separately, allowing for a different account to withdraw other than the staker.
|
||||
|
||||
You can find more usage of the `Authorized` object under [`StakeProgram`](https://solana-labs.github.io/solana-web3.js/classes/StakeProgram.html)
|
||||
|
||||
### Lockup
|
||||
|
||||
[Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Lockup.html)
|
||||
|
||||
Lockup is used in conjunction with the [StakeProgram](https://solana-labs.github.io/solana-web3.js/classes/StakeProgram.html) to create an account. The Lockup is used to determine how long the stake will be locked, or unable to be retrieved. If the Lockup is set to 0 for both epoch and the Unix timestamp, the lockup will be disabled for the stake account.
|
||||
|
||||
#### Example Usage
|
||||
|
||||
```javascript
|
||||
const {Authorized, Keypair, Lockup, StakeProgram} = require("@solana/web3.js");
|
||||
|
||||
let account = Keypair.generate();
|
||||
let stakeAccount = Keypair.generate();
|
||||
let authorized = new Authorized(account.publicKey, account.publicKey);
|
||||
let lockup = new Lockup(0, 0, account.publicKey);
|
||||
|
||||
let createStakeAccountInstruction = StakeProgram.createAccount({
|
||||
fromPubkey: account.publicKey,
|
||||
authorized: authorized,
|
||||
lamports: 1000,
|
||||
lockup: lockup,
|
||||
stakePubkey: stakeAccount.publicKey
|
||||
});
|
||||
```
|
||||
The above code creates a `createStakeAccountInstruction` to be used when creating an account with the `StakeProgram`. The Lockup is set to 0 for both the epoch and Unix timestamp, disabling lockup for the account.
|
||||
|
||||
See [StakeProgram](https://solana-labs.github.io/solana-web3.js/classes/StakeProgram.html) for more.
|
@@ -873,12 +873,12 @@ None
|
||||
The result field will be an array of JSON objects, each with the following sub fields:
|
||||
|
||||
- `pubkey: <string>` - Node public key, as base-58 encoded string
|
||||
- `gossip: <string | null>` - Gossip network address for the node
|
||||
- `tpu: <string | null>` - TPU network address for the node
|
||||
- `rpc: <string | null>` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
|
||||
- `version: <string | null>` - The software version of the node, or `null` if the version information is not available
|
||||
- `featureSet: <u32 | null >` - The unique identifier of the node's feature set
|
||||
- `shredVersion: <u16 | null>` - The shred version the node has been configured to use
|
||||
- `gossip: <string>` - Gossip network address for the node
|
||||
- `tpu: <string>` - TPU network address for the node
|
||||
- `rpc: <string>|null` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
|
||||
- `version: <string>|null` - The software version of the node, or `null` if the version information is not available
|
||||
- `featureSet: <number>|null` - The unique identifier of the node's feature set
|
||||
- `shredVersion: <number>|null` - The shred version the node has been configured to use
|
||||
|
||||
#### Example:
|
||||
|
||||
@@ -923,7 +923,6 @@ The result field will be an object with the following fields:
|
||||
- `epoch: <u64>`, the current epoch
|
||||
- `slotIndex: <u64>`, the current slot relative to the start of the current epoch
|
||||
- `slotsInEpoch: <u64>`, the number of slots in this epoch
|
||||
- `transactionCount: <u64 | null>`, total number of transactions processed without error since genesis
|
||||
|
||||
#### Example:
|
||||
|
||||
@@ -943,8 +942,7 @@ Result:
|
||||
"blockHeight": 166500,
|
||||
"epoch": 27,
|
||||
"slotIndex": 2790,
|
||||
"slotsInEpoch": 8192,
|
||||
"transactionCount": 22661093
|
||||
"slotsInEpoch": 8192
|
||||
},
|
||||
"id": 1
|
||||
}
|
||||
@@ -1342,7 +1340,7 @@ The result field will be a JSON object with the following fields:
|
||||
- `total: <f64>`, total inflation
|
||||
- `validator: <f64>`, inflation allocated to validators
|
||||
- `foundation: <f64>`, inflation allocated to the foundation
|
||||
- `epoch: <u64>`, epoch for which these values are valid
|
||||
- `epoch: <f64>`, epoch for which these values are valid
|
||||
|
||||
#### Example:
|
||||
|
||||
@@ -2571,41 +2569,37 @@ Result:
|
||||
},
|
||||
"value": [
|
||||
{
|
||||
"account": {
|
||||
"data": {
|
||||
"program": "spl-token",
|
||||
"parsed": {
|
||||
"info": {
|
||||
"tokenAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1"
|
||||
},
|
||||
"delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
|
||||
"delegatedAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1"
|
||||
},
|
||||
"state": "initialized",
|
||||
"isNative": false,
|
||||
"mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E",
|
||||
"owner": "CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD"
|
||||
"data": {
|
||||
"program": "spl-token",
|
||||
"parsed": {
|
||||
"info": {
|
||||
"tokenAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1",
|
||||
},
|
||||
"type": "account"
|
||||
"delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
|
||||
"delegatedAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1",
|
||||
},
|
||||
"state": "initialized",
|
||||
"isNative": false,
|
||||
"mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E",
|
||||
"owner": "CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD"
|
||||
},
|
||||
"space": 165
|
||||
"type": "account"
|
||||
},
|
||||
"executable": false,
|
||||
"lamports": 1726080,
|
||||
"owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA",
|
||||
"rentEpoch": 4
|
||||
"space": 165
|
||||
},
|
||||
"pubkey": "28YTZEwqtMHWrhWcvv34se7pjS7wctgqzCPB3gReCFKp"
|
||||
"executable": false,
|
||||
"lamports": 1726080,
|
||||
"owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA",
|
||||
"rentEpoch": 4
|
||||
}
|
||||
|
||||
]
|
||||
},
|
||||
"id": 1
|
||||
@@ -2673,40 +2667,37 @@ Result:
|
||||
},
|
||||
"value": [
|
||||
{
|
||||
"account": {
|
||||
"data": {
|
||||
"program": "spl-token",
|
||||
"parsed": {
|
||||
"accountType": "account",
|
||||
"info": {
|
||||
"tokenAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1"
|
||||
},
|
||||
"delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
|
||||
"delegatedAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1"
|
||||
},
|
||||
"state": "initialized",
|
||||
"isNative": false,
|
||||
"mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E",
|
||||
"owner": "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F"
|
||||
},
|
||||
"type": "account"
|
||||
"data": {
|
||||
"program": "spl-token",
|
||||
"parsed": {
|
||||
"accountType": "account",
|
||||
"info": {
|
||||
"tokenAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1",
|
||||
},
|
||||
"space": 165
|
||||
"delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
|
||||
"delegatedAmount": {
|
||||
"amount": "1",
|
||||
"decimals": 1,
|
||||
"uiAmount": 0.1,
|
||||
"uiAmountString": "0.1",
|
||||
},
|
||||
"state": "initialized",
|
||||
"isNative": false,
|
||||
"mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E",
|
||||
"owner": "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F"
|
||||
},
|
||||
"executable": false,
|
||||
"lamports": 1726080,
|
||||
"owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA",
|
||||
"rentEpoch": 4
|
||||
"type": "account"
|
||||
},
|
||||
"space": 165
|
||||
},
|
||||
"pubkey": "C2gJg6tKpQs41PRS1nC8aw3ZKNZK3HQQZGVrDFDup5nx"
|
||||
"executable": false,
|
||||
"lamports": 1726080,
|
||||
"owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA",
|
||||
"rentEpoch": 4
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -3048,7 +3039,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
|
||||
|
||||
Result:
|
||||
```json
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.8.2"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.7.17"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
|
@@ -2,30 +2,7 @@
|
||||
title: Rust API
|
||||
---
|
||||
|
||||
Solana's Rust crates are [published to crates.io][crates.io] and can be found
|
||||
[on docs.rs with the "solana-" prefix][docs.rs].
|
||||
|
||||
[crates.io]: https://crates.io/search?q=solana-
|
||||
[docs.rs]: https://docs.rs/releases/search?query=solana-
|
||||
|
||||
Some important crates:
|
||||
|
||||
- [`solana-program`] — Imported by programs running on Solana, compiled
|
||||
to BPF. This crate contains many fundamental data types and is re-exported from
|
||||
[`solana-sdk`], which cannot be imported from a Solana program.
|
||||
|
||||
- [`solana-sdk`] — The basic off-chain SDK, it re-exports
|
||||
[`solana-program`] and adds more APIs on top of that. Most Solana programs
|
||||
that do not run on-chain will import this.
|
||||
|
||||
- [`solana-client`] — For interacting with a Solana node via the
|
||||
[JSON RPC API](jsonrpc-api).
|
||||
|
||||
- [`solana-clap-utils`] — Routines for setting up a CLI, using [`clap`],
|
||||
as used by the main Solana CLI.
|
||||
|
||||
[`solana-program`]: https://docs.rs/solana-program
|
||||
[`solana-sdk`]: https://docs.rs/solana-sdk
|
||||
[`solana-client`]: https://docs.rs/solana-client
|
||||
[`solana-clap-utils`]: https://docs.rs/solana-clap-utils
|
||||
[`clap`]: https://docs.rs/clap
|
||||
See [doc.rs](https://docs.rs/releases/search?query=solana-) for documentation of
|
||||
all crates published by Solana. In particular [solana-sdk](https://docs.rs/solana-sdk)
|
||||
for working with common data structures and [solana-client](https://docs.rs/solana-client)
|
||||
for querying the [JSON RPC API](jsonrpc-api).
|
||||
|
@@ -33,19 +33,19 @@ Solana Rust programs may depend directly on each other in order to gain access
|
||||
to instruction helpers when making [cross-program invocations](developing/programming-model/calling-between-programs.md#cross-program-invocations).
|
||||
When doing so it's important to not pull in the dependent program's entrypoint
|
||||
symbols because they may conflict with the program's own. To avoid this,
|
||||
programs should define a `no-entrypoint` feature in `Cargo.toml` and use it
|
||||
programs should define an `exclude_entrypoint` feature in `Cargo.toml` and use
|
||||
to exclude the entrypoint.
|
||||
|
||||
- [Define the
|
||||
feature](https://github.com/solana-labs/solana-program-library/blob/fca9836a2c8e18fc7e3595287484e9acd60a8f64/token/program/Cargo.toml#L12)
|
||||
feature](https://github.com/solana-labs/solana-program-library/blob/a5babd6cbea0d3f29d8c57d2ecbbd2a2bd59c8a9/token/program/Cargo.toml#L12)
|
||||
- [Exclude the
|
||||
entrypoint](https://github.com/solana-labs/solana-program-library/blob/fca9836a2c8e18fc7e3595287484e9acd60a8f64/token/program/src/lib.rs#L12)
|
||||
entrypoint](https://github.com/solana-labs/solana-program-library/blob/a5babd6cbea0d3f29d8c57d2ecbbd2a2bd59c8a9/token/program/src/lib.rs#L12)
|
||||
|
||||
Then when other programs include this program as a dependency, they should do so
|
||||
using the `no-entrypoint` feature.
|
||||
using the `exclude_entrypoint` feature.
|
||||
|
||||
- [Include without
|
||||
entrypoint](https://github.com/solana-labs/solana-program-library/blob/fca9836a2c8e18fc7e3595287484e9acd60a8f64/token-swap/program/Cargo.toml#L22)
|
||||
entrypoint](https://github.com/solana-labs/solana-program-library/blob/a5babd6cbea0d3f29d8c57d2ecbbd2a2bd59c8a9/token-swap/program/Cargo.toml#L19)
|
||||
|
||||
## Project Dependencies
|
||||
|
||||
@@ -115,9 +115,9 @@ Programs must be written for and deployed to the same loader. For more details
|
||||
see the [overview](overview#loaders).
|
||||
|
||||
Currently there are two supported loaders [BPF
|
||||
Loader](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/bpf_loader.rs#L17)
|
||||
Loader](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader.rs#L17)
|
||||
and [BPF loader
|
||||
deprecated](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/bpf_loader_deprecated.rs#L14)
|
||||
deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14)
|
||||
|
||||
They both have the same raw entrypoint definition, the following is the raw
|
||||
symbol that the runtime looks up and calls:
|
||||
@@ -136,9 +136,9 @@ processing function, and returns the results.
|
||||
You can find the entrypoint macros here:
|
||||
|
||||
- [BPF Loader's entrypoint
|
||||
macro](https://github.com/solana-labs/solana/blob/9b1199cdb1b391b00d510ed7fc4866bdf6ee4eb3/sdk/program/src/entrypoint.rs#L42)
|
||||
macro](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/entrypoint.rs#L46)
|
||||
- [BPF Loader deprecated's entrypoint
|
||||
macro](https://github.com/solana-labs/solana/blob/9b1199cdb1b391b00d510ed7fc4866bdf6ee4eb3/sdk/program/src/entrypoint_deprecated.rs#L38)
|
||||
macro](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/entrypoint_deprecated.rs#L37)
|
||||
|
||||
The program defined instruction processing function that the entrypoint macros
|
||||
call must be of this form:
|
||||
@@ -149,7 +149,7 @@ pub type ProcessInstruction =
|
||||
```
|
||||
|
||||
Refer to [helloworld's use of the
|
||||
entrypoint](https://github.com/solana-labs/example-helloworld/blob/1e049076e10be8712b1a725d2d886ce0cd036b2e/src/program-rust/src/lib.rs#L19)
|
||||
entrypoint](https://github.com/solana-labs/example-helloworld/blob/c1a7247d87cd045f574ed49aec5d160aefc45cf2/src/program-rust/src/lib.rs#L15)
|
||||
as an example of how things fit together.
|
||||
|
||||
### Parameter Deserialization
|
||||
@@ -159,9 +159,9 @@ parameters into Rust types. The entrypoint macros automatically calls the
|
||||
deserialization helper:
|
||||
|
||||
- [BPF Loader
|
||||
deserialization](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint.rs#L146)
|
||||
deserialization](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/entrypoint.rs#L104)
|
||||
- [BPF Loader deprecated
|
||||
deserialization](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint_deprecated.rs#L57)
|
||||
deserialization](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/entrypoint_deprecated.rs#L56)
|
||||
|
||||
Some programs may want to perform deserialization themselves and they can by
|
||||
providing their own implementation of the [raw entrypoint](#program-entrypoint).
|
||||
@@ -190,7 +190,7 @@ The program id is the public key of the currently executing program.
|
||||
|
||||
The accounts is an ordered slice of the accounts referenced by the instruction
|
||||
and represented as an
|
||||
[AccountInfo](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/account_info.rs#L12)
|
||||
[AccountInfo](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/account_info.rs#L10)
|
||||
structures. An account's place in the array signifies its meaning, for example,
|
||||
when transferring lamports an instruction may define the first account as the
|
||||
source and the second as the destination.
|
||||
@@ -214,7 +214,7 @@ being processed.
|
||||
## Heap
|
||||
|
||||
Rust programs implement the heap directly by defining a custom
|
||||
[`global_allocator`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint.rs#L72)
|
||||
[`global_allocator`](https://github.com/solana-labs/solana/blob/8330123861a719cd7a79af0544617896e7f00ce3/sdk/program/src/entrypoint.rs#L50)
|
||||
|
||||
Programs may implement their own `global_allocator` based on its specific needs.
|
||||
Refer to the [custom heap example](#examples) for more information.
|
||||
@@ -288,7 +288,7 @@ getrandom = { version = "0.2.2", features = ["custom"] }
|
||||
|
||||
Rust's `println!` macro is computationally expensive and not supported. Instead
|
||||
the helper macro
|
||||
[`msg!`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/log.rs#L33)
|
||||
[`msg!`](https://github.com/solana-labs/solana/blob/6705b5a98c076ac08f3991bb8a6f9fcb280bf51e/sdk/program/src/log.rs#L33)
|
||||
is provided.
|
||||
|
||||
`msg!` has two forms:
|
||||
@@ -375,7 +375,7 @@ fn custom_panic(info: &core::panic::PanicInfo<'_>) {
|
||||
## Compute Budget
|
||||
|
||||
Use the system call
|
||||
[`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/log.rs#L141)
|
||||
[`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/program/src/log.rs#L102)
|
||||
to log a message containing the remaining number of compute units the program
|
||||
may consume before execution is halted
|
||||
|
||||
|
@@ -65,51 +65,6 @@ to the BPF Upgradeable Loader to process the instruction.
|
||||
|
||||
[More information about deployment](cli/deploy-a-program.md)
|
||||
|
||||
## Ed25519 Program
|
||||
|
||||
Verify ed25519 signature program. This program takes an ed25519 signature, public key, and message.
|
||||
Multiple signatures can be verified. If any of the signatures fail to verify, an error is returned.
|
||||
|
||||
- Program id: `Ed25519SigVerify111111111111111111111111111`
|
||||
- Instructions: [new_ed25519_instruction](https://github.com/solana-labs/solana/blob/master/sdk/src/ed25519_instruction.rs#L45)
|
||||
|
||||
The ed25519 program processes an instruction. The first `u8` is a count of the number of
|
||||
signatures to check, which is followed by a single byte padding. After that, the
|
||||
following struct is serialized, one for each signature to check.
|
||||
|
||||
```
|
||||
struct Ed25519SignatureOffsets {
|
||||
signature_offset: u16, // offset to ed25519 signature of 64 bytes
|
||||
signature_instruction_index: u16, // instruction index to find signature
|
||||
public_key_offset: u16, // offset to public key of 32 bytes
|
||||
public_key_instruction_index: u16, // instruction index to find public key
|
||||
message_data_offset: u16, // offset to start of message data
|
||||
message_data_size: u16, // size of message data
|
||||
message_instruction_index: u16, // index of instruction data to get message data
|
||||
}
|
||||
```
|
||||
|
||||
Pseudo code of the operation:
|
||||
|
||||
```
|
||||
process_instruction() {
|
||||
for i in 0..count {
|
||||
// i'th index values referenced:
|
||||
instructions = &transaction.message().instructions
|
||||
instruction_index = ed25519_signature_instruction_index != u16::MAX ? ed25519_signature_instruction_index : current_instruction;
|
||||
signature = instructions[instruction_index].data[ed25519_signature_offset..ed25519_signature_offset + 64]
|
||||
instruction_index = ed25519_pubkey_instruction_index != u16::MAX ? ed25519_pubkey_instruction_index : current_instruction;
|
||||
pubkey = instructions[instruction_index].data[ed25519_pubkey_offset..ed25519_pubkey_offset + 32]
|
||||
instruction_index = ed25519_message_instruction_index != u16::MAX ? ed25519_message_instruction_index : current_instruction;
|
||||
message = instructions[instruction_index].data[ed25519_message_data_offset..ed25519_message_data_offset + ed25519_message_data_size]
|
||||
if pubkey.verify(signature, message) != Success {
|
||||
return Error
|
||||
}
|
||||
}
|
||||
return Success
|
||||
}
|
||||
```
|
||||
|
||||
## Secp256k1 Program
|
||||
|
||||
Verify secp256k1 public key recovery operations (ecrecover).
|
||||
|
@@ -16,7 +16,10 @@ The affected RPC endpoints are:
|
||||
- [getConfirmedSignaturesForAddress](developing/clients/jsonrpc-api.md#getconfirmedsignaturesforaddress)
|
||||
- [getConfirmedTransaction](developing/clients/jsonrpc-api.md#getconfirmedtransaction)
|
||||
- [getSignatureStatuses](developing/clients/jsonrpc-api.md#getsignaturestatuses)
|
||||
- [getBlockTime](developing/clients/jsonrpc-api.md#getblocktime)
|
||||
|
||||
Note that [getBlockTime](developing/clients/jsonrpc-api.md#getblocktime)
|
||||
is not supported, as once https://github.com/solana-labs/solana/issues/10089 is
|
||||
fixed then `getBlockTime` can be removed.
|
||||
|
||||
Some system design constraints:
|
||||
|
||||
|
@@ -1,146 +0,0 @@
|
||||
# Program log binary data
|
||||
|
||||
## Problem
|
||||
|
||||
There is no support for logging binary data in Solidity.
|
||||
|
||||
### Events in Solidity
|
||||
|
||||
In Solidity, events can be reported. These look like structures with zero or
|
||||
more fields, and can be emitted with specific values. For example:
|
||||
|
||||
```
|
||||
event PaymentReceived {
|
||||
address sender;
|
||||
uint amount;
|
||||
}
|
||||
|
||||
contract c {
|
||||
function pay() public payable {
|
||||
emit PaymentReceived(msg.sender, msg.value);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Events are write-only from a Solidity/VM perspective and are written to
|
||||
the blocks in the tx records.
|
||||
|
||||
Some of these fields can be marked `indexed`, which affects how the data is
|
||||
encoded. All non-indexed fields are eth abi encoded into a variable length
|
||||
byte array. All indexed fields go into so-called topics.
|
||||
|
||||
Topics are fixed length fields of 32 bytes. There are a maximum of 4 topics;
|
||||
if a type does not always fit into 32 bytes (e.g. string types), then the topic
|
||||
is keccak256 hashed.
|
||||
|
||||
The first topic is a keccak256 hash of the event signature, in this case
|
||||
`keccak256('PaymentReceived(address,uint)')`. The four remaining are available
|
||||
for `indexed` fields. The event may be declared `anonymous`, in which case
|
||||
the first field is not a hash of the signature, and it is permitted to have
|
||||
4 indexed fields.
|
||||
|
||||
### Listening to events in a client
|
||||
|
||||
The reason for the distinction between topics/indexed and regular fields is
|
||||
that it easier to filter on topics.
|
||||
|
||||
```
|
||||
const Web3 = require('web3');
|
||||
const url = 'ws://127.0.0.1:8546';
|
||||
const web3 = new Web3(url);
|
||||
|
||||
var options = {
|
||||
address: '0xfbBE8f06FAda977Ea1E177da391C370EFbEE3D25',
|
||||
topics: [
|
||||
'0xdf50c7bb3b25f812aedef81bc334454040e7b27e27de95a79451d663013b7e17',
|
||||
//'0x0000000000000000000000000d8a3f5e71560982fb0eb5959ecf84412be6ae3e'
|
||||
]
|
||||
};
|
||||
|
||||
var subscription = web3.eth.subscribe('logs', options, function(error, result){
|
||||
if (!error) console.log('got result');
|
||||
else console.log(error);
|
||||
}).on("data", function(log){
|
||||
console.log('got data', log);
|
||||
}).on("changed", function(log){
|
||||
console.log('changed');
|
||||
});
|
||||
```
|
||||
|
||||
In order to decode the non-indexed fields (the data), the abi of the contract
|
||||
is needed. So, the topic is first used to discover what event was used, and
|
||||
then the data can be decoded.
|
||||
|
||||
### Ethereum Tx in block
|
||||
|
||||
The transaction calls event logs. Here is a tx with a single event, with 3
|
||||
topics and some data.
|
||||
|
||||
```
|
||||
{
|
||||
"tx": {
|
||||
"nonce": "0x2",
|
||||
"gasPrice": "0xf224d4a00",
|
||||
"gas": "0xc350",
|
||||
"to": "0x6B175474E89094C44Da98b954EedeAC495271d0F",
|
||||
"value": "0x0",
|
||||
"input": "0xa9059cbb000000000000000000000000a12431d0b9db640034b0cdfceef9cce161e62be40000000000000000000000000000000000000000000000a030dcebbd2f4c0000",
|
||||
"hash": "0x98a67f0a35ebc0ac068acf0885d38419c632ffa4354e96641d6d5103a7681910",
|
||||
"blockNumber": "0xc96431",
|
||||
"from": "0x82f890D638478d211eF2208f3c1466B5Abf83551",
|
||||
"transactionIndex": "0xe1"
|
||||
},
|
||||
"receipt": {
|
||||
"gasUsed": "0x74d2",
|
||||
"status": "0x1",
|
||||
"logs": [
|
||||
{
|
||||
"address": "0x6B175474E89094C44Da98b954EedeAC495271d0F",
|
||||
"topics": [
|
||||
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
|
||||
"0x00000000000000000000000082f890d638478d211ef2208f3c1466b5abf83551",
|
||||
"0x000000000000000000000000a12431d0b9db640034b0cdfceef9cce161e62be4"
|
||||
],
|
||||
"data": "0x0000000000000000000000000000000000000000000000a030dcebbd2f4c0000"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Further considerations
|
||||
|
||||
In Ethereum, events are stored in blocks. Events mark certain state changes in
|
||||
smart contracts. This serves two purposes:
|
||||
|
||||
- Listen to events (i.e. state changes) as they happen by reading new blocks
|
||||
as they are published
|
||||
- Re-read historical events by reading old blocks
|
||||
|
||||
So for example, smart contracts may emit changes as they happen but never the
|
||||
complete state, so the only way to recover the entire state of the contract
|
||||
is by re-reading all events from the chain. So an application will read events
|
||||
from block 1 or whatever block the application was deployed at and then use
|
||||
that state for local processing. This is a local cache and may be re-populated
|
||||
from the chain at any point.
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
Binary logging should be added to the program log. The program log should include the base64 encoded data (zero or more fields permitted).
|
||||
|
||||
So if we encode the topics first, followed by the data, then the event in the
|
||||
tx above would look like:
|
||||
```
|
||||
program data: 3fJSrRviyJtpwrBo/DeNqpUrpFjxKEWKPVaTfUjs8AAAAAAAAAAAAAAACC+JDWOEeNIR7yII88FGa1q/g1UQAAAAAAAAAAAAAAAKEkMdC522QANLDN/O75zOFh5ivk AAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgMNzrvS9MAAA=
|
||||
```
|
||||
|
||||
This requires a new system call:
|
||||
|
||||
```
|
||||
void sol_log_data(SolBytes *fields, uint64_t length);
|
||||
```
|
||||
|
||||
### Considerations
|
||||
|
||||
- Should there be text field in the program log so we can have a little bit of
|
||||
metadata on the binary data, to make it more human readable
|
@@ -39,9 +39,9 @@ on our mainnet, cosmos, or tezos. For our network, which is primarily
|
||||
composed of high availability systems, this seems unlikely. Currently,
|
||||
we have set the threshold percentage to 4.66%, which means that if 23.68%
|
||||
have failed the network may stop finalizing blocks. For our network,
|
||||
which is primarily composed of high availability systems, a 23.68% drop
|
||||
in availability seems unlikely. 1:10^12 odds, assuming five 4.7% staked
|
||||
nodes with 0.995 uptime.
|
||||
which is primarily composed of high availability systems a 23.68% drop
|
||||
in availabilty seems unlinkely. 1:10^12 odds assuming five 4.7% staked
|
||||
nodes with 0.995 of uptime.
|
||||
|
||||
## Security
|
||||
|
||||
|
@@ -1,144 +0,0 @@
|
||||
# Return data from BPF programs
|
||||
|
||||
## Problem
|
||||
|
||||
In the Solidity language it is permitted to return any number of values from a function,
|
||||
for example a variable length string can be returned:
|
||||
|
||||
```
|
||||
function foo1() public returns (string) {
|
||||
return "Hello, world!\n";
|
||||
}
|
||||
```
|
||||
|
||||
Multiple values, arrays and structs are permitted too.
|
||||
|
||||
```
|
||||
struct S {
|
||||
int f1;
|
||||
bool f2
|
||||
};
|
||||
|
||||
function foo2() public returns (string, int[], S) {
|
||||
return (a, b, c);
|
||||
}
|
||||
```
|
||||
|
||||
All the return values are eth abi encoded to a variable-length byte array.
|
||||
|
||||
On ethereum errors can be returned too:
|
||||
|
||||
```
|
||||
function withdraw() public {
|
||||
require(msg.sender == owner, "Permission denied");
|
||||
}
|
||||
|
||||
function failure() public {
|
||||
revert("I afraid I can't do that dave");
|
||||
}
|
||||
```
|
||||
These errors help the developer debug any issue they are having, and can
|
||||
also be caught in a Solidity `try` .. `catch` block. Outside of a `try` .. `catch`
|
||||
block, any of these would cause the transaction or rpc to fail.
|
||||
|
||||
## Existing solution
|
||||
|
||||
The existing solution that Solang uses, writes the return data to the callee account data.
|
||||
The caller's account cannot be used, since the callee may not be the same BPF program, so
|
||||
it will not have permission to write to the callee's account data.
|
||||
|
||||
Another solution would be to have a single return data account which is passed
|
||||
around through CPI. Again this does not work for CPI as the callee may not have
|
||||
permission to write to it.
|
||||
|
||||
The problem with this solution is:
|
||||
|
||||
- It does not work for RPC calls
|
||||
- It is very racy; a client has to submit the Tx and then retrieve the account
|
||||
data. This is not atomic so the return data can be overwritten by another transaction.
|
||||
|
||||
## Requirements for Solution
|
||||
|
||||
It must work for:
|
||||
|
||||
- RPC: An RPC should be able to return any number of values without writing to account data
|
||||
- Transaction: A transaction should be able to return any number of values without needing to write them to account data
|
||||
- CPI: The callee must "set" return value, and the caller must be able to retrieve it.
|
||||
|
||||
## Review of other chains
|
||||
|
||||
### Ethereum (EVM)
|
||||
|
||||
The `RETURN` opcode allows a contract to set a buffer as a returndata. This opcode takes a pointer to memory and a size. The `REVERT` opcode works similarly but signals that the call failed, and all account data changes must be reverted.
|
||||
|
||||
For CPI, the caller can retrieve the returned data of the callee using the `RETURNDATASIZE` opcode which returns the length, and the `RETURNDATACOPY` opcode, which takes a memory destination pointer, offset into the returndata, and a length argument.
|
||||
|
||||
Ethereum stores the returndata in blocks.
|
||||
|
||||
### Parity Substrate
|
||||
|
||||
The return data can be set using the `seal_return(u32 flags, u32 pointer, u32 size)` syscall.
|
||||
- Flags can be 1 for revert, 0 for success (nothing else defined)
|
||||
- Function does not return
|
||||
|
||||
CPI: The `seal_call()` syscall takes pointer to buffer and pointer to buffer size where return data goes
|
||||
- There is a 32KB limit for return data.
|
||||
|
||||
Parity Substrate does not write the return data to blocks.
|
||||
|
||||
## Rejected Solution
|
||||
|
||||
The concept of ephemeral accounts has been proposed as a solution for this. This would
|
||||
certainly work for the CPI case, but this would not work for the RPC or Transaction case.
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
The callee can set the return data using a new system call `sol_set_return_data(buf: *const u8, length: u64)`.
|
||||
There is a limit of 1024 bytes for the returndata. This function can be called multiple times, and
|
||||
will simply overwrite what was written in the last call.
|
||||
|
||||
The return data can be retrieved with `sol_get_return_data(buf: *mut u8, length: u64, program_id: *mut Pubkey) -> u64`.
|
||||
This function copies the return buffer, and the program_id that set the return data, and
|
||||
returns the length of the return data, or `0` if no return data is set. In this case, program_id is not set.
|
||||
|
||||
When an instruction calls `sol_invoke()`, the return data of the callee is copied into the return data
|
||||
of the current instruction. This means that any return data is automatically passed up the call stack,
|
||||
to the callee of the current instruction (or the RPC call).
|
||||
|
||||
Note that `sol_invoke()` clears the returns data before invoking the callee, so that any return data from
|
||||
a previous invoke is not reused if the invoked program fails to set return data. For example:
|
||||
|
||||
- A invokes B
|
||||
- Before entry to B, return data is cleared.
|
||||
- B sets some return data and returns
|
||||
- A invokes C
|
||||
- Before entry to C, return data is cleared.
|
||||
- C does not set return data and returns
|
||||
- A checks return data and finds it empty
|
||||
|
||||
Another scenario to consider:
|
||||
|
||||
- A invokes B
|
||||
- B invokes C
|
||||
- C sets return data and returns
|
||||
- B does not touch return data and returns
|
||||
- A gets return data from C
|
||||
- A does not touch return data
|
||||
- Return data from transaction is what C set.
|
||||
|
||||
The compute costs are calculated for getting and setting the return data using
|
||||
the syscalls.
|
||||
|
||||
For a normal RPC or Transaction, the returndata is base64-encoded and stored along side the sol_log
|
||||
strings in the [stable log](https://github.com/solana-labs/solana/blob/95292841947763bdd47ef116b40fc34d0585bca8/sdk/src/process_instruction.rs#L275-L281).
|
||||
|
||||
## Note on returning errors
|
||||
|
||||
Solidity on Ethereum allows the contract to return an error in the return data. In this case, all
|
||||
the account data changes for the account should be reverted. On Solana, any non-zero exit code
|
||||
for a BPF program means the entire transaction fails. We do not wish to support an error return
|
||||
by returning success and then returning an error in the return data. This would mean we would have
|
||||
to support reverting the account data changes; this is too expensive both on the VM side and the BPF
|
||||
contract side.
|
||||
|
||||
Errors will be reported via sol_log.
|
@@ -8,7 +8,7 @@ Confirm the IP address and **identity pubkey** of your validator is visible in
|
||||
the gossip network by running:
|
||||
|
||||
```bash
|
||||
solana gossip
|
||||
solana-gossip spy --entrypoint devnet.solana.com:8001
|
||||
```
|
||||
|
||||
## Check Your Balance
|
||||
|
@@ -26,6 +26,16 @@ solana transaction-count
|
||||
View the [metrics dashboard](https://metrics.solana.com:3000/d/monitor/cluster-telemetry) for more
|
||||
detail on cluster activity.
|
||||
|
||||
## Confirm your Installation
|
||||
|
||||
Try running following command to join the gossip network and view all the other
|
||||
nodes in the cluster:
|
||||
|
||||
```bash
|
||||
solana-gossip spy --entrypoint entrypoint.devnet.solana.com:8001
|
||||
# Press ^C to exit
|
||||
```
|
||||
|
||||
## Enabling CUDA
|
||||
|
||||
If your machine has a GPU with CUDA installed \(Linux-only currently\), include
|
||||
@@ -308,11 +318,11 @@ The ledger will be placed in the `ledger/` directory by default, use the
|
||||
> `solana-validator --identity ASK ... --authorized-voter ASK ...`
|
||||
> and you will be prompted to enter your seed phrases and optional passphrase.
|
||||
|
||||
Confirm your validator is connected to the network by opening a new terminal and
|
||||
Confirm your validator connected to the network by opening a new terminal and
|
||||
running:
|
||||
|
||||
```bash
|
||||
solana gossip
|
||||
solana-gossip spy --entrypoint entrypoint.devnet.solana.com:8001
|
||||
```
|
||||
|
||||
If your validator is connected, its public key and IP address will appear in the list.
|
||||
|
@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-dos"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -14,18 +14,18 @@ clap = "2.33.1"
|
||||
log = "0.4.11"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-core = { path = "../core", version = "=1.8.2" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.8.2" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-perf = { path = "../perf", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-core = { path = "../core", version = "=1.7.17" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.17" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-download-utils"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana Download Utils"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -15,8 +15,8 @@ console = "0.14.1"
|
||||
indicatif = "0.15.0"
|
||||
log = "0.4.11"
|
||||
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
tar = "0.4.37"
|
||||
|
||||
[lib]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-faucet"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana Faucet"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -16,12 +16,12 @@ clap = "2.33"
|
||||
log = "0.4.11"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-frozen-abi"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana Frozen ABI"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -16,11 +16,11 @@ log = "0.4.11"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
sha2 = "0.9.2"
|
||||
solana-frozen-abi-macro = { path = "macro", version = "=1.8.2" }
|
||||
solana-frozen-abi-macro = { path = "macro", version = "=1.7.17" }
|
||||
thiserror = "1.0"
|
||||
|
||||
[target.'cfg(not(target_arch = "bpf"))'.dependencies]
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
generic-array = { version = "0.14.3", default-features = false, features = ["serde", "more_lengths"]}
|
||||
memmap2 = "0.1.0"
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-frozen-abi-macro"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana Frozen ABI Macro"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-genesis-utils"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
description = "Solana Genesis Utils"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-download-utils"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-download-utils = { path = "../download-utils", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-download-utils = { path = "../download-utils", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -16,16 +16,16 @@ chrono = "0.4"
|
||||
serde = "1.0.122"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.8.2" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "=1.8.2" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.7.17" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "=1.7.17" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.17" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.8.2"
|
||||
version = "1.7.17"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -15,7 +15,7 @@ bv = { version = "0.11.1", features = ["serde"] }
|
||||
clap = "2.33.1"
|
||||
flate2 = "1.0"
|
||||
indexmap = { version = "1.5", features = ["rayon"] }
|
||||
itertools = "0.10.1"
|
||||
itertools = "0.9.0"
|
||||
log = "0.4.11"
|
||||
lru = "0.6.1"
|
||||
matches = "0.1.8"
|
||||
@@ -26,22 +26,22 @@ rayon = "1.5.0"
|
||||
serde = "1.0.122"
|
||||
serde_bytes = "0.11"
|
||||
serde_derive = "1.0.103"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.8.2" }
|
||||
solana-client = { path = "../client", version = "=1.8.2" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.8.2" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.8.2" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.8.2" }
|
||||
solana-logger = { path = "../logger", version = "=1.8.2" }
|
||||
solana-measure = { path = "../measure", version = "=1.8.2" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.8.2" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.8.2" }
|
||||
solana-perf = { path = "../perf", version = "=1.8.2" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.8.2" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.8.2" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.8.2" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.8.2" }
|
||||
solana-version = { path = "../version", version = "=1.8.2" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.8.2" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.17" }
|
||||
solana-client = { path = "../client", version = "=1.7.17" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.17" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.17" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.17" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.17" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.17" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.17" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.17" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.17" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.17" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.17" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.17" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.17" }
|
||||
solana-version = { path = "../version", version = "=1.7.17" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.17" }
|
||||
thiserror = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -6,9 +6,7 @@ use {
|
||||
rand::{thread_rng, Rng},
|
||||
rayon::ThreadPoolBuilder,
|
||||
solana_gossip::{
|
||||
crds::{Crds, GossipRoute},
|
||||
crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
||||
crds_value::CrdsValue,
|
||||
crds::Crds, crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS, crds_value::CrdsValue,
|
||||
},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::collections::HashMap,
|
||||
@@ -23,7 +21,7 @@ fn bench_find_old_labels(bencher: &mut Bencher) {
|
||||
let now = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS + CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 1000;
|
||||
std::iter::repeat_with(|| (CrdsValue::new_rand(&mut rng, None), rng.gen_range(0, now)))
|
||||
.take(50_000)
|
||||
.for_each(|(v, ts)| assert!(crds.insert(v, ts, GossipRoute::LocalMessage).is_ok()));
|
||||
.for_each(|(v, ts)| assert!(crds.insert(v, ts).is_ok()));
|
||||
let mut timeouts = HashMap::new();
|
||||
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
|
||||
bencher.iter(|| {
|
||||
|
@@ -7,7 +7,7 @@ use {
|
||||
rayon::ThreadPoolBuilder,
|
||||
solana_gossip::{
|
||||
cluster_info::MAX_BLOOM_SIZE,
|
||||
crds::{Crds, GossipRoute},
|
||||
crds::Crds,
|
||||
crds_gossip_pull::{CrdsFilter, CrdsGossipPull},
|
||||
crds_value::CrdsValue,
|
||||
},
|
||||
@@ -38,11 +38,7 @@ fn bench_build_crds_filters(bencher: &mut Bencher) {
|
||||
let mut num_inserts = 0;
|
||||
for _ in 0..90_000 {
|
||||
if crds
|
||||
.insert(
|
||||
CrdsValue::new_rand(&mut rng, None),
|
||||
rng.gen(),
|
||||
GossipRoute::LocalMessage,
|
||||
)
|
||||
.insert(CrdsValue::new_rand(&mut rng, None), rng.gen())
|
||||
.is_ok()
|
||||
{
|
||||
num_inserts += 1;
|
||||
|
@@ -5,7 +5,7 @@ extern crate test;
|
||||
use {
|
||||
rand::{thread_rng, Rng},
|
||||
solana_gossip::{
|
||||
crds::{Crds, GossipRoute, VersionedCrdsValue},
|
||||
crds::{Crds, VersionedCrdsValue},
|
||||
crds_shards::CrdsShards,
|
||||
crds_value::CrdsValue,
|
||||
},
|
||||
@@ -20,8 +20,7 @@ fn new_test_crds_value<R: Rng>(rng: &mut R) -> VersionedCrdsValue {
|
||||
let value = CrdsValue::new_rand(rng, None);
|
||||
let label = value.label();
|
||||
let mut crds = Crds::default();
|
||||
crds.insert(value, timestamp(), GossipRoute::LocalMessage)
|
||||
.unwrap();
|
||||
crds.insert(value, timestamp()).unwrap();
|
||||
crds.get(&label).cloned().unwrap()
|
||||
}
|
||||
|
||||
|
@@ -18,7 +18,7 @@ use {
|
||||
submit_gossip_stats, Counter, GossipStats, ScopedTimer, TimedGuard,
|
||||
},
|
||||
contact_info::ContactInfo,
|
||||
crds::{Crds, Cursor, GossipRoute},
|
||||
crds::{Crds, Cursor},
|
||||
crds_gossip::CrdsGossip,
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, ProcessPullStats, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
|
||||
@@ -127,7 +127,7 @@ pub const DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS: u64 = 60_000;
|
||||
/// Minimum serialized size of a Protocol::PullResponse packet.
|
||||
const PULL_RESPONSE_MIN_SERIALIZED_SIZE: usize = 161;
|
||||
// Limit number of unique pubkeys in the crds table.
|
||||
pub(crate) const CRDS_UNIQUE_PUBKEY_CAPACITY: usize = 8192;
|
||||
pub(crate) const CRDS_UNIQUE_PUBKEY_CAPACITY: usize = 4096;
|
||||
/// Minimum stake that a node should have so that its CRDS values are
|
||||
/// propagated through gossip (few types are exempted).
|
||||
const MIN_STAKE_FOR_GOSSIP: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
|
||||
@@ -492,12 +492,7 @@ impl ClusterInfo {
|
||||
// TODO kill insert_info, only used by tests
|
||||
pub fn insert_info(&self, contact_info: ContactInfo) {
|
||||
let value = CrdsValue::new_signed(CrdsData::ContactInfo(contact_info), &self.keypair);
|
||||
let _ =
|
||||
self.gossip
|
||||
.write()
|
||||
.unwrap()
|
||||
.crds
|
||||
.insert(value, timestamp(), GossipRoute::LocalMessage);
|
||||
let _ = self.gossip.write().unwrap().crds.insert(value, timestamp());
|
||||
}
|
||||
|
||||
pub fn set_entrypoint(&self, entrypoint: ContactInfo) {
|
||||
@@ -613,7 +608,7 @@ impl ClusterInfo {
|
||||
let now = timestamp();
|
||||
let mut gossip = self.gossip.write().unwrap();
|
||||
for node in nodes {
|
||||
if let Err(err) = gossip.crds.insert(node, now, GossipRoute::LocalMessage) {
|
||||
if let Err(err) = gossip.crds.insert(node, now) {
|
||||
warn!("crds insert failed {:?}", err);
|
||||
}
|
||||
}
|
||||
@@ -901,7 +896,7 @@ impl ClusterInfo {
|
||||
let mut gossip = self.gossip.write().unwrap();
|
||||
let now = timestamp();
|
||||
for entry in entries {
|
||||
if let Err(err) = gossip.crds.insert(entry, now, GossipRoute::LocalMessage) {
|
||||
if let Err(err) = gossip.crds.insert(entry, now) {
|
||||
error!("push_epoch_slots failed: {:?}", err);
|
||||
}
|
||||
}
|
||||
@@ -964,7 +959,7 @@ impl ClusterInfo {
|
||||
let vote = CrdsData::Vote(vote_index, vote);
|
||||
let vote = CrdsValue::new_signed(vote, &self.keypair);
|
||||
let mut gossip = self.gossip.write().unwrap();
|
||||
if let Err(err) = gossip.crds.insert(vote, now, GossipRoute::LocalMessage) {
|
||||
if let Err(err) = gossip.crds.insert(vote, now) {
|
||||
error!("push_vote failed: {:?}", err);
|
||||
}
|
||||
}
|
||||
@@ -1312,12 +1307,7 @@ impl ClusterInfo {
|
||||
fn insert_self(&self) {
|
||||
let value =
|
||||
CrdsValue::new_signed(CrdsData::ContactInfo(self.my_contact_info()), &self.keypair);
|
||||
let _ =
|
||||
self.gossip
|
||||
.write()
|
||||
.unwrap()
|
||||
.crds
|
||||
.insert(value, timestamp(), GossipRoute::LocalMessage);
|
||||
let _ = self.gossip.write().unwrap().crds.insert(value, timestamp());
|
||||
}
|
||||
|
||||
// If the network entrypoint hasn't been discovered yet, add it to the crds table
|
||||
@@ -1478,7 +1468,7 @@ impl ClusterInfo {
|
||||
let mut gossip = self.gossip.write().unwrap();
|
||||
let now = timestamp();
|
||||
for entry in pending_push_messages {
|
||||
let _ = gossip.crds.insert(entry, now, GossipRoute::LocalMessage);
|
||||
let _ = gossip.crds.insert(entry, now);
|
||||
}
|
||||
}
|
||||
fn new_push_requests(
|
||||
@@ -3761,10 +3751,7 @@ mod tests {
|
||||
{
|
||||
let mut gossip = cluster_info.gossip.write().unwrap();
|
||||
for entry in entries {
|
||||
assert!(gossip
|
||||
.crds
|
||||
.insert(entry, /*now=*/ 0, GossipRoute::LocalMessage)
|
||||
.is_ok());
|
||||
assert!(gossip.crds.insert(entry, /*now=*/ 0).is_ok());
|
||||
}
|
||||
}
|
||||
// Should exclude other node's epoch-slot because of different
|
||||
@@ -4063,11 +4050,12 @@ mod tests {
|
||||
0,
|
||||
LowestSlot::new(other_node_pubkey, peer_lowest, timestamp()),
|
||||
));
|
||||
let _ = cluster_info.gossip.write().unwrap().crds.insert(
|
||||
value,
|
||||
timestamp(),
|
||||
GossipRoute::LocalMessage,
|
||||
);
|
||||
let _ = cluster_info
|
||||
.gossip
|
||||
.write()
|
||||
.unwrap()
|
||||
.crds
|
||||
.insert(value, timestamp());
|
||||
}
|
||||
// only half the visible peers should be eligible to serve this repair
|
||||
assert_eq!(cluster_info.repair_peers(5).len(), 5);
|
||||
|
@@ -1,8 +1,7 @@
|
||||
use {
|
||||
crate::crds_gossip::CrdsGossip,
|
||||
itertools::Itertools,
|
||||
solana_measure::measure::Measure,
|
||||
solana_sdk::{clock::Slot, pubkey::Pubkey},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::{
|
||||
collections::HashMap,
|
||||
ops::{Deref, DerefMut},
|
||||
@@ -164,10 +163,9 @@ pub(crate) fn submit_gossip_stats(
|
||||
gossip: &RwLock<CrdsGossip>,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) {
|
||||
let (crds_stats, table_size, num_nodes, num_pubkeys, purged_values_size, failed_inserts_size) = {
|
||||
let (table_size, num_nodes, num_pubkeys, purged_values_size, failed_inserts_size) = {
|
||||
let gossip = gossip.read().unwrap();
|
||||
(
|
||||
gossip.crds.take_stats(),
|
||||
gossip.crds.len(),
|
||||
gossip.crds.num_nodes(),
|
||||
gossip.crds.num_pubkeys(),
|
||||
@@ -451,155 +449,4 @@ pub(crate) fn submit_gossip_stats(
|
||||
i64
|
||||
),
|
||||
);
|
||||
let counts: Vec<_> = crds_stats
|
||||
.pull
|
||||
.counts
|
||||
.iter()
|
||||
.zip(crds_stats.push.counts.iter())
|
||||
.map(|(a, b)| a + b)
|
||||
.collect();
|
||||
datapoint_info!(
|
||||
"cluster_info_crds_stats",
|
||||
("ContactInfo", counts[0], i64),
|
||||
("ContactInfo-push", crds_stats.push.counts[0], i64),
|
||||
("ContactInfo-pull", crds_stats.pull.counts[0], i64),
|
||||
("Vote", counts[1], i64),
|
||||
("Vote-push", crds_stats.push.counts[1], i64),
|
||||
("Vote-pull", crds_stats.pull.counts[1], i64),
|
||||
("LowestSlot", counts[2], i64),
|
||||
("LowestSlot-push", crds_stats.push.counts[2], i64),
|
||||
("LowestSlot-pull", crds_stats.pull.counts[2], i64),
|
||||
("SnapshotHashes", counts[3], i64),
|
||||
("SnapshotHashes-push", crds_stats.push.counts[3], i64),
|
||||
("SnapshotHashes-pull", crds_stats.pull.counts[3], i64),
|
||||
("AccountsHashes", counts[4], i64),
|
||||
("AccountsHashes-push", crds_stats.push.counts[4], i64),
|
||||
("AccountsHashes-pull", crds_stats.pull.counts[4], i64),
|
||||
("EpochSlots", counts[5], i64),
|
||||
("EpochSlots-push", crds_stats.push.counts[5], i64),
|
||||
("EpochSlots-pull", crds_stats.pull.counts[5], i64),
|
||||
("LegacyVersion", counts[6], i64),
|
||||
("LegacyVersion-push", crds_stats.push.counts[6], i64),
|
||||
("LegacyVersion-pull", crds_stats.pull.counts[6], i64),
|
||||
("Version", counts[7], i64),
|
||||
("Version-push", crds_stats.push.counts[7], i64),
|
||||
("Version-pull", crds_stats.pull.counts[7], i64),
|
||||
("NodeInstance", counts[8], i64),
|
||||
("NodeInstance-push", crds_stats.push.counts[8], i64),
|
||||
("NodeInstance-pull", crds_stats.pull.counts[8], i64),
|
||||
("DuplicateShred", counts[9], i64),
|
||||
("DuplicateShred-push", crds_stats.push.counts[9], i64),
|
||||
("DuplicateShred-pull", crds_stats.pull.counts[9], i64),
|
||||
("IncrementalSnapshotHashes", counts[10], i64),
|
||||
(
|
||||
"IncrementalSnapshotHashes-push",
|
||||
crds_stats.push.counts[10],
|
||||
i64
|
||||
),
|
||||
(
|
||||
"IncrementalSnapshotHashes-pull",
|
||||
crds_stats.pull.counts[10],
|
||||
i64
|
||||
),
|
||||
("all", counts.iter().sum::<usize>(), i64),
|
||||
(
|
||||
"all-push",
|
||||
crds_stats.push.counts.iter().sum::<usize>(),
|
||||
i64
|
||||
),
|
||||
(
|
||||
"all-pull",
|
||||
crds_stats.pull.counts.iter().sum::<usize>(),
|
||||
i64
|
||||
),
|
||||
);
|
||||
let fails: Vec<_> = crds_stats
|
||||
.pull
|
||||
.fails
|
||||
.iter()
|
||||
.zip(crds_stats.push.fails.iter())
|
||||
.map(|(a, b)| a + b)
|
||||
.collect();
|
||||
datapoint_info!(
|
||||
"cluster_info_crds_stats_fails",
|
||||
("ContactInfo", fails[0], i64),
|
||||
("ContactInfo-push", crds_stats.push.fails[0], i64),
|
||||
("ContactInfo-pull", crds_stats.pull.fails[0], i64),
|
||||
("Vote", fails[1], i64),
|
||||
("Vote-push", crds_stats.push.fails[1], i64),
|
||||
("Vote-pull", crds_stats.pull.fails[1], i64),
|
||||
("LowestSlot", fails[2], i64),
|
||||
("LowestSlot-push", crds_stats.push.fails[2], i64),
|
||||
("LowestSlot-pull", crds_stats.pull.fails[2], i64),
|
||||
("SnapshotHashes", fails[3], i64),
|
||||
("SnapshotHashes-push", crds_stats.push.fails[3], i64),
|
||||
("SnapshotHashes-pull", crds_stats.pull.fails[3], i64),
|
||||
("AccountsHashes", fails[4], i64),
|
||||
("AccountsHashes-push", crds_stats.push.fails[4], i64),
|
||||
("AccountsHashes-pull", crds_stats.pull.fails[4], i64),
|
||||
("EpochSlots", fails[5], i64),
|
||||
("EpochSlots-push", crds_stats.push.fails[5], i64),
|
||||
("EpochSlots-pull", crds_stats.pull.fails[5], i64),
|
||||
("LegacyVersion", fails[6], i64),
|
||||
("LegacyVersion-push", crds_stats.push.fails[6], i64),
|
||||
("LegacyVersion-pull", crds_stats.pull.fails[6], i64),
|
||||
("Version", fails[7], i64),
|
||||
("Version-push", crds_stats.push.fails[7], i64),
|
||||
("Version-pull", crds_stats.pull.fails[7], i64),
|
||||
("NodeInstance", fails[8], i64),
|
||||
("NodeInstance-push", crds_stats.push.fails[8], i64),
|
||||
("NodeInstance-pull", crds_stats.pull.fails[8], i64),
|
||||
("DuplicateShred", fails[9], i64),
|
||||
("DuplicateShred-push", crds_stats.push.fails[9], i64),
|
||||
("DuplicateShred-pull", crds_stats.pull.fails[9], i64),
|
||||
("IncrementalSnapshotHashes", fails[10], i64),
|
||||
(
|
||||
"IncrementalSnapshotHashes-push",
|
||||
crds_stats.push.fails[10],
|
||||
i64
|
||||
),
|
||||
(
|
||||
"IncrementalSnapshotHashes-pull",
|
||||
crds_stats.pull.fails[10],
|
||||
i64
|
||||
),
|
||||
("all", fails.iter().sum::<usize>(), i64),
|
||||
("all-push", crds_stats.push.fails.iter().sum::<usize>(), i64),
|
||||
("all-pull", crds_stats.pull.fails.iter().sum::<usize>(), i64),
|
||||
);
|
||||
for (slot, num_votes) in &crds_stats.pull.votes {
|
||||
datapoint_info!(
|
||||
"cluster_info_crds_stats_votes_pull",
|
||||
("slot", *slot, i64),
|
||||
("num_votes", *num_votes, i64),
|
||||
);
|
||||
}
|
||||
for (slot, num_votes) in &crds_stats.push.votes {
|
||||
datapoint_info!(
|
||||
"cluster_info_crds_stats_votes_push",
|
||||
("slot", *slot, i64),
|
||||
("num_votes", *num_votes, i64),
|
||||
);
|
||||
}
|
||||
let votes: HashMap<Slot, usize> = crds_stats
|
||||
.pull
|
||||
.votes
|
||||
.into_iter()
|
||||
.map(|(slot, num_votes)| (*slot, *num_votes))
|
||||
.chain(
|
||||
crds_stats
|
||||
.push
|
||||
.votes
|
||||
.into_iter()
|
||||
.map(|(slot, num_votes)| (*slot, *num_votes)),
|
||||
)
|
||||
.into_grouping_map()
|
||||
.aggregate(|acc, _slot, num_votes| Some(acc.unwrap_or_default() + num_votes));
|
||||
for (slot, num_votes) in votes {
|
||||
datapoint_info!(
|
||||
"cluster_info_crds_stats_votes",
|
||||
("slot", slot, i64),
|
||||
("num_votes", num_votes, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -35,11 +35,9 @@ use {
|
||||
map::{rayon::ParValues, Entry, IndexMap},
|
||||
set::IndexSet,
|
||||
},
|
||||
lru::LruCache,
|
||||
matches::debug_assert_matches,
|
||||
rayon::{prelude::*, ThreadPool},
|
||||
solana_sdk::{
|
||||
clock::Slot,
|
||||
hash::{hash, Hash},
|
||||
pubkey::Pubkey,
|
||||
},
|
||||
@@ -47,14 +45,12 @@ use {
|
||||
cmp::Ordering,
|
||||
collections::{hash_map, BTreeMap, HashMap, VecDeque},
|
||||
ops::{Bound, Index, IndexMut},
|
||||
sync::Mutex,
|
||||
},
|
||||
};
|
||||
|
||||
const CRDS_SHARDS_BITS: u32 = 8;
|
||||
// Number of vote slots to track in an lru-cache for metrics.
|
||||
const VOTE_SLOTS_METRICS_CAP: usize = 100;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Crds {
|
||||
/// Stores the map of labels and values
|
||||
table: IndexMap<CrdsValueLabel, VersionedCrdsValue>,
|
||||
@@ -73,7 +69,6 @@ pub struct Crds {
|
||||
purged: VecDeque<(Hash, u64 /*timestamp*/)>,
|
||||
// Mapping from nodes' pubkeys to their respective shred-version.
|
||||
shred_versions: HashMap<Pubkey, u16>,
|
||||
stats: Mutex<CrdsStats>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
@@ -82,28 +77,6 @@ pub enum CrdsError {
|
||||
UnknownStakes,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub enum GossipRoute {
|
||||
LocalMessage,
|
||||
PullRequest,
|
||||
PullResponse,
|
||||
PushMessage,
|
||||
}
|
||||
|
||||
type CrdsCountsArray = [usize; 11];
|
||||
|
||||
pub(crate) struct CrdsDataStats {
|
||||
pub(crate) counts: CrdsCountsArray,
|
||||
pub(crate) fails: CrdsCountsArray,
|
||||
pub(crate) votes: LruCache<Slot, /*count:*/ usize>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct CrdsStats {
|
||||
pub(crate) pull: CrdsDataStats,
|
||||
pub(crate) push: CrdsDataStats,
|
||||
}
|
||||
|
||||
/// This structure stores some local metadata associated with the CrdsValue
|
||||
#[derive(PartialEq, Debug, Clone)]
|
||||
pub struct VersionedCrdsValue {
|
||||
@@ -156,7 +129,6 @@ impl Default for Crds {
|
||||
entries: BTreeMap::default(),
|
||||
purged: VecDeque::default(),
|
||||
shred_versions: HashMap::default(),
|
||||
stats: Mutex::<CrdsStats>::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -197,18 +169,12 @@ impl Crds {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert(
|
||||
&mut self,
|
||||
value: CrdsValue,
|
||||
now: u64,
|
||||
route: GossipRoute,
|
||||
) -> Result<(), CrdsError> {
|
||||
pub fn insert(&mut self, value: CrdsValue, now: u64) -> Result<(), CrdsError> {
|
||||
let label = value.label();
|
||||
let pubkey = value.pubkey();
|
||||
let value = VersionedCrdsValue::new(value, self.cursor, now);
|
||||
match self.table.entry(label) {
|
||||
Entry::Vacant(entry) => {
|
||||
self.stats.lock().unwrap().record_insert(&value, route);
|
||||
let entry_index = entry.index();
|
||||
self.shards.insert(entry_index, &value);
|
||||
match &value.value.data {
|
||||
@@ -231,7 +197,6 @@ impl Crds {
|
||||
Ok(())
|
||||
}
|
||||
Entry::Occupied(mut entry) if overrides(&value.value, entry.get()) => {
|
||||
self.stats.lock().unwrap().record_insert(&value, route);
|
||||
let entry_index = entry.index();
|
||||
self.shards.remove(entry_index, entry.get());
|
||||
self.shards.insert(entry_index, &value);
|
||||
@@ -263,7 +228,6 @@ impl Crds {
|
||||
Ok(())
|
||||
}
|
||||
Entry::Occupied(entry) => {
|
||||
self.stats.lock().unwrap().record_fail(&value, route);
|
||||
trace!(
|
||||
"INSERT FAILED data: {} new.wallclock: {}",
|
||||
value.value.label(),
|
||||
@@ -598,89 +562,6 @@ impl Crds {
|
||||
}
|
||||
Ok(keys.len())
|
||||
}
|
||||
|
||||
pub(crate) fn take_stats(&self) -> CrdsStats {
|
||||
std::mem::take(&mut self.stats.lock().unwrap())
|
||||
}
|
||||
|
||||
// Only for tests and simulations.
|
||||
pub(crate) fn mock_clone(&self) -> Self {
|
||||
Self {
|
||||
table: self.table.clone(),
|
||||
cursor: self.cursor,
|
||||
shards: self.shards.clone(),
|
||||
nodes: self.nodes.clone(),
|
||||
votes: self.votes.clone(),
|
||||
epoch_slots: self.epoch_slots.clone(),
|
||||
records: self.records.clone(),
|
||||
entries: self.entries.clone(),
|
||||
purged: self.purged.clone(),
|
||||
shred_versions: self.shred_versions.clone(),
|
||||
stats: Mutex::<CrdsStats>::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CrdsDataStats {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
counts: CrdsCountsArray::default(),
|
||||
fails: CrdsCountsArray::default(),
|
||||
votes: LruCache::new(VOTE_SLOTS_METRICS_CAP),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CrdsDataStats {
|
||||
fn record_insert(&mut self, entry: &VersionedCrdsValue) {
|
||||
self.counts[Self::ordinal(entry)] += 1;
|
||||
if let CrdsData::Vote(_, vote) = &entry.value.data {
|
||||
if let Some(slot) = vote.slot() {
|
||||
let num_nodes = self.votes.get(&slot).copied().unwrap_or_default();
|
||||
self.votes.put(slot, num_nodes + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn record_fail(&mut self, entry: &VersionedCrdsValue) {
|
||||
self.fails[Self::ordinal(entry)] += 1;
|
||||
}
|
||||
|
||||
fn ordinal(entry: &VersionedCrdsValue) -> usize {
|
||||
match &entry.value.data {
|
||||
CrdsData::ContactInfo(_) => 0,
|
||||
CrdsData::Vote(_, _) => 1,
|
||||
CrdsData::LowestSlot(_, _) => 2,
|
||||
CrdsData::SnapshotHashes(_) => 3,
|
||||
CrdsData::AccountsHashes(_) => 4,
|
||||
CrdsData::EpochSlots(_, _) => 5,
|
||||
CrdsData::LegacyVersion(_) => 6,
|
||||
CrdsData::Version(_) => 7,
|
||||
CrdsData::NodeInstance(_) => 8,
|
||||
CrdsData::DuplicateShred(_, _) => 9,
|
||||
CrdsData::IncrementalSnapshotHashes(_) => 10,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CrdsStats {
|
||||
fn record_insert(&mut self, entry: &VersionedCrdsValue, route: GossipRoute) {
|
||||
match route {
|
||||
GossipRoute::LocalMessage => (),
|
||||
GossipRoute::PullRequest => (),
|
||||
GossipRoute::PushMessage => self.push.record_insert(entry),
|
||||
GossipRoute::PullResponse => self.pull.record_insert(entry),
|
||||
}
|
||||
}
|
||||
|
||||
fn record_fail(&mut self, entry: &VersionedCrdsValue, route: GossipRoute) {
|
||||
match route {
|
||||
GossipRoute::LocalMessage => (),
|
||||
GossipRoute::PullRequest => (),
|
||||
GossipRoute::PushMessage => self.push.record_fail(entry),
|
||||
GossipRoute::PullResponse => self.pull.record_fail(entry),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -705,10 +586,7 @@ mod tests {
|
||||
fn test_insert() {
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 0, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(()));
|
||||
assert_eq!(crds.table.len(), 1);
|
||||
assert!(crds.table.contains_key(&val.label()));
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 0);
|
||||
@@ -717,14 +595,8 @@ mod tests {
|
||||
fn test_update_old() {
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 0, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 1, GossipRoute::LocalMessage),
|
||||
Err(CrdsError::InsertFailed)
|
||||
);
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(()));
|
||||
assert_eq!(crds.insert(val.clone(), 1), Err(CrdsError::InsertFailed));
|
||||
assert!(crds.purged.is_empty());
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 0);
|
||||
}
|
||||
@@ -736,15 +608,12 @@ mod tests {
|
||||
0,
|
||||
)));
|
||||
let value_hash = hash(&serialize(&original).unwrap());
|
||||
assert_matches!(crds.insert(original, 0, GossipRoute::LocalMessage), Ok(()));
|
||||
assert_matches!(crds.insert(original, 0), Ok(()));
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::default(),
|
||||
1,
|
||||
)));
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 1, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val.clone(), 1), Ok(()));
|
||||
assert_eq!(*crds.purged.back().unwrap(), (value_hash, 1));
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 1);
|
||||
}
|
||||
@@ -755,19 +624,13 @@ mod tests {
|
||||
&Pubkey::default(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 0, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(()));
|
||||
assert_eq!(crds.table[&val.label()].ordinal, 0);
|
||||
|
||||
let val2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
let value_hash = hash(&serialize(&val2).unwrap());
|
||||
assert_eq!(val2.label().pubkey(), val.label().pubkey());
|
||||
assert_eq!(
|
||||
crds.insert(val2.clone(), 0, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val2.clone(), 0), Ok(()));
|
||||
|
||||
crds.update_record_timestamp(&val.label().pubkey(), 2);
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 2);
|
||||
@@ -782,7 +645,7 @@ mod tests {
|
||||
let mut ci = ContactInfo::default();
|
||||
ci.wallclock += 1;
|
||||
let val3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
|
||||
assert_eq!(crds.insert(val3, 3, GossipRoute::LocalMessage), Ok(()));
|
||||
assert_eq!(crds.insert(val3, 3), Ok(()));
|
||||
assert_eq!(*crds.purged.back().unwrap(), (value_hash, 3));
|
||||
assert_eq!(crds.table[&val2.label()].local_timestamp, 3);
|
||||
assert_eq!(crds.table[&val2.label()].ordinal, 2);
|
||||
@@ -800,22 +663,19 @@ mod tests {
|
||||
let pubkey = Pubkey::new_unique();
|
||||
let node = NodeInstance::new(&mut rng, pubkey, now);
|
||||
let node = make_crds_value(node);
|
||||
assert_eq!(crds.insert(node, now, GossipRoute::LocalMessage), Ok(()));
|
||||
assert_eq!(crds.insert(node, now), Ok(()));
|
||||
// A node-instance with a different key should insert fine even with
|
||||
// older timestamps.
|
||||
let other = NodeInstance::new(&mut rng, Pubkey::new_unique(), now - 1);
|
||||
let other = make_crds_value(other);
|
||||
assert_eq!(crds.insert(other, now, GossipRoute::LocalMessage), Ok(()));
|
||||
assert_eq!(crds.insert(other, now), Ok(()));
|
||||
// A node-instance with older timestamp should fail to insert, even if
|
||||
// the wallclock is more recent.
|
||||
let other = NodeInstance::new(&mut rng, pubkey, now - 1);
|
||||
let other = other.with_wallclock(now + 1);
|
||||
let other = make_crds_value(other);
|
||||
let value_hash = hash(&serialize(&other).unwrap());
|
||||
assert_eq!(
|
||||
crds.insert(other, now, GossipRoute::LocalMessage),
|
||||
Err(CrdsError::InsertFailed)
|
||||
);
|
||||
assert_eq!(crds.insert(other, now), Err(CrdsError::InsertFailed));
|
||||
assert_eq!(*crds.purged.back().unwrap(), (value_hash, now));
|
||||
// A node instance with the same timestamp should insert only if the
|
||||
// random token is larger.
|
||||
@@ -824,7 +684,7 @@ mod tests {
|
||||
let other = NodeInstance::new(&mut rng, pubkey, now);
|
||||
let other = make_crds_value(other);
|
||||
let value_hash = hash(&serialize(&other).unwrap());
|
||||
match crds.insert(other, now, GossipRoute::LocalMessage) {
|
||||
match crds.insert(other, now) {
|
||||
Ok(()) => num_overrides += 1,
|
||||
Err(CrdsError::InsertFailed) => {
|
||||
assert_eq!(*crds.purged.back().unwrap(), (value_hash, now))
|
||||
@@ -839,7 +699,7 @@ mod tests {
|
||||
let other = NodeInstance::new(&mut rng, pubkey, now + k);
|
||||
let other = other.with_wallclock(now - 1);
|
||||
let other = make_crds_value(other);
|
||||
match crds.insert(other, now, GossipRoute::LocalMessage) {
|
||||
match crds.insert(other, now) {
|
||||
Ok(()) => (),
|
||||
_ => panic!(),
|
||||
}
|
||||
@@ -851,10 +711,7 @@ mod tests {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 1, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val.clone(), 1), Ok(()));
|
||||
let mut set = HashMap::new();
|
||||
set.insert(Pubkey::default(), 0);
|
||||
assert!(crds.find_old_labels(&thread_pool, 0, &set).is_empty());
|
||||
@@ -877,10 +734,7 @@ mod tests {
|
||||
let mut timeouts = HashMap::new();
|
||||
let val = CrdsValue::new_rand(&mut rng, None);
|
||||
timeouts.insert(Pubkey::default(), 3);
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 0, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(()));
|
||||
assert!(crds.find_old_labels(&thread_pool, 2, &timeouts).is_empty());
|
||||
timeouts.insert(val.pubkey(), 1);
|
||||
assert_eq!(
|
||||
@@ -903,10 +757,7 @@ mod tests {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_matches!(
|
||||
crds.insert(val.clone(), 1, GossipRoute::LocalMessage),
|
||||
Ok(_)
|
||||
);
|
||||
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
|
||||
let mut set = HashMap::new();
|
||||
set.insert(Pubkey::default(), 1);
|
||||
assert_eq!(
|
||||
@@ -921,10 +772,7 @@ mod tests {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 1, GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val.clone(), 1), Ok(()));
|
||||
let mut set = HashMap::new();
|
||||
//now < timestamp
|
||||
set.insert(Pubkey::default(), 0);
|
||||
@@ -966,7 +814,7 @@ mod tests {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
let local_timestamp = new_rand_timestamp(&mut rng);
|
||||
if let Ok(()) = crds.insert(value, local_timestamp, GossipRoute::LocalMessage) {
|
||||
if let Ok(()) = crds.insert(value, local_timestamp) {
|
||||
num_inserts += 1;
|
||||
check_crds_shards(&crds);
|
||||
}
|
||||
@@ -1120,7 +968,7 @@ mod tests {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
let local_timestamp = new_rand_timestamp(&mut rng);
|
||||
if let Ok(()) = crds.insert(value, local_timestamp, GossipRoute::LocalMessage) {
|
||||
if let Ok(()) = crds.insert(value, local_timestamp) {
|
||||
num_inserts += 1;
|
||||
}
|
||||
if k % 16 == 0 {
|
||||
@@ -1174,7 +1022,7 @@ mod tests {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
let local_timestamp = new_rand_timestamp(&mut rng);
|
||||
let _ = crds.insert(value, local_timestamp, GossipRoute::LocalMessage);
|
||||
let _ = crds.insert(value, local_timestamp);
|
||||
if k % 64 == 0 {
|
||||
check_crds_records(&crds);
|
||||
}
|
||||
@@ -1205,10 +1053,7 @@ mod tests {
|
||||
node.shred_version = 42;
|
||||
let node = CrdsData::ContactInfo(node);
|
||||
let node = CrdsValue::new_unsigned(node);
|
||||
assert_eq!(
|
||||
crds.insert(node, timestamp(), GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(node, timestamp()), Ok(()));
|
||||
assert_eq!(crds.get_shred_version(&pubkey), Some(42));
|
||||
// An outdated value should not update shred-version:
|
||||
let mut node = ContactInfo::new_rand(&mut rng, Some(pubkey));
|
||||
@@ -1216,10 +1061,7 @@ mod tests {
|
||||
node.shred_version = 8;
|
||||
let node = CrdsData::ContactInfo(node);
|
||||
let node = CrdsValue::new_unsigned(node);
|
||||
assert_eq!(
|
||||
crds.insert(node, timestamp(), GossipRoute::LocalMessage),
|
||||
Err(CrdsError::InsertFailed)
|
||||
);
|
||||
assert_eq!(crds.insert(node, timestamp()), Err(CrdsError::InsertFailed));
|
||||
assert_eq!(crds.get_shred_version(&pubkey), Some(42));
|
||||
// Update shred version:
|
||||
let mut node = ContactInfo::new_rand(&mut rng, Some(pubkey));
|
||||
@@ -1227,19 +1069,13 @@ mod tests {
|
||||
node.shred_version = 8;
|
||||
let node = CrdsData::ContactInfo(node);
|
||||
let node = CrdsValue::new_unsigned(node);
|
||||
assert_eq!(
|
||||
crds.insert(node, timestamp(), GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(node, timestamp()), Ok(()));
|
||||
assert_eq!(crds.get_shred_version(&pubkey), Some(8));
|
||||
// Add other crds values with the same pubkey.
|
||||
let val = SnapshotHash::new_rand(&mut rng, Some(pubkey));
|
||||
let val = CrdsData::SnapshotHashes(val);
|
||||
let val = CrdsValue::new_unsigned(val);
|
||||
assert_eq!(
|
||||
crds.insert(val, timestamp(), GossipRoute::LocalMessage),
|
||||
Ok(())
|
||||
);
|
||||
assert_eq!(crds.insert(val, timestamp()), Ok(()));
|
||||
assert_eq!(crds.get_shred_version(&pubkey), Some(8));
|
||||
// Remove contact-info. Shred version should stay there since there
|
||||
// are still values associated with the pubkey.
|
||||
@@ -1276,7 +1112,7 @@ mod tests {
|
||||
let keypair = &keypairs[rng.gen_range(0, keypairs.len())];
|
||||
let value = CrdsValue::new_rand(&mut rng, Some(keypair));
|
||||
let local_timestamp = new_rand_timestamp(&mut rng);
|
||||
let _ = crds.insert(value, local_timestamp, GossipRoute::LocalMessage);
|
||||
let _ = crds.insert(value, local_timestamp);
|
||||
}
|
||||
let num_values = crds.table.len();
|
||||
let num_pubkeys = num_unique_pubkeys(crds.table.values());
|
||||
@@ -1317,10 +1153,7 @@ mod tests {
|
||||
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_matches!(
|
||||
crds.insert(val.clone(), 1, GossipRoute::LocalMessage),
|
||||
Ok(_)
|
||||
);
|
||||
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
|
||||
let mut set = HashMap::new();
|
||||
|
||||
//default has max timeout, but pubkey should still expire
|
||||
|
@@ -7,7 +7,7 @@ use {
|
||||
crate::{
|
||||
cluster_info::Ping,
|
||||
contact_info::ContactInfo,
|
||||
crds::{Crds, GossipRoute},
|
||||
crds::Crds,
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, CrdsGossipPull, ProcessPullStats},
|
||||
crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE},
|
||||
@@ -88,7 +88,7 @@ impl CrdsGossip {
|
||||
now: u64,
|
||||
) -> HashMap<Pubkey, Vec<CrdsValue>> {
|
||||
for entry in pending_push_messages {
|
||||
let _ = self.crds.insert(entry, now, GossipRoute::LocalMessage);
|
||||
let _ = self.crds.insert(entry, now);
|
||||
}
|
||||
self.push.new_push_messages(&self.crds, now)
|
||||
}
|
||||
@@ -150,7 +150,7 @@ impl CrdsGossip {
|
||||
});
|
||||
let now = timestamp();
|
||||
for entry in entries {
|
||||
if let Err(err) = self.crds.insert(entry, now, GossipRoute::LocalMessage) {
|
||||
if let Err(err) = self.crds.insert(entry, now) {
|
||||
error!("push_duplicate_shred faild: {:?}", err);
|
||||
}
|
||||
}
|
||||
@@ -334,7 +334,7 @@ impl CrdsGossip {
|
||||
// Only for tests and simulations.
|
||||
pub(crate) fn mock_clone(&self) -> Self {
|
||||
Self {
|
||||
crds: self.crds.mock_clone(),
|
||||
crds: self.crds.clone(),
|
||||
push: self.push.mock_clone(),
|
||||
pull: self.pull.mock_clone(),
|
||||
}
|
||||
@@ -377,7 +377,6 @@ mod test {
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
|
||||
0,
|
||||
GossipRoute::LocalMessage,
|
||||
)
|
||||
.unwrap();
|
||||
crds_gossip.refresh_push_active_set(
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user