Compare commits

78 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 7ca7f8604d | |
| | e2b5f2dd9c | |
| | 3652bd57a9 | |
| | 5077d6bfb3 | |
| | f0ee3e9deb | |
| | babad39846 | |
| | c15aa4a968 | |
| | 3124a88284 | |
| | e76a2065e3 | |
| | 45f8e453a9 | |
| | 20f9c12855 | |
| | 4218414c87 | |
| | 60c91d386f | |
| | e477501687 | |
| | 20463e141e | |
| | e699462ed3 | |
| | 8b345f3258 | |
| | 56436a6271 | |
| | 805ea6f469 | |
| | 1db1d173fc | |
| | 11476038cd | |
| | a669ef3abb | |
| | dbbdfa1dbb | |
| | 768c6b4bef | |
| | 8bcc04c275 | |
| | 2fd822887f | |
| | e2c8aa0847 | |
| | 9b049402c9 | |
| | d0e1779893 | |
| | 929ffc5a4e | |
| | 1f63fb06f1 | |
| | b49aa125c9 | |
| | 55836d133e | |
| | 277e402d55 | |
| | 0ab8312b23 | |
| | bc4c5c5a97 | |
| | 1a9aa78129 | |
| | 798a6db915 | |
| | 0a4a3fd37e | |
| | 66242eab41 | |
| | 7f0d4f0656 | |
| | acba8d6026 | |
| | 1ff9555099 | |
| | 72a13e2a72 | |
| | 74cdfc2213 | |
| | 7b8e5a9f47 | |
| | 80525ac862 | |
| | c14f98c6fc | |
| | c6edfc3944 | |
| | b95c493d66 | |
| | 5871462241 | |
| | 53bb826375 | |
| | c769bcc418 | |
| | f06a4c7861 | |
| | 0cae099d12 | |
| | 4bc3653906 | |
| | 3e7050983a | |
| | 9f1bb75445 | |
| | 139bb32dba | |
| | 158f6f3725 | |
| | e33f9ea6b5 | |
| | 473037db86 | |
| | b0e14ea83c | |
| | 782a549613 | |
| | c805f7dc4e | |
| | 782829152e | |
| | da6f09afb8 | |
| | 004b1b9c3f | |
| | 2f8d0f88d6 | |
| | 177d241160 | |
| | 5323622842 | |
| | c852923347 | |
| | 5dc4410d58 | |
| | da4642d634 | |
| | a264be1791 | |
| | 9aff121949 | |
| | a7f4d1487a | |
| | 11e43e1654 | |
.gitignore (vendored) | 4

@@ -23,3 +23,7 @@ log-*/
/.idea/
/solana.iml
/.vscode/

# fetch-spl.sh artifacts
/spl-genesis-args.sh
/spl_*.so
Cargo.lock (generated) | 2504

File diff suppressed because it is too large.
Cargo.toml

@@ -25,6 +25,7 @@ members = [
    "log-analyzer",
    "merkle-tree",
    "stake-o-matic",
    "storage-bigtable",
    "streamer",
    "measure",
    "metrics",

@@ -64,6 +65,4 @@ members = [

exclude = [
    "programs/bpf",
    "programs/move_loader",
    "programs/librapay",
]
account-decoder/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "solana-account-decoder"
version = "1.2.14"
version = "1.2.23"
description = "Solana account decoder"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"

@@ -10,14 +10,16 @@ edition = "2018"

[dependencies]
bincode = "1.2.1"
base64 = "0.12.3"
bs58 = "0.3.1"
bv = "0.11.1"
Inflector = "0.11.4"
lazy_static = "1.4.0"
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
spl-memo = { version = "1.0.4", features = ["skip-no-mangle"] }
spl-sdk = { package = "solana-sdk", version = "=1.2.13", default-features = false }
spl-token = { version = "1.0.2", features = ["skip-no-mangle"] }
solana-config-program = { path = "../programs/config", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
serde = "1.0.112"
serde_derive = "1.0.103"
serde_json = "1.0.54"
account-decoder/src/lib.rs

@@ -4,15 +4,20 @@ extern crate lazy_static;
extern crate serde_derive;

pub mod parse_account_data;
pub mod parse_config;
pub mod parse_nonce;
pub mod parse_stake;
pub mod parse_sysvar;
pub mod parse_token;
pub mod parse_vote;
pub mod validator_info;

use crate::parse_account_data::parse_account_data;
use serde_json::Value;
use solana_sdk::{account::Account, clock::Epoch, pubkey::Pubkey};
use crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount};
use solana_sdk::{account::Account, clock::Epoch, fee_calculator::FeeCalculator, pubkey::Pubkey};
use std::str::FromStr;

pub type StringAmount = String;

/// A duplicate representation of an Account for pretty JSON serialization
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]

@@ -28,31 +33,41 @@ pub struct UiAccount {
#[serde(rename_all = "camelCase", untagged)]
pub enum UiAccountData {
    Binary(String),
    Json(Value),
}

impl From<Vec<u8>> for UiAccountData {
    fn from(data: Vec<u8>) -> Self {
        Self::Binary(bs58::encode(data).into_string())
    }
    Json(ParsedAccount),
    Binary64(String),
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub enum UiAccountEncoding {
    Binary,
    Binary, // SLOW! Avoid this encoding
    JsonParsed,
    Binary64,
}

impl UiAccount {
    pub fn encode(account: Account, encoding: UiAccountEncoding) -> Self {
    pub fn encode(
        pubkey: &Pubkey,
        account: Account,
        encoding: UiAccountEncoding,
        additional_data: Option<AccountAdditionalData>,
        data_slice_config: Option<UiDataSliceConfig>,
    ) -> Self {
        let data = match encoding {
            UiAccountEncoding::Binary => account.data.into(),
            UiAccountEncoding::Binary => UiAccountData::Binary(
                bs58::encode(slice_data(&account.data, data_slice_config)).into_string(),
            ),
            UiAccountEncoding::Binary64 => UiAccountData::Binary64(base64::encode(slice_data(
                &account.data,
                data_slice_config,
            ))),
            UiAccountEncoding::JsonParsed => {
                if let Ok(parsed_data) = parse_account_data(&account.owner, &account.data) {
                if let Ok(parsed_data) =
                    parse_account_data(pubkey, &account.owner, &account.data, additional_data)
                {
                    UiAccountData::Json(parsed_data)
                } else {
                    account.data.into()
                    UiAccountData::Binary64(base64::encode(&account.data))
                }
            }
        };

@@ -69,6 +84,7 @@ impl UiAccount {
        let data = match &self.data {
            UiAccountData::Json(_) => None,
            UiAccountData::Binary(blob) => bs58::decode(blob).into_vec().ok(),
            UiAccountData::Binary64(blob) => base64::decode(blob).ok(),
        }?;
        Some(Account {
            lamports: self.lamports,

@@ -79,3 +95,79 @@ impl UiAccount {
        })
    }
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiFeeCalculator {
    pub lamports_per_signature: StringAmount,
}

impl From<FeeCalculator> for UiFeeCalculator {
    fn from(fee_calculator: FeeCalculator) -> Self {
        Self {
            lamports_per_signature: fee_calculator.lamports_per_signature.to_string(),
        }
    }
}

impl Default for UiFeeCalculator {
    fn default() -> Self {
        Self {
            lamports_per_signature: "0".to_string(),
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UiDataSliceConfig {
    pub offset: usize,
    pub length: usize,
}

fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8] {
    if let Some(UiDataSliceConfig { offset, length }) = data_slice_config {
        if offset >= data.len() {
            &[]
        } else if length > data.len() - offset {
            &data[offset..]
        } else {
            &data[offset..offset + length]
        }
    } else {
        data
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_slice_data() {
        let data = vec![1, 2, 3, 4, 5];
        let slice_config = Some(UiDataSliceConfig {
            offset: 0,
            length: 5,
        });
        assert_eq!(slice_data(&data, slice_config), &data[..]);

        let slice_config = Some(UiDataSliceConfig {
            offset: 0,
            length: 10,
        });
        assert_eq!(slice_data(&data, slice_config), &data[..]);

        let slice_config = Some(UiDataSliceConfig {
            offset: 1,
            length: 2,
        });
        assert_eq!(slice_data(&data, slice_config), &data[1..3]);

        let slice_config = Some(UiDataSliceConfig {
            offset: 10,
            length: 2,
        });
        assert_eq!(slice_data(&data, slice_config), &[] as &[u8]);
    }
}
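The reworked `UiAccount::encode` above now takes the account's own pubkey, an optional `AccountAdditionalData`, and an optional `UiDataSliceConfig` that is applied by `slice_data` before encoding. A minimal usage sketch, assuming the `solana-account-decoder` crate from this diff and an `account`/`pubkey` pair obtained elsewhere (the wrapper function name is illustrative, not part of the crate):

```rust
use solana_account_decoder::{UiAccount, UiAccountEncoding, UiDataSliceConfig};
use solana_sdk::{account::Account, pubkey::Pubkey};

// Base64-encode only bytes [0, 32) of the account data; no extra parsing
// context (e.g. SPL token decimals) is supplied.
fn encode_first_32_bytes(pubkey: &Pubkey, account: Account) -> UiAccount {
    UiAccount::encode(
        pubkey,
        account,
        UiAccountEncoding::Binary64,
        None,
        Some(UiDataSliceConfig { offset: 0, length: 32 }),
    )
}
```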
account-decoder/src/parse_account_data.rs

@@ -1,20 +1,31 @@
use crate::{parse_nonce::parse_nonce, parse_token::parse_token, parse_vote::parse_vote};
use crate::{
    parse_config::parse_config,
    parse_nonce::parse_nonce,
    parse_stake::parse_stake,
    parse_sysvar::parse_sysvar,
    parse_token::{parse_token, spl_token_id_v1_0},
    parse_vote::parse_vote,
};
use inflector::Inflector;
use serde_json::{json, Value};
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program};
use std::{collections::HashMap, str::FromStr};
use serde_json::Value;
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
use std::collections::HashMap;
use thiserror::Error;

lazy_static! {
    static ref SYSTEM_PROGRAM_ID: Pubkey =
        Pubkey::from_str(&system_program::id().to_string()).unwrap();
    static ref TOKEN_PROGRAM_ID: Pubkey = Pubkey::from_str(&spl_token::id().to_string()).unwrap();
    static ref VOTE_PROGRAM_ID: Pubkey =
        Pubkey::from_str(&solana_vote_program::id().to_string()).unwrap();
    static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
    static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
    static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
    static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
    static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v1_0();
    static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
    pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
        let mut m = HashMap::new();
        m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
        m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
        m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::Token);
        m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
        m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
        m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
        m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
        m
    };

@@ -28,6 +39,9 @@ pub enum ParseAccountError {
    #[error("Program not parsable")]
    ProgramNotParsable,

    #[error("Additional data required to parse: {0}")]
    AdditionalDataMissing(String),

    #[error("Instruction error")]
    InstructionError(#[from] InstructionError),

@@ -35,26 +49,55 @@ pub enum ParseAccountError {
    SerdeJsonError(#[from] serde_json::error::Error),
}

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct ParsedAccount {
    pub program: String,
    pub parsed: Value,
    pub space: u64,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ParsableAccount {
    Config,
    Nonce,
    Token,
    SplToken,
    Stake,
    Sysvar,
    Vote,
}

pub fn parse_account_data(program_id: &Pubkey, data: &[u8]) -> Result<Value, ParseAccountError> {
#[derive(Default)]
pub struct AccountAdditionalData {
    pub spl_token_decimals: Option<u8>,
}

pub fn parse_account_data(
    pubkey: &Pubkey,
    program_id: &Pubkey,
    data: &[u8],
    additional_data: Option<AccountAdditionalData>,
) -> Result<ParsedAccount, ParseAccountError> {
    let program_name = PARSABLE_PROGRAM_IDS
        .get(program_id)
        .ok_or_else(|| ParseAccountError::ProgramNotParsable)?;
    let additional_data = additional_data.unwrap_or_default();
    let parsed_json = match program_name {
        ParsableAccount::Config => serde_json::to_value(parse_config(data, pubkey)?)?,
        ParsableAccount::Nonce => serde_json::to_value(parse_nonce(data)?)?,
        ParsableAccount::Token => serde_json::to_value(parse_token(data)?)?,
        ParsableAccount::SplToken => {
            serde_json::to_value(parse_token(data, additional_data.spl_token_decimals)?)?
        }
        ParsableAccount::Stake => serde_json::to_value(parse_stake(data)?)?,
        ParsableAccount::Sysvar => serde_json::to_value(parse_sysvar(data, pubkey)?)?,
        ParsableAccount::Vote => serde_json::to_value(parse_vote(data)?)?,
    };
    Ok(json!({
        format!("{:?}", program_name).to_kebab_case(): parsed_json
    }))
    Ok(ParsedAccount {
        program: format!("{:?}", program_name).to_kebab_case(),
        parsed: parsed_json,
        space: data.len() as u64,
    })
}

#[cfg(test)]
@@ -68,20 +111,35 @@ mod test {

    #[test]
    fn test_parse_account_data() {
        let account_pubkey = Pubkey::new_rand();
        let other_program = Pubkey::new_rand();
        let data = vec![0; 4];
        assert!(parse_account_data(&other_program, &data).is_err());
        assert!(parse_account_data(&account_pubkey, &other_program, &data, None).is_err());

        let vote_state = VoteState::default();
        let mut vote_account_data: Vec<u8> = vec![0; VoteState::size_of()];
        let versioned = VoteStateVersions::Current(Box::new(vote_state));
        VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
        let parsed = parse_account_data(&solana_vote_program::id(), &vote_account_data).unwrap();
        assert!(parsed.as_object().unwrap().contains_key("vote"));
        let parsed = parse_account_data(
            &account_pubkey,
            &solana_vote_program::id(),
            &vote_account_data,
            None,
        )
        .unwrap();
        assert_eq!(parsed.program, "vote".to_string());
        assert_eq!(parsed.space, VoteState::size_of() as u64);

        let nonce_data = Versions::new_current(State::Initialized(Data::default()));
        let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
        let parsed = parse_account_data(&system_program::id(), &nonce_account_data).unwrap();
        assert!(parsed.as_object().unwrap().contains_key("nonce"));
        let parsed = parse_account_data(
            &account_pubkey,
            &system_program::id(),
            &nonce_account_data,
            None,
        )
        .unwrap();
        assert_eq!(parsed.program, "nonce".to_string());
        assert_eq!(parsed.space, State::size() as u64);
    }
}
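The reworked dispatcher keys off `PARSABLE_PROGRAM_IDS` and returns a structured `ParsedAccount` instead of a bare `serde_json::Value`. A minimal sketch of how a caller might drive it, assuming the account bytes and owner come from an RPC fetch and that the SPL token decimals, when relevant, are known out of band (the wrapper function is hypothetical):

```rust
use solana_account_decoder::parse_account_data::{parse_account_data, AccountAdditionalData};
use solana_sdk::pubkey::Pubkey;

fn describe_account(
    pubkey: &Pubkey,
    owner: &Pubkey,
    data: &[u8],
    spl_token_decimals: Option<u8>, // only needed for SPL token accounts
) -> Option<String> {
    let parsed = parse_account_data(
        pubkey,
        owner,
        data,
        Some(AccountAdditionalData { spl_token_decimals }),
    )
    .ok()?;
    // `program` is the kebab-cased ParsableAccount variant, e.g. "nonce" or "spl-token".
    Some(format!("{} account using {} bytes", parsed.program, parsed.space))
}
```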
account-decoder/src/parse_config.rs (new file) | 146

@@ -0,0 +1,146 @@
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    validator_info,
};
use bincode::deserialize;
use serde_json::Value;
use solana_config_program::{get_config_data, ConfigKeys};
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::config::Config as StakeConfig;

pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
    let parsed_account = if pubkey == &solana_stake_program::config::id() {
        get_config_data(data)
            .ok()
            .and_then(|data| deserialize::<StakeConfig>(data).ok())
            .map(|config| ConfigAccountType::StakeConfig(config.into()))
    } else {
        deserialize::<ConfigKeys>(data).ok().and_then(|key_list| {
            if !key_list.keys.is_empty() && key_list.keys[0].0 == validator_info::id() {
                parse_config_data::<String>(data, key_list.keys).and_then(|validator_info| {
                    Some(ConfigAccountType::ValidatorInfo(UiConfig {
                        keys: validator_info.keys,
                        config_data: serde_json::from_str(&validator_info.config_data).ok()?,
                    }))
                })
            } else {
                None
            }
        })
    };
    parsed_account.ok_or(ParseAccountError::AccountNotParsable(
        ParsableAccount::Config,
    ))
}

fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConfig<T>>
where
    T: serde::de::DeserializeOwned,
{
    let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
    let keys = keys
        .iter()
        .map(|key| UiConfigKey {
            pubkey: key.0.to_string(),
            signer: key.1,
        })
        .collect();
    Some(UiConfig { keys, config_data })
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum ConfigAccountType {
    StakeConfig(UiStakeConfig),
    ValidatorInfo(UiConfig<Value>),
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiConfigKey {
    pub pubkey: String,
    pub signer: bool,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStakeConfig {
    pub warmup_cooldown_rate: f64,
    pub slash_penalty: u8,
}

impl From<StakeConfig> for UiStakeConfig {
    fn from(config: StakeConfig) -> Self {
        Self {
            warmup_cooldown_rate: config.warmup_cooldown_rate,
            slash_penalty: config.slash_penalty,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiConfig<T> {
    pub keys: Vec<UiConfigKey>,
    pub config_data: T,
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::validator_info::ValidatorInfo;
    use serde_json::json;
    use solana_config_program::create_config_account;

    #[test]
    fn test_parse_config() {
        let stake_config = StakeConfig {
            warmup_cooldown_rate: 0.25,
            slash_penalty: 50,
        };
        let stake_config_account = create_config_account(vec![], &stake_config, 10);
        assert_eq!(
            parse_config(
                &stake_config_account.data,
                &solana_stake_program::config::id()
            )
            .unwrap(),
            ConfigAccountType::StakeConfig(UiStakeConfig {
                warmup_cooldown_rate: 0.25,
                slash_penalty: 50,
            }),
        );

        let validator_info = ValidatorInfo {
            info: serde_json::to_string(&json!({
                "name": "Solana",
            }))
            .unwrap(),
        };
        let info_pubkey = Pubkey::new_rand();
        let validator_info_config_account = create_config_account(
            vec![(validator_info::id(), false), (info_pubkey, true)],
            &validator_info,
            10,
        );
        assert_eq!(
            parse_config(&validator_info_config_account.data, &info_pubkey).unwrap(),
            ConfigAccountType::ValidatorInfo(UiConfig {
                keys: vec![
                    UiConfigKey {
                        pubkey: validator_info::id().to_string(),
                        signer: false,
                    },
                    UiConfigKey {
                        pubkey: info_pubkey.to_string(),
                        signer: true,
                    }
                ],
                config_data: serde_json::from_str(r#"{"name":"Solana"}"#).unwrap(),
            }),
        );

        let bad_data = vec![0; 4];
        assert!(parse_config(&bad_data, &info_pubkey).is_err());
    }
}
account-decoder/src/parse_nonce.rs

@@ -1,6 +1,5 @@
use crate::parse_account_data::ParseAccountError;
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
use solana_sdk::{
    fee_calculator::FeeCalculator,
    instruction::InstructionError,
    nonce::{state::Versions, State},
};

@@ -14,14 +13,14 @@ pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
        State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
            authority: data.authority.to_string(),
            blockhash: data.blockhash.to_string(),
            fee_calculator: data.fee_calculator,
            fee_calculator: data.fee_calculator.into(),
        })),
    }
}

/// A duplicate representation of NonceState for pretty JSON serialization
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum UiNonceState {
    Uninitialized,
    Initialized(UiNonceData),

@@ -32,7 +31,7 @@ pub enum UiNonceState {
pub struct UiNonceData {
    pub authority: String,
    pub blockhash: String,
    pub fee_calculator: FeeCalculator,
    pub fee_calculator: UiFeeCalculator,
}

#[cfg(test)]
@@ -56,7 +55,9 @@ mod test {
        UiNonceState::Initialized(UiNonceData {
            authority: Pubkey::default().to_string(),
            blockhash: Hash::default().to_string(),
            fee_calculator: FeeCalculator::default(),
            fee_calculator: UiFeeCalculator {
                lamports_per_signature: 0.to_string(),
            },
        }),
    );
account-decoder/src/parse_stake.rs (new file) | 236
@@ -0,0 +1,236 @@
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    StringAmount,
};
use bincode::deserialize;
use solana_sdk::clock::{Epoch, UnixTimestamp};
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};

pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
    let stake_state: StakeState = deserialize(data)
        .map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::Stake))?;
    let parsed_account = match stake_state {
        StakeState::Uninitialized => StakeAccountType::Uninitialized,
        StakeState::Initialized(meta) => StakeAccountType::Initialized(UiStakeAccount {
            meta: meta.into(),
            stake: None,
        }),
        StakeState::Stake(meta, stake) => StakeAccountType::Delegated(UiStakeAccount {
            meta: meta.into(),
            stake: Some(stake.into()),
        }),
        StakeState::RewardsPool => StakeAccountType::RewardsPool,
    };
    Ok(parsed_account)
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
#[allow(clippy::large_enum_variant)]
pub enum StakeAccountType {
    Uninitialized,
    Initialized(UiStakeAccount),
    Delegated(UiStakeAccount),
    RewardsPool,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStakeAccount {
    pub meta: UiMeta,
    pub stake: Option<UiStake>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiMeta {
    pub rent_exempt_reserve: StringAmount,
    pub authorized: UiAuthorized,
    pub lockup: UiLockup,
}

impl From<Meta> for UiMeta {
    fn from(meta: Meta) -> Self {
        Self {
            rent_exempt_reserve: meta.rent_exempt_reserve.to_string(),
            authorized: meta.authorized.into(),
            lockup: meta.lockup.into(),
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiLockup {
    pub unix_timestamp: UnixTimestamp,
    pub epoch: Epoch,
    pub custodian: String,
}

impl From<Lockup> for UiLockup {
    fn from(lockup: Lockup) -> Self {
        Self {
            unix_timestamp: lockup.unix_timestamp,
            epoch: lockup.epoch,
            custodian: lockup.custodian.to_string(),
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiAuthorized {
    pub staker: String,
    pub withdrawer: String,
}

impl From<Authorized> for UiAuthorized {
    fn from(authorized: Authorized) -> Self {
        Self {
            staker: authorized.staker.to_string(),
            withdrawer: authorized.withdrawer.to_string(),
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStake {
    pub delegation: UiDelegation,
    pub credits_observed: u64,
}

impl From<Stake> for UiStake {
    fn from(stake: Stake) -> Self {
        Self {
            delegation: stake.delegation.into(),
            credits_observed: stake.credits_observed,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiDelegation {
    pub voter: String,
    pub stake: StringAmount,
    pub activation_epoch: StringAmount,
    pub deactivation_epoch: StringAmount,
    pub warmup_cooldown_rate: f64,
}

impl From<Delegation> for UiDelegation {
    fn from(delegation: Delegation) -> Self {
        Self {
            voter: delegation.voter_pubkey.to_string(),
            stake: delegation.stake.to_string(),
            activation_epoch: delegation.activation_epoch.to_string(),
            deactivation_epoch: delegation.deactivation_epoch.to_string(),
            warmup_cooldown_rate: delegation.warmup_cooldown_rate,
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use bincode::serialize;
    use solana_sdk::pubkey::Pubkey;

    #[test]
    fn test_parse_stake() {
        let stake_state = StakeState::Uninitialized;
        let stake_data = serialize(&stake_state).unwrap();
        assert_eq!(
            parse_stake(&stake_data).unwrap(),
            StakeAccountType::Uninitialized
        );

        let pubkey = Pubkey::new_rand();
        let custodian = Pubkey::new_rand();
        let authorized = Authorized::auto(&pubkey);
        let lockup = Lockup {
            unix_timestamp: 0,
            epoch: 1,
            custodian,
        };
        let meta = Meta {
            rent_exempt_reserve: 42,
            authorized,
            lockup,
        };

        let stake_state = StakeState::Initialized(meta);
        let stake_data = serialize(&stake_state).unwrap();
        assert_eq!(
            parse_stake(&stake_data).unwrap(),
            StakeAccountType::Initialized(UiStakeAccount {
                meta: UiMeta {
                    rent_exempt_reserve: 42.to_string(),
                    authorized: UiAuthorized {
                        staker: pubkey.to_string(),
                        withdrawer: pubkey.to_string(),
                    },
                    lockup: UiLockup {
                        unix_timestamp: 0,
                        epoch: 1,
                        custodian: custodian.to_string(),
                    }
                },
                stake: None,
            })
        );

        let voter_pubkey = Pubkey::new_rand();
        let stake = Stake {
            delegation: Delegation {
                voter_pubkey,
                stake: 20,
                activation_epoch: 2,
                deactivation_epoch: std::u64::MAX,
                warmup_cooldown_rate: 0.25,
            },
            credits_observed: 10,
        };

        let stake_state = StakeState::Stake(meta, stake);
        let stake_data = serialize(&stake_state).unwrap();
        assert_eq!(
            parse_stake(&stake_data).unwrap(),
            StakeAccountType::Delegated(UiStakeAccount {
                meta: UiMeta {
                    rent_exempt_reserve: 42.to_string(),
                    authorized: UiAuthorized {
                        staker: pubkey.to_string(),
                        withdrawer: pubkey.to_string(),
                    },
                    lockup: UiLockup {
                        unix_timestamp: 0,
                        epoch: 1,
                        custodian: custodian.to_string(),
                    }
                },
                stake: Some(UiStake {
                    delegation: UiDelegation {
                        voter: voter_pubkey.to_string(),
                        stake: 20.to_string(),
                        activation_epoch: 2.to_string(),
                        deactivation_epoch: std::u64::MAX.to_string(),
                        warmup_cooldown_rate: 0.25,
                    },
                    credits_observed: 10,
                })
            })
        );

        let stake_state = StakeState::RewardsPool;
        let stake_data = serialize(&stake_state).unwrap();
        assert_eq!(
            parse_stake(&stake_data).unwrap(),
            StakeAccountType::RewardsPool
        );

        let bad_data = vec![1, 2, 3, 4];
        assert!(parse_stake(&bad_data).is_err());
    }
}
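The `#[serde(rename_all = "camelCase", tag = "type", content = "info")]` attributes used throughout these new parsers produce self-describing JSON: data-carrying variants get a `type` tag plus an `info` object, while unit variants carry only the tag. A small sketch of that, assuming serde's adjacently tagged representation behaves as described (the `main` wrapper is purely illustrative):

```rust
use serde_json::json;
use solana_account_decoder::parse_stake::StakeAccountType;

fn main() {
    // A unit variant should serialize to just its camelCased tag.
    let value = serde_json::to_value(StakeAccountType::RewardsPool).unwrap();
    assert_eq!(value, json!({ "type": "rewardsPool" }));
}
```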
account-decoder/src/parse_sysvar.rs (new file) | 328
@@ -0,0 +1,328 @@
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    StringAmount, UiFeeCalculator,
};
use bincode::deserialize;
use bv::BitVec;
use solana_sdk::{
    clock::{Clock, Epoch, Slot, UnixTimestamp},
    epoch_schedule::EpochSchedule,
    pubkey::Pubkey,
    rent::Rent,
    slot_hashes::SlotHashes,
    slot_history::{self, SlotHistory},
    stake_history::{StakeHistory, StakeHistoryEntry},
    sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
};

pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
    let parsed_account = {
        if pubkey == &sysvar::clock::id() {
            deserialize::<Clock>(data)
                .ok()
                .map(|clock| SysvarAccountType::Clock(clock.into()))
        } else if pubkey == &sysvar::epoch_schedule::id() {
            deserialize(data).ok().map(SysvarAccountType::EpochSchedule)
        } else if pubkey == &sysvar::fees::id() {
            deserialize::<Fees>(data)
                .ok()
                .map(|fees| SysvarAccountType::Fees(fees.into()))
        } else if pubkey == &sysvar::recent_blockhashes::id() {
            deserialize::<RecentBlockhashes>(data)
                .ok()
                .map(|recent_blockhashes| {
                    let recent_blockhashes = recent_blockhashes
                        .iter()
                        .map(|entry| UiRecentBlockhashesEntry {
                            blockhash: entry.blockhash.to_string(),
                            fee_calculator: entry.fee_calculator.clone().into(),
                        })
                        .collect();
                    SysvarAccountType::RecentBlockhashes(recent_blockhashes)
                })
        } else if pubkey == &sysvar::rent::id() {
            deserialize::<Rent>(data)
                .ok()
                .map(|rent| SysvarAccountType::Rent(rent.into()))
        } else if pubkey == &sysvar::rewards::id() {
            deserialize::<Rewards>(data)
                .ok()
                .map(|rewards| SysvarAccountType::Rewards(rewards.into()))
        } else if pubkey == &sysvar::slot_hashes::id() {
            deserialize::<SlotHashes>(data).ok().map(|slot_hashes| {
                let slot_hashes = slot_hashes
                    .iter()
                    .map(|slot_hash| UiSlotHashEntry {
                        slot: slot_hash.0,
                        hash: slot_hash.1.to_string(),
                    })
                    .collect();
                SysvarAccountType::SlotHashes(slot_hashes)
            })
        } else if pubkey == &sysvar::slot_history::id() {
            deserialize::<SlotHistory>(data).ok().map(|slot_history| {
                SysvarAccountType::SlotHistory(UiSlotHistory {
                    next_slot: slot_history.next_slot,
                    bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
                })
            })
        } else if pubkey == &sysvar::stake_history::id() {
            deserialize::<StakeHistory>(data).ok().map(|stake_history| {
                let stake_history = stake_history
                    .iter()
                    .map(|entry| UiStakeHistoryEntry {
                        epoch: entry.0,
                        stake_history: entry.1.clone(),
                    })
                    .collect();
                SysvarAccountType::StakeHistory(stake_history)
            })
        } else {
            None
        }
    };
    parsed_account.ok_or(ParseAccountError::AccountNotParsable(
        ParsableAccount::Sysvar,
    ))
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum SysvarAccountType {
    Clock(UiClock),
    EpochSchedule(EpochSchedule),
    Fees(UiFees),
    RecentBlockhashes(Vec<UiRecentBlockhashesEntry>),
    Rent(UiRent),
    Rewards(UiRewards),
    SlotHashes(Vec<UiSlotHashEntry>),
    SlotHistory(UiSlotHistory),
    StakeHistory(Vec<UiStakeHistoryEntry>),
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiClock {
    pub slot: Slot,
    pub epoch: Epoch,
    pub leader_schedule_epoch: Epoch,
    pub unix_timestamp: UnixTimestamp,
}

impl From<Clock> for UiClock {
    fn from(clock: Clock) -> Self {
        Self {
            slot: clock.slot,
            epoch: clock.epoch,
            leader_schedule_epoch: clock.leader_schedule_epoch,
            unix_timestamp: clock.unix_timestamp,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiFees {
    pub fee_calculator: UiFeeCalculator,
}
impl From<Fees> for UiFees {
    fn from(fees: Fees) -> Self {
        Self {
            fee_calculator: fees.fee_calculator.into(),
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiRent {
    pub lamports_per_byte_year: StringAmount,
    pub exemption_threshold: f64,
    pub burn_percent: u8,
}

impl From<Rent> for UiRent {
    fn from(rent: Rent) -> Self {
        Self {
            lamports_per_byte_year: rent.lamports_per_byte_year.to_string(),
            exemption_threshold: rent.exemption_threshold,
            burn_percent: rent.burn_percent,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub struct UiRewards {
    pub validator_point_value: f64,
}

impl From<Rewards> for UiRewards {
    fn from(rewards: Rewards) -> Self {
        Self {
            validator_point_value: rewards.validator_point_value,
        }
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiRecentBlockhashesEntry {
    pub blockhash: String,
    pub fee_calculator: UiFeeCalculator,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiSlotHashEntry {
    pub slot: Slot,
    pub hash: String,
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiSlotHistory {
    pub next_slot: Slot,
    pub bits: String,
}

struct SlotHistoryBits(BitVec<u64>);

impl std::fmt::Debug for SlotHistoryBits {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        for i in 0..slot_history::MAX_ENTRIES {
            if self.0.get(i) {
                write!(f, "1")?;
            } else {
                write!(f, "0")?;
            }
        }
        Ok(())
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiStakeHistoryEntry {
    pub epoch: Epoch,
    pub stake_history: StakeHistoryEntry,
}

#[cfg(test)]
mod test {
    use super::*;
    use solana_sdk::{
        fee_calculator::FeeCalculator,
        hash::Hash,
        sysvar::{recent_blockhashes::IterItem, Sysvar},
    };
    use std::iter::FromIterator;

    #[test]
    fn test_parse_sysvars() {
        let clock_sysvar = Clock::default().create_account(1);
        assert_eq!(
            parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
            SysvarAccountType::Clock(UiClock::default()),
        );

        let epoch_schedule = EpochSchedule {
            slots_per_epoch: 12,
            leader_schedule_slot_offset: 0,
            warmup: false,
            first_normal_epoch: 1,
            first_normal_slot: 12,
        };
        let epoch_schedule_sysvar = epoch_schedule.create_account(1);
        assert_eq!(
            parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
            SysvarAccountType::EpochSchedule(epoch_schedule),
        );

        let fees_sysvar = Fees::default().create_account(1);
        assert_eq!(
            parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
            SysvarAccountType::Fees(UiFees::default()),
        );

        let hash = Hash::new(&[1; 32]);
        let fee_calculator = FeeCalculator {
            lamports_per_signature: 10,
        };
        let recent_blockhashes =
            RecentBlockhashes::from_iter(vec![IterItem(0, &hash, &fee_calculator)].into_iter());
        let recent_blockhashes_sysvar = recent_blockhashes.create_account(1);
        assert_eq!(
            parse_sysvar(
                &recent_blockhashes_sysvar.data,
                &sysvar::recent_blockhashes::id()
            )
            .unwrap(),
            SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
                blockhash: hash.to_string(),
                fee_calculator: fee_calculator.into(),
            }]),
        );

        let rent = Rent {
            lamports_per_byte_year: 10,
            exemption_threshold: 2.0,
            burn_percent: 5,
        };
        let rent_sysvar = rent.create_account(1);
        assert_eq!(
            parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
            SysvarAccountType::Rent(rent.into()),
        );

        let rewards_sysvar = Rewards::default().create_account(1);
        assert_eq!(
            parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
            SysvarAccountType::Rewards(UiRewards::default()),
        );

        let mut slot_hashes = SlotHashes::default();
        slot_hashes.add(1, hash);
        let slot_hashes_sysvar = slot_hashes.create_account(1);
        assert_eq!(
            parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
            SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
                slot: 1,
                hash: hash.to_string(),
            }]),
        );

        let mut slot_history = SlotHistory::default();
        slot_history.add(42);
        let slot_history_sysvar = slot_history.create_account(1);
        assert_eq!(
            parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
            SysvarAccountType::SlotHistory(UiSlotHistory {
                next_slot: slot_history.next_slot,
                bits: format!("{:?}", SlotHistoryBits(slot_history.bits)),
            }),
        );

        let mut stake_history = StakeHistory::default();
        let stake_history_entry = StakeHistoryEntry {
            effective: 10,
            activating: 2,
            deactivating: 3,
        };
        stake_history.add(1, stake_history_entry.clone());
        let stake_history_sysvar = stake_history.create_account(1);
        assert_eq!(
            parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
            SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
                epoch: 1,
                stake_history: stake_history_entry,
            }]),
        );

        let bad_pubkey = Pubkey::new_rand();
        assert!(parse_sysvar(&stake_history_sysvar.data, &bad_pubkey).is_err());

        let bad_data = vec![0; 4];
        assert!(parse_sysvar(&bad_data, &sysvar::stake_history::id()).is_err());
    }
}
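Unlike the other parsers, `parse_sysvar` dispatches on the account's own pubkey (every sysvar lives at a well-known address) rather than on its owner. A minimal sketch of reading the cluster clock through it, assuming the account was fetched over RPC (the wrapper function is hypothetical):

```rust
use solana_account_decoder::parse_sysvar::{parse_sysvar, SysvarAccountType};
use solana_sdk::{account::Account, pubkey::Pubkey, sysvar};

// Returns the cluster clock's unix timestamp if `account` really is the Clock
// sysvar; any other sysvar (or unparsable data) yields None.
fn clock_timestamp(pubkey: &Pubkey, account: &Account) -> Option<i64> {
    match parse_sysvar(&account.data, pubkey).ok()? {
        SysvarAccountType::Clock(clock) => Some(clock.unix_timestamp),
        _ => None,
    }
}
```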
account-decoder/src/parse_token.rs

@@ -1,31 +1,62 @@
use crate::parse_account_data::{ParsableAccount, ParseAccountError};
use spl_sdk::pubkey::Pubkey;
use spl_token::{
    option::COption,
    state::{Account, Mint, Multisig, State},
use crate::{
    parse_account_data::{ParsableAccount, ParseAccountError},
    StringAmount,
};
use std::mem::size_of;
use solana_sdk::pubkey::Pubkey;
use spl_token_v1_0::{
    option::COption,
    solana_sdk::pubkey::Pubkey as SplTokenPubkey,
    state::{unpack, Account, Mint, Multisig},
};
use std::{mem::size_of, str::FromStr};

pub fn parse_token(data: &[u8]) -> Result<TokenAccountType, ParseAccountError> {
// A helper function to convert spl_token_v1_0::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_id_v1_0() -> Pubkey {
    Pubkey::from_str(&spl_token_v1_0::id().to_string()).unwrap()
}

// A helper function to convert spl_token_v1_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
// solana_sdk::pubkey::Pubkey
pub fn spl_token_v1_0_native_mint() -> Pubkey {
    Pubkey::from_str(&spl_token_v1_0::native_mint::id().to_string()).unwrap()
}

pub fn parse_token(
    data: &[u8],
    mint_decimals: Option<u8>,
) -> Result<TokenAccountType, ParseAccountError> {
    let mut data = data.to_vec();
    if data.len() == size_of::<Account>() {
        let account: Account = *State::unpack(&mut data)
            .map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::Token))?;
        let account: Account = *unpack(&mut data)
            .map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
        let decimals = mint_decimals.ok_or_else(|| {
            ParseAccountError::AdditionalDataMissing(
                "no mint_decimals provided to parse spl-token account".to_string(),
            )
        })?;
        Ok(TokenAccountType::Account(UiTokenAccount {
            mint: account.mint.to_string(),
            owner: account.owner.to_string(),
            amount: account.amount,
            token_amount: token_amount_to_ui_amount(account.amount, decimals),
            delegate: match account.delegate {
                COption::Some(pubkey) => Some(pubkey.to_string()),
                COption::None => None,
            },
            is_initialized: account.is_initialized,
            is_native: account.is_native,
            delegated_amount: account.delegated_amount,
            delegated_amount: if account.delegate.is_none() {
                None
            } else {
                Some(token_amount_to_ui_amount(
                    account.delegated_amount,
                    decimals,
                ))
            },
        }))
    } else if data.len() == size_of::<Mint>() {
        let mint: Mint = *State::unpack(&mut data)
            .map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::Token))?;
        let mint: Mint = *unpack(&mut data)
            .map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
        Ok(TokenAccountType::Mint(UiMint {
            owner: match mint.owner {
                COption::Some(pubkey) => Some(pubkey.to_string()),

@@ -35,8 +66,8 @@ pub fn parse_token(data: &[u8]) -> Result<TokenAccountType, ParseAccountError> {
            is_initialized: mint.is_initialized,
        }))
    } else if data.len() == size_of::<Multisig>() {
        let multisig: Multisig = *State::unpack(&mut data)
            .map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::Token))?;
        let multisig: Multisig = *unpack(&mut data)
            .map_err(|_| ParseAccountError::AccountNotParsable(ParsableAccount::SplToken))?;
        Ok(TokenAccountType::Multisig(UiMultisig {
            num_required_signers: multisig.m,
            num_valid_signers: multisig.n,

@@ -45,7 +76,7 @@ pub fn parse_token(data: &[u8]) -> Result<TokenAccountType, ParseAccountError> {
                .signers
                .iter()
                .filter_map(|pubkey| {
                    if pubkey != &Pubkey::default() {
                    if pubkey != &SplTokenPubkey::default() {
                        Some(pubkey.to_string())
                    } else {
                        None

@@ -55,13 +86,13 @@ pub fn parse_token(data: &[u8]) -> Result<TokenAccountType, ParseAccountError> {
        }))
    } else {
        Err(ParseAccountError::AccountNotParsable(
            ParsableAccount::Token,
            ParsableAccount::SplToken,
        ))
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase")]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum TokenAccountType {
    Account(UiTokenAccount),
    Mint(UiMint),

@@ -73,11 +104,31 @@ pub enum TokenAccountType {
pub struct UiTokenAccount {
    pub mint: String,
    pub owner: String,
    pub amount: u64,
    pub token_amount: UiTokenAmount,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delegate: Option<String>,
    pub is_initialized: bool,
    pub is_native: bool,
    pub delegated_amount: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delegated_amount: Option<UiTokenAmount>,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct UiTokenAmount {
    pub ui_amount: f64,
    pub decimals: u8,
    pub amount: StringAmount,
}

pub fn token_amount_to_ui_amount(amount: u64, decimals: u8) -> UiTokenAmount {
    // Use `amount_to_ui_amount()` once spl_token is bumped to a version that supports it: https://github.com/solana-labs/solana-program-library/pull/211
    let amount_decimals = amount as f64 / 10_usize.pow(decimals as u32) as f64;
    UiTokenAmount {
        ui_amount: amount_decimals,
        decimals,
        amount: amount.to_string(),
    }
}

#[derive(Debug, Serialize, Deserialize, PartialEq)]
@@ -97,40 +148,54 @@ pub struct UiMultisig {
    pub signers: Vec<String>,
}

pub fn get_token_account_mint(data: &[u8]) -> Option<Pubkey> {
    if data.len() == size_of::<Account>() {
        Some(Pubkey::new(&data[0..32]))
    } else {
        None
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use spl_token_v1_0::state::unpack_unchecked;

    #[test]
    fn test_parse_token() {
        let mint_pubkey = Pubkey::new(&[2; 32]);
        let owner_pubkey = Pubkey::new(&[3; 32]);
        let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
        let owner_pubkey = SplTokenPubkey::new(&[3; 32]);
        let mut account_data = [0; size_of::<Account>()];
        let mut account: &mut Account = State::unpack_unchecked(&mut account_data).unwrap();
        let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
        account.mint = mint_pubkey;
        account.owner = owner_pubkey;
        account.amount = 42;
        account.is_initialized = true;
        assert!(parse_token(&account_data, None).is_err());
        assert_eq!(
            parse_token(&account_data).unwrap(),
            parse_token(&account_data, Some(2)).unwrap(),
            TokenAccountType::Account(UiTokenAccount {
                mint: mint_pubkey.to_string(),
                owner: owner_pubkey.to_string(),
                amount: 42,
                token_amount: UiTokenAmount {
                    ui_amount: 0.42,
                    decimals: 2,
                    amount: "42".to_string()
                },
                delegate: None,
                is_initialized: true,
                is_native: false,
                delegated_amount: 0,
                delegated_amount: None,
            }),
        );

        let mut mint_data = [0; size_of::<Mint>()];
        let mut mint: &mut Mint = State::unpack_unchecked(&mut mint_data).unwrap();
        let mut mint: &mut Mint = unpack_unchecked(&mut mint_data).unwrap();
        mint.owner = COption::Some(owner_pubkey);
        mint.decimals = 3;
        mint.is_initialized = true;
        assert_eq!(
            parse_token(&mint_data).unwrap(),
            parse_token(&mint_data, None).unwrap(),
            TokenAccountType::Mint(UiMint {
                owner: Some(owner_pubkey.to_string()),
                decimals: 3,

@@ -138,12 +203,12 @@ mod test {
            }),
        );

        let signer1 = Pubkey::new(&[1; 32]);
        let signer2 = Pubkey::new(&[2; 32]);
        let signer3 = Pubkey::new(&[3; 32]);
        let signer1 = SplTokenPubkey::new(&[1; 32]);
        let signer2 = SplTokenPubkey::new(&[2; 32]);
        let signer3 = SplTokenPubkey::new(&[3; 32]);
        let mut multisig_data = [0; size_of::<Multisig>()];
        let mut multisig: &mut Multisig = State::unpack_unchecked(&mut multisig_data).unwrap();
        let mut signers = [Pubkey::default(); 11];
        let mut multisig: &mut Multisig = unpack_unchecked(&mut multisig_data).unwrap();
        let mut signers = [SplTokenPubkey::default(); 11];
        signers[0] = signer1;
        signers[1] = signer2;
        signers[2] = signer3;

@@ -152,7 +217,7 @@ mod test {
        multisig.is_initialized = true;
        multisig.signers = signers;
        assert_eq!(
            parse_token(&multisig_data).unwrap(),
            parse_token(&multisig_data, None).unwrap(),
            TokenAccountType::Multisig(UiMultisig {
                num_required_signers: 2,
                num_valid_signers: 3,

@@ -166,6 +231,20 @@ mod test {
        );

        let bad_data = vec![0; 4];
        assert!(parse_token(&bad_data).is_err());
        assert!(parse_token(&bad_data, None).is_err());
    }

    #[test]
    fn test_get_token_account_mint() {
        let mint_pubkey = SplTokenPubkey::new(&[2; 32]);
        let mut account_data = [0; size_of::<Account>()];
        let mut account: &mut Account = unpack_unchecked(&mut account_data).unwrap();
        account.mint = mint_pubkey;

        let expected_mint_pubkey = Pubkey::new(&[2; 32]);
        assert_eq!(
            get_token_account_mint(&account_data),
            Some(expected_mint_pubkey)
        );
    }
}
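`token_amount_to_ui_amount` above simply divides the raw integer amount by 10^decimals, which is why `parse_token` now requires the mint's decimals as additional data for token accounts. A minimal standalone sketch of that arithmetic (the helper below mirrors the function in the diff but is not part of the crate):

```rust
// A raw amount of 1_500_000 on a mint with 6 decimals renders as 1.5 tokens.
fn raw_to_ui_amount(amount: u64, decimals: u8) -> f64 {
    amount as f64 / 10_u64.pow(decimals as u32) as f64
}

fn main() {
    assert_eq!(raw_to_ui_amount(1_500_000, 6), 1.5);
    assert_eq!(raw_to_ui_amount(42, 2), 0.42); // matches the unit test above
}
```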
account-decoder/src/parse_vote.rs

@@ -1,19 +1,19 @@
use crate::parse_account_data::ParseAccountError;
use crate::{parse_account_data::ParseAccountError, StringAmount};
use solana_sdk::{
    clock::{Epoch, Slot},
    pubkey::Pubkey,
};
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};

pub fn parse_vote(data: &[u8]) -> Result<UiVoteState, ParseAccountError> {
pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
    let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
    let epoch_credits = vote_state
        .epoch_credits()
        .iter()
        .map(|(epoch, credits, previous_credits)| UiEpochCredits {
            epoch: *epoch,
            credits: *credits,
            previous_credits: *previous_credits,
            credits: credits.to_string(),
            previous_credits: previous_credits.to_string(),
        })
        .collect();
    let votes = vote_state

@@ -45,7 +45,7 @@ pub fn parse_vote(data: &[u8]) -> Result<UiVoteState, ParseAccountError> {
            },
        )
        .collect();
    Ok(UiVoteState {
    Ok(VoteAccountType::Vote(UiVoteState {
        node_pubkey: vote_state.node_pubkey.to_string(),
        authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
        commission: vote_state.commission,

@@ -55,7 +55,14 @@ pub fn parse_vote(data: &[u8]) -> Result<UiVoteState, ParseAccountError> {
        prior_voters,
        epoch_credits,
        last_timestamp: vote_state.last_timestamp,
    })
    }))
}

/// A wrapper enum for consistency across programs
#[derive(Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
pub enum VoteAccountType {
    Vote(UiVoteState),
}

/// A duplicate representation of VoteState for pretty JSON serialization
@@ -108,8 +115,8 @@ struct UiPriorVoters {
#[serde(rename_all = "camelCase")]
struct UiEpochCredits {
    epoch: Epoch,
    credits: u64,
    previous_credits: u64,
    credits: StringAmount,
    previous_credits: StringAmount,
}

#[cfg(test)]
@@ -126,7 +133,10 @@ mod test {
        let mut expected_vote_state = UiVoteState::default();
        expected_vote_state.node_pubkey = Pubkey::default().to_string();
        expected_vote_state.authorized_withdrawer = Pubkey::default().to_string();
        assert_eq!(parse_vote(&vote_account_data).unwrap(), expected_vote_state,);
        assert_eq!(
            parse_vote(&vote_account_data).unwrap(),
            VoteAccountType::Vote(expected_vote_state)
        );

        let bad_data = vec![0; 4];
        assert!(parse_vote(&bad_data).is_err());
account-decoder/src/validator_info.rs (new file) | 18

@@ -0,0 +1,18 @@
use solana_config_program::ConfigState;

pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
pub const MAX_VALIDATOR_INFO: u64 = 576;

solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");

#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct ValidatorInfo {
    pub info: String,
}

impl ConfigState for ValidatorInfo {
    fn max_space() -> u64 {
        MAX_VALIDATOR_INFO
    }
}
accounts-bench/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
solana-logger = { path = "../logger", version = "1.2.14" }
solana-runtime = { path = "../runtime", version = "1.2.14" }
solana-measure = { path = "../measure", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-measure = { path = "../measure", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
rand = "0.7.0"
clap = "2.33.1"
crossbeam-channel = "0.4"
banking-bench/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@@ -13,16 +13,16 @@ crossbeam-channel = "0.4"
log = "0.4.6"
rand = "0.7.0"
rayon = "1.3.0"
solana-core = { path = "../core", version = "1.2.14" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
solana-streamer = { path = "../streamer", version = "1.2.14" }
solana-perf = { path = "../perf", version = "1.2.14" }
solana-ledger = { path = "../ledger", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.14" }
solana-runtime = { path = "../runtime", version = "1.2.14" }
solana-measure = { path = "../measure", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-version = { path = "../version", version = "1.2.14" }
solana-core = { path = "../core", version = "1.2.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-streamer = { path = "../streamer", version = "1.2.23" }
solana-perf = { path = "../perf", version = "1.2.23" }
solana-ledger = { path = "../ledger", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-measure = { path = "../measure", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
bench-exchange/Cargo.toml

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@@ -18,21 +18,21 @@ rand = "0.7.0"
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
solana-core = { path = "../core", version = "1.2.14" }
solana-genesis = { path = "../genesis", version = "1.2.14" }
solana-client = { path = "../client", version = "1.2.14" }
solana-faucet = { path = "../faucet", version = "1.2.14" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.14" }
solana-metrics = { path = "../metrics", version = "1.2.14" }
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
solana-runtime = { path = "../runtime", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-version = { path = "../version", version = "1.2.14" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-core = { path = "../core", version = "1.2.23" }
solana-genesis = { path = "../genesis", version = "1.2.23" }
solana-client = { path = "../client", version = "1.2.23" }
solana-faucet = { path = "../faucet", version = "1.2.23" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-metrics = { path = "../metrics", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.2.14" }
solana-local-cluster = { path = "../local-cluster", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,18 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.1"
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
solana-streamer = { path = "../streamer", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.14" }
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
solana-version = { path = "../version", version = "1.2.14" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-streamer = { path = "../streamer", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,28 +14,23 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.53"
serde_yaml = "0.8.12"
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
solana-core = { path = "../core", version = "1.2.14" }
solana-genesis = { path = "../genesis", version = "1.2.14" }
solana-client = { path = "../client", version = "1.2.14" }
solana-faucet = { path = "../faucet", version = "1.2.14" }
solana-librapay = { path = "../programs/librapay", version = "1.2.14", optional = true }
solana-logger = { path = "../logger", version = "1.2.14" }
solana-metrics = { path = "../metrics", version = "1.2.14" }
solana-measure = { path = "../measure", version = "1.2.14" }
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
solana-runtime = { path = "../runtime", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.2.14", optional = true }
solana-version = { path = "../version", version = "1.2.14" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-core = { path = "../core", version = "1.2.23" }
solana-genesis = { path = "../genesis", version = "1.2.23" }
solana-client = { path = "../client", version = "1.2.23" }
solana-faucet = { path = "../faucet", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-metrics = { path = "../metrics", version = "1.2.23" }
solana-measure = { path = "../measure", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.2.14" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
solana-local-cluster = { path = "../local-cluster", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -4,8 +4,6 @@ use rayon::prelude::*;
|
||||
use solana_client::perf_utils::{sample_txs, SampleStats};
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
#[cfg(feature = "move")]
|
||||
use solana_librapay::{create_genesis, upload_mint_script, upload_payment_script};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{self, datapoint_info};
|
||||
use solana_sdk::{
|
||||
@@ -37,9 +35,6 @@ use std::{
|
||||
const MAX_TX_QUEUE_AGE: u64 =
|
||||
MAX_PROCESSING_AGE as u64 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND;
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
use solana_librapay::librapay_transaction;
|
||||
|
||||
pub const MAX_SPENDS_PER_TX: u64 = 4;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -51,8 +46,6 @@ pub type Result<T> = std::result::Result<T, BenchTpsError>;
|
||||
|
||||
pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
|
||||
|
||||
type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
|
||||
|
||||
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
|
||||
loop {
|
||||
match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
|
||||
@@ -122,7 +115,6 @@ fn generate_chunked_transfers(
|
||||
threads: usize,
|
||||
duration: Duration,
|
||||
sustained: bool,
|
||||
libra_args: Option<LibraKeys>,
|
||||
) {
|
||||
// generate and send transactions for the specified duration
|
||||
let start = Instant::now();
|
||||
@@ -137,7 +129,6 @@ fn generate_chunked_transfers(
|
||||
&dest_keypair_chunks[chunk_index],
|
||||
threads,
|
||||
reclaim_lamports_back_to_source_account,
|
||||
&libra_args,
|
||||
);
|
||||
|
||||
// In sustained mode, overlap the transfers with generation. This has higher average
|
||||
@@ -205,12 +196,7 @@ where
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn do_bench_tps<T>(
|
||||
client: Arc<T>,
|
||||
config: Config,
|
||||
gen_keypairs: Vec<Keypair>,
|
||||
libra_args: Option<LibraKeys>,
|
||||
) -> u64
|
||||
pub fn do_bench_tps<T>(client: Arc<T>, config: Config, gen_keypairs: Vec<Keypair>) -> u64
|
||||
where
|
||||
T: 'static + Client + Send + Sync,
|
||||
{
|
||||
@@ -294,7 +280,6 @@ where
|
||||
threads,
|
||||
duration,
|
||||
sustained,
|
||||
libra_args,
|
||||
);
|
||||
|
||||
// Stop the sampling threads so it will collect the stats
|
||||
@@ -340,52 +325,6 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
fn generate_move_txs(
|
||||
source: &[&Keypair],
|
||||
dest: &VecDeque<&Keypair>,
|
||||
reclaim: bool,
|
||||
move_keypairs: &[Keypair],
|
||||
libra_pay_program_id: &Pubkey,
|
||||
libra_mint_id: &Pubkey,
|
||||
blockhash: &Hash,
|
||||
) -> Vec<(Transaction, u64)> {
|
||||
let count = move_keypairs.len() / 2;
|
||||
let source_move = &move_keypairs[..count];
|
||||
let dest_move = &move_keypairs[count..];
|
||||
let pairs: Vec<_> = if !reclaim {
|
||||
source_move
|
||||
.iter()
|
||||
.zip(dest_move.iter())
|
||||
.zip(source.iter())
|
||||
.collect()
|
||||
} else {
|
||||
dest_move
|
||||
.iter()
|
||||
.zip(source_move.iter())
|
||||
.zip(dest.iter())
|
||||
.collect()
|
||||
};
|
||||
|
||||
pairs
|
||||
.par_iter()
|
||||
.map(|((from, to), payer)| {
|
||||
(
|
||||
librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
libra_mint_id,
|
||||
&payer,
|
||||
&from,
|
||||
&to.pubkey(),
|
||||
1,
|
||||
*blockhash,
|
||||
),
|
||||
timestamp(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn generate_system_txs(
|
||||
source: &[&Keypair],
|
||||
dest: &VecDeque<&Keypair>,
|
||||
@@ -416,7 +355,6 @@ fn generate_txs(
|
||||
dest: &VecDeque<&Keypair>,
|
||||
threads: usize,
|
||||
reclaim: bool,
|
||||
libra_args: &Option<LibraKeys>,
|
||||
) {
|
||||
let blockhash = *blockhash.read().unwrap();
|
||||
let tx_count = source.len();
|
||||
@@ -426,33 +364,7 @@ fn generate_txs(
|
||||
);
|
||||
let signing_start = Instant::now();
|
||||
|
||||
let transactions = if let Some((
|
||||
_libra_genesis_keypair,
|
||||
_libra_pay_program_id,
|
||||
_libra_mint_program_id,
|
||||
_libra_keys,
|
||||
)) = libra_args
|
||||
{
|
||||
#[cfg(not(feature = "move"))]
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
{
|
||||
generate_move_txs(
|
||||
source,
|
||||
dest,
|
||||
reclaim,
|
||||
&_libra_keys,
|
||||
_libra_pay_program_id,
|
||||
&_libra_genesis_keypair.pubkey(),
|
||||
&blockhash,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
generate_system_txs(source, dest, reclaim, &blockhash)
|
||||
};
|
||||
let transactions = generate_system_txs(source, dest, reclaim, &blockhash);
|
||||
|
||||
let duration = signing_start.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
@@ -954,181 +866,13 @@ pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u
|
||||
(rnd.gen_n_keypairs(total_keys), extra)
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
fn fund_move_keys<T: Client>(
|
||||
client: &T,
|
||||
funding_key: &Keypair,
|
||||
keypairs: &[Keypair],
|
||||
total: u64,
|
||||
libra_pay_program_id: &Pubkey,
|
||||
libra_mint_program_id: &Pubkey,
|
||||
libra_genesis_key: &Keypair,
|
||||
) {
|
||||
let (mut blockhash, _fee_calculator) = get_recent_blockhash(client);
|
||||
|
||||
info!("creating the libra funding account..");
|
||||
let libra_funding_key = Keypair::new();
|
||||
let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
|
||||
client
|
||||
.send_and_confirm_message(&[funding_key, &libra_funding_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("minting to funding keypair");
|
||||
let tx = librapay_transaction::mint_tokens(
|
||||
&libra_mint_program_id,
|
||||
funding_key,
|
||||
libra_genesis_key,
|
||||
&libra_funding_key.pubkey(),
|
||||
total,
|
||||
blockhash,
|
||||
);
|
||||
client
|
||||
.send_and_confirm_message(&[funding_key, libra_genesis_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("creating {} move accounts...", keypairs.len());
|
||||
let total_len = keypairs.len();
|
||||
let create_len = 5;
|
||||
let mut funding_time = Measure::start("funding_time");
|
||||
for (i, keys) in keypairs.chunks(create_len).enumerate() {
|
||||
if client
|
||||
.get_balance_with_commitment(&keys[0].pubkey(), CommitmentConfig::recent())
|
||||
.unwrap_or(0)
|
||||
> 0
|
||||
{
|
||||
// already created these accounts.
|
||||
break;
|
||||
}
|
||||
|
||||
let keypairs: Vec<_> = keys.iter().map(|k| k).collect();
|
||||
let tx = librapay_transaction::create_accounts(funding_key, &keypairs, 1, blockhash);
|
||||
let ser_size = bincode::serialized_size(&tx).unwrap();
|
||||
let mut keys = vec![funding_key];
|
||||
keys.extend(&keypairs);
|
||||
client.send_and_confirm_message(&keys, tx.message).unwrap();
|
||||
|
||||
if i % 10 == 0 {
|
||||
info!(
|
||||
"created {} accounts of {} (size {})",
|
||||
i,
|
||||
total_len / create_len,
|
||||
ser_size,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const NUM_FUNDING_KEYS: usize = 10;
|
||||
let funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
|
||||
let pubkey_amounts: Vec<_> = funding_keys
|
||||
.iter()
|
||||
.map(|key| (key.pubkey(), total / NUM_FUNDING_KEYS as u64))
|
||||
.collect();
|
||||
let instructions = system_instruction::transfer_many(&funding_key.pubkey(), &pubkey_amounts);
|
||||
let message = Message::new(&instructions, Some(&funding_key.pubkey()));
|
||||
let tx = Transaction::new(&[funding_key], message, blockhash);
|
||||
client
|
||||
.send_and_confirm_message(&[funding_key], tx.message)
|
||||
.unwrap();
|
||||
let mut balance = 0;
|
||||
for _ in 0..20 {
|
||||
if let Ok(balance_) = client
|
||||
.get_balance_with_commitment(&funding_keys[0].pubkey(), CommitmentConfig::recent())
|
||||
{
|
||||
if balance_ > 0 {
|
||||
balance = balance_;
|
||||
break;
|
||||
}
|
||||
}
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
assert!(balance > 0);
|
||||
info!(
|
||||
"funded multiple funding accounts with {:?} lanports",
|
||||
balance
|
||||
);
|
||||
|
||||
let libra_funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
|
||||
for (i, key) in libra_funding_keys.iter().enumerate() {
|
||||
let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
|
||||
client
|
||||
.send_and_confirm_message(&[&funding_keys[i], &key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
let tx = librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
&libra_genesis_key.pubkey(),
|
||||
&funding_keys[i],
|
||||
&libra_funding_key,
|
||||
&key.pubkey(),
|
||||
total / NUM_FUNDING_KEYS as u64,
|
||||
blockhash,
|
||||
);
|
||||
client
|
||||
.send_and_confirm_message(&[&funding_keys[i], &libra_funding_key], tx.message)
|
||||
.unwrap();
|
||||
|
||||
info!("funded libra funding key {}", i);
|
||||
}
|
||||
|
||||
let keypair_count = keypairs.len();
|
||||
let amount = total / (keypair_count as u64);
|
||||
for (i, keys) in keypairs[..keypair_count]
|
||||
.chunks(NUM_FUNDING_KEYS)
|
||||
.enumerate()
|
||||
{
|
||||
for (j, key) in keys.iter().enumerate() {
|
||||
let tx = librapay_transaction::transfer(
|
||||
libra_pay_program_id,
|
||||
&libra_genesis_key.pubkey(),
|
||||
&funding_keys[j],
|
||||
&libra_funding_keys[j],
|
||||
&key.pubkey(),
|
||||
amount,
|
||||
blockhash,
|
||||
);
|
||||
|
||||
let _sig = client
|
||||
.async_send_transaction(tx.clone())
|
||||
.expect("create_account in generate_and_fund_keypairs");
|
||||
}
|
||||
|
||||
for (j, key) in keys.iter().enumerate() {
|
||||
let mut times = 0;
|
||||
loop {
|
||||
let balance =
|
||||
librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
|
||||
if balance >= amount {
|
||||
break;
|
||||
} else if times > 20 {
|
||||
info!("timed out.. {} key: {} balance: {}", i, j, balance);
|
||||
break;
|
||||
} else {
|
||||
times += 1;
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"funded group {} of {}",
|
||||
i + 1,
|
||||
keypairs.len() / NUM_FUNDING_KEYS
|
||||
);
|
||||
blockhash = get_recent_blockhash(client).0;
|
||||
}
|
||||
|
||||
funding_time.stop();
|
||||
info!("done funding keys, took {} ms", funding_time.as_ms());
|
||||
}
|
||||
|
||||
pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
client: Arc<T>,
|
||||
faucet_addr: Option<SocketAddr>,
|
||||
funding_key: &Keypair,
|
||||
keypair_count: usize,
|
||||
lamports_per_account: u64,
|
||||
use_move: bool,
|
||||
) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
|
||||
) -> Result<Vec<Keypair>> {
|
||||
info!("Creating {} keypairs...", keypair_count);
|
||||
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
|
||||
info!("Get lamports...");
|
||||
@@ -1141,12 +885,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
let last_key = keypairs[keypair_count - 1].pubkey();
|
||||
let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
let mut move_keypairs_ret = None;
|
||||
|
||||
#[cfg(not(feature = "move"))]
|
||||
let move_keypairs_ret = None;
|
||||
|
||||
// Repeated runs will eat up keypair balances from transaction fees. In order to quickly
|
||||
// start another bench-tps run without re-funding all of the keypairs, check if the
|
||||
// keypairs still have at least 80% of the expected funds. That should be enough to
|
||||
@@ -1157,10 +895,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
let max_fee = fee_rate_governor.max_lamports_per_signature;
|
||||
let extra_fees = extra * max_fee;
|
||||
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
|
||||
let mut total = lamports_per_account * total_keypairs + extra_fees;
|
||||
if use_move {
|
||||
total *= 3;
|
||||
}
|
||||
let total = lamports_per_account * total_keypairs + extra_fees;
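A rough worked example of the funding arithmetic described in the comment above, with illustrative numbers rather than values taken from this change:

    // Hypothetical inputs: 4 generated keypairs, 2 extra size-alignment keys,
    // 20 lamports per account, 10 lamports max fee per signature.
    let lamports_per_account: u64 = 20;
    let total_keypairs: u64 = 4 + 1; // +1 for the funding keypair
    let extra_fees: u64 = 2 * 10;    // extra keys * max_lamports_per_signature
    let total = lamports_per_account * total_keypairs + extra_fees;
    assert_eq!(total, 120);          // total lamports the funding key must cover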
|
||||
|
||||
let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
|
||||
info!(
|
||||
@@ -1172,40 +907,6 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
|
||||
}
|
||||
|
||||
#[cfg(feature = "move")]
|
||||
{
|
||||
if use_move {
|
||||
let libra_genesis_keypair =
|
||||
create_genesis(&funding_key, client.as_ref(), 10_000_000);
|
||||
let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
|
||||
let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());
|
||||
|
||||
// Generate another set of keypairs for move accounts.
|
||||
// Still fund the solana ones which will be used for fees.
|
||||
let seed = [0u8; 32];
|
||||
let mut rnd = GenKeys::new(seed);
|
||||
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
|
||||
fund_move_keys(
|
||||
client.as_ref(),
|
||||
funding_key,
|
||||
&move_keypairs,
|
||||
total / 3,
|
||||
&libra_pay_program_id,
|
||||
&libra_mint_program_id,
|
||||
&libra_genesis_keypair,
|
||||
);
|
||||
move_keypairs_ret = Some((
|
||||
libra_genesis_keypair,
|
||||
libra_pay_program_id,
|
||||
libra_mint_program_id,
|
||||
move_keypairs,
|
||||
));
|
||||
|
||||
// Give solana keys 1/3 and move keys 1/3 the lamports. Keep 1/3 for fees.
|
||||
total /= 3;
|
||||
}
|
||||
}
|
||||
|
||||
fund_keys(
|
||||
client,
|
||||
funding_key,
|
||||
@@ -1219,7 +920,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
|
||||
keypairs.truncate(keypair_count);
|
||||
|
||||
Ok((keypairs, move_keypairs_ret))
|
||||
Ok(keypairs)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -1243,11 +944,11 @@ mod tests {
|
||||
config.duration = Duration::from_secs(5);
|
||||
|
||||
let keypair_count = config.tx_count * config.keypair_multiplier;
|
||||
let (keypairs, _move_keypairs) =
|
||||
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20)
|
||||
.unwrap();
|
||||
|
||||
do_bench_tps(client, config, keypairs, None);
|
||||
do_bench_tps(client, config, keypairs);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1258,9 +959,8 @@ mod tests {
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
|
||||
let (keypairs, _move_keypairs) =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
|
||||
.unwrap();
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
|
||||
|
||||
for kp in &keypairs {
|
||||
assert_eq!(
|
||||
@@ -1282,9 +982,8 @@ mod tests {
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
|
||||
let (keypairs, _move_keypairs) =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
|
||||
.unwrap();
|
||||
let keypairs =
|
||||
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
|
||||
|
||||
for kp in &keypairs {
|
||||
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
|
||||
|
@@ -23,7 +23,6 @@ pub struct Config {
pub read_from_client_file: bool,
pub target_lamports_per_signature: u64,
pub multi_client: bool,
pub use_move: bool,
pub num_lamports_per_account: u64,
pub target_slots_per_epoch: u64,
}
@@ -46,7 +45,6 @@ impl Default for Config {
read_from_client_file: false,
target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature,
multi_client: true,
use_move: false,
num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
target_slots_per_epoch: 0,
}
@@ -109,11 +107,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
.long("sustained")
.help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
)
.arg(
Arg::with_name("use-move")
.long("use-move")
.help("Use Move language transactions to perform transfers."),
)
.arg(
Arg::with_name("no-multi-client")
.long("no-multi-client")
@@ -263,7 +256,6 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
}

args.use_move = matches.is_present("use-move");
args.multi_client = !matches.is_present("no-multi-client");

if let Some(v) = matches.value_of("num_lamports_per_account") {
@@ -29,7 +29,6 @@ fn main() {
write_to_client_file,
read_from_client_file,
target_lamports_per_signature,
use_move,
multi_client,
num_lamports_per_account,
..
@@ -86,7 +85,7 @@ fn main() {
Arc::new(get_client(&nodes))
};

let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
let keypairs = if *read_from_client_file {
let path = Path::new(&client_ids_and_stake_file);
let file = File::open(path).unwrap();

@@ -115,8 +114,8 @@ fn main() {
// Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
// This prevents the amount of storage needed for bench-tps accounts from creeping up
// across multiple runs.
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
(keypairs, None)
keypairs.sort_by_key(|x| x.pubkey().to_string());
keypairs
} else {
generate_and_fund_keypairs(
client.clone(),
@@ -124,7 +123,6 @@ fn main() {
&id,
keypair_count,
*num_lamports_per_account,
*use_move,
)
.unwrap_or_else(|e| {
eprintln!("Error could not fund keys: {:?}", e);
@@ -132,5 +130,5 @@ fn main() {
})
};

do_bench_tps(client, cli_config, keypairs, move_keypairs);
do_bench_tps(client, cli_config, keypairs);
}
@@ -6,17 +6,11 @@ use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
use solana_core::validator::ValidatorConfig;
use solana_faucet::faucet::run_local_faucet;
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
#[cfg(feature = "move")]
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, Signer};
use std::sync::{mpsc::channel, Arc};
use std::time::Duration;

fn test_bench_tps_local_cluster(config: Config) {
#[cfg(feature = "move")]
let native_instruction_processors = vec![solana_move_loader_program()];

#[cfg(not(feature = "move"))]
let native_instruction_processors = vec![];

solana_logger::setup();
@@ -48,17 +42,16 @@ fn test_bench_tps_local_cluster(config: Config) {
let lamports_per_account = 100;

let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, move_keypairs) = generate_and_fund_keypairs(
let keypairs = generate_and_fund_keypairs(
client.clone(),
Some(faucet_addr),
&config.id,
keypair_count,
lamports_per_account,
config.use_move,
)
.unwrap();

let _total = do_bench_tps(client, config, keypairs, move_keypairs);
let _total = do_bench_tps(client, config, keypairs);

#[cfg(not(debug_assertions))]
assert!(_total > 100);
@@ -73,14 +66,3 @@ fn test_bench_tps_local_cluster_solana() {

test_bench_tps_local_cluster(config);
}

#[test]
#[serial]
fn test_bench_tps_local_cluster_move() {
let mut config = Config::default();
config.tx_count = 100;
config.duration = Duration::from_secs(10);
config.use_move = true;

test_bench_tps_local_cluster(config);
}
@@ -16,6 +16,3 @@ steps:
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
# - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
# name: "move"
# timeout_in_minutes: 20
@@ -7,7 +7,7 @@ source multinode-demo/common.sh

rm -rf config/run/init-completed config/ledger config/snapshot-ledger

timeout 15 ./run.sh &
timeout 120 ./run.sh &
pid=$!

attempts=20
@@ -27,5 +27,5 @@ Alternatively, you can source it from within a script:
local PATCH=0
local SPECIAL=""

semverParseInto "1.2.14" MAJOR MINOR PATCH SPECIAL
semverParseInto "1.2.23" MAJOR MINOR PATCH SPECIAL
semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL
@@ -1 +0,0 @@
test-stable.sh
@@ -47,7 +47,6 @@ echo "Executing $testName"
case $testName in
test-stable)
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)
ci/affects-files.sh \
@@ -93,27 +92,6 @@ test-stable-perf)
_ cargo +"$rust_stable" build --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
;;
test-move)
ci/affects-files.sh \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable.sh \
^ci/test-move.sh \
^programs/move_loader \
^programs/librapay \
^logger/ \
^runtime/ \
^sdk/ \
|| {
annotate --style info \
"Skipped $testName as no relevant files were modified"
exit 0
}
_ cargo +"$rust_stable" test --manifest-path programs/move_loader/Cargo.toml ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path programs/librapay/Cargo.toml ${V:+--verbose} -- --nocapture
exit 0
;;
test-local-cluster)
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.2.14"
version = "1.2.23"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -27,29 +27,29 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.110"
serde_derive = "1.0.103"
serde_json = "1.0.53"
solana-account-decoder = { path = "../account-decoder", version = "1.2.14" }
solana-budget-program = { path = "../programs/budget", version = "1.2.14" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
solana-cli-config = { path = "../cli-config", version = "1.2.14" }
solana-client = { path = "../client", version = "1.2.14" }
solana-config-program = { path = "../programs/config", version = "1.2.14" }
solana-faucet = { path = "../faucet", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.14" }
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.14" }
solana-runtime = { path = "../runtime", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-stake-program = { path = "../programs/stake", version = "1.2.14" }
solana-transaction-status = { path = "../transaction-status", version = "1.2.14" }
solana-version = { path = "../version", version = "1.2.14" }
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
solana-vote-signer = { path = "../vote-signer", version = "1.2.14" }
solana-account-decoder = { path = "../account-decoder", version = "1.2.23" }
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-cli-config = { path = "../cli-config", version = "1.2.23" }
solana-client = { path = "../client", version = "1.2.23" }
solana-config-program = { path = "../programs/config", version = "1.2.23" }
solana-faucet = { path = "../faucet", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
solana-vote-signer = { path = "../vote-signer", version = "1.2.23" }
thiserror = "1.0.19"
url = "2.1.1"

[dev-dependencies]
solana-core = { path = "../core", version = "1.2.14" }
solana-budget-program = { path = "../programs/budget", version = "1.2.14" }
solana-core = { path = "../core", version = "1.2.23" }
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
tempfile = "3.1.0"

[[bin]]
@@ -29,7 +29,7 @@ use solana_client::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsFilter, RpcSendTransactionConfig},
|
||||
rpc_response::RpcKeyedAccount,
|
||||
rpc_response::{Response, RpcKeyedAccount},
|
||||
};
|
||||
#[cfg(not(test))]
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
@@ -246,8 +246,8 @@ pub enum CliCommand {
|
||||
},
|
||||
TransactionHistory {
|
||||
address: Pubkey,
|
||||
end_slot: Option<Slot>, // None == latest slot
|
||||
slot_limit: Option<u64>, // None == search full history
|
||||
before: Option<Signature>,
|
||||
limit: usize,
|
||||
},
|
||||
// Nonce commands
|
||||
AuthorizeNonceAccount {
|
||||
@@ -1230,7 +1230,13 @@ fn process_show_account(
|
||||
let cli_account = CliAccount {
|
||||
keyed_account: RpcKeyedAccount {
|
||||
pubkey: account_pubkey.to_string(),
|
||||
account: UiAccount::encode(account, UiAccountEncoding::Binary),
|
||||
account: UiAccount::encode(
|
||||
account_pubkey,
|
||||
account,
|
||||
UiAccountEncoding::Binary64,
|
||||
None,
|
||||
None,
|
||||
),
|
||||
},
|
||||
use_lamports_unit,
|
||||
};
|
||||
@@ -1308,23 +1314,16 @@ fn send_and_confirm_transactions_with_spinner<T: Signers>(
|
||||
transactions_signatures = transactions_signatures
|
||||
.into_iter()
|
||||
.filter(|(_transaction, signature)| {
|
||||
if let Some(signature) = signature {
|
||||
if let Ok(status) = rpc_client.get_signature_status(&signature) {
|
||||
if rpc_client
|
||||
.get_num_blocks_since_signature_confirmation(&signature)
|
||||
.unwrap_or(0)
|
||||
> 1
|
||||
{
|
||||
return false;
|
||||
} else {
|
||||
return match status {
|
||||
None => true,
|
||||
Some(result) => result.is_err(),
|
||||
};
|
||||
signature
|
||||
.and_then(|signature| rpc_client.get_signature_statuses(&[signature]).ok())
|
||||
.map(|Response { context: _, value }| match &value[0] {
|
||||
None => true,
|
||||
Some(transaction_status) => {
|
||||
!(transaction_status.confirmations.is_none()
|
||||
|| transaction_status.confirmations.unwrap() > 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
})
|
||||
.unwrap_or(true)
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -1379,7 +1378,7 @@ fn process_deploy(
|
||||
create_account_tx.try_sign(&[config.signers[0], &program_id], blockhash)?;
|
||||
messages.push(&create_account_tx.message);
|
||||
let signers = [config.signers[0], &program_id];
|
||||
let mut write_transactions = vec![];
|
||||
let mut write_messages = vec![];
|
||||
for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) {
|
||||
let instruction = loader_instruction::write(
|
||||
&program_id.pubkey(),
|
||||
@@ -1388,19 +1387,17 @@ fn process_deploy(
|
||||
chunk.to_vec(),
|
||||
);
|
||||
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&signers, blockhash)?;
|
||||
write_transactions.push(tx);
|
||||
write_messages.push(message);
|
||||
}
|
||||
for transaction in write_transactions.iter() {
|
||||
messages.push(&transaction.message);
|
||||
let mut write_message_refs = vec![];
|
||||
for message in write_messages.iter() {
|
||||
write_message_refs.push(message);
|
||||
}
|
||||
messages.append(&mut write_message_refs);
|
||||
|
||||
let instruction = loader_instruction::finalize(&program_id.pubkey(), &bpf_loader::id());
|
||||
let message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
let mut finalize_tx = Transaction::new_unsigned(message);
|
||||
finalize_tx.try_sign(&signers, blockhash)?;
|
||||
messages.push(&finalize_tx.message);
|
||||
let finalize_message = Message::new(&[instruction], Some(&signers[0].pubkey()));
|
||||
messages.push(&finalize_message);
|
||||
|
||||
check_account_for_multiple_fees(
|
||||
rpc_client,
|
||||
@@ -1415,11 +1412,24 @@ fn process_deploy(
|
||||
CliError::DynamicProgramError("Program account allocation failed".to_string())
|
||||
})?;
|
||||
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let mut write_transactions = vec![];
|
||||
for message in write_messages.into_iter() {
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&signers, blockhash)?;
|
||||
write_transactions.push(tx);
|
||||
}
|
||||
|
||||
trace!("Writing program data");
|
||||
send_and_confirm_transactions_with_spinner(&rpc_client, write_transactions, &signers).map_err(
|
||||
|_| CliError::DynamicProgramError("Data writes to program account failed".to_string()),
|
||||
)?;
|
||||
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash()?;
|
||||
let mut finalize_tx = Transaction::new_unsigned(finalize_message);
|
||||
finalize_tx.try_sign(&signers, blockhash)?;
|
||||
|
||||
trace!("Finalizing program account");
|
||||
rpc_client
|
||||
.send_and_confirm_transaction_with_spinner_and_config(
|
||||
@@ -1837,9 +1847,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
} => process_show_validators(&rpc_client, config, *use_lamports_unit, *commitment_config),
|
||||
CliCommand::TransactionHistory {
|
||||
address,
|
||||
end_slot,
|
||||
slot_limit,
|
||||
} => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),
|
||||
before,
|
||||
limit,
|
||||
} => process_transaction_history(&rpc_client, config, address, *before, *limit),
|
||||
|
||||
// Nonce Commands
|
||||
|
||||
|
@@ -16,7 +16,6 @@ use solana_client::{
|
||||
pubsub_client::{PubsubClient, SlotInfoMessage},
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcLargestAccountsFilter},
|
||||
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@@ -27,6 +26,7 @@ use solana_sdk::{
|
||||
message::Message,
|
||||
native_token::lamports_to_sol,
|
||||
pubkey::{self, Pubkey},
|
||||
signature::Signature,
|
||||
system_instruction, system_program,
|
||||
sysvar::{
|
||||
self,
|
||||
@@ -256,9 +256,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-history")
|
||||
.about("Show historical transactions affecting the given address, \
|
||||
ordered based on the slot in which they were confirmed in \
|
||||
from lowest to highest slot")
|
||||
.about("Show historical transactions affecting the given address \
|
||||
from newest to oldest")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("address")
|
||||
.index(1)
|
||||
@@ -266,26 +265,22 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"Account address"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("end_slot")
|
||||
.takes_value(false)
|
||||
.value_name("SLOT")
|
||||
.index(2)
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Slot to start from [default: latest slot at maximum commitment]"
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("limit")
|
||||
.long("limit")
|
||||
.takes_value(true)
|
||||
.value_name("NUMBER OF SLOTS")
|
||||
.value_name("LIMIT")
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Limit the search to this many slots"
|
||||
),
|
||||
),
|
||||
.default_value("1000")
|
||||
.help("Maximum number of transaction signatures to return"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("before")
|
||||
.long("before")
|
||||
.value_name("TRANSACTION_SIGNATURE")
|
||||
.takes_value(true)
|
||||
.help("Start with the first signature older than this one"),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -453,14 +448,22 @@ pub fn parse_transaction_history(
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
|
||||
let end_slot = value_t!(matches, "end_slot", Slot).ok();
|
||||
let slot_limit = value_t!(matches, "limit", u64).ok();
|
||||
|
||||
let before = match matches.value_of("before") {
|
||||
Some(signature) => Some(
|
||||
signature
|
||||
.parse()
|
||||
.map_err(|err| CliError::BadParameter(format!("Invalid signature: {}", err)))?,
|
||||
),
|
||||
None => None,
|
||||
};
|
||||
let limit = value_t_or_exit!(matches, "limit", usize);
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::TransactionHistory {
|
||||
address,
|
||||
end_slot,
|
||||
slot_limit,
|
||||
before,
|
||||
limit,
|
||||
},
|
||||
signers: vec![],
|
||||
})
|
||||
@@ -1276,41 +1279,36 @@ pub fn process_show_validators(
|
||||
|
||||
pub fn process_transaction_history(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
address: &Pubkey,
|
||||
end_slot: Option<Slot>, // None == use latest slot
|
||||
slot_limit: Option<u64>,
|
||||
before: Option<Signature>,
|
||||
limit: usize,
|
||||
) -> ProcessResult {
|
||||
let end_slot = {
|
||||
if let Some(end_slot) = end_slot {
|
||||
end_slot
|
||||
let results = rpc_client.get_confirmed_signatures_for_address2_with_config(
|
||||
address,
|
||||
before,
|
||||
Some(limit),
|
||||
)?;
|
||||
|
||||
let transactions_found = format!("{} transactions found", results.len());
|
||||
|
||||
for result in results {
|
||||
if config.verbose {
|
||||
println!(
|
||||
"{} [slot={} status={}] {}",
|
||||
result.signature,
|
||||
result.slot,
|
||||
match result.err {
|
||||
None => "Confirmed".to_string(),
|
||||
Some(err) => format!("Failed: {:?}", err),
|
||||
},
|
||||
result.memo.unwrap_or_else(|| "".to_string()),
|
||||
);
|
||||
} else {
|
||||
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
|
||||
println!("{}", result.signature);
|
||||
}
|
||||
};
|
||||
let mut start_slot = match slot_limit {
|
||||
Some(slot_limit) => end_slot.saturating_sub(slot_limit),
|
||||
None => rpc_client.minimum_ledger_slot()?,
|
||||
};
|
||||
|
||||
println!(
|
||||
"Transactions affecting {} within slots [{},{}]",
|
||||
address, start_slot, end_slot
|
||||
);
|
||||
|
||||
let mut transaction_count = 0;
|
||||
while start_slot < end_slot {
|
||||
let signatures = rpc_client.get_confirmed_signatures_for_address(
|
||||
address,
|
||||
start_slot,
|
||||
(start_slot + MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE).min(end_slot),
|
||||
)?;
|
||||
for signature in &signatures {
|
||||
println!("{}", signature);
|
||||
}
|
||||
transaction_count += signatures.len();
|
||||
start_slot += MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE;
|
||||
}
|
||||
Ok(format!("{} transactions found", transaction_count))
|
||||
Ok(transactions_found)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@@ -345,7 +345,13 @@ mod tests {
)
.unwrap();
let nonce_pubkey = Pubkey::new(&[4u8; 32]);
let rpc_nonce_account = UiAccount::encode(nonce_account, UiAccountEncoding::Binary);
let rpc_nonce_account = UiAccount::encode(
&nonce_pubkey,
nonce_account,
UiAccountEncoding::Binary64,
None,
None,
);
let get_account_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!(Some(rpc_nonce_account)),
@@ -6,9 +6,10 @@ use crate::{
|
||||
use bincode::deserialize;
|
||||
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use reqwest::blocking::Client;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
use solana_account_decoder::validator_info::{
|
||||
self, ValidatorInfo, MAX_LONG_FIELD_LENGTH, MAX_SHORT_FIELD_LENGTH,
|
||||
};
|
||||
use solana_clap_utils::{
|
||||
input_parsers::pubkey_of,
|
||||
input_validators::{is_pubkey, is_url},
|
||||
@@ -27,23 +28,6 @@ use solana_sdk::{
|
||||
};
|
||||
use std::{error, sync::Arc};
|
||||
|
||||
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
|
||||
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
|
||||
pub const MAX_VALIDATOR_INFO: u64 = 576;
|
||||
|
||||
solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");
|
||||
|
||||
#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
|
||||
pub struct ValidatorInfo {
|
||||
info: String,
|
||||
}
|
||||
|
||||
impl ConfigState for ValidatorInfo {
|
||||
fn max_space() -> u64 {
|
||||
MAX_VALIDATOR_INFO
|
||||
}
|
||||
}
|
||||
|
||||
// Return an error if a validator details are longer than the max length.
|
||||
pub fn check_details_length(string: String) -> Result<(), String> {
|
||||
if string.len() > MAX_LONG_FIELD_LENGTH {
|
||||
@@ -289,7 +273,7 @@ pub fn process_set_validator_info(
|
||||
.iter()
|
||||
.filter(|(_, account)| {
|
||||
let key_list: ConfigKeys = deserialize(&account.data).map_err(|_| false).unwrap();
|
||||
key_list.keys.contains(&(id(), false))
|
||||
key_list.keys.contains(&(validator_info::id(), false))
|
||||
})
|
||||
.find(|(pubkey, account)| {
|
||||
let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap();
|
||||
@@ -328,7 +312,10 @@ pub fn process_set_validator_info(
|
||||
};
|
||||
|
||||
let build_message = |lamports| {
|
||||
let keys = vec![(id(), false), (config.signers[0].pubkey(), true)];
|
||||
let keys = vec![
|
||||
(validator_info::id(), false),
|
||||
(config.signers[0].pubkey(), true),
|
||||
];
|
||||
if balance == 0 {
|
||||
println!(
|
||||
"Publishing info for Validator {:?}",
|
||||
@@ -400,7 +387,7 @@ pub fn process_get_validator_info(
|
||||
let key_list: ConfigKeys = deserialize(&validator_info_account.data)
|
||||
.map_err(|_| false)
|
||||
.unwrap();
|
||||
key_list.keys.contains(&(id(), false))
|
||||
key_list.keys.contains(&(validator_info::id(), false))
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
@@ -502,7 +489,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_parse_validator_info() {
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let keys = vec![(id(), false), (pubkey, true)];
|
||||
let keys = vec![(validator_info::id(), false), (pubkey, true)];
|
||||
let config = ConfigKeys { keys };
|
||||
|
||||
let mut info = Map::new();
|
||||
|
@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "1.2.14"
version = "1.2.23"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,11 +19,11 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.110"
serde_derive = "1.0.103"
serde_json = "1.0.53"
solana-account-decoder = { path = "../account-decoder", version = "1.2.14" }
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-transaction-status = { path = "../transaction-status", version = "1.2.14" }
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
solana-account-decoder = { path = "../account-decoder", version = "1.2.23" }
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
thiserror = "1.0"
tungstenite = "0.10.1"
url = "2.1.1"
@@ -32,7 +32,7 @@ url = "2.1.1"
assert_matches = "1.3.0"
jsonrpc-core = "14.1.0"
jsonrpc-http-server = "14.1.0"
solana-logger = { path = "../logger", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,8 +2,12 @@ use crate::{
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
http_sender::HttpSender,
|
||||
mock_sender::{MockSender, Mocks},
|
||||
rpc_config::{RpcLargestAccountsConfig, RpcSendTransactionConfig},
|
||||
rpc_request::{RpcError, RpcRequest},
|
||||
rpc_config::RpcAccountInfoConfig,
|
||||
rpc_config::{
|
||||
RpcGetConfirmedSignaturesForAddress2Config, RpcLargestAccountsConfig,
|
||||
RpcSendTransactionConfig, RpcTokenAccountsFilter,
|
||||
},
|
||||
rpc_request::{RpcError, RpcRequest, TokenAccountsFilter},
|
||||
rpc_response::*,
|
||||
rpc_sender::RpcSender,
|
||||
};
|
||||
@@ -11,7 +15,12 @@ use bincode::serialize;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use log::*;
|
||||
use serde_json::{json, Value};
|
||||
use solana_account_decoder::UiAccount;
|
||||
use solana_account_decoder::{
|
||||
parse_token::UiTokenAmount,
|
||||
UiAccount,
|
||||
UiAccountData::{Binary, Binary64},
|
||||
UiAccountEncoding,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{
|
||||
@@ -283,6 +292,32 @@ impl RpcClient {
|
||||
Ok(signatures)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address2(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
|
||||
self.get_confirmed_signatures_for_address2_with_config(address, None, None)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address2_with_config(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
before: Option<Signature>,
|
||||
limit: Option<usize>,
|
||||
) -> ClientResult<Vec<RpcConfirmedTransactionStatusWithSignature>> {
|
||||
let config = RpcGetConfirmedSignaturesForAddress2Config {
|
||||
before: before.map(|signature| signature.to_string()),
|
||||
limit,
|
||||
};
|
||||
|
||||
let result: Vec<RpcConfirmedTransactionStatusWithSignature> = self.send(
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2,
|
||||
json!([address.to_string(), config]),
|
||||
)?;
|
||||
|
||||
Ok(result)
|
||||
}
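As a usage illustration of the pagination parameters introduced here (a hedged sketch, not code from this change; it assumes an `rpc_client: RpcClient` and an `address: Pubkey` are already in scope):

    // Walk the address's signature history backwards, 25 signatures per page.
    let mut before = None;
    loop {
        let page = rpc_client
            .get_confirmed_signatures_for_address2_with_config(&address, before, Some(25))
            .expect("rpc request failed");
        if page.is_empty() {
            break;
        }
        for item in &page {
            println!("{} (slot {})", item.signature, item.slot);
        }
        // The next page starts just before the oldest signature seen so far.
        before = Some(page.last().unwrap().signature.parse().expect("valid signature"));
    }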
|
||||
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
@@ -438,9 +473,14 @@ impl RpcClient {
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Option<Account>> {
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::Binary64),
|
||||
commitment: Some(commitment_config),
|
||||
data_slice: None,
|
||||
};
|
||||
let response = self.sender.send(
|
||||
RpcRequest::GetAccountInfo,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
json!([pubkey.to_string(), config]),
|
||||
);
|
||||
|
||||
response
|
||||
@@ -452,8 +492,17 @@ impl RpcClient {
|
||||
}
|
||||
let Response {
|
||||
context,
|
||||
value: rpc_account,
|
||||
value: mut rpc_account,
|
||||
} = serde_json::from_value::<Response<Option<UiAccount>>>(result_json)?;
|
||||
if let Some(ref mut account) = rpc_account {
|
||||
if let Binary(_) = &account.data {
|
||||
let tmp = Binary64(String::new());
|
||||
match std::mem::replace(&mut account.data, tmp) {
|
||||
Binary(new_data) => account.data = Binary64(new_data),
|
||||
_ => panic!("should have gotten binary here."),
|
||||
}
|
||||
}
|
||||
}
|
||||
trace!("Response account {:?} {:?}", pubkey, rpc_account);
|
||||
let account = rpc_account.and_then(|rpc_account| rpc_account.decode());
|
||||
Ok(Response {
|
||||
@@ -511,17 +560,7 @@ impl RpcClient {
|
||||
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let accounts: Vec<RpcKeyedAccount> =
|
||||
self.send(RpcRequest::GetProgramAccounts, json!([pubkey.to_string()]))?;
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Pubkey".to_string()).into(),
|
||||
RpcRequest::GetProgramAccounts,
|
||||
)
|
||||
})?;
|
||||
pubkey_accounts.push((pubkey, account.decode().unwrap()));
|
||||
}
|
||||
Ok(pubkey_accounts)
|
||||
parse_keyed_accounts(accounts, RpcRequest::GetProgramAccounts)
|
||||
}
|
||||
|
||||
/// Request the transaction count.
|
||||
@@ -668,6 +707,118 @@ impl RpcClient {
|
||||
Ok(hash)
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_account_balance_with_commitment(pubkey, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_account_balance_with_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<UiTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountBalance,
|
||||
json!([pubkey.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<RpcKeyedAccount>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_delegate_with_commitment(
|
||||
delegate,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_delegate_with_commitment(
|
||||
&self,
|
||||
delegate: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<RpcKeyedAccount>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::JsonParsed),
|
||||
commitment: Some(commitment_config),
|
||||
data_slice: None,
|
||||
};
|
||||
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountsByOwner,
|
||||
json!([delegate.to_string(), token_account_filter, config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
) -> ClientResult<Vec<RpcKeyedAccount>> {
|
||||
Ok(self
|
||||
.get_token_accounts_by_owner_with_commitment(
|
||||
owner,
|
||||
token_account_filter,
|
||||
CommitmentConfig::default(),
|
||||
)?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_accounts_by_owner_with_commitment(
|
||||
&self,
|
||||
owner: &Pubkey,
|
||||
token_account_filter: TokenAccountsFilter,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<Vec<RpcKeyedAccount>> {
|
||||
let token_account_filter = match token_account_filter {
|
||||
TokenAccountsFilter::Mint(mint) => RpcTokenAccountsFilter::Mint(mint.to_string()),
|
||||
TokenAccountsFilter::ProgramId(program_id) => {
|
||||
RpcTokenAccountsFilter::ProgramId(program_id.to_string())
|
||||
}
|
||||
};
|
||||
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::JsonParsed),
|
||||
commitment: Some(commitment_config),
|
||||
data_slice: None,
|
||||
};
|
||||
|
||||
self.send(
|
||||
RpcRequest::GetTokenAccountsByOwner,
|
||||
json!([owner.to_string(), token_account_filter, config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
|
||||
Ok(self
|
||||
.get_token_supply_with_commitment(mint, CommitmentConfig::default())?
|
||||
.value)
|
||||
}
|
||||
|
||||
pub fn get_token_supply_with_commitment(
|
||||
&self,
|
||||
mint: &Pubkey,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<UiTokenAmount> {
|
||||
self.send(
|
||||
RpcRequest::GetTokenSupply,
|
||||
json!([mint.to_string(), commitment_config]),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn poll_balance_with_timeout_and_commitment(
|
||||
&self,
|
||||
pubkey: &Pubkey,
|
||||
@@ -1009,6 +1160,31 @@ pub fn get_rpc_request_str(rpc_addr: SocketAddr, tls: bool) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_keyed_accounts(
|
||||
accounts: Vec<RpcKeyedAccount>,
|
||||
request: RpcRequest,
|
||||
) -> ClientResult<Vec<(Pubkey, Account)>> {
|
||||
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
|
||||
for RpcKeyedAccount { pubkey, account } in accounts.into_iter() {
|
||||
let pubkey = pubkey.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Pubkey".to_string()).into(),
|
||||
request,
|
||||
)
|
||||
})?;
|
||||
pubkey_accounts.push((
|
||||
pubkey,
|
||||
account.decode().ok_or_else(|| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Account from rpc".to_string()).into(),
|
||||
request,
|
||||
)
|
||||
})?,
|
||||
));
|
||||
}
|
||||
Ok(pubkey_accounts)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@@ -1,5 +1,5 @@
|
||||
use crate::rpc_filter::RpcFilterType;
|
||||
use solana_account_decoder::UiAccountEncoding;
|
||||
use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
|
||||
use solana_sdk::{clock::Epoch, commitment_config::CommitmentConfig};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
@@ -47,6 +47,7 @@ pub struct RpcStakeConfig {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcAccountInfoConfig {
|
||||
pub encoding: Option<UiAccountEncoding>,
|
||||
pub data_slice: Option<UiDataSliceConfig>,
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
}
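For orientation, a sketch of how this config serializes with the flattened commitment and the new data_slice field; the values are arbitrary, serde_json is assumed to be available, and the JSON in the comment is approximate.

use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig};
use solana_client::rpc_config::RpcAccountInfoConfig;
use solana_sdk::commitment_config::CommitmentConfig;

fn main() {
    let config = RpcAccountInfoConfig {
        encoding: Some(UiAccountEncoding::JsonParsed),
        data_slice: Some(UiDataSliceConfig { offset: 0, length: 32 }),
        commitment: Some(CommitmentConfig::recent()),
    };
    // Roughly: {"encoding":"jsonParsed","dataSlice":{"offset":0,"length":32},"commitment":"recent"}
    println!("{}", serde_json::to_string(&config).unwrap());
}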
|
||||
@@ -58,3 +59,17 @@ pub struct RpcProgramAccountsConfig {
|
||||
#[serde(flatten)]
|
||||
pub account_config: RpcAccountInfoConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum RpcTokenAccountsFilter {
|
||||
Mint(String),
|
||||
ProgramId(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcGetConfirmedSignaturesForAddress2Config {
|
||||
pub before: Option<String>, // Signature as base-58 string
|
||||
pub limit: Option<usize>,
|
||||
}
|
||||
|
@@ -1,4 +1,5 @@
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::fmt;
|
||||
use thiserror::Error;
|
||||
|
||||
@@ -13,6 +14,7 @@ pub enum RpcRequest {
|
||||
GetConfirmedBlock,
|
||||
GetConfirmedBlocks,
|
||||
GetConfirmedSignaturesForAddress,
|
||||
GetConfirmedSignaturesForAddress2,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
@@ -36,6 +38,10 @@ pub enum RpcRequest {
|
||||
GetSlotsPerSegment,
|
||||
GetStoragePubkeysForSlot,
|
||||
GetSupply,
|
||||
GetTokenAccountBalance,
|
||||
GetTokenAccountsByDelegate,
|
||||
GetTokenAccountsByOwner,
|
||||
GetTokenSupply,
|
||||
GetTotalSupply,
|
||||
GetTransactionCount,
|
||||
GetVersion,
|
||||
@@ -60,6 +66,7 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress2 => "getConfirmedSignaturesForAddress2",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
@@ -83,6 +90,10 @@ impl fmt::Display for RpcRequest {
|
||||
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
|
||||
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
|
||||
RpcRequest::GetSupply => "getSupply",
|
||||
RpcRequest::GetTokenAccountBalance => "getTokenAccountBalance",
|
||||
RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate",
|
||||
RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner",
|
||||
RpcRequest::GetTokenSupply => "getTokenSupply",
|
||||
RpcRequest::GetTotalSupply => "getTotalSupply",
|
||||
RpcRequest::GetTransactionCount => "getTransactionCount",
|
||||
RpcRequest::GetVersion => "getVersion",
|
||||
@@ -103,6 +114,7 @@ pub const NUM_LARGEST_ACCOUNTS: usize = 20;
|
||||
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
|
||||
pub const MAX_GET_CONFIRMED_BLOCKS_RANGE: u64 = 500_000;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT: usize = 1_000;
|
||||
|
||||
// Validators that are this number of slots behind are considered delinquent
|
||||
pub const DELINQUENT_VALIDATOR_SLOT_DISTANCE: u64 = 128;
|
||||
@@ -131,9 +143,16 @@ pub enum RpcError {
|
||||
ForUser(String), /* "direct-to-user message" */
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum TokenAccountsFilter {
|
||||
Mint(Pubkey),
|
||||
ProgramId(Pubkey),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::rpc_config::RpcTokenAccountsFilter;
|
||||
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
|
||||
|
||||
#[test]
|
||||
@@ -198,5 +217,16 @@ mod tests {
|
||||
let request =
|
||||
test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
|
||||
assert_eq!(request["params"], json!([addr, commitment_config]));
|
||||
|
||||
// Test request with CommitmentConfig and params
|
||||
let test_request = RpcRequest::GetTokenAccountsByOwner;
|
||||
let mint = Pubkey::new_rand();
|
||||
let token_account_filter = RpcTokenAccountsFilter::Mint(mint.to_string());
|
||||
let request = test_request
|
||||
.build_request_json(1, json!([addr, token_account_filter, commitment_config]));
|
||||
assert_eq!(
|
||||
request["params"],
|
||||
json!([addr, token_account_filter, commitment_config])
|
||||
);
|
||||
}
|
||||
}
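As a rough guide to the wire format exercised by the test above, the built request is a standard JSON-RPC 2.0 envelope. The sketch below only mirrors the expected shape: pubkeys are abbreviated and the commitment value depends on the config used in the test.

fn main() {
    // Approximate body produced by
    // RpcRequest::GetTokenAccountsByOwner.build_request_json(1, params).
    let body = serde_json::json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "getTokenAccountsByOwner",
        "params": ["<owner pubkey>", { "mint": "<mint pubkey>" }, { "commitment": "recent" }],
    });
    assert_eq!(body["method"], "getTokenAccountsByOwner");
}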
|
||||
|
@@ -1,11 +1,12 @@
|
||||
use crate::client_error;
|
||||
use solana_account_decoder::UiAccount;
|
||||
use solana_account_decoder::{parse_token::UiTokenAmount, UiAccount};
|
||||
use solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
inflation::Inflation,
|
||||
transaction::{Result, TransactionError},
|
||||
};
|
||||
use solana_transaction_status::ConfirmedTransactionStatusWithSignature;
|
||||
use std::{collections::HashMap, net::SocketAddr};
|
||||
|
||||
pub type RpcResult<T> = client_error::Result<Response<T>>;
|
||||
@@ -219,3 +220,37 @@ pub struct RpcStakeActivation {
|
||||
pub active: u64,
|
||||
pub inactive: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcTokenAccountBalance {
|
||||
pub address: String,
|
||||
#[serde(flatten)]
|
||||
pub amount: UiTokenAmount,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcConfirmedTransactionStatusWithSignature {
|
||||
pub signature: String,
|
||||
pub slot: Slot,
|
||||
pub err: Option<TransactionError>,
|
||||
pub memo: Option<String>,
|
||||
}
|
||||
|
||||
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
|
||||
fn from(value: ConfirmedTransactionStatusWithSignature) -> Self {
|
||||
let ConfirmedTransactionStatusWithSignature {
|
||||
signature,
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
} = value;
|
||||
Self {
|
||||
signature: signature.to_string(),
|
||||
slot,
|
||||
err,
|
||||
memo,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -21,7 +21,7 @@ byteorder = "1.3.4"
|
||||
chrono = { version = "0.4.11", features = ["serde"] }
|
||||
core_affinity = "0.5.10"
|
||||
crossbeam-channel = "0.4"
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
ed25519-dalek = "=1.0.0-pre.4"
|
||||
fs_extra = "1.1.0"
|
||||
flate2 = "1.0"
|
||||
indexmap = "1.3"
|
||||
@@ -42,39 +42,42 @@ regex = "1.3.7"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.53"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.14" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.14" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-client = { path = "../client", version = "1.2.14" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.14" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.14" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.14" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.14" }
|
||||
solana-measure = { path = "../measure", version = "1.2.14" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
|
||||
solana-perf = { path = "../perf", version = "1.2.14" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.14" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.14" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.14" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.14" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.2.23" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.23" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-client = { path = "../client", version = "1.2.23" }
|
||||
solana-faucet = { path = "../faucet", version = "1.2.23" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.23" }
|
||||
solana-measure = { path = "../measure", version = "1.2.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
|
||||
solana-perf = { path = "../perf", version = "1.2.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.2.23" }
|
||||
solana-streamer = { path = "../streamer", version = "1.2.23" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.2.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.2.23" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.23" }
|
||||
spl-token-v1-0 = { package = "spl-token", version = "1.0.6", features = ["skip-no-mangle"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.14" }
|
||||
tokio = { version = "0.2.22", features = ["full"] }
|
||||
tokio_01 = { version = "0.1", package = "tokio" }
|
||||
tokio_fs_01 = { version = "0.1", package = "tokio-fs" }
|
||||
tokio_io_01 = { version = "0.1", package = "tokio-io" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
base64 = "0.12.3"
|
||||
matches = "0.1.6"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serial_test = "0.4.0"
|
||||
|
@@ -1490,12 +1490,6 @@ impl ClusterInfo {
|
||||
.time_gossip_write_lock("purge", &self.stats.purge)
|
||||
.purge(timestamp(), &timeouts);
|
||||
inc_new_counter_info!("cluster_info-purge-count", num_purged);
|
||||
let table_size = self.gossip.read().unwrap().crds.table.len();
|
||||
datapoint_debug!(
|
||||
"cluster_info-purge",
|
||||
("table_size", table_size as i64, i64),
|
||||
("purge_stake_timeout", timeout as i64, i64)
|
||||
);
|
||||
}
|
||||
|
||||
/// randomly pick a node and ask them for updates asynchronously
|
||||
@@ -1743,7 +1737,7 @@ impl ClusterInfo {
|
||||
"generate_pull_responses",
|
||||
&self.stats.generate_pull_responses,
|
||||
)
|
||||
.generate_pull_responses(&caller_and_filters);
|
||||
.generate_pull_responses(&caller_and_filters, now);
|
||||
|
||||
self.time_gossip_write_lock("process_pull_reqs", &self.stats.process_pull_requests)
|
||||
.process_pull_requests(caller_and_filters, now);
|
||||
@@ -2085,6 +2079,10 @@ impl ClusterInfo {
|
||||
|
||||
fn print_reset_stats(&self, last_print: &mut Instant) {
|
||||
if last_print.elapsed().as_millis() > 2000 {
|
||||
let (table_size, purged_values_size) = {
|
||||
let r_gossip = self.gossip.read().unwrap();
|
||||
(r_gossip.crds.table.len(), r_gossip.pull.purged_values.len())
|
||||
};
|
||||
datapoint_info!(
|
||||
"cluster_info_stats",
|
||||
("entrypoint", self.stats.entrypoint.clear(), i64),
|
||||
@@ -2108,6 +2106,8 @@ impl ClusterInfo {
|
||||
self.stats.new_push_requests_num.clear(),
|
||||
i64
|
||||
),
|
||||
("table_size", table_size as i64, i64),
|
||||
("purged_values_size", purged_values_size as i64, i64),
|
||||
);
|
||||
datapoint_info!(
|
||||
"cluster_info_stats2",
|
||||
|
core/src/cluster_slots_service.rs (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
use crate::{cluster_info::ClusterInfo, cluster_slots::ClusterSlots};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver},
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
{Arc, RwLock},
|
||||
},
|
||||
thread::sleep,
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct ClusterSlotsServiceTiming {
|
||||
pub lowest_slot_elapsed: u64,
|
||||
pub update_completed_slots_elapsed: u64,
|
||||
}
|
||||
|
||||
impl ClusterSlotsServiceTiming {
|
||||
fn update(&mut self, lowest_slot_elapsed: u64, update_completed_slots_elapsed: u64) {
|
||||
self.lowest_slot_elapsed += lowest_slot_elapsed;
|
||||
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ClusterSlotsService {
|
||||
t_cluster_slots_service: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl ClusterSlotsService {
|
||||
pub fn new(
|
||||
blockstore: Arc<Blockstore>,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
completed_slots_receiver: CompletedSlotsReceiver,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let id = cluster_info.id();
|
||||
Self::initialize_lowest_slot(id, &blockstore, &cluster_info);
|
||||
Self::initialize_epoch_slots(&blockstore, &cluster_info, &completed_slots_receiver);
|
||||
let t_cluster_slots_service = Builder::new()
|
||||
.name("solana-cluster-slots-service".to_string())
|
||||
.spawn(move || {
|
||||
Self::run(
|
||||
blockstore,
|
||||
cluster_slots,
|
||||
bank_forks,
|
||||
cluster_info,
|
||||
completed_slots_receiver,
|
||||
exit,
|
||||
)
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
ClusterSlotsService {
|
||||
t_cluster_slots_service,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_cluster_slots_service.join()
|
||||
}
|
||||
|
||||
fn run(
|
||||
blockstore: Arc<Blockstore>,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
completed_slots_receiver: CompletedSlotsReceiver,
|
||||
exit: Arc<AtomicBool>,
|
||||
) {
|
||||
let mut cluster_slots_service_timing = ClusterSlotsServiceTiming::default();
|
||||
let mut last_stats = Instant::now();
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let new_root = bank_forks.read().unwrap().root();
|
||||
let id = cluster_info.id();
|
||||
let mut lowest_slot_elapsed = Measure::start("lowest_slot_elapsed");
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
lowest_slot_elapsed.stop();
|
||||
let mut update_completed_slots_elapsed =
|
||||
Measure::start("update_completed_slots_elapsed");
|
||||
Self::update_completed_slots(&completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, &cluster_info, &bank_forks);
|
||||
update_completed_slots_elapsed.stop();
|
||||
|
||||
cluster_slots_service_timing.update(
|
||||
lowest_slot_elapsed.as_us(),
|
||||
update_completed_slots_elapsed.as_us(),
|
||||
);
|
||||
|
||||
if last_stats.elapsed().as_secs() > 2 {
|
||||
datapoint_info!(
|
||||
"cluster_slots_service-timing",
|
||||
(
|
||||
"lowest_slot_elapsed",
|
||||
cluster_slots_service_timing.lowest_slot_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"update_completed_slots_elapsed",
|
||||
cluster_slots_service_timing.update_completed_slots_elapsed,
|
||||
i64
|
||||
),
|
||||
);
|
||||
cluster_slots_service_timing = ClusterSlotsServiceTiming::default();
|
||||
last_stats = Instant::now();
|
||||
}
|
||||
sleep(Duration::from_millis(200));
|
||||
}
|
||||
}
|
||||
|
||||
fn update_completed_slots(
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
cluster_info: &ClusterInfo,
|
||||
) {
|
||||
let mut slots: Vec<Slot> = vec![];
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
|
||||
// Safe to set into gossip because by this time, the leader schedule cache should
|
||||
// also be updated with the latest root (done in blockstore_processor) and thus
|
||||
// will provide a schedule to window_service for any incoming shreds up to the
|
||||
// last_confirmed_epoch.
|
||||
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
|
||||
}
|
||||
|
||||
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
|
||||
cluster_info.push_lowest_slot(*id, lowest_slot);
|
||||
}
|
||||
|
||||
fn initialize_epoch_slots(
|
||||
blockstore: &Blockstore,
|
||||
cluster_info: &ClusterInfo,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
) {
|
||||
let root = blockstore.last_root();
|
||||
let mut slots: Vec<_> = blockstore
|
||||
.live_slots_iterator(root)
|
||||
.filter_map(|(slot, slot_meta)| {
|
||||
if slot_meta.is_full() {
|
||||
Some(slot)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
slots.dedup();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::cluster_info::Node;
|
||||
|
||||
#[test]
|
||||
pub fn test_update_lowest_slot() {
|
||||
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
|
||||
ClusterSlotsService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
|
||||
let lowest = cluster_info
|
||||
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
|
||||
lowest_slot.clone()
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(lowest.lowest, 5);
|
||||
}
|
||||
}
|
@@ -8,6 +8,7 @@ use solana_runtime::bank::Bank;
|
||||
use solana_sdk::clock::Slot;
|
||||
use solana_vote_program::vote_state::VoteState;
|
||||
use std::{
|
||||
cmp::max,
|
||||
collections::HashMap,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
|
||||
@@ -103,27 +104,8 @@ impl AggregateCommitmentService {
|
||||
}
|
||||
|
||||
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
|
||||
let (block_commitment, rooted_stake) =
|
||||
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let largest_confirmed_root =
|
||||
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
|
||||
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
block_commitment_cache.read().unwrap().blockstore.clone(),
|
||||
aggregation_data.root,
|
||||
aggregation_data.root,
|
||||
);
|
||||
new_block_commitment.highest_confirmed_slot =
|
||||
new_block_commitment.calculate_highest_confirmed_slot();
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
let cache_slot_info =
|
||||
Self::update_commitment_cache(block_commitment_cache, aggregation_data, ancestors);
|
||||
aggregate_commitment_time.stop();
|
||||
datapoint_info!(
|
||||
"block-commitment-cache",
|
||||
@@ -134,12 +116,50 @@ impl AggregateCommitmentService {
|
||||
)
|
||||
);
|
||||
|
||||
subscriptions.notify_subscribers(CacheSlotInfo {
|
||||
current_slot: w_block_commitment_cache.slot(),
|
||||
node_root: w_block_commitment_cache.root(),
|
||||
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
|
||||
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
|
||||
});
|
||||
// Triggers rpc_subscription notifications as soon as new commitment data is available,
|
||||
// sending just the commitment cache slot information that the notifications thread
|
||||
// needs
|
||||
subscriptions.notify_subscribers(cache_slot_info);
|
||||
}
|
||||
}
|
||||
|
||||
fn update_commitment_cache(
|
||||
block_commitment_cache: &RwLock<BlockCommitmentCache>,
|
||||
aggregation_data: CommitmentAggregationData,
|
||||
ancestors: Vec<u64>,
|
||||
) -> CacheSlotInfo {
|
||||
let (block_commitment, rooted_stake) =
|
||||
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
|
||||
|
||||
let largest_confirmed_root =
|
||||
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);
|
||||
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
largest_confirmed_root,
|
||||
aggregation_data.total_staked,
|
||||
aggregation_data.bank,
|
||||
block_commitment_cache.read().unwrap().blockstore.clone(),
|
||||
aggregation_data.root,
|
||||
aggregation_data.root,
|
||||
);
|
||||
new_block_commitment.highest_confirmed_slot =
|
||||
new_block_commitment.calculate_highest_confirmed_slot();
|
||||
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
|
||||
let largest_confirmed_root = max(
|
||||
new_block_commitment.largest_confirmed_root(),
|
||||
w_block_commitment_cache.largest_confirmed_root(),
|
||||
);
|
||||
new_block_commitment.set_largest_confirmed_root(largest_confirmed_root);
|
||||
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
CacheSlotInfo {
|
||||
current_slot: w_block_commitment_cache.slot(),
|
||||
node_root: w_block_commitment_cache.root(),
|
||||
largest_confirmed_root: w_block_commitment_cache.largest_confirmed_root(),
|
||||
highest_confirmed_slot: w_block_commitment_cache.highest_confirmed_slot(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -225,10 +245,24 @@ impl AggregateCommitmentService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::Blockstore,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_runtime::genesis_utils::{
|
||||
create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
|
||||
};
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_vote_program::vote_state::{self, VoteStateVersions};
|
||||
use solana_vote_program::{
|
||||
vote_state::{self, VoteStateVersions},
|
||||
vote_transaction,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_get_largest_confirmed_root() {
|
||||
@@ -451,4 +485,163 @@ mod tests {
|
||||
assert_eq!(rooted_stake.len(), 2);
|
||||
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_highest_confirmed_root_advance() {
|
||||
fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Arc<Bank>) -> Slot {
|
||||
let account = &bank.vote_accounts()[&vote_pubkey].1;
|
||||
let vote_state = VoteState::from(account).unwrap();
|
||||
vote_state.root_slot.unwrap()
|
||||
}
|
||||
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
);
|
||||
|
||||
let node_keypair = Keypair::new().to_bytes();
|
||||
let vote_keypair = Keypair::new().to_bytes();
|
||||
let stake_keypair = Keypair::new().to_bytes();
|
||||
let validator_keypairs = vec![ValidatorVoteKeypairs {
|
||||
node_keypair: Keypair::from_bytes(&node_keypair).unwrap(),
|
||||
vote_keypair: Keypair::from_bytes(&vote_keypair).unwrap(),
|
||||
stake_keypair: Keypair::from_bytes(&stake_keypair).unwrap(),
|
||||
}];
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
voting_keypair: _,
|
||||
} = create_genesis_config_with_vote_accounts(1_000_000_000, &validator_keypairs, 100);
|
||||
|
||||
let node_keypair = Keypair::from_bytes(&node_keypair).unwrap();
|
||||
let vote_keypair = Keypair::from_bytes(&vote_keypair).unwrap();
|
||||
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
bank0
|
||||
.transfer(100_000, &mint_keypair, &node_keypair.pubkey())
|
||||
.unwrap();
|
||||
let mut bank_forks = BankForks::new(bank0);
|
||||
|
||||
// Fill bank_forks with banks with votes landing in the next slot
|
||||
// Create enough banks such that vote account will root slots 0 and 1
|
||||
for x in 0..33 {
|
||||
let previous_bank = bank_forks.get(x).unwrap();
|
||||
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
|
||||
let vote = vote_transaction::new_vote_transaction(
|
||||
vec![x],
|
||||
previous_bank.hash(),
|
||||
previous_bank.last_blockhash(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&vote_keypair,
|
||||
);
|
||||
bank.process_transaction(&vote).unwrap();
|
||||
bank_forks.insert(bank);
|
||||
}
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
|
||||
for x in 0..root {
|
||||
bank_forks.set_root(x, &None, None);
|
||||
}
|
||||
|
||||
// Add an additional bank/vote that will root slot 2
|
||||
let bank33 = bank_forks.get(33).unwrap();
|
||||
let bank34 = Bank::new_from_parent(bank33, &Pubkey::default(), 34);
|
||||
let vote33 = vote_transaction::new_vote_transaction(
|
||||
vec![33],
|
||||
bank33.hash(),
|
||||
bank33.last_blockhash(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&vote_keypair,
|
||||
);
|
||||
bank34.process_transaction(&vote33).unwrap();
|
||||
bank_forks.insert(bank34);
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
|
||||
let ancestors = working_bank.status_cache_ancestors();
|
||||
let _ = AggregateCommitmentService::update_commitment_cache(
|
||||
&block_commitment_cache,
|
||||
CommitmentAggregationData {
|
||||
bank: working_bank,
|
||||
root: 0,
|
||||
total_staked: 100,
|
||||
},
|
||||
ancestors,
|
||||
);
|
||||
let largest_confirmed_root = block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root();
|
||||
bank_forks.set_root(root, &None, Some(largest_confirmed_root));
|
||||
let largest_confirmed_root_bank = bank_forks.get(largest_confirmed_root);
|
||||
assert!(largest_confirmed_root_bank.is_some());
|
||||
|
||||
// Add a forked bank. Because the vote for bank 33 landed in the non-ancestor, the vote
|
||||
// account's root (and thus the highest_confirmed_root) rolls back to slot 1
|
||||
let bank33 = bank_forks.get(33).unwrap();
|
||||
let bank35 = Bank::new_from_parent(bank33, &Pubkey::default(), 35);
|
||||
bank_forks.insert(bank35);
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let ancestors = working_bank.status_cache_ancestors();
|
||||
let _ = AggregateCommitmentService::update_commitment_cache(
|
||||
&block_commitment_cache,
|
||||
CommitmentAggregationData {
|
||||
bank: working_bank,
|
||||
root: 1,
|
||||
total_staked: 100,
|
||||
},
|
||||
ancestors,
|
||||
);
|
||||
let largest_confirmed_root = block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root();
|
||||
let largest_confirmed_root_bank = bank_forks.get(largest_confirmed_root);
|
||||
assert!(largest_confirmed_root_bank.is_some());
|
||||
|
||||
// Add additional banks beyond lockout built on the new fork to ensure that behavior
|
||||
// continues normally
|
||||
for x in 35..=37 {
|
||||
let previous_bank = bank_forks.get(x).unwrap();
|
||||
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
|
||||
let vote = vote_transaction::new_vote_transaction(
|
||||
vec![x],
|
||||
previous_bank.hash(),
|
||||
previous_bank.last_blockhash(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&vote_keypair,
|
||||
);
|
||||
bank.process_transaction(&vote).unwrap();
|
||||
bank_forks.insert(bank);
|
||||
}
|
||||
|
||||
let working_bank = bank_forks.working_bank();
|
||||
let root = get_vote_account_root_slot(vote_keypair.pubkey(), &working_bank);
|
||||
let ancestors = working_bank.status_cache_ancestors();
|
||||
let _ = AggregateCommitmentService::update_commitment_cache(
|
||||
&block_commitment_cache,
|
||||
CommitmentAggregationData {
|
||||
bank: working_bank,
|
||||
root: 0,
|
||||
total_staked: 100,
|
||||
},
|
||||
ancestors,
|
||||
);
|
||||
let largest_confirmed_root = block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root();
|
||||
bank_forks.set_root(root, &None, Some(largest_confirmed_root));
|
||||
let largest_confirmed_root_bank = bank_forks.get(largest_confirmed_root);
|
||||
assert!(largest_confirmed_root_bank.is_some());
|
||||
}
|
||||
Blockstore::destroy(&ledger_path).unwrap();
|
||||
}
|
||||
}
|
||||
|
@@ -159,8 +159,9 @@ impl CrdsGossip {
|
||||
pub fn generate_pull_responses(
|
||||
&self,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.pull.generate_pull_responses(&self.crds, filters)
|
||||
self.pull.generate_pull_responses(&self.crds, filters, now)
|
||||
}
|
||||
|
||||
pub fn filter_pull_responses(
|
||||
|
@@ -131,7 +131,7 @@ pub struct CrdsGossipPull {
|
||||
/// timestamp of last request
|
||||
pub pull_request_time: HashMap<Pubkey, u64>,
|
||||
/// hash and insert time
|
||||
purged_values: VecDeque<(Hash, u64)>,
|
||||
pub purged_values: VecDeque<(Hash, u64)>,
|
||||
pub crds_timeout: u64,
|
||||
pub msg_timeout: u64,
|
||||
pub num_pulls: usize,
|
||||
@@ -237,8 +237,9 @@ impl CrdsGossipPull {
|
||||
&self,
|
||||
crds: &Crds,
|
||||
requests: &[(CrdsValue, CrdsFilter)],
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
self.filter_crds_values(crds, requests)
|
||||
self.filter_crds_values(crds, requests, now)
|
||||
}
|
||||
|
||||
// Checks if responses should be inserted and
|
||||
@@ -364,22 +365,50 @@ impl CrdsGossipPull {
|
||||
for (value_hash, _insert_timestamp) in &self.purged_values {
|
||||
filters.iter_mut().for_each(|filter| filter.add(value_hash));
|
||||
}
|
||||
|
||||
filters
|
||||
}
|
||||
|
||||
/// filter values that fail the bloom filter up to max_bytes
|
||||
fn filter_crds_values(
|
||||
&self,
|
||||
crds: &Crds,
|
||||
filters: &[(CrdsValue, CrdsFilter)],
|
||||
now: u64,
|
||||
) -> Vec<Vec<CrdsValue>> {
|
||||
let mut ret = vec![vec![]; filters.len()];
|
||||
let msg_timeout = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
|
||||
let jitter = rand::thread_rng().gen_range(0, msg_timeout / 4);
|
||||
let start = filters.len();
|
||||
//skip filters from callers that are too old
|
||||
let future = now.saturating_add(msg_timeout);
|
||||
let past = now.saturating_sub(msg_timeout);
|
||||
let recent: Vec<_> = filters
|
||||
.iter()
|
||||
.filter(|(caller, _)| caller.wallclock() < future && caller.wallclock() >= past)
|
||||
.collect();
|
||||
inc_new_counter_info!(
|
||||
"gossip_filter_crds_values-dropped_requests",
|
||||
start - recent.len()
|
||||
);
|
||||
if recent.is_empty() {
|
||||
return ret;
|
||||
}
|
||||
let mut total_skipped = 0;
|
||||
for v in crds.table.values() {
|
||||
filters.iter().enumerate().for_each(|(i, (_, filter))| {
|
||||
recent.iter().enumerate().for_each(|(i, (caller, filter))| {
|
||||
//skip values that are too new
|
||||
if v.value.wallclock() > caller.wallclock().checked_add(jitter).unwrap_or_else(|| 0)
|
||||
{
|
||||
total_skipped += 1;
|
||||
return;
|
||||
}
|
||||
if !filter.contains(&v.value_hash) {
|
||||
ret[i].push(v.value.clone());
|
||||
}
|
||||
});
|
||||
}
|
||||
inc_new_counter_info!("gossip_filter_crds_values-dropped_values", total_skipped);
|
||||
ret
|
||||
}
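A small standalone illustration of the caller-wallclock window applied above. The 15_000 ms timeout is an assumed stand-in for CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS and may not match the real constant.

fn caller_is_recent(caller_wallclock: u64, now: u64, msg_timeout: u64) -> bool {
    // Same predicate as the `recent` filter: keep callers whose wallclock is
    // within msg_timeout of `now`, inclusive on the old side.
    let future = now.saturating_add(msg_timeout);
    let past = now.saturating_sub(msg_timeout);
    caller_wallclock < future && caller_wallclock >= past
}

fn main() {
    let (now, msg_timeout) = (100_000u64, 15_000u64);
    assert!(caller_is_recent(100_000, now, msg_timeout)); // current caller: kept
    assert!(caller_is_recent(85_000, now, msg_timeout)); // exactly `past`: kept
    assert!(!caller_is_recent(84_999, now, msg_timeout)); // older than the window: dropped
    assert!(!caller_is_recent(115_000, now, msg_timeout)); // too far in the future: dropped
}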
|
||||
pub fn make_timeouts_def(
|
||||
@@ -636,6 +665,62 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generate_pull_responses() {
|
||||
let mut node_crds = Crds::default();
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let node = CrdsGossipPull::default();
|
||||
node_crds.insert(entry, 0).unwrap();
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
node_crds.insert(new, 0).unwrap();
|
||||
let req = node.new_pull_request(
|
||||
&node_crds,
|
||||
&node_pubkey,
|
||||
0,
|
||||
0,
|
||||
&HashMap::new(),
|
||||
PACKET_DATA_SIZE,
|
||||
);
|
||||
|
||||
let mut dest_crds = Crds::default();
|
||||
let dest = CrdsGossipPull::default();
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let mut filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
|
||||
assert_eq!(rsp[0].len(), 0);
|
||||
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
|
||||
)));
|
||||
dest_crds
|
||||
.insert(new, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS)
|
||||
.unwrap();
|
||||
|
||||
//should skip new value since caller is too old
|
||||
let rsp =
|
||||
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
|
||||
assert_eq!(rsp[0].len(), 0);
|
||||
|
||||
//should return new value since caller is new
|
||||
filters[0].0 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS + 1,
|
||||
)));
|
||||
|
||||
let rsp =
|
||||
dest.generate_pull_responses(&dest_crds, &filters, CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS);
|
||||
assert_eq!(rsp[0].len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_pull_request() {
|
||||
let mut node_crds = Crds::default();
|
||||
@@ -664,7 +749,7 @@ mod test {
|
||||
let mut dest = CrdsGossipPull::default();
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters);
|
||||
let rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 1);
|
||||
assert!(rsp.iter().all(|rsp| rsp.is_empty()));
|
||||
assert!(dest_crds.lookup(&caller.label()).is_some());
|
||||
@@ -688,7 +773,7 @@ mod test {
|
||||
let mut node_crds = Crds::default();
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
1,
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let mut node = CrdsGossipPull::default();
|
||||
@@ -696,7 +781,7 @@ mod test {
|
||||
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
1,
|
||||
)));
|
||||
node_crds.insert(new, 0).unwrap();
|
||||
|
||||
@@ -735,7 +820,7 @@ mod test {
|
||||
);
|
||||
let (_, filters, caller) = req.unwrap();
|
||||
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
|
||||
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters);
|
||||
let mut rsp = dest.generate_pull_responses(&dest_crds, &filters, 0);
|
||||
dest.process_pull_requests(&mut dest_crds, filters, 0);
|
||||
// if there is a false positive this is empty
|
||||
// prob should be around 0.1 per iteration
|
||||
|
@@ -19,6 +19,7 @@ pub mod contact_info;
|
||||
pub mod bank_weight_fork_choice;
|
||||
pub mod cluster_info;
|
||||
pub mod cluster_slots;
|
||||
pub mod cluster_slots_service;
|
||||
pub mod consensus;
|
||||
pub mod crds;
|
||||
pub mod crds_gossip;
|
||||
|
@@ -23,14 +23,14 @@ pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSuppl
|
||||
let stake_account = StakeState::from(&account).unwrap_or_default();
|
||||
match stake_account {
|
||||
StakeState::Initialized(meta) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
if meta.lockup.is_in_force(&clock, None)
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
}
|
||||
}
|
||||
StakeState::Stake(meta, _stake) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
if meta.lockup.is_in_force(&clock, None)
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
|
@@ -13,7 +13,7 @@ use crate::{
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
|
||||
blockstore::{Blockstore, SlotMeta},
|
||||
shred::Nonce,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
@@ -80,8 +80,6 @@ pub struct RepairTiming {
|
||||
pub set_root_elapsed: u64,
|
||||
pub get_votes_elapsed: u64,
|
||||
pub add_votes_elapsed: u64,
|
||||
pub lowest_slot_elapsed: u64,
|
||||
pub update_completed_slots_elapsed: u64,
|
||||
pub get_best_orphans_elapsed: u64,
|
||||
pub get_best_shreds_elapsed: u64,
|
||||
pub send_repairs_elapsed: u64,
|
||||
@@ -93,15 +91,11 @@ impl RepairTiming {
|
||||
set_root_elapsed: u64,
|
||||
get_votes_elapsed: u64,
|
||||
add_votes_elapsed: u64,
|
||||
lowest_slot_elapsed: u64,
|
||||
update_completed_slots_elapsed: u64,
|
||||
send_repairs_elapsed: u64,
|
||||
) {
|
||||
self.set_root_elapsed += set_root_elapsed;
|
||||
self.get_votes_elapsed += get_votes_elapsed;
|
||||
self.add_votes_elapsed += add_votes_elapsed;
|
||||
self.lowest_slot_elapsed += lowest_slot_elapsed;
|
||||
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
|
||||
self.send_repairs_elapsed += send_repairs_elapsed;
|
||||
}
|
||||
}
|
||||
@@ -114,7 +108,6 @@ pub const MAX_ORPHANS: usize = 5;
|
||||
|
||||
pub struct RepairInfo {
|
||||
pub bank_forks: Arc<RwLock<BankForks>>,
|
||||
pub completed_slots_receiver: CompletedSlotsReceiver,
|
||||
pub epoch_schedule: EpochSchedule,
|
||||
pub duplicate_slots_reset_sender: DuplicateSlotsResetSender,
|
||||
}
|
||||
@@ -183,18 +176,12 @@ impl RepairService {
|
||||
let mut repair_weight = RepairWeight::new(repair_info.bank_forks.read().unwrap().root());
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let id = cluster_info.id();
|
||||
Self::initialize_lowest_slot(id, blockstore, &cluster_info);
|
||||
let mut repair_stats = RepairStats::default();
|
||||
let mut repair_timing = RepairTiming::default();
|
||||
let mut last_stats = Instant::now();
|
||||
let duplicate_slot_repair_statuses: HashMap<Slot, DuplicateSlotRepairStatus> =
|
||||
HashMap::new();
|
||||
|
||||
Self::initialize_epoch_slots(
|
||||
blockstore,
|
||||
&cluster_info,
|
||||
&repair_info.completed_slots_receiver,
|
||||
);
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
@@ -203,8 +190,6 @@ impl RepairService {
|
||||
let mut set_root_elapsed;
|
||||
let mut get_votes_elapsed;
|
||||
let mut add_votes_elapsed;
|
||||
let mut lowest_slot_elapsed;
|
||||
let mut update_completed_slots_elapsed;
|
||||
let repairs = {
|
||||
let root_bank = repair_info.bank_forks.read().unwrap().root_bank().clone();
|
||||
let new_root = root_bank.slot();
|
||||
@@ -237,15 +222,6 @@ impl RepairService {
|
||||
root_bank.epoch_schedule(),
|
||||
);
|
||||
add_votes_elapsed.stop();
|
||||
|
||||
lowest_slot_elapsed = Measure::start("lowest_slot_elapsed");
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
lowest_slot_elapsed.stop();
|
||||
update_completed_slots_elapsed = Measure::start("update_completed_slots_elapsed");
|
||||
Self::update_completed_slots(&repair_info.completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, &cluster_info, &repair_info.bank_forks);
|
||||
update_completed_slots_elapsed.stop();
|
||||
/*let new_duplicate_slots = Self::find_new_duplicate_slots(
|
||||
&duplicate_slot_repair_statuses,
|
||||
blockstore,
|
||||
@@ -301,8 +277,6 @@ impl RepairService {
|
||||
set_root_elapsed.as_us(),
|
||||
get_votes_elapsed.as_us(),
|
||||
add_votes_elapsed.as_us(),
|
||||
lowest_slot_elapsed.as_us(),
|
||||
update_completed_slots_elapsed.as_us(),
|
||||
send_repairs_elapsed.as_us(),
|
||||
);
|
||||
|
||||
@@ -337,16 +311,6 @@ impl RepairService {
|
||||
repair_timing.get_best_shreds_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"lowest-slot-elapsed",
|
||||
repair_timing.lowest_slot_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"update-completed-slots-elapsed",
|
||||
repair_timing.update_completed_slots_elapsed,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"send-repairs-elapsed",
|
||||
repair_timing.send_repairs_elapsed,
|
||||
@@ -652,59 +616,6 @@ impl RepairService {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
|
||||
// Safe to set into gossip because by this time, the leader schedule cache should
|
||||
// also be updated with the latest root (done in blockstore_processor) and thus
|
||||
// will provide a schedule to window_service for any incoming shreds up to the
|
||||
// last_confirmed_epoch.
|
||||
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
|
||||
}
|
||||
|
||||
fn update_completed_slots(
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
cluster_info: &ClusterInfo,
|
||||
) {
|
||||
let mut slots: Vec<Slot> = vec![];
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
|
||||
cluster_info.push_lowest_slot(*id, lowest_slot);
|
||||
}
|
||||
|
||||
fn initialize_epoch_slots(
|
||||
blockstore: &Blockstore,
|
||||
cluster_info: &ClusterInfo,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
) {
|
||||
let root = blockstore.last_root();
|
||||
let mut slots: Vec<_> = blockstore
|
||||
.live_slots_iterator(root)
|
||||
.filter_map(|(slot, slot_meta)| {
|
||||
if slot_meta.is_full() {
|
||||
Some(slot)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
slots.dedup();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.t_repair.join()
|
||||
}
|
||||
@@ -982,19 +893,6 @@ mod test {
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_update_lowest_slot() {
|
||||
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
|
||||
RepairService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
|
||||
let lowest = cluster_info
|
||||
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
|
||||
lowest_slot.clone()
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(lowest.lowest, 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_generate_duplicate_repairs_for_slot() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
|
@@ -170,11 +170,20 @@ impl RepairWeight {
|
||||
if new_root == self.root {
|
||||
return;
|
||||
}
|
||||
|
||||
// Root slot of the tree that contains `new_root`, if one exists
|
||||
let new_root_tree_root = self.slot_to_tree.get(&new_root).cloned();
|
||||
|
||||
// Purge outdated trees from `self.trees`
|
||||
let subtrees_to_purge: Vec<_> = self
|
||||
.trees
|
||||
.keys()
|
||||
.filter(|subtree_root| **subtree_root < new_root && **subtree_root != self.root)
|
||||
.filter(|subtree_root| {
|
||||
**subtree_root < new_root
|
||||
&& new_root_tree_root
|
||||
.map(|new_root_tree_root| **subtree_root != new_root_tree_root)
|
||||
.unwrap_or(true)
|
||||
})
|
||||
.cloned()
|
||||
.collect();
|
||||
for subtree_root in subtrees_to_purge {
|
||||
@@ -188,25 +197,26 @@ impl RepairWeight {
|
||||
);
|
||||
}
|
||||
|
||||
let mut root_tree = self.trees.remove(&self.root).expect("root tree must exist");
|
||||
if let Some(new_root_tree_root) = new_root_tree_root {
|
||||
let mut new_root_tree = self
|
||||
.trees
|
||||
.remove(&new_root_tree_root)
|
||||
.expect("Found slot root earlier in self.slot_to_tree, tree must exist");
|
||||
// Find all descendants of `self.root` that are not reachable from `new_root`.
|
||||
// These are exactly the unrooted slots, which can be purged and added to
|
||||
// `self.unrooted_slots`.
|
||||
let unrooted_slots = new_root_tree.subtree_diff(new_root_tree_root, new_root);
|
||||
self.remove_tree_slots(unrooted_slots.iter(), new_root);
|
||||
|
||||
// Find all descendants of `self.root` that are not reachable from `new_root`.
|
||||
// These are exactly the unrooted slots, which can be purged and added to
|
||||
// `self.unrooted_slots`.
|
||||
let unrooted_slots = root_tree.subtree_diff(self.root, new_root);
|
||||
self.remove_tree_slots(unrooted_slots.iter(), new_root);
|
||||
new_root_tree.set_root(new_root);
|
||||
|
||||
if !root_tree.contains_slot(new_root) {
|
||||
// If the current `root_tree` does not contain the new root, we can
|
||||
// just make a new tree for the new root
|
||||
self.insert_new_tree(new_root);
|
||||
} else {
|
||||
root_tree.set_root(new_root);
|
||||
// Update `self.slot_to_tree` to reflect new root
|
||||
self.rename_tree_root(&root_tree, new_root);
|
||||
self.rename_tree_root(&new_root_tree, new_root);
|
||||
|
||||
// Insert the tree for the new root
|
||||
self.trees.insert(new_root, root_tree);
|
||||
self.trees.insert(new_root, new_root_tree);
|
||||
} else {
|
||||
self.insert_new_tree(new_root);
|
||||
}
|
||||
|
||||
// Purge `self.unrooted_slots` of slots less than `new_root` as we know any
|
||||
@@ -954,6 +964,27 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_root_existing_non_root_tree() {
|
||||
let (_, _, mut repair_weight) = setup_orphan_repair_weight();
|
||||
|
||||
// Set root in an existing orphan branch, slot 10
|
||||
repair_weight.set_root(10);
|
||||
check_old_root_purged_verify_new_root(0, 10, &repair_weight);
|
||||
|
||||
// Should purge old root tree [0, 6]
|
||||
for slot in 0..6 {
|
||||
assert!(!repair_weight.slot_to_tree.contains_key(&slot));
|
||||
}
|
||||
|
||||
// Should purge orphan parent as well
|
||||
assert!(!repair_weight.slot_to_tree.contains_key(&8));
|
||||
|
||||
// Other higher orphan branch rooted at slot `20` remains unchanged
|
||||
assert_eq!(repair_weight.trees.get(&20).unwrap().root(), 20);
|
||||
assert_eq!(*repair_weight.slot_to_tree.get(&20).unwrap(), 20);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_set_root_check_unrooted_slots() {
|
||||
let (blockstore, bank, mut repair_weight) = setup_orphan_repair_weight();
|
||||
|
@@ -1863,7 +1863,8 @@ impl ReplayStage {
|
||||
pub fn get_unlock_switch_vote_slot(operating_mode: OperatingMode) -> Slot {
|
||||
match operating_mode {
|
||||
OperatingMode::Development => 0,
|
||||
OperatingMode::Stable => std::u64::MAX / 2,
|
||||
// 400_000 slots into epoch 61
|
||||
OperatingMode::Stable => 26_752_000,
|
||||
// Epoch 63
|
||||
OperatingMode::Preview => 21_692_256,
|
||||
}
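A quick sanity check on the "400_000 slots into epoch 61" comment, assuming the mainnet-beta schedule of 432_000 slots per epoch with no warmup epochs (an assumption of this sketch, not stated in the diff); the Preview value follows a different, warmed-up epoch schedule.

fn main() {
    const SLOTS_PER_EPOCH: u64 = 432_000; // assumed mainnet-beta value
    let unlock_slot = 61 * SLOTS_PER_EPOCH + 400_000;
    assert_eq!(unlock_slot, 26_752_000); // matches the new OperatingMode::Stable arm
}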
|
||||
@@ -1872,7 +1873,8 @@ impl ReplayStage {
|
||||
pub fn get_unlock_heaviest_subtree_fork_choice(operating_mode: OperatingMode) -> Slot {
|
||||
match operating_mode {
|
||||
OperatingMode::Development => 0,
|
||||
OperatingMode::Stable => std::u64::MAX / 2,
|
||||
// 400_000 slots into epoch 61
|
||||
OperatingMode::Stable => 26_752_000,
|
||||
// Epoch 63
|
||||
OperatingMode::Preview => 21_692_256,
|
||||
}
|
||||
|
@@ -4,6 +4,7 @@ use crate::{
|
||||
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
|
||||
cluster_info_vote_listener::VerifiedVoteReceiver,
|
||||
cluster_slots::ClusterSlots,
|
||||
cluster_slots_service::ClusterSlotsService,
|
||||
contact_info::ContactInfo,
|
||||
repair_service::DuplicateSlotsResetSender,
|
||||
repair_service::RepairInfo,
|
||||
@@ -394,6 +395,7 @@ pub fn retransmitter(
|
||||
pub struct RetransmitStage {
|
||||
thread_hdls: Vec<JoinHandle<()>>,
|
||||
window_service: WindowService,
|
||||
cluster_slots_service: ClusterSlotsService,
|
||||
}
|
||||
|
||||
impl RetransmitStage {
|
||||
@@ -427,9 +429,16 @@ impl RetransmitStage {
|
||||
retransmit_receiver,
|
||||
);
|
||||
|
||||
let cluster_slots_service = ClusterSlotsService::new(
|
||||
blockstore.clone(),
|
||||
cluster_slots.clone(),
|
||||
bank_forks.clone(),
|
||||
cluster_info.clone(),
|
||||
completed_slots_receiver,
|
||||
exit.clone(),
|
||||
);
|
||||
let repair_info = RepairInfo {
|
||||
bank_forks,
|
||||
completed_slots_receiver,
|
||||
epoch_schedule,
|
||||
duplicate_slots_reset_sender,
|
||||
};
|
||||
@@ -466,6 +475,7 @@ impl RetransmitStage {
|
||||
Self {
|
||||
thread_hdls,
|
||||
window_service,
|
||||
cluster_slots_service,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -474,6 +484,7 @@ impl RetransmitStage {
|
||||
thread_hdl.join()?;
|
||||
}
|
||||
self.window_service.join()?;
|
||||
self.cluster_slots_service.join()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
core/src/rpc.rs (1373 lines changed; file diff suppressed because it is too large)
@@ -4,6 +4,7 @@ use solana_sdk::clock::Slot;
|
||||
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
|
||||
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
|
||||
const JSON_RPC_SERVER_ERROR_2: i64 = -32002;
|
||||
const JSON_RPC_SERVER_ERROR_3: i64 = -32003;
|
||||
|
||||
pub enum RpcCustomError {
|
||||
NonexistentClusterRoot {
|
||||
@@ -17,6 +18,7 @@ pub enum RpcCustomError {
|
||||
SendTransactionPreflightFailure {
|
||||
message: String,
|
||||
},
|
||||
SendTransactionIsNotSigned,
|
||||
}
|
||||
|
||||
impl From<RpcCustomError> for Error {
|
||||
@@ -49,6 +51,11 @@ impl From<RpcCustomError> for Error {
|
||||
message,
|
||||
data: None,
|
||||
},
|
||||
RpcCustomError::SendTransactionIsNotSigned => Self {
|
||||
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_3),
|
||||
message: "Transaction is not signed".to_string(),
|
||||
data: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
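For reference, JSON_RPC_SERVER_ERROR_3 is -32003, so a rejected unsigned transaction surfaces to clients roughly as the object below. The full envelope comes from jsonrpc-core; this sketch only shows the expected code and message.

fn main() {
    let expected = serde_json::json!({
        "code": -32003,
        "message": "Transaction is not signed",
    });
    assert_eq!(expected["code"], -32003);
}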
|
||||
|
@@ -562,6 +562,7 @@ mod tests {
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -677,6 +678,7 @@ mod tests {
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: Some(UiAccountEncoding::JsonParsed),
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -700,7 +702,13 @@ mod tests {
|
||||
.get_account(&nonce_account.pubkey())
|
||||
.unwrap()
|
||||
.data;
|
||||
let expected_data = parse_account_data(&system_program::id(), &expected_data).unwrap();
|
||||
let expected_data = parse_account_data(
|
||||
&nonce_account.pubkey(),
|
||||
&system_program::id(),
|
||||
&expected_data,
|
||||
None,
|
||||
)
|
||||
.unwrap();
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "accountNotification",
|
||||
@@ -800,6 +808,7 @@ mod tests {
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::root()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -856,6 +865,7 @@ mod tests {
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::root()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
);
|
||||
|
||||
|
@@ -24,7 +24,7 @@ use std::{
|
||||
sync::{mpsc::channel, Arc, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
};
|
||||
use tokio::prelude::Future;
|
||||
use tokio::runtime;
|
||||
|
||||
pub struct JsonRpcService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
@@ -33,6 +33,7 @@ pub struct JsonRpcService {
|
||||
pub request_processor: JsonRpcRequestProcessor, // Used only by test_rpc_new()...
|
||||
|
||||
close_handle: Option<CloseHandle>,
|
||||
runtime: runtime::Runtime,
|
||||
}
|
||||
|
||||
struct RpcRequestMiddleware {
|
||||
@@ -98,6 +99,9 @@ impl RpcRequestMiddleware {
|
||||
}
|
||||
|
||||
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
|
||||
// Stuck on tokio 0.1 until the jsonrpc-http-server crate upgrades to tokio 0.2
|
||||
use tokio_01::prelude::*;
|
||||
|
||||
let stem = path.split_at(1).1; // Drop leading '/' from path
|
||||
let filename = {
|
||||
match path {
|
||||
@@ -116,10 +120,10 @@ impl RpcRequestMiddleware {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(
|
||||
tokio_fs::file::File::open(filename)
|
||||
tokio_fs_01::file::File::open(filename)
|
||||
.and_then(|file| {
|
||||
let buf: Vec<u8> = Vec::new();
|
||||
tokio_io::io::read_to_end(file, buf)
|
||||
tokio_io_01::io::read_to_end(file, buf)
|
||||
.and_then(|item| Ok(hyper::Response::new(item.1.into())))
|
||||
.or_else(|_| Ok(RpcRequestMiddleware::internal_server_error()))
|
||||
})
|
||||
@@ -256,6 +260,27 @@ impl JsonRpcService {
|
||||
&exit_send_transaction_service,
|
||||
));
|
||||
|
||||
let mut runtime = runtime::Builder::new()
|
||||
.threaded_scheduler()
|
||||
.thread_name("rpc-runtime")
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("Runtime");
|
||||
|
||||
let bigtable_ledger_storage = if config.enable_bigtable_ledger_storage {
|
||||
runtime
|
||||
.block_on(solana_storage_bigtable::LedgerStorage::new(false))
|
||||
.map(|x| {
|
||||
info!("BigTable ledger storage initialized");
|
||||
Some(x)
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
error!("Failed to initialize BigTable ledger storage: {:?}", err);
|
||||
None
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let request_processor = JsonRpcRequestProcessor::new(
|
||||
config,
|
||||
bank_forks.clone(),
|
||||
@@ -266,6 +291,8 @@ impl JsonRpcService {
|
||||
cluster_info,
|
||||
genesis_hash,
|
||||
send_transaction_service,
|
||||
&runtime,
|
||||
bigtable_ledger_storage,
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -325,6 +352,7 @@ impl JsonRpcService {
|
||||
.register_exit(Box::new(move || close_handle_.close()));
|
||||
Self {
|
||||
thread_hdl,
|
||||
runtime,
|
||||
#[cfg(test)]
|
||||
request_processor: test_request_processor,
|
||||
close_handle: Some(close_handle),
|
||||
@@ -338,6 +366,7 @@ impl JsonRpcService {
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.runtime.shutdown_background();
|
||||
self.thread_hdl.join()
|
||||
}
|
||||
}
|
||||
|
@@ -1,6 +1,9 @@
|
||||
//! The `pubsub` module implements a threaded subscription service on client RPC request
|
||||
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use crate::{
|
||||
commitment::BlockCommitmentCache,
|
||||
rpc::{get_parsed_token_account, get_parsed_token_accounts},
|
||||
};
|
||||
use core::hash::Hash;
|
||||
use jsonrpc_core::futures::Future;
|
||||
use jsonrpc_pubsub::{
|
||||
@@ -8,7 +11,7 @@ use jsonrpc_pubsub::{
|
||||
SubscriptionId,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use solana_account_decoder::{UiAccount, UiAccountEncoding};
|
||||
use solana_account_decoder::{parse_token::spl_token_id_v1_0, UiAccount, UiAccountEncoding};
|
||||
use solana_client::{
|
||||
rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig},
|
||||
rpc_filter::RpcFilterType,
|
||||
@@ -36,7 +39,9 @@ use std::{
|
||||
iter,
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
};
|
||||
use tokio::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
|
||||
|
||||
// Stuck on tokio 0.1 until the jsonrpc-pubsub crate upgrades to tokio 0.2
|
||||
use tokio_01::runtime::{Builder as RuntimeBuilder, Runtime, TaskExecutor};
|
||||
|
||||
const RECEIVE_DELAY_MILLIS: u64 = 100;
|
||||
|
||||
@@ -184,7 +189,7 @@ where
|
||||
K: Eq + Hash + Clone + Copy,
|
||||
S: Clone + Serialize,
|
||||
B: Fn(&Bank, &K) -> X,
|
||||
F: Fn(X, Slot, Option<T>) -> (Box<dyn Iterator<Item = S>>, Slot),
|
||||
F: Fn(X, &K, Slot, Option<T>, Option<Arc<Bank>>) -> (Box<dyn Iterator<Item = S>>, Slot),
|
||||
X: Clone + Serialize + Default,
|
||||
T: Clone,
|
||||
{
|
||||
@@ -208,16 +213,19 @@ where
|
||||
cache_slot_info.highest_confirmed_slot
|
||||
}
|
||||
};
|
||||
let results = {
|
||||
let bank_forks = bank_forks.read().unwrap();
|
||||
bank_forks
|
||||
.get(slot)
|
||||
.map(|desired_bank| bank_method(&desired_bank, hashmap_key))
|
||||
.unwrap_or_default()
|
||||
};
|
||||
let bank = bank_forks.read().unwrap().get(slot).cloned();
|
||||
let results = bank
|
||||
.clone()
|
||||
.map(|desired_bank| bank_method(&desired_bank, hashmap_key))
|
||||
.unwrap_or_default();
|
||||
let mut w_last_notified_slot = last_notified_slot.write().unwrap();
|
||||
let (filter_results, result_slot) =
|
||||
filter_results(results, *w_last_notified_slot, config.as_ref().cloned());
|
||||
let (filter_results, result_slot) = filter_results(
|
||||
results,
|
||||
hashmap_key,
|
||||
*w_last_notified_slot,
|
||||
config.as_ref().cloned(),
|
||||
bank,
|
||||
);
|
||||
for result in filter_results {
|
||||
notifier.notify(
|
||||
Response {
|
||||
@@ -248,18 +256,30 @@ impl RpcNotifier {
|
||||
|
||||
fn filter_account_result(
|
||||
result: Option<(Account, Slot)>,
|
||||
pubkey: &Pubkey,
|
||||
last_notified_slot: Slot,
|
||||
encoding: Option<UiAccountEncoding>,
|
||||
bank: Option<Arc<Bank>>,
|
||||
) -> (Box<dyn Iterator<Item = UiAccount>>, Slot) {
|
||||
if let Some((account, fork)) = result {
|
||||
// If fork < last_notified_slot this means that we last notified for a fork
|
||||
// and should notify that the account state has been reverted.
|
||||
if fork != last_notified_slot {
|
||||
let encoding = encoding.unwrap_or(UiAccountEncoding::Binary);
|
||||
return (
|
||||
Box::new(iter::once(UiAccount::encode(account, encoding))),
|
||||
fork,
|
||||
);
|
||||
if account.owner == spl_token_id_v1_0() && encoding == UiAccountEncoding::JsonParsed {
|
||||
let bank = bank.unwrap(); // If result.is_some(), bank must also be Some
|
||||
return (
|
||||
Box::new(iter::once(get_parsed_token_account(bank, pubkey, account))),
|
||||
fork,
|
||||
);
|
||||
} else {
|
||||
return (
|
||||
Box::new(iter::once(UiAccount::encode(
|
||||
pubkey, account, encoding, None, None,
|
||||
))),
|
||||
fork,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
(Box::new(iter::empty()), last_notified_slot)
|
||||
@@ -267,8 +287,10 @@ fn filter_account_result(
|
||||
|
||||
fn filter_signature_result(
|
||||
result: Option<transaction::Result<()>>,
|
||||
_signature: &Signature,
|
||||
last_notified_slot: Slot,
|
||||
_config: Option<()>,
|
||||
_bank: Option<Arc<Bank>>,
|
||||
) -> (Box<dyn Iterator<Item = RpcSignatureResult>>, Slot) {
|
||||
(
|
||||
Box::new(
|
||||
@@ -282,29 +304,33 @@ fn filter_signature_result(
|
||||
|
||||
fn filter_program_results(
|
||||
accounts: Vec<(Pubkey, Account)>,
|
||||
_program_id: &Pubkey,
|
||||
last_notified_slot: Slot,
|
||||
config: Option<ProgramConfig>,
|
||||
bank: Option<Arc<Bank>>,
|
||||
) -> (Box<dyn Iterator<Item = RpcKeyedAccount>>, Slot) {
|
||||
let config = config.unwrap_or_default();
|
||||
let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary);
|
||||
let filters = config.filters;
|
||||
(
|
||||
Box::new(
|
||||
accounts
|
||||
.into_iter()
|
||||
.filter(move |(_, account)| {
|
||||
filters.iter().all(|filter_type| match filter_type {
|
||||
RpcFilterType::DataSize(size) => account.data.len() as u64 == *size,
|
||||
RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data),
|
||||
})
|
||||
})
|
||||
.map(move |(pubkey, account)| RpcKeyedAccount {
|
||||
let keyed_accounts = accounts.into_iter().filter(move |(_, account)| {
|
||||
filters.iter().all(|filter_type| match filter_type {
|
||||
RpcFilterType::DataSize(size) => account.data.len() as u64 == *size,
|
||||
RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data),
|
||||
})
|
||||
});
|
||||
let accounts: Box<dyn Iterator<Item = RpcKeyedAccount>> =
|
||||
if encoding == UiAccountEncoding::JsonParsed {
|
||||
let bank = bank.unwrap(); // If !accounts.is_empty(), bank must be Some
|
||||
Box::new(get_parsed_token_accounts(bank, keyed_accounts))
|
||||
} else {
|
||||
Box::new(
|
||||
keyed_accounts.map(move |(pubkey, account)| RpcKeyedAccount {
|
||||
pubkey: pubkey.to_string(),
|
||||
account: UiAccount::encode(account, encoding.clone()),
|
||||
account: UiAccount::encode(&pubkey, account, encoding.clone(), None, None),
|
||||
}),
|
||||
),
|
||||
last_notified_slot,
|
||||
)
|
||||
)
|
||||
};
|
||||
(accounts, last_notified_slot)
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -871,7 +897,7 @@ impl RpcSubscriptions {
|
||||
&subscriptions.gossip_account_subscriptions,
|
||||
&subscriptions.gossip_program_subscriptions,
|
||||
&subscriptions.gossip_signature_subscriptions,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
&cache_slot_info,
|
||||
¬ifier,
|
||||
);
|
||||
@@ -892,7 +918,7 @@ impl RpcSubscriptions {
|
||||
for pubkey in &pubkeys {
|
||||
Self::check_account(
|
||||
pubkey,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
account_subscriptions.clone(),
|
||||
¬ifier,
|
||||
&cache_slot_info,
|
||||
@@ -906,7 +932,7 @@ impl RpcSubscriptions {
|
||||
for program_id in &programs {
|
||||
Self::check_program(
|
||||
program_id,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
program_subscriptions.clone(),
|
||||
¬ifier,
|
||||
&cache_slot_info,
|
||||
@@ -920,7 +946,7 @@ impl RpcSubscriptions {
|
||||
for signature in &signatures {
|
||||
Self::check_signature(
|
||||
signature,
|
||||
&bank_forks,
|
||||
bank_forks,
|
||||
signature_subscriptions.clone(),
|
||||
¬ifier,
|
||||
&cache_slot_info,
|
||||
@@ -965,7 +991,7 @@ pub(crate) mod tests {
|
||||
system_transaction,
|
||||
};
|
||||
use std::{fmt::Debug, sync::mpsc::channel, time::Instant};
|
||||
use tokio::{prelude::FutureExt, runtime::Runtime, timer::Delay};
|
||||
use tokio_01::{prelude::FutureExt, runtime::Runtime, timer::Delay};
|
||||
|
||||
pub(crate) fn robust_poll_or_panic<T: Debug + Send + 'static>(
|
||||
receiver: futures::sync::mpsc::Receiver<T>,
|
||||
@@ -1030,6 +1056,7 @@ pub(crate) mod tests {
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::recent()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
sub_id.clone(),
|
||||
subscriber,
|
||||
@@ -1526,6 +1553,7 @@ pub(crate) mod tests {
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::single_gossip()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
sub_id0.clone(),
|
||||
subscriber0,
|
||||
@@ -1594,6 +1622,7 @@ pub(crate) mod tests {
|
||||
Some(RpcAccountInfoConfig {
|
||||
commitment: Some(CommitmentConfig::single_gossip()),
|
||||
encoding: None,
|
||||
data_slice: None,
|
||||
}),
|
||||
sub_id1.clone(),
|
||||
subscriber1,
|
||||
|
@@ -269,7 +269,7 @@ pub mod tests {
|
||||
|
||||
let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
|
||||
let (blockstore, l_receiver, completed_slots_receiver) =
|
||||
Blockstore::open_with_signal(&blockstore_path)
|
||||
Blockstore::open_with_signal(&blockstore_path, None)
|
||||
.expect("Expected to successfully open ledger");
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let bank = bank_forks.working_bank();
|
||||
|
@@ -28,6 +28,7 @@ use solana_ledger::{
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
bank_forks_utils,
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver, PurgeType},
|
||||
blockstore_db::BlockstoreRecoveryMode,
|
||||
blockstore_processor::{self, TransactionStatusSender},
|
||||
create_new_tmp_ledger,
|
||||
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
|
||||
@@ -83,6 +84,7 @@ pub struct ValidatorConfig {
|
||||
pub no_rocksdb_compaction: bool,
|
||||
pub accounts_hash_interval_slots: u64,
|
||||
pub max_genesis_archive_unpacked_size: u64,
|
||||
pub wal_recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
}
|
||||
|
||||
impl Default for ValidatorConfig {
|
||||
@@ -110,6 +112,7 @@ impl Default for ValidatorConfig {
|
||||
no_rocksdb_compaction: false,
|
||||
accounts_hash_interval_slots: std::u64::MAX,
|
||||
max_genesis_archive_unpacked_size: MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
|
||||
wal_recovery_mode: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -603,7 +606,8 @@ fn new_banks_from_blockstore(
|
||||
}
|
||||
|
||||
let (mut blockstore, ledger_signal_receiver, completed_slots_receiver) =
|
||||
Blockstore::open_with_signal(blockstore_path).expect("Failed to open ledger database");
|
||||
Blockstore::open_with_signal(blockstore_path, config.wal_recovery_mode.clone())
|
||||
.expect("Failed to open ledger database");
|
||||
blockstore.set_no_compaction(config.no_rocksdb_compaction);
|
||||
|
||||
let process_options = blockstore_processor::ProcessOptions {
|
||||
|
@@ -436,7 +436,7 @@ fn network_run_pull(
|
||||
let rsp = node
|
||||
.lock()
|
||||
.unwrap()
|
||||
.generate_pull_responses(&filters)
|
||||
.generate_pull_responses(&filters, now)
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect();
|
||||
|
@@ -26,7 +26,7 @@ use std::{
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tokio::runtime::Runtime;
|
||||
use tokio_01::runtime::Runtime;
|
||||
|
||||
macro_rules! json_req {
|
||||
($method: expr, $params: expr) => {{
|
||||
@@ -100,6 +100,20 @@ fn test_rpc_send_tx() {
|
||||
|
||||
assert_eq!(confirmed_tx, true);
|
||||
|
||||
use solana_account_decoder::UiAccountEncoding;
|
||||
use solana_client::rpc_config::RpcAccountInfoConfig;
|
||||
let config = RpcAccountInfoConfig {
|
||||
encoding: Some(UiAccountEncoding::Binary64),
|
||||
commitment: None,
|
||||
data_slice: None,
|
||||
};
|
||||
let req = json_req!(
|
||||
"getAccountInfo",
|
||||
json!([bs58::encode(bob_pubkey).into_string(), config])
|
||||
);
|
||||
let json: Value = post_rpc(req, &leader_data);
|
||||
info!("{:?}", json["result"]["value"]);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
@@ -189,7 +203,7 @@ fn test_rpc_subscriptions() {
|
||||
.and_then(move |client| {
|
||||
for sig in signature_set {
|
||||
let status_sender = status_sender.clone();
|
||||
tokio::spawn(
|
||||
tokio_01::spawn(
|
||||
client
|
||||
.signature_subscribe(sig.clone(), None)
|
||||
.and_then(move |sig_stream| {
|
||||
@@ -203,7 +217,7 @@ fn test_rpc_subscriptions() {
|
||||
}),
|
||||
);
|
||||
}
|
||||
tokio::spawn(
|
||||
tokio_01::spawn(
|
||||
client
|
||||
.slot_subscribe()
|
||||
.and_then(move |slot_stream| {
|
||||
@@ -218,7 +232,7 @@ fn test_rpc_subscriptions() {
|
||||
);
|
||||
for pubkey in account_set {
|
||||
let account_sender = account_sender.clone();
|
||||
tokio::spawn(
|
||||
tokio_01::spawn(
|
||||
client
|
||||
.account_subscribe(pubkey, None)
|
||||
.and_then(move |account_stream| {
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -21,7 +21,7 @@ rand_chacha = { version = "0.2.2" }
|
||||
regex-syntax = { version = "0.6.12" }
|
||||
reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde = { version = "1.0.100", features = ["rc"] }
|
||||
ed25519-dalek = { version = "=1.0.0-pre.3", features = ["serde"] }
|
||||
ed25519-dalek = { version = "=1.0.0-pre.4", features = ["serde"] }
|
||||
syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "fold", "full"] }
|
||||
syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] }
|
||||
tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] }
|
||||
|
@@ -17,7 +17,7 @@ module.exports = {
|
||||
links: [
|
||||
{
|
||||
to: "introduction",
|
||||
label: "Docs",
|
||||
label: "Introduction",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
@@ -30,6 +30,11 @@ module.exports = {
|
||||
label: "Validators",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
to: "clusters",
|
||||
label: "Clusters",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
href: "https://discordapp.com/invite/pquxPsq",
|
||||
label: "Chat",
|
||||
|
@@ -42,6 +42,7 @@ module.exports = {
|
||||
"cli/manage-stake-accounts",
|
||||
"offline-signing",
|
||||
"offline-signing/durable-nonce",
|
||||
"cli/usage",
|
||||
],
|
||||
"Solana Clusters": ["clusters"],
|
||||
"Develop Applications": [
|
||||
@@ -61,6 +62,7 @@ module.exports = {
|
||||
"running-validator",
|
||||
"running-validator/validator-reqs",
|
||||
"running-validator/validator-start",
|
||||
"running-validator/vote-accounts",
|
||||
"running-validator/validator-stake",
|
||||
"running-validator/validator-monitor",
|
||||
"running-validator/validator-info",
|
||||
@@ -144,7 +146,6 @@ module.exports = {
|
||||
"implemented-proposals/repair-service",
|
||||
"implemented-proposals/testing-programs",
|
||||
"implemented-proposals/readonly-accounts",
|
||||
"implemented-proposals/embedding-move",
|
||||
"implemented-proposals/staking-rewards",
|
||||
"implemented-proposals/rent",
|
||||
"implemented-proposals/durable-tx-nonces",
|
||||
@@ -170,7 +171,6 @@ module.exports = {
|
||||
"proposals/block-confirmation",
|
||||
"proposals/rust-clients",
|
||||
"proposals/optimistic_confirmation",
|
||||
"proposals/abi-management",
|
||||
],
|
||||
},
|
||||
};
|
||||
|
@@ -24,6 +24,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
- [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
|
||||
- [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
|
||||
- [getConfirmedSignaturesForAddress](jsonrpc-api.md#getconfirmedsignaturesforaddress)
|
||||
- [getConfirmedSignaturesForAddress2](jsonrpc-api.md#getconfirmedsignaturesforaddress2)
|
||||
- [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction)
|
||||
- [getEpochInfo](jsonrpc-api.md#getepochinfo)
|
||||
- [getEpochSchedule](jsonrpc-api.md#getepochschedule)
|
||||
@@ -64,6 +65,16 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
- [slotSubscribe](jsonrpc-api.md#slotsubscribe)
|
||||
- [slotUnsubscribe](jsonrpc-api.md#slotunsubscribe)
|
||||
|
||||
## Unstable Methods
|
||||
|
||||
Unstable methods may see breaking changes in patch releases and may not be supported in perpetuity.
|
||||
|
||||
- [getTokenAccountBalance](jsonrpc-api.md#gettokenaccountbalance)
|
||||
- [getTokenAccountsByDelegate](jsonrpc-api.md#gettokenaccountsbydelegate)
|
||||
- [getTokenAccountsByOwner](jsonrpc-api.md#gettokenaccountsbyowner)
|
||||
- [getTokenLargestAccounts](jsonrpc-api.md#gettokenlargestaccounts)
|
||||
- [getTokenSupply](jsonrpc-api.md#gettokensupply)
|
||||
|
||||
## Request Formatting
|
||||
|
||||
To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields:
|
||||
@@ -146,8 +157,9 @@ Returns all information associated with the account of provided Pubkey
|
||||
- `<string>` - Pubkey of account to query, as base-58 encoded string
|
||||
- `<object>` - (optional) Configuration object containing the following optional fields:
|
||||
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
|
||||
- (optional) `encoding: <string>` - encoding for Account data, either "binary", "binary64", or jsonParsed". If parameter not provided, the default encoding is "binary". "binary" is base-58 encoded and limited to Account data of less than 128 bytes. "binary64" will return base64 encoded data for Account data of any size.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
|
||||
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
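As a rough sketch only (the official examples in this document use `curl`; this block is not part of the upstream docs), the following Rust program issues a `getAccountInfo` request using the "binary64" encoding together with a `dataSlice`. It assumes the `reqwest` crate (with the "blocking" and "json" features) and `serde_json` as dependencies, a local RPC node at `http://localhost:8899`, and an arbitrary example pubkey.

```rust
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Arbitrary example address; substitute any base-58 encoded pubkey.
    let pubkey = "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T";

    // JSON-RPC request body: base64 ("binary64") encoding, first 32 bytes of data only.
    let body = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "getAccountInfo",
        "params": [
            pubkey,
            { "encoding": "binary64", "dataSlice": { "offset": 0, "length": 32 } }
        ]
    });

    let response: Value = reqwest::blocking::Client::new()
        .post("http://localhost:8899")
        .json(&body)
        .send()?
        .json()?;

    // `result.value` holds the account fields (lamports, owner, sliced data, ...).
    println!("{}", response["result"]["value"]);
    Ok(())
}
```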
|
||||
|
||||
#### Results:
|
||||
|
||||
@@ -295,7 +307,7 @@ Returns identity and transaction information about a confirmed block in the ledg
|
||||
#### Parameters:
|
||||
|
||||
- `<u64>` - slot, as u64 integer
|
||||
- `<string>` - (optional) encoding for each returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON.
|
||||
- `<string>` - (optional) encoding for each returned Transaction, either "json", "jsonParsed", or "binary". If parameter not provided, the default encoding is JSON. **jsonParsed encoding is UNSTABLE**
|
||||
Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
|
||||
|
||||
#### Results:
|
||||
@@ -385,6 +397,8 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
|
||||
|
||||
### getConfirmedSignaturesForAddress
|
||||
|
||||
**DEPRECATED: Please use getConfirmedSignaturesForAddress2 instead**
|
||||
|
||||
Returns a list of all the confirmed signatures for transactions involving an
|
||||
address, within a specified Slot range. Max range allowed is 10,000 Slots
|
||||
|
||||
@@ -412,6 +426,37 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
|
||||
{"jsonrpc":"2.0","result":{["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4bJdGN8Tt2kLWZ3Fa1dpwPSEkXWWTSszPSf1rRVsCwNjxbbUdwTeiWtmi8soA26YmwnKD4aAxNp8ci1Gjpdv4gsr","4LQ14a7BYY27578Uj8LPCaVhSdJGLn9DJqnUJHpy95FMqdKf9acAhUhecPQNjNUy6VoNFUbvwYkPociFSf87cWbG"]},"id":1}
|
||||
```
|
||||
|
||||
|
||||
### getConfirmedSignaturesForAddress2
|
||||
|
||||
Returns confirmed signatures for transactions involving an
|
||||
address backwards in time from the provided signature or most recent confirmed block
|
||||
|
||||
#### Parameters:
|
||||
* `<string>` - account address as base-58 encoded string
|
||||
* `<object>` - (optional) Configuration object containing the following fields:
|
||||
* `before: <string>` - (optional) start searching backwards from this transaction signature.
|
||||
If not provided the search starts from the top of the highest max confirmed block.
|
||||
* `limit: <number>` - (optional) maximum transaction signatures to return (between 1 and 1,000, default: 1,000).
|
||||
|
||||
#### Results:
|
||||
The result field will be an array of transaction signature information, ordered
|
||||
from newest to oldest transaction:
|
||||
* `<object>`
|
||||
* `signature: <string>` - transaction signature as base-58 encoded string
|
||||
* `slot: <u64>` - The slot that contains the block with the transaction
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `memo: <string | null>` - Memo associated with the transaction, null if no memo is present
|
||||
|
||||
#### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress2","params":["Vote111111111111111111111111111111111111111", {"limit": 1}]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":[{"err":null,"memo":null,"signature":"5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv","slot":114}],"id":1}
|
||||
```
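For illustration only and not part of the upstream docs: a hedged Rust sketch of paging backwards through an address's full history by passing the oldest signature of each batch as the `before` parameter on the next call. It assumes `reqwest` (blocking + json features) and `serde_json` as dependencies and a node at `http://localhost:8899`.

```rust
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::blocking::Client::new();
    let address = "Vote111111111111111111111111111111111111111";
    let mut before: Option<String> = None;

    loop {
        // Results come back newest-to-oldest, at most 1,000 per call (the default limit).
        let config = match &before {
            Some(sig) => json!({ "limit": 1000, "before": sig }),
            None => json!({ "limit": 1000 }),
        };
        let body = json!({
            "jsonrpc": "2.0",
            "id": 1,
            "method": "getConfirmedSignaturesForAddress2",
            "params": [address, config]
        });
        let response: Value = client
            .post("http://localhost:8899")
            .json(&body)
            .send()?
            .json()?;

        let batch = response["result"].as_array().cloned().unwrap_or_default();
        if batch.is_empty() {
            break; // no older transactions remain
        }
        for entry in &batch {
            println!("{} (slot {})", entry["signature"], entry["slot"]);
        }
        // Continue the search from the oldest signature seen so far.
        before = batch
            .last()
            .and_then(|entry| entry["signature"].as_str().map(String::from));
    }
    Ok(())
}
```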
|
||||
|
||||
### getConfirmedTransaction
|
||||
|
||||
Returns transaction details for a confirmed transaction
|
||||
@@ -420,7 +465,7 @@ Returns transaction details for a confirmed transaction
|
||||
|
||||
- `<string>` - transaction signature as base-58 encoded string
- `<string>` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", or "binary".
- `<string>` - (optional) encoding for the returned Transaction, either "json", "jsonParsed", or "binary". **jsonParsed encoding is UNSTABLE**
  Parsed-JSON encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If parsed-JSON is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
|
||||
|
||||
#### Results:
|
||||
|
||||
@@ -801,7 +846,8 @@ Returns all accounts owned by the provided program Pubkey
|
||||
- `<object>` - (optional) Configuration object containing the following optional fields:
|
||||
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
|
||||
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
|
||||
- (optional) `filters: <array>` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results
|
||||
|
||||
##### Filters:
|
||||
@@ -1016,6 +1062,159 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"circulating":16000,"nonCirculating":1000000,"nonCirculatingAccounts":["FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5","9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA","3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9","BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z],total:1016000}},"id":1}
|
||||
```
|
||||
|
||||
### getTokenAccountBalance
|
||||
|
||||
Returns the token balance of an SPL Token account. **UNSTABLE**
|
||||
|
||||
#### Parameters:
|
||||
|
||||
- `<string>` - Pubkey of Token account to query, as base-58 encoded string
|
||||
- `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be an RpcResponse JSON object with `value` equal to a JSON object containing:
|
||||
|
||||
- `uiAmount: <f64>` - the balance, using mint-prescribed decimals
|
||||
- `amount: <string>` - the raw balance without decimals, a string representation of u64
|
||||
- `decimals: <u8>` - number of base 10 digits to the right of the decimal place
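The three fields are related as follows: `uiAmount` is the raw `amount` scaled down by `10^decimals`. A tiny illustrative Rust sketch (not code from this repository):

```rust
// uiAmount is the raw amount scaled down by 10^decimals,
// e.g. amount "9864" with 2 decimals -> 98.64.
fn ui_amount(amount: u64, decimals: u8) -> f64 {
    amount as f64 / 10f64.powi(decimals as i32)
}

fn main() {
    assert!((ui_amount(9864, 2) - 98.64).abs() < 1e-9);
    println!("{}", ui_amount(9864, 2));
}
```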
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getTokenAccountBalance", "params": ["7fUAJdStEuGbc3sM84cKRL6yYaaSstyLSU4ve5oovLS7"]}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"uiAmount":98.64,"amount":"9864","decimals":2},"id":1}
|
||||
```
|
||||
|
||||
### getTokenAccountsByDelegate
|
||||
|
||||
Returns all SPL Token accounts by approved Delegate. **UNSTABLE**
|
||||
|
||||
#### Parameters:
|
||||
|
||||
- `<string>` - Pubkey of account delegate to query, as base-58 encoded string
|
||||
- `<object>` - Either:
|
||||
* `mint: <string>` - Pubkey of the specific token Mint to limit accounts to, as base-58 encoded string; or
|
||||
* `programId: <string>` - Pubkey of the Token program ID that owns the accounts, as base-58 encoded string
|
||||
- `<object>` - (optional) Configuration object containing the following optional fields:
|
||||
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
|
||||
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be an RpcResponse JSON object with `value` equal to an array of JSON objects, which will contain:
|
||||
|
||||
- `pubkey: <string>` - the account Pubkey as base-58 encoded string
|
||||
- `account: <object>` - a JSON object, with the following sub fields:
|
||||
- `lamports: <u64>`, number of lamports assigned to this account, as a u64
|
||||
- `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
|
||||
- `data: <object>`, Token state data associated with the account, either as base-58 encoded binary data or in JSON format `{<program>: <state>}`
|
||||
- `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
- `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params": ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"programId": "TokenSVp5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"}, {"encoding": "jsonParsed"}]}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":[{"data":{"program":"spl-token","parsed":{"accountType":"account","info":{"tokenAmount":{"amount":"1","uiAmount":0.1,"decimals":1},"delegate":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","delegatedAmount":1,"isInitialized":true,"isNative":false,"mint":"3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E","owner":"CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD"}}},"executable":false,"lamports":1726080,"owner":"TokenSVp5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o","rentEpoch":4},"pubkey":"CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD"}],"id":1}
|
||||
```
|
||||
|
||||
### getTokenAccountsByOwner
|
||||
|
||||
Returns all SPL Token accounts by token owner. **UNSTABLE**
|
||||
|
||||
#### Parameters:
|
||||
|
||||
- `<string>` - Pubkey of account owner to query, as base-58 encoded string
|
||||
- `<object>` - Either:
|
||||
* `mint: <string>` - Pubkey of the specific token Mint to limit accounts to, as base-58 encoded string; or
|
||||
* `programId: <string>` - Pubkey of the Token program ID that owns the accounts, as base-58 encoded string
|
||||
- `<object>` - (optional) Configuration object containing the following optional fields:
|
||||
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
|
||||
- (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "binary" or "binary64" encoding.
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be an RpcResponse JSON object with `value` equal to an array of JSON objects, which will contain:
|
||||
|
||||
- `pubkey: <string>` - the account Pubkey as base-58 encoded string
|
||||
- `account: <object>` - a JSON object, with the following sub fields:
|
||||
- `lamports: <u64>`, number of lamports assigned to this account, as a u64
|
||||
- `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
|
||||
- `data: <object>`, Token state data associated with the account, either as base-58 encoded binary data or in JSON format `{<program>: <state>}`
|
||||
- `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
- `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params": ["4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F", {"mint":"3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E"}, {"encoding": "jsonParsed"}]}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":[{"data":{"program":"spl-token","parsed":{"accountType":"account","info":{"tokenAmount":{"amount":"1","uiAmount":0.1,"decimals":1},"delegate":null,"delegatedAmount":1,"isInitialized":true,"isNative":false,"mint":"3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E","owner":"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F"}}},"executable":false,"lamports":1726080,"owner":"TokenSVp5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o","rentEpoch":4},"pubkey":"CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD"}],"id":1}
|
||||
```
|
||||
|
||||
### getTokenLargestAccounts
|
||||
|
||||
Returns the 20 largest accounts of a particular SPL Token type. **UNSTABLE**
|
||||
|
||||
#### Parameters:
|
||||
|
||||
- `<string>` - Pubkey of token Mint to query, as base-58 encoded string
|
||||
- `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be an RpcResponse JSON object with `value` equal to an array of JSON objects containing:
|
||||
|
||||
- `address: <string>` - the address of the token account
|
||||
- `uiAmount: <f64>` - the token account balance, using mint-prescribed decimals
|
||||
- `amount: <string>` - the raw token account balance without decimals, a string representation of u64
|
||||
- `decimals: <u8>` - number of base 10 digits to the right of the decimal place
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getTokenLargestAccounts", "params": ["3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E"]}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":[{"address":"FYjHNoFtSQ5uijKrZFyYAxvEr87hsKXkXcxkcmkBAf4r","amount":"771","decimals":2,"uiAmount":7.71},{"address":"BnsywxTcaYeNUtzrPxQUvzAWxfzZe3ZLUJ4wMMuLESnu","amount":"229","decimals":2,"uiAmount":2.29}],"id":1}
|
||||
```
|
||||
|
||||
### getTokenSupply
|
||||
|
||||
Returns the total supply of an SPL Token type. **UNSTABLE**
|
||||
|
||||
#### Parameters:
|
||||
|
||||
- `<string>` - Pubkey of token Mint to query, as base-58 encoded string
|
||||
- `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be an RpcResponse JSON object with `value` equal to a JSON object containing:
|
||||
|
||||
- `uiAmount: <f64>` - the total token supply, using mint-prescribed decimals
|
||||
- `amount: <string>` - the raw total token supply without decimals, a string representation of u64
|
||||
- `decimals: <u8>` - number of base 10 digits to the right of the decimal place
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getTokenSupply", "params": ["3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E"]}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"uiAmount":1000.0,"amount":"100000","decimals":2},"id":1}
|
||||
```
|
||||
|
||||
### getTransactionCount
|
||||
|
||||
Returns the current Transaction count from the ledger
|
||||
@@ -1058,7 +1257,7 @@ The result field will be a JSON object with the following fields:
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.2.14"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.2.23"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
@@ -1184,7 +1383,7 @@ Simulate sending a transaction
|
||||
An RpcResponse containing a TransactionStatus object
|
||||
The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields:
|
||||
|
||||
- `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
- `err: <object | string | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
- `logs: <array | null>` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure)
|
||||
|
||||
#### Example:
|
||||
@@ -1194,7 +1393,7 @@ The result will be an RpcResponse JSON object with `value` set to a JSON object
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"simulateTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":218},"value":{"confirmations":0,"err":null,"slot":218,"status":{"Ok":null}}},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":218},"value":{"err":null,"logs":["BPF program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri success"]},"id":1}
|
||||
```
|
||||
|
||||
### setLogFilter
|
||||
@@ -1259,7 +1458,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
|
||||
- `<object>` - (optional) Configuration object containing the following optional fields:
|
||||
- `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
|
||||
|
||||
#### Results:
|
||||
|
||||
@@ -1314,8 +1513,10 @@ Subscribe to an account to receive notifications when the lamports or data for a
|
||||
},
|
||||
"value": {
|
||||
"data": {
|
||||
"nonce": {
|
||||
"initialized": {
|
||||
"program": "nonce"
|
||||
"parsed": {
|
||||
"type": "initialized",
|
||||
"info": {
|
||||
"authority": "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX",
|
||||
"blockhash": "LUaQTmM7WbMRiATdMMHaRGakPtCkc2GHtH57STKXs6k",
|
||||
"feeCalculator": {
|
||||
@@ -1367,7 +1568,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
|
||||
- `<object>` - (optional) Configuration object containing the following optional fields:
|
||||
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
- (optional) `encoding: <string>` - encoding for Account data, either "binary" or jsonParsed". If parameter not provided, the default encoding is binary.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
|
||||
Parsed-JSON encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If parsed-JSON is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`. **jsonParsed encoding is UNSTABLE**
|
||||
- (optional) `filters: <array>` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results
|
||||
|
||||
#### Results:
|
||||
@@ -1430,8 +1631,10 @@ Subscribe to a program to receive notifications when the lamports or data for a
|
||||
"pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq"
|
||||
"account": {
|
||||
"data": {
|
||||
"nonce": {
|
||||
"initialized": {
|
||||
"program": "nonce"
|
||||
"parsed": {
|
||||
"type": "initialized",
|
||||
"info": {
|
||||
"authority": "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX",
|
||||
"blockhash": "LUaQTmM7WbMRiATdMMHaRGakPtCkc2GHtH57STKXs6k",
|
||||
"feeCalculator": {
|
||||
|
@@ -1,4 +1,6 @@
|
||||
# solana CLI
|
||||
---
|
||||
title: CLI Usage Reference
|
||||
---
|
||||
|
||||
The [solana-cli crate](https://crates.io/crates/solana-cli) provides a command-line interface tool for Solana
|
||||
|
||||
|
@@ -14,6 +14,14 @@ Currently, the rent cost is fixed at the genesis. However, it's anticipated to b
|
||||
|
||||
Rent is collected from accounts at two times: \(1\) when an account is referenced by a transaction, and \(2\) periodically, once per epoch. \(1\) includes the transaction that creates the new account itself, and it happens during normal transaction processing by the bank as part of the load phase. \(2\) exists to ensure rent is collected from stale accounts that aren't referenced in recent epochs at all. \(2\) requires a full scan of accounts and is spread over an epoch, based on account address prefix, to avoid load spikes from this rent collection.
|
||||
|
||||
In contrast, rent collection isn't applied to accounts that are directly manipulated by any of the protocol-level bookkeeping processes, including:
|
||||
|
||||
- The distribution of rent collection itself (otherwise, it could cause recursive handling of rent collection)
|
||||
- The distribution of staking rewards at the start of every epoch (to minimize the processing spike at the start of a new epoch)
|
||||
- The distribution of transaction fees at the end of every slot
|
||||
|
||||
Even though those processes are out of scope of rent collection, all of the manipulated accounts will eventually be handled by the \(2\) mechanism.
|
||||
|
||||
## Actual processing of collecting rent
|
||||
|
||||
Rent is due for one epoch's worth of time, and accounts always have `Account::rent_epoch` of `current_epoch + 1`.
|
||||
|
106
docs/src/implemented-proposals/rpc-transaction-history.md
Normal file
@@ -0,0 +1,106 @@
|
||||
# Long term RPC Transaction History
|
||||
There's a need for RPC to serve at least 6 months of transaction history. The
|
||||
current history, on the order of days, is insufficient for downstream users.
|
||||
|
||||
6 months of transaction data cannot be stored practically in a validator's
|
||||
rocksdb ledger so an external data store is necessary. The validator's
|
||||
rocksdb ledger will continue to serve as the primary data source; requests that
it cannot answer will fall back to the external data store.
|
||||
|
||||
The affected RPC endpoints are:
|
||||
* [getFirstAvailableBlock](https://docs.solana.com/apps/jsonrpc-api#getfirstavailableblock)
|
||||
* [getConfirmedBlock](https://docs.solana.com/apps/jsonrpc-api#getconfirmedblock)
|
||||
* [getConfirmedBlocks](https://docs.solana.com/apps/jsonrpc-api#getconfirmedblocks)
|
||||
* [getConfirmedSignaturesForAddress](https://docs.solana.com/apps/jsonrpc-api#getconfirmedsignaturesforaddress)
|
||||
* [getConfirmedTransaction](https://docs.solana.com/apps/jsonrpc-api#getconfirmedtransaction)
|
||||
* [getSignatureStatuses](https://docs.solana.com/apps/jsonrpc-api#getsignaturestatuses)
|
||||
|
||||
Note that [getBlockTime](https://docs.solana.com/apps/jsonrpc-api#getblocktime)
is not supported; once https://github.com/solana-labs/solana/issues/10089 is
fixed, `getBlockTime` can be removed.
|
||||
|
||||
Some system design constraints:
|
||||
* The volume of data to store and search can quickly jump into the terabytes,
|
||||
and is immutable.
|
||||
* The system should be as light as possible for SREs. For example an SQL
|
||||
database cluster that requires an SRE to continually monitor and rebalance
|
||||
nodes is undesirable.
|
||||
* Data must be searchable in real time - batched queries that take minutes or
|
||||
hours to run are unacceptable.
|
||||
* Easy to replicate the data worldwide to co-locate it with the RPC endpoints
|
||||
that will utilize it.
|
||||
* Interfacing with the external data store should be easy and not require
|
||||
depending on risky lightly-used community-supported code libraries
|
||||
|
||||
Based on these constraints, Google's BigTable product is selected as the data
|
||||
store.
|
||||
|
||||
## Table Schema
|
||||
A BigTable instance is used to hold all transaction data, broken up into
|
||||
different tables for quick searching.
|
||||
|
||||
New data may be copied into the instance at any time without affecting the existing
|
||||
data, and all data is immutable. Generally the expectation is that new data
|
||||
will be uploaded once the current epoch completes, but there is no limitation on
|
||||
the frequency of data dumps.
|
||||
|
||||
Cleanup of old data is automatic: configure the data retention policy of the
instance tables appropriately and expired data simply disappears. The order in which data is
added therefore becomes important. For example, if data from epoch N-1 is added after data
from epoch N, the older epoch data will outlive the newer data. However, beyond
|
||||
producing _holes_ in query results, this kind of unordered deletion will
|
||||
have no ill effect. Note that this method of cleanup effectively allows for an
|
||||
unlimited amount of transaction data to be stored, restricted only by the
|
||||
monetary costs of doing so.
|
||||
|
||||
The table layout supports the existing RPC endpoints only. New RPC endpoints
|
||||
in the future may require additions to the schema and potentially iterating over
|
||||
all transactions to build up the necessary metadata.
|
||||
|
||||
## Accessing BigTable
|
||||
BigTable has a gRPC endpoint that can be accessed using the
|
||||
[tonic](https://crates.io/crates/tonic) crate and the raw protobuf API, as currently no
|
||||
higher-level Rust crate for BigTable exists. Practically this makes parsing the
|
||||
results of BigTable queries more complicated but is not a significant issue.
|
||||
|
||||
## Data Population
|
||||
The ongoing population of instance data will occur on an epoch cadence through the
|
||||
use of a new `solana-ledger-tool` command that will convert rocksdb data for a
|
||||
given slot range into the instance schema.
|
||||
|
||||
The same process will be run once, manually, to backfill the existing ledger
|
||||
data.
|
||||
|
||||
### Block Table: `block`
|
||||
|
||||
This table contains the compressed block data for a given slot.
|
||||
|
||||
The row key is generated by taking the 16 digit lower case hexadecimal
|
||||
representation of the slot, to ensure that the oldest slot with a confirmed
|
||||
block will always be first when the rows are listed, e.g. the row key for slot
|
||||
42 would be 000000000000002a.
|
||||
|
||||
The row data is a compressed `StoredConfirmedBlock` struct.
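A minimal Rust sketch of this row-key scheme (the function name is illustrative, not necessarily the one used in the Solana code):

```rust
/// Row key for the `block` table: the slot as 16 lower-case hex digits,
/// so lexicographic row order matches slot order.
fn block_row_key(slot: u64) -> String {
    format!("{:016x}", slot)
}

fn main() {
    // Slot 42 -> "000000000000002a", matching the example above.
    assert_eq!(block_row_key(42), "000000000000002a");
    println!("{}", block_row_key(42));
}
```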
|
||||
|
||||
|
||||
### Account Address Transaction Signature Lookup Table: `tx-by-addr`
|
||||
|
||||
This table contains the transactions that affect a given address.
|
||||
|
||||
The row key is `<base58
|
||||
address>/<slot-id-one's-complement-hex-slot-0-prefixed-to-16-digits>`. The row
|
||||
data is a compressed `TransactionByAddrInfo` struct.
|
||||
|
||||
Taking the one's complement of the slot ensures that
|
||||
the newest slot with transactions that affect an address will always
|
||||
be listed first.
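A similar illustrative Rust sketch of the `tx-by-addr` row key, under the same caveat that the function name is hypothetical; `!slot` is the one's complement, so newer (higher) slots produce lexicographically smaller suffixes and therefore list first:

```rust
/// Row key for the `tx-by-addr` table:
/// `<base58 address>/<one's complement of the slot, 16 hex digits>`.
fn tx_by_addr_row_key(address: &str, slot: u64) -> String {
    format!("{}/{:016x}", address, !slot)
}

fn main() {
    // The newer slot sorts before the older one for the same address.
    let newer = tx_by_addr_row_key("Vote111111111111111111111111111111111111111", 100);
    let older = tx_by_addr_row_key("Vote111111111111111111111111111111111111111", 99);
    assert!(newer < older);
    println!("{}\n{}", newer, older);
}
```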
|
||||
|
||||
Sysvar addresses are not indexed. However, frequently used programs such as
|
||||
Vote or System are, and will likely have a row for every confirmed slot.
|
||||
|
||||
### Transaction Signature Lookup Table: `tx`
|
||||
|
||||
This table maps a transaction signature to its confirmed block and its index within that block.
|
||||
|
||||
The row key is the base58-encoded transaction signature.
|
||||
The row data is a compressed `TransactionInfo` struct.
|
@@ -168,12 +168,15 @@ vote account on the network. If you have completed this step, you should see the
|
||||
solana-keygen new -o ~/vote-account-keypair.json
|
||||
```
|
||||
|
||||
Create your vote account on the blockchain:
|
||||
The following command can be used to create your vote account on the blockchain
|
||||
with all the default options:
|
||||
|
||||
```bash
|
||||
solana create-vote-account ~/vote-account-keypair.json ~/validator-keypair.json
|
||||
```
|
||||
|
||||
Read more about [creating and managing a vote account](vote-accounts.md).
|
||||
|
||||
## Trusted validators
|
||||
|
||||
If you know and trust other validator nodes, you can specify this on the command line with the `--trusted-validator <PUBKEY>`
|
||||
|
146
docs/src/running-validator/vote-accounts.md
Normal file
@@ -0,0 +1,146 @@
|
||||
---
|
||||
title: Vote Account Management
|
||||
---
|
||||
|
||||
This page describes how to set up an on-chain _vote account_. Creating a vote
|
||||
account is needed if you plan to run a validator node on Solana.
|
||||
|
||||
## Create a Vote Account
|
||||
A vote account can be created with the
|
||||
[create-vote-account](../cli/usage.md#solana-create-vote-account) command.
|
||||
The vote account can be configured when first created or after the validator is
|
||||
running. All aspects of the vote account can be changed except for the
|
||||
[vote account address](#vote-account-address), which is fixed for the lifetime
|
||||
of the account.
|
||||
|
||||
### Configure an Existing Vote Account
|
||||
- To change the [validator identity](#validator-identity), use
|
||||
[vote-update-validator](../cli/usage.md#solana-vote-update-validator).
|
||||
- To change the [vote authority](#vote-authority), use
|
||||
[vote-authorize-voter](../cli/usage.md#solana-vote-authorize-voter).
|
||||
- To change the [withdraw authority](#withdraw-authority), use
|
||||
[vote-authorize-withdrawer](../cli/usage.md#solana-vote-authorize-withdrawer).
|
||||
- To change the [commission](#commission), use
|
||||
[vote-update-commission](../cli/usage.md#solana-vote-update-commission).
|
||||
|
||||
## Vote Account Structure
|
||||
|
||||
### Vote Account Address
|
||||
A vote account is created at an address that is either the public key of a
|
||||
keypair file, or at a derived address based on a keypair file's public key and
|
||||
a seed string.
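As a rough sketch of these two options (assuming the `solana-sdk` and `solana-vote-program` crates as dependencies; the seed string below is arbitrary):

```rust
use solana_sdk::{pubkey::Pubkey, signature::{Keypair, Signer}};

fn main() {
    let base_keypair = Keypair::new();

    // Option 1: the vote account address is simply the keypair's public key.
    let direct_address: Pubkey = base_keypair.pubkey();

    // Option 2: the address is derived from the public key plus a seed string,
    // with the vote program as the owning program.
    let derived_address = Pubkey::create_with_seed(
        &base_keypair.pubkey(),
        "vote-account-seed", // arbitrary example seed
        &solana_vote_program::id(),
    )
    .expect("seed is within the allowed length");

    println!("direct:  {}", direct_address);
    println!("derived: {}", derived_address);
}
```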
|
||||
|
||||
The address of a vote account is never needed to sign any transactions,
|
||||
but is just used to look up the account information.
|
||||
|
||||
When someone wants to [delegate tokens in a stake account](../staking.md),
|
||||
the delegation command is pointed at the vote account address of the validator
|
||||
to whom the token-holder wants to delegate.
|
||||
|
||||
### Validator Identity
|
||||
|
||||
The _validator identity_ is a system account that is used to pay for all the
|
||||
vote transaction fees submitted to the vote account.
|
||||
Because the validator is expected to vote on most valid blocks it receives,
|
||||
the validator identity account is frequently
|
||||
(potentially multiple times per second) signing transactions and
|
||||
paying fees. For this reason the validator identity keypair must be
|
||||
stored as a "hot wallet" in a keypair file on the same system the validator
|
||||
process is running.
|
||||
|
||||
Because a hot wallet is generally less secure than an offline or "cold" wallet,
|
||||
the validator operator may choose to store only enough SOL on the identity
|
||||
account to cover voting fees for a limited amount of time, such as a few weeks
|
||||
or months. The validator identity account could be periodically topped off
|
||||
from a more secure wallet.
|
||||
|
||||
This practice can reduce the risk of loss of funds if the validator node's
|
||||
disk or file system becomes compromised or corrupted.
|
||||
|
||||
The validator identity is required to be provided when a vote account is created.
|
||||
The validator identity can also be changed after an account is created by using
|
||||
the [vote-update-validator](../cli/usage.md#solana-vote-update-validator) command.
|
||||
|
||||
### Vote Authority
|
||||
|
||||
The _vote authority_ keypair is used to sign each vote transaction the validator
|
||||
node wants to submit to the cluster. This doesn't necessarily have to be distinct
|
||||
from the validator identity, as you will see later in this document. Because
|
||||
the vote authority, like the validator identity, is signing transactions
|
||||
frequently, this also must be a hot keypair on the same file system as the
|
||||
validator process.
|
||||
|
||||
The vote authority can be set to the same address as the validator identity.
|
||||
If the validator identity is also the vote authority, only one
|
||||
signature per vote transaction is needed in order to both sign the vote and pay
|
||||
the transaction fee. Because transaction fees on Solana are assessed
|
||||
per-signature, having one signer instead of two will result in half the transaction
|
||||
fee paid compared to setting the vote authority and validator identity to two
|
||||
different accounts.
|
||||
|
||||
The vote authority can be set when the vote account is created. If it is not
|
||||
provided, the default behavior is to assign it the same as the validator identity.
|
||||
The vote authority can be changed later with the
|
||||
[vote-authorize-voter](../cli/usage.md#solana-vote-authorize-voter) command.
|
||||
|
||||
The vote authority can be changed at most once per epoch. If the authority is
|
||||
changed with [vote-authorize-voter](../cli/usage.md#solana-vote-authorize-voter),
|
||||
this will not take effect until the beginning of the next epoch.
|
||||
To support a smooth transition of the vote signing,
|
||||
`solana-validator` allows the `--authorized-voter` argument to be specified
|
||||
multiple times. This allows the validator process to keep voting successfully
|
||||
when the network reaches an epoch boundary at which the validator's vote
|
||||
authority account changes.

### Withdraw Authority

The _withdraw authority_ keypair is used to withdraw funds from a vote account
using the [withdraw-from-vote-account](../cli/usage.md#solana-withdraw-from-vote-account)
command. Any network rewards a validator earns are deposited into the vote
account and are only retrievable by signing with the withdraw authority keypair.

The withdraw authority is also required to sign any transaction to change
a vote account's [commission](#commission), and to change the validator
identity on a vote account.

Because the vote account could accrue a significant balance, consider keeping
the withdraw authority keypair in an offline/cold wallet, as it is not needed
to sign frequent transactions.

The withdraw authority can be set at vote account creation with the
`--authorized-withdrawer` option. If this is not provided, the validator
identity will be set as the withdraw authority by default.

The withdraw authority can be changed later with the
[vote-authorize-withdrawer](../cli/usage.md#solana-vote-authorize-withdrawer)
command.

### Commission

_Commission_ is the percentage of network rewards earned by a validator that is
deposited into the validator's vote account. The remainder of the rewards is
distributed to all of the stake accounts delegated to that vote account,
proportional to the active stake weight of each stake account.

For example, if a vote account has a commission of 10%, then 10% of the rewards
earned by that validator in a given epoch are deposited into the vote account
in the first block of the following epoch. The remaining 90% is deposited into
delegated stake accounts as immediately active stake.
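
The arithmetic behind that split is straightforward; the following Rust sketch illustrates it (illustration only, not the runtime's rewards code; integer division simply truncates):

```rust
// Split one epoch's rewards between the vote account (commission) and the
// delegated stake accounts, proportional to each account's active stake.
fn split_rewards(total_rewards: u64, commission_percent: u64, stakes: &[u64]) -> (u64, Vec<u64>) {
    let validator_cut = total_rewards * commission_percent / 100;
    let delegator_pool = total_rewards - validator_cut;
    let total_stake: u64 = stakes.iter().sum();
    let per_account: Vec<u64> = stakes
        .iter()
        .map(|stake| delegator_pool * stake / total_stake.max(1))
        .collect();
    (validator_cut, per_account)
}

fn main() {
    // 1,000 lamports of rewards at 10% commission, two delegators with a 3:1 stake split.
    let (validator_cut, delegator_rewards) = split_rewards(1_000, 10, &[750, 250]);
    assert_eq!(validator_cut, 100);
    assert_eq!(delegator_rewards, vec![675, 225]);
}
```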

A validator may choose to set a low commission to try to attract more stake
delegations, as a lower commission passes a larger percentage of rewards along
to the delegators. Because there are costs associated with setting up and
operating a validator node, a validator would ideally set a commission high
enough to at least cover those expenses.

Commission can be set upon vote account creation with the `--commission` option.
If it is not provided, it defaults to 100%, which results in all rewards being
deposited in the vote account and none passed on to any delegated stake
accounts.

Commission can also be changed later with the
[vote-update-commission](../cli/usage.md#solana-vote-update-commission) command.

When setting the commission, only integer values in the range [0-100] are
accepted. The integer represents the number of percentage points for the
commission, so creating an account with `--commission 10` sets a 10% commission.

@@ -53,10 +53,10 @@ Solana supports several types of wallets in the Solana native
command-line app as well as wallets from third-parties.

For the majority of users, we recommend using one of the
[app wallets](apps.md), which will provide a more familiar user
[app wallets](wallet-guide/apps.md), which will provide a more familiar user
experience rather than needing to learn command line tools.

For advanced users or developers, the [command-line wallets](cli.md)
For advanced users or developers, the [command-line wallets](wallet-guide/cli.md)
may be more appropriate, as new features on the Solana blockchain will always be
supported on the command line first before being integrated into third-party
solutions.

@@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-dos"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -13,14 +13,14 @@ clap = "2.33.1"
|
||||
log = "0.4.8"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.3.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-core = { path = "../core", version = "1.2.14" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-core = { path = "../core", version = "1.2.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-download-utils"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana Download Utils"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -14,8 +14,8 @@ console = "0.10.1"
|
||||
indicatif = "0.14.0"
|
||||
log = "0.4.8"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.23" }
|
||||
tar = "0.4.28"
|
||||
|
||||
[lib]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-faucet"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana Faucet"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -16,11 +16,11 @@ clap = "2.33"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@@ -1,6 +1,6 @@
#!/usr/bin/env bash

PERF_LIBS_VERSION=v0.19.0
PERF_LIBS_VERSION=v0.19.1
VERSION=$PERF_LIBS_VERSION-1

set -e

50  fetch-spl.sh  Executable file
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
#
# Fetches the latest SPL programs and produces the solana-genesis command-line
# arguments needed to install them
#

set -e

fetch_program() {
  declare name=$1
  declare version=$2
  declare address=$3

  declare so=spl_$name-$version.so

  genesis_args+=(--bpf-program "$address" "$so")

  if [[ -r $so ]]; then
    return
  fi

  if [[ -r ~/.cache/solana-spl/$so ]]; then
    cp ~/.cache/solana-spl/"$so" "$so"
  else
    echo "Downloading $name $version"
    (
      set -x
      curl -L --retry 5 --retry-delay 2 --retry-connrefused \
        -o "$so" \
        "https://github.com/solana-labs/solana-program-library/releases/download/$name-v$version/spl_$name.so"
    )

    mkdir -p ~/.cache/solana-spl
    cp "$so" ~/.cache/solana-spl/"$so"
  fi

}

fetch_program token 1.0.0 TokenSVp5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o
fetch_program memo 1.0.0 Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo

echo "${genesis_args[@]}" > spl-genesis-args.sh

echo
echo "Available SPL programs:"
ls -l spl_*.so

echo
echo "solana-genesis command-line arguments (spl-genesis-args.sh):"
cat spl-genesis-args.sh
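
The generated `spl-genesis-args.sh` is meant to be passed to `solana-genesis`, whose new `--bpf-program ADDRESS BPF_PROGRAM.SO` option (added later in this diff) reads each pair and installs the program as an executable account owned by the BPF loader. A condensed Rust sketch of that pairing step (simplified from the genesis change below; error handling and the real account types are omitted):

```rust
use std::fs;

// Collect (address, program bytes) pairs from a flat list of
// `ADDRESS PROGRAM.SO` arguments, two values per program.
fn collect_bpf_programs(values: &[&str]) -> Vec<(String, Vec<u8>)> {
    values
        .chunks(2)
        .filter_map(|pair| match pair {
            [address, program_path] => fs::read(program_path)
                .ok()
                .map(|program_data| (address.to_string(), program_data)),
            _ => None,
        })
        .collect()
}

fn main() {
    // Matches the layout written to spl-genesis-args.sh by fetch-spl.sh.
    let args = [
        "TokenSVp5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o",
        "spl_token-1.0.0.so",
        "Memo1UhkJRfHyvLMcVucJwxXeuD728EqVDDwQDxFMNo",
        "spl_memo-1.0.0.so",
    ];
    for (address, data) in collect_bpf_programs(&args) {
        println!("{}: {} bytes", address, data.len());
    }
}
```
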
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-genesis-programs"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana genesis programs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -10,12 +10,12 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4.8" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.14" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.14" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.2.14" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "1.2.14" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.2.23" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.2.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "1.2.23" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -1,6 +1,5 @@
|
||||
use solana_sdk::{
|
||||
clock::Epoch, genesis_config::OperatingMode, inflation::Inflation,
|
||||
move_loader::solana_move_loader_program, pubkey::Pubkey,
|
||||
clock::Epoch, genesis_config::OperatingMode, inflation::Inflation, pubkey::Pubkey,
|
||||
};
|
||||
|
||||
#[macro_use]
|
||||
@@ -57,7 +56,6 @@ pub fn get_programs(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(
|
||||
// Programs that are only available in Development mode
|
||||
solana_budget_program!(),
|
||||
solana_exchange_program!(),
|
||||
solana_move_loader_program(),
|
||||
])
|
||||
} else {
|
||||
None
|
||||
@@ -107,6 +105,11 @@ pub fn get_entered_epoch_callback(operating_mode: OperatingMode) -> EnteredEpoch
|
||||
bank.add_native_program(name, program_id);
|
||||
}
|
||||
}
|
||||
if OperatingMode::Stable == operating_mode {
|
||||
bank.set_cross_program_support(bank.epoch() >= 63);
|
||||
} else {
|
||||
bank.set_cross_program_support(true);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -135,7 +138,7 @@ mod tests {
|
||||
fn test_development_programs() {
|
||||
assert_eq!(
|
||||
get_programs(OperatingMode::Development, 0).unwrap().len(),
|
||||
5
|
||||
4
|
||||
);
|
||||
assert_eq!(get_programs(OperatingMode::Development, 1), None);
|
||||
}
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -15,14 +15,14 @@ chrono = "0.4"
|
||||
serde = "1.0.110"
|
||||
serde_json = "1.0.53"
|
||||
serde_yaml = "0.8.12"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.14" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.14" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -12,7 +12,7 @@ use solana_ledger::{
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock,
|
||||
bpf_loader, clock,
|
||||
epoch_schedule::EpochSchedule,
|
||||
fee_calculator::FeeRateGovernor,
|
||||
genesis_config::{GenesisConfig, OperatingMode},
|
||||
@@ -26,7 +26,14 @@ use solana_sdk::{
|
||||
use solana_stake_program::stake_state::{self, StakeState};
|
||||
use solana_vote_program::vote_state::{self, VoteState};
|
||||
use std::{
|
||||
collections::HashMap, error, fs::File, io, path::PathBuf, process, str::FromStr, time::Duration,
|
||||
collections::HashMap,
|
||||
error,
|
||||
fs::File,
|
||||
io::{self, Read},
|
||||
path::PathBuf,
|
||||
process,
|
||||
str::FromStr,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub enum AccountFileFormat {
|
||||
@@ -341,6 +348,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
"maximum total uncompressed file size of created genesis archive",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("bpf_program")
|
||||
.long("bpf-program")
|
||||
.value_name("ADDRESS BPF_PROGRAM.SO")
|
||||
.takes_value(true)
|
||||
.number_of_values(2)
|
||||
.multiple(true)
|
||||
.help("Install a BPF program at the given address"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let faucet_lamports = value_t!(matches, "faucet_lamports", u64).unwrap_or(0);
|
||||
@@ -535,6 +551,39 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
|
||||
add_genesis_accounts(&mut genesis_config, issued_lamports - faucet_lamports);
|
||||
|
||||
if let Some(values) = matches.values_of("bpf_program") {
|
||||
let values: Vec<&str> = values.collect::<Vec<_>>();
|
||||
for address_program in values.chunks(2) {
|
||||
match address_program {
|
||||
[address, program] => {
|
||||
let address = address.parse::<Pubkey>().unwrap_or_else(|err| {
|
||||
eprintln!("Error: invalid address {}: {}", address, err);
|
||||
process::exit(1);
|
||||
});
|
||||
|
||||
let mut program_data = vec![];
|
||||
File::open(program)
|
||||
.and_then(|mut file| file.read_to_end(&mut program_data))
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Error: failed to read {}: {}", program, err);
|
||||
process::exit(1);
|
||||
});
|
||||
genesis_config.add_account(
|
||||
address,
|
||||
Account {
|
||||
lamports: genesis_config.rent.minimum_balance(program_data.len()),
|
||||
data: program_data,
|
||||
executable: true,
|
||||
owner: bpf_loader::id(),
|
||||
rent_epoch: 0,
|
||||
},
|
||||
);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
solana_logger::setup();
|
||||
create_new_ledger(
|
||||
&ledger_path,
|
||||
|
@@ -3,20 +3,20 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-core = { path = "../core", version = "1.2.14" }
|
||||
solana-client = { path = "../client", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-core = { path = "../core", version = "1.2.23" }
|
||||
solana-client = { path = "../client", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-install"
|
||||
description = "The solana cluster software installer"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -24,12 +24,12 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.110"
|
||||
serde_derive = "1.0.103"
|
||||
serde_yaml = "0.8.12"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-client = { path = "../client", version = "1.2.14" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-client = { path = "../client", version = "1.2.23" }
|
||||
solana-config-program = { path = "../programs/config", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
semver = "0.9.0"
|
||||
tar = "0.4.28"
|
||||
tempdir = "0.3.7"
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-keygen"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana key generation utility"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -13,11 +13,11 @@ bs58 = "0.3.1"
|
||||
clap = "2.33"
|
||||
dirs = "2.0.2"
|
||||
num_cpus = "1.13.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.2.14" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-cli-config = { path = "../cli-config", version = "1.2.23" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
tiny-bip39 = "0.7.0"
|
||||
|
||||
[[bin]]
|
||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-ledger-tool"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@@ -12,22 +12,27 @@ homepage = "https://solana.com/"
|
||||
bs58 = "0.3.1"
|
||||
bytecount = "0.6.0"
|
||||
clap = "2.33.1"
|
||||
futures = "0.3.5"
|
||||
futures-util = "0.3.5"
|
||||
histogram = "*"
|
||||
log = { version = "0.4.8" }
|
||||
regex = "1"
|
||||
serde_json = "1.0.53"
|
||||
serde_yaml = "0.8.12"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
|
||||
solana-cli = { path = "../cli", version = "1.2.14" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.14" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.14" }
|
||||
solana-version = { path = "../version", version = "1.2.14" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
|
||||
solana-cli = { path = "../cli", version = "1.2.23" }
|
||||
solana-ledger = { path = "../ledger", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-measure = { path = "../measure", version = "1.2.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
|
||||
solana-storage-bigtable = { path = "../storage-bigtable", version = "1.2.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
|
||||
solana-version = { path = "../version", version = "1.2.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
|
||||
tempfile = "3.1.0"
|
||||
regex = "1"
|
||||
tokio = { version = "0.2.22", features = ["full"] }
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "1.0"
|
||||
|
551
ledger-tool/src/bigtable.rs
Normal file
@@ -0,0 +1,551 @@
|
||||
/// The `bigtable` subcommand
|
||||
use clap::{value_t, value_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
use log::*;
|
||||
use solana_clap_utils::{
|
||||
input_parsers::pubkey_of,
|
||||
input_validators::{is_slot, is_valid_pubkey},
|
||||
};
|
||||
use solana_cli::display::println_transaction;
|
||||
use solana_ledger::{blockstore::Blockstore, blockstore_db::AccessType};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
|
||||
use solana_transaction_status::UiTransactionEncoding;
|
||||
use std::{collections::HashSet, path::Path, process::exit, result::Result, time::Duration};
|
||||
use tokio::time::delay_for;
|
||||
|
||||
// Attempt to upload this many blocks in parallel
|
||||
const NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL: usize = 32;
|
||||
|
||||
// Read up to this many blocks from blockstore before blocking on the upload process
|
||||
const BLOCK_READ_AHEAD_DEPTH: usize = NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL * 2;
|
||||
|
||||
async fn upload(
|
||||
blockstore: Blockstore,
|
||||
starting_slot: Slot,
|
||||
ending_slot: Option<Slot>,
|
||||
allow_missing_metadata: bool,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let mut measure = Measure::start("entire upload");
|
||||
|
||||
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
|
||||
.await
|
||||
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
|
||||
|
||||
info!("Loading ledger slots...");
|
||||
let blockstore_slots: Vec<_> = blockstore
|
||||
.slot_meta_iterator(starting_slot)
|
||||
.map_err(|err| {
|
||||
format!(
|
||||
"Failed to load entries starting from slot {}: {:?}",
|
||||
starting_slot, err
|
||||
)
|
||||
})?
|
||||
.filter_map(|(slot, _slot_meta)| {
|
||||
if let Some(ending_slot) = &ending_slot {
|
||||
if slot > *ending_slot {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
Some(slot)
|
||||
})
|
||||
.collect();
|
||||
|
||||
if blockstore_slots.is_empty() {
|
||||
info!("Ledger has no slots in the specified range");
|
||||
return Ok(());
|
||||
}
|
||||
info!(
|
||||
"Found {} slots in the range ({}, {})",
|
||||
blockstore_slots.len(),
|
||||
blockstore_slots.first().unwrap(),
|
||||
blockstore_slots.last().unwrap()
|
||||
);
|
||||
|
||||
let mut blockstore_slots_with_no_confirmed_block = HashSet::new();
|
||||
|
||||
// Gather the blocks that are already present in bigtable, by slot
|
||||
let bigtable_slots = {
|
||||
let mut bigtable_slots = vec![];
|
||||
let first_blockstore_slot = *blockstore_slots.first().unwrap();
|
||||
let last_blockstore_slot = *blockstore_slots.last().unwrap();
|
||||
info!(
|
||||
"Loading list of bigtable blocks between slots {} and {}...",
|
||||
first_blockstore_slot, last_blockstore_slot
|
||||
);
|
||||
|
||||
let mut start_slot = *blockstore_slots.first().unwrap();
|
||||
while start_slot <= last_blockstore_slot {
|
||||
let mut next_bigtable_slots = loop {
|
||||
match bigtable.get_confirmed_blocks(start_slot, 1000).await {
|
||||
Ok(slots) => break slots,
|
||||
Err(err) => {
|
||||
error!("get_confirmed_blocks for {} failed: {:?}", start_slot, err);
|
||||
// Consider exponential backoff...
|
||||
delay_for(Duration::from_secs(2)).await;
|
||||
}
|
||||
}
|
||||
};
|
||||
if next_bigtable_slots.is_empty() {
|
||||
break;
|
||||
}
|
||||
bigtable_slots.append(&mut next_bigtable_slots);
|
||||
start_slot = bigtable_slots.last().unwrap() + 1;
|
||||
}
|
||||
bigtable_slots
|
||||
.into_iter()
|
||||
.filter(|slot| *slot <= last_blockstore_slot)
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
// The blocks that still need to be uploaded is the difference between what's already in the
|
||||
// bigtable and what's in blockstore...
|
||||
let blocks_to_upload = {
|
||||
let blockstore_slots = blockstore_slots.iter().cloned().collect::<HashSet<_>>();
|
||||
let bigtable_slots = bigtable_slots.into_iter().collect::<HashSet<_>>();
|
||||
|
||||
let mut blocks_to_upload = blockstore_slots
|
||||
.difference(&blockstore_slots_with_no_confirmed_block)
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>()
|
||||
.difference(&bigtable_slots)
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
blocks_to_upload.sort();
|
||||
blocks_to_upload
|
||||
};
|
||||
|
||||
if blocks_to_upload.is_empty() {
|
||||
info!("No blocks need to be uploaded to bigtable");
|
||||
return Ok(());
|
||||
}
|
||||
info!(
|
||||
"{} blocks to be uploaded to the bucket in the range ({}, {})",
|
||||
blocks_to_upload.len(),
|
||||
blocks_to_upload.first().unwrap(),
|
||||
blocks_to_upload.last().unwrap()
|
||||
);
|
||||
|
||||
// Load the blocks out of blockstore in a separate thread to allow for concurrent block uploading
|
||||
let (_loader_thread, receiver) = {
|
||||
let (sender, receiver) = std::sync::mpsc::sync_channel(BLOCK_READ_AHEAD_DEPTH);
|
||||
(
|
||||
std::thread::spawn(move || {
|
||||
let mut measure = Measure::start("block loader thread");
|
||||
for (i, slot) in blocks_to_upload.iter().enumerate() {
|
||||
let _ = match blockstore.get_confirmed_block(
|
||||
*slot,
|
||||
Some(solana_transaction_status::UiTransactionEncoding::Binary),
|
||||
) {
|
||||
Ok(confirmed_block) => sender.send((*slot, Some(confirmed_block))),
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"Failed to get load confirmed block from slot {}: {:?}",
|
||||
slot, err
|
||||
);
|
||||
sender.send((*slot, None))
|
||||
}
|
||||
};
|
||||
|
||||
if i % NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL == 0 {
|
||||
info!(
|
||||
"{}% of blocks processed ({}/{})",
|
||||
i * 100 / blocks_to_upload.len(),
|
||||
i,
|
||||
blocks_to_upload.len()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
info!("{} to load {} blocks", measure, blocks_to_upload.len());
|
||||
}),
|
||||
receiver,
|
||||
)
|
||||
};
|
||||
|
||||
let mut failures = 0;
|
||||
use futures::stream::StreamExt;
|
||||
|
||||
let mut stream =
|
||||
tokio::stream::iter(receiver.into_iter()).chunks(NUM_BLOCKS_TO_UPLOAD_IN_PARALLEL);
|
||||
|
||||
while let Some(blocks) = stream.next().await {
|
||||
let mut measure_upload = Measure::start("Upload");
|
||||
let mut num_blocks = blocks.len();
|
||||
info!("Preparing the next {} blocks for upload", num_blocks);
|
||||
|
||||
let uploads = blocks.into_iter().filter_map(|(slot, block)| match block {
|
||||
None => {
|
||||
blockstore_slots_with_no_confirmed_block.insert(slot);
|
||||
num_blocks -= 1;
|
||||
None
|
||||
}
|
||||
Some(confirmed_block) => {
|
||||
if confirmed_block
|
||||
.transactions
|
||||
.iter()
|
||||
.any(|transaction| transaction.meta.is_none())
|
||||
{
|
||||
if allow_missing_metadata {
|
||||
info!("Transaction metadata missing from slot {}", slot);
|
||||
} else {
|
||||
panic!("Transaction metadata missing from slot {}", slot);
|
||||
}
|
||||
}
|
||||
Some(bigtable.upload_confirmed_block(slot, confirmed_block))
|
||||
}
|
||||
});
|
||||
|
||||
for result in futures::future::join_all(uploads).await {
|
||||
if result.is_err() {
|
||||
error!("upload_confirmed_block() failed: {:?}", result.err());
|
||||
failures += 1;
|
||||
}
|
||||
}
|
||||
|
||||
measure_upload.stop();
|
||||
info!("{} for {} blocks", measure_upload, num_blocks);
|
||||
}
|
||||
|
||||
measure.stop();
|
||||
info!("{}", measure);
|
||||
if failures > 0 {
|
||||
Err(format!("Incomplete upload, {} operations failed", failures).into())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn first_available_block() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let bigtable = solana_storage_bigtable::LedgerStorage::new(true).await?;
|
||||
match bigtable.get_first_available_block().await? {
|
||||
Some(block) => println!("{}", block),
|
||||
None => println!("No blocks available"),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn block(slot: Slot) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
|
||||
.await
|
||||
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
|
||||
|
||||
let block = bigtable
|
||||
.get_confirmed_block(slot, UiTransactionEncoding::Binary)
|
||||
.await?;
|
||||
|
||||
println!("Slot: {}", slot);
|
||||
println!("Parent Slot: {}", block.parent_slot);
|
||||
println!("Blockhash: {}", block.blockhash);
|
||||
println!("Previous Blockhash: {}", block.previous_blockhash);
|
||||
if block.block_time.is_some() {
|
||||
println!("Block Time: {:?}", block.block_time);
|
||||
}
|
||||
if !block.rewards.is_empty() {
|
||||
println!("Rewards: {:?}", block.rewards);
|
||||
}
|
||||
for (index, transaction_with_meta) in block.transactions.iter().enumerate() {
|
||||
println!("Transaction {}:", index);
|
||||
println_transaction(
|
||||
&transaction_with_meta.transaction.decode().unwrap(),
|
||||
&transaction_with_meta.meta,
|
||||
" ",
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn blocks(starting_slot: Slot, limit: usize) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
|
||||
.await
|
||||
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
|
||||
|
||||
let slots = bigtable.get_confirmed_blocks(starting_slot, limit).await?;
|
||||
println!("{:?}", slots);
|
||||
println!("{} blocks found", slots.len());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn confirm(signature: &Signature, verbose: bool) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let bigtable = solana_storage_bigtable::LedgerStorage::new(false)
|
||||
.await
|
||||
.map_err(|err| format!("Failed to connect to storage: {:?}", err))?;
|
||||
|
||||
let transaction_status = bigtable.get_signature_status(signature).await?;
|
||||
|
||||
if verbose {
|
||||
match bigtable
|
||||
.get_confirmed_transaction(signature, UiTransactionEncoding::Binary)
|
||||
.await
|
||||
{
|
||||
Ok(Some(confirmed_transaction)) => {
|
||||
println!(
|
||||
"\nTransaction executed in slot {}:",
|
||||
confirmed_transaction.slot
|
||||
);
|
||||
println_transaction(
|
||||
&confirmed_transaction
|
||||
.transaction
|
||||
.transaction
|
||||
.decode()
|
||||
.expect("Successful decode"),
|
||||
&confirmed_transaction.transaction.meta,
|
||||
" ",
|
||||
);
|
||||
}
|
||||
Ok(None) => println!("Confirmed transaction details not available"),
|
||||
Err(err) => println!("Unable to get confirmed transaction details: {}", err),
|
||||
}
|
||||
println!();
|
||||
}
|
||||
match transaction_status.status {
|
||||
Ok(_) => println!("Confirmed"),
|
||||
Err(err) => println!("Transaction failed: {}", err),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn transaction_history(
|
||||
address: &Pubkey,
|
||||
mut limit: usize,
|
||||
mut before: Option<Signature>,
|
||||
verbose: bool,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let bigtable = solana_storage_bigtable::LedgerStorage::new(true).await?;
|
||||
|
||||
while limit > 0 {
|
||||
let results = bigtable
|
||||
.get_confirmed_signatures_for_address(address, before.as_ref(), limit.min(1000))
|
||||
.await?;
|
||||
|
||||
if results.is_empty() {
|
||||
break;
|
||||
}
|
||||
before = Some(results.last().unwrap().signature);
|
||||
assert!(limit >= results.len());
|
||||
limit = limit.saturating_sub(results.len());
|
||||
|
||||
for result in results {
|
||||
if verbose {
|
||||
println!(
|
||||
"{}, slot={}, memo=\"{}\", status={}",
|
||||
result.signature,
|
||||
result.slot,
|
||||
result.memo.unwrap_or_else(|| "".to_string()),
|
||||
match result.err {
|
||||
None => "Confirmed".to_string(),
|
||||
Some(err) => format!("Failed: {:?}", err),
|
||||
}
|
||||
);
|
||||
} else {
|
||||
println!("{}", result.signature);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub trait BigTableSubCommand {
|
||||
fn bigtable_subcommand(self) -> Self;
|
||||
}
|
||||
|
||||
impl BigTableSubCommand for App<'_, '_> {
|
||||
fn bigtable_subcommand(self) -> Self {
|
||||
self.subcommand(
|
||||
SubCommand::with_name("bigtable")
|
||||
.about("Ledger data on a BigTable instance")
|
||||
.setting(AppSettings::ArgRequiredElseHelp)
|
||||
.subcommand(
|
||||
SubCommand::with_name("upload")
|
||||
.about("Upload the ledger to BigTable")
|
||||
.arg(
|
||||
Arg::with_name("starting_slot")
|
||||
.long("starting-slot")
|
||||
.validator(is_slot)
|
||||
.value_name("SLOT")
|
||||
.takes_value(true)
|
||||
.index(1)
|
||||
.help(
|
||||
"Start uploading at this slot [default: first available slot]",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("ending_slot")
|
||||
.long("ending-slot")
|
||||
.validator(is_slot)
|
||||
.value_name("SLOT")
|
||||
.takes_value(true)
|
||||
.index(2)
|
||||
.help("Stop uploading at this slot [default: last available slot]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("allow_missing_metadata")
|
||||
.long("allow-missing-metadata")
|
||||
.takes_value(false)
|
||||
.help("Don't panic if transaction metadata is missing"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("first-available-block")
|
||||
.about("Get the first available block in the storage"),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("blocks")
|
||||
.about("Get a list of slots with confirmed blocks for the given range")
|
||||
.arg(
|
||||
Arg::with_name("starting_slot")
|
||||
.long("starting-slot")
|
||||
.validator(is_slot)
|
||||
.value_name("SLOT")
|
||||
.takes_value(true)
|
||||
.index(1)
|
||||
.required(true)
|
||||
.default_value("0")
|
||||
.help("Start listing at this slot"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("limit")
|
||||
.long("limit")
|
||||
.validator(is_slot)
|
||||
.value_name("LIMIT")
|
||||
.takes_value(true)
|
||||
.index(2)
|
||||
.required(true)
|
||||
.default_value("1000")
|
||||
.help("Maximum number of slots to return"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("block")
|
||||
.about("Get a confirmed block")
|
||||
.arg(
|
||||
Arg::with_name("slot")
|
||||
.long("slot")
|
||||
.validator(is_slot)
|
||||
.value_name("SLOT")
|
||||
.takes_value(true)
|
||||
.index(1)
|
||||
.required(true),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("confirm")
|
||||
.about("Confirm transaction by signature")
|
||||
.arg(
|
||||
Arg::with_name("signature")
|
||||
.long("signature")
|
||||
.value_name("TRANSACTION_SIGNATURE")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.index(1)
|
||||
.help("The transaction signature to confirm"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("verbose")
|
||||
.short("v")
|
||||
.long("verbose")
|
||||
.takes_value(false)
|
||||
.help("Show additional information"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-history")
|
||||
.about(
|
||||
"Show historical transactions affecting the given address \
|
||||
from newest to oldest",
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("address")
|
||||
.index(1)
|
||||
.value_name("ADDRESS")
|
||||
.required(true)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Account address"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("limit")
|
||||
.long("limit")
|
||||
.takes_value(true)
|
||||
.value_name("LIMIT")
|
||||
.validator(is_slot)
|
||||
.index(2)
|
||||
.default_value("18446744073709551615")
|
||||
.help("Maximum number of transaction signatures to return"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("before")
|
||||
.long("before")
|
||||
.value_name("TRANSACTION_SIGNATURE")
|
||||
.takes_value(true)
|
||||
.help("Start with the first signature older than this one"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("verbose")
|
||||
.short("v")
|
||||
.long("verbose")
|
||||
.takes_value(false)
|
||||
.help("Show additional information"),
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
|
||||
let mut runtime = tokio::runtime::Runtime::new().unwrap();
|
||||
|
||||
let future = match matches.subcommand() {
|
||||
("upload", Some(arg_matches)) => {
|
||||
let starting_slot = value_t!(arg_matches, "starting_slot", Slot).unwrap_or(0);
|
||||
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).ok();
|
||||
let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata");
|
||||
let blockstore =
|
||||
crate::open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
|
||||
|
||||
runtime.block_on(upload(
|
||||
blockstore,
|
||||
starting_slot,
|
||||
ending_slot,
|
||||
allow_missing_metadata,
|
||||
))
|
||||
}
|
||||
("first-available-block", Some(_arg_matches)) => runtime.block_on(first_available_block()),
|
||||
("block", Some(arg_matches)) => {
|
||||
let slot = value_t_or_exit!(arg_matches, "slot", Slot);
|
||||
runtime.block_on(block(slot))
|
||||
}
|
||||
("blocks", Some(arg_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
let limit = value_t_or_exit!(arg_matches, "limit", usize);
|
||||
|
||||
runtime.block_on(blocks(starting_slot, limit))
|
||||
}
|
||||
("confirm", Some(arg_matches)) => {
|
||||
let signature = arg_matches
|
||||
.value_of("signature")
|
||||
.unwrap()
|
||||
.parse()
|
||||
.expect("Invalid signature");
|
||||
let verbose = arg_matches.is_present("verbose");
|
||||
|
||||
runtime.block_on(confirm(&signature, verbose))
|
||||
}
|
||||
("transaction-history", Some(arg_matches)) => {
|
||||
let address = pubkey_of(arg_matches, "address").unwrap();
|
||||
let limit = value_t_or_exit!(arg_matches, "limit", usize);
|
||||
let before = arg_matches
|
||||
.value_of("before")
|
||||
.map(|signature| signature.parse().expect("Invalid signature"));
|
||||
let verbose = arg_matches.is_present("verbose");
|
||||
|
||||
runtime.block_on(transaction_history(&address, limit, before, verbose))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
future.unwrap_or_else(|err| {
|
||||
eprintln!("{:?}", err);
|
||||
exit(1);
|
||||
});
|
||||
}
|
@@ -2,6 +2,7 @@ use clap::{
|
||||
crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, Arg,
|
||||
ArgMatches, SubCommand,
|
||||
};
|
||||
use log::*;
|
||||
use regex::Regex;
|
||||
use serde_json::json;
|
||||
use solana_clap_utils::input_validators::{is_parsable, is_slot};
|
||||
@@ -11,7 +12,7 @@ use solana_ledger::{
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
bank_forks_utils,
|
||||
blockstore::Blockstore,
|
||||
blockstore_db::{self, AccessType, Column, Database},
|
||||
blockstore_db::{self, AccessType, BlockstoreRecoveryMode, Column, Database},
|
||||
blockstore_processor::ProcessOptions,
|
||||
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
|
||||
rooted_slot_iterator::RootedSlotIterator,
|
||||
@@ -40,7 +41,8 @@ use std::{
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use log::*;
|
||||
mod bigtable;
|
||||
use bigtable::*;
|
||||
|
||||
#[derive(PartialEq)]
|
||||
enum LedgerOutputMethod {
|
||||
@@ -530,8 +532,12 @@ fn analyze_storage(database: &Database) -> Result<(), String> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn open_blockstore(ledger_path: &Path, access_type: AccessType) -> Blockstore {
|
||||
match Blockstore::open_with_access_type(ledger_path, access_type) {
|
||||
fn open_blockstore(
|
||||
ledger_path: &Path,
|
||||
access_type: AccessType,
|
||||
wal_recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
) -> Blockstore {
|
||||
match Blockstore::open_with_access_type(ledger_path, access_type, wal_recovery_mode) {
|
||||
Ok(blockstore) => blockstore,
|
||||
Err(err) => {
|
||||
eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);
|
||||
@@ -541,7 +547,7 @@ fn open_blockstore(ledger_path: &Path, access_type: AccessType) -> Blockstore {
|
||||
}
|
||||
|
||||
fn open_database(ledger_path: &Path, access_type: AccessType) -> Database {
|
||||
match Database::open(&ledger_path.join("rocksdb"), access_type) {
|
||||
match Database::open(&ledger_path.join("rocksdb"), access_type, None) {
|
||||
Ok(database) => database,
|
||||
Err(err) => {
|
||||
eprintln!("Unable to read the Ledger rocksdb: {:?}", err);
|
||||
@@ -565,8 +571,9 @@ fn load_bank_forks(
|
||||
genesis_config: &GenesisConfig,
|
||||
process_options: ProcessOptions,
|
||||
access_type: AccessType,
|
||||
wal_recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
) -> bank_forks_utils::LoadResult {
|
||||
let blockstore = open_blockstore(&ledger_path, access_type);
|
||||
let blockstore = open_blockstore(&ledger_path, access_type, wal_recovery_mode);
|
||||
let snapshot_path = ledger_path.clone().join(if blockstore.is_primary_access() {
|
||||
"snapshot"
|
||||
} else {
|
||||
@@ -704,6 +711,22 @@ fn main() {
|
||||
.global(true)
|
||||
.help("Use DIR for ledger location"),
|
||||
)
|
||||
.bigtable_subcommand()
|
||||
.arg(
|
||||
Arg::with_name("wal_recovery_mode")
|
||||
.long("wal-recovery-mode")
|
||||
.value_name("MODE")
|
||||
.takes_value(true)
|
||||
.global(true)
|
||||
.possible_values(&[
|
||||
"tolerate_corrupted_tail_records",
|
||||
"absolute_consistency",
|
||||
"point_in_time",
|
||||
"skip_any_corrupted_record"])
|
||||
.help(
|
||||
"Mode to recovery the ledger db write ahead log."
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("print")
|
||||
.about("Print the ledger")
|
||||
@@ -974,12 +997,21 @@ fn main() {
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let wal_recovery_mode = matches
|
||||
.value_of("wal_recovery_mode")
|
||||
.map(BlockstoreRecoveryMode::from);
|
||||
|
||||
match matches.subcommand() {
|
||||
("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches),
|
||||
("print", Some(arg_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
|
||||
output_ledger(
|
||||
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary),
|
||||
open_blockstore(
|
||||
&ledger_path,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
),
|
||||
starting_slot,
|
||||
allow_dead_slots,
|
||||
LedgerOutputMethod::Print,
|
||||
@@ -1008,6 +1040,7 @@ fn main() {
|
||||
&genesis_config,
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
) {
|
||||
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
|
||||
println!(
|
||||
@@ -1027,7 +1060,11 @@ fn main() {
|
||||
("slot", Some(arg_matches)) => {
|
||||
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
|
||||
let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
|
||||
let blockstore = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary);
|
||||
let blockstore = open_blockstore(
|
||||
&ledger_path,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
);
|
||||
for slot in slots {
|
||||
println!("Slot {}", slot);
|
||||
if let Err(err) = output_slot(
|
||||
@@ -1044,7 +1081,11 @@ fn main() {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
let allow_dead_slots = arg_matches.is_present("allow_dead_slots");
|
||||
output_ledger(
|
||||
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary),
|
||||
open_blockstore(
|
||||
&ledger_path,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
),
|
||||
starting_slot,
|
||||
allow_dead_slots,
|
||||
LedgerOutputMethod::Json,
|
||||
@@ -1052,7 +1093,8 @@ fn main() {
|
||||
}
|
||||
("set-dead-slot", Some(arg_matches)) => {
|
||||
let slots = values_t_or_exit!(arg_matches, "slots", Slot);
|
||||
let blockstore = open_blockstore(&ledger_path, AccessType::PrimaryOnly);
|
||||
let blockstore =
|
||||
open_blockstore(&ledger_path, AccessType::PrimaryOnly, wal_recovery_mode);
|
||||
for slot in slots {
|
||||
match blockstore.set_dead_slot(slot) {
|
||||
Ok(_) => println!("Slot {} dead", slot),
|
||||
@@ -1063,7 +1105,11 @@ fn main() {
|
||||
("parse_full_frozen", Some(arg_matches)) => {
|
||||
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
|
||||
let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot);
|
||||
let blockstore = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary);
|
||||
let blockstore = open_blockstore(
|
||||
&ledger_path,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
);
|
||||
let mut ancestors = BTreeSet::new();
|
||||
if blockstore.meta(ending_slot).unwrap().is_none() {
|
||||
panic!("Ending slot doesn't exist");
|
||||
@@ -1142,6 +1188,7 @@ fn main() {
|
||||
&open_genesis_config_by(&ledger_path, arg_matches),
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Ledger verification failed: {:?}", err);
|
||||
@@ -1165,6 +1212,7 @@ fn main() {
|
||||
&open_genesis_config_by(&ledger_path, arg_matches),
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
) {
|
||||
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
|
||||
let dot = graph_forks(&bank_forks, arg_matches.is_present("include_all_votes"));
|
||||
@@ -1216,6 +1264,7 @@ fn main() {
|
||||
&genesis_config,
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
) {
|
||||
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
|
||||
let bank = bank_forks
|
||||
@@ -1310,6 +1359,7 @@ fn main() {
|
||||
&genesis_config,
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
) {
|
||||
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
|
||||
let slot = bank_forks.working_bank().slot();
|
||||
@@ -1358,6 +1408,7 @@ fn main() {
|
||||
&genesis_config,
|
||||
process_options,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
) {
|
||||
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
|
||||
let slot = bank_forks.working_bank().slot();
|
||||
@@ -1468,12 +1519,17 @@ fn main() {
|
||||
("purge", Some(arg_matches)) => {
|
||||
let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot);
|
||||
let end_slot = value_t_or_exit!(arg_matches, "end_slot", Slot);
|
||||
let blockstore = open_blockstore(&ledger_path, AccessType::PrimaryOnly);
|
||||
let blockstore =
|
||||
open_blockstore(&ledger_path, AccessType::PrimaryOnly, wal_recovery_mode);
|
||||
blockstore.purge_and_compact_slots(start_slot, end_slot);
|
||||
blockstore.purge_from_next_slots(start_slot, end_slot);
|
||||
}
|
||||
("list-roots", Some(arg_matches)) => {
|
||||
let blockstore = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary);
|
||||
let blockstore = open_blockstore(
|
||||
&ledger_path,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
);
|
||||
let max_height = if let Some(height) = arg_matches.value_of("max_height") {
|
||||
usize::from_str(height).expect("Maximum height must be a number")
|
||||
} else {
|
||||
@@ -1526,8 +1582,12 @@ fn main() {
|
||||
});
|
||||
}
|
||||
("bounds", Some(arg_matches)) => {
|
||||
match open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary)
|
||||
.slot_meta_iterator(0)
|
||||
match open_blockstore(
|
||||
&ledger_path,
|
||||
AccessType::TryPrimaryThenSecondary,
|
||||
wal_recovery_mode,
|
||||
)
|
||||
.slot_meta_iterator(0)
|
||||
{
|
||||
Ok(metas) => {
|
||||
let all = arg_matches.is_present("all");
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-ledger"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana ledger"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -32,19 +32,19 @@ reed-solomon-erasure = { version = "4.0.2", features = ["simd-accel"] }
|
||||
regex = "1.3.7"
|
||||
serde = "1.0.110"
|
||||
serde_bytes = "0.11.4"
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.14" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.14" }
|
||||
solana-logger = { path = "../logger", version = "1.2.14" }
|
||||
solana-measure = { path = "../measure", version = "1.2.14" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.14" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.14" }
|
||||
solana-perf = { path = "../perf", version = "1.2.14" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.2.23" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.23" }
|
||||
solana-logger = { path = "../logger", version = "1.2.23" }
|
||||
solana-measure = { path = "../measure", version = "1.2.23" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.2.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.2.23" }
|
||||
solana-perf = { path = "../perf", version = "1.2.23" }
|
||||
ed25519-dalek = "1.0.0-pre.3"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.14" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.14" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.2.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
|
||||
symlink = "0.1.0"
|
||||
tar = "0.4.28"
|
||||
thiserror = "1.0"
|
||||
@@ -62,7 +62,7 @@ features = ["lz4"]
|
||||
[dev-dependencies]
|
||||
assert_matches = "1.3.0"
|
||||
matches = "0.1.6"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.14" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.2.23" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@@ -4,8 +4,8 @@
|
||||
pub use crate::{blockstore_db::BlockstoreError, blockstore_meta::SlotMeta};
|
||||
use crate::{
|
||||
blockstore_db::{
|
||||
columns as cf, AccessType, Column, Database, IteratorDirection, IteratorMode, LedgerColumn,
|
||||
Result, WriteBatch,
|
||||
columns as cf, AccessType, BlockstoreRecoveryMode, Column, Database, IteratorDirection,
|
||||
IteratorMode, LedgerColumn, Result, WriteBatch,
|
||||
},
|
||||
blockstore_meta::*,
|
||||
entry::{create_ticks, Entry},
|
||||
@@ -37,14 +37,15 @@ use solana_sdk::{
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, EncodedTransaction, Rewards, TransactionStatusMeta,
|
||||
TransactionWithStatusMeta, UiTransactionEncoding, UiTransactionStatusMeta,
|
||||
ConfirmedBlock, ConfirmedTransaction, ConfirmedTransactionStatusWithSignature,
|
||||
EncodedTransaction, Rewards, TransactionStatusMeta, TransactionWithStatusMeta,
|
||||
UiTransactionEncoding, UiTransactionStatusMeta,
|
||||
};
|
||||
use solana_vote_program::{vote_instruction::VoteInstruction, vote_state::TIMESTAMP_SLOT_INTERVAL};
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
cmp,
|
||||
collections::HashMap,
|
||||
collections::{HashMap, HashSet},
|
||||
fs,
|
||||
io::{Error as IOError, ErrorKind},
|
||||
path::{Path, PathBuf},
|
||||
@@ -231,17 +232,22 @@ impl Blockstore {
|
||||
|
||||
/// Opens a Ledger in directory, provides "infinite" window of shreds
|
||||
pub fn open(ledger_path: &Path) -> Result<Blockstore> {
|
||||
Self::do_open(ledger_path, AccessType::PrimaryOnly)
|
||||
Self::do_open(ledger_path, AccessType::PrimaryOnly, None)
|
||||
}
|
||||
|
||||
pub fn open_with_access_type(
|
||||
ledger_path: &Path,
|
||||
access_type: AccessType,
|
||||
recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
) -> Result<Blockstore> {
|
||||
Self::do_open(ledger_path, access_type)
|
||||
Self::do_open(ledger_path, access_type, recovery_mode)
|
||||
}
|
||||
|
||||
fn do_open(ledger_path: &Path, access_type: AccessType) -> Result<Blockstore> {
|
||||
fn do_open(
|
||||
ledger_path: &Path,
|
||||
access_type: AccessType,
|
||||
recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
) -> Result<Blockstore> {
|
||||
fs::create_dir_all(&ledger_path)?;
|
||||
let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
|
||||
|
||||
@@ -250,7 +256,7 @@ impl Blockstore {
|
||||
// Open the database
|
||||
let mut measure = Measure::start("open");
|
||||
info!("Opening database at {:?}", blockstore_path);
|
||||
let db = Database::open(&blockstore_path, access_type)?;
|
||||
let db = Database::open(&blockstore_path, access_type, recovery_mode)?;
|
||||
|
||||
// Create the metadata column family
|
||||
let meta_cf = db.column();
|
||||
@@ -331,8 +337,10 @@ impl Blockstore {
|
||||
|
||||
pub fn open_with_signal(
|
||||
ledger_path: &Path,
|
||||
recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
) -> Result<(Self, Receiver<bool>, CompletedSlotsReceiver)> {
|
||||
let mut blockstore = Self::open_with_access_type(ledger_path, AccessType::PrimaryOnly)?;
|
||||
let mut blockstore =
|
||||
Self::open_with_access_type(ledger_path, AccessType::PrimaryOnly, recovery_mode)?;
|
||||
let (signal_sender, signal_receiver) = sync_channel(1);
|
||||
let (completed_slots_sender, completed_slots_receiver) =
|
||||
sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL);
|
||||
@@ -1651,7 +1659,7 @@ impl Blockstore {
|
||||
iterator
|
||||
.map(|transaction| {
|
||||
let signature = transaction.signatures[0];
|
||||
let encoded_transaction = EncodedTransaction::encode(transaction, encoding.clone());
|
||||
let encoded_transaction = EncodedTransaction::encode(transaction, encoding);
|
||||
TransactionWithStatusMeta {
|
||||
transaction: encoded_transaction,
|
||||
meta: self
|
||||
@@ -1663,6 +1671,10 @@ impl Blockstore {
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Initializes the TransactionStatusIndex column family with two records, `0` and `1`,
|
||||
/// which are used as the primary index for entries in the TransactionStatus and
|
||||
/// AddressSignatures columns. At any given time, one primary index is active (ie. new records
|
||||
/// are stored under this index), the other is frozen.
|
||||
fn initialize_transaction_status_index(&self) -> Result<()> {
|
||||
self.transaction_status_index_cf
|
||||
.put(0, &TransactionStatusIndexMeta::default())?;
|
||||
@@ -1679,6 +1691,8 @@ impl Blockstore {
|
||||
)
|
||||
}
|
||||
|
||||
/// Toggles the active primary index between `0` and `1`, and clears the stored max-slot of the
|
||||
/// frozen index in preparation for pruning.
|
||||
fn toggle_transaction_status_index(
|
||||
&self,
|
||||
batch: &mut WriteBatch,
|
||||
@@ -1796,9 +1810,9 @@ impl Blockstore {
            (transaction_status_cf_primary_index, signature, 0),
            IteratorDirection::Forward,
        ))?;
        for ((_, sig, slot), data) in index_iterator {
        for ((i, sig, slot), data) in index_iterator {
            counter += 1;
            if sig != signature {
            if i != transaction_status_cf_primary_index || sig != signature {
                break;
            }
            if self.is_root(slot) {
@@ -1834,8 +1848,9 @@ impl Blockstore {
            ("method", "get_confirmed_transaction".to_string(), String)
        );
        if let Some((slot, status)) = self.get_transaction_status(signature.clone())? {
            let transaction = self.find_transaction_in_slot(slot, signature)?
                .expect("Transaction to exist in slot entries if it exists in statuses and hasn't been cleaned up");
            let transaction = self
                .find_transaction_in_slot(slot, signature)?
                .ok_or(BlockstoreError::TransactionStatusSlotMismatch)?; // Should not happen
            let encoding = encoding.unwrap_or(UiTransactionEncoding::Json);
            let encoded_transaction = EncodedTransaction::encode(transaction, encoding);
            Ok(Some(ConfirmedTransaction {
@@ -1864,7 +1879,8 @@ impl Blockstore {
    }

    // Returns all cached signatures for an address, ordered by slot that the transaction was
    // processed in
    // processed in. Within each slot the transactions will be ordered by signature, and NOT by
    // the order in which the transactions exist in the block
    fn find_address_signatures(
        &self,
        pubkey: Pubkey,
@@ -1892,7 +1908,7 @@ impl Blockstore {
                }
            }
        }
        signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        signatures.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1)));
        Ok(signatures)
    }

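Widening the sort key to (slot, then signature) makes find_address_signatures deterministic when one address appears several times in the same slot. A minimal standalone example of the same sort_by pattern, with plain integers standing in for Slot and Signature (illustrative only, not part of the diff):

```rust
fn main() {
    // (slot, signature) pairs; two entries share slot 7.
    let mut sigs: Vec<(u64, u64)> = vec![(7, 9), (3, 4), (7, 2)];
    // Order by slot first, then by signature to break ties deterministically,
    // mirroring `.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1))` above.
    sigs.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap().then(a.1.cmp(&b.1)));
    assert_eq!(sigs, vec![(3, 4), (7, 2), (7, 9)]);
}
```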
@@ -1914,6 +1930,217 @@ impl Blockstore {
            .map(|signatures| signatures.iter().map(|(_, signature)| *signature).collect())
    }

    pub fn get_confirmed_signatures_for_address2(
        &self,
        address: Pubkey,
        highest_confirmed_root: Slot,
        before: Option<Signature>,
        limit: usize,
    ) -> Result<Vec<ConfirmedTransactionStatusWithSignature>> {
        datapoint_info!(
            "blockstore-rpc-api",
            (
                "method",
                "get_confirmed_signatures_for_address2".to_string(),
                String
            )
        );

        // Figure the `slot` to start listing signatures at, based on the ledger location of the
        // `before` signature if present. Also generate a HashSet of signatures that should
        // be excluded from the results.
        let mut get_before_slot_timer = Measure::start("get_before_slot_timer");
        let (slot, mut excluded_signatures) = match before {
            None => (highest_confirmed_root, None),
            Some(before) => {
                let transaction_status = self.get_transaction_status(before)?;
                match transaction_status {
                    None => return Ok(vec![]),
                    Some((slot, _)) => {
                        let confirmed_block = self
                            .get_confirmed_block(slot, Some(UiTransactionEncoding::Binary))
                            .map_err(|err| {
                                BlockstoreError::IO(IOError::new(
                                    ErrorKind::Other,
                                    format!("Unable to get confirmed block: {}", err),
                                ))
                            })?;

                        // Load all signatures for the block
                        let mut slot_signatures: Vec<_> = confirmed_block
                            .transactions
                            .iter()
                            .filter_map(|transaction_with_meta| {
                                if let Some(transaction) =
                                    transaction_with_meta.transaction.decode()
                                {
                                    transaction.signatures.into_iter().next()
                                } else {
                                    None
                                }
                            })
                            .collect();

                        // Sort signatures as a way to ensure a stable ordering within a slot, as
                        // the AddressSignatures column is ordered by signatures within a slot,
                        // not by block ordering
                        slot_signatures.sort();
                        slot_signatures.reverse();

                        if let Some(pos) = slot_signatures.iter().position(|&x| x == before) {
                            slot_signatures.truncate(pos + 1);
                        }

                        (
                            slot,
                            Some(slot_signatures.into_iter().collect::<HashSet<_>>()),
                        )
                    }
                }
            }
        };
        get_before_slot_timer.stop();

        // Fetch the list of signatures that affect the given address
        let first_available_block = self.get_first_available_block()?;
        let mut address_signatures = vec![];

        // Get signatures in `slot`
        let mut get_initial_slot_timer = Measure::start("get_initial_slot_timer");
        let mut signatures = self.find_address_signatures(address, slot, slot)?;
        signatures.reverse();
        if let Some(excluded_signatures) = excluded_signatures.take() {
            address_signatures.extend(
                signatures
                    .into_iter()
                    .filter(|(_, signature)| !excluded_signatures.contains(&signature)),
            )
        } else {
            address_signatures.append(&mut signatures);
        }
        get_initial_slot_timer.stop();

        // Check the active_transaction_status_index to see if it contains slot. If so, start with
        // that index, as it will contain higher slots
        let starting_primary_index = *self.active_transaction_status_index.read().unwrap();
        let next_primary_index = if starting_primary_index == 0 { 1 } else { 0 };
        let next_max_slot = self
            .transaction_status_index_cf
            .get(next_primary_index)?
            .unwrap()
            .max_slot;

        let mut starting_primary_index_iter_timer = Measure::start("starting_primary_index_iter");
        if slot > next_max_slot {
            let mut starting_iterator = self.address_signatures_cf.iter(IteratorMode::From(
                (starting_primary_index, address, slot, Signature::default()),
                IteratorDirection::Reverse,
            ))?;

            // Iterate through starting_iterator until limit is reached
            while address_signatures.len() < limit {
                if let Some(((i, key_address, slot, signature), _)) = starting_iterator.next() {
                    if slot == next_max_slot {
                        break;
                    }
                    if i == starting_primary_index
                        && key_address == address
                        && slot >= first_available_block
                    {
                        if self.is_root(slot) {
                            address_signatures.push((slot, signature));
                        }
                        continue;
                    }
                }
                break;
            }

            // Handle slots that cross primary indexes
            let mut signatures =
                self.find_address_signatures(address, next_max_slot, next_max_slot)?;
            signatures.reverse();
            address_signatures.append(&mut signatures);
        }
        starting_primary_index_iter_timer.stop();

        // Iterate through next_iterator until limit is reached
        let mut next_primary_index_iter_timer = Measure::start("next_primary_index_iter_timer");
        let mut next_iterator = self.address_signatures_cf.iter(IteratorMode::From(
            (next_primary_index, address, slot, Signature::default()),
            IteratorDirection::Reverse,
        ))?;
        while address_signatures.len() < limit {
            if let Some(((i, key_address, slot, signature), _)) = next_iterator.next() {
                // Skip next_max_slot, which is already included
                if slot == next_max_slot {
                    continue;
                }
                if i == next_primary_index
                    && key_address == address
                    && slot >= first_available_block
                {
                    if self.is_root(slot) {
                        address_signatures.push((slot, signature));
                    }
                    continue;
                }
            }
            break;
        }
        next_primary_index_iter_timer.stop();
        address_signatures.truncate(limit);

        // Fill in the status information for each found transaction
        let mut get_status_info_timer = Measure::start("get_status_info_timer");
        let mut infos = vec![];
        for (slot, signature) in address_signatures.into_iter() {
            let transaction_status = self.get_transaction_status(signature)?;
            let err = match transaction_status {
                None => None,
                Some((_slot, status)) => status.status.err(),
            };
            infos.push(ConfirmedTransactionStatusWithSignature {
                signature,
                slot,
                err,
                memo: None,
            });
        }
        get_status_info_timer.stop();

        datapoint_info!(
            "blockstore-get-conf-sigs-for-addr-2",
            (
                "get_before_slot_us",
                get_before_slot_timer.as_us() as i64,
                i64
            ),
            (
                "get_initial_slot_us",
                get_initial_slot_timer.as_us() as i64,
                i64
            ),
            (
                "starting_primary_index_iter_us",
                starting_primary_index_iter_timer.as_us() as i64,
                i64
            ),
            (
                "next_primary_index_iter_us",
                next_primary_index_iter_timer.as_us() as i64,
                i64
            ),
            (
                "get_status_info_us",
                get_status_info_timer.as_us() as i64,
                i64
            )
        );

        Ok(infos)
    }

    pub fn read_rewards(&self, index: Slot) -> Result<Option<Rewards>> {
        self.rewards_cf.get(index)
    }
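get_confirmed_signatures_for_address2 above pages backwards from highest_confirmed_root, using `before` as the cursor. A hedged usage sketch (not part of the diff; `blockstore`, `address`, and `highest_confirmed_root` are assumed to exist in scope, and 1000 is an arbitrary page size), mirroring the pattern exercised by the new test further down:

```rust
// Illustrative pagination loop over the new API.
let mut before = None;
let mut history = vec![];
loop {
    let page = blockstore
        .get_confirmed_signatures_for_address2(address, highest_confirmed_root, before, 1000)
        .unwrap();
    if page.is_empty() {
        break;
    }
    // The last entry of each page becomes the `before` cursor for the next one.
    before = Some(page.last().unwrap().signature);
    history.extend(page);
}
```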
@@ -2719,7 +2946,7 @@ pub fn create_new_ledger(
    genesis_config.write(&ledger_path)?;

    // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
    let blockstore = Blockstore::open_with_access_type(ledger_path, access_type)?;
    let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None)?;
    let ticks_per_slot = genesis_config.ticks_per_slot;
    let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
    let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
@@ -3656,7 +3883,7 @@ pub mod tests {
    pub fn test_new_shreds_signal() {
        // Initialize ledger
        let ledger_path = get_tmp_ledger_path!();
        let (ledger, recvr, _) = Blockstore::open_with_signal(&ledger_path).unwrap();
        let (ledger, recvr, _) = Blockstore::open_with_signal(&ledger_path, None).unwrap();
        let ledger = Arc::new(ledger);

        let entries_per_slot = 50;
@@ -3736,7 +3963,7 @@ pub mod tests {
    pub fn test_completed_shreds_signal() {
        // Initialize ledger
        let ledger_path = get_tmp_ledger_path!();
        let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path).unwrap();
        let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path, None).unwrap();
        let ledger = Arc::new(ledger);

        let entries_per_slot = 10;
@@ -3758,7 +3985,7 @@ pub mod tests {
    pub fn test_completed_shreds_signal_orphans() {
        // Initialize ledger
        let ledger_path = get_tmp_ledger_path!();
        let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path).unwrap();
        let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path, None).unwrap();
        let ledger = Arc::new(ledger);

        let entries_per_slot = 10;
@@ -3798,7 +4025,7 @@ pub mod tests {
    pub fn test_completed_shreds_signal_many() {
        // Initialize ledger
        let ledger_path = get_tmp_ledger_path!();
        let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path).unwrap();
        let (ledger, _, recvr) = Blockstore::open_with_signal(&ledger_path, None).unwrap();
        let ledger = Arc::new(ledger);

        let entries_per_slot = 10;
@@ -5942,6 +6169,19 @@ pub mod tests {
        }
    }

    #[test]
    fn test_empty_transaction_status() {
        let blockstore_path = get_tmp_ledger_path!();
        let blockstore = Blockstore::open(&blockstore_path).unwrap();
        blockstore.set_roots(&[0]).unwrap();
        assert_eq!(
            blockstore
                .get_confirmed_transaction(Signature::default(), None)
                .unwrap(),
            None
        );
    }

    #[test]
    fn test_get_confirmed_signatures_for_address() {
        let blockstore_path = get_tmp_ledger_path!();
@@ -6082,6 +6322,173 @@ pub mod tests {
        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_get_confirmed_signatures_for_address2() {
        let blockstore_path = get_tmp_ledger_path!();
        {
            let blockstore = Blockstore::open(&blockstore_path).unwrap();

            fn make_slot_entries_with_transaction_addresses(addresses: &[Pubkey]) -> Vec<Entry> {
                let mut entries: Vec<Entry> = Vec::new();
                for address in addresses {
                    let transaction = Transaction::new_with_compiled_instructions(
                        &[&Keypair::new()],
                        &[*address],
                        Hash::default(),
                        vec![Pubkey::new_rand()],
                        vec![CompiledInstruction::new(1, &(), vec![0])],
                    );
                    entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
                    let mut tick = create_ticks(1, 0, hash(&serialize(address).unwrap()));
                    entries.append(&mut tick);
                }
                entries
            }

            let address0 = Pubkey::new_rand();
            let address1 = Pubkey::new_rand();

            for slot in 2..=8 {
                let entries = make_slot_entries_with_transaction_addresses(&[
                    address0, address1, address0, address1,
                ]);
                let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
                blockstore.insert_shreds(shreds, None, false).unwrap();

                for (i, entry) in entries.iter().enumerate() {
                    if slot == 4 && i == 2 {
                        // Purge to freeze index 0 and write address-signatures in new primary index
                        blockstore.run_purge(0, 1, PurgeType::PrimaryIndex).unwrap();
                    }
                    for transaction in &entry.transactions {
                        assert_eq!(transaction.signatures.len(), 1);
                        blockstore
                            .write_transaction_status(
                                slot,
                                transaction.signatures[0],
                                transaction.message.account_keys.iter().collect(),
                                vec![],
                                &TransactionStatusMeta::default(),
                            )
                            .unwrap();
                    }
                }
            }
            // Leave one slot unrooted to test only returns confirmed signatures
            blockstore.set_roots(&[1, 2, 4, 5, 6, 7, 8]).unwrap();
            let highest_confirmed_root = 8;

            // Fetch all signatures for address 0 at once...
            let all0 = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_root,
                    None,
                    usize::MAX,
                )
                .unwrap();
            assert_eq!(all0.len(), 12);

            // Fetch all signatures for address 1 at once...
            let all1 = blockstore
                .get_confirmed_signatures_for_address2(
                    address1,
                    highest_confirmed_root,
                    None,
                    usize::MAX,
                )
                .unwrap();
            assert_eq!(all1.len(), 12);

            assert!(all0 != all1);

            // Fetch all signatures for address 0 individually
            for i in 0..all0.len() {
                let results = blockstore
                    .get_confirmed_signatures_for_address2(
                        address0,
                        highest_confirmed_root,
                        if i == 0 {
                            None
                        } else {
                            Some(all0[i - 1].signature)
                        },
                        1,
                    )
                    .unwrap();
                assert_eq!(results.len(), 1);
                assert_eq!(results[0], all0[i], "Unexpected result for {}", i);
            }

            assert!(blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_root,
                    Some(all0[all0.len() - 1].signature),
                    1,
                )
                .unwrap()
                .is_empty());

            // Fetch all signatures for address 0, three at a time
            assert!(all0.len() % 3 == 0);
            for i in (0..all0.len()).step_by(3) {
                let results = blockstore
                    .get_confirmed_signatures_for_address2(
                        address0,
                        highest_confirmed_root,
                        if i == 0 {
                            None
                        } else {
                            Some(all0[i - 1].signature)
                        },
                        3,
                    )
                    .unwrap();
                assert_eq!(results.len(), 3);
                assert_eq!(results[0], all0[i]);
                assert_eq!(results[1], all0[i + 1]);
                assert_eq!(results[2], all0[i + 2]);
            }

            // Ensure that the signatures within a slot are reverse ordered by signature
            // (current limitation of the .get_confirmed_signatures_for_address2())
            for i in (0..all1.len()).step_by(2) {
                let results = blockstore
                    .get_confirmed_signatures_for_address2(
                        address1,
                        highest_confirmed_root,
                        if i == 0 {
                            None
                        } else {
                            Some(all1[i - 1].signature)
                        },
                        2,
                    )
                    .unwrap();
                assert_eq!(results.len(), 2);
                assert_eq!(results[0].slot, results[1].slot);
                assert!(results[0].signature >= results[1].signature);
                assert_eq!(results[0], all1[i]);
                assert_eq!(results[1], all1[i + 1]);
            }

            // A search for address 0 with a `before` signature from address1 should also work
            let results = blockstore
                .get_confirmed_signatures_for_address2(
                    address0,
                    highest_confirmed_root,
                    Some(all1[0].signature),
                    usize::MAX,
                )
                .unwrap();
            // The exact number of results returned is variable, based on the sort order of the
            // random signatures that are generated
            assert!(!results.is_empty());
        }
        Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
    }

    #[test]
    fn test_get_last_hash() {
        let mut entries: Vec<Entry> = vec![];

@@ -235,6 +235,9 @@ impl Blockstore {
        Ok(result)
    }

    /// Purges special columns (using a non-Slot primary-index) exactly, by deserializing each slot
    /// being purged and iterating through all transactions to determine the keys of individual
    /// records. **This method is very slow.**
    fn purge_special_columns_exact(
        &self,
        batch: &mut WriteBatch,
@@ -279,6 +282,8 @@ impl Blockstore {
        Ok(())
    }

    /// Purges special columns (using a non-Slot primary-index) by range. Purge occurs if frozen
    /// primary index has a max-slot less than the highest slot being purged.
    fn purge_special_columns_with_primary_index(
        &self,
        write_batch: &mut WriteBatch,

@@ -4,7 +4,7 @@ use byteorder::{BigEndian, ByteOrder};
use log::*;
pub use rocksdb::Direction as IteratorDirection;
use rocksdb::{
    self, ColumnFamily, ColumnFamilyDescriptor, DBIterator, DBRawIterator,
    self, ColumnFamily, ColumnFamilyDescriptor, DBIterator, DBRawIterator, DBRecoveryMode,
    IteratorMode as RocksIteratorMode, Options, WriteBatch as RWriteBatch, DB,
};
use serde::de::DeserializeOwned;
@@ -39,7 +39,9 @@ const CODE_SHRED_CF: &str = "code_shred";
const TRANSACTION_STATUS_CF: &str = "transaction_status";
/// Column family for Address Signatures
const ADDRESS_SIGNATURES_CF: &str = "address_signatures";
/// Column family for Transaction Status Index
/// Column family for the Transaction Status Index.
/// This column family is used for tracking the active primary index for columns that for
/// query performance reasons should not be indexed by Slot.
const TRANSACTION_STATUS_INDEX_CF: &str = "transaction_status_index";
/// Column family for Rewards
const REWARDS_CF: &str = "rewards";
@@ -57,6 +59,7 @@ pub enum BlockstoreError {
    SlotCleanedUp,
    UnpackError(#[from] UnpackError),
    UnableToSetOpenFileDescriptorLimit,
    TransactionStatusSlotMismatch,
}
pub type Result<T> = std::result::Result<T, BlockstoreError>;

@@ -137,11 +140,51 @@ pub enum ActualAccessType {
    Secondary,
}

#[derive(Debug, Clone)]
pub enum BlockstoreRecoveryMode {
    TolerateCorruptedTailRecords,
    AbsoluteConsistency,
    PointInTime,
    SkipAnyCorruptedRecord,
}

impl From<&str> for BlockstoreRecoveryMode {
    fn from(string: &str) -> Self {
        match string {
            "tolerate_corrupted_tail_records" => {
                BlockstoreRecoveryMode::TolerateCorruptedTailRecords
            }
            "absolute_consistency" => BlockstoreRecoveryMode::AbsoluteConsistency,
            "point_in_time" => BlockstoreRecoveryMode::PointInTime,
            "skip_any_corrupted_record" => BlockstoreRecoveryMode::SkipAnyCorruptedRecord,
            bad_mode => panic!("Invalid recovery mode: {}", bad_mode),
        }
    }
}
impl Into<DBRecoveryMode> for BlockstoreRecoveryMode {
    fn into(self) -> DBRecoveryMode {
        match self {
            BlockstoreRecoveryMode::TolerateCorruptedTailRecords => {
                DBRecoveryMode::TolerateCorruptedTailRecords
            }
            BlockstoreRecoveryMode::AbsoluteConsistency => DBRecoveryMode::AbsoluteConsistency,
            BlockstoreRecoveryMode::PointInTime => DBRecoveryMode::PointInTime,
            BlockstoreRecoveryMode::SkipAnyCorruptedRecord => {
                DBRecoveryMode::SkipAnyCorruptedRecord
            }
        }
    }
}

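A minimal sketch of how these conversions compose with RocksDB options, mirroring what Rocks::open does in the hunk below. It is illustrative only: the `options_for` helper is hypothetical, `BlockstoreRecoveryMode` is assumed to be in scope, and `Options::default()` stands in for the crate's `get_db_options()`.

```rust
use rocksdb::{DBRecoveryMode, Options};

fn options_for(mode_str: Option<&str>) -> Options {
    let mut db_options = Options::default();
    if let Some(mode_str) = mode_str {
        // Reuses the string names accepted by From<&str> above; an unknown
        // string would panic there, so callers should validate input first.
        let mode: BlockstoreRecoveryMode = mode_str.into();
        db_options.set_wal_recovery_mode(mode.into());
    }
    db_options
}
```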
#[derive(Debug)]
struct Rocks(rocksdb::DB, ActualAccessType);

impl Rocks {
    fn open(path: &Path, access_type: AccessType) -> Result<Rocks> {
    fn open(
        path: &Path,
        access_type: AccessType,
        recovery_mode: Option<BlockstoreRecoveryMode>,
    ) -> Result<Rocks> {
        use columns::{
            AddressSignatures, DeadSlots, DuplicateSlots, ErasureMeta, Index, Orphans, Rewards,
            Root, ShredCode, ShredData, SlotMeta, TransactionStatus, TransactionStatusIndex,
@@ -151,6 +194,9 @@ impl Rocks {

        // Use default database options
        let mut db_options = get_db_options();
        if let Some(recovery_mode) = recovery_mode {
            db_options.set_wal_recovery_mode(recovery_mode.into());
        }

        // Column family names
        let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
@@ -626,8 +672,12 @@ pub struct WriteBatch<'a> {
}

impl Database {
    pub fn open(path: &Path, access_type: AccessType) -> Result<Self> {
        let backend = Arc::new(Rocks::open(path, access_type)?);
    pub fn open(
        path: &Path,
        access_type: AccessType,
        recovery_mode: Option<BlockstoreRecoveryMode>,
    ) -> Result<Self> {
        let backend = Arc::new(Rocks::open(path, access_type, recovery_mode)?);

        Ok(Database {
            backend,

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,22 +12,22 @@ homepage = "https://solana.com/"
itertools = "0.9.0"
log = "0.4.8"
rand = "0.7.0"
solana-config-program = { path = "../programs/config", version = "1.2.14" }
solana-core = { path = "../core", version = "1.2.14" }
solana-client = { path = "../client", version = "1.2.14" }
solana-download-utils = { path = "../download-utils", version = "1.2.14" }
solana-faucet = { path = "../faucet", version = "1.2.14" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.14" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.14" }
solana-ledger = { path = "../ledger", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.14" }
solana-runtime = { path = "../runtime", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-stake-program = { path = "../programs/stake", version = "1.2.14" }
solana-vest-program = { path = "../programs/vest", version = "1.2.14" }
solana-vote-program = { path = "../programs/vote", version = "1.2.14" }
solana-config-program = { path = "../programs/config", version = "1.2.23" }
solana-core = { path = "../core", version = "1.2.23" }
solana-client = { path = "../client", version = "1.2.23" }
solana-download-utils = { path = "../download-utils", version = "1.2.23" }
solana-faucet = { path = "../faucet", version = "1.2.23" }
solana-exchange-program = { path = "../programs/exchange", version = "1.2.23" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.2.23" }
solana-ledger = { path = "../ledger", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-runtime = { path = "../runtime", version = "1.2.23" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-stake-program = { path = "../programs/stake", version = "1.2.23" }
solana-vest-program = { path = "../programs/vest", version = "1.2.23" }
solana-vote-program = { path = "../programs/vote", version = "1.2.23" }
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.14" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.2.23" }

[dev-dependencies]
assert_matches = "1.3.0"

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-log-analyzer"
description = "The solana cluster network analysis tool"
version = "1.2.14"
version = "1.2.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,9 +14,9 @@ byte-unit = "3.1.1"
clap = "2.33.1"
serde = "1.0.110"
serde_json = "1.0.53"
solana-clap-utils = { path = "../clap-utils", version = "1.2.14" }
solana-logger = { path = "../logger", version = "1.2.14" }
solana-version = { path = "../version", version = "1.2.14" }
solana-clap-utils = { path = "../clap-utils", version = "1.2.23" }
solana-logger = { path = "../logger", version = "1.2.23" }
solana-version = { path = "../version", version = "1.2.23" }

[[bin]]
name = "solana-log-analyzer"

@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "1.2.14"
version = "1.2.23"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@@ -26,7 +26,7 @@ fn replace_logger(logger: env_logger::Logger) {
    let max_level = logger.filter();
    log::set_max_level(max_level);
    let mut rw = LOGGER.write().unwrap();
    std::mem::replace(&mut *rw, logger);
    let _ = std::mem::replace(&mut *rw, logger);
    let _ = log::set_boxed_logger(Box::new(LoggerShim {}));
}

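The `let _ =` binding here discards the previous logger returned by std::mem::replace, which is marked #[must_use]; the same change appears in metrics::set_host_id further down. A tiny standalone example of the pattern (illustrative, not from the diff):

```rust
fn main() {
    let mut current = String::from("old");
    // std::mem::replace returns the previous value, which is #[must_use];
    // binding it to `_` drops it immediately and silences the lint when the
    // old value is intentionally discarded, as in replace_logger() above.
    let _ = std::mem::replace(&mut current, String::from("new"));
    assert_eq!(current, "new");
}
```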
@@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "1.2.14"
version = "1.2.23"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -12,8 +12,8 @@ edition = "2018"

[dependencies]
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "1.2.14" }
solana-metrics = { path = "../metrics", version = "1.2.14" }
solana-sdk = { path = "../sdk", version = "1.2.23" }
solana-metrics = { path = "../metrics", version = "1.2.23" }

[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"

|
||||
[package]
|
||||
name = "solana-merkle-tree"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana Merkle Tree"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
fast-math = "0.1"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-metrics"
|
||||
version = "1.2.14"
|
||||
version = "1.2.23"
|
||||
description = "Solana Metrics"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -14,7 +14,7 @@ gethostname = "0.2.1"
|
||||
lazy_static = "1.4.0"
|
||||
log = "0.4.8"
|
||||
reqwest = { version = "0.10.4", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.14" }
|
||||
solana-sdk = { path = "../sdk", version = "1.2.23" }
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.7.0"
|
||||
|
@@ -334,7 +334,7 @@ lazy_static! {
|
||||
pub fn set_host_id(host_id: String) {
|
||||
let mut rw = HOST_ID.write().unwrap();
|
||||
info!("host id: {}", host_id);
|
||||
std::mem::replace(&mut *rw, host_id);
|
||||
let _ = std::mem::replace(&mut *rw, host_id);
|
||||
}
|
||||
|
||||
/// Submits a new point from any thread. Note that points are internally queued
|
||||
|
@@ -48,6 +48,9 @@ while [[ -n $1 ]]; do
  elif [[ $1 = --enable-rpc-transaction-history ]]; then
    args+=("$1")
    shift
  elif [[ $1 = --enable-rpc-bigtable-ledger-storage ]]; then
    args+=("$1")
    shift
  elif [[ $1 = --skip-poh-verify ]]; then
    args+=("$1")
    shift

@@ -33,9 +33,19 @@ args=(
  "$SOLANA_CONFIG_DIR"/bootstrap-validator/vote-account.json
  "$SOLANA_CONFIG_DIR"/bootstrap-validator/stake-account.json
)

"$SOLANA_ROOT"/fetch-spl.sh
if [[ -r spl-genesis-args.sh ]]; then
  SPL_GENESIS_ARGS=$(cat "$SOLANA_ROOT"/spl-genesis-args.sh)
  #shellcheck disable=SC2207
  #shellcheck disable=SC2206
  args+=($SPL_GENESIS_ARGS)
fi

default_arg --ledger "$SOLANA_CONFIG_DIR"/bootstrap-validator
default_arg --faucet-pubkey "$SOLANA_CONFIG_DIR"/faucet.json
default_arg --faucet-lamports 500000000000000000
default_arg --hashes-per-tick auto
default_arg --operating-mode development

$solana_genesis "${args[@]}"

Some files were not shown because too many files have changed in this diff.