Compare commits

...

11 Commits

37 changed files with 2575 additions and 595 deletions

Cargo.lock generated
View File

@ -4584,6 +4584,20 @@ dependencies = [
"syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "solana-stake-accounts"
version = "1.2.0"
dependencies = [
"clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
"solana-clap-utils 1.2.0",
"solana-cli-config 1.2.0",
"solana-client 1.2.0",
"solana-remote-wallet 1.2.0",
"solana-runtime 1.2.0",
"solana-sdk 1.2.0",
"solana-stake-program 1.2.0",
]
[[package]]
name = "solana-stake-monitor"
version = "1.1.0"

View File

@ -52,6 +52,7 @@ members = [
"sdk",
"sdk-c",
"scripts",
"stake-accounts",
"stake-monitor",
"sys-tuner",
"transaction-status",

View File

@ -14,7 +14,7 @@ use solana_core::{
contact_info::ContactInfo,
gossip_service::GossipService,
repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
repair_service::{RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@ -844,13 +844,14 @@ impl Archiver {
repair_service::MAX_REPAIR_LENGTH,
&repair_slot_range,
);
let mut repair_stats = RepairStats::default();
// Iterate over the repairs and send them
if let Ok(repairs) = repairs {
let reqs: Vec<_> = repairs
.into_iter()
.filter_map(|repair_request| {
serve_repair
.map_repair_request(&repair_request)
.map_repair_request(&repair_request, &mut repair_stats)
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()
})

View File

@ -410,19 +410,34 @@ pub fn process_catchup(
node_pubkey: &Pubkey,
node_json_rpc_url: &Option<String>,
) -> ProcessResult {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
let sleep_interval = 5;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
let node_client = if let Some(node_json_rpc_url) = node_json_rpc_url {
RpcClient::new(node_json_rpc_url.to_string())
} else {
RpcClient::new_socket(
cluster_nodes
let rpc_addr = loop {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
if let Some(contact_info) = cluster_nodes
.iter()
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
.ok_or_else(|| format!("Contact information not found for {}", node_pubkey))?
.rpc
.ok_or_else(|| format!("RPC service not found for {}", node_pubkey))?,
)
{
if let Some(rpc_addr) = contact_info.rpc {
break rpc_addr;
}
progress_bar.set_message(&format!("RPC service not found for {}", node_pubkey));
} else {
progress_bar.set_message(&format!(
"Contact information not found for {}",
node_pubkey
));
}
sleep(Duration::from_secs(sleep_interval as u64));
};
RpcClient::new_socket(rpc_addr)
};
let reported_node_pubkey = node_client.get_identity()?;
@ -438,12 +453,8 @@ pub fn process_catchup(
return Err("Both RPC URLs reference the same node, unable to monitor for catchup. Try a different --url".into());
}
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
let mut previous_rpc_slot = std::u64::MAX;
let mut previous_slot_distance = 0;
let sleep_interval = 5;
loop {
let rpc_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::recent())?;
let node_slot = node_client.get_slot_with_commitment(CommitmentConfig::recent())?;

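The reworked `process_catchup` above no longer fails when the target node is missing from gossip: it polls `get_cluster_nodes` until the node's RPC address appears, updating the spinner between attempts. A minimal standalone sketch of that retry shape, where `poll_until_some`, `lookup`, and `report` are hypothetical stand-ins for the loop, the gossip query, and the progress-bar message (the real loop also propagates RPC errors with `?`):

    use std::{thread::sleep, time::Duration};

    // Poll `lookup` until it yields a value, calling `report` between attempts.
    fn poll_until_some<T>(
        mut lookup: impl FnMut() -> Option<T>,
        mut report: impl FnMut(),
        interval: Duration,
    ) -> T {
        loop {
            if let Some(value) = lookup() {
                break value;
            }
            report();
            sleep(interval);
        }
    }

    fn main() {
        let mut attempts = 0;
        let addr = poll_until_some(
            || {
                attempts += 1;
                // Pretend the node shows up in gossip on the third query
                if attempts < 3 { None } else { Some("127.0.0.1:8899") }
            },
            || println!("Connecting..."),
            Duration::from_millis(1),
        );
        assert_eq!(addr, "127.0.0.1:8899");
    }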
View File

@ -1,3 +1,6 @@
use crate::consensus::VOTE_THRESHOLD_SIZE;
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_vote_program::{vote_state::VoteState, vote_state::MAX_LOCKOUT_HISTORY};
@ -31,17 +34,40 @@ impl BlockCommitment {
}
}
#[derive(Debug, Default)]
#[derive(Default)]
pub struct BlockCommitmentCache {
block_commitment: HashMap<Slot, BlockCommitment>,
total_stake: u64,
bank: Arc<Bank>,
root: Slot,
}
impl std::fmt::Debug for BlockCommitmentCache {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BlockCommitmentCache")
.field("block_commitment", &self.block_commitment)
.field("total_stake", &self.total_stake)
.field(
"bank",
&format_args!("Bank({{current_slot: {:?}}})", self.bank.slot()),
)
.field("root", &self.root)
.finish()
}
}
impl BlockCommitmentCache {
pub fn new(block_commitment: HashMap<Slot, BlockCommitment>, total_stake: u64) -> Self {
pub fn new(
block_commitment: HashMap<Slot, BlockCommitment>,
total_stake: u64,
bank: Arc<Bank>,
root: Slot,
) -> Self {
Self {
block_commitment,
total_stake,
bank,
root,
}
}
@ -53,38 +79,62 @@ impl BlockCommitmentCache {
self.total_stake
}
pub fn get_block_with_depth_commitment(
&self,
minimum_depth: usize,
minimum_stake_percentage: f64,
) -> Option<Slot> {
self.block_commitment
.iter()
.filter(|&(_, block_commitment)| {
let fork_stake_minimum_depth: u64 = block_commitment.commitment[minimum_depth..]
.iter()
.cloned()
.sum();
fork_stake_minimum_depth as f64 / self.total_stake as f64
>= minimum_stake_percentage
})
.map(|(slot, _)| *slot)
.max()
pub fn bank(&self) -> Arc<Bank> {
self.bank.clone()
}
pub fn get_rooted_block_with_commitment(&self, minimum_stake_percentage: f64) -> Option<u64> {
self.get_block_with_depth_commitment(MAX_LOCKOUT_HISTORY - 1, minimum_stake_percentage)
pub fn slot(&self) -> Slot {
self.bank.slot()
}
pub fn root(&self) -> Slot {
self.root
}
pub fn get_confirmation_count(&self, slot: Slot) -> Option<usize> {
self.get_lockout_count(slot, VOTE_THRESHOLD_SIZE)
}
// Returns the lowest level at which at least `minimum_stake_percentage` of the total epoch
// stake is locked out
fn get_lockout_count(&self, slot: Slot, minimum_stake_percentage: f64) -> Option<usize> {
self.get_block_commitment(slot).map(|block_commitment| {
let iterator = block_commitment.commitment.iter().enumerate().rev();
let mut sum = 0;
for (i, stake) in iterator {
sum += stake;
if (sum as f64 / self.total_stake as f64) > minimum_stake_percentage {
return i + 1;
}
}
0
})
}
#[cfg(test)]
pub fn new_for_tests() -> Self {
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
block_commitment.insert(0, BlockCommitment::default());
Self {
block_commitment,
total_stake: 42,
..Self::default()
}
}
}
pub struct CommitmentAggregationData {
bank: Arc<Bank>,
root: Slot,
total_staked: u64,
}
impl CommitmentAggregationData {
pub fn new(bank: Arc<Bank>, total_staked: u64) -> Self {
Self { bank, total_staked }
pub fn new(bank: Arc<Bank>, root: Slot, total_staked: u64) -> Self {
Self {
bank,
root,
total_staked,
}
}
}
@ -144,14 +194,24 @@ impl AggregateCommitmentService {
continue;
}
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
let block_commitment = Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, aggregation_data.total_staked);
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
aggregation_data.total_staked,
aggregation_data.bank,
aggregation_data.root,
);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
aggregate_commitment_time.stop();
inc_new_counter_info!(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as usize
);
}
}
@ -246,84 +306,31 @@ mod tests {
}
#[test]
fn test_get_block_with_depth_commitment() {
fn test_get_confirmations() {
let bank = Arc::new(Bank::default());
// Build BlockCommitmentCache with votes at depths 0 and 1 for 2 slots
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 15);
cache0.increase_confirmation_stake(2, 25);
cache0.increase_confirmation_stake(1, 5);
cache0.increase_confirmation_stake(2, 40);
let mut cache1 = BlockCommitment::default();
cache1.increase_confirmation_stake(1, 10);
cache1.increase_confirmation_stake(2, 20);
cache1.increase_confirmation_stake(1, 40);
cache1.increase_confirmation_stake(2, 5);
let mut cache2 = BlockCommitment::default();
cache2.increase_confirmation_stake(1, 20);
cache2.increase_confirmation_stake(2, 5);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
block_commitment.entry(2).or_insert(cache2.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50, bank, 0);
// Neither slot has rooted votes
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.1),
None
);
// Neither slot meets the minimum level of commitment 0.6 at depth 1
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.6),
None
);
// Only slot 0 meets the minimum level of commitment 0.5 at depth 1
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.5),
Some(0)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(1, 0.4),
Some(1)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(0, 0.6),
Some(1)
);
// Neither slot meets the minimum level of commitment 0.9 at depth 0
assert_eq!(
block_commitment_cache.get_block_with_depth_commitment(0, 0.9),
None
);
}
#[test]
fn test_get_rooted_block_with_commitment() {
// Build BlockCommitmentCache with rooted votes
let mut cache0 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 40);
cache0.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
let mut cache1 = BlockCommitment::new([0; MAX_LOCKOUT_HISTORY]);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, 30);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 1, 10);
cache1.increase_confirmation_stake(MAX_LOCKOUT_HISTORY - 2, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50);
// Only slot 0 meets the minimum level of commitment 0.66 at root
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.66),
Some(0)
);
// If multiple slots meet the minimum level of commitment, method should return the most recent
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.6),
Some(1)
);
// Neither slot meets the minimum level of commitment 0.9 at root
assert_eq!(
block_commitment_cache.get_rooted_block_with_commitment(0.9),
None
);
assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
assert_eq!(block_commitment_cache.get_confirmation_count(2), Some(0));
assert_eq!(block_commitment_cache.get_confirmation_count(3), None);
}
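The new `get_confirmation_count`/`get_lockout_count` pair walks the commitment vector from the deepest lockout back toward the shallowest, accumulating stake until it crosses the vote threshold. A self-contained sketch, assuming a 32-entry vector where `commitment[i]` holds the stake observed with `i + 1` confirmations and a 2/3 threshold (the value of `VOTE_THRESHOLD_SIZE`), reproduces the `Some(2)` result for slot 0 in `test_get_confirmations` above:

    // Sketch of `get_lockout_count` over a plain array.
    fn confirmation_count(commitment: &[u64; 32], total_stake: u64) -> usize {
        let mut sum = 0;
        for (i, stake) in commitment.iter().enumerate().rev() {
            sum += *stake;
            if sum as f64 / total_stake as f64 > 2.0 / 3.0 {
                return i + 1;
            }
        }
        0
    }

    fn main() {
        // Slot 0 in the test: 5 stake at 1 confirmation, 40 at 2, of 50 total
        let mut c = [0u64; 32];
        c[0] = 5;
        c[1] = 40;
        // 40/50 = 0.8 crosses 2/3 at index 1, so the count is 1 + 1 = 2
        assert_eq!(confirmation_count(&c, 50), 2);
    }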
#[test]

View File

@ -482,6 +482,7 @@ pub mod test {
use super::*;
use crate::{
cluster_info_vote_listener::VoteTracker,
cluster_slots::ClusterSlots,
progress_map::ForkProgress,
replay_stage::{HeaviestForkFailures, ReplayStage},
};
@ -612,6 +613,7 @@ pub mod test {
tower,
progress,
&VoteTracker::default(),
&ClusterSlots::default(),
bank_forks,
&mut HashSet::new(),
);

View File

@ -1,6 +1,8 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
@ -11,13 +13,22 @@ use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
// - To try and keep the RocksDB size under 512GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 250m shreds can be stored in 512gb.
//   at 5k shreds/slot at 50k tps, this is 50k slots (~5.5 hours).
// At idle, 60 shreds/slot this is about 4m slots (18 days)
// This is chosen to allow enough time for
// - To try and keep the RocksDB size under 512GB at 50k tps (100 slots take ~2GB).
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 270_000;
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 250_000_000;
// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
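The cap is now expressed in shreds rather than slots because shreds, not slots, drive disk usage. A quick check of the arithmetic in the comment above (all figures are the comment's own estimates; the ~0.4 s slot time is an assumption used here to recover the hour/day figures):

    fn main() {
        const BYTES_PER_SHRED: u64 = 2_000; // ~1600 observed, 2000 for margin
        const MAX_SHREDS: u64 = 250_000_000;
        const SLOT_SECS: f64 = 0.4;

        assert_eq!(MAX_SHREDS * BYTES_PER_SHRED, 500_000_000_000); // ~512 GB

        let busy_slots = MAX_SHREDS / 5_000; // ~5k shreds/slot at 50k tps
        let idle_slots = MAX_SHREDS / 60; // ~60 shreds/slot at idle
        // ~50k slots (~5.5 hours) busy; ~4.2M slots (~19 days, the comment rounds to 18) idle
        println!("busy: {} slots ({:.1} h)", busy_slots, busy_slots as f64 * SLOT_SECS / 3600.0);
        println!("idle: {} slots ({:.0} d)", idle_slots, idle_slots as f64 * SLOT_SECS / 86_400.0);
    }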
pub struct LedgerCleanupService {
@ -36,7 +47,7 @@ impl LedgerCleanupService {
max_ledger_slots
);
let exit = exit.clone();
let mut next_purge_batch = max_ledger_slots;
let mut last_purge_slot = 0;
let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
@ -47,7 +58,8 @@ impl LedgerCleanupService {
&new_root_receiver,
&blockstore,
max_ledger_slots,
&mut next_purge_batch,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
@ -59,45 +71,123 @@ impl LedgerCleanupService {
Self { t_cleanup }
}
fn find_slots_to_clean(
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
let mut shreds = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
if i == 0 {
first_slot = slot;
debug!("purge: searching from slot: {}", slot);
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
total_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
}
fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>,
blockstore: &Arc<Blockstore>,
max_ledger_slots: u64,
next_purge_batch: &mut u64,
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
) -> Result<(), RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
// Notify blockstore of impending purge
if root > *next_purge_batch {
//cleanup
let lowest_slot = root - max_ledger_slots;
*blockstore.lowest_cleanup_slot.write().unwrap() = lowest_slot;
blockstore.purge_slots(0, Some(lowest_slot));
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
}
}
let disk_utilization_post = blockstore.storage_size();
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
(disk_utilization_pre, disk_utilization_post)
{
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", disk_utilization_pre as i64, i64),
("disk_utilization_post", disk_utilization_post as i64, i64),
(
"disk_utilization_delta",
(disk_utilization_pre as i64 - disk_utilization_post as i64),
i64
)
);
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
}
Ok(())
}
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
if let (Ok(pre), Ok(post)) = (pre, post) {
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
);
}
}
pub fn join(self) -> thread::Result<()> {
self.t_cleanup.join()
}
@ -111,6 +201,7 @@ mod tests {
#[test]
fn test_cleanup() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
@ -118,10 +209,10 @@ mod tests {
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
//send a signal to kill slots 0-40
let mut next_purge_slot = 0;
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
//check that 0-40 don't exist
@ -134,6 +225,62 @@ mod tests {
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_cleanup_speed() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_no_compaction(true);
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
let mut first_insert = Measure::start("first_insert");
let initial_slots = 50;
let initial_entries = 5;
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
blockstore.insert_shreds(shreds, None, false).unwrap();
first_insert.stop();
info!("{}", first_insert);
let mut last_purge_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
let mut insert_time = Measure::start("insert time");
let batch_size = 2;
let batches = num_slots / batch_size;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
if i % 100 == 0 {
info!("inserting..{} of {}", i, batches);
}
}
insert_time.stop();
let mut time = Measure::start("purge time");
sender.send(slot + num_slots).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
initial_slots,
&mut last_purge_slot,
10,
)
.unwrap();
time.stop();
info!(
"slot: {} size: {} {} {}",
slot, num_slots, insert_time, time
);
slot += num_slots;
num_slots *= 2;
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_compaction() {
let blockstore_path = get_tmp_ledger_path!();
@ -142,7 +289,7 @@ mod tests {
let n = 10_000;
let batch_size = 100;
let batches = n / batch_size;
let max_ledger_slots = 100;
let max_ledger_shreds = 100;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
@ -158,8 +305,9 @@ mod tests {
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
max_ledger_slots,
max_ledger_shreds,
&mut next_purge_batch,
10,
)
.unwrap();
@ -170,7 +318,7 @@ mod tests {
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
// check that early slots don't exist
let max_slot = n - max_ledger_slots;
let max_slot = n - max_ledger_shreds - 1;
blockstore
.slot_meta_iterator(0)
.unwrap()

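`find_slots_to_clean` above totals shreds oldest-first, then, when over budget, walks newest-first to find the purge boundary: the oldest slot that must go so the retained tail fits within `max_ledger_shreds`. A sketch of that boundary search over plain `(slot, shred_count)` pairs, with `None` standing in for the original's `(0, 0, 0)` under-budget return:

    fn lowest_slot_to_clean(shreds: &[(u64, u64)], max_shreds: u64) -> Option<u64> {
        if shreds.is_empty() {
            return None;
        }
        let total: u64 = shreds.iter().map(|(_, n)| n).sum();
        if total < max_shreds {
            return None; // under budget, nothing to purge
        }
        // Walk from the newest slot backwards, keeping up to `max_shreds`;
        // the slot where the running count crosses the budget is the bound.
        let mut kept = 0;
        for &(slot, n) in shreds.iter().rev() {
            kept += n;
            if kept > max_shreds {
                return Some(slot);
            }
        }
        Some(shreds[0].0)
    }

    fn main() {
        // Slots 0..=4 with 100 shreds each and a 250-shred budget: slots 3..=4
        // fit, slot 2 crosses the budget, so slots 0..=2 are purge-eligible.
        let shreds: Vec<_> = (0..5).map(|s| (s, 100)).collect();
        assert_eq!(lowest_slot_to_clean(&shreds, 250), Some(2));
    }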
View File

@ -1,13 +1,13 @@
use crate::{
cluster_info_vote_listener::SlotVoteTracker, consensus::StakeLockout,
replay_stage::SUPERMINORITY_THRESHOLD,
cluster_info_vote_listener::SlotVoteTracker, cluster_slots::SlotPubkeys,
consensus::StakeLockout, replay_stage::SUPERMINORITY_THRESHOLD,
};
use solana_ledger::{
bank_forks::BankForks,
blockstore_processor::{ConfirmationProgress, ConfirmationTiming},
};
use solana_runtime::bank::Bank;
use solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey};
use solana_sdk::{account::Account, clock::Slot, hash::Hash, pubkey::Pubkey};
use std::{
collections::{HashMap, HashSet},
rc::Rc,
@ -179,14 +179,84 @@ pub(crate) struct ForkStats {
#[derive(Clone, Default)]
pub(crate) struct PropagatedStats {
pub(crate) propagated_validators: HashSet<Rc<Pubkey>>,
pub(crate) propagated_node_ids: HashSet<Rc<Pubkey>>,
pub(crate) propagated_validators_stake: u64,
pub(crate) is_propagated: bool,
pub(crate) is_leader_slot: bool,
pub(crate) prev_leader_slot: Option<Slot>,
pub(crate) slot_vote_tracker: Option<Arc<RwLock<SlotVoteTracker>>>,
pub(crate) cluster_slot_pubkeys: Option<Arc<RwLock<SlotPubkeys>>>,
pub(crate) total_epoch_stake: u64,
}
impl PropagatedStats {
pub fn add_vote_pubkey(
&mut self,
vote_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
stake: u64,
) {
if !self.propagated_validators.contains(vote_pubkey) {
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(vote_pubkey).cloned();
if cached_pubkey.is_none() {
let new_pubkey = Rc::new(*vote_pubkey);
all_pubkeys.insert(new_pubkey.clone());
cached_pubkey = Some(new_pubkey);
}
let vote_pubkey = cached_pubkey.unwrap();
self.propagated_validators.insert(vote_pubkey);
self.propagated_validators_stake += stake;
}
}
pub fn add_node_pubkey(
&mut self,
node_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
bank: &Bank,
) {
if !self.propagated_node_ids.contains(node_pubkey) {
let node_vote_accounts = bank
.epoch_vote_accounts_for_node_id(&node_pubkey)
.map(|v| &v.vote_accounts);
if let Some(node_vote_accounts) = node_vote_accounts {
self.add_node_pubkey_internal(
node_pubkey,
all_pubkeys,
node_vote_accounts,
bank.epoch_vote_accounts(bank.epoch())
.expect("Epoch stakes for bank's own epoch must exist"),
);
}
}
}
fn add_node_pubkey_internal(
&mut self,
node_pubkey: &Pubkey,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
vote_account_pubkeys: &[Pubkey],
epoch_vote_accounts: &HashMap<Pubkey, (u64, Account)>,
) {
let mut cached_pubkey: Option<Rc<Pubkey>> = all_pubkeys.get(node_pubkey).cloned();
if cached_pubkey.is_none() {
let new_pubkey = Rc::new(*node_pubkey);
all_pubkeys.insert(new_pubkey.clone());
cached_pubkey = Some(new_pubkey);
}
let node_pubkey = cached_pubkey.unwrap();
self.propagated_node_ids.insert(node_pubkey);
for vote_account_pubkey in vote_account_pubkeys.iter() {
let stake = epoch_vote_accounts
.get(vote_account_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or(0);
self.add_vote_pubkey(vote_account_pubkey, all_pubkeys, stake);
}
}
}
#[derive(Default)]
pub(crate) struct ProgressMap {
progress_map: HashMap<Slot, ForkProgress>,
@ -288,6 +358,120 @@ impl ProgressMap {
mod test {
use super::*;
#[test]
fn test_add_vote_pubkey() {
let mut stats = PropagatedStats::default();
let mut all_pubkeys = HashSet::new();
let mut vote_pubkey = Pubkey::new_rand();
all_pubkeys.insert(Rc::new(vote_pubkey.clone()));
// Add a vote pubkey; the number of references in all_pubkeys
// should be 2
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 1);
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
// Adding it again should change no state since the key already existed
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 1);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 1);
// Adding another pubkey should succeed
vote_pubkey = Pubkey::new_rand();
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 3);
assert_eq!(Rc::strong_count(all_pubkeys.get(&vote_pubkey).unwrap()), 2);
}
#[test]
fn test_add_node_pubkey_internal() {
let num_vote_accounts = 10;
let staked_vote_accounts = 5;
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
.iter()
.skip(num_vote_accounts - staked_vote_accounts)
.map(|pubkey| (*pubkey, (1, Account::default())))
.collect();
let mut stats = PropagatedStats::default();
let mut all_pubkeys = HashSet::new();
let mut node_pubkey = Pubkey::new_rand();
all_pubkeys.insert(Rc::new(node_pubkey.clone()));
// Add a node pubkey; the number of references in all_pubkeys
// should be 2
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
// Adding it again should not change any state
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
// Adding another pubkey with the same vote accounts should succeed, but stake
// shouldn't increase
node_pubkey = Pubkey::new_rand();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
// Adding another pubkey with different vote accounts should succeed
// and increase stake
node_pubkey = Pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(|| Pubkey::new_rand())
.take(num_vote_accounts)
.collect();
let epoch_vote_accounts: HashMap<_, _> = vote_account_pubkeys
.iter()
.skip(num_vote_accounts - staked_vote_accounts)
.map(|pubkey| (*pubkey, (1, Account::default())))
.collect();
stats.add_node_pubkey_internal(
&node_pubkey,
&mut all_pubkeys,
&vote_account_pubkeys,
&epoch_vote_accounts,
);
assert!(stats.propagated_node_ids.contains(&node_pubkey));
assert_eq!(
stats.propagated_validators_stake,
2 * staked_vote_accounts as u64
);
assert_eq!(Rc::strong_count(all_pubkeys.get(&node_pubkey).unwrap()), 2);
}
#[test]
fn test_is_propagated_status_on_construction() {
// If the given ValidatorStakeInfo == None, then this is not

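`add_vote_pubkey` and `add_node_pubkey_internal` both go through the shared `all_pubkeys` set so that every fork's `PropagatedStats` holds `Rc` clones of a single allocation per pubkey rather than fresh 32-byte copies. The interning step, reduced to a generic helper (a sketch, not the PR's API):

    use std::{collections::HashSet, hash::Hash, rc::Rc};

    // Look up an existing Rc for `value`, or intern a fresh one. Every caller
    // then shares one allocation, which is what the Rc::strong_count
    // assertions in the tests above are checking.
    fn intern<T: Clone + Eq + Hash>(pool: &mut HashSet<Rc<T>>, value: &T) -> Rc<T> {
        if let Some(existing) = pool.get(value) {
            return existing.clone();
        }
        let fresh = Rc::new(value.clone());
        pool.insert(fresh.clone());
        fresh
    }

    fn main() {
        let mut pool: HashSet<Rc<String>> = HashSet::new();
        let a = intern(&mut pool, &"validator-1".to_string());
        let b = intern(&mut pool, &"validator-1".to_string());
        assert!(Rc::ptr_eq(&a, &b));
        // One Rc in the pool plus `a` and `b`: three strong references
        assert_eq!(Rc::strong_count(&a), 3);
    }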
View File

@ -20,9 +20,31 @@ use std::{
sync::{Arc, RwLock},
thread::sleep,
thread::{self, Builder, JoinHandle},
time::Duration,
time::{Duration, Instant},
};
#[derive(Default)]
pub struct RepairStatsGroup {
pub count: u64,
pub min: u64,
pub max: u64,
}
impl RepairStatsGroup {
pub fn update(&mut self, slot: u64) {
self.count += 1;
// `min` defaults to 0, so seed it from the first observed slot;
// otherwise it would stay pinned at 0
self.min = if self.count == 1 { slot } else { std::cmp::min(self.min, slot) };
self.max = std::cmp::max(self.max, slot);
}
}
#[derive(Default)]
pub struct RepairStats {
pub shred: RepairStatsGroup,
pub highest_shred: RepairStatsGroup,
pub orphan: RepairStatsGroup,
}
pub const MAX_REPAIR_LENGTH: usize = 512;
pub const REPAIR_MS: u64 = 100;
pub const MAX_ORPHANS: usize = 5;
@ -93,6 +115,8 @@ impl RepairService {
if let RepairStrategy::RepairAll { .. } = repair_strategy {
Self::initialize_lowest_slot(id, blockstore, cluster_info);
}
let mut repair_stats = RepairStats::default();
let mut last_stats = Instant::now();
loop {
if exit.load(Ordering::Relaxed) {
break;
@ -137,7 +161,12 @@ impl RepairService {
.into_iter()
.filter_map(|repair_request| {
serve_repair
.repair_request(&cluster_slots, &repair_request, &mut cache)
.repair_request(
&cluster_slots,
&repair_request,
&mut cache,
&mut repair_stats,
)
.map(|result| (result, repair_request))
.ok()
})
@ -150,6 +179,24 @@ impl RepairService {
});
}
}
if last_stats.elapsed().as_secs() > 1 {
let repair_total = repair_stats.shred.count
+ repair_stats.highest_shred.count
+ repair_stats.orphan.count;
if repair_total > 0 {
datapoint_info!(
"serve_repair-repair",
("repair-total", repair_total, i64),
("shred-count", repair_stats.shred.count, i64),
("highest-shred-count", repair_stats.highest_shred.count, i64),
("orphan-count", repair_stats.orphan.count, i64),
("repair-highest-slot", repair_stats.highest_shred.max, i64),
("repair-orphan", repair_stats.orphan.max, i64),
);
}
repair_stats = RepairStats::default();
last_stats = Instant::now();
}
sleep(Duration::from_millis(REPAIR_MS));
}
}

View File

@ -4,6 +4,7 @@ use crate::{
broadcast_stage::RetransmitSlotsSender,
cluster_info::ClusterInfo,
cluster_info_vote_listener::VoteTracker,
cluster_slots::ClusterSlots,
commitment::{AggregateCommitmentService, BlockCommitmentCache, CommitmentAggregationData},
consensus::{StakeLockout, Tower},
poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
@ -114,6 +115,7 @@ impl ReplayStage {
ledger_signal_receiver: Receiver<bool>,
poh_recorder: Arc<Mutex<PohRecorder>>,
vote_tracker: Arc<VoteTracker>,
cluster_slots: Arc<ClusterSlots>,
retransmit_slots_sender: RetransmitSlotsSender,
) -> (Self, Receiver<Vec<Arc<Bank>>>) {
let ReplayStageConfig {
@ -136,7 +138,7 @@ impl ReplayStage {
// Start the replay stage loop
let (lockouts_sender, commitment_service) =
AggregateCommitmentService::new(&exit, block_commitment_cache);
AggregateCommitmentService::new(&exit, block_commitment_cache.clone());
#[allow(clippy::cognitive_complexity)]
let t_replay = Builder::new()
@ -231,6 +233,7 @@ impl ReplayStage {
&tower,
&mut progress,
&vote_tracker,
&cluster_slots,
&bank_forks,
&mut all_pubkeys,
);
@ -288,10 +291,11 @@ impl ReplayStage {
progress.get_propagated_stats(latest_leader_slot)
{
info!(
"total staked: {}, observed staked: {}, pubkeys: {:?}, latest_leader_slot: {}, epoch: {:?}",
"total staked: {}, observed staked: {}, vote pubkeys: {:?}, node_pubkeys: {:?}, latest_leader_slot: {}, epoch: {:?}",
stats.total_epoch_stake,
stats.propagated_validators_stake,
stats.propagated_validators,
stats.propagated_node_ids,
latest_leader_slot,
bank_forks.read().unwrap().get(latest_leader_slot).map(|x| x.epoch()),
);
@ -306,7 +310,7 @@ impl ReplayStage {
// Vote on a fork
let voted_on_different_fork = {
if let Some(ref vote_bank) = vote_bank {
subscriptions.notify_subscribers(vote_bank.slot(), &bank_forks);
subscriptions.notify_subscribers(block_commitment_cache.read().unwrap().slot(), &bank_forks);
if let Some(votable_leader) = leader_schedule_cache
.slot_leader_at(vote_bank.slot(), Some(vote_bank))
{
@ -762,6 +766,7 @@ impl ReplayStage {
Self::update_commitment_cache(
bank.clone(),
bank_forks.read().unwrap().root(),
progress.get_fork_stats(bank.slot()).unwrap().total_staked,
lockouts_sender,
);
@ -792,10 +797,13 @@ impl ReplayStage {
fn update_commitment_cache(
bank: Arc<Bank>,
root: Slot,
total_staked: u64,
lockouts_sender: &Sender<CommitmentAggregationData>,
) {
if let Err(e) = lockouts_sender.send(CommitmentAggregationData::new(bank, total_staked)) {
if let Err(e) =
lockouts_sender.send(CommitmentAggregationData::new(bank, root, total_staked))
{
trace!("lockouts_sender failed: {:?}", e);
}
}
@ -912,6 +920,7 @@ impl ReplayStage {
tower: &Tower,
progress: &mut ProgressMap,
vote_tracker: &VoteTracker,
cluster_slots: &ClusterSlots,
bank_forks: &RwLock<BankForks>,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
) -> Vec<Slot> {
@ -972,6 +981,7 @@ impl ReplayStage {
all_pubkeys,
bank_forks,
vote_tracker,
cluster_slots,
);
let stats = progress
@ -996,6 +1006,7 @@ impl ReplayStage {
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
bank_forks: &RwLock<BankForks>,
vote_tracker: &VoteTracker,
cluster_slots: &ClusterSlots,
) {
// If propagation has already been confirmed, return
if progress.is_propagated(slot) {
@ -1017,14 +1028,33 @@ impl ReplayStage {
.slot_vote_tracker = slot_vote_tracker.clone();
}
let mut cluster_slot_pubkeys = progress
.get_propagated_stats(slot)
.expect("All frozen banks must exist in the Progress map")
.cluster_slot_pubkeys
.clone();
if cluster_slot_pubkeys.is_none() {
cluster_slot_pubkeys = cluster_slots.lookup(slot);
progress
.get_propagated_stats_mut(slot)
.expect("All frozen banks must exist in the Progress map")
.cluster_slot_pubkeys = cluster_slot_pubkeys.clone();
}
let newly_voted_pubkeys = slot_vote_tracker
.as_ref()
.and_then(|slot_vote_tracker| slot_vote_tracker.write().unwrap().get_updates())
.unwrap_or_else(|| vec![]);
let cluster_slot_pubkeys = cluster_slot_pubkeys
.map(|v| v.read().unwrap().keys().cloned().collect())
.unwrap_or_else(|| vec![]);
Self::update_fork_propagated_threshold_from_votes(
progress,
newly_voted_pubkeys,
cluster_slot_pubkeys,
slot,
bank_forks,
all_pubkeys,
@ -1248,6 +1278,7 @@ impl ReplayStage {
fn update_fork_propagated_threshold_from_votes(
progress: &mut ProgressMap,
mut newly_voted_pubkeys: Vec<impl Deref<Target = Pubkey>>,
mut cluster_slot_pubkeys: Vec<impl Deref<Target = Pubkey>>,
fork_tip: Slot,
bank_forks: &RwLock<BankForks>,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
@ -1267,14 +1298,15 @@ impl ReplayStage {
.expect("current_leader_slot > root, so must exist in the progress map");
// If a descendant has reached propagation threshold, then
// all its ancestor banks have alsso reached propagation
// all its ancestor banks have also reached propagation
// threshold as well (Validators can't have voted for a
// descendant without also getting the ancestor block)
if leader_propagated_stats.is_propagated ||
// If there's no new validators to record, and there's no
// newly achieved threshold, then there's no further
// information to propagate backwards to past leader blocks
(newly_voted_pubkeys.is_empty() && !did_newly_reach_threshold)
(newly_voted_pubkeys.is_empty() && cluster_slot_pubkeys.is_empty() &&
!did_newly_reach_threshold)
{
break;
}
@ -1292,6 +1324,7 @@ impl ReplayStage {
did_newly_reach_threshold = Self::update_slot_propagated_threshold_from_votes(
&mut newly_voted_pubkeys,
&mut cluster_slot_pubkeys,
&leader_bank,
leader_propagated_stats,
all_pubkeys,
@ -1305,6 +1338,7 @@ impl ReplayStage {
fn update_slot_propagated_threshold_from_votes(
newly_voted_pubkeys: &mut Vec<impl Deref<Target = Pubkey>>,
cluster_slot_pubkeys: &mut Vec<impl Deref<Target = Pubkey>>,
leader_bank: &Bank,
leader_propagated_stats: &mut PropagatedStats,
all_pubkeys: &mut HashSet<Rc<Pubkey>>,
@ -1331,30 +1365,32 @@ impl ReplayStage {
return false;
}
// Remove the valdators that we already know voted for this slot
// Those validators are safe to drop because they don't to be ported back any
// further because parents must have:
// 1) Also recorded this validator already, or
// Remove the vote/node pubkeys that we already know voted for this
// slot. These vote accounts/validator identities are safe to drop
// because they don't need to be ported back any further, because earlier
// parents must have:
// 1) Also recorded these pubkeys already, or
// 2) Already reached the propagation threshold, in which case
// they no longer need to track the set of propagated validators
newly_voted_pubkeys.retain(|voting_pubkey| {
if !leader_propagated_stats
newly_voted_pubkeys.retain(|vote_pubkey| {
let exists = leader_propagated_stats
.propagated_validators
.contains(&**voting_pubkey)
{
let mut cached_pubkey: Option<Rc<Pubkey>> =
all_pubkeys.get(&**voting_pubkey).cloned();
if cached_pubkey.is_none() {
let new_pubkey = Rc::new(**voting_pubkey);
all_pubkeys.insert(new_pubkey.clone());
cached_pubkey = Some(new_pubkey);
}
let voting_pubkey = cached_pubkey.unwrap();
leader_propagated_stats
.propagated_validators
.insert(voting_pubkey.clone());
leader_propagated_stats.propagated_validators_stake +=
leader_bank.epoch_vote_account_stake(&voting_pubkey);
.contains(&**vote_pubkey);
leader_propagated_stats.add_vote_pubkey(
&*vote_pubkey,
all_pubkeys,
leader_bank.epoch_vote_account_stake(&vote_pubkey),
);
!exists
});
cluster_slot_pubkeys.retain(|node_pubkey| {
let exists = leader_propagated_stats
.propagated_node_ids
.contains(&**node_pubkey);
leader_propagated_stats.add_node_pubkey(&*node_pubkey, all_pubkeys, leader_bank);
!exists
});
if leader_propagated_stats.total_epoch_stake == 0
|| leader_propagated_stats.propagated_validators_stake as f64
@ -1364,11 +1400,6 @@ impl ReplayStage {
leader_propagated_stats.is_propagated = true;
did_newly_reach_threshold = true
}
true
} else {
false
}
});
did_newly_reach_threshold
}
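The rewritten retain blocks above fold the old manual interning into `add_vote_pubkey`/`add_node_pubkey` and keep only the pubkeys that were not yet recorded for this leader slot, since only those still need to be ported back to earlier leader slots. The keep-if-new shape in isolation (a sketch; `record_new` is not the PR's API):

    use std::collections::HashSet;

    // Record each incoming item, retaining only the ones that were not
    // already known; `HashSet::insert` returns true exactly for new entries.
    fn record_new<T: Clone + Eq + std::hash::Hash>(
        incoming: &mut Vec<T>,
        recorded: &mut HashSet<T>,
    ) {
        incoming.retain(|item| recorded.insert(item.clone()));
    }

    fn main() {
        let mut recorded: HashSet<&str> = ["a"].into_iter().collect();
        let mut incoming = vec!["a", "b"];
        record_new(&mut incoming, &mut recorded);
        // "a" was already recorded upstream, so only "b" is ported back further
        assert_eq!(incoming, vec!["b"]);
    }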
@ -1480,20 +1511,15 @@ impl ReplayStage {
&rewards_recorder_sender,
subscriptions,
);
if let Some(leader_vote_accounts) =
child_bank.epoch_vote_accounts_for_node_id(&leader)
{
let empty: Vec<&Pubkey> = vec![];
Self::update_fork_propagated_threshold_from_votes(
progress,
leader_vote_accounts
.vote_accounts
.iter()
.collect::<Vec<_>>(),
empty,
vec![&leader],
parent_bank.slot(),
bank_forks,
all_pubkeys,
);
}
new_banks.insert(child_slot, child_bank);
}
}
@ -1619,6 +1645,7 @@ pub(crate) mod tests {
}
let vote_tracker = VoteTracker::default();
let cluster_slots = ClusterSlots::default();
let mut towers: Vec<Tower> = iter::repeat_with(|| Tower::new_for_tests(8, 0.67))
.take(validators.len())
.collect();
@ -1785,6 +1812,7 @@ pub(crate) mod tests {
&towers[i],
&mut fork_progresses[i],
&vote_tracker,
&cluster_slots,
&wrapped_bank_fork,
&mut all_pubkeys,
);
@ -1900,7 +1928,10 @@ pub(crate) mod tests {
);
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
));
let mut bank_forks = BankForks::new(0, bank0);
// Insert a non-root bank so that the propagation logic will update this
@ -2350,7 +2381,12 @@ pub(crate) mod tests {
bank_forks.write().unwrap().insert(bank1);
let arc_bank1 = bank_forks.read().unwrap().get(1).unwrap().clone();
leader_vote(&arc_bank1, &leader_voting_pubkey);
ReplayStage::update_commitment_cache(arc_bank1.clone(), leader_lamports, &lockouts_sender);
ReplayStage::update_commitment_cache(
arc_bank1.clone(),
0,
leader_lamports,
&lockouts_sender,
);
let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
let _res = bank2.transfer(10, &genesis_config_info.mint_keypair, &Pubkey::new_rand());
@ -2361,7 +2397,12 @@ pub(crate) mod tests {
bank_forks.write().unwrap().insert(bank2);
let arc_bank2 = bank_forks.read().unwrap().get(2).unwrap().clone();
leader_vote(&arc_bank2, &leader_voting_pubkey);
ReplayStage::update_commitment_cache(arc_bank2.clone(), leader_lamports, &lockouts_sender);
ReplayStage::update_commitment_cache(
arc_bank2.clone(),
0,
leader_lamports,
&lockouts_sender,
);
thread::sleep(Duration::from_millis(200));
let mut expected0 = BlockCommitment::default();
@ -2555,6 +2596,7 @@ pub(crate) mod tests {
&tower,
&mut progress,
&VoteTracker::default(),
&ClusterSlots::default(),
&bank_forks,
&mut HashSet::new(),
);
@ -2592,6 +2634,7 @@ pub(crate) mod tests {
&tower,
&mut progress,
&VoteTracker::default(),
&ClusterSlots::default(),
&bank_forks,
&mut HashSet::new(),
);
@ -2624,6 +2667,7 @@ pub(crate) mod tests {
&tower,
&mut progress,
&VoteTracker::default(),
&ClusterSlots::default(),
&bank_forks,
&mut HashSet::new(),
);
@ -2682,6 +2726,7 @@ pub(crate) mod tests {
&tower,
&mut progress,
&VoteTracker::default(),
&ClusterSlots::default(),
&bank_forks,
&mut HashSet::new(),
);
@ -2746,27 +2791,76 @@ pub(crate) mod tests {
.take(10)
.collect();
let vote_pubkeys: Vec<_> = keypairs
let new_vote_pubkeys: Vec<_> = keypairs
.values()
.map(|keys| keys.vote_keypair.pubkey())
.collect();
let new_node_pubkeys: Vec<_> = keypairs
.values()
.map(|keys| keys.node_keypair.pubkey())
.collect();
// Once 4/10 validators have voted, we have hit threshold
run_test_update_slot_propagated_threshold_from_votes(&keypairs, &new_vote_pubkeys, &[], 4);
// Adding the same node pubkeys instead of the corresponding
// vote pubkeys should be equivalent
run_test_update_slot_propagated_threshold_from_votes(&keypairs, &[], &new_node_pubkeys, 4);
// Adding the same node pubkeys in the same order as their
// corresponding vote accounts is redundant, so we don't
// reach the threshold any sooner.
run_test_update_slot_propagated_threshold_from_votes(
&keypairs,
&new_vote_pubkeys,
&new_node_pubkeys,
4,
);
// However, if we add different node pubkeys than the
// vote accounts, we should hit threshold much faster
// because now we are getting 2 new pubkeys on each
// iteration instead of 1, so by the 2nd iteration
// we should have 4/10 validators voting
run_test_update_slot_propagated_threshold_from_votes(
&keypairs,
&new_vote_pubkeys[0..5],
&new_node_pubkeys[5..],
2,
);
}
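The 4-of-10 figure in these cases follows from the propagation check: a leader slot counts as propagated once observed stake strictly exceeds `SUPERMINORITY_THRESHOLD`, assumed here to be the 1/3 defined in replay_stage. With ten equally staked validators:

    fn main() {
        const SUPERMINORITY_THRESHOLD: f64 = 1.0 / 3.0; // assumed replay_stage value
        let total_stake = 10_000u64 * 10;
        // 3 of 10 validators: 0.3 <= 1/3, not yet propagated
        assert!((3 * 10_000) as f64 / total_stake as f64 <= SUPERMINORITY_THRESHOLD);
        // 4 of 10 validators: 0.4 > 1/3, threshold reached
        assert!((4 * 10_000) as f64 / total_stake as f64 > SUPERMINORITY_THRESHOLD);
    }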
fn run_test_update_slot_propagated_threshold_from_votes(
all_keypairs: &HashMap<Pubkey, ValidatorVoteKeypairs>,
new_vote_pubkeys: &[Pubkey],
new_node_pubkeys: &[Pubkey],
success_index: usize,
) {
let stake = 10_000;
let (bank_forks, _) = initialize_state(&keypairs, stake);
let (bank_forks, _) = initialize_state(&all_keypairs, stake);
let root_bank = bank_forks.root_bank().clone();
let mut propagated_stats = PropagatedStats {
total_epoch_stake: stake * 10,
total_epoch_stake: stake * all_keypairs.len() as u64,
..PropagatedStats::default()
};
let mut all_pubkeys = HashSet::new();
let mut child_reached_threshold = false;
for i in 0..10 {
let child_reached_threshold = false;
for i in 0..std::cmp::max(new_vote_pubkeys.len(), new_node_pubkeys.len()) {
propagated_stats.is_propagated = false;
let mut newly_voted_pubkeys = vote_pubkeys[..i].iter().cloned().map(Arc::new).collect();
let len = std::cmp::min(i, new_vote_pubkeys.len());
let mut voted_pubkeys = new_vote_pubkeys[..len]
.iter()
.cloned()
.map(Arc::new)
.collect();
let len = std::cmp::min(i, new_node_pubkeys.len());
let mut node_pubkeys = new_node_pubkeys[..len]
.iter()
.cloned()
.map(Arc::new)
.collect();
let did_newly_reach_threshold =
ReplayStage::update_slot_propagated_threshold_from_votes(
&mut newly_voted_pubkeys,
&mut voted_pubkeys,
&mut node_pubkeys,
&root_bank,
&mut propagated_stats,
&mut all_pubkeys,
@ -2775,20 +2869,28 @@ pub(crate) mod tests {
// Only the i'th voted pubkey should be new (everything else was
// inserted in previous iteration of the loop), so those redundant
// pubkeys should be filtered out
let added_pubkeys = {
if i == 0 {
// pubkeys should have been filtered out
let remaining_vote_pubkeys = {
if i == 0 || i >= new_vote_pubkeys.len() {
vec![]
} else {
vec![Arc::new(vote_pubkeys[i - 1])]
vec![Arc::new(new_vote_pubkeys[i - 1])]
}
};
assert_eq!(newly_voted_pubkeys, added_pubkeys);
let remaining_node_pubkeys = {
if i == 0 || i >= new_node_pubkeys.len() {
vec![]
} else {
vec![Arc::new(new_node_pubkeys[i - 1])]
}
};
assert_eq!(voted_pubkeys, remaining_vote_pubkeys);
assert_eq!(node_pubkeys, remaining_node_pubkeys);
// If we crossed the superminority threshold, then
// `did_newly_reach_threshold == true`, otherwise the
// threshold has not been reached
if i >= 4 {
if i >= success_index {
assert!(propagated_stats.is_propagated);
assert!(did_newly_reach_threshold);
} else {
@ -2796,21 +2898,29 @@ pub(crate) mod tests {
assert!(!did_newly_reach_threshold);
}
}
}
#[test]
fn test_update_slot_propagated_threshold_from_votes2() {
let mut empty: Vec<&Pubkey> = vec![];
let genesis_config = create_genesis_config(100_000_000).genesis_config;
let root_bank = Bank::new(&genesis_config);
let stake = 10_000;
// Simulate a child slot seeing threshold (`child_reached_threshold` = true),
// then the parent should also be marked as having reached threshold,
// even if there are no new pubkeys to add (`newly_voted_pubkeys.is_empty()`)
propagated_stats = PropagatedStats {
let mut propagated_stats = PropagatedStats {
total_epoch_stake: stake * 10,
..PropagatedStats::default()
};
propagated_stats.total_epoch_stake = stake * 10;
all_pubkeys = HashSet::new();
child_reached_threshold = true;
let mut all_pubkeys = HashSet::new();
let child_reached_threshold = true;
let mut newly_voted_pubkeys: Vec<Arc<Pubkey>> = vec![];
assert!(ReplayStage::update_slot_propagated_threshold_from_votes(
&mut newly_voted_pubkeys,
&mut empty,
&root_bank,
&mut propagated_stats,
&mut all_pubkeys,
@ -2825,19 +2935,20 @@ pub(crate) mod tests {
};
propagated_stats.is_propagated = true;
all_pubkeys = HashSet::new();
child_reached_threshold = true;
newly_voted_pubkeys = vec![];
assert!(!ReplayStage::update_slot_propagated_threshold_from_votes(
&mut newly_voted_pubkeys,
&mut empty,
&root_bank,
&mut propagated_stats,
&mut all_pubkeys,
child_reached_threshold,
));
child_reached_threshold = false;
let child_reached_threshold = false;
assert!(!ReplayStage::update_slot_propagated_threshold_from_votes(
&mut newly_voted_pubkeys,
&mut empty,
&root_bank,
&mut propagated_stats,
&mut all_pubkeys,
@ -2906,6 +3017,7 @@ pub(crate) mod tests {
&mut HashSet::new(),
&RwLock::new(bank_forks),
&vote_tracker,
&ClusterSlots::default(),
);
let propagated_stats = &progress_map.get(&10).unwrap().propagated_stats;
@ -2992,6 +3104,7 @@ pub(crate) mod tests {
&mut HashSet::new(),
&RwLock::new(bank_forks),
&vote_tracker,
&ClusterSlots::default(),
);
for i in 1..=10 {
@ -3079,6 +3192,7 @@ pub(crate) mod tests {
&mut HashSet::new(),
&RwLock::new(bank_forks),
&vote_tracker,
&ClusterSlots::default(),
);
// Only the first 5 banks should have reached the threshold

View File

@ -13,7 +13,7 @@ use solana_ledger::{
bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
};
use solana_perf::packet::PACKET_DATA_SIZE;
use solana_runtime::{bank::Bank, status_cache::SignatureConfirmationStatus};
use solana_runtime::bank::Bank;
use solana_sdk::{
clock::{Slot, UnixTimestamp},
commitment_config::{CommitmentConfig, CommitmentLevel},
@ -196,11 +196,9 @@ impl JsonRpcRequestProcessor {
match signature {
Err(e) => Err(e),
Ok(sig) => {
let status = bank.get_signature_confirmation_status(&sig);
let status = bank.get_signature_status(&sig);
match status {
Some(SignatureConfirmationStatus { status, .. }) => {
new_response(bank, status.is_ok())
}
Some(status) => new_response(bank, status.is_ok()),
None => new_response(bank, false),
}
}
@ -409,21 +407,24 @@ impl JsonRpcRequestProcessor {
let bank = self.bank(commitment);
for signature in signatures {
let status = bank.get_signature_confirmation_status(&signature).map(
|SignatureConfirmationStatus {
let status = bank
.get_signature_status_slot(&signature)
.map(|(slot, status)| {
let r_block_commitment_cache = self.block_commitment_cache.read().unwrap();
let confirmations = if r_block_commitment_cache.root() >= slot {
None
} else {
r_block_commitment_cache
.get_confirmation_count(slot)
.or(Some(0))
};
TransactionStatus {
slot,
status,
confirmations,
}| TransactionStatus {
slot,
status,
confirmations: if confirmations <= MAX_LOCKOUT_HISTORY {
Some(confirmations)
} else {
None
},
},
);
}
});
statuses.push(status);
}
Ok(Response {
@ -1222,18 +1223,6 @@ pub mod tests {
) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY]);
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
block_commitment
.entry(0)
.or_insert(commitment_slot0.clone());
block_commitment
.entry(1)
.or_insert(commitment_slot1.clone());
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(blockstore);
@ -1249,6 +1238,24 @@ pub mod tests {
blockstore.clone(),
);
let mut commitment_slot0 = BlockCommitment::default();
commitment_slot0.increase_confirmation_stake(2, 9);
let mut commitment_slot1 = BlockCommitment::default();
commitment_slot1.increase_confirmation_stake(1, 9);
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
block_commitment
.entry(0)
.or_insert(commitment_slot0.clone());
block_commitment
.entry(1)
.or_insert(commitment_slot1.clone());
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
block_commitment,
10,
bank.clone(),
0,
)));
// Add timestamp vote to blockstore
let vote = Vote {
slots: vec![1],
@ -1770,7 +1777,9 @@ pub mod tests {
let result: Option<TransactionStatus> =
serde_json::from_value(json["result"]["value"][0].clone())
.expect("actual response deserialization");
assert_eq!(expected_res, result.as_ref().unwrap().status);
let result = result.as_ref().unwrap();
assert_eq!(expected_res, result.status);
assert_eq!(None, result.confirmations);
// Test getSignatureStatus request on unprocessed tx
let tx = system_transaction::transfer(&alice, &bob_pubkey, 10, blockhash);
@ -2119,6 +2128,8 @@ pub mod tests {
fn test_rpc_processor_get_block_commitment() {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let bank_forks = new_bank_forks().0;
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY]);
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
@ -2128,8 +2139,12 @@ pub mod tests {
block_commitment
.entry(1)
.or_insert(commitment_slot1.clone());
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new(
block_commitment,
42,
bank_forks.read().unwrap().working_bank(),
0,
)));
let ledger_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&ledger_path).unwrap();
@ -2137,7 +2152,7 @@ pub mod tests {
config.enable_validator_exit = true;
let request_processor = JsonRpcRequestProcessor::new(
config,
new_bank_forks().0,
bank_forks,
block_commitment_cache,
Arc::new(blockstore),
StorageState::default(),
@ -2201,7 +2216,7 @@ pub mod tests {
.get_block_commitment(0)
.map(|block_commitment| block_commitment.commitment)
);
assert_eq!(total_stake, 42);
assert_eq!(total_stake, 10);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}}"#);
@ -2219,7 +2234,7 @@ pub mod tests {
panic!("Expected single response");
};
assert_eq!(commitment_response.commitment, None);
assert_eq!(commitment_response.total_stake, 42);
assert_eq!(commitment_response.total_stake, 10);
}
#[test]

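`getSignatureStatuses` now derives `confirmations` from the commitment cache instead of per-bank bookkeeping: a slot at or below the cache's root is rooted (`None`, i.e. maximum confirmations), and anything newer reports the cached confirmation count, defaulting to 0 when no votes have been observed yet. The closure's decision, reduced to a sketch:

    // root: highest rooted slot per the commitment cache;
    // cached: the cache's confirmation count for `slot`, if any.
    fn confirmations(root: u64, slot: u64, cached: Option<usize>) -> Option<usize> {
        if root >= slot {
            None // rooted: clients treat None as max confirmations
        } else {
            cached.or(Some(0)) // seen but not yet voted on -> 0
        }
    }

    fn main() {
        assert_eq!(confirmations(10, 5, None), None); // already rooted
        assert_eq!(confirmations(10, 12, Some(3)), Some(3));
        assert_eq!(confirmations(10, 12, None), Some(0)); // no votes observed yet
    }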
View File

@ -312,7 +312,10 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
#[cfg(test)]
mod tests {
use super::*;
use crate::rpc_subscriptions::tests::robust_poll_or_panic;
use crate::{
commitment::{BlockCommitment, BlockCommitmentCache},
rpc_subscriptions::tests::robust_poll_or_panic,
};
use jsonrpc_core::{futures::sync::mpsc, Response};
use jsonrpc_pubsub::{PubSubHandler, Session};
use solana_budget_program::{self, budget_instruction};
@ -325,7 +328,12 @@ mod tests {
system_program, system_transaction,
transaction::{self, Transaction},
};
use std::{sync::RwLock, thread::sleep, time::Duration};
use std::{
collections::HashMap,
sync::{atomic::AtomicBool, RwLock},
thread::sleep,
time::Duration,
};
fn process_transaction_and_notify(
bank_forks: &Arc<RwLock<BankForks>>,
@ -358,8 +366,13 @@ mod tests {
let bank = Bank::new(&genesis_config);
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default();
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
)),
..RpcSolPubSubImpl::default()
};
// Test signature subscriptions
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
@ -457,7 +470,13 @@ mod tests {
let blockhash = bank.last_blockhash();
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let rpc = RpcSolPubSubImpl::default();
let rpc = RpcSolPubSubImpl {
subscriptions: Arc::new(RpcSubscriptions::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
)),
..RpcSolPubSubImpl::default()
};
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(
@ -591,7 +610,13 @@ mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bob = Keypair::new();
let rpc = RpcSolPubSubImpl::default();
let mut rpc = RpcSolPubSubImpl::default();
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
@ -622,7 +647,12 @@ mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
let bob = Keypair::new();
let rpc = RpcSolPubSubImpl::default();
let mut rpc = RpcSolPubSubImpl::default();
let exit = Arc::new(AtomicBool::new(false));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests()));
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
rpc.subscriptions = Arc::new(subscriptions);
let session = create_session();
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
rpc.account_subscribe(session, subscriber, bob.pubkey().to_string(), Some(2));
@ -640,10 +670,32 @@ mod tests {
let bank0 = bank_forks.read().unwrap()[0].clone();
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank1 = bank_forks.read().unwrap()[1].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment =
BlockCommitmentCache::new(block_commitment, 10, bank1.clone(), 0);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(1, &bank_forks);
let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2);
bank_forks.write().unwrap().insert(bank2);
let bank2 = bank_forks.read().unwrap()[2].clone();
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(2, 10);
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
let mut new_block_commitment = BlockCommitmentCache::new(block_commitment, 10, bank2, 0);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
drop(w_block_commitment_cache);
rpc.subscriptions.notify_subscribers(2, &bank_forks);
let expected = json!({
"jsonrpc": "2.0",

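The pubsub tests above install a fresh `BlockCommitmentCache` by building it outside the lock and swapping it in under a brief write guard, the same `std::mem::swap` pattern `AggregateCommitmentService` uses in commitment.rs. The pattern in isolation (a sketch, not the PR's API):

    use std::sync::RwLock;

    // Build the replacement outside the lock, swap under a short-lived write
    // guard, and hand the previous value back to the caller.
    fn replace<T>(shared: &RwLock<T>, mut fresh: T) -> T {
        let mut guard = shared.write().unwrap();
        std::mem::swap(&mut *guard, &mut fresh);
        fresh // now holds the old value
    }

    fn main() {
        let cache = RwLock::new(1u64);
        let old = replace(&cache, 2);
        assert_eq!(old, 1);
        assert_eq!(*cache.read().unwrap(), 2);
    }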
View File

@ -1,14 +1,20 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl};
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::{
rpc_pubsub::{RpcSolPubSub, RpcSolPubSubImpl},
rpc_subscriptions::RpcSubscriptions,
};
use jsonrpc_pubsub::{PubSubHandler, Session};
use jsonrpc_ws_server::{RequestContext, ServerBuilder};
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
};
pub struct PubSubService {
thread_hdl: JoinHandle<()>,
@ -66,13 +72,20 @@ impl PubSubService {
#[cfg(test)]
mod tests {
use super::*;
use std::net::{IpAddr, Ipv4Addr};
use crate::commitment::BlockCommitmentCache;
use std::{
net::{IpAddr, Ipv4Addr},
sync::RwLock,
};
#[test]
fn test_pubsub_new() {
let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
));
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
let thread = pubsub_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-pubsub");


@ -1,5 +1,6 @@
//! The `pubsub` module implements a threaded subscription service on client RPC request
use crate::commitment::BlockCommitmentCache;
use core::hash::Hash;
use jsonrpc_core::futures::Future;
use jsonrpc_pubsub::{
@ -14,11 +15,14 @@ use solana_sdk::{
account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError, SendError, Sender};
use std::sync::{
atomic::{AtomicBool, Ordering},
mpsc::{Receiver, RecvTimeoutError, SendError, Sender},
};
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use std::{
cmp::min,
collections::{HashMap, HashSet},
iter,
sync::{Arc, Mutex, RwLock},
@ -80,11 +84,7 @@ fn add_subscription<K, S>(
{
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let confirmations = confirmations.unwrap_or(0);
let confirmations = if confirmations > MAX_LOCKOUT_HISTORY {
MAX_LOCKOUT_HISTORY
} else {
confirmations
};
let confirmations = min(confirmations, MAX_LOCKOUT_HISTORY + 1);
if let Some(current_hashmap) = subscriptions.get_mut(&hashmap_key) {
current_hashmap.insert(sub_id, (sink, confirmations));
return;
@ -120,8 +120,8 @@ where
fn check_confirmations_and_notify<K, S, B, F, X>(
subscriptions: &HashMap<K, HashMap<SubscriptionId, (Sink<Response<S>>, Confirmations)>>,
hashmap_key: &K,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
bank_method: B,
filter_results: F,
notifier: &RpcNotifier,
@ -133,6 +133,10 @@ where
F: Fn(X, u64) -> Box<dyn Iterator<Item = S>>,
X: Clone + Serialize,
{
let mut confirmation_slots: HashMap<usize, Slot> = HashMap::new();
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
let current_slot = r_block_commitment_cache.slot();
let root = r_block_commitment_cache.root();
let current_ancestors = bank_forks
.read()
.unwrap()
@ -140,27 +144,29 @@ where
.unwrap()
.ancestors
.clone();
for (slot, _) in current_ancestors.iter() {
if let Some(confirmations) = r_block_commitment_cache.get_confirmation_count(*slot) {
confirmation_slots.entry(confirmations).or_insert(*slot);
}
}
drop(r_block_commitment_cache);
let mut notified_set: HashSet<SubscriptionId> = HashSet::new();
if let Some(hashmap) = subscriptions.get(hashmap_key) {
for (sub_id, (sink, confirmations)) in hashmap.iter() {
let desired_slot: Vec<u64> = current_ancestors
.iter()
.filter(|(_, &v)| v == *confirmations)
.map(|(k, _)| k)
.cloned()
.collect();
let root: Vec<u64> = current_ancestors
.iter()
.filter(|(_, &v)| v == 32)
.map(|(k, _)| k)
.cloned()
.collect();
let root = if root.len() == 1 { root[0] } else { 0 };
if desired_slot.len() == 1 {
let slot = desired_slot[0];
let desired_bank = bank_forks.read().unwrap().get(slot).unwrap().clone();
let results = bank_method(&desired_bank, hashmap_key);
let desired_slot = if *confirmations == 0 {
Some(&current_slot)
} else if *confirmations == MAX_LOCKOUT_HISTORY + 1 {
Some(&root)
} else {
confirmation_slots.get(confirmations)
};
if let Some(&slot) = desired_slot {
let results = {
let bank_forks = bank_forks.read().unwrap();
let desired_bank = bank_forks.get(slot).unwrap();
bank_method(&desired_bank, hashmap_key)
};
for result in filter_results(results, root) {
notifier.notify(
Response {
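The branch structure above is the heart of this change: zero requested confirmations map to the slot currently being notified, the maximum (`MAX_LOCKOUT_HISTORY + 1`) maps to the root, and anything in between is looked up in the precomputed `confirmation_slots` map. A standalone sketch of just that rule, with illustrative names and `MAX_LOCKOUT_HISTORY` hard-coded to the value it has in `solana_vote_program` (an assumption here, not code from this PR):

```rust
use std::collections::HashMap;

type Slot = u64;
const MAX_LOCKOUT_HISTORY: usize = 31; // assumed; defined in solana_vote_program

// Map a subscription's requested confirmation count to the slot whose bank
// should be queried for notifications.
fn desired_slot(
    confirmations: usize,
    current_slot: Slot,
    root: Slot,
    confirmation_slots: &HashMap<usize, Slot>,
) -> Option<Slot> {
    if confirmations == 0 {
        Some(current_slot) // no confirmations requested: use the newest bank
    } else if confirmations == MAX_LOCKOUT_HISTORY + 1 {
        Some(root) // maximum confirmations: only the rooted bank qualifies
    } else {
        confirmation_slots.get(&confirmations).copied()
    }
}
```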
@ -236,7 +242,10 @@ pub struct RpcSubscriptions {
impl Default for RpcSubscriptions {
fn default() -> Self {
Self::new(&Arc::new(AtomicBool::new(false)))
Self::new(
&Arc::new(AtomicBool::new(false)),
Arc::new(RwLock::new(BlockCommitmentCache::default())),
)
}
}
@ -249,7 +258,10 @@ impl Drop for RpcSubscriptions {
}
impl RpcSubscriptions {
pub fn new(exit: &Arc<AtomicBool>) -> Self {
pub fn new(
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> Self {
let (notification_sender, notification_receiver): (
Sender<NotificationEntry>,
Receiver<NotificationEntry>,
@ -288,6 +300,7 @@ impl RpcSubscriptions {
signature_subscriptions_clone,
slot_subscriptions_clone,
root_subscriptions_clone,
block_commitment_cache,
);
})
.unwrap();
@ -307,8 +320,8 @@ impl RpcSubscriptions {
fn check_account(
pubkey: &Pubkey,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
account_subscriptions: Arc<RpcAccountSubscriptions>,
notifier: &RpcNotifier,
) {
@ -316,8 +329,8 @@ impl RpcSubscriptions {
check_confirmations_and_notify(
&subscriptions,
pubkey,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_account_modified_since_parent,
filter_account_result,
notifier,
@ -326,8 +339,8 @@ impl RpcSubscriptions {
fn check_program(
program_id: &Pubkey,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
program_subscriptions: Arc<RpcProgramSubscriptions>,
notifier: &RpcNotifier,
) {
@ -335,8 +348,8 @@ impl RpcSubscriptions {
check_confirmations_and_notify(
&subscriptions,
program_id,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_program_accounts_modified_since_parent,
filter_program_results,
notifier,
@ -345,8 +358,8 @@ impl RpcSubscriptions {
fn check_signature(
signature: &Signature,
current_slot: Slot,
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
notifier: &RpcNotifier,
) {
@ -354,8 +367,8 @@ impl RpcSubscriptions {
let notified_ids = check_confirmations_and_notify(
&subscriptions,
signature,
current_slot,
bank_forks,
block_commitment_cache,
Bank::get_signature_status_processed_since_parent,
filter_signature_result,
notifier,
@ -499,6 +512,7 @@ impl RpcSubscriptions {
signature_subscriptions: Arc<RpcSignatureSubscriptions>,
slot_subscriptions: Arc<RpcSlotSubscriptions>,
root_subscriptions: Arc<RpcRootSubscriptions>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) {
loop {
if exit.load(Ordering::Relaxed) {
@ -518,7 +532,7 @@ impl RpcSubscriptions {
notifier.notify(root, sink);
}
}
NotificationEntry::Bank((current_slot, bank_forks)) => {
NotificationEntry::Bank((_current_slot, bank_forks)) => {
let pubkeys: Vec<_> = {
let subs = account_subscriptions.read().unwrap();
subs.keys().cloned().collect()
@ -526,8 +540,8 @@ impl RpcSubscriptions {
for pubkey in &pubkeys {
Self::check_account(
pubkey,
current_slot,
&bank_forks,
&block_commitment_cache,
account_subscriptions.clone(),
&notifier,
);
@ -540,8 +554,8 @@ impl RpcSubscriptions {
for program_id in &programs {
Self::check_program(
program_id,
current_slot,
&bank_forks,
&block_commitment_cache,
program_subscriptions.clone(),
&notifier,
);
@ -554,8 +568,8 @@ impl RpcSubscriptions {
for signature in &signatures {
Self::check_signature(
signature,
current_slot,
&bank_forks,
&block_commitment_cache,
signature_subscriptions.clone(),
&notifier,
);
@ -596,6 +610,7 @@ impl RpcSubscriptions {
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use crate::commitment::BlockCommitment;
use jsonrpc_core::futures::{self, stream::Stream};
use jsonrpc_pubsub::typed::Subscriber;
use solana_budget_program;
@ -663,7 +678,10 @@ pub(crate) mod tests {
Subscriber::new_test("accountNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_account_subscription(alice.pubkey(), None, sub_id.clone(), subscriber);
assert!(subscriptions
@ -732,7 +750,10 @@ pub(crate) mod tests {
Subscriber::new_test("programNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_program_subscription(
solana_budget_program::id(),
None,
@ -812,27 +833,41 @@ pub(crate) mod tests {
.unwrap()
.process_transaction(&processed_tx)
.unwrap();
let bank1 = bank_forks[1].clone();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 10);
let cache1 = BlockCommitment::default();
let (past_bank_sub, _id_receiver, past_bank_recv) =
let mut block_commitment = HashMap::new();
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 10, bank1, 0);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions =
RpcSubscriptions::new(&exit, Arc::new(RwLock::new(block_commitment_cache)));
let (past_bank_sub1, _id_receiver, past_bank_recv1) =
Subscriber::new_test("signatureNotification");
let (past_bank_sub2, _id_receiver, past_bank_recv2) =
Subscriber::new_test("signatureNotification");
let (processed_sub, _id_receiver, processed_recv) =
Subscriber::new_test("signatureNotification");
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(0),
SubscriptionId::Number(1 as u64),
Subscriber::new_test("signatureNotification").0,
past_bank_sub1,
);
subscriptions.add_signature_subscription(
past_bank_tx.signatures[0],
Some(1),
SubscriptionId::Number(2 as u64),
past_bank_sub,
past_bank_sub2,
);
subscriptions.add_signature_subscription(
processed_tx.signatures[0],
@ -857,41 +892,46 @@ pub(crate) mod tests {
subscriptions.notify_subscribers(1, &bank_forks);
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
let expected = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": 0 },
"value": expected_res,
},
"subscription": 2,
struct Notification {
slot: Slot,
id: u64,
}
});
let (response, _) = robust_poll_or_panic(past_bank_recv);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
let expected = json!({
let expected_notification = |exp: Notification| -> String {
let json = json!({
"jsonrpc": "2.0",
"method": "signatureNotification",
"params": {
"result": {
"context": { "slot": 1 },
"value": expected_res,
"context": { "slot": exp.slot },
"value": &expected_res,
},
"subscription": 3,
"subscription": exp.id,
}
});
serde_json::to_string(&json).unwrap()
};
// Expect to receive a notification from bank 1 because this subscription is
// looking for 0 confirmations and so checks the current bank
let expected = expected_notification(Notification { slot: 1, id: 1 });
let (response, _) = robust_poll_or_panic(past_bank_recv1);
assert_eq!(expected, response);
// Expect to receive a notification from bank 0 because this subscription is
// looking for 1 confirmation and so checks the past bank
let expected = expected_notification(Notification { slot: 0, id: 2 });
let (response, _) = robust_poll_or_panic(past_bank_recv2);
assert_eq!(expected, response);
let expected = expected_notification(Notification { slot: 1, id: 3 });
let (response, _) = robust_poll_or_panic(processed_recv);
assert_eq!(serde_json::to_string(&expected).unwrap(), response);
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
assert_eq!(expected, response);
// Subscription should be automatically removed after notification
let sig_subs = subscriptions.signature_subscriptions.read().unwrap();
assert!(!sig_subs.contains_key(&processed_tx.signatures[0]));
// Only one notification is expected for signature processed in previous bank
assert_eq!(sig_subs.get(&past_bank_tx.signatures[0]).unwrap().len(), 1);
assert!(!sig_subs.contains_key(&past_bank_tx.signatures[0]));
// Unprocessed signature subscription should not be removed
assert_eq!(
@ -906,7 +946,10 @@ pub(crate) mod tests {
Subscriber::new_test("slotNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_slot_subscription(sub_id.clone(), subscriber);
assert!(subscriptions
@ -944,7 +987,10 @@ pub(crate) mod tests {
Subscriber::new_test("rootNotification");
let sub_id = SubscriptionId::Number(0 as u64);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = RpcSubscriptions::new(&exit);
let subscriptions = RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
);
subscriptions.add_root_subscription(sub_id.clone(), subscriber);
assert!(subscriptions


@ -2,6 +2,7 @@ use crate::{
cluster_info::{ClusterInfo, ClusterInfoError},
cluster_slots::ClusterSlots,
contact_info::ContactInfo,
repair_service::RepairStats,
result::{Error, Result},
weighted_shuffle::weighted_best,
};
@ -46,6 +47,16 @@ impl RepairType {
}
}
#[derive(Default)]
pub struct ServeRepairStats {
pub total_packets: usize,
pub processed: usize,
pub self_repair: usize,
pub window_index: usize,
pub highest_window_index: usize,
pub orphan: usize,
}
/// Window protocol messages
#[derive(Serialize, Deserialize, Debug)]
enum RepairProtocol {
@ -106,6 +117,7 @@ impl ServeRepair {
from_addr: &SocketAddr,
blockstore: Option<&Arc<Blockstore>>,
request: RepairProtocol,
stats: &mut ServeRepairStats,
) -> Option<Packets> {
let now = Instant::now();
@ -113,18 +125,14 @@ impl ServeRepair {
let my_id = me.read().unwrap().keypair.pubkey();
let from = Self::get_repair_sender(&request);
if from.id == my_id {
warn!(
"{}: Ignored received repair request from ME {}",
my_id, from.id,
);
inc_new_counter_debug!("serve_repair-handle-repair--eq", 1);
stats.self_repair += 1;
return None;
}
let (res, label) = {
match &request {
RepairProtocol::WindowIndex(from, slot, shred_index) => {
inc_new_counter_debug!("serve_repair-request-window-index", 1);
stats.window_index += 1;
(
Self::run_window_request(
recycler,
@ -140,7 +148,7 @@ impl ServeRepair {
}
RepairProtocol::HighestWindowIndex(_, slot, highest_index) => {
inc_new_counter_debug!("serve_repair-request-highest-window-index", 1);
stats.highest_window_index += 1;
(
Self::run_highest_window_request(
recycler,
@ -153,7 +161,7 @@ impl ServeRepair {
)
}
RepairProtocol::Orphan(_, slot) => {
inc_new_counter_debug!("serve_repair-request-orphan", 1);
stats.orphan += 1;
(
Self::run_orphan(
recycler,
@ -187,15 +195,42 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
stats: &mut ServeRepairStats,
) -> Result<()> {
//TODO cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
stats.total_packets += reqs.packets.len();
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
Ok(())
}
fn report_reset_stats(me: &Arc<RwLock<Self>>, stats: &mut ServeRepairStats) {
if stats.self_repair > 0 {
let my_id = me.read().unwrap().keypair.pubkey();
warn!(
"{}: Ignored received repair requests from ME: {}",
my_id, stats.self_repair,
);
inc_new_counter_debug!("serve_repair-handle-repair--eq", stats.self_repair);
}
debug!(
"repair_listener: total_packets: {} passed: {}",
stats.total_packets, stats.processed
);
inc_new_counter_debug!("serve_repair-request-window-index", stats.window_index);
inc_new_counter_debug!(
"serve_repair-request-highest-window-index",
stats.highest_window_index
);
inc_new_counter_debug!("serve_repair-request-orphan", stats.orphan);
*stats = ServeRepairStats::default();
}
pub fn listen(
me: Arc<RwLock<Self>>,
blockstore: Option<Arc<Blockstore>>,
@ -207,13 +242,17 @@ impl ServeRepair {
let recycler = PacketsRecycler::default();
Builder::new()
.name("solana-repair-listen".to_string())
.spawn(move || loop {
.spawn(move || {
let mut last_print = Instant::now();
let mut stats = ServeRepairStats::default();
loop {
let result = Self::run_listen(
&me,
&recycler,
blockstore.as_ref(),
&requests_receiver,
&response_sender,
&mut stats,
);
match result {
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
@ -222,7 +261,12 @@ impl ServeRepair {
if exit.load(Ordering::Relaxed) {
return;
}
if last_print.elapsed().as_secs() > 2 {
Self::report_reset_stats(&me, &mut stats);
last_print = Instant::now();
}
thread_mem_usage::datapoint("solana-repair-listen");
}
})
.unwrap()
}
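The restructured loop above follows a common aggregate-then-report pattern: counters accumulate across iterations and are flushed at most once per reporting interval instead of emitting a metric per packet. A minimal self-contained sketch of the pattern (illustrative names, not this PR's code):

```rust
use std::time::{Duration, Instant};

#[derive(Default)]
struct Stats {
    processed: usize,
}

// Accumulate per-iteration counts and report/reset them at most every two
// seconds, mirroring the cadence the repair listener adopts above.
fn run(mut handle_one_batch: impl FnMut(&mut Stats), mut should_exit: impl FnMut() -> bool) {
    let mut stats = Stats::default();
    let mut last_print = Instant::now();
    while !should_exit() {
        handle_one_batch(&mut stats);
        if last_print.elapsed() > Duration::from_secs(2) {
            println!("processed: {}", stats.processed);
            stats = Stats::default();
            last_print = Instant::now();
        }
    }
}
```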
@ -233,6 +277,7 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
packets: Packets,
response_sender: &PacketSender,
stats: &mut ServeRepairStats,
) {
// iter over the packets, collect pulls separately and process everything else
let allocated = thread_mem_usage::Allocatedp::default();
@ -242,7 +287,9 @@ impl ServeRepair {
limited_deserialize(&packet.data[..packet.meta.size])
.into_iter()
.for_each(|request| {
let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request);
stats.processed += 1;
let rsp =
Self::handle_repair(me, recycler, &from_addr, blockstore, request, stats);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
@ -277,6 +324,7 @@ impl ServeRepair {
cluster_slots: &ClusterSlots,
repair_request: &RepairType,
cache: &mut RepairCache,
repair_stats: &mut RepairStats,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
@ -295,30 +343,26 @@ impl ServeRepair {
let (repair_peers, weights) = cache.get(&repair_request.slot()).unwrap();
let n = weighted_best(&weights, Pubkey::new_rand().to_bytes());
let addr = repair_peers[n].serve_repair; // send the request to the peer's serve_repair port
let out = self.map_repair_request(repair_request)?;
let out = self.map_repair_request(repair_request, repair_stats)?;
Ok((addr, out))
}
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
pub fn map_repair_request(
&self,
repair_request: &RepairType,
repair_stats: &mut RepairStats,
) -> Result<Vec<u8>> {
match repair_request {
RepairType::Shred(slot, shred_index) => {
datapoint_debug!(
"serve_repair-repair",
("repair-slot", *slot, i64),
("repair-ix", *shred_index, i64)
);
repair_stats.shred.update(*slot);
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
}
RepairType::HighestShred(slot, shred_index) => {
datapoint_info!(
"serve_repair-repair_highest",
("repair-highest-slot", *slot, i64),
("repair-highest-ix", *shred_index, i64)
);
repair_stats.highest_shred.update(*slot);
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
}
RepairType::Orphan(slot) => {
datapoint_info!("serve_repair-repair_orphan", ("repair-orphan", *slot, i64));
repair_stats.orphan.update(*slot);
Ok(self.orphan_bytes(*slot)?)
}
}
@ -583,6 +627,7 @@ mod tests {
&cluster_slots,
&RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
);
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
@ -608,6 +653,7 @@ mod tests {
&cluster_slots,
&RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
.unwrap();
assert_eq!(nxt.serve_repair, serve_repair_addr);
@ -639,6 +685,7 @@ mod tests {
&cluster_slots,
&RepairType::Shred(0, 0),
&mut HashMap::new(),
&mut RepairStats::default(),
)
.unwrap();
if rv.0 == serve_repair_addr {


@ -4,6 +4,7 @@
use crate::{
cluster_info::ClusterInfo,
commitment::BlockCommitmentCache,
contact_info::ContactInfo,
result::{Error, Result},
};
@ -11,9 +12,7 @@ use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_chacha_cuda::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::{
bank::Bank, status_cache::SignatureConfirmationStatus, storage_utils::archiver_accounts,
};
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
use solana_sdk::{
account::Account,
account_utils::StateMut,
@ -30,6 +29,7 @@ use solana_storage_program::{
storage_instruction,
storage_instruction::proof_validation,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
cmp,
collections::HashMap,
@ -185,6 +185,7 @@ impl StorageStage {
exit: &Arc<AtomicBool>,
bank_forks: &Arc<RwLock<BankForks>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
) -> Self {
let (instruction_sender, instruction_receiver) = channel();
@ -256,6 +257,7 @@ impl StorageStage {
&keypair,
&storage_keypair,
&transactions_socket,
&block_commitment_cache,
)
.unwrap_or_else(|err| {
info!("failed to send storage transaction: {:?}", err)
@ -289,6 +291,7 @@ impl StorageStage {
keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
transactions_socket: &UdpSocket,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
) -> io::Result<()> {
let working_bank = bank_forks.read().unwrap().working_bank();
let blockhash = working_bank.confirmed_last_blockhash().0;
@ -323,7 +326,12 @@ impl StorageStage {
cluster_info.read().unwrap().my_data().tpu,
)?;
sleep(Duration::from_millis(100));
if Self::poll_for_signature_confirmation(bank_forks, &transaction.signatures[0], 0)
if Self::poll_for_signature_confirmation(
bank_forks,
block_commitment_cache,
&transaction.signatures[0],
0,
)
.is_ok()
{
break;
@ -334,23 +342,24 @@ impl StorageStage {
fn poll_for_signature_confirmation(
bank_forks: &Arc<RwLock<BankForks>>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
signature: &Signature,
min_confirmed_blocks: usize,
) -> Result<()> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
let response = bank_forks
.read()
.unwrap()
.working_bank()
.get_signature_confirmation_status(signature);
if let Some(SignatureConfirmationStatus {
confirmations,
status,
..
}) = response
{
let working_bank = bank_forks.read().unwrap().working_bank();
let response = working_bank.get_signature_status_slot(signature);
if let Some((slot, status)) = response {
let confirmations = if working_bank.src.roots().contains(&slot) {
MAX_LOCKOUT_HISTORY + 1
} else {
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
r_block_commitment_cache
.get_confirmation_count(slot)
.unwrap_or(0)
};
if status.is_ok() {
if confirmed_blocks != confirmations {
now = Instant::now();
@ -655,12 +664,18 @@ mod tests {
use rayon::prelude::*;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hasher;
use solana_sdk::signature::{Keypair, Signer};
use std::cmp::{max, min};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use solana_sdk::{
hash::Hasher,
signature::{Keypair, Signer},
};
use std::{
cmp::{max, min},
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::channel,
Arc, RwLock,
},
};
#[test]
fn test_storage_stage_none_ledger() {
@ -675,6 +690,7 @@ mod tests {
&[bank.clone()],
vec![0],
)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let (_slot_sender, slot_receiver) = channel();
let storage_state = StorageState::new(
&bank.last_blockhash(),
@ -690,6 +706,7 @@ mod tests {
&exit.clone(),
&bank_forks,
&cluster_info,
block_commitment_cache,
);
exit.store(true, Ordering::Relaxed);
storage_stage.join().unwrap();


@ -160,7 +160,7 @@ impl Tvu {
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
cfg,
tvu_config.shred_version,
cluster_slots,
cluster_slots.clone(),
);
let (ledger_cleanup_slot_sender, ledger_cleanup_slot_receiver) = channel();
@ -185,7 +185,7 @@ impl Tvu {
leader_schedule_cache: leader_schedule_cache.clone(),
latest_root_senders: vec![ledger_cleanup_slot_sender],
accounts_hash_sender: Some(accounts_hash_sender),
block_commitment_cache,
block_commitment_cache: block_commitment_cache.clone(),
transaction_status_sender,
rewards_recorder_sender,
};
@ -198,6 +198,7 @@ impl Tvu {
ledger_signal_receiver,
poh_recorder.clone(),
vote_tracker,
cluster_slots,
retransmit_slots_sender,
);
@ -221,6 +222,7 @@ impl Tvu {
&exit,
&bank_forks,
&cluster_info,
block_commitment_cache,
);
Tvu {
@ -307,7 +309,10 @@ pub mod tests {
blockstore,
&StorageState::default(),
l_receiver,
&Arc::new(RpcSubscriptions::new(&exit)),
&Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
)),
&poh_recorder,
&leader_schedule_cache,
&exit,


@ -234,7 +234,7 @@ impl Validator {
let blockstore = Arc::new(blockstore);
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
let rpc_service = config.rpc_ports.map(|(rpc_port, rpc_pubsub_port)| {
if ContactInfo::is_valid_address(&node.info.rpc) {


@ -3,8 +3,8 @@ use solana_client::{
rpc_client::RpcClient,
};
use solana_core::{
rpc_pubsub_service::PubSubService, rpc_subscriptions::RpcSubscriptions,
validator::TestValidator,
commitment::BlockCommitmentCache, rpc_pubsub_service::PubSubService,
rpc_subscriptions::RpcSubscriptions, validator::TestValidator,
};
use solana_sdk::{
commitment_config::CommitmentConfig, pubkey::Pubkey, rpc_port, signature::Signer,
@ -15,7 +15,7 @@ use std::{
net::{IpAddr, SocketAddr},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
Arc, RwLock,
},
thread::sleep,
time::{Duration, Instant},
@ -85,7 +85,10 @@ fn test_slot_subscription() {
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
);
let exit = Arc::new(AtomicBool::new(false));
let subscriptions = Arc::new(RpcSubscriptions::new(&exit));
let subscriptions = Arc::new(RpcSubscriptions::new(
&exit,
Arc::new(RwLock::new(BlockCommitmentCache::default())),
));
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
std::thread::sleep(Duration::from_millis(400));


@ -16,6 +16,7 @@ use solana_sdk::{
commitment_config::CommitmentConfig,
hash::Hash,
pubkey::Pubkey,
signature::Signer,
system_transaction,
transaction::{self, Transaction},
};
@ -24,7 +25,6 @@ use std::{
fs::remove_dir_all,
net::UdpSocket,
sync::mpsc::channel,
sync::{Arc, Mutex},
thread::sleep,
time::{Duration, Instant},
};
@ -210,9 +210,11 @@ fn test_rpc_subscriptions() {
..
} = TestValidator::run();
// Create transaction signatures to subscribe to
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions: Vec<Transaction> = (0..100)
transactions_socket.connect(leader_data.tpu).unwrap();
// Create transaction signatures to subscribe to
let transactions: Vec<Transaction> = (0..1000)
.map(|_| system_transaction::transfer(&alice, &Pubkey::new_rand(), 1, genesis_hash))
.collect();
let mut signature_set: HashSet<String> = transactions
@ -220,15 +222,15 @@ fn test_rpc_subscriptions() {
.map(|tx| tx.signatures[0].to_string())
.collect();
// Track when subscriptions are ready
let (ready_sender, ready_receiver) = channel::<()>();
// Track when status notifications are received
let (status_sender, status_receiver) = channel::<(String, Response<transaction::Result<()>>)>();
// Create the pub sub runtime
let mut rt = Runtime::new().unwrap();
let rpc_pubsub_url = format!("ws://{}/", leader_data.rpc_pubsub);
let (status_sender, status_receiver) = channel::<(String, Response<transaction::Result<()>>)>();
let status_sender = Arc::new(Mutex::new(status_sender));
let (sent_sender, sent_receiver) = channel::<()>();
let sent_sender = Arc::new(Mutex::new(sent_sender));
// Subscribe to all signatures
rt.spawn({
let connect = ws::try_connect::<PubsubClient>(&rpc_pubsub_url).unwrap();
@ -237,18 +239,12 @@ fn test_rpc_subscriptions() {
.and_then(move |client| {
for sig in signature_set {
let status_sender = status_sender.clone();
let sent_sender = sent_sender.clone();
tokio::spawn(
client
.signature_subscribe(sig.clone(), None)
.and_then(move |sig_stream| {
sent_sender.lock().unwrap().send(()).unwrap();
sig_stream.for_each(move |result| {
status_sender
.lock()
.unwrap()
.send((sig.clone(), result))
.unwrap();
status_sender.send((sig.clone(), result)).unwrap();
future::ok(())
})
})
@ -257,37 +253,50 @@ fn test_rpc_subscriptions() {
}),
);
}
tokio::spawn(
client
.slot_subscribe()
.and_then(move |slot_stream| {
slot_stream.for_each(move |_| {
ready_sender.send(()).unwrap();
future::ok(())
})
})
.map_err(|err| {
eprintln!("slot sub err: {:#?}", err);
}),
);
future::ok(())
})
.map_err(|_| ())
});
// Wait for signature subscriptions
let deadline = Instant::now() + Duration::from_secs(2);
(0..transactions.len()).for_each(|_| {
sent_receiver
.recv_timeout(deadline.saturating_duration_since(Instant::now()))
.unwrap();
});
ready_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
let rpc_client = RpcClient::new_socket(leader_data.rpc);
let mut transaction_count = rpc_client
.get_transaction_count_with_commitment(CommitmentConfig::recent())
.unwrap();
let mut mint_balance = rpc_client
.get_balance_with_commitment(&alice.pubkey(), CommitmentConfig::recent())
.unwrap()
.value;
assert!(mint_balance >= transactions.len() as u64);
// Send all transactions to tpu socket for processing
transactions.iter().for_each(|tx| {
transactions_socket
.send_to(&bincode::serialize(&tx).unwrap(), leader_data.tpu)
.send(&bincode::serialize(&tx).unwrap())
.unwrap();
});
// Track mint balance to know when transactions have completed
let now = Instant::now();
let expected_transaction_count = transaction_count + transactions.len() as u64;
while transaction_count < expected_transaction_count && now.elapsed() < Duration::from_secs(5) {
transaction_count = rpc_client
.get_transaction_count_with_commitment(CommitmentConfig::recent())
.unwrap();
sleep(Duration::from_millis(200));
let expected_mint_balance = mint_balance - transactions.len() as u64;
while mint_balance != expected_mint_balance && now.elapsed() < Duration::from_secs(5) {
mint_balance = rpc_client
.get_balance_with_commitment(&alice.pubkey(), CommitmentConfig::recent())
.unwrap()
.value;
sleep(Duration::from_millis(100));
}
// Wait for all signature subscriptions
@ -300,12 +309,12 @@ fn test_rpc_subscriptions() {
assert!(signature_set.remove(&sig));
}
Err(_err) => {
eprintln!(
assert!(
false,
"recv_timeout, {}/{} signatures remaining",
signature_set.len(),
transactions.len()
);
assert!(false)
}
}
}


@ -3,28 +3,36 @@
#[cfg(test)]
mod tests {
use log::*;
use solana_core::storage_stage::{test_cluster_info, SLOTS_PER_TURN_TEST};
use solana_core::storage_stage::{StorageStage, StorageState};
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blockstore_processor;
use solana_ledger::entry;
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger};
use solana_core::{
commitment::BlockCommitmentCache,
storage_stage::{test_cluster_info, StorageStage, StorageState, SLOTS_PER_TURN_TEST},
};
use solana_ledger::{
bank_forks::BankForks,
blockstore::Blockstore,
blockstore_processor, create_new_tmp_ledger, entry,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
};
use solana_runtime::bank::Bank;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::Hash;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::transaction::Transaction;
use solana_storage_program::storage_instruction;
use solana_storage_program::storage_instruction::StorageAccountType;
use std::fs::remove_dir_all;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use solana_sdk::{
clock::DEFAULT_TICKS_PER_SLOT,
hash::Hash,
message::Message,
pubkey::Pubkey,
signature::{Keypair, Signer},
transaction::Transaction,
};
use solana_storage_program::storage_instruction::{self, StorageAccountType};
use std::{
fs::remove_dir_all,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::channel,
Arc, RwLock,
},
thread::sleep,
time::Duration,
};
#[test]
fn test_storage_stage_process_account_proofs() {
@ -52,6 +60,7 @@ mod tests {
&[bank.clone()],
vec![0],
)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let cluster_info = test_cluster_info(&keypair.pubkey());
let (bank_sender, bank_receiver) = channel();
@ -69,6 +78,7 @@ mod tests {
&exit.clone(),
&bank_forks,
&cluster_info,
block_commitment_cache,
);
bank_sender.send(vec![bank.clone()]).unwrap();
@ -171,6 +181,7 @@ mod tests {
&[bank.clone()],
vec![0],
)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let cluster_info = test_cluster_info(&keypair.pubkey());
let (bank_sender, bank_receiver) = channel();
@ -188,6 +199,7 @@ mod tests {
&exit.clone(),
&bank_forks,
&cluster_info,
block_commitment_cache,
);
bank_sender.send(vec![bank.clone()]).unwrap();


@ -15,6 +15,7 @@
* [Generate Keys](cli/generate-keys.md)
* [Send and Receive Tokens](cli/transfer-tokens.md)
* [Delegate Stake](cli/delegate-stake.md)
* [Manage Stake Accounts](cli/manage-stake-accounts.md)
* [Offline Signing](offline-signing/README.md)
* [Durable Transaction Nonces](offline-signing/durable-nonce.md)
* [Command-line Reference](cli/usage.md)


@ -118,7 +118,7 @@ Many methods that take a commitment parameter return an RpcResponse JSON object
### confirmTransaction
Returns a transaction receipt
Returns a transaction receipt. This method only searches the recent status cache of signatures, which retains all active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.
#### Parameters:
@ -656,14 +656,13 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### getSignatureStatus
Returns the status of a given signature. This method is similar to [confirmTransaction](jsonrpc-api.md#confirmtransaction) but provides more resolution for error events.
Returns the status of a given signature. This method is similar to [confirmTransaction](jsonrpc-api.md#confirmtransaction) but provides more resolution for error events. This method only searches the recent status cache of signatures, which retains all active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.
#### Parameters:
* `<array>` - An array of transaction signatures to confirm, as base-58 encoded strings
* `<object>` - (optional) Extended Rpc configuration, containing the following optional fields:
* `commitment: <string>` - [Commitment](jsonrpc-api.md#configuring-state-commitment)
* `searchTransactionHistory: <bool>` - whether to search the ledger transaction status cache, which may be expensive
#### Results:


@ -0,0 +1,74 @@
# Manage Stake Accounts
If you want to delegate stake to many different validators, you will need
to create a separate stake account for each one. If you follow the convention
of creating the first stake account at seed "0", the second at "1", the
third at "2", and so on, then the `solana-stake-accounts` tool will allow
you to operate on all of those accounts with a single invocation. You can
use it to sum up the balances of all accounts, move accounts to a new
wallet, or set new authorities.
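
Each derived address comes from the base public key, the account's decimal
index used as a seed, and the stake program as the owning program. A minimal
sketch of that derivation, assuming the workspace's `solana-sdk` and
`solana-stake-program` crates (the helper name is illustrative, not part of
the tool's public interface):

```rust
use solana_sdk::pubkey::Pubkey;

// Derive the address of the i-th stake account from a base pubkey by using
// the decimal index as the seed and the stake program as the owner.
fn derive_stake_account_address(base_pubkey: &Pubkey, i: usize) -> Pubkey {
    Pubkey::create_with_seed(base_pubkey, &i.to_string(), &solana_stake_program::id())
        .expect("short ASCII seeds are always valid")
}
```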
## Usage
### Create a stake account
Create and fund a derived stake account at the stake authority public key:
```bash
solana-stake-accounts new <FUNDING_KEYPAIR> <BASE_KEYPAIR> <AMOUNT> \
--stake-authority <PUBKEY> --withdraw-authority <PUBKEY>
```
### Count accounts
Count the number of derived accounts:
```bash
solana-stake-accounts count <BASE_PUBKEY>
```
### Get stake account balances
Sum the balance of derived stake accounts:
```bash
solana-stake-accounts balance <BASE_PUBKEY> --num-accounts <NUMBER>
```
### Get stake account addresses
List the address of each stake account derived from the given public key:
```bash
solana-stake-accounts addresses <BASE_PUBKEY> --num-accounts <NUMBER>
```
### Set new authorities
Set new authorities on each derived stake account:
```bash
solana-stake-accounts authorize <BASE_PUBKEY> \
--stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
--new-stake-authority <PUBKEY> --new-withdraw-authority <PUBKEY> \
--num-accounts <NUMBER>
```
### Relocate stake accounts
Relocate stake accounts:
```bash
solana-stake-accounts rebase <BASE_PUBKEY> <NEW_BASE_KEYPAIR> \
--stake-authority <KEYPAIR> --num-accounts <NUMBER>
```
To atomically rebase and authorize each stake account, use the 'move'
command:
```bash
solana-stake-accounts move <BASE_PUBKEY> <NEW_BASE_KEYPAIR> \
--stake-authority <KEYPAIR> --withdraw-authority <KEYPAIR> \
--new-stake-authority <PUBKEY> --new-withdraw-authority <PUBKEY> \
--num-accounts <NUMBER>
```


@ -53,7 +53,7 @@ pub enum BlockstoreError {
FsExtraError(#[from] fs_extra::error::Error),
SlotCleanedUp,
}
pub(crate) type Result<T> = std::result::Result<T, BlockstoreError>;
pub type Result<T> = std::result::Result<T, BlockstoreError>;
impl std::fmt::Display for BlockstoreError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {


@ -6928,7 +6928,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-highest-slot\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair_highest\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-highest-slot\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",
@ -6965,7 +6965,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-highest-ix\") AS \"ix\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair_highest\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-highest-ix\") AS \"ix\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@ -7245,7 +7245,7 @@
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"repair-orphan\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair_orphan\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"query": "SELECT last(\"repair-orphan\") AS \"slot\" FROM \"$testnet\".\"autogen\".\"serve_repair-repair\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",


@ -775,7 +775,14 @@ impl AccountsDB {
}
}
for account_infos in purges.values() {
// Another pass to find filtered accounts that do not meet the criterion
// that every appendvec containing them is being deleted; increment the
// storage counts for those accounts so their stores are kept.
for (pubkey, account_infos) in &purges {
let no_delete =
if account_infos.len() as u64 != accounts_index.ref_count_from_storage(&pubkey) {
true
} else {
let mut no_delete = false;
for (_slot, account_info) in account_infos {
if *store_counts.get(&account_info.store_id).unwrap() != 0 {
@ -783,6 +790,8 @@ impl AccountsDB {
break;
}
}
no_delete
};
if no_delete {
for (_slot, account_info) in account_infos {
*store_counts.get_mut(&account_info.store_id).unwrap() += 1;
@ -794,17 +803,13 @@ impl AccountsDB {
// Only keep purges where the entire history of the account in the root set
// can be purged. All AppendVecs for those updates are dead.
let mut purge_filter = Measure::start("purge_filter");
purges.retain(|pubkey, account_infos| {
let mut would_unref_count = 0;
for (_slot, account_info) in account_infos {
if *store_counts.get(&account_info.store_id).unwrap() == 0 {
would_unref_count += 1;
} else {
purges.retain(|_pubkey, account_infos| {
for (_slot, account_info) in account_infos.iter() {
if *store_counts.get(&account_info.store_id).unwrap() != 0 {
return false;
}
}
would_unref_count == accounts_index.ref_count_from_storage(&pubkey)
true
});
purge_filter.stop();
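Both passes above hinge on the same predicate: a pubkey's update history can
only be purged when every append-vec storing one of its updates is otherwise
dead. A hedged one-function sketch of that predicate (illustrative types; the
real code additionally compares the purge list's length against the ref count
from the accounts index):

```rust
use std::collections::HashMap;

// A purge candidate survives only if none of the stores holding its updates
// still has live accounts (i.e. every relevant store count is zero).
fn can_purge(store_counts: &HashMap<usize, usize>, store_ids: &[usize]) -> bool {
    store_ids.iter().all(|id| store_counts.get(id) == Some(&0))
}
```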


@ -80,10 +80,19 @@ impl<'a> StoredAccount<'a> {
}
fn sanitize(&self) -> bool {
self.sanitize_executable() && self.sanitize_lamports()
}
fn sanitize_executable(&self) -> bool {
// Sanitize executable to ensure higher 7-bits are cleared correctly.
self.ref_executable_byte() & !1 == 0
}
fn sanitize_lamports(&self) -> bool {
// Sanitize zero-lamport accounts to ensure they are identical to Account::default()
self.account_meta.lamports != 0 || self.clone_account() == Account::default()
}
fn ref_executable_byte(&self) -> &u8 {
// Use extra references to avoid value silently clamped to 1 (=true) and 0 (=false)
// Yes, this really happens; see test_set_file_crafted_executable
@ -258,7 +267,7 @@ impl AppendVec {
if !self.sanitize_layout_and_length() {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"incorrect layout/length",
"incorrect layout/length/data",
));
}
@ -686,6 +695,32 @@ pub mod tests {
);
}
#[test]
fn test_set_file_crafted_zero_lamport_account() {
let file = get_append_vec_path("test_append");
let path = &file.path;
let mut av = AppendVec::new(&path, true, 1024 * 1024);
let pubkey = Pubkey::new_rand();
let owner = Pubkey::default();
let data_len = 3 as u64;
let mut account = Account::new(0, data_len as usize, &owner);
account.data = b"abc".to_vec();
let stored_meta = StoredMeta {
write_version: 0,
pubkey,
data_len,
};
let account_with_meta = (stored_meta, account);
let index = av.append_account_test(&account_with_meta).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account_with_meta);
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
#[test]
fn test_set_file_crafted_data_len() {
let file = get_append_vec_path("test_set_file_crafted_data_len");
@ -709,7 +744,7 @@ pub mod tests {
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length");
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
#[test]
@ -733,7 +768,7 @@ pub mod tests {
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length");
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
#[test]
@ -786,6 +821,6 @@ pub mod tests {
av.flush().unwrap();
av.file_size = 0;
let result = av.set_file(path);
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length");
assert_matches!(result, Err(ref message) if message.to_string() == *"incorrect layout/length/data");
}
}


@ -14,7 +14,7 @@ use crate::{
deserialize_atomicbool, deserialize_atomicu64, serialize_atomicbool, serialize_atomicu64,
},
stakes::Stakes,
status_cache::{SignatureConfirmationStatus, SlotDelta, StatusCache},
status_cache::{SlotDelta, StatusCache},
storage_utils,
storage_utils::StorageAccounts,
system_instruction_processor::{get_system_account_kind, SystemAccountKind},
@ -1845,29 +1845,25 @@ impl Bank {
&self,
signature: &Signature,
) -> Option<Result<()>> {
if let Some(status) = self.get_signature_confirmation_status(signature) {
if status.slot == self.slot() {
return Some(status.status);
if let Some((slot, status)) = self.get_signature_status_slot(signature) {
if slot <= self.slot() {
return Some(status);
}
}
None
}
pub fn get_signature_confirmation_status(
&self,
signature: &Signature,
) -> Option<SignatureConfirmationStatus<Result<()>>> {
pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> {
let rcache = self.src.status_cache.read().unwrap();
rcache.get_signature_status_slow(signature, &self.ancestors)
rcache.get_signature_slot(signature, &self.ancestors)
}
pub fn get_signature_status(&self, signature: &Signature) -> Option<Result<()>> {
self.get_signature_confirmation_status(signature)
.map(|v| v.status)
self.get_signature_status_slot(signature).map(|v| v.1)
}
pub fn has_signature(&self, signature: &Signature) -> bool {
self.get_signature_confirmation_status(signature).is_some()
self.get_signature_status_slot(signature).is_some()
}
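With `get_signature_confirmation_status` removed, callers now receive the
landing slot alongside the status and derive confirmation counts elsewhere
(for example from `BlockCommitmentCache`, as the storage stage does earlier
in this diff). A hedged usage sketch of the new accessor, with an
illustrative helper name:

```rust
use solana_runtime::bank::Bank;
use solana_sdk::signature::Signature;

// Report where a signature landed; the returned slot replaces the
// confirmation count the old API embedded in its response.
fn report_signature(bank: &Bank, signature: &Signature) {
    match bank.get_signature_status_slot(signature) {
        Some((slot, status)) => println!("landed in slot {}, status: {:?}", slot, status),
        None => println!("signature not found in the status cache"),
    }
}
```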
/// Hash the `accounts` HashMap. This represents a validator's interpretation


@ -1,4 +1,4 @@
use crate::{bank::Bank, status_cache::SignatureConfirmationStatus};
use crate::bank::Bank;
use solana_sdk::{
account::Account,
client::{AsyncClient, Client, SyncClient},
@ -184,26 +184,15 @@ impl SyncClient for BankClient {
signature: &Signature,
min_confirmed_blocks: usize,
) -> Result<usize> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
// https://github.com/solana-labs/solana/issues/7199
assert_eq!(min_confirmed_blocks, 1, "BankClient cannot observe the passage of multiple blocks, so min_confirmed_blocks must be 1");
let now = Instant::now();
let confirmed_blocks;
loop {
let response = self.bank.get_signature_confirmation_status(signature);
if let Some(SignatureConfirmationStatus {
confirmations,
status,
..
}) = response
{
if status.is_ok() {
if confirmed_blocks != confirmations {
now = Instant::now();
confirmed_blocks = confirmations;
}
if confirmations >= min_confirmed_blocks {
if self.bank.get_signature_status(signature).is_some() {
confirmed_blocks = 1;
break;
}
}
};
if now.elapsed().as_secs() > 15 {
return Err(TransportError::IoError(io::Error::new(
io::ErrorKind::Other,


@ -103,29 +103,20 @@ impl<T: Serialize + Clone> StatusCache<T> {
None
}
pub fn get_signature_status_slow(
pub fn get_signature_slot(
&self,
sig: &Signature,
signature: &Signature,
ancestors: &HashMap<Slot, usize>,
) -> Option<SignatureConfirmationStatus<T>> {
trace!("get_signature_status_slow");
) -> Option<(Slot, T)> {
let mut keys = vec![];
let mut val: Vec<_> = self.cache.iter().map(|(k, _)| *k).collect();
keys.append(&mut val);
for blockhash in keys.iter() {
trace!("get_signature_status_slow: trying {}", blockhash);
if let Some((forkid, res)) = self.get_signature_status(sig, blockhash, ancestors) {
trace!("get_signature_status_slow: got {}", forkid);
let confirmations = ancestors
.get(&forkid)
.copied()
.unwrap_or_else(|| ancestors.len());
return Some(SignatureConfirmationStatus {
slot: forkid,
confirmations,
status: res,
});
trace!("get_signature_slot: trying {}", blockhash);
let status = self.get_signature_status(signature, blockhash, ancestors);
if status.is_some() {
return status;
}
}
None
@ -265,10 +256,7 @@ mod tests {
status_cache.get_signature_status(&sig, &blockhash, &HashMap::new()),
None
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &HashMap::new()),
None
);
assert_eq!(status_cache.get_signature_slot(&sig, &HashMap::new()), None);
}
#[test]
@ -283,12 +271,8 @@ mod tests {
Some((0, ()))
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
Some(SignatureConfirmationStatus {
slot: 0,
confirmations: 1,
status: ()
})
status_cache.get_signature_slot(&sig, &ancestors),
Some((0, ()))
);
}
@ -303,10 +287,7 @@ mod tests {
status_cache.get_signature_status(&sig, &blockhash, &ancestors),
None
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
None
);
assert_eq!(status_cache.get_signature_slot(&sig, &ancestors), None);
}
#[test]
@ -323,24 +304,6 @@ mod tests {
);
}
#[test]
fn test_find_sig_with_root_ancestor_fork_max_len() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = vec![(2, 2)].into_iter().collect();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.add_root(0);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
Some(SignatureConfirmationStatus {
slot: 0,
confirmations: ancestors.len(),
status: ()
})
);
}
#[test]
fn test_insert_picks_latest_blockhash_fork() {
let sig = Signature::default();
@ -371,10 +334,6 @@ mod tests {
status_cache.get_signature_status(&sig, &blockhash, &ancestors),
None
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
None
);
}
#[test]


@ -66,6 +66,7 @@ if [[ $CI_OS_NAME = windows ]]; then
solana-install
solana-install-init
solana-keygen
solana-stake-accounts
)
else
./fetch-perf-libs.sh
@ -96,6 +97,7 @@ else
solana-ledger-tool
solana-log-analyzer
solana-net-shaper
solana-stake-accounts
solana-stake-monitor
solana-sys-tuner
solana-validator

stake-accounts/Cargo.toml (new file, 21 lines)

@ -0,0 +1,21 @@
[package]
name = "solana-stake-accounts"
description = "Blockchain, Rebuilt for Scale"
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
version = "1.1.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.1.0" }
solana-cli-config = { path = "../cli-config", version = "1.1.0" }
solana-client = { path = "../client", version = "1.1.0" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.1.0" }
solana-sdk = { path = "../sdk", version = "1.1.0" }
solana-stake-program = { path = "../programs/stake", version = "1.1.0" }
[dev-dependencies]
solana-runtime = { path = "../runtime", version = "1.1.0" }

stake-accounts/src/args.rs (new file, 382 lines)

@ -0,0 +1,382 @@
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::input_validators::{is_amount, is_valid_pubkey, is_valid_signer};
use solana_cli_config::CONFIG_FILE;
use solana_sdk::native_token::sol_to_lamports;
use std::ffi::OsString;
use std::process::exit;
pub(crate) struct NewCommandConfig {
pub fee_payer: String,
pub funding_keypair: String,
pub base_keypair: String,
pub lamports: u64,
pub stake_authority: String,
pub withdraw_authority: String,
pub index: usize,
}
pub(crate) struct CountCommandConfig {
pub base_pubkey: String,
}
pub(crate) struct QueryCommandConfig {
pub base_pubkey: String,
pub num_accounts: usize,
}
pub(crate) struct AuthorizeCommandConfig {
pub fee_payer: String,
pub base_pubkey: String,
pub stake_authority: String,
pub withdraw_authority: String,
pub new_stake_authority: String,
pub new_withdraw_authority: String,
pub num_accounts: usize,
}
pub(crate) struct RebaseCommandConfig {
pub fee_payer: String,
pub base_pubkey: String,
pub new_base_keypair: String,
pub stake_authority: String,
pub num_accounts: usize,
}
pub(crate) struct MoveCommandConfig {
pub rebase_config: RebaseCommandConfig,
pub authorize_config: AuthorizeCommandConfig,
}
pub(crate) enum Command {
New(NewCommandConfig),
Count(CountCommandConfig),
Addresses(QueryCommandConfig),
Balance(QueryCommandConfig),
Authorize(AuthorizeCommandConfig),
Rebase(RebaseCommandConfig),
Move(Box<MoveCommandConfig>),
}
pub(crate) struct CommandConfig {
pub config_file: String,
pub url: Option<String>,
pub command: Command,
}
fn fee_payer_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("fee_payer")
.long("fee-payer")
.required(true)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_valid_signer)
.help("Fee payer")
}
fn funding_keypair_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("funding_keypair")
.required(true)
.takes_value(true)
.value_name("FUNDING_KEYPAIR")
.validator(is_valid_signer)
.help("Keypair to fund accounts")
}
fn base_pubkey_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("base_pubkey")
.required(true)
.takes_value(true)
.value_name("BASE_PUBKEY")
.validator(is_valid_pubkey)
.help("Public key which stake account addresses are derived from")
}
fn new_base_keypair_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("new_base_keypair")
.required(true)
.takes_value(true)
.value_name("NEW_BASE_KEYPAIR")
.validator(is_valid_signer)
.help("New keypair which stake account addresses are derived from")
}
fn stake_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("stake_authority")
.long("stake-authority")
.required(true)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_valid_signer)
.help("Stake authority")
}
fn withdraw_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("withdraw_authority")
.long("withdraw-authority")
.required(true)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_valid_signer)
.help("Withdraw authority")
}
fn new_stake_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("new_stake_authority")
.long("new-stake-authority")
.required(true)
.takes_value(true)
.value_name("PUBKEY")
.validator(is_valid_pubkey)
.help("New stake authority")
}
fn new_withdraw_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("new_withdraw_authority")
.long("new-withdraw-authority")
.required(true)
.takes_value(true)
.value_name("PUBKEY")
.validator(is_valid_pubkey)
.help("New withdraw authority")
}
fn num_accounts_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("num_accounts")
.long("num-accounts")
.required(true)
.takes_value(true)
.value_name("NUMBER")
.help("Number of derived stake accounts")
}
pub(crate) fn get_matches<'a, I, T>(args: I) -> ArgMatches<'a>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
let default_config_file = CONFIG_FILE.as_ref().unwrap();
App::new("solana-stake-accounts")
.about("about")
.version("version")
.arg(
Arg::with_name("config_file")
.long("config")
.takes_value(true)
.value_name("FILEPATH")
.default_value(default_config_file)
.help("Config file"),
)
.arg(
Arg::with_name("url")
.long("url")
.global(true)
.takes_value(true)
.value_name("URL")
.help("RPC entrypoint address. i.e. http://devnet.solana.com"),
)
.subcommand(
SubCommand::with_name("new")
.about("Create derived stake accounts")
.arg(fee_payer_arg())
.arg(funding_keypair_arg().index(1))
.arg(
Arg::with_name("base_keypair")
.required(true)
.index(2)
.takes_value(true)
.value_name("BASE_KEYPAIR")
.validator(is_valid_signer)
.help("Keypair which stake account addresses are derived from"),
)
.arg(
Arg::with_name("amount")
.required(true)
.index(3)
.takes_value(true)
.value_name("AMOUNT")
.validator(is_amount)
.help("Amount to move into the new stake accounts, in SOL"),
)
.arg(
Arg::with_name("stake_authority")
.long("stake-authority")
.required(true)
.takes_value(true)
.value_name("PUBKEY")
.validator(is_valid_pubkey)
.help("Stake authority"),
)
.arg(
Arg::with_name("withdraw_authority")
.long("withdraw-authority")
.required(true)
.takes_value(true)
.value_name("PUBKEY")
.validator(is_valid_pubkey)
.help("Withdraw authority"),
)
.arg(
Arg::with_name("index")
.long("index")
.takes_value(true)
.default_value("0")
.value_name("NUMBER")
.help("Index of the derived account to create"),
),
)
.subcommand(
SubCommand::with_name("count")
.about("Count derived stake accounts")
.arg(base_pubkey_arg().index(1)),
)
.subcommand(
SubCommand::with_name("addresses")
.about("Show public keys of all derived stake accounts")
.arg(base_pubkey_arg().index(1))
.arg(num_accounts_arg()),
)
.subcommand(
SubCommand::with_name("balance")
.about("Sum balances of all derived stake accounts")
.arg(base_pubkey_arg().index(1))
.arg(num_accounts_arg()),
)
.subcommand(
SubCommand::with_name("authorize")
.about("Set new authorities in all derived stake accounts")
.arg(fee_payer_arg())
.arg(base_pubkey_arg().index(1))
.arg(stake_authority_arg())
.arg(withdraw_authority_arg())
.arg(new_stake_authority_arg())
.arg(new_withdraw_authority_arg())
.arg(num_accounts_arg()),
)
.subcommand(
SubCommand::with_name("rebase")
.about("Relocate derived stake accounts")
.arg(fee_payer_arg())
.arg(base_pubkey_arg().index(1))
.arg(new_base_keypair_arg().index(2))
.arg(stake_authority_arg())
.arg(num_accounts_arg()),
)
.subcommand(
SubCommand::with_name("move")
.about("Rebase and set new authorities in all derived stake accounts")
.arg(fee_payer_arg())
.arg(base_pubkey_arg().index(1))
.arg(new_base_keypair_arg().index(2))
.arg(stake_authority_arg())
.arg(withdraw_authority_arg())
.arg(new_stake_authority_arg())
.arg(new_withdraw_authority_arg())
.arg(num_accounts_arg()),
)
.get_matches_from(args)
}
fn parse_new_args(matches: &ArgMatches<'_>) -> NewCommandConfig {
let fee_payer = value_t_or_exit!(matches, "fee_payer", String);
let funding_keypair = value_t_or_exit!(matches, "funding_keypair", String);
let lamports = sol_to_lamports(value_t_or_exit!(matches, "amount", f64));
let base_keypair = value_t_or_exit!(matches, "base_keypair", String);
let stake_authority = value_t_or_exit!(matches, "stake_authority", String);
let withdraw_authority = value_t_or_exit!(matches, "withdraw_authority", String);
let index = value_t_or_exit!(matches, "index", usize);
NewCommandConfig {
fee_payer,
funding_keypair,
lamports,
base_keypair,
stake_authority,
withdraw_authority,
index,
}
}
fn parse_count_args(matches: &ArgMatches<'_>) -> CountCommandConfig {
let base_pubkey = value_t_or_exit!(matches, "base_pubkey", String);
CountCommandConfig { base_pubkey }
}
fn parse_query_args(matches: &ArgMatches<'_>) -> QueryCommandConfig {
let base_pubkey = value_t_or_exit!(matches, "base_pubkey", String);
let num_accounts = value_t_or_exit!(matches, "num_accounts", usize);
QueryCommandConfig {
base_pubkey,
num_accounts,
}
}
fn parse_authorize_args(matches: &ArgMatches<'_>) -> AuthorizeCommandConfig {
let fee_payer = value_t_or_exit!(matches, "fee_payer", String);
let base_pubkey = value_t_or_exit!(matches, "base_pubkey", String);
let stake_authority = value_t_or_exit!(matches, "stake_authority", String);
let withdraw_authority = value_t_or_exit!(matches, "withdraw_authority", String);
let new_stake_authority = value_t_or_exit!(matches, "new_stake_authority", String);
let new_withdraw_authority = value_t_or_exit!(matches, "new_withdraw_authority", String);
let num_accounts = value_t_or_exit!(matches, "num_accounts", usize);
AuthorizeCommandConfig {
fee_payer,
base_pubkey,
stake_authority,
withdraw_authority,
new_stake_authority,
new_withdraw_authority,
num_accounts,
}
}
fn parse_rebase_args(matches: &ArgMatches<'_>) -> RebaseCommandConfig {
let fee_payer = value_t_or_exit!(matches, "fee_payer", String);
let base_pubkey = value_t_or_exit!(matches, "base_pubkey", String);
let new_base_keypair = value_t_or_exit!(matches, "new_base_keypair", String);
let stake_authority = value_t_or_exit!(matches, "stake_authority", String);
let num_accounts = value_t_or_exit!(matches, "num_accounts", usize);
RebaseCommandConfig {
fee_payer,
base_pubkey,
new_base_keypair,
stake_authority,
num_accounts,
}
}
fn parse_move_args(matches: &ArgMatches<'_>) -> MoveCommandConfig {
let rebase_config = parse_rebase_args(matches);
let authorize_config = parse_authorize_args(matches);
MoveCommandConfig {
rebase_config,
authorize_config,
}
}
pub(crate) fn parse_args<I, T>(args: I) -> CommandConfig
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
let matches = get_matches(args);
let config_file = matches.value_of("config_file").unwrap().to_string();
let url = matches.value_of("url").map(|x| x.to_string());
let command = match matches.subcommand() {
("new", Some(matches)) => Command::New(parse_new_args(matches)),
("count", Some(matches)) => Command::Count(parse_count_args(matches)),
("addresses", Some(matches)) => Command::Addresses(parse_query_args(matches)),
("balance", Some(matches)) => Command::Balance(parse_query_args(matches)),
("authorize", Some(matches)) => Command::Authorize(parse_authorize_args(matches)),
("rebase", Some(matches)) => Command::Rebase(parse_rebase_args(matches)),
("move", Some(matches)) => Command::Move(Box::new(parse_move_args(matches))),
_ => {
eprintln!("{}", matches.usage());
exit(1);
}
};
CommandConfig {
config_file,
url,
command,
}
}
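Sketch (illustrative, not part of this change): exercising parse_args in an in-crate test. The base58 value is the system program address, used purely as a placeholder, and this assumes the default config file path resolves; `count` takes no signer flags, so only the positional base pubkey is needed.
#[cfg(test)]
mod parse_sketch {
    use super::*;

    #[test]
    fn parses_count_subcommand() {
        // argv[0] is the binary name; `count` takes the base pubkey at index 1.
        let args = vec![
            "solana-stake-accounts",
            "count",
            "11111111111111111111111111111111",
        ];
        let config = parse_args(args);
        match config.command {
            Command::Count(count_config) => {
                assert_eq!(count_config.base_pubkey, "11111111111111111111111111111111")
            }
            _ => panic!("expected the count subcommand"),
        }
    }
}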

306
stake-accounts/src/main.rs Normal file
View File

@ -0,0 +1,306 @@
mod args;
mod stake_accounts;
use crate::args::{
parse_args, AuthorizeCommandConfig, Command, MoveCommandConfig, NewCommandConfig,
RebaseCommandConfig,
};
use clap::ArgMatches;
use solana_clap_utils::keypair::{pubkey_from_path, signer_from_path};
use solana_cli_config::Config;
use solana_client::client_error::ClientError;
use solana_client::rpc_client::RpcClient;
use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
use solana_sdk::{
message::Message,
native_token::lamports_to_sol,
pubkey::Pubkey,
signature::{Signature, Signer},
signers::Signers,
transaction::Transaction,
};
use std::env;
use std::error::Error;
use std::sync::Arc;
fn resolve_stake_authority(
wallet_manager: Option<&Arc<RemoteWalletManager>>,
key_url: &str,
) -> Result<Box<dyn Signer>, Box<dyn Error>> {
let matches = ArgMatches::default();
signer_from_path(&matches, key_url, "stake authority", wallet_manager)
}
fn resolve_withdraw_authority(
wallet_manager: Option<&Arc<RemoteWalletManager>>,
key_url: &str,
) -> Result<Box<dyn Signer>, Box<dyn Error>> {
let matches = ArgMatches::default();
signer_from_path(&matches, key_url, "withdraw authority", wallet_manager)
}
fn resolve_new_stake_authority(
wallet_manager: Option<&Arc<RemoteWalletManager>>,
key_url: &str,
) -> Result<Pubkey, Box<dyn Error>> {
let matches = ArgMatches::default();
pubkey_from_path(&matches, key_url, "new stake authority", wallet_manager)
}
fn resolve_new_withdraw_authority(
wallet_manager: Option<&Arc<RemoteWalletManager>>,
key_url: &str,
) -> Result<Pubkey, Box<dyn Error>> {
let matches = ArgMatches::default();
pubkey_from_path(&matches, key_url, "new withdraw authority", wallet_manager)
}
fn resolve_fee_payer(
wallet_manager: Option<&Arc<RemoteWalletManager>>,
key_url: &str,
) -> Result<Box<dyn Signer>, Box<dyn Error>> {
let matches = ArgMatches::default();
signer_from_path(&matches, key_url, "fee-payer", wallet_manager)
}
fn resolve_base_pubkey(
wallet_manager: Option<&Arc<RemoteWalletManager>>,
key_url: &str,
) -> Result<Pubkey, Box<dyn Error>> {
let matches = ArgMatches::default();
pubkey_from_path(&matches, key_url, "base pubkey", wallet_manager)
}
fn get_balance_at(client: &RpcClient, pubkey: &Pubkey, i: usize) -> Result<u64, ClientError> {
let address = stake_accounts::derive_stake_account_address(pubkey, i);
client.get_balance(&address)
}
// Return the number of derived stake accounts with nonzero balances, probing
// indices from zero and stopping at the first unfunded account
fn count_stake_accounts(client: &RpcClient, base_pubkey: &Pubkey) -> Result<usize, ClientError> {
let mut i = 0;
while get_balance_at(client, base_pubkey, i)? > 0 {
i += 1;
}
Ok(i)
}
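The count above is defined by sequential probing: indices are scanned from zero and counting stops at the first zero-balance account, so a gap hides any funded accounts past it. A dependency-free sketch of that rule (the slice stands in for RPC balance lookups):
fn count_funded(balances: &[u64]) -> usize {
    balances.iter().take_while(|&&lamports| lamports > 0).count()
}

#[test]
fn probing_stops_at_first_gap() {
    // The funded account at index 3 is hidden by the gap at index 2.
    assert_eq!(count_funded(&[10, 10, 0, 10]), 2);
}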
fn get_balances(
client: &RpcClient,
addresses: Vec<Pubkey>,
) -> Result<Vec<(Pubkey, u64)>, ClientError> {
addresses
.into_iter()
.map(|pubkey| client.get_balance(&pubkey).map(|bal| (pubkey, bal)))
.collect()
}
fn process_new_stake_account(
client: &RpcClient,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
new_config: &NewCommandConfig,
) -> Result<Signature, Box<dyn Error>> {
let matches = ArgMatches::default();
let fee_payer_keypair = resolve_fee_payer(wallet_manager, &new_config.fee_payer)?;
let funding_keypair = signer_from_path(
&matches,
&new_config.funding_keypair,
"funding keypair",
wallet_manager,
)?;
let base_keypair = signer_from_path(
&matches,
&new_config.base_keypair,
"base keypair",
wallet_manager,
)?;
let stake_authority_pubkey = pubkey_from_path(
&matches,
&new_config.stake_authority,
"stake authority",
wallet_manager,
)?;
let withdraw_authority_pubkey = pubkey_from_path(
&matches,
&new_config.withdraw_authority,
"withdraw authority",
wallet_manager,
)?;
let message = stake_accounts::new_stake_account(
&fee_payer_keypair.pubkey(),
&funding_keypair.pubkey(),
&base_keypair.pubkey(),
new_config.lamports,
&stake_authority_pubkey,
&withdraw_authority_pubkey,
new_config.index,
);
let signers = vec![&*fee_payer_keypair, &*funding_keypair, &*base_keypair];
let signature = send_message(client, message, &signers)?;
Ok(signature)
}
fn process_authorize_stake_accounts(
client: &RpcClient,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
authorize_config: &AuthorizeCommandConfig,
) -> Result<(), Box<dyn Error>> {
let fee_payer_keypair = resolve_fee_payer(wallet_manager, &authorize_config.fee_payer)?;
let base_pubkey = resolve_base_pubkey(wallet_manager, &authorize_config.base_pubkey)?;
let stake_authority_keypair =
resolve_stake_authority(wallet_manager, &authorize_config.stake_authority)?;
let withdraw_authority_keypair =
resolve_withdraw_authority(wallet_manager, &authorize_config.withdraw_authority)?;
let new_stake_authority_pubkey =
resolve_new_stake_authority(wallet_manager, &authorize_config.new_stake_authority)?;
let new_withdraw_authority_pubkey =
resolve_new_withdraw_authority(wallet_manager, &authorize_config.new_withdraw_authority)?;
let messages = stake_accounts::authorize_stake_accounts(
&fee_payer_keypair.pubkey(),
&base_pubkey,
&stake_authority_keypair.pubkey(),
&withdraw_authority_keypair.pubkey(),
&new_stake_authority_pubkey,
&new_withdraw_authority_pubkey,
authorize_config.num_accounts,
);
let signers = vec![
&*fee_payer_keypair,
&*stake_authority_keypair,
&*withdraw_authority_keypair,
];
for message in messages {
let signature = send_message(client, message, &signers)?;
println!("{}", signature);
}
Ok(())
}
fn process_rebase_stake_accounts(
client: &RpcClient,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
rebase_config: &RebaseCommandConfig,
) -> Result<(), Box<dyn Error>> {
let fee_payer_keypair = resolve_fee_payer(wallet_manager, &rebase_config.fee_payer)?;
let base_pubkey = resolve_base_pubkey(wallet_manager, &rebase_config.base_pubkey)?;
let stake_authority_keypair =
resolve_stake_authority(wallet_manager, &rebase_config.stake_authority)?;
let addresses =
stake_accounts::derive_stake_account_addresses(&base_pubkey, rebase_config.num_accounts);
let balances = get_balances(&client, addresses)?;
let messages = stake_accounts::rebase_stake_accounts(
&fee_payer_keypair.pubkey(),
&base_pubkey,
&stake_authority_keypair.pubkey(),
&balances,
);
let signers = vec![&*fee_payer_keypair, &*stake_authority_keypair];
for message in messages {
let signature = send_message(client, message, &signers)?;
println!("{}", signature);
}
Ok(())
}
fn process_move_stake_accounts(
client: &RpcClient,
wallet_manager: Option<&Arc<RemoteWalletManager>>,
move_config: &MoveCommandConfig,
) -> Result<(), Box<dyn Error>> {
let authorize_config = &move_config.authorize_config;
let fee_payer_keypair = resolve_fee_payer(wallet_manager, &authorize_config.fee_payer)?;
let base_pubkey = resolve_base_pubkey(wallet_manager, &authorize_config.base_pubkey)?;
let stake_authority_keypair =
resolve_stake_authority(wallet_manager, &authorize_config.stake_authority)?;
let withdraw_authority_keypair =
resolve_withdraw_authority(wallet_manager, &authorize_config.withdraw_authority)?;
let new_stake_authority_pubkey =
resolve_new_stake_authority(wallet_manager, &authorize_config.new_stake_authority)?;
let new_withdraw_authority_pubkey =
resolve_new_withdraw_authority(wallet_manager, &authorize_config.new_withdraw_authority)?;
let addresses =
stake_accounts::derive_stake_account_addresses(&base_pubkey, authorize_config.num_accounts);
let balances = get_balances(&client, addresses)?;
let messages = stake_accounts::move_stake_accounts(
&fee_payer_keypair.pubkey(),
&base_pubkey,
&stake_authority_keypair.pubkey(),
&withdraw_authority_keypair.pubkey(),
&new_stake_authority_pubkey,
&new_withdraw_authority_pubkey,
&balances,
);
let signers = vec![
&*fee_payer_keypair,
&*stake_authority_keypair,
&*withdraw_authority_keypair,
];
for message in messages {
let signature = send_message(client, message, &signers)?;
println!("{}", signature);
}
Ok(())
}
fn send_message<S: Signers>(
client: &RpcClient,
message: Message,
signers: &S,
) -> Result<Signature, ClientError> {
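// Assumption based on the RpcClient API of this era: resign_transaction
// fetches a fresh blockhash and signs, and the spinner call then submits
// and blocks until the cluster confirms.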
let mut transaction = Transaction::new_unsigned(message);
client.resign_transaction(&mut transaction, signers)?;
client.send_and_confirm_transaction_with_spinner(&mut transaction, signers)
}
fn main() -> Result<(), Box<dyn Error>> {
let command_config = parse_args(env::args_os());
let config = Config::load(&command_config.config_file)?;
let json_rpc_url = command_config.url.unwrap_or(config.json_rpc_url);
let client = RpcClient::new(json_rpc_url);
let wallet_manager = maybe_wallet_manager()?;
let wallet_manager = wallet_manager.as_ref();
match command_config.command {
Command::New(new_config) => {
process_new_stake_account(&client, wallet_manager, &new_config)?;
}
Command::Count(count_config) => {
let base_pubkey = resolve_base_pubkey(wallet_manager, &count_config.base_pubkey)?;
let num_accounts = count_stake_accounts(&client, &base_pubkey)?;
println!("{}", num_accounts);
}
Command::Addresses(query_config) => {
let base_pubkey = resolve_base_pubkey(wallet_manager, &query_config.base_pubkey)?;
let addresses = stake_accounts::derive_stake_account_addresses(
&base_pubkey,
query_config.num_accounts,
);
for address in addresses {
println!("{:?}", address);
}
}
Command::Balance(query_config) => {
let base_pubkey = resolve_base_pubkey(wallet_manager, &query_config.base_pubkey)?;
let addresses = stake_accounts::derive_stake_account_addresses(
&base_pubkey,
query_config.num_accounts,
);
let balances = get_balances(&client, addresses)?;
let lamports: u64 = balances.into_iter().map(|(_, bal)| bal).sum();
let sol = lamports_to_sol(lamports);
println!("{} SOL", sol);
}
Command::Authorize(authorize_config) => {
process_authorize_stake_accounts(&client, wallet_manager, &authorize_config)?;
}
Command::Rebase(rebase_config) => {
process_rebase_stake_accounts(&client, wallet_manager, &rebase_config)?;
}
Command::Move(move_config) => {
process_move_stake_accounts(&client, wallet_manager, &move_config)?;
}
}
Ok(())
}

463
stake-accounts/src/stake_accounts.rs Normal file
View File

@ -0,0 +1,463 @@
use solana_sdk::{instruction::Instruction, message::Message, pubkey::Pubkey};
use solana_stake_program::{
stake_instruction,
stake_state::{Authorized, Lockup, StakeAuthorize},
};
pub(crate) fn derive_stake_account_address(base_pubkey: &Pubkey, i: usize) -> Pubkey {
Pubkey::create_with_seed(base_pubkey, &i.to_string(), &solana_stake_program::id()).unwrap()
}
// Return derived addresses
pub(crate) fn derive_stake_account_addresses(
base_pubkey: &Pubkey,
num_accounts: usize,
) -> Vec<Pubkey> {
(0..num_accounts)
.map(|i| derive_stake_account_address(base_pubkey, i))
.collect()
}
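Derivation is deterministic: the decimal index is the seed and the stake program is the owner, so anyone holding only the base pubkey can recompute every address offline. A small in-crate sketch (Pubkey::new_rand is the SDK's test-key helper):
use solana_sdk::pubkey::Pubkey;

fn derivation_is_deterministic() {
    let base_pubkey = Pubkey::new_rand();
    let first = Pubkey::create_with_seed(&base_pubkey, "0", &solana_stake_program::id()).unwrap();
    let again = Pubkey::create_with_seed(&base_pubkey, "0", &solana_stake_program::id()).unwrap();
    // Same base, seed, and owner always yield the same address.
    assert_eq!(first, again);
}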
pub(crate) fn new_stake_account(
fee_payer_pubkey: &Pubkey,
funding_pubkey: &Pubkey,
base_pubkey: &Pubkey,
lamports: u64,
stake_authority_pubkey: &Pubkey,
withdraw_authority_pubkey: &Pubkey,
index: usize,
) -> Message {
let stake_account_address = derive_stake_account_address(base_pubkey, index);
let authorized = Authorized {
staker: *stake_authority_pubkey,
withdrawer: *withdraw_authority_pubkey,
};
let instructions = stake_instruction::create_account_with_seed(
funding_pubkey,
&stake_account_address,
&base_pubkey,
&index.to_string(),
&authorized,
&Lockup::default(),
lamports,
);
Message::new_with_payer(&instructions, Some(fee_payer_pubkey))
}
fn authorize_stake_accounts_instructions(
stake_account_address: &Pubkey,
stake_authority_pubkey: &Pubkey,
withdraw_authority_pubkey: &Pubkey,
new_stake_authority_pubkey: &Pubkey,
new_withdraw_authority_pubkey: &Pubkey,
) -> Vec<Instruction> {
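// Two instructions per account: the current stake authority signs over the
// staker role, and the current withdraw authority signs over the withdrawer role.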
let instruction0 = stake_instruction::authorize(
&stake_account_address,
stake_authority_pubkey,
new_stake_authority_pubkey,
StakeAuthorize::Staker,
);
let instruction1 = stake_instruction::authorize(
&stake_account_address,
withdraw_authority_pubkey,
new_withdraw_authority_pubkey,
StakeAuthorize::Withdrawer,
);
vec![instruction0, instruction1]
}
fn rebase_stake_account(
stake_account_address: &Pubkey,
new_base_pubkey: &Pubkey,
i: usize,
fee_payer_pubkey: &Pubkey,
stake_authority_pubkey: &Pubkey,
lamports: u64,
) -> Message {
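// Split the account's full balance into the address at the same index under
// the new base, preserving the index-to-account mapping across the move.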
let new_stake_account_address = derive_stake_account_address(new_base_pubkey, i);
let instructions = stake_instruction::split_with_seed(
stake_account_address,
stake_authority_pubkey,
lamports,
&new_stake_account_address,
new_base_pubkey,
&i.to_string(),
);
Message::new_with_payer(&instructions, Some(&fee_payer_pubkey))
}
fn move_stake_account(
stake_account_address: &Pubkey,
new_base_pubkey: &Pubkey,
i: usize,
fee_payer_pubkey: &Pubkey,
stake_authority_pubkey: &Pubkey,
withdraw_authority_pubkey: &Pubkey,
new_stake_authority_pubkey: &Pubkey,
new_withdraw_authority_pubkey: &Pubkey,
lamports: u64,
) -> Message {
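// Bundle the split and both authorize instructions into a single message so
// relocation and re-authorization land in one atomic transaction.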
let new_stake_account_address = derive_stake_account_address(new_base_pubkey, i);
let mut instructions = stake_instruction::split_with_seed(
stake_account_address,
stake_authority_pubkey,
lamports,
&new_stake_account_address,
new_base_pubkey,
&i.to_string(),
);
let authorize_instructions = authorize_stake_accounts_instructions(
&new_stake_account_address,
stake_authority_pubkey,
withdraw_authority_pubkey,
new_stake_authority_pubkey,
new_withdraw_authority_pubkey,
);
instructions.extend(authorize_instructions.into_iter());
Message::new_with_payer(&instructions, Some(&fee_payer_pubkey))
}
pub(crate) fn authorize_stake_accounts(
fee_payer_pubkey: &Pubkey,
base_pubkey: &Pubkey,
stake_authority_pubkey: &Pubkey,
withdraw_authority_pubkey: &Pubkey,
new_stake_authority_pubkey: &Pubkey,
new_withdraw_authority_pubkey: &Pubkey,
num_accounts: usize,
) -> Vec<Message> {
let stake_account_addresses = derive_stake_account_addresses(base_pubkey, num_accounts);
stake_account_addresses
.iter()
.map(|stake_account_address| {
let instructions = authorize_stake_accounts_instructions(
stake_account_address,
stake_authority_pubkey,
withdraw_authority_pubkey,
new_stake_authority_pubkey,
new_withdraw_authority_pubkey,
);
Message::new_with_payer(&instructions, Some(&fee_payer_pubkey))
})
.collect::<Vec<_>>()
}
pub(crate) fn rebase_stake_accounts(
fee_payer_pubkey: &Pubkey,
new_base_pubkey: &Pubkey,
stake_authority_pubkey: &Pubkey,
balances: &[(Pubkey, u64)],
) -> Vec<Message> {
balances
.iter()
.enumerate()
.map(|(i, (stake_account_address, lamports))| {
rebase_stake_account(
stake_account_address,
new_base_pubkey,
i,
fee_payer_pubkey,
stake_authority_pubkey,
*lamports,
)
})
.collect()
}
pub(crate) fn move_stake_accounts(
fee_payer_pubkey: &Pubkey,
new_base_pubkey: &Pubkey,
stake_authority_pubkey: &Pubkey,
withdraw_authority_pubkey: &Pubkey,
new_stake_authority_pubkey: &Pubkey,
new_withdraw_authority_pubkey: &Pubkey,
balances: &[(Pubkey, u64)],
) -> Vec<Message> {
balances
.iter()
.enumerate()
.map(|(i, (stake_account_address, lamports))| {
move_stake_account(
stake_account_address,
new_base_pubkey,
i,
fee_payer_pubkey,
stake_authority_pubkey,
withdraw_authority_pubkey,
new_stake_authority_pubkey,
new_withdraw_authority_pubkey,
*lamports,
)
})
.collect()
}
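These builders are pure functions over pubkeys and balances, so the messages can be assembled without an RPC connection; only signing and submission need a client. A sketch under that assumption (the lamport amounts are made up):
use solana_sdk::{message::Message, pubkey::Pubkey};

fn build_rebase_offline() -> Vec<Message> {
    let fee_payer_pubkey = Pubkey::new_rand();
    let old_base_pubkey = Pubkey::new_rand();
    let new_base_pubkey = Pubkey::new_rand();
    let stake_authority_pubkey = Pubkey::new_rand();
    // Pretend each derived account holds 1 SOL (1_000_000_000 lamports).
    let balances: Vec<(Pubkey, u64)> = derive_stake_account_addresses(&old_base_pubkey, 2)
        .into_iter()
        .map(|address| (address, 1_000_000_000))
        .collect();
    rebase_stake_accounts(
        &fee_payer_pubkey,
        &new_base_pubkey,
        &stake_authority_pubkey,
        &balances,
    )
}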
#[cfg(test)]
mod tests {
use super::*;
use solana_runtime::{bank::Bank, bank_client::BankClient};
use solana_sdk::{
account::Account,
client::SyncClient,
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
};
use solana_stake_program::stake_state::StakeState;
fn create_bank(lamports: u64) -> (Bank, Keypair, u64) {
let (genesis_config, mint_keypair) = create_genesis_config(lamports);
let mut bank = Bank::new(&genesis_config);
bank.add_instruction_processor(
solana_stake_program::id(),
solana_stake_program::stake_instruction::process_instruction,
);
let rent = bank.get_minimum_balance_for_rent_exemption(std::mem::size_of::<StakeState>());
(bank, mint_keypair, rent)
}
fn create_account<C: SyncClient>(
client: &C,
funding_keypair: &Keypair,
lamports: u64,
) -> Keypair {
let fee_payer_keypair = Keypair::new();
client
.transfer(lamports, &funding_keypair, &fee_payer_keypair.pubkey())
.unwrap();
fee_payer_keypair
}
fn get_account_at<C: SyncClient>(client: &C, base_pubkey: &Pubkey, i: usize) -> Account {
let account_address = derive_stake_account_address(&base_pubkey, i);
client.get_account(&account_address).unwrap().unwrap()
}
fn get_balances<C: SyncClient>(
client: &C,
base_pubkey: &Pubkey,
num_accounts: usize,
) -> Vec<(Pubkey, u64)> {
(0..num_accounts)
.into_iter()
.map(|i| {
let address = derive_stake_account_address(&base_pubkey, i);
(address, client.get_balance(&address).unwrap())
})
.collect()
}
#[test]
fn test_new_derived_stake_account() {
let (bank, funding_keypair, rent) = create_bank(10_000_000);
let funding_pubkey = funding_keypair.pubkey();
let bank_client = BankClient::new(bank);
let fee_payer_keypair = create_account(&bank_client, &funding_keypair, 1);
let fee_payer_pubkey = fee_payer_keypair.pubkey();
let base_keypair = Keypair::new();
let base_pubkey = base_keypair.pubkey();
let lamports = rent + 1;
let stake_authority_pubkey = Pubkey::new_rand();
let withdraw_authority_pubkey = Pubkey::new_rand();
let message = new_stake_account(
&fee_payer_pubkey,
&funding_pubkey,
&base_pubkey,
lamports,
&stake_authority_pubkey,
&withdraw_authority_pubkey,
0,
);
let signers = [&funding_keypair, &fee_payer_keypair, &base_keypair];
bank_client.send_message(&signers, message).unwrap();
let account = get_account_at(&bank_client, &base_pubkey, 0);
assert_eq!(account.lamports, lamports);
let authorized = StakeState::authorized_from(&account).unwrap();
assert_eq!(authorized.staker, stake_authority_pubkey);
assert_eq!(authorized.withdrawer, withdraw_authority_pubkey);
}
#[test]
fn test_authorize_stake_accounts() {
let (bank, funding_keypair, rent) = create_bank(10_000_000);
let funding_pubkey = funding_keypair.pubkey();
let bank_client = BankClient::new(bank);
let fee_payer_keypair = create_account(&bank_client, &funding_keypair, 1);
let fee_payer_pubkey = fee_payer_keypair.pubkey();
let base_keypair = Keypair::new();
let base_pubkey = base_keypair.pubkey();
let lamports = rent + 1;
let stake_authority_keypair = Keypair::new();
let stake_authority_pubkey = stake_authority_keypair.pubkey();
let withdraw_authority_keypair = Keypair::new();
let withdraw_authority_pubkey = withdraw_authority_keypair.pubkey();
let message = new_stake_account(
&fee_payer_pubkey,
&funding_pubkey,
&base_pubkey,
lamports,
&stake_authority_pubkey,
&withdraw_authority_pubkey,
0,
);
let signers = [&funding_keypair, &fee_payer_keypair, &base_keypair];
bank_client.send_message(&signers, message).unwrap();
let new_stake_authority_pubkey = Pubkey::new_rand();
let new_withdraw_authority_pubkey = Pubkey::new_rand();
let messages = authorize_stake_accounts(
&fee_payer_pubkey,
&base_pubkey,
&stake_authority_pubkey,
&withdraw_authority_pubkey,
&new_stake_authority_pubkey,
&new_withdraw_authority_pubkey,
1,
);
let signers = [
&fee_payer_keypair,
&stake_authority_keypair,
&withdraw_authority_keypair,
];
for message in messages {
bank_client.send_message(&signers, message).unwrap();
}
let account = get_account_at(&bank_client, &base_pubkey, 0);
let authorized = StakeState::authorized_from(&account).unwrap();
assert_eq!(authorized.staker, new_stake_authority_pubkey);
assert_eq!(authorized.withdrawer, new_withdraw_authority_pubkey);
}
#[test]
fn test_rebase_stake_accounts() {
let (bank, funding_keypair, rent) = create_bank(10_000_000);
let funding_pubkey = funding_keypair.pubkey();
let bank_client = BankClient::new(bank);
let fee_payer_keypair = create_account(&bank_client, &funding_keypair, 1);
let fee_payer_pubkey = fee_payer_keypair.pubkey();
let base_keypair = Keypair::new();
let base_pubkey = base_keypair.pubkey();
let lamports = rent + 1;
let stake_authority_keypair = Keypair::new();
let stake_authority_pubkey = stake_authority_keypair.pubkey();
let withdraw_authority_keypair = Keypair::new();
let withdraw_authority_pubkey = withdraw_authority_keypair.pubkey();
let num_accounts = 1;
let message = new_stake_account(
&fee_payer_pubkey,
&funding_pubkey,
&base_pubkey,
lamports,
&stake_authority_pubkey,
&withdraw_authority_pubkey,
0,
);
let signers = [&funding_keypair, &fee_payer_keypair, &base_keypair];
bank_client.send_message(&signers, message).unwrap();
let new_base_keypair = Keypair::new();
let new_base_pubkey = new_base_keypair.pubkey();
let balances = get_balances(&bank_client, &base_pubkey, num_accounts);
let messages = rebase_stake_accounts(
&fee_payer_pubkey,
&new_base_pubkey,
&stake_authority_pubkey,
&balances,
);
assert_eq!(messages.len(), num_accounts);
let signers = [
&fee_payer_keypair,
&new_base_keypair,
&stake_authority_keypair,
];
for message in messages {
bank_client.send_message(&signers, message).unwrap();
}
// Ensure the new accounts are duplicates of the previous ones.
let account = get_account_at(&bank_client, &new_base_pubkey, 0);
let authorized = StakeState::authorized_from(&account).unwrap();
assert_eq!(authorized.staker, stake_authority_pubkey);
assert_eq!(authorized.withdrawer, withdraw_authority_pubkey);
}
#[test]
fn test_move_stake_accounts() {
let (bank, funding_keypair, rent) = create_bank(10_000_000);
let funding_pubkey = funding_keypair.pubkey();
let bank_client = BankClient::new(bank);
let fee_payer_keypair = create_account(&bank_client, &funding_keypair, 1);
let fee_payer_pubkey = fee_payer_keypair.pubkey();
let base_keypair = Keypair::new();
let base_pubkey = base_keypair.pubkey();
let lamports = rent + 1;
let stake_authority_keypair = Keypair::new();
let stake_authority_pubkey = stake_authority_keypair.pubkey();
let withdraw_authority_keypair = Keypair::new();
let withdraw_authority_pubkey = withdraw_authority_keypair.pubkey();
let num_accounts = 1;
let message = new_stake_account(
&fee_payer_pubkey,
&funding_pubkey,
&base_pubkey,
lamports,
&stake_authority_pubkey,
&withdraw_authority_pubkey,
0,
);
let signers = [&funding_keypair, &fee_payer_keypair, &base_keypair];
bank_client.send_message(&signers, message).unwrap();
let new_base_keypair = Keypair::new();
let new_base_pubkey = new_base_keypair.pubkey();
let new_stake_authority_pubkey = Pubkey::new_rand();
let new_withdraw_authority_pubkey = Pubkey::new_rand();
let balances = get_balances(&bank_client, &base_pubkey, num_accounts);
let messages = move_stake_accounts(
&fee_payer_pubkey,
&new_base_pubkey,
&stake_authority_pubkey,
&withdraw_authority_pubkey,
&new_stake_authority_pubkey,
&new_withdraw_authority_pubkey,
&balances,
);
assert_eq!(messages.len(), num_accounts);
let signers = [
&fee_payer_keypair,
&new_base_keypair,
&stake_authority_keypair,
&withdraw_authority_keypair,
];
for message in messages {
bank_client.send_message(&signers, message).unwrap();
}
// Ensure the new accounts have the new authorities.
let account = get_account_at(&bank_client, &new_base_pubkey, 0);
let authorized = StakeState::authorized_from(&account).unwrap();
assert_eq!(authorized.staker, new_stake_authority_pubkey);
assert_eq!(authorized.withdrawer, new_withdraw_authority_pubkey);
}
}

validator/src/main.rs
View File

@ -10,7 +10,7 @@ use solana_clap_utils::{
keypair::SKIP_SEED_PHRASE_VALIDATION_ARG,
};
use solana_client::rpc_client::RpcClient;
use solana_core::ledger_cleanup_service::DEFAULT_MAX_LEDGER_SLOTS;
use solana_core::ledger_cleanup_service::DEFAULT_MAX_LEDGER_SHREDS;
use solana_core::{
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
@ -401,7 +401,7 @@ fn download_then_check_genesis_hash(
pub fn main() {
let default_dynamic_port_range =
&format!("{}-{}", VALIDATOR_PORT_RANGE.0, VALIDATOR_PORT_RANGE.1);
let default_limit_ledger_size = &DEFAULT_MAX_LEDGER_SLOTS.to_string();
let default_limit_ledger_size = &DEFAULT_MAX_LEDGER_SHREDS.to_string();
let matches = App::new(crate_name!()).about(crate_description!())
.version(solana_clap_utils::version!())
@ -615,12 +615,12 @@ pub fn main() {
.arg(
clap::Arg::with_name("limit_ledger_size")
.long("limit-ledger-size")
.value_name("SLOT_COUNT")
.value_name("SHRED_COUNT")
.takes_value(true)
.min_values(0)
.max_values(1)
.default_value(default_limit_ledger_size)
.help("Drop ledger data for slots older than this value"),
.help("Keep this amount of shreds in root slots."),
)
.arg(
clap::Arg::with_name("skip_poh_verify")
@ -878,10 +878,10 @@ pub fn main() {
if matches.is_present("limit_ledger_size") {
let limit_ledger_size = value_t_or_exit!(matches, "limit_ledger_size", u64);
if limit_ledger_size < DEFAULT_MAX_LEDGER_SLOTS {
if limit_ledger_size < DEFAULT_MAX_LEDGER_SHREDS {
eprintln!(
"The provided --limit-ledger-size value was too small, the minimum value is {}",
DEFAULT_MAX_LEDGER_SLOTS
DEFAULT_MAX_LEDGER_SHREDS
);
exit(1);
}
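The guard above rejects undersized values outright rather than silently clamping them. A minimal standalone sketch of that policy (the parameters are placeholders, not the real DEFAULT_MAX_LEDGER_SHREDS):
fn validate_limit_ledger_size(requested: u64, minimum: u64) -> Result<u64, String> {
    if requested < minimum {
        Err(format!(
            "--limit-ledger-size {} is below the minimum {}",
            requested, minimum
        ))
    } else {
        Ok(requested)
    }
}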