@@ -70,7 +70,11 @@ impl ForkProgress {
}

impl ReplayStage {
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
#[allow(
clippy::new_ret_no_self,
clippy::too_many_arguments,
clippy::type_complexity
)]
pub fn new<T>(
my_pubkey: &Pubkey,
vote_account: &Pubkey,
@@ -83,11 +87,11 @@ impl ReplayStage {
subscriptions: &Arc<RpcSubscriptions>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> (Self, Receiver<(u64, Pubkey)>, Receiver<Vec<u64>>)
) -> (Self, Receiver<(u64, Pubkey)>, Receiver<Vec<Arc<Bank>>>)
where
T: 'static + KeypairUtil + Send + Sync,
{
let (root_slot_sender, root_slot_receiver) = channel();
let (root_bank_sender, root_bank_receiver) = channel();
let (slot_full_sender, slot_full_receiver) = channel();
trace!("replay stage");
let exit_ = exit.clone();
@@ -152,7 +156,7 @@ impl ReplayStage {
&cluster_info,
&blocktree,
&leader_schedule_cache,
&root_slot_sender,
&root_bank_sender,
)?;

Self::reset_poh_recorder(
@@ -208,7 +212,7 @@ impl ReplayStage {
Ok(())
})
.unwrap();
(Self { t_replay }, slot_full_receiver, root_slot_receiver)
(Self { t_replay }, slot_full_receiver, root_bank_receiver)
}
pub fn start_leader(
my_pubkey: &Pubkey,
@@ -297,7 +301,7 @@ impl ReplayStage {
cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: &Arc<Blocktree>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
root_slot_sender: &Sender<Vec<u64>>,
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
) -> Result<()>
where
T: 'static + KeypairUtil + Send + Sync,
@@ -310,12 +314,9 @@ impl ReplayStage {
.get(new_root)
.expect("Root bank doesn't exist")
.clone();
let mut rooted_slots = root_bank
.parents()
.into_iter()
.map(|bank| bank.slot())
.collect::<Vec<_>>();
rooted_slots.push(root_bank.slot());
let mut rooted_banks = root_bank.parents();
rooted_banks.push(root_bank);
let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect();
blocktree
.set_roots(&rooted_slots)
.expect("Ledger set roots failed");
@@ -325,7 +326,7 @@ impl ReplayStage {
leader_schedule_cache.set_root(new_root);
bank_forks.write().unwrap().set_root(new_root);
Self::handle_new_root(&bank_forks, progress);
root_slot_sender.send(rooted_slots)?;
root_bank_sender.send(rooted_banks)?;
}
locktower.update_epoch(&bank);
if let Some(ref voting_keypair) = voting_keypair {

@@ -317,7 +317,7 @@ impl Replicator {
Ok(blockhash_and_slot) => blockhash_and_slot,
Err(e) => {
warn!(
"Error could get a newer blockhash than {:?}. {:?}",
"Error couldn't get a newer blockhash than {:?}. {:?}",
self.blockhash, e
);
break;
@@ -471,7 +471,7 @@ impl Replicator {
let instruction = storage_instruction::mining_proof(
&self.storage_keypair.pubkey(),
self.sha_state,
self.slot,
get_segment_from_slot(self.slot),
Signature::new(&self.signature.to_bytes()),
);
let message = Message::new_with_payer(vec![instruction], Some(&self.keypair.pubkey()));
@@ -508,7 +508,7 @@ impl Replicator {
fn poll_for_blockhash_and_slot(
cluster_info: &Arc<RwLock<ClusterInfo>>,
previous_blockhash: &str,
) -> Result<(String, u64)> {
) -> result::Result<(String, u64), Error> {
for _ in 0..10 {
let rpc_client = {
let cluster_info = cluster_info.read().unwrap();
@@ -519,12 +519,18 @@ impl Replicator {
};
let storage_blockhash = rpc_client
.retry_make_rpc_request(&RpcRequest::GetStorageBlockhash, None, 0)
.expect("rpc request")
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))
})?
.to_string();
if storage_blockhash != *previous_blockhash {
let storage_slot = rpc_client
.retry_make_rpc_request(&RpcRequest::GetStorageSlot, None, 0)
.expect("rpc request")
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))
})?
.as_u64()
.unwrap();
info!("storage slot: {}", storage_slot);

@@ -119,7 +119,9 @@ impl JsonRpcRequestProcessor {
}

fn get_storage_pubkeys_for_slot(&self, slot: u64) -> Result<Vec<Pubkey>> {
Ok(self.storage_state.get_pubkeys_for_slot(slot))
Ok(self
.storage_state
.get_pubkeys_for_slot(slot, &self.bank_forks))
}

pub fn fullnode_exit(&self) -> Result<bool> {

@@ -9,17 +9,20 @@ use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use crate::cluster_info::ClusterInfo;
use crate::result::{Error, Result};
use crate::service::Service;
use bincode::deserialize;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_runtime::bank::Bank;
use solana_runtime::storage_utils::replicator_accounts;
use solana_sdk::account::Account;
use solana_sdk::account_utils::State;
use solana_sdk::hash::Hash;
use solana_sdk::instruction::Instruction;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::transaction::Transaction;
use solana_storage_api::storage_contract::{CheckedProof, Proof, ProofStatus};
use solana_storage_api::storage_instruction::{proof_validation, StorageInstruction};
use solana_storage_api::storage_contract::{CheckedProof, Proof, ProofStatus, StorageContract};
use solana_storage_api::storage_instruction::proof_validation;
use solana_storage_api::{get_segment_from_slot, storage_instruction};
use std::collections::HashMap;
use std::io;
@@ -29,7 +32,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, RwLock};
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
use std::time::{Duration, Instant};

// Block of hash answers to validate against
// Vec of [ledger blocks] x [keys]
@@ -51,7 +54,7 @@ pub struct StorageStateInner {
struct StorageSlots {
last_root: u64,
slot_count: u64,
pending_roots: Vec<u64>,
pending_root_banks: Vec<Arc<Bank>>,
}

#[derive(Clone, Default)]
@@ -119,17 +122,35 @@ impl StorageState {
self.state.read().unwrap().slot
}

pub fn get_pubkeys_for_slot(&self, slot: u64) -> Vec<Pubkey> {
pub fn get_pubkeys_for_slot(
&self,
slot: u64,
bank_forks: &Arc<RwLock<BankForks>>,
) -> Vec<Pubkey> {
// TODO: keep track of age?
const MAX_PUBKEYS_TO_RETURN: usize = 5;
let index = get_segment_from_slot(slot) as usize;
let replicator_map = &self.state.read().unwrap().replicator_map;
let working_bank = bank_forks.read().unwrap().working_bank();
let accounts = replicator_accounts(&working_bank);
if index < replicator_map.len() {
replicator_map[index]
//perform an account owner lookup
let mut slot_replicators = replicator_map[index]
.keys()
.cloned()
.take(MAX_PUBKEYS_TO_RETURN)
.collect::<Vec<_>>()
.filter_map(|account_id| {
accounts.get(account_id).and_then(|account| {
if let Ok(StorageContract::ReplicatorStorage { owner, .. }) =
account.state()
{
Some(owner)
} else {
None
}
})
})
.collect::<Vec<_>>();
slot_replicators.truncate(MAX_PUBKEYS_TO_RETURN);
slot_replicators
} else {
vec![]
}
@@ -140,7 +161,7 @@ impl StorageStage {
#[allow(clippy::too_many_arguments)]
pub fn new(
storage_state: &StorageState,
slot_receiver: Receiver<Vec<u64>>,
bank_receiver: Receiver<Vec<Arc<Bank>>>,
blocktree: Option<Arc<Blocktree>>,
keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
@@ -165,7 +186,7 @@ impl StorageStage {
if let Err(e) = Self::process_entries(
&storage_keypair,
&storage_state_inner,
&slot_receiver,
&bank_receiver,
&some_blocktree,
&mut storage_slots,
&mut current_key,
@@ -278,11 +299,54 @@ impl StorageStage {
let signer_keys = vec![keypair.as_ref(), storage_keypair.as_ref()];
let message = Message::new_with_payer(vec![instruction], Some(&signer_keys[0].pubkey()));
let transaction = Transaction::new(&signer_keys, message, blockhash);
// try sending the transaction upto 5 times
for _ in 0..5 {
transactions_socket.send_to(
&bincode::serialize(&transaction).unwrap(),
cluster_info.read().unwrap().my_data().tpu,
)?;
sleep(Duration::from_millis(100));
if Self::poll_for_signature_confirmation(bank_forks, &transaction.signatures[0], 0)
.is_ok()
{
break;
};
}
Ok(())
}

transactions_socket.send_to(
&bincode::serialize(&transaction).unwrap(),
cluster_info.read().unwrap().my_data().tpu,
)?;
fn poll_for_signature_confirmation(
bank_forks: &Arc<RwLock<BankForks>>,
signature: &Signature,
min_confirmed_blocks: usize,
) -> Result<()> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
let response = bank_forks
.read()
.unwrap()
.working_bank()
.get_signature_confirmation_status(signature);
if let Some((confirmations, res)) = response {
if res.is_ok() {
if confirmed_blocks != confirmations {
now = Instant::now();
confirmed_blocks = confirmations;
}
if confirmations >= min_confirmed_blocks {
break;
}
}
};
if now.elapsed().as_secs() > 5 {
return Err(Error::from(io::Error::new(
io::ErrorKind::Other,
"signature not found",
)));
}
sleep(Duration::from_millis(250));
}
Ok(())
}

@@ -364,20 +428,18 @@ impl StorageStage {
Ok(())
}

fn process_storage_transaction(
data: &[u8],
fn process_replicator_storage(
slot: u64,
account_id: Pubkey,
account: Account,
storage_state: &Arc<RwLock<StorageStateInner>>,
current_key_idx: &mut usize,
storage_account_key: Pubkey,
) {
match deserialize(data) {
Ok(StorageInstruction::SubmitMiningProof {
slot: proof_slot,
signature,
sha_state,
}) => {
if proof_slot < slot {
if let Ok(StorageContract::ReplicatorStorage { proofs, .. }) = account.state() {
//convert slot to segment
let segment = get_segment_from_slot(slot);
if let Some(proofs) = proofs.get(&segment) {
for (_, proof) in proofs.iter() {
{
debug!(
"generating storage_keys from storage txs current_key_idx: {}",
@@ -385,43 +447,37 @@ impl StorageStage {
);
let storage_keys = &mut storage_state.write().unwrap().storage_keys;
storage_keys[*current_key_idx..*current_key_idx + size_of::<Signature>()]
.copy_from_slice(signature.as_ref());
.copy_from_slice(proof.signature.as_ref());
*current_key_idx += size_of::<Signature>();
*current_key_idx %= storage_keys.len();
}

let mut statew = storage_state.write().unwrap();
let max_segment_index = get_segment_from_slot(slot) as usize;
let max_segment_index = get_segment_from_slot(slot);
if statew.replicator_map.len() < max_segment_index {
statew
.replicator_map
.resize(max_segment_index, HashMap::new());
}
let proof_segment_index = get_segment_from_slot(proof_slot) as usize;
let proof_segment_index = proof.segment_index;
if proof_segment_index < statew.replicator_map.len() {
// TODO randomly select and verify the proof first
// Copy the submitted proof
statew.replicator_map[proof_segment_index]
.entry(storage_account_key)
.entry(account_id)
.or_default()
.push(Proof {
signature,
sha_state,
});
.push(proof.clone());
}
}
debug!("storage proof: slot: {}", slot);
}
Ok(_) => {}
Err(e) => {
info!("error: {:?}", e);
}
}
}

fn process_entries(
storage_keypair: &Arc<Keypair>,
storage_state: &Arc<RwLock<StorageStateInner>>,
slot_receiver: &Receiver<Vec<u64>>,
bank_receiver: &Receiver<Vec<Arc<Bank>>>,
blocktree: &Arc<Blocktree>,
storage_slots: &mut StorageSlots,
current_key_idx: &mut usize,
@@ -430,65 +486,48 @@ impl StorageStage {
) -> Result<()> {
let timeout = Duration::new(1, 0);
storage_slots
.pending_roots
.append(&mut slot_receiver.recv_timeout(timeout)?);
.pending_root_banks
.append(&mut bank_receiver.recv_timeout(timeout)?);
storage_slots
.pending_roots
.sort_unstable_by(|a, b| b.cmp(a));
.pending_root_banks
.sort_unstable_by(|a, b| b.slot().cmp(&a.slot()));
// check if any rooted slots were missed leading up to this one and bump slot count and process proofs for each missed root
while let Some(slot) = storage_slots.pending_roots.pop() {
if slot > storage_slots.last_root {
if !blocktree.is_full(slot) {
// stick this slot back into pending_roots. Evaluate it next time around.
storage_slots.pending_roots.push(slot);
break;
}
while let Some(bank) = storage_slots.pending_root_banks.pop() {
if bank.slot() > storage_slots.last_root {
storage_slots.slot_count += 1;
storage_slots.last_root = slot;
storage_slots.last_root = bank.slot();

if let Ok(entries) = blocktree.get_slot_entries(slot, 0, None) {
for entry in &entries {
// Go through the transactions, find proofs, and use them to update
// the storage_keys with their signatures
for tx in &entry.transactions {
for instruction in tx.message.instructions.iter() {
let program_id =
tx.message.account_keys[instruction.program_ids_index as usize];
if solana_storage_api::check_id(&program_id) {
let storage_account_key =
tx.message.account_keys[instruction.accounts[0] as usize];
Self::process_storage_transaction(
&instruction.data,
slot,
storage_state,
current_key_idx,
storage_account_key,
);
}
}
}
}
if storage_slots.slot_count % storage_rotate_count == 0 {
// assume the last entry in the slot is the blockhash for that slot
let entry_hash = entries.last().unwrap().hash;
debug!(
"crosses sending at root slot: {}! with last entry's hash {}",
storage_slots.slot_count, entry_hash
if storage_slots.slot_count % storage_rotate_count == 0 {
// load all the replicator accounts in the bank. collect all their proofs at the current slot
let replicator_accounts = replicator_accounts(bank.as_ref());
// find proofs, and use them to update
// the storage_keys with their signatures
for (account_id, account) in replicator_accounts.into_iter() {
Self::process_replicator_storage(
bank.slot(),
account_id,
account,
storage_state,
current_key_idx,
);
Self::process_entry_crossing(
&storage_keypair,
&storage_state,
&blocktree,
entry_hash,
slot,
instruction_sender,
)?;
Self::submit_verifications(
&storage_state,
&storage_keypair,
instruction_sender,
)?
}

// TODO un-ignore this result and be sure to drain all pending proofs
//process a "crossing"
let _ignored = Self::process_entry_crossing(
&storage_keypair,
&storage_state,
&blocktree,
bank.last_blockhash(),
bank.slot(),
instruction_sender,
);
Self::submit_verifications(
get_segment_from_slot(bank.slot()),
&storage_state,
&storage_keypair,
instruction_sender,
)?
}
}
}
@@ -496,6 +535,7 @@ impl StorageStage {
}

fn submit_verifications(
current_segment: usize,
storage_state: &Arc<RwLock<StorageStateInner>>,
storage_keypair: &Arc<Keypair>,
ix_sender: &Sender<Instruction>,
@@ -507,35 +547,44 @@ impl StorageStage {
.replicator_map
.iter_mut()
.enumerate()
.flat_map(|(segment, proof_map)| {
.flat_map(|(_, proof_map)| {
let checked_proofs = proof_map
.iter_mut()
.map(|(id, proofs)| {
(
*id,
proofs
.drain(..)
.map(|proof| CheckedProof {
proof,
status: ProofStatus::Valid,
})
.collect::<Vec<_>>(),
)
.filter_map(|(id, proofs)| {
if !proofs.is_empty() {
Some((
*id,
proofs
.drain(..)
.map(|proof| CheckedProof {
proof,
status: ProofStatus::Valid,
})
.collect::<Vec<_>>(),
))
} else {
None
}
})
.collect::<HashMap<_, _>>();
if !checked_proofs.is_empty() {
let ix =
proof_validation(&storage_keypair.pubkey(), segment as u64, checked_proofs);
let ix = proof_validation(
&storage_keypair.pubkey(),
current_segment as u64,
checked_proofs,
);
Some(ix)
} else {
None
}
})
.collect();
// TODO Avoid AccountInUse errors in this loop
let res: std::result::Result<_, _> = instructions
.into_iter()
.map(|ix| ix_sender.send(ix))
.map(|ix| {
sleep(Duration::from_millis(100));
ix_sender.send(ix)
})
.collect();
res?;
Ok(())
@@ -554,18 +603,18 @@ impl Service for StorageStage {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::tests::{entries_to_blobs, make_slot_entries};
use crate::blocktree::{create_new_tmp_ledger, Blocktree};
use crate::cluster_info::ClusterInfo;
use crate::contact_info::ContactInfo;
use crate::entry::Entry;
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::service::Service;
use crate::{blocktree_processor, entry};
use rayon::prelude::*;
use solana_runtime::bank::Bank;
use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
use solana_storage_api::SLOTS_PER_SEGMENT;
use std::cmp::{max, min};
use std::fs::remove_dir_all;
@@ -609,7 +658,7 @@ mod tests {
}

#[test]
fn test_storage_stage_process_entries() {
fn test_storage_stage_process_banks() {
solana_logger::setup();
let keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
@@ -618,19 +667,17 @@ mod tests {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(1000);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);

let (blobs, _entries) = make_slot_entries(1, 0, 64);
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let slot = 1;
let bank = Arc::new(Bank::new(&genesis_block));
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank], 0)));
blocktree.insert_data_blobs(blobs).unwrap();
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank.clone()], 0)));

let cluster_info = test_cluster_info(&keypair.pubkey());
let (slot_sender, slot_receiver) = channel();
let (bank_sender, bank_receiver) = channel();
let storage_state = StorageState::new();
let storage_stage = StorageStage::new(
&storage_state,
slot_receiver,
bank_receiver,
Some(blocktree.clone()),
&keypair,
&storage_keypair,
@@ -639,22 +686,34 @@ mod tests {
STORAGE_ROTATE_TEST_COUNT,
&cluster_info,
);
slot_sender.send(vec![slot]).unwrap();
bank_sender.send(vec![bank.clone()]).unwrap();

let keypair = Keypair::new();
let hash = Hash::default();
let signature = keypair.sign_message(&hash.as_ref());
#[cfg(feature = "cuda")]
let mut result = storage_state.get_mining_result(&signature);
#[cfg(not(feature = "cuda"))]
let result = storage_state.get_mining_result(&signature);

assert_eq!(result, Hash::default());

let rooted_slots = (slot..slot + SLOTS_PER_SEGMENT + 1)
let mut last_bank = bank;
let rooted_banks = (slot..slot + SLOTS_PER_SEGMENT + 1)
.map(|i| {
let (blobs, _entries) = make_slot_entries(i, i - 1, 64);
blocktree.insert_data_blobs(blobs).unwrap();
i
let bank = Bank::new_from_parent(&last_bank, &keypair.pubkey(), i);
blocktree_processor::process_entries(
&bank,
&entry::create_ticks(64, bank.last_blockhash()),
)
.expect("failed process entries");
last_bank = Arc::new(bank);
last_bank.clone()
})
.collect::<Vec<_>>();
slot_sender.send(rooted_slots).unwrap();
bank_sender.send(rooted_banks).unwrap();

#[cfg(feature = "cuda")]
for _ in 0..5 {
result = storage_state.get_mining_result(&signature);
if result != Hash::default() {
@@ -679,28 +738,35 @@ mod tests {
}

#[test]
fn test_storage_stage_process_proof_entries() {
fn test_storage_stage_process_account_proofs() {
solana_logger::setup();
let keypair = Arc::new(Keypair::new());
let storage_keypair = Arc::new(Keypair::new());
let replicator_keypair = Arc::new(Keypair::new());
let exit = Arc::new(AtomicBool::new(false));

let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(1000);
let GenesisBlockInfo {
mut genesis_block,
mint_keypair,
..
} = create_genesis_block(1000);
genesis_block
.native_instruction_processors
.push(solana_storage_program::solana_storage_program!());
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);

let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let (blobs, entries) = make_slot_entries(1, 0, 128);
blocktree.insert_data_blobs(blobs).unwrap();

let bank = Arc::new(Bank::new(&genesis_block));
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank], 0)));
let bank = Bank::new(&genesis_block);
let bank = Arc::new(bank);
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank.clone()], 0)));
let cluster_info = test_cluster_info(&keypair.pubkey());

let (slot_sender, slot_receiver) = channel();
let (bank_sender, bank_receiver) = channel();
let storage_state = StorageState::new();
let storage_stage = StorageStage::new(
&storage_state,
slot_receiver,
bank_receiver,
Some(blocktree.clone()),
&keypair,
&storage_keypair,
@@ -709,7 +775,24 @@ mod tests {
STORAGE_ROTATE_TEST_COUNT,
&cluster_info,
);
slot_sender.send(vec![1]).unwrap();
bank_sender.send(vec![bank.clone()]).unwrap();

// create accounts
let bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 1));
let account_ix = storage_instruction::create_replicator_storage_account(
&mint_keypair.pubkey(),
&Pubkey::new_rand(),
&replicator_keypair.pubkey(),
1,
);
let account_tx = Transaction::new_signed_instructions(
&[&mint_keypair],
account_ix,
bank.last_blockhash(),
);
bank.process_transaction(&account_tx).expect("create");

bank_sender.send(vec![bank.clone()]).unwrap();

let mut reference_keys;
{
@@ -719,21 +802,34 @@ mod tests {
}

let keypair = Keypair::new();

let mining_proof_ix = storage_instruction::mining_proof(
&keypair.pubkey(),
&replicator_keypair.pubkey(),
Hash::default(),
0,
keypair.sign_message(b"test"),
);
let mining_proof_tx = Transaction::new_unsigned_instructions(vec![mining_proof_ix]);
let mining_txs = vec![mining_proof_tx];

let next_hash = solana_sdk::hash::hash(entries.last().unwrap().hash.as_ref());
let proof_entry = Entry::new(&next_hash, 1, mining_txs);
blocktree
.insert_data_blobs(entries_to_blobs(&vec![proof_entry], 2, 1, true))
.unwrap();
slot_sender.send(vec![2]).unwrap();
let next_bank = Arc::new(Bank::new_from_parent(&bank, &keypair.pubkey(), 2));
//register ticks so the program reports a different segment
blocktree_processor::process_entries(
&next_bank,
&entry::create_ticks(
DEFAULT_TICKS_PER_SLOT * SLOTS_PER_SEGMENT + 1,
bank.last_blockhash(),
),
)
.unwrap();
let message = Message::new_with_payer(vec![mining_proof_ix], Some(&mint_keypair.pubkey()));
let mining_proof_tx = Transaction::new(
&[&mint_keypair, replicator_keypair.as_ref()],
message,
next_bank.last_blockhash(),
);
next_bank
.process_transaction(&mining_proof_tx)
.expect("process txs");
bank_sender.send(vec![next_bank]).unwrap();

for _ in 0..5 {
{

@@ -111,7 +111,7 @@ impl Tvu {
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
);

let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
let (replay_stage, slot_full_receiver, root_bank_receiver) = ReplayStage::new(
&keypair.pubkey(),
vote_account,
voting_keypair,
@@ -139,7 +139,7 @@ impl Tvu {

let storage_stage = StorageStage::new(
storage_state,
root_slot_receiver,
root_bank_receiver,
Some(blocktree),
&keypair,
storage_keypair,