Change replicators to slot-based (#4118)
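This change moves the replicator/storage pipeline from entry heights to slots. The storage RPC surface is renamed (`getStorageEntryHeight` → `getStorageSlot`, `getStoragePubkeysForEntryHeight` → `getStoragePubkeysForSlot`), `ENTRIES_PER_SEGMENT` becomes `SLOTS_PER_SEGMENT`, and the replicator picks the segment it replicates from a slot. Instead of forwarding entries to the storage stage over an `EntrySender`, `ReplayStage` now returns an extra `Receiver<u64>` of rooted slots, the storage stage reads each slot's entries straight from the blocktree, and the old entry-forwarding channel is removed from the broadcast stage, fullnode, and TPU wiring.

A minimal, standalone sketch of the new hand-off shape (not the crate's actual code; `get_segment_from_slot` is assumed to divide by `SLOTS_PER_SEGMENT`, since the diff below only shows its call sites and the constant's value):

```rust
use std::sync::mpsc::channel;
use std::thread;

const SLOTS_PER_SEGMENT: u64 = 16; // renamed from ENTRIES_PER_SEGMENT in the diff

// Assumed definition; the diff only moves this helper into solana_storage_api.
fn get_segment_from_slot(slot: u64) -> u64 {
    slot / SLOTS_PER_SEGMENT
}

fn main() {
    // ReplayStage side: rooted slot numbers go into a plain u64 channel.
    let (root_slot_sender, slot_receiver) = channel::<u64>();

    // StorageStage side: each received slot is mapped to a ledger segment.
    let verifier = thread::spawn(move || {
        while let Ok(slot) = slot_receiver.recv() {
            println!("slot {} -> segment {}", slot, get_segment_from_slot(slot));
        }
    });

    for slot in [16u64, 17, 48] {
        root_slot_sender.send(slot).unwrap();
    }
    drop(root_slot_sender); // close the channel so the verifier thread exits
    verifier.join().unwrap();
}
```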
@@ -14,8 +14,8 @@ pub enum RpcRequest {
     GetSignatureStatus,
     GetSlotLeader,
     GetStorageBlockhash,
-    GetStorageEntryHeight,
-    GetStoragePubkeysForEntryHeight,
+    GetStorageSlot,
+    GetStoragePubkeysForSlot,
     GetTransactionCount,
     RegisterNode,
     RequestAirdrop,
@@ -40,8 +40,8 @@ impl RpcRequest {
             RpcRequest::GetSignatureStatus => "getSignatureStatus",
             RpcRequest::GetSlotLeader => "getSlotLeader",
             RpcRequest::GetStorageBlockhash => "getStorageBlockhash",
-            RpcRequest::GetStorageEntryHeight => "getStorageEntryHeight",
-            RpcRequest::GetStoragePubkeysForEntryHeight => "getStoragePubkeysForEntryHeight",
+            RpcRequest::GetStorageSlot => "getStorageSlot",
+            RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
             RpcRequest::GetTransactionCount => "getTransactionCount",
             RpcRequest::RegisterNode => "registerNode",
             RpcRequest::RequestAirdrop => "requestAirdrop",
@@ -2,7 +2,7 @@
 //!
 use crate::blocktree::Blocktree;
 use crate::cluster_info::{ClusterInfo, ClusterInfoError, NEIGHBORHOOD_SIZE};
-use crate::entry::{EntrySender, EntrySlice};
+use crate::entry::EntrySlice;
 use crate::erasure::CodingGenerator;
 use crate::packet::index_blobs_with_genesis;
 use crate::poh_recorder::WorkingBankEntries;
@@ -39,7 +39,6 @@ impl Broadcast {
         receiver: &Receiver<WorkingBankEntries>,
         sock: &UdpSocket,
         blocktree: &Arc<Blocktree>,
-        storage_entry_sender: &EntrySender,
         genesis_blockhash: &Hash,
     ) -> Result<()> {
         let timer = Duration::new(1, 0);
@@ -87,11 +86,9 @@ impl Broadcast {

         let blobs: Vec<_> = ventries
             .into_par_iter()
-            .map_with(storage_entry_sender.clone(), |s, p| {
+            .map(|p| {
                 let entries: Vec<_> = p.into_iter().map(|e| e.0).collect();
-                let blobs = entries.to_shared_blobs();
-                let _ignored = s.send(entries);
-                blobs
+                entries.to_shared_blobs()
             })
             .flatten()
             .collect();
@@ -186,7 +183,6 @@ impl BroadcastStage {
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         receiver: &Receiver<WorkingBankEntries>,
         blocktree: &Arc<Blocktree>,
-        storage_entry_sender: EntrySender,
         genesis_blockhash: &Hash,
     ) -> BroadcastStageReturnType {
         let me = cluster_info.read().unwrap().my_data().clone();
@@ -198,14 +194,9 @@ impl BroadcastStage {
         };

         loop {
-            if let Err(e) = broadcast.run(
-                &cluster_info,
-                receiver,
-                sock,
-                blocktree,
-                &storage_entry_sender,
-                genesis_blockhash,
-            ) {
+            if let Err(e) =
+                broadcast.run(&cluster_info, receiver, sock, blocktree, genesis_blockhash)
+            {
                 match e {
                     Error::RecvTimeoutError(RecvTimeoutError::Disconnected) | Error::SendError => {
                         return BroadcastStageReturnType::ChannelDisconnected;
@@ -243,7 +234,6 @@ impl BroadcastStage {
         receiver: Receiver<WorkingBankEntries>,
         exit_sender: &Arc<AtomicBool>,
         blocktree: &Arc<Blocktree>,
-        storage_entry_sender: EntrySender,
         genesis_blockhash: &Hash,
     ) -> Self {
         let blocktree = blocktree.clone();
@@ -258,7 +248,6 @@ impl BroadcastStage {
                 &cluster_info,
                 &receiver,
                 &blocktree,
-                storage_entry_sender,
                 &genesis_blockhash,
             )
         })
@@ -320,7 +309,6 @@ mod test {
         let cluster_info = Arc::new(RwLock::new(cluster_info));

         let exit_sender = Arc::new(AtomicBool::new(false));
-        let (storage_sender, _receiver) = channel();

         let (genesis_block, _) = GenesisBlock::new(10_000);
         let bank = Arc::new(Bank::new(&genesis_block));
@@ -332,7 +320,6 @@ mod test {
             entry_receiver,
             &exit_sender,
             &blocktree,
-            storage_sender,
             &Hash::default(),
         );

@@ -5,7 +5,7 @@ use std::io::{BufWriter, Write};
 use std::path::Path;
 use std::sync::Arc;

-use crate::storage_stage::ENTRIES_PER_SEGMENT;
+use crate::storage_stage::SLOTS_PER_SEGMENT;

 pub const CHACHA_BLOCK_SIZE: usize = 64;
 pub const CHACHA_KEY_SIZE: usize = 32;
@@ -50,8 +50,7 @@ pub fn chacha_cbc_encrypt_ledger(
     let mut entry = slice;

     loop {
-        match blocktree.read_blobs_bytes(entry, ENTRIES_PER_SEGMENT - total_entries, &mut buffer, 0)
-        {
+        match blocktree.read_blobs_bytes(entry, SLOTS_PER_SEGMENT - total_entries, &mut buffer, 0) {
             Ok((num_entries, entry_len)) => {
                 debug!(
                     "chacha: encrypting slice: {} num_entries: {} entry_len: {}",
@@ -11,7 +11,7 @@ use std::io;
 use std::mem::size_of;
 use std::sync::Arc;

-use crate::storage_stage::ENTRIES_PER_SEGMENT;
+use crate::storage_stage::SLOTS_PER_SEGMENT;

 // Encrypt a file with multiple starting IV states, determined by ivecs.len()
 //
@@ -47,8 +47,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
         chacha_init_sha_state(int_sha_states.as_mut_ptr(), num_keys as u32);
     }
     loop {
-        match blocktree.read_blobs_bytes(entry, ENTRIES_PER_SEGMENT - total_entries, &mut buffer, 0)
-        {
+        match blocktree.read_blobs_bytes(entry, SLOTS_PER_SEGMENT - total_entries, &mut buffer, 0) {
             Ok((num_entries, entry_len)) => {
                 debug!(
                     "chacha_cuda: encrypting segment: {} num_entries: {} entry_len: {}",
@@ -78,9 +77,9 @@ pub fn chacha_cbc_encrypt_file_many_keys(
                 entry += num_entries;
                 debug!(
                     "total entries: {} entry: {} segment: {} entries_per_segment: {}",
-                    total_entries, entry, segment, ENTRIES_PER_SEGMENT
+                    total_entries, entry, segment, SLOTS_PER_SEGMENT
                 );
-                if (entry - segment) >= ENTRIES_PER_SEGMENT {
+                if (entry - segment) >= SLOTS_PER_SEGMENT {
                     break;
                 }
             }
@@ -32,7 +32,7 @@ use solana_vote_api::vote_instruction;
 use solana_vote_api::vote_state::Vote;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::mpsc::{channel, Receiver};
+use std::sync::mpsc::Receiver;
 use std::sync::{Arc, Mutex, RwLock};
 use std::thread::Result;

@@ -51,7 +51,7 @@ impl Default for FullnodeConfig {
         // TODO: remove this, temporary parameter to configure
         // storage amount differently for test configurations
         // so tests don't take forever to run.
-        const NUM_HASHES_FOR_STORAGE_ROTATE: u64 = 1024;
+        const NUM_HASHES_FOR_STORAGE_ROTATE: u64 = 128;
         Self {
             sigverify_disabled: false,
             voting_disabled: false,
@@ -220,14 +220,10 @@ impl Fullnode {
             Some(Arc::new(voting_keypair))
         };

-        // Setup channel for sending entries to storage stage
-        let (sender, receiver) = channel();
-
         let tvu = Tvu::new(
             vote_account,
             voting_keypair,
             &bank_forks,
-            &bank_forks_info,
             &cluster_info,
             sockets,
             blocktree.clone(),
@@ -237,8 +233,6 @@ impl Fullnode {
             ledger_signal_receiver,
             &subscriptions,
             &poh_recorder,
-            sender.clone(),
-            receiver,
             &leader_schedule_cache,
             &exit,
             &genesis_blockhash,
@@ -258,7 +252,6 @@ impl Fullnode {
             node.sockets.broadcast,
             config.sigverify_disabled,
             &blocktree,
-            sender,
             &exit,
             &genesis_blockhash,
         );
@@ -56,6 +56,7 @@ pub struct ClusterConfig {
     /// The fullnode config that should be applied to every node in the cluster
     pub fullnode_config: FullnodeConfig,
     /// Number of replicators in the cluster
+    /// Note- replicators will timeout if ticks_per_slot is much larger than the default 8
     pub num_replicators: u64,
     /// Number of nodes that are unstaked and not voting (a.k.a listening)
     pub num_listeners: u64,
@@ -449,6 +450,7 @@ impl Drop for LocalCluster {
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
     use solana_runtime::bank::MINIMUM_SLOT_LENGTH;

     #[test]
@@ -465,6 +467,7 @@ mod test {
         solana_logger::setup();
         let mut fullnode_config = FullnodeConfig::default();
         fullnode_config.rpc_config.enable_fullnode_exit = true;
+        fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
         const NUM_NODES: usize = 1;
         let num_replicators = 1;
         let config = ClusterConfig {
@@ -472,7 +475,7 @@
             num_replicators: 1,
             node_stakes: vec![3; NUM_NODES],
             cluster_lamports: 100,
-            ticks_per_slot: 16,
+            ticks_per_slot: 8,
             slots_per_epoch: MINIMUM_SLOT_LENGTH as u64,
             ..ClusterConfig::default()
         };
@@ -4,7 +4,7 @@ use crate::bank_forks::BankForks;
 use crate::blocktree::Blocktree;
 use crate::blocktree_processor;
 use crate::cluster_info::ClusterInfo;
-use crate::entry::{Entry, EntrySender, EntrySlice};
+use crate::entry::{Entry, EntrySlice};
 use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::leader_schedule_utils;
 use crate::locktower::{Locktower, StakeLockout};
@@ -83,12 +83,12 @@ impl ReplayStage {
         ledger_signal_receiver: Receiver<bool>,
         subscriptions: &Arc<RpcSubscriptions>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
-        storage_entry_sender: EntrySender,
         leader_schedule_cache: &Arc<LeaderScheduleCache>,
-    ) -> (Self, Receiver<(u64, Pubkey)>)
+    ) -> (Self, Receiver<(u64, Pubkey)>, Receiver<u64>)
     where
         T: 'static + KeypairUtil + Send + Sync,
     {
+        let (root_slot_sender, root_slot_receiver) = channel();
         let (slot_full_sender, slot_full_receiver) = channel();
         trace!("replay stage");
         let exit_ = exit.clone();
@@ -132,7 +132,6 @@ impl ReplayStage {
                         &my_id,
                         &mut ticks_per_slot,
                         &mut progress,
-                        &storage_entry_sender,
                         &slot_full_sender,
                     )?;

@@ -158,6 +157,7 @@ impl ReplayStage {
                             &cluster_info,
                             &blocktree,
                             &leader_schedule_cache,
+                            &root_slot_sender,
                         )?;

                         Self::reset_poh_recorder(
@@ -213,7 +213,7 @@ impl ReplayStage {
                 Ok(())
             })
             .unwrap();
-        (Self { t_replay }, slot_full_receiver)
+        (Self { t_replay }, slot_full_receiver, root_slot_receiver)
     }
     pub fn start_leader(
         my_id: &Pubkey,
@@ -283,12 +283,10 @@ impl ReplayStage {
         bank: &Bank,
         blocktree: &Blocktree,
         progress: &mut HashMap<u64, ForkProgress>,
-        forward_entry_sender: &EntrySender,
     ) -> Result<()> {
         let (entries, num) = Self::load_blocktree_entries(bank, blocktree, progress)?;
         let len = entries.len();
-        let result =
-            Self::replay_entries_into_bank(bank, entries, progress, forward_entry_sender, num);
+        let result = Self::replay_entries_into_bank(bank, entries, progress, num);
         if result.is_ok() {
             trace!("verified entries {}", len);
             inc_new_counter_info!("replicate-stage_process_entries", len);
@@ -300,6 +298,7 @@ impl ReplayStage {
         Ok(())
     }

+    #[allow(clippy::too_many_arguments)]
     fn handle_votable_bank<T>(
         bank: &Arc<Bank>,
         bank_forks: &Arc<RwLock<BankForks>>,
@@ -310,6 +309,7 @@ impl ReplayStage {
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         blocktree: &Arc<Blocktree>,
         leader_schedule_cache: &Arc<LeaderScheduleCache>,
+        root_slot_sender: &Sender<u64>,
     ) -> Result<()>
     where
         T: 'static + KeypairUtil + Send + Sync,
@@ -319,6 +319,7 @@ impl ReplayStage {
             leader_schedule_cache.set_root(new_root);
             blocktree.set_root(new_root)?;
             Self::handle_new_root(&bank_forks, progress);
+            root_slot_sender.send(new_root)?;
         }
         locktower.update_epoch(&bank);
         if let Some(ref voting_keypair) = voting_keypair {
@@ -365,7 +366,6 @@ impl ReplayStage {
         my_id: &Pubkey,
         ticks_per_slot: &mut u64,
         progress: &mut HashMap<u64, ForkProgress>,
-        forward_entry_sender: &EntrySender,
         slot_full_sender: &Sender<(u64, Pubkey)>,
     ) -> Result<()> {
         let active_banks = bank_forks.read().unwrap().active_banks();
@@ -375,12 +375,7 @@ impl ReplayStage {
             let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone();
             *ticks_per_slot = bank.ticks_per_slot();
             if bank.collector_id() != *my_id {
-                Self::replay_blocktree_into_bank(
-                    &bank,
-                    &blocktree,
-                    progress,
-                    &forward_entry_sender,
-                )?;
+                Self::replay_blocktree_into_bank(&bank, &blocktree, progress)?;
             }
             let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1;
             if bank.tick_height() == max_tick_height {
@@ -514,7 +509,6 @@ impl ReplayStage {
         bank: &Bank,
         entries: Vec<Entry>,
         progress: &mut HashMap<u64, ForkProgress>,
-        forward_entry_sender: &EntrySender,
         num: usize,
     ) -> Result<()> {
         let bank_progress = &mut progress
@@ -525,9 +519,6 @@ impl ReplayStage {
         if let Some(last_entry) = entries.last() {
             bank_progress.last_entry = last_entry.hash;
         }
-        if result.is_ok() {
-            forward_entry_sender.send(entries)?;
-        }
         result
     }

@@ -616,155 +607,14 @@ impl Service for ReplayStage {
 #[cfg(test)]
 mod test {
     use super::*;
-    use crate::banking_stage::create_test_recorder;
-    use crate::blocktree::{create_new_tmp_ledger, get_tmp_ledger_path};
-    use crate::cluster_info::{ClusterInfo, Node};
-    use crate::entry::create_ticks;
-    use crate::entry::{next_entry_mut, Entry};
-    use crate::fullnode::new_banks_from_blocktree;
+    use crate::blocktree::get_tmp_ledger_path;
     use crate::packet::Blob;
     use crate::replay_stage::ReplayStage;
-    use crate::result::Error;
     use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::Hash;
-    use solana_sdk::signature::{Keypair, KeypairUtil};
-    use solana_vote_api::vote_state::Vote;
     use std::fs::remove_dir_all;
-    use std::sync::mpsc::channel;
     use std::sync::{Arc, RwLock};

-    #[test]
-    fn test_vote_error_replay_stage_correctness() {
-        solana_logger::setup();
-        // Set up dummy node to host a ReplayStage
-        let my_keypair = Keypair::new();
-        let my_id = my_keypair.pubkey();
-        let my_node = Node::new_localhost_with_pubkey(&my_id);
-
-        // Create keypair for the leader
-        let leader_id = Pubkey::new_rand();
-
-        let (genesis_block, _mint_keypair) = GenesisBlock::new_with_leader(10_000, &leader_id, 500);
-
-        let (my_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
-
-        // Set up the cluster info
-        let cluster_info_me = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
-            my_node.info.clone(),
-        )));
-
-        // Set up the replay stage
-        {
-            let voting_keypair = Arc::new(Keypair::new());
-            let (bank_forks, _bank_forks_info, blocktree, l_receiver, leader_schedule_cache) =
-                new_banks_from_blocktree(&my_ledger_path, None);
-            let bank = bank_forks.working_bank();
-            let leader_schedule_cache = Arc::new(leader_schedule_cache);
-            let blocktree = Arc::new(blocktree);
-            let (exit, poh_recorder, poh_service, _entry_receiver) =
-                create_test_recorder(&bank, &blocktree);
-            let (ledger_writer_sender, ledger_writer_receiver) = channel();
-            let (replay_stage, _slot_full_receiver) = ReplayStage::new(
-                &my_keypair.pubkey(),
-                &voting_keypair.pubkey(),
-                Some(voting_keypair.clone()),
-                blocktree.clone(),
-                &Arc::new(RwLock::new(bank_forks)),
-                cluster_info_me.clone(),
-                &exit,
-                l_receiver,
-                &Arc::new(RpcSubscriptions::default()),
-                &poh_recorder,
-                ledger_writer_sender,
-                &leader_schedule_cache,
-            );
-            let vote_ix = vote_instruction::vote(&voting_keypair.pubkey(), vec![Vote::new(0)]);
-            let vote_tx = Transaction::new_signed_instructions(
-                &[voting_keypair.as_ref()],
-                vec![vote_ix],
-                bank.last_blockhash(),
-            );
-            cluster_info_me.write().unwrap().push_vote(vote_tx);
-
-            info!("Send ReplayStage an entry, should see it on the ledger writer receiver");
-            let next_tick = create_ticks(1, bank.last_blockhash());
-            blocktree
-                .write_entries(1, 0, 0, genesis_block.ticks_per_slot, next_tick.clone())
-                .unwrap();
-
-            let received_tick = ledger_writer_receiver
-                .recv()
-                .expect("Expected to receive an entry on the ledger writer receiver");
-
-            assert_eq!(next_tick[0], received_tick[0]);
-
-            exit.store(true, Ordering::Relaxed);
-            replay_stage.join().unwrap();
-            poh_service.join().unwrap();
-        }
-        let _ignored = remove_dir_all(&my_ledger_path);
-    }
-
-    #[test]
-    fn test_replay_stage_poh_ok_entry_receiver() {
-        let (forward_entry_sender, forward_entry_receiver) = channel();
-        let genesis_block = GenesisBlock::new(10_000).0;
-        let bank = Arc::new(Bank::new(&genesis_block));
-        let mut blockhash = bank.last_blockhash();
-        let mut entries = Vec::new();
-        for _ in 0..5 {
-            let entry = next_entry_mut(&mut blockhash, 1, vec![]); //just ticks
-            entries.push(entry);
-        }
-
-        let mut progress = HashMap::new();
-        let res = ReplayStage::replay_entries_into_bank(
-            &bank,
-            entries.clone(),
-            &mut progress,
-            &forward_entry_sender,
-            0,
-        );
-        assert!(res.is_ok(), "replay failed {:?}", res);
-        let res = forward_entry_receiver.try_recv();
-        match res {
-            Ok(_) => (),
-            Err(e) => assert!(false, "Entries were not sent correctly {:?}", e),
-        }
-    }
-
-    #[test]
-    fn test_replay_stage_poh_error_entry_receiver() {
-        let (forward_entry_sender, forward_entry_receiver) = channel();
-        let mut entries = Vec::new();
-        for _ in 0..5 {
-            let entry = Entry::new(&mut Hash::default(), 1, vec![]); //just broken entries
-            entries.push(entry);
-        }
-
-        let genesis_block = GenesisBlock::new(10_000).0;
-        let bank = Arc::new(Bank::new(&genesis_block));
-        let mut progress = HashMap::new();
-        let res = ReplayStage::replay_entries_into_bank(
-            &bank,
-            entries.clone(),
-            &mut progress,
-            &forward_entry_sender,
-            0,
-        );
-
-        match res {
-            Ok(_) => assert!(false, "Should have failed because entries are broken"),
-            Err(Error::BlobError(BlobError::VerificationFailed)) => (),
-            Err(e) => assert!(
-                false,
-                "Should have failed because with blob error, instead, got {:?}",
-                e
-            ),
-        }
-        assert!(forward_entry_receiver.try_recv().is_err());
-    }
-
     #[test]
     fn test_child_slots_of_same_parent() {
         let ledger_path = get_tmp_ledger_path!();
@@ -9,7 +9,7 @@ use crate::packet::to_shared_blob;
 use crate::repair_service::RepairSlotRange;
 use crate::result::Result;
 use crate::service::Service;
-use crate::storage_stage::{get_segment_from_entry, ENTRIES_PER_SEGMENT};
+use crate::storage_stage::SLOTS_PER_SEGMENT;
 use crate::streamer::receiver;
 use crate::streamer::responder;
 use crate::window_service::WindowService;
@@ -26,7 +26,7 @@ use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
 use solana_sdk::system_transaction;
 use solana_sdk::transaction::Transaction;
 use solana_sdk::transport::TransportError;
-use solana_storage_api::storage_instruction;
+use solana_storage_api::{get_segment_from_slot, storage_instruction};
 use std::fs::File;
 use std::io;
 use std::io::BufReader;
@@ -107,18 +107,15 @@ pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
     Ok(hasher.result())
 }

-fn get_entry_heights_from_blockhash(
-    signature: &ed25519_dalek::Signature,
-    storage_entry_height: u64,
-) -> u64 {
+fn get_slot_from_blockhash(signature: &ed25519_dalek::Signature, storage_slot: u64) -> u64 {
     let signature_vec = signature.to_bytes();
     let mut segment_index = u64::from(signature_vec[0])
         | (u64::from(signature_vec[1]) << 8)
         | (u64::from(signature_vec[1]) << 16)
         | (u64::from(signature_vec[2]) << 24);
-    let max_segment_index = get_segment_from_entry(storage_entry_height);
+    let max_segment_index = get_segment_from_slot(storage_slot);
     segment_index %= max_segment_index as u64;
-    segment_index * ENTRIES_PER_SEGMENT
+    segment_index * SLOTS_PER_SEGMENT
 }

 fn create_request_processor(
@@ -213,16 +210,15 @@ impl Replicator {
         info!("Looking for leader at {:?}", cluster_entrypoint);
         crate::gossip_service::discover_nodes(&cluster_entrypoint.gossip, 1)?;

-        let (storage_blockhash, storage_entry_height) =
-            Self::poll_for_blockhash_and_entry_height(&cluster_info)?;
+        let (storage_blockhash, storage_slot) = Self::poll_for_blockhash_and_slot(&cluster_info)?;

         let node_info = node.info.clone();
         let signature = storage_keypair.sign(storage_blockhash.as_ref());
-        let slot = get_entry_heights_from_blockhash(&signature, storage_entry_height);
+        let slot = get_slot_from_blockhash(&signature, storage_slot);
         info!("replicating slot: {}", slot);

         let mut repair_slot_range = RepairSlotRange::default();
-        repair_slot_range.end = slot + ENTRIES_PER_SEGMENT;
+        repair_slot_range.end = slot + SLOTS_PER_SEGMENT;
         repair_slot_range.start = slot;

         let repair_socket = Arc::new(node.sockets.repair);
@@ -324,7 +320,7 @@ impl Replicator {
                 if meta.is_connected {
                     current_slot += 1;
                     warn!("current slot: {}", current_slot);
-                    if current_slot >= start_slot + ENTRIES_PER_SEGMENT {
+                    if current_slot >= start_slot + SLOTS_PER_SEGMENT {
                         break 'outer;
                     }
                 } else {
@@ -481,11 +477,11 @@ impl Replicator {
         }
     }

-    pub fn entry_height(&self) -> u64 {
+    pub fn slot(&self) -> u64 {
         self.slot
     }

-    fn poll_for_blockhash_and_entry_height(
+    fn poll_for_blockhash_and_slot(
         cluster_info: &Arc<RwLock<ClusterInfo>>,
     ) -> Result<(String, u64)> {
         for _ in 0..10 {
@@ -500,20 +496,20 @@ impl Replicator {
                 .retry_make_rpc_request(&RpcRequest::GetStorageBlockhash, None, 0)
                 .expect("rpc request")
                 .to_string();
-            let storage_entry_height = rpc_client
-                .retry_make_rpc_request(&RpcRequest::GetStorageEntryHeight, None, 0)
+            let storage_slot = rpc_client
+                .retry_make_rpc_request(&RpcRequest::GetStorageSlot, None, 0)
                 .expect("rpc request")
                 .as_u64()
                 .unwrap();
-            info!("max entry_height: {}", storage_entry_height);
-            if get_segment_from_entry(storage_entry_height) != 0 {
-                return Ok((storage_blockhash, storage_entry_height));
+            info!("max slot: {}", storage_slot);
+            if get_segment_from_slot(storage_slot) != 0 {
+                return Ok((storage_blockhash, storage_slot));
             }
             sleep(Duration::from_secs(3));
         }
         Err(Error::new(
             ErrorKind::Other,
-            "Couldn't get blockhash or entry_height",
+            "Couldn't get blockhash or slot",
         ))?
     }
 }
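For reference, a standalone sketch of the renamed `get_slot_from_blockhash` helper from the replicator hunks above. The body mirrors the diffed function; a raw byte array stands in for the `ed25519_dalek::Signature`, and `get_segment_from_slot` is again an assumed division by `SLOTS_PER_SEGMENT`:

```rust
const SLOTS_PER_SEGMENT: u64 = 16;

// Assumed definition, matching the call sites in the diff.
fn get_segment_from_slot(slot: u64) -> u64 {
    slot / SLOTS_PER_SEGMENT
}

// Derive a starting slot for the replicator from its signature over the
// storage blockhash: mix a few signature bytes into a segment index, wrap it
// into the range of existing segments, and return that segment's first slot.
fn get_slot_from_blockhash(signature_bytes: &[u8; 64], storage_slot: u64) -> u64 {
    let mut segment_index = u64::from(signature_bytes[0])
        | (u64::from(signature_bytes[1]) << 8)
        | (u64::from(signature_bytes[1]) << 16)
        | (u64::from(signature_bytes[2]) << 24);
    let max_segment_index = get_segment_from_slot(storage_slot);
    segment_index %= max_segment_index; // caller must ensure at least one segment exists
    segment_index * SLOTS_PER_SEGMENT
}

fn main() {
    let sig = [7u8; 64];
    println!("replicate from slot {}", get_slot_from_blockhash(&sig, 64));
}
```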
@@ -105,15 +105,13 @@ impl JsonRpcRequestProcessor {
         Ok(bs58::encode(hash).into_string())
     }

-    fn get_storage_entry_height(&self) -> Result<u64> {
-        let entry_height = self.storage_state.get_entry_height();
-        Ok(entry_height)
+    fn get_storage_slot(&self) -> Result<u64> {
+        let slot = self.storage_state.get_slot();
+        Ok(slot)
     }

-    fn get_storage_pubkeys_for_entry_height(&self, entry_height: u64) -> Result<Vec<Pubkey>> {
-        Ok(self
-            .storage_state
-            .get_pubkeys_for_entry_height(entry_height))
+    fn get_storage_pubkeys_for_slot(&self, slot: u64) -> Result<Vec<Pubkey>> {
+        Ok(self.storage_state.get_pubkeys_for_slot(slot))
     }

     pub fn fullnode_exit(&self) -> Result<bool> {
@@ -225,15 +223,11 @@ pub trait RpcSol {
     #[rpc(meta, name = "getStorageBlockhash")]
     fn get_storage_blockhash(&self, _: Self::Metadata) -> Result<String>;

-    #[rpc(meta, name = "getStorageEntryHeight")]
-    fn get_storage_entry_height(&self, _: Self::Metadata) -> Result<u64>;
+    #[rpc(meta, name = "getStorageSlot")]
+    fn get_storage_slot(&self, _: Self::Metadata) -> Result<u64>;

-    #[rpc(meta, name = "getStoragePubkeysForEntryHeight")]
-    fn get_storage_pubkeys_for_entry_height(
-        &self,
-        _: Self::Metadata,
-        _: u64,
-    ) -> Result<Vec<Pubkey>>;
+    #[rpc(meta, name = "getStoragePubkeysForSlot")]
+    fn get_storage_pubkeys_for_slot(&self, _: Self::Metadata, _: u64) -> Result<Vec<Pubkey>>;

     #[rpc(meta, name = "fullnodeExit")]
     fn fullnode_exit(&self, _: Self::Metadata) -> Result<bool>;
@@ -464,22 +458,15 @@ impl RpcSol for RpcSolImpl {
             .get_storage_blockhash()
     }

-    fn get_storage_entry_height(&self, meta: Self::Metadata) -> Result<u64> {
-        meta.request_processor
-            .read()
-            .unwrap()
-            .get_storage_entry_height()
+    fn get_storage_slot(&self, meta: Self::Metadata) -> Result<u64> {
+        meta.request_processor.read().unwrap().get_storage_slot()
     }

-    fn get_storage_pubkeys_for_entry_height(
-        &self,
-        meta: Self::Metadata,
-        entry_height: u64,
-    ) -> Result<Vec<Pubkey>> {
+    fn get_storage_pubkeys_for_slot(&self, meta: Self::Metadata, slot: u64) -> Result<Vec<Pubkey>> {
         meta.request_processor
             .read()
             .unwrap()
-            .get_storage_pubkeys_for_entry_height(entry_height)
+            .get_storage_pubkeys_for_slot(slot)
     }

     fn fullnode_exit(&self, meta: Self::Metadata) -> Result<bool> {
@ -7,7 +7,6 @@ use crate::blocktree::Blocktree;
|
|||||||
#[cfg(all(feature = "chacha", feature = "cuda"))]
|
#[cfg(all(feature = "chacha", feature = "cuda"))]
|
||||||
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
|
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
|
||||||
use crate::cluster_info::ClusterInfo;
|
use crate::cluster_info::ClusterInfo;
|
||||||
use crate::entry::{Entry, EntryReceiver};
|
|
||||||
use crate::result::{Error, Result};
|
use crate::result::{Error, Result};
|
||||||
use crate::service::Service;
|
use crate::service::Service;
|
||||||
use bincode::deserialize;
|
use bincode::deserialize;
|
||||||
@ -19,13 +18,14 @@ use solana_sdk::pubkey::Pubkey;
|
|||||||
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
|
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
|
||||||
use solana_sdk::system_instruction;
|
use solana_sdk::system_instruction;
|
||||||
use solana_sdk::transaction::Transaction;
|
use solana_sdk::transaction::Transaction;
|
||||||
use solana_storage_api::storage_instruction::{self, StorageInstruction};
|
use solana_storage_api::storage_instruction::StorageInstruction;
|
||||||
|
use solana_storage_api::{get_segment_from_slot, storage_instruction};
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::io;
|
use std::io;
|
||||||
use std::mem::size_of;
|
use std::mem::size_of;
|
||||||
use std::net::UdpSocket;
|
use std::net::UdpSocket;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use std::sync::mpsc::{channel, RecvTimeoutError, Sender};
|
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
use std::thread::{self, sleep, Builder, JoinHandle};
|
use std::thread::{self, sleep, Builder, JoinHandle};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@ -42,7 +42,7 @@ pub struct StorageStateInner {
|
|||||||
storage_keys: StorageKeys,
|
storage_keys: StorageKeys,
|
||||||
replicator_map: ReplicatorMap,
|
replicator_map: ReplicatorMap,
|
||||||
storage_blockhash: Hash,
|
storage_blockhash: Hash,
|
||||||
entry_height: u64,
|
slot: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Default)]
|
#[derive(Clone, Default)]
|
||||||
@ -55,25 +55,15 @@ pub struct StorageStage {
|
|||||||
t_storage_create_accounts: JoinHandle<()>,
|
t_storage_create_accounts: JoinHandle<()>,
|
||||||
}
|
}
|
||||||
|
|
||||||
macro_rules! cross_boundary {
|
pub const STORAGE_ROTATE_TEST_COUNT: u64 = 2;
|
||||||
($start:expr, $len:expr, $boundary:expr) => {
|
|
||||||
(($start + $len) & !($boundary - 1)) > $start & !($boundary - 1)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const STORAGE_ROTATE_TEST_COUNT: u64 = 128;
|
|
||||||
// TODO: some way to dynamically size NUM_IDENTITIES
|
// TODO: some way to dynamically size NUM_IDENTITIES
|
||||||
const NUM_IDENTITIES: usize = 1024;
|
const NUM_IDENTITIES: usize = 1024;
|
||||||
pub const NUM_STORAGE_SAMPLES: usize = 4;
|
pub const NUM_STORAGE_SAMPLES: usize = 4;
|
||||||
pub const ENTRIES_PER_SEGMENT: u64 = 16;
|
pub const SLOTS_PER_SEGMENT: u64 = 16;
|
||||||
const KEY_SIZE: usize = 64;
|
const KEY_SIZE: usize = 64;
|
||||||
|
|
||||||
type InstructionSender = Sender<Instruction>;
|
type InstructionSender = Sender<Instruction>;
|
||||||
|
|
||||||
pub fn get_segment_from_entry(entry_height: u64) -> u64 {
|
|
||||||
entry_height / ENTRIES_PER_SEGMENT
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_identity_index_from_signature(key: &Signature) -> usize {
|
fn get_identity_index_from_signature(key: &Signature) -> usize {
|
||||||
let rkey = key.as_ref();
|
let rkey = key.as_ref();
|
||||||
let mut res: usize = (rkey[0] as usize)
|
let mut res: usize = (rkey[0] as usize)
|
||||||
@ -94,7 +84,7 @@ impl StorageState {
|
|||||||
storage_keys,
|
storage_keys,
|
||||||
storage_results,
|
storage_results,
|
||||||
replicator_map,
|
replicator_map,
|
||||||
entry_height: 0,
|
slot: 0,
|
||||||
storage_blockhash: Hash::default(),
|
storage_blockhash: Hash::default(),
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -117,14 +107,14 @@ impl StorageState {
|
|||||||
self.state.read().unwrap().storage_blockhash
|
self.state.read().unwrap().storage_blockhash
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_entry_height(&self) -> u64 {
|
pub fn get_slot(&self) -> u64 {
|
||||||
self.state.read().unwrap().entry_height
|
self.state.read().unwrap().slot
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_pubkeys_for_entry_height(&self, entry_height: u64) -> Vec<Pubkey> {
|
pub fn get_pubkeys_for_slot(&self, slot: u64) -> Vec<Pubkey> {
|
||||||
// TODO: keep track of age?
|
// TODO: keep track of age?
|
||||||
const MAX_PUBKEYS_TO_RETURN: usize = 5;
|
const MAX_PUBKEYS_TO_RETURN: usize = 5;
|
||||||
let index = (entry_height / ENTRIES_PER_SEGMENT) as usize;
|
let index = get_segment_from_slot(slot) as usize;
|
||||||
let replicator_map = &self.state.read().unwrap().replicator_map;
|
let replicator_map = &self.state.read().unwrap().replicator_map;
|
||||||
if index < replicator_map.len() {
|
if index < replicator_map.len() {
|
||||||
replicator_map[index]
|
replicator_map[index]
|
||||||
@ -142,18 +132,15 @@ impl StorageStage {
|
|||||||
#[allow(clippy::too_many_arguments)]
|
#[allow(clippy::too_many_arguments)]
|
||||||
pub fn new(
|
pub fn new(
|
||||||
storage_state: &StorageState,
|
storage_state: &StorageState,
|
||||||
storage_entry_receiver: EntryReceiver,
|
slot_receiver: Receiver<u64>,
|
||||||
blocktree: Option<Arc<Blocktree>>,
|
blocktree: Option<Arc<Blocktree>>,
|
||||||
keypair: &Arc<Keypair>,
|
keypair: &Arc<Keypair>,
|
||||||
storage_keypair: &Arc<Keypair>,
|
storage_keypair: &Arc<Keypair>,
|
||||||
exit: &Arc<AtomicBool>,
|
exit: &Arc<AtomicBool>,
|
||||||
entry_height: u64,
|
|
||||||
bank_forks: &Arc<RwLock<BankForks>>,
|
bank_forks: &Arc<RwLock<BankForks>>,
|
||||||
storage_rotate_count: u64,
|
storage_rotate_count: u64,
|
||||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
debug!("storage_stage::new: entry_height: {}", entry_height);
|
|
||||||
storage_state.state.write().unwrap().entry_height = entry_height;
|
|
||||||
let storage_state_inner = storage_state.state.clone();
|
let storage_state_inner = storage_state.state.clone();
|
||||||
let exit0 = exit.clone();
|
let exit0 = exit.clone();
|
||||||
let keypair0 = storage_keypair.clone();
|
let keypair0 = storage_keypair.clone();
|
||||||
@ -163,18 +150,16 @@ impl StorageStage {
|
|||||||
let t_storage_mining_verifier = Builder::new()
|
let t_storage_mining_verifier = Builder::new()
|
||||||
.name("solana-storage-mining-verify-stage".to_string())
|
.name("solana-storage-mining-verify-stage".to_string())
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
let mut poh_height = 0;
|
|
||||||
let mut current_key = 0;
|
let mut current_key = 0;
|
||||||
let mut entry_height = entry_height;
|
let mut slot_count = 0;
|
||||||
loop {
|
loop {
|
||||||
if let Some(ref some_blocktree) = blocktree {
|
if let Some(ref some_blocktree) = blocktree {
|
||||||
if let Err(e) = Self::process_entries(
|
if let Err(e) = Self::process_entries(
|
||||||
&keypair0,
|
&keypair0,
|
||||||
&storage_state_inner,
|
&storage_state_inner,
|
||||||
&storage_entry_receiver,
|
&slot_receiver,
|
||||||
&some_blocktree,
|
&some_blocktree,
|
||||||
&mut poh_height,
|
&mut slot_count,
|
||||||
&mut entry_height,
|
|
||||||
&mut current_key,
|
&mut current_key,
|
||||||
storage_rotate_count,
|
storage_rotate_count,
|
||||||
&instruction_sender,
|
&instruction_sender,
|
||||||
@ -283,27 +268,23 @@ impl StorageStage {
|
|||||||
keypair: &Arc<Keypair>,
|
keypair: &Arc<Keypair>,
|
||||||
_blocktree: &Arc<Blocktree>,
|
_blocktree: &Arc<Blocktree>,
|
||||||
entry_id: Hash,
|
entry_id: Hash,
|
||||||
entry_height: u64,
|
slot: u64,
|
||||||
instruction_sender: &InstructionSender,
|
instruction_sender: &InstructionSender,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut seed = [0u8; 32];
|
let mut seed = [0u8; 32];
|
||||||
let signature = keypair.sign(&entry_id.as_ref());
|
let signature = keypair.sign(&entry_id.as_ref());
|
||||||
|
|
||||||
let ix = storage_instruction::advertise_recent_blockhash(
|
let ix = storage_instruction::advertise_recent_blockhash(&keypair.pubkey(), entry_id, slot);
|
||||||
&keypair.pubkey(),
|
|
||||||
entry_id,
|
|
||||||
entry_height,
|
|
||||||
);
|
|
||||||
instruction_sender.send(ix)?;
|
instruction_sender.send(ix)?;
|
||||||
|
|
||||||
seed.copy_from_slice(&signature.to_bytes()[..32]);
|
seed.copy_from_slice(&signature.to_bytes()[..32]);
|
||||||
|
|
||||||
let mut rng = ChaChaRng::from_seed(seed);
|
let mut rng = ChaChaRng::from_seed(seed);
|
||||||
|
|
||||||
state.write().unwrap().entry_height = entry_height;
|
state.write().unwrap().slot = slot;
|
||||||
|
|
||||||
// Regenerate the answers
|
// Regenerate the answers
|
||||||
let num_segments = (entry_height / ENTRIES_PER_SEGMENT) as usize;
|
let num_segments = get_segment_from_slot(slot) as usize;
|
||||||
if num_segments == 0 {
|
if num_segments == 0 {
|
||||||
info!("Ledger has 0 segments!");
|
info!("Ledger has 0 segments!");
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@ -354,91 +335,99 @@ impl StorageStage {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn process_storage_transaction(
|
||||||
|
data: &[u8],
|
||||||
|
slot: u64,
|
||||||
|
storage_state: &Arc<RwLock<StorageStateInner>>,
|
||||||
|
current_key_idx: &mut usize,
|
||||||
|
transaction_key0: Pubkey,
|
||||||
|
) {
|
||||||
|
match deserialize(data) {
|
||||||
|
Ok(StorageInstruction::SubmitMiningProof {
|
||||||
|
slot: proof_slot,
|
||||||
|
signature,
|
||||||
|
..
|
||||||
|
}) => {
|
||||||
|
if proof_slot < slot {
|
||||||
|
{
|
||||||
|
debug!(
|
||||||
|
"generating storage_keys from storage txs current_key_idx: {}",
|
||||||
|
*current_key_idx
|
||||||
|
);
|
||||||
|
let storage_keys = &mut storage_state.write().unwrap().storage_keys;
|
||||||
|
storage_keys[*current_key_idx..*current_key_idx + size_of::<Signature>()]
|
||||||
|
.copy_from_slice(signature.as_ref());
|
||||||
|
*current_key_idx += size_of::<Signature>();
|
||||||
|
*current_key_idx %= storage_keys.len();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut statew = storage_state.write().unwrap();
|
||||||
|
let max_segment_index = get_segment_from_slot(slot) as usize;
|
||||||
|
if statew.replicator_map.len() <= max_segment_index {
|
||||||
|
statew
|
||||||
|
.replicator_map
|
||||||
|
.resize(max_segment_index, HashSet::new());
|
||||||
|
}
|
||||||
|
let proof_segment_index = get_segment_from_slot(proof_slot) as usize;
|
||||||
|
if proof_segment_index < statew.replicator_map.len() {
|
||||||
|
statew.replicator_map[proof_segment_index].insert(transaction_key0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
debug!("storage proof: slot: {}", slot);
|
||||||
|
}
|
||||||
|
Ok(_) => {}
|
||||||
|
Err(e) => {
|
||||||
|
info!("error: {:?}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn process_entries(
|
fn process_entries(
|
||||||
keypair: &Arc<Keypair>,
|
keypair: &Arc<Keypair>,
|
||||||
storage_state: &Arc<RwLock<StorageStateInner>>,
|
storage_state: &Arc<RwLock<StorageStateInner>>,
|
||||||
entry_receiver: &EntryReceiver,
|
slot_receiver: &Receiver<u64>,
|
||||||
blocktree: &Arc<Blocktree>,
|
blocktree: &Arc<Blocktree>,
|
||||||
poh_height: &mut u64,
|
slot_count: &mut u64,
|
||||||
entry_height: &mut u64,
|
|
||||||
current_key_idx: &mut usize,
|
current_key_idx: &mut usize,
|
||||||
storage_rotate_count: u64,
|
storage_rotate_count: u64,
|
||||||
instruction_sender: &InstructionSender,
|
instruction_sender: &InstructionSender,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let timeout = Duration::new(1, 0);
|
let timeout = Duration::new(1, 0);
|
||||||
let entries: Vec<Entry> = entry_receiver.recv_timeout(timeout)?;
|
let slot: u64 = slot_receiver.recv_timeout(timeout)?;
|
||||||
for entry in entries {
|
storage_state.write().unwrap().slot = slot;
|
||||||
// Go through the transactions, find proofs, and use them to update
|
*slot_count += 1;
|
||||||
// the storage_keys with their signatures
|
if let Ok(entries) = blocktree.get_slot_entries(slot, 0, None) {
|
||||||
for tx in entry.transactions {
|
for entry in entries {
|
||||||
let message = tx.message();
|
// Go through the transactions, find proofs, and use them to update
|
||||||
for instruction in &message.instructions {
|
// the storage_keys with their signatures
|
||||||
let program_id = instruction.program_id(message.program_ids());
|
for tx in entry.transactions {
|
||||||
if solana_storage_api::check_id(program_id) {
|
for (i, program_id) in tx.message.program_ids().iter().enumerate() {
|
||||||
match deserialize(&instruction.data) {
|
if solana_storage_api::check_id(&program_id) {
|
||||||
Ok(StorageInstruction::SubmitMiningProof {
|
Self::process_storage_transaction(
|
||||||
entry_height: proof_entry_height,
|
&tx.message().instructions[i].data,
|
||||||
signature,
|
slot,
|
||||||
..
|
storage_state,
|
||||||
}) => {
|
current_key_idx,
|
||||||
if proof_entry_height < *entry_height {
|
tx.message.account_keys[0],
|
||||||
{
|
);
|
||||||
debug!(
|
|
||||||
"generating storage_keys from storage txs current_key_idx: {}",
|
|
||||||
*current_key_idx
|
|
||||||
);
|
|
||||||
let storage_keys =
|
|
||||||
&mut storage_state.write().unwrap().storage_keys;
|
|
||||||
storage_keys[*current_key_idx
|
|
||||||
..*current_key_idx + size_of::<Signature>()]
|
|
||||||
.copy_from_slice(signature.as_ref());
|
|
||||||
*current_key_idx += size_of::<Signature>();
|
|
||||||
*current_key_idx %= storage_keys.len();
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut statew = storage_state.write().unwrap();
|
|
||||||
let max_segment_index =
|
|
||||||
(*entry_height / ENTRIES_PER_SEGMENT) as usize;
|
|
||||||
if statew.replicator_map.len() <= max_segment_index {
|
|
||||||
statew
|
|
||||||
.replicator_map
|
|
||||||
.resize(max_segment_index, HashSet::new());
|
|
||||||
}
|
|
||||||
let proof_segment_index =
|
|
||||||
(proof_entry_height / ENTRIES_PER_SEGMENT) as usize;
|
|
||||||
if proof_segment_index < statew.replicator_map.len() {
|
|
||||||
statew.replicator_map[proof_segment_index]
|
|
||||||
.insert(message.account_keys[0]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
debug!("storage proof: entry_height: {}", entry_height);
|
|
||||||
}
|
|
||||||
Ok(_) => {}
|
|
||||||
Err(e) => {
|
|
||||||
info!("error: {:?}", e);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if *slot_count % storage_rotate_count == 0 {
|
||||||
|
debug!(
|
||||||
|
"crosses sending at slot: {}! hashes: {}",
|
||||||
|
slot, entry.num_hashes
|
||||||
|
);
|
||||||
|
Self::process_entry_crossing(
|
||||||
|
&storage_state,
|
||||||
|
&keypair,
|
||||||
|
&blocktree,
|
||||||
|
entry.hash,
|
||||||
|
slot,
|
||||||
|
instruction_sender,
|
||||||
|
)?;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if cross_boundary!(*poh_height, entry.num_hashes, storage_rotate_count) {
|
|
||||||
trace!(
|
|
||||||
"crosses sending at poh_height: {} entry_height: {}! hashes: {}",
|
|
||||||
*poh_height,
|
|
||||||
entry_height,
|
|
||||||
entry.num_hashes
|
|
||||||
);
|
|
||||||
Self::process_entry_crossing(
|
|
||||||
&storage_state,
|
|
||||||
&keypair,
|
|
||||||
&blocktree,
|
|
||||||
entry.hash,
|
|
||||||
*entry_height,
|
|
||||||
instruction_sender,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
*entry_height += 1;
|
|
||||||
*poh_height += entry.num_hashes;
|
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -485,16 +474,15 @@ mod tests {
|
|||||||
let (genesis_block, _mint_keypair) = GenesisBlock::new(1000);
|
let (genesis_block, _mint_keypair) = GenesisBlock::new(1000);
|
||||||
let bank = Arc::new(Bank::new(&genesis_block));
|
let bank = Arc::new(Bank::new(&genesis_block));
|
||||||
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
|
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
|
||||||
let (_storage_entry_sender, storage_entry_receiver) = channel();
|
let (_slot_sender, slot_receiver) = channel();
|
||||||
let storage_state = StorageState::new();
|
let storage_state = StorageState::new();
|
||||||
let storage_stage = StorageStage::new(
|
let storage_stage = StorageStage::new(
|
||||||
&storage_state,
|
&storage_state,
|
||||||
storage_entry_receiver,
|
slot_receiver,
|
||||||
None,
|
None,
|
||||||
&keypair,
|
&keypair,
|
||||||
&storage_keypair,
|
&storage_keypair,
|
||||||
&exit.clone(),
|
&exit.clone(),
|
||||||
0,
|
|
||||||
&bank_forks,
|
&bank_forks,
|
||||||
STORAGE_ROTATE_TEST_COUNT,
|
STORAGE_ROTATE_TEST_COUNT,
|
||||||
&cluster_info,
|
&cluster_info,
|
||||||
@ -521,30 +509,30 @@ mod tests {
|
|||||||
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
|
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
|
||||||
|
|
||||||
let entries = make_tiny_test_entries(64);
|
let entries = make_tiny_test_entries(64);
|
||||||
let blocktree = Blocktree::open(&ledger_path).unwrap();
|
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
|
||||||
|
let slot = 1;
|
||||||
let bank = Arc::new(Bank::new(&genesis_block));
|
let bank = Arc::new(Bank::new(&genesis_block));
|
||||||
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
|
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
|
||||||
blocktree
|
blocktree
|
||||||
.write_entries(1, 0, 0, ticks_per_slot, &entries)
|
.write_entries(slot, 0, 0, ticks_per_slot, &entries)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
let cluster_info = test_cluster_info(&keypair.pubkey());
|
let cluster_info = test_cluster_info(&keypair.pubkey());
|
||||||
|
|
||||||
let (storage_entry_sender, storage_entry_receiver) = channel();
|
let (slot_sender, slot_receiver) = channel();
|
||||||
let storage_state = StorageState::new();
|
let storage_state = StorageState::new();
|
||||||
let storage_stage = StorageStage::new(
|
let storage_stage = StorageStage::new(
|
||||||
&storage_state,
|
&storage_state,
|
||||||
storage_entry_receiver,
|
slot_receiver,
|
||||||
Some(Arc::new(blocktree)),
|
Some(blocktree.clone()),
|
||||||
&keypair,
|
&keypair,
|
||||||
&storage_keypair,
|
&storage_keypair,
|
||||||
&exit.clone(),
|
&exit.clone(),
|
||||||
0,
|
|
||||||
&bank_forks,
|
&bank_forks,
|
||||||
STORAGE_ROTATE_TEST_COUNT,
|
STORAGE_ROTATE_TEST_COUNT,
|
||||||
&cluster_info,
|
&cluster_info,
|
||||||
);
|
);
|
||||||
storage_entry_sender.send(entries.clone()).unwrap();
|
slot_sender.send(slot).unwrap();
|
||||||
|
|
||||||
let keypair = Keypair::new();
|
let keypair = Keypair::new();
|
||||||
let hash = Hash::default();
|
let hash = Hash::default();
|
||||||
@@ -552,8 +540,12 @@ mod tests {
         let mut result = storage_state.get_mining_result(&signature);
         assert_eq!(result, Hash::default());

-        for _ in 0..9 {
-            storage_entry_sender.send(entries.clone()).unwrap();
+        for i in slot..slot + 3 {
+            blocktree
+                .write_entries(i, 0, 0, ticks_per_slot, &entries)
+                .unwrap();
+
+            slot_sender.send(i).unwrap();
         }
         for _ in 0..5 {
             result = storage_state.get_mining_result(&signature);
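The hunks above rework the test harness so StorageStage is no longer fed entry batches over a channel: each slot's entries are written to the Blocktree first, and only the slot number is sent. A minimal, self-contained sketch of that producer/consumer shape (plain u64 channel and a stand-in consumer thread, not the real StorageStage API):

use std::sync::mpsc::channel;
use std::thread;

fn main() {
    // The sender side mirrors the test above: it announces slot numbers only.
    let (slot_sender, slot_receiver) = channel::<u64>();

    // Stand-in for StorageStage: it is told *which* slot is ready and is
    // expected to read that slot's entries back out of the blocktree itself.
    let consumer = thread::spawn(move || {
        while let Ok(slot) = slot_receiver.recv() {
            println!("storage stage notified of slot {}", slot);
        }
    });

    for slot in 1..=3 {
        // In the real test, blocktree.write_entries(slot, ...) happens first.
        slot_sender.send(slot).unwrap();
    }
    drop(slot_sender); // close the channel so the consumer thread exits
    consumer.join().unwrap();
}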
@@ -590,7 +582,7 @@ mod tests {
         let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);

         let entries = make_tiny_test_entries(128);
-        let blocktree = Blocktree::open(&ledger_path).unwrap();
+        let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
         blocktree
             .write_entries(1, 0, 0, ticks_per_slot, &entries)
             .unwrap();
@@ -598,21 +590,20 @@ mod tests {
         let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank])));
         let cluster_info = test_cluster_info(&keypair.pubkey());

-        let (storage_entry_sender, storage_entry_receiver) = channel();
+        let (slot_sender, slot_receiver) = channel();
         let storage_state = StorageState::new();
         let storage_stage = StorageStage::new(
             &storage_state,
-            storage_entry_receiver,
-            Some(Arc::new(blocktree)),
+            slot_receiver,
+            Some(blocktree.clone()),
             &keypair,
             &storage_keypair,
             &exit.clone(),
-            0,
             &bank_forks,
             STORAGE_ROTATE_TEST_COUNT,
             &cluster_info,
         );
-        storage_entry_sender.send(entries.clone()).unwrap();
+        slot_sender.send(1).unwrap();

         let mut reference_keys;
         {
@@ -632,7 +623,10 @@ mod tests {
         let mining_txs = vec![mining_proof_tx];

         let proof_entries = vec![Entry::new(&Hash::default(), 1, mining_txs)];
-        storage_entry_sender.send(proof_entries).unwrap();
+        blocktree
+            .write_entries(2, 0, 0, ticks_per_slot, &proof_entries)
+            .unwrap();
+        slot_sender.send(2).unwrap();

         for _ in 0..5 {
             {
@@ -6,7 +6,6 @@ use crate::blocktree::Blocktree;
 use crate::broadcast_stage::BroadcastStage;
 use crate::cluster_info::ClusterInfo;
 use crate::cluster_info_vote_listener::ClusterInfoVoteListener;
-use crate::entry::EntrySender;
 use crate::fetch_stage::FetchStage;
 use crate::poh_recorder::{PohRecorder, WorkingBankEntries};
 use crate::service::Service;
@@ -39,7 +38,6 @@ impl Tpu {
         broadcast_socket: UdpSocket,
         sigverify_disabled: bool,
         blocktree: &Arc<Blocktree>,
-        storage_entry_sender: EntrySender,
         exit: &Arc<AtomicBool>,
         genesis_blockhash: &Hash,
     ) -> Self {
@@ -80,7 +78,6 @@ impl Tpu {
             entry_receiver,
             &exit,
             blocktree,
-            storage_entry_sender,
             genesis_blockhash,
         );

@@ -16,9 +16,7 @@ use crate::bank_forks::BankForks;
 use crate::blob_fetch_stage::BlobFetchStage;
 use crate::blockstream_service::BlockstreamService;
 use crate::blocktree::Blocktree;
-use crate::blocktree_processor::BankForksInfo;
 use crate::cluster_info::ClusterInfo;
-use crate::entry::{EntryReceiver, EntrySender};
 use crate::leader_schedule_cache::LeaderScheduleCache;
 use crate::poh_recorder::PohRecorder;
 use crate::replay_stage::ReplayStage;
@@ -61,7 +59,6 @@ impl Tvu {
         vote_account: &Pubkey,
         voting_keypair: Option<Arc<T>>,
         bank_forks: &Arc<RwLock<BankForks>>,
-        bank_forks_info: &[BankForksInfo],
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         sockets: Sockets,
         blocktree: Arc<Blocktree>,
@@ -71,8 +68,6 @@ impl Tvu {
         ledger_signal_receiver: Receiver<bool>,
         subscriptions: &Arc<RpcSubscriptions>,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
-        storage_entry_sender: EntrySender,
-        storage_entry_receiver: EntryReceiver,
         leader_schedule_cache: &Arc<LeaderScheduleCache>,
         exit: &Arc<AtomicBool>,
         genesis_blockhash: &Hash,
@@ -115,7 +110,7 @@ impl Tvu {
             genesis_blockhash,
         );

-        let (replay_stage, slot_full_receiver) = ReplayStage::new(
+        let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
             &keypair.pubkey(),
             vote_account,
             voting_keypair,
@@ -126,7 +121,6 @@ impl Tvu {
             ledger_signal_receiver,
             subscriptions,
             poh_recorder,
-            storage_entry_sender,
             leader_schedule_cache,
         );

@@ -145,12 +139,11 @@ impl Tvu {
         let storage_keypair = Arc::new(Keypair::new());
         let storage_stage = StorageStage::new(
             storage_state,
-            storage_entry_receiver,
+            root_slot_receiver,
             Some(blocktree),
             &keypair,
             &storage_keypair,
             &exit,
-            bank_forks_info[0].entry_height, // TODO: StorageStage needs to deal with BankForks somehow still
             &bank_forks,
             storage_rotate_count,
             &cluster_info,
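Net effect of the Tvu changes above: the extra entry channel that used to be threaded through Tvu::new disappears, and the receiver of rooted slots that ReplayStage now returns is handed straight to StorageStage. A rough type-level sketch of that wiring (simplified stand-in functions, not the real signatures):

use std::sync::mpsc::{channel, Receiver, Sender};

// Stand-in for ReplayStage::new: alongside its other return values it now
// yields a receiver of rooted slot numbers.
fn replay_stage_new() -> (Sender<u64>, Receiver<u64>) {
    channel::<u64>()
}

// Stand-in for StorageStage::new: it consumes that receiver instead of an
// EntryReceiver, and no longer needs a starting entry height.
fn storage_stage_new(root_slot_receiver: Receiver<u64>) -> Receiver<u64> {
    root_slot_receiver
}

fn main() {
    let (root_slot_sender, root_slot_receiver) = replay_stage_new();
    let receiver_inside_stage = storage_stage_new(root_slot_receiver);

    // ReplayStage reports a newly rooted slot; the storage stage picks it up.
    root_slot_sender.send(42).unwrap();
    assert_eq!(receiver_inside_stage.recv().unwrap(), 42);
}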
@@ -203,10 +196,6 @@ pub mod tests {
         let (genesis_block, _mint_keypair) = GenesisBlock::new(starting_balance);

         let bank_forks = BankForks::new(0, Bank::new(&genesis_block));
-        let bank_forks_info = vec![BankForksInfo {
-            bank_slot: 0,
-            entry_height: 0,
-        }];

         //start cluster_info1
         let mut cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
@@ -221,13 +210,11 @@ pub mod tests {
         let (exit, poh_recorder, poh_service, _entry_receiver) =
             create_test_recorder(&bank, &blocktree);
         let voting_keypair = Keypair::new();
-        let (storage_entry_sender, storage_entry_receiver) = channel();
         let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
         let tvu = Tvu::new(
             &voting_keypair.pubkey(),
             Some(Arc::new(voting_keypair)),
             &Arc::new(RwLock::new(bank_forks)),
-            &bank_forks_info,
             &cref1,
             {
                 Sockets {
@@ -243,8 +230,6 @@ pub mod tests {
             l_receiver,
             &Arc::new(RpcSubscriptions::default()),
             &poh_recorder,
-            storage_entry_sender,
-            storage_entry_receiver,
             &leader_schedule_cache,
             &exit,
             &Hash::default(),
@@ -84,7 +84,7 @@ fn test_replay() {

    let tvu_addr = target1.info.tvu;

-    let (bank_forks, bank_forks_info, blocktree, ledger_signal_receiver, leader_schedule_cache) =
+    let (bank_forks, _bank_forks_info, blocktree, ledger_signal_receiver, leader_schedule_cache) =
        fullnode::new_banks_from_blocktree(&blocktree_path, None);
    let bank = bank_forks.working_bank();
    assert_eq!(
@@ -105,12 +105,10 @@ fn test_replay() {
    {
        let (poh_service_exit, poh_recorder, poh_service, _entry_receiver) =
            create_test_recorder(&bank, &blocktree);
-        let (storage_sender, storage_receiver) = channel();
        let tvu = Tvu::new(
            &voting_keypair.pubkey(),
            Some(Arc::new(voting_keypair)),
            &bank_forks,
-            &bank_forks_info,
            &cref1,
            {
                Sockets {
@@ -126,8 +124,6 @@ fn test_replay() {
            ledger_signal_receiver,
            &Arc::new(RpcSubscriptions::default()),
            &poh_recorder,
-            storage_sender,
-            storage_receiver,
            &leader_schedule_cache,
            &exit,
            &solana_sdk::hash::Hash::default(),
@@ -4,10 +4,10 @@ pub mod storage_processor;

 use solana_sdk::pubkey::Pubkey;

-pub const ENTRIES_PER_SEGMENT: u64 = 16;
+pub const SLOTS_PER_SEGMENT: u64 = 2;

-pub fn get_segment_from_entry(entry_height: u64) -> usize {
-    (entry_height / ENTRIES_PER_SEGMENT) as usize
+pub fn get_segment_from_slot(slot: u64) -> usize {
+    (slot / SLOTS_PER_SEGMENT) as usize
 }

 const STORAGE_PROGRAM_ID: [u8; 32] = [
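With the constant and helper above, storage segments are now counted in slots rather than ledger entries. A quick, self-contained check of the new mapping (constant and function copied from the hunk above; the main function and asserts are illustrative only):

pub const SLOTS_PER_SEGMENT: u64 = 2;

pub fn get_segment_from_slot(slot: u64) -> usize {
    (slot / SLOTS_PER_SEGMENT) as usize
}

fn main() {
    // Slots 0 and 1 share segment 0, slots 2 and 3 share segment 1, and so on.
    assert_eq!(get_segment_from_slot(0), 0);
    assert_eq!(get_segment_from_slot(1), 0);
    assert_eq!(get_segment_from_slot(2), 1);
    assert_eq!(get_segment_from_slot(SLOTS_PER_SEGMENT * 3), 3);
    println!("segment boundaries follow SLOTS_PER_SEGMENT");
}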
@@ -1,4 +1,4 @@
-use crate::{get_segment_from_entry, ENTRIES_PER_SEGMENT};
+use crate::get_segment_from_slot;
 use log::*;
 use serde_derive::{Deserialize, Serialize};
 use solana_sdk::account::Account;
@@ -44,7 +44,7 @@ pub enum StorageContract {
     Default,

     ValidatorStorage {
-        entry_height: u64,
+        slot: u64,
         hash: Hash,
         lockout_validations: Vec<Vec<CheckedProof>>,
         reward_validations: Vec<Vec<CheckedProof>>,
@@ -68,7 +68,7 @@ impl<'a> StorageAccount<'a> {
         &mut self,
         id: Pubkey,
         sha_state: Hash,
-        entry_height: u64,
+        slot: u64,
         signature: Signature,
     ) -> Result<(), InstructionError> {
         let mut storage_contract = &mut self.account.state()?;
@@ -80,7 +80,7 @@ impl<'a> StorageAccount<'a> {
         };

         if let StorageContract::ReplicatorStorage { proofs, .. } = &mut storage_contract {
-            let segment_index = get_segment_from_entry(entry_height);
+            let segment_index = get_segment_from_slot(slot);
             if segment_index > proofs.len() || proofs.is_empty() {
                 proofs.resize(cmp::max(1, segment_index), Proof::default());
             }
@@ -91,8 +91,8 @@ impl<'a> StorageAccount<'a> {
         }

         debug!(
-            "Mining proof submitted with contract {:?} entry_height: {}",
-            sha_state, entry_height
+            "Mining proof submitted with contract {:?} slot: {}",
+            sha_state, slot
         );

         let proof_info = Proof {
@@ -110,12 +110,12 @@ impl<'a> StorageAccount<'a> {
     pub fn advertise_storage_recent_blockhash(
         &mut self,
         hash: Hash,
-        entry_height: u64,
+        slot: u64,
     ) -> Result<(), InstructionError> {
         let mut storage_contract = &mut self.account.state()?;
         if let StorageContract::Default = storage_contract {
             *storage_contract = StorageContract::ValidatorStorage {
-                entry_height: 0,
+                slot: 0,
                 hash: Hash::default(),
                 lockout_validations: vec![],
                 reward_validations: vec![],
@@ -123,14 +123,14 @@ impl<'a> StorageAccount<'a> {
         };

         if let StorageContract::ValidatorStorage {
-            entry_height: state_entry_height,
+            slot: state_slot,
             hash: state_hash,
             reward_validations,
             lockout_validations,
         } = &mut storage_contract
         {
-            let original_segments = *state_entry_height / ENTRIES_PER_SEGMENT;
-            let segments = entry_height / ENTRIES_PER_SEGMENT;
+            let original_segments = get_segment_from_slot(*state_slot);
+            let segments = get_segment_from_slot(slot);
             debug!(
                 "advertise new last id segments: {} orig: {}",
                 segments, original_segments
@@ -139,7 +139,7 @@ impl<'a> StorageAccount<'a> {
                 return Err(InstructionError::InvalidArgument);
             }

-            *state_entry_height = entry_height;
+            *state_slot = slot;
             *state_hash = hash;

             // move lockout_validations to reward_validations
@@ -154,14 +154,14 @@ impl<'a> StorageAccount<'a> {

     pub fn proof_validation(
         &mut self,
-        entry_height: u64,
+        slot: u64,
         proofs: Vec<CheckedProof>,
         replicator_accounts: &mut [StorageAccount],
     ) -> Result<(), InstructionError> {
         let mut storage_contract = &mut self.account.state()?;
         if let StorageContract::Default = storage_contract {
             *storage_contract = StorageContract::ValidatorStorage {
-                entry_height: 0,
+                slot: 0,
                 hash: Hash::default(),
                 lockout_validations: vec![],
                 reward_validations: vec![],
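The advertise path above now compares segments derived from slots: the advertised slot must land in a later segment than the one recorded in the contract, otherwise InvalidArgument is returned. A simplified sketch of that check (the comparison against `original_segments` is assumed from code elided between the hunks; the real method also updates the contract's slot and hash and rotates lockout_validations into reward_validations):

const SLOTS_PER_SEGMENT: u64 = 2;

fn get_segment_from_slot(slot: u64) -> usize {
    (slot / SLOTS_PER_SEGMENT) as usize
}

// Returns true when the advertised slot has moved into a later segment than
// the slot currently stored in the ValidatorStorage contract.
fn segment_advanced(state_slot: u64, advertised_slot: u64) -> bool {
    get_segment_from_slot(advertised_slot) > get_segment_from_slot(state_slot)
}

fn main() {
    // Same segment: rejected (InvalidArgument in the real code).
    assert!(!segment_advanced(0, 1));
    // Next segment: accepted, and the contract's slot/hash get updated.
    assert!(segment_advanced(0, SLOTS_PER_SEGMENT));
}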
@@ -169,16 +169,16 @@ impl<'a> StorageAccount<'a> {
         };

         if let StorageContract::ValidatorStorage {
-            entry_height: current_entry_height,
+            slot: current_slot,
             lockout_validations,
             ..
         } = &mut storage_contract
         {
-            if entry_height >= *current_entry_height {
+            if slot >= *current_slot {
                 return Err(InstructionError::InvalidArgument);
             }

-            let segment_index = get_segment_from_entry(entry_height);
+            let segment_index = get_segment_from_slot(slot);
             let mut previous_proofs = replicator_accounts
                 .iter_mut()
                 .filter_map(|account| {
@@ -224,7 +224,7 @@ impl<'a> StorageAccount<'a> {

     pub fn claim_storage_reward(
         &mut self,
-        entry_height: u64,
+        slot: u64,
         tick_height: u64,
     ) -> Result<(), InstructionError> {
         let mut storage_contract = &mut self.account.state()?;
@@ -236,7 +236,7 @@ impl<'a> StorageAccount<'a> {
             reward_validations, ..
         } = &mut storage_contract
         {
-            let claims_index = get_segment_from_entry(entry_height);
+            let claims_index = get_segment_from_slot(slot);
             let _num_validations = count_valid_proofs(&reward_validations[claims_index]);
             // TODO can't just create lamports out of thin air
             // self.account.lamports += TOTAL_VALIDATOR_REWARDS * num_validations;
@@ -248,8 +248,8 @@ impl<'a> StorageAccount<'a> {
         {
             // if current tick height is a full segment away? then allow reward collection
             // storage needs to move to tick heights too, until then this makes little sense
-            let current_index = get_segment_from_entry(tick_height);
-            let claims_index = get_segment_from_entry(entry_height);
+            let current_index = get_segment_from_slot(tick_height);
+            let claims_index = get_segment_from_slot(slot);
             if current_index <= claims_index || claims_index >= reward_validations.len() {
                 debug!(
                     "current {:?}, claim {:?}, rewards {:?}",
@@ -348,7 +348,7 @@ mod tests {
     }

         contract = StorageContract::ValidatorStorage {
-            entry_height: 0,
+            slot: 0,
             hash: Hash::default(),
             lockout_validations: vec![],
             reward_validations: vec![],
@@ -11,18 +11,18 @@ use solana_sdk::signature::Signature;
 pub enum StorageInstruction {
     SubmitMiningProof {
         sha_state: Hash,
-        entry_height: u64,
+        slot: u64,
         signature: Signature,
     },
     AdvertiseStorageRecentBlockhash {
         hash: Hash,
-        entry_height: u64,
+        slot: u64,
     },
     ClaimStorageReward {
-        entry_height: u64,
+        slot: u64,
     },
     ProofValidation {
-        entry_height: u64,
+        slot: u64,
         proofs: Vec<CheckedProof>,
     },
 }
@@ -30,12 +30,12 @@ pub enum StorageInstruction {
 pub fn mining_proof(
     from_pubkey: &Pubkey,
     sha_state: Hash,
-    entry_height: u64,
+    slot: u64,
     signature: Signature,
 ) -> Instruction {
     let storage_instruction = StorageInstruction::SubmitMiningProof {
         sha_state,
-        entry_height,
+        slot,
         signature,
     };
     let account_metas = vec![AccountMeta::new(*from_pubkey, true)];
@@ -45,34 +45,27 @@ pub fn mining_proof(
 pub fn advertise_recent_blockhash(
     from_pubkey: &Pubkey,
     storage_hash: Hash,
-    entry_height: u64,
+    slot: u64,
 ) -> Instruction {
     let storage_instruction = StorageInstruction::AdvertiseStorageRecentBlockhash {
         hash: storage_hash,
-        entry_height,
+        slot,
     };
     let account_metas = vec![AccountMeta::new(*from_pubkey, true)];
     Instruction::new(id(), &storage_instruction, account_metas)
 }

-pub fn proof_validation(
-    from_pubkey: &Pubkey,
-    entry_height: u64,
-    proofs: Vec<CheckedProof>,
-) -> Instruction {
+pub fn proof_validation(from_pubkey: &Pubkey, slot: u64, proofs: Vec<CheckedProof>) -> Instruction {
     let mut account_metas = vec![AccountMeta::new(*from_pubkey, true)];
     proofs.iter().for_each(|checked_proof| {
         account_metas.push(AccountMeta::new(checked_proof.proof.id, false))
     });
-    let storage_instruction = StorageInstruction::ProofValidation {
-        entry_height,
-        proofs,
-    };
+    let storage_instruction = StorageInstruction::ProofValidation { slot, proofs };
     Instruction::new(id(), &storage_instruction, account_metas)
 }

-pub fn reward_claim(from_pubkey: &Pubkey, entry_height: u64) -> Instruction {
-    let storage_instruction = StorageInstruction::ClaimStorageReward { entry_height };
+pub fn reward_claim(from_pubkey: &Pubkey, slot: u64) -> Instruction {
+    let storage_instruction = StorageInstruction::ClaimStorageReward { slot };
     let account_metas = vec![AccountMeta::new(*from_pubkey, true)];
     Instruction::new(id(), &storage_instruction, account_metas)
 }
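All of the instruction builders above now take a slot in place of an entry height. A stand-in for the reworked enum that compiles on its own (solana_sdk types swapped for plain placeholders and fields trimmed), just to show the shape that call sites now serialize:

// Placeholder type so the sketch has no external dependencies.
type Hash = [u8; 32];

#[derive(Debug)]
enum StorageInstruction {
    SubmitMiningProof { sha_state: Hash, slot: u64 },
    AdvertiseStorageRecentBlockhash { hash: Hash, slot: u64 },
    ClaimStorageReward { slot: u64 },
    ProofValidation { slot: u64 },
}

fn main() {
    // Mirrors a builder call like reward_claim(&from_pubkey, slot) above.
    let slot = 2;
    let ix = StorageInstruction::ClaimStorageReward { slot };
    println!("{:?}", ix);

    let advert = StorageInstruction::AdvertiseStorageRecentBlockhash {
        hash: [0u8; 32],
        slot,
    };
    println!("{:?}", advert);
}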
@@ -37,44 +37,36 @@ pub fn process_instruction(
     match bincode::deserialize(data).map_err(|_| InstructionError::InvalidInstructionData)? {
         StorageInstruction::SubmitMiningProof {
             sha_state,
-            entry_height,
+            slot,
             signature,
         } => {
             if num_keyed_accounts != 1 {
                 Err(InstructionError::InvalidArgument)?;
             }
-            storage_account.submit_mining_proof(
-                storage_account_pubkey,
-                sha_state,
-                entry_height,
-                signature,
-            )
+            storage_account.submit_mining_proof(storage_account_pubkey, sha_state, slot, signature)
         }
-        StorageInstruction::AdvertiseStorageRecentBlockhash { hash, entry_height } => {
+        StorageInstruction::AdvertiseStorageRecentBlockhash { hash, slot } => {
             if num_keyed_accounts != 1 {
                 // keyed_accounts[0] should be the main storage key
                 // to access its data
                 Err(InstructionError::InvalidArgument)?;
             }
-            storage_account.advertise_storage_recent_blockhash(hash, entry_height)
+            storage_account.advertise_storage_recent_blockhash(hash, slot)
         }
-        StorageInstruction::ClaimStorageReward { entry_height } => {
+        StorageInstruction::ClaimStorageReward { slot } => {
             if num_keyed_accounts != 1 {
                 // keyed_accounts[0] should be the main storage key
                 // to access its data
                 Err(InstructionError::InvalidArgument)?;
             }
-            storage_account.claim_storage_reward(entry_height, tick_height)
+            storage_account.claim_storage_reward(slot, tick_height)
         }
-        StorageInstruction::ProofValidation {
-            entry_height,
-            proofs,
-        } => {
+        StorageInstruction::ProofValidation { slot, proofs } => {
             if num_keyed_accounts == 1 {
                 // have to have at least 1 replicator to do any verification
                 Err(InstructionError::InvalidArgument)?;
             }
-            storage_account.proof_validation(entry_height, proofs, &mut rest)
+            storage_account.proof_validation(slot, proofs, &mut rest)
         }
     }
 }
@@ -85,7 +77,7 @@ mod tests {
     use crate::id;
     use crate::storage_contract::{CheckedProof, Proof, ProofStatus, StorageContract};
     use crate::storage_instruction;
-    use crate::ENTRIES_PER_SEGMENT;
+    use crate::SLOTS_PER_SEGMENT;
     use bincode::deserialize;
     use solana_runtime::bank::Bank;
     use solana_runtime::bank_client::BankClient;
@@ -134,7 +126,7 @@ mod tests {
         let ix = storage_instruction::advertise_recent_blockhash(
             &pubkey,
             Hash::default(),
-            ENTRIES_PER_SEGMENT,
+            SLOTS_PER_SEGMENT,
         );

         assert_eq!(
@@ -158,7 +150,7 @@ mod tests {
     }

     #[test]
-    fn test_submit_mining_invalid_entry_height() {
+    fn test_submit_mining_invalid_slot() {
         solana_logger::setup();
         let pubkey = Pubkey::new_rand();
         let mut accounts = [Account::default(), Account::default()];
@@ -197,7 +189,7 @@ mod tests {

         let mut bank = Bank::new(&genesis_block);
         bank.add_instruction_processor(id(), process_instruction);
-        let entry_height = 0;
+        let slot = 0;
         let bank_client = BankClient::new(bank);

         let ix = system_instruction::create_account(&mint_pubkey, &validator, 10, 4 * 1042, &id());
@@ -209,7 +201,7 @@ mod tests {
         let ix = storage_instruction::advertise_recent_blockhash(
             &validator,
             Hash::default(),
-            ENTRIES_PER_SEGMENT,
+            SLOTS_PER_SEGMENT,
         );

         bank_client
@@ -219,7 +211,7 @@ mod tests {
         let ix = storage_instruction::mining_proof(
             &replicator,
             Hash::default(),
-            entry_height,
+            slot,
             Signature::default(),
         );
         bank_client
@@ -229,7 +221,7 @@ mod tests {
         let ix = storage_instruction::advertise_recent_blockhash(
             &validator,
             Hash::default(),
-            ENTRIES_PER_SEGMENT * 2,
+            SLOTS_PER_SEGMENT * 2,
         );
         bank_client
             .send_instruction(&validator_keypair, ix)
@@ -237,7 +229,7 @@ mod tests {

         let ix = storage_instruction::proof_validation(
             &validator,
-            entry_height,
+            slot,
             vec![CheckedProof {
                 proof: Proof {
                     id: replicator,
@@ -254,13 +246,13 @@ mod tests {
         let ix = storage_instruction::advertise_recent_blockhash(
             &validator,
             Hash::default(),
-            ENTRIES_PER_SEGMENT * 3,
+            SLOTS_PER_SEGMENT * 3,
         );
         bank_client
             .send_instruction(&validator_keypair, ix)
             .unwrap();

-        let ix = storage_instruction::reward_claim(&validator, entry_height);
+        let ix = storage_instruction::reward_claim(&validator, slot);
         bank_client
             .send_instruction(&validator_keypair, ix)
             .unwrap();
@@ -274,7 +266,7 @@ mod tests {
         // bank.register_tick(&bank.last_blockhash());
         //}

-        let ix = storage_instruction::reward_claim(&replicator, entry_height);
+        let ix = storage_instruction::reward_claim(&replicator, slot);
         bank_client
             .send_instruction(&replicator_keypair, ix)
             .unwrap();
@@ -283,21 +275,21 @@ mod tests {
         // assert_eq!(bank_client.get_balance(&replicator).unwrap(), TOTAL_REPLICATOR_REWARDS);
     }

-    fn get_storage_entry_height<C: SyncClient>(client: &C, account: &Pubkey) -> u64 {
+    fn get_storage_slot<C: SyncClient>(client: &C, account: &Pubkey) -> u64 {
         match client.get_account_data(&account).unwrap() {
             Some(storage_system_account_data) => {
                 let contract = deserialize(&storage_system_account_data);
                 if let Ok(contract) = contract {
                     match contract {
-                        StorageContract::ValidatorStorage { entry_height, .. } => {
-                            return entry_height;
+                        StorageContract::ValidatorStorage { slot, .. } => {
+                            return slot;
                         }
-                        _ => info!("error in reading entry_height"),
+                        _ => info!("error in reading slot"),
                     }
                 }
             }
             None => {
-                info!("error in reading entry_height");
+                info!("error in reading slot");
             }
         }
         0
@@ -357,18 +349,18 @@ mod tests {
         let ix = storage_instruction::advertise_recent_blockhash(
             &validator_pubkey,
             storage_blockhash,
-            ENTRIES_PER_SEGMENT,
+            SLOTS_PER_SEGMENT,
         );

         bank_client
             .send_instruction(&validator_keypair, ix)
             .unwrap();

-        let entry_height = 0;
+        let slot = 0;
         let ix = storage_instruction::mining_proof(
             &replicator_pubkey,
             Hash::default(),
-            entry_height,
+            slot,
             Signature::default(),
         );
         let _result = bank_client
@@ -376,8 +368,8 @@ mod tests {
             .unwrap();

         assert_eq!(
-            get_storage_entry_height(&bank_client, &validator_pubkey),
-            ENTRIES_PER_SEGMENT
+            get_storage_slot(&bank_client, &validator_pubkey),
+            SLOTS_PER_SEGMENT
         );
         assert_eq!(
             get_storage_blockhash(&bank_client, &validator_pubkey),