Rename blocktree to blockstore (#7757)

automerge

Author: Greg Fitzgerald
Date: 2020-01-13 14:13:52 -07:00
Committed by: Grimes
Parent: ef06d165b4
Commit: b5dba77056

59 changed files with 1616 additions and 1534 deletions
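The change is mechanical: the Blocktree type in solana_ledger becomes Blockstore (and the blocktree_processor module becomes blockstore_processor); signatures and behavior are unchanged, so call sites only swap names. Below is a minimal sketch of a call site after the rename, assembled only from calls that appear in this diff (get_tmp_ledger_path!, open, insert_shreds, destroy); the shreds parameter is a stand-in for data a real caller would supply, and the function name is illustrative, not from this commit:

    use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path, shred::Shred};
    use std::sync::Arc;

    // Sketch only: `shreds` stands in for shreds produced elsewhere (e.g. by a shredder).
    fn write_and_discard(shreds: Vec<Shred>) {
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(
            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );
        // Same call shape as the old Blocktree API; only the names changed.
        blockstore.insert_shreds(shreds, None, false).unwrap();
        drop(blockstore);
        Blockstore::destroy(&ledger_path).unwrap();
    }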


@ -19,7 +19,7 @@ use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_client::{rpc_client::RpcClient, rpc_request::RpcRequest, thin_client::ThinClient};
use solana_ledger::{
blocktree::Blocktree, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
};
use solana_net_utils::bind_in_range;
use solana_perf::packet::Packets;
@ -222,13 +222,13 @@ impl Archiver {
// Note for now, this ledger will not contain any of the existing entries
// in the ledger located at ledger_path, and will only append on newly received
// entries after being passed to window_service
let blocktree = Arc::new(
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"),
);
let gossip_service = GossipService::new(
&cluster_info,
Some(blocktree.clone()),
Some(blockstore.clone()),
None,
node.sockets.gossip,
&exit,
@ -294,7 +294,7 @@ impl Archiver {
let window_service = match Self::setup(
&mut meta,
cluster_info.clone(),
&blocktree,
&blockstore,
&exit,
&node_info,
&storage_keypair,
@ -320,7 +320,7 @@ impl Archiver {
// run archiver
Self::run(
&mut meta,
&blocktree,
&blockstore,
cluster_info,
&keypair,
&storage_keypair,
@ -344,14 +344,14 @@ impl Archiver {
fn run(
meta: &mut ArchiverMeta,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
) {
// encrypt segment
Self::encrypt_ledger(meta, blocktree).expect("ledger encrypt not successful");
Self::encrypt_ledger(meta, blockstore).expect("ledger encrypt not successful");
let enc_file_path = meta.ledger_data_file_encrypted.clone();
// do replicate
loop {
@ -443,7 +443,7 @@ impl Archiver {
fn setup(
meta: &mut ArchiverMeta,
cluster_info: Arc<RwLock<ClusterInfo>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
storage_keypair: &Arc<Keypair>,
@ -498,7 +498,7 @@ impl Archiver {
);
let window_service = WindowService::new(
blocktree.clone(),
blockstore.clone(),
cluster_info.clone(),
verified_receiver,
retransmit_sender,
@ -512,7 +512,7 @@ impl Archiver {
Self::wait_for_segment_download(
slot,
slots_per_segment,
&blocktree,
&blockstore,
&exit,
&node_info,
cluster_info,
@ -523,7 +523,7 @@ impl Archiver {
fn wait_for_segment_download(
start_slot: Slot,
slots_per_segment: u64,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
cluster_info: Arc<RwLock<ClusterInfo>>,
@ -534,7 +534,7 @@ impl Archiver {
);
let mut current_slot = start_slot;
'outer: loop {
while blocktree.is_full(current_slot) {
while blockstore.is_full(current_slot) {
current_slot += 1;
info!("current slot: {}", current_slot);
if current_slot >= start_slot + slots_per_segment {
@ -559,7 +559,7 @@ impl Archiver {
}
}
fn encrypt_ledger(meta: &mut ArchiverMeta, blocktree: &Arc<Blocktree>) -> Result<()> {
fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
meta.ledger_data_file_encrypted = meta.ledger_path.join(ENCRYPTED_FILENAME);
{
@ -567,7 +567,7 @@ impl Archiver {
ivec.copy_from_slice(&meta.signature.as_ref());
let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
blocktree,
blockstore,
meta.slot,
meta.slots_per_segment,
&meta.ledger_data_file_encrypted,
@ -844,15 +844,15 @@ impl Archiver {
}
}
/// Ask an archiver to populate a given blocktree with its segment.
/// Ask an archiver to populate a given blockstore with its segment.
/// Return the slot at the start of the archiver's segment
///
/// It is recommended to use a temporary blocktree for this since the download will not verify
/// It is recommended to use a temporary blockstore for this since the download will not verify
/// shreds received and might impact the chaining of shreds across slots
pub fn download_from_archiver(
cluster_info: &Arc<RwLock<ClusterInfo>>,
archiver_info: &ContactInfo,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
slots_per_segment: u64,
) -> Result<u64> {
// Create a client which downloads from the archiver and see that it
@ -884,7 +884,7 @@ impl Archiver {
for _ in 0..120 {
// Strategy used by archivers
let repairs = RepairService::generate_repairs_in_range(
blocktree,
blockstore,
repair_service::MAX_REPAIR_LENGTH,
&repair_slot_range,
);
@ -930,10 +930,10 @@ impl Archiver {
.into_iter()
.filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok())
.collect();
blocktree.insert_shreds(shreds, None, false)?;
blockstore.insert_shreds(shreds, None, false)?;
}
// check if all the slots in the segment are complete
if Self::segment_complete(start_slot, slots_per_segment, blocktree) {
if Self::segment_complete(start_slot, slots_per_segment, blockstore) {
break;
}
sleep(Duration::from_millis(500));
@ -942,7 +942,7 @@ impl Archiver {
t_receiver.join().unwrap();
// check if all the slots in the segment are complete
if !Self::segment_complete(start_slot, slots_per_segment, blocktree) {
if !Self::segment_complete(start_slot, slots_per_segment, blockstore) {
return Err(
io::Error::new(ErrorKind::Other, "Unable to download the full segment").into(),
);
@ -953,10 +953,10 @@ impl Archiver {
fn segment_complete(
start_slot: Slot,
slots_per_segment: u64,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> bool {
for slot in start_slot..(start_slot + slots_per_segment) {
if !blocktree.is_full(slot) {
if !blockstore.is_full(slot) {
return false;
}
}
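Per the doc comment on download_from_archiver above, callers should hand it a throwaway blockstore because downloaded shreds are not verified on receipt. A hedged sketch of that calling pattern follows; fetch_segment is a hypothetical helper, and the ClusterInfo, ContactInfo, and crate Result imports are the ones used elsewhere in this diff:

    use crate::archiver::Archiver;
    use crate::cluster_info::ClusterInfo;
    use crate::contact_info::ContactInfo;
    use crate::result::Result;
    use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
    use std::sync::{Arc, RwLock};

    // Hypothetical helper, not code from this commit.
    fn fetch_segment(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        archiver_info: &ContactInfo,
        slots_per_segment: u64,
    ) -> Result<u64> {
        let temp_ledger_path = get_tmp_ledger_path!();
        let temp_blockstore = Arc::new(
            Blockstore::open(&temp_ledger_path)
                .expect("Expected to be able to open database ledger"),
        );
        // Download into the temporary store; shreds are not verified on receipt.
        let start_slot = Archiver::download_from_archiver(
            cluster_info,
            archiver_info,
            &temp_blockstore,
            slots_per_segment,
        )?;
        // Validate the segment before trusting it, then discard the store.
        drop(temp_blockstore);
        Blockstore::destroy(&temp_ledger_path).expect("Expected successful database destruction");
        Ok(start_slot)
    }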


@ -10,8 +10,8 @@ use crate::{
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools;
use solana_ledger::{
blocktree::Blocktree,
blocktree_processor::{send_transaction_status_batch, TransactionStatusSender},
blockstore::Blockstore,
blockstore_processor::{send_transaction_status_batch, TransactionStatusSender},
entry::hash_transactions,
leader_schedule_cache::LeaderScheduleCache,
};
@ -979,7 +979,7 @@ impl BankingStage {
pub fn create_test_recorder(
bank: &Arc<Bank>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
poh_config: Option<PohConfig>,
) -> (
Arc<AtomicBool>,
@ -996,7 +996,7 @@ pub fn create_test_recorder(
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
blocktree,
blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&poh_config,
);
@ -1022,7 +1022,7 @@ mod tests {
use itertools::Itertools;
use solana_client::rpc_request::RpcEncodedTransaction;
use solana_ledger::{
blocktree::entries_to_test_shreds,
blockstore::entries_to_test_shreds,
entry::{next_entry, Entry, EntrySlice},
get_tmp_ledger_path,
};
@ -1043,11 +1043,12 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blocktree, None);
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
@ -1063,7 +1064,7 @@ mod tests {
banking_stage.join().unwrap();
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1080,13 +1081,14 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let mut poh_config = PohConfig::default();
poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config));
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
@ -1114,7 +1116,7 @@ mod tests {
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
@ -1141,14 +1143,15 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let mut poh_config = PohConfig::default();
// limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config));
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(
@ -1234,7 +1237,7 @@ mod tests {
drop(entry_receiver);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1280,15 +1283,15 @@ mod tests {
let entry_receiver = {
// start a banking_stage to eat verified receiver
let bank = Arc::new(Bank::new(&genesis_config));
let blocktree = Arc::new(
Blocktree::open(&ledger_path)
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let mut poh_config = PohConfig::default();
// limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree, Some(poh_config));
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info =
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
@ -1331,7 +1334,7 @@ mod tests {
// the account balance below zero before the credit is added.
assert_eq!(bank.get_balance(&alice.pubkey()), 2);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1349,8 +1352,8 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1358,7 +1361,7 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1435,7 +1438,7 @@ mod tests {
// Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1685,8 +1688,8 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1694,7 +1697,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1751,7 +1754,7 @@ mod tests {
assert_eq!(bank.get_balance(&pubkey), 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1778,8 +1781,8 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1787,7 +1790,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1806,7 +1809,7 @@ mod tests {
assert!(result.is_ok());
assert_eq!(unprocessed.len(), 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1866,8 +1869,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1875,7 +1878,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::new_rand(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1894,7 +1897,7 @@ mod tests {
assert_eq!(retryable_txs, expected);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1933,9 +1936,9 @@ mod tests {
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blocktree = Arc::new(blocktree);
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let blockstore = Arc::new(blockstore);
let (poh_recorder, _entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
@ -1943,7 +1946,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&blocktree,
&blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1952,13 +1955,13 @@ mod tests {
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let shreds = entries_to_test_shreds(entries.clone(), bank.slot(), 0, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[bank.slot()]).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(&[bank.slot()]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
blocktree.clone(),
blockstore.clone(),
&Arc::new(AtomicBool::new(false)),
);
@ -1972,7 +1975,7 @@ mod tests {
transaction_status_service.join().unwrap();
let confirmed_block = blocktree.get_confirmed_block(bank.slot(), None).unwrap();
let confirmed_block = blockstore.get_confirmed_block(bank.slot(), None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for (transaction, result) in confirmed_block.transactions.into_iter() {
@ -1993,6 +1996,6 @@ mod tests {
}
}
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
}


@ -8,7 +8,7 @@ use crate::blockstream::MockBlockstream as Blockstream;
#[cfg(not(test))]
use crate::blockstream::SocketBlockstream as Blockstream;
use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_sdk::pubkey::Pubkey;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
@ -25,7 +25,7 @@ impl BlockstreamService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
slot_full_receiver: Receiver<(u64, Pubkey)>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
unix_socket: &Path,
exit: &Arc<AtomicBool>,
) -> Self {
@ -38,7 +38,7 @@ impl BlockstreamService {
break;
}
if let Err(e) =
Self::process_entries(&slot_full_receiver, &blocktree, &mut blockstream)
Self::process_entries(&slot_full_receiver, &blockstore, &mut blockstream)
{
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
@ -52,18 +52,18 @@ impl BlockstreamService {
}
fn process_entries(
slot_full_receiver: &Receiver<(u64, Pubkey)>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
blockstream: &mut Blockstream,
) -> Result<()> {
let timeout = Duration::new(1, 0);
let (slot, slot_leader) = slot_full_receiver.recv_timeout(timeout)?;
let entries = blocktree.get_slot_entries(slot, 0, None).unwrap();
let blocktree_meta = blocktree.meta(slot).unwrap().unwrap();
let entries = blockstore.get_slot_entries(slot, 0, None).unwrap();
let blockstore_meta = blockstore.meta(slot).unwrap().unwrap();
let _parent_slot = if slot == 0 {
None
} else {
Some(blocktree_meta.parent_slot)
Some(blockstore_meta.parent_slot)
};
let ticks_per_slot = entries.iter().filter(|entry| entry.is_tick()).count() as u64;
let mut tick_height = ticks_per_slot * slot;
@ -113,14 +113,14 @@ mod test {
let ticks_per_slot = 5;
let leader_pubkey = Pubkey::new_rand();
// Set up genesis config and blocktree
// Set up genesis config and blockstore
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(1000);
genesis_config.ticks_per_slot = ticks_per_slot;
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
// Set up blockstream
let mut blockstream = Blockstream::new(&PathBuf::from("test_stream"));
@ -143,7 +143,7 @@ mod test {
let expected_entries = entries.clone();
let expected_tick_heights = [6, 7, 8, 9, 9, 10];
blocktree
blockstore
.write_entries(
1,
0,
@ -160,7 +160,7 @@ mod test {
slot_full_sender.send((1, leader_pubkey)).unwrap();
BlockstreamService::process_entries(
&slot_full_receiver,
&Arc::new(blocktree),
&Arc::new(blockstore),
&mut blockstream,
)
.unwrap();


@ -5,7 +5,7 @@ use self::standard_broadcast_run::StandardBroadcastRun;
use crate::cluster_info::{ClusterInfo, ClusterInfoError};
use crate::poh_recorder::WorkingBankEntry;
use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::shred::Shred;
use solana_ledger::staking_utils;
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
@ -44,7 +44,7 @@ impl BroadcastStageType {
cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
shred_version: u16,
) -> BroadcastStage {
let keypair = cluster_info.read().unwrap().keypair.clone();
@ -54,7 +54,7 @@ impl BroadcastStageType {
cluster_info,
receiver,
exit_sender,
blocktree,
blockstore,
StandardBroadcastRun::new(keypair, shred_version),
),
@ -63,7 +63,7 @@ impl BroadcastStageType {
cluster_info,
receiver,
exit_sender,
blocktree,
blockstore,
FailEntryVerificationBroadcastRun::new(keypair, shred_version),
),
@ -72,7 +72,7 @@ impl BroadcastStageType {
cluster_info,
receiver,
exit_sender,
blocktree,
blockstore,
BroadcastFakeShredsRun::new(keypair, 0, shred_version),
),
}
@ -83,10 +83,10 @@ type TransmitShreds = (Option<Arc<HashMap<Pubkey, u64>>>, Arc<Vec<Shred>>);
trait BroadcastRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()>;
fn transmit(
&self,
@ -97,7 +97,7 @@ trait BroadcastRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()>;
}
@ -126,14 +126,15 @@ pub struct BroadcastStage {
impl BroadcastStage {
#[allow(clippy::too_many_arguments)]
fn run(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
mut broadcast_stage_run: impl BroadcastRun,
) -> BroadcastStageReturnType {
loop {
let res = broadcast_stage_run.run(blocktree, receiver, socket_sender, blocktree_sender);
let res =
broadcast_stage_run.run(blockstore, receiver, socket_sender, blockstore_sender);
let res = Self::handle_error(res);
if let Some(res) = res {
return res;
@ -180,19 +181,25 @@ impl BroadcastStage {
cluster_info: Arc<RwLock<ClusterInfo>>,
receiver: Receiver<WorkingBankEntry>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone,
) -> Self {
let btree = blocktree.clone();
let btree = blockstore.clone();
let exit = exit_sender.clone();
let (socket_sender, socket_receiver) = channel();
let (blocktree_sender, blocktree_receiver) = channel();
let (blockstore_sender, blockstore_receiver) = channel();
let bs_run = broadcast_stage_run.clone();
let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
let _finalizer = Finalizer::new(exit);
Self::run(&btree, &receiver, &socket_sender, &blocktree_sender, bs_run)
Self::run(
&btree,
&receiver,
&socket_sender,
&blockstore_sender,
bs_run,
)
})
.unwrap();
let mut thread_hdls = vec![thread_hdl];
@ -213,15 +220,15 @@ impl BroadcastStage {
.unwrap();
thread_hdls.push(t);
}
let blocktree_receiver = Arc::new(Mutex::new(blocktree_receiver));
let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
for _ in 0..NUM_INSERT_THREADS {
let blocktree_receiver = blocktree_receiver.clone();
let blockstore_receiver = blockstore_receiver.clone();
let bs_record = broadcast_stage_run.clone();
let btree = blocktree.clone();
let btree = blockstore.clone();
let t = Builder::new()
.name("solana-broadcaster-record".to_string())
.spawn(move || loop {
let res = bs_record.record(&blocktree_receiver, &btree);
let res = bs_record.record(&blockstore_receiver, &btree);
let res = Self::handle_error(res);
if let Some(res) = res {
return res;
@ -248,7 +255,7 @@ mod test {
use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::entry::create_ticks;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
@ -261,7 +268,7 @@ mod test {
use std::time::Duration;
struct MockBroadcastStage {
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
broadcast_service: BroadcastStage,
bank: Arc<Bank>,
}
@ -272,7 +279,7 @@ mod test {
entry_receiver: Receiver<WorkingBankEntry>,
) -> MockBroadcastStage {
// Make the database ledger
let blocktree = Arc::new(Blocktree::open(ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap());
// Make the leader node and scheduler
let leader_info = Node::new_localhost_with_pubkey(leader_pubkey);
@ -298,12 +305,12 @@ mod test {
cluster_info,
entry_receiver,
&exit_sender,
&blocktree,
&blockstore,
StandardBroadcastRun::new(leader_keypair, 0),
);
MockBroadcastStage {
blocktree,
blockstore,
broadcast_service,
bank,
}
@ -350,8 +357,8 @@ mod test {
ticks_per_slot,
);
let blocktree = broadcast_service.blocktree;
let (entries, _, _) = blocktree
let blockstore = broadcast_service.blockstore;
let (entries, _, _) = blockstore
.get_slot_entries_with_shred_info(slot, 0)
.expect("Expect entries to be present");
assert_eq!(entries.len(), max_tick_height as usize);
@ -363,6 +370,6 @@ mod test {
.expect("Expect successful join of broadcast service");
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
}


@ -26,17 +26,17 @@ impl BroadcastFakeShredsRun {
impl BroadcastRun for BroadcastFakeShredsRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
// 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
let bank = receive_results.bank.clone();
let last_tick_height = receive_results.last_tick_height;
let next_shred_index = blocktree
let next_shred_index = blockstore
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
@ -83,7 +83,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
}
let data_shreds = Arc::new(data_shreds);
blocktree_sender.send(data_shreds.clone())?;
blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step
//some indicates fake shreds
@ -121,10 +121,10 @@ impl BroadcastRun for BroadcastFakeShredsRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
for data_shreds in receiver.lock().unwrap().iter() {
blocktree.insert_shreds(data_shreds.to_vec(), None, true)?;
blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
}
Ok(())
}


@ -21,10 +21,10 @@ impl FailEntryVerificationBroadcastRun {
impl BroadcastRun for FailEntryVerificationBroadcastRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
// 1) Pull entries from banking stage
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
@ -38,7 +38,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
last_entry.hash = Hash::default();
}
let next_shred_index = blocktree
let next_shred_index = blockstore
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
@ -61,7 +61,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
);
let data_shreds = Arc::new(data_shreds);
blocktree_sender.send(data_shreds.clone())?;
blockstore_sender.send(data_shreds.clone())?;
// 3) Start broadcast step
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
@ -90,12 +90,12 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let all_shreds = receiver.lock().unwrap().recv()?;
blocktree
blockstore
.insert_shreds(all_shreds.to_vec(), None, true)
.expect("Failed to insert shreds in blocktree");
.expect("Failed to insert shreds in blockstore");
Ok(())
}
}


@ -83,13 +83,13 @@ impl StandardBroadcastRun {
last_unfinished_slot_shred
}
fn init_shredder(&self, blocktree: &Blocktree, reference_tick: u8) -> (Shredder, u32) {
fn init_shredder(&self, blockstore: &Blockstore, reference_tick: u8) -> (Shredder, u32) {
let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
let next_shred_index = self
.unfinished_slot
.map(|s| s.next_shred_index)
.unwrap_or_else(|| {
blocktree
blockstore
.meta(slot)
.expect("Database error")
.map(|meta| meta.consumed)
@ -132,27 +132,27 @@ impl StandardBroadcastRun {
&mut self,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sock: &UdpSocket,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receive_results: ReceiveResults,
) -> Result<()> {
let (bsend, brecv) = channel();
let (ssend, srecv) = channel();
self.process_receive_results(&blocktree, &ssend, &bsend, receive_results)?;
self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?;
let srecv = Arc::new(Mutex::new(srecv));
let brecv = Arc::new(Mutex::new(brecv));
//data
let _ = self.transmit(&srecv, cluster_info, sock);
//coding
let _ = self.transmit(&srecv, cluster_info, sock);
let _ = self.record(&brecv, blocktree);
let _ = self.record(&brecv, blockstore);
Ok(())
}
fn process_receive_results(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
receive_results: ReceiveResults,
) -> Result<()> {
let mut receive_elapsed = receive_results.time_elapsed;
@ -181,7 +181,7 @@ impl StandardBroadcastRun {
// 2) Convert entries to shreds and coding shreds
let (shredder, next_shred_index) = self.init_shredder(
blocktree,
blockstore,
(bank.tick_height() % bank.ticks_per_slot()) as u8,
);
let mut data_shreds = self.entries_to_data_shreds(
@ -190,13 +190,13 @@ impl StandardBroadcastRun {
&receive_results.entries,
last_tick_height == bank.max_tick_height(),
);
//Insert the first shred so blocktree stores that the leader started this block
//Insert the first shred so blockstore stores that the leader started this block
//This must be done before the blocks are sent out over the wire.
if !data_shreds.is_empty() && data_shreds[0].index() == 0 {
let first = vec![data_shreds[0].clone()];
blocktree
blockstore
.insert_shreds(first, None, true)
.expect("Failed to insert shreds in blocktree");
.expect("Failed to insert shreds in blockstore");
}
let last_data_shred = data_shreds.len();
if let Some(last_shred) = last_unfinished_slot_shred {
@ -209,7 +209,7 @@ impl StandardBroadcastRun {
let stakes = stakes.map(Arc::new);
let data_shreds = Arc::new(data_shreds);
socket_sender.send((stakes.clone(), data_shreds.clone()))?;
blocktree_sender.send(data_shreds.clone())?;
blockstore_sender.send(data_shreds.clone())?;
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
let coding_shreds = Arc::new(coding_shreds);
socket_sender.send((stakes, coding_shreds))?;
@ -227,8 +227,8 @@ impl StandardBroadcastRun {
Ok(())
}
fn insert(&self, blocktree: &Arc<Blocktree>, shreds: Arc<Vec<Shred>>) -> Result<()> {
// Insert shreds into blocktree
fn insert(&self, blockstore: &Arc<Blockstore>, shreds: Arc<Vec<Shred>>) -> Result<()> {
// Insert shreds into blockstore
let insert_shreds_start = Instant::now();
//The first shred is inserted synchronously
let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 {
@ -236,9 +236,9 @@ impl StandardBroadcastRun {
} else {
shreds.to_vec()
};
blocktree
blockstore
.insert_shreds(data_shreds, None, true)
.expect("Failed to insert shreds in blocktree");
.expect("Failed to insert shreds in blockstore");
let insert_shreds_elapsed = insert_shreds_start.elapsed();
self.update_broadcast_stats(BroadcastStats {
insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
@ -317,13 +317,18 @@ impl StandardBroadcastRun {
impl BroadcastRun for StandardBroadcastRun {
fn run(
&mut self,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<TransmitShreds>,
blocktree_sender: &Sender<Arc<Vec<Shred>>>,
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
) -> Result<()> {
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
self.process_receive_results(blocktree, socket_sender, blocktree_sender, receive_results)
self.process_receive_results(
blockstore,
socket_sender,
blockstore_sender,
receive_results,
)
}
fn transmit(
&self,
@ -337,10 +342,10 @@ impl BroadcastRun for StandardBroadcastRun {
fn record(
&self,
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let shreds = receiver.lock().unwrap().recv()?;
self.insert(blocktree, shreds)
self.insert(blockstore, shreds)
}
}
@ -350,7 +355,7 @@ mod test {
use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::create_genesis_config;
use solana_ledger::{
blocktree::Blocktree, entry::create_ticks, get_tmp_ledger_path,
blockstore::Blockstore, entry::create_ticks, get_tmp_ledger_path,
shred::max_ticks_per_n_shreds,
};
use solana_runtime::bank::Bank;
@ -365,7 +370,7 @@ mod test {
fn setup(
num_shreds_per_slot: Slot,
) -> (
Arc<Blocktree>,
Arc<Blockstore>,
GenesisConfig,
Arc<RwLock<ClusterInfo>>,
Arc<Bank>,
@ -374,8 +379,8 @@ mod test {
) {
// Setup
let ledger_path = get_tmp_ledger_path!();
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
@ -388,7 +393,7 @@ mod test {
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
let bank0 = Arc::new(Bank::new(&genesis_config));
(
blocktree,
blockstore,
genesis_config,
cluster_info,
bank0,
@ -433,7 +438,7 @@ mod test {
fn test_slot_interrupt() {
// Setup
let num_shreds_per_slot = 2;
let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) =
let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
setup(num_shreds_per_slot);
// Insert 1 less than the number of ticks needed to finish the slot
@ -448,14 +453,14 @@ mod test {
// Step 1: Make an incomplete transmission for slot 0
let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone(), 0);
standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
.test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap();
let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();
assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds_per_slot);
assert_eq!(unfinished_slot.slot, 0);
assert_eq!(unfinished_slot.parent, 0);
// Make sure the slot is not complete
assert!(!blocktree.is_full(0));
assert!(!blockstore.is_full(0));
// Modify the stats, should reset later
standard_broadcast_run
.stats
@ -463,10 +468,10 @@ mod test {
.unwrap()
.receive_elapsed = 10;
// Try to fetch ticks from blocktree, nothing should break
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
// Try to fetch ticks from blockstore, nothing should break
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!(
blocktree
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
vec![],
@ -487,7 +492,7 @@ mod test {
last_tick_height: (ticks1.len() - 1) as u64,
};
standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
.test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap();
let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap();
@ -503,10 +508,10 @@ mod test {
0
);
// Try to fetch the incomplete ticks from blocktree, should succeed
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
// Try to fetch the incomplete ticks from blockstore, should succeed
assert_eq!(blockstore.get_slot_entries(0, 0, None).unwrap(), ticks0);
assert_eq!(
blocktree
blockstore
.get_slot_entries(0, num_shreds_per_slot, None)
.unwrap(),
vec![],
@ -517,7 +522,7 @@ mod test {
fn test_slot_finish() {
// Setup
let num_shreds_per_slot = 2;
let (blocktree, genesis_config, cluster_info, bank0, leader_keypair, socket) =
let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket) =
setup(num_shreds_per_slot);
// Insert complete slot of ticks needed to finish the slot
@ -531,7 +536,7 @@ mod test {
let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0);
standard_broadcast_run
.test_process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
.test_process_receive_results(&cluster_info, &socket, &blockstore, receive_results)
.unwrap();
assert!(standard_broadcast_run.unfinished_slot.is_none())
}


@ -1,4 +1,4 @@
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_sdk::clock::Slot;
use std::fs::File;
use std::io;
@ -12,7 +12,7 @@ pub const CHACHA_BLOCK_SIZE: usize = 64;
pub const CHACHA_KEY_SIZE: usize = 32;
pub fn chacha_cbc_encrypt_ledger(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
start_slot: Slot,
slots_per_segment: u64,
out_path: &Path,
@ -28,7 +28,7 @@ pub fn chacha_cbc_encrypt_ledger(
let mut current_slot = start_slot;
let mut start_index = 0;
loop {
match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
Ok((last_index, mut size)) => {
debug!(
"chacha: encrypting slice: {} num_shreds: {} data_len: {}",
@ -75,7 +75,7 @@ pub fn chacha_cbc_encrypt_ledger(
mod tests {
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::gen_keys::GenKeys;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::entry::Entry;
use solana_ledger::get_tmp_ledger_path;
use solana_sdk::hash::{hash, Hash, Hasher};
@ -131,7 +131,7 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 16;
let slots_per_segment = 32;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let out_path = tmp_file_path("test_encrypt_ledger");
let seed = [2u8; 32];
@ -139,7 +139,7 @@ mod tests {
let keypair = rnd.gen_keypair();
let entries = make_tiny_deterministic_test_entries(slots_per_segment);
blocktree
blockstore
.write_entries(
0,
0,
@ -157,8 +157,14 @@ mod tests {
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
);
chacha_cbc_encrypt_ledger(&blocktree, 0, slots_per_segment as u64, &out_path, &mut key)
.unwrap();
chacha_cbc_encrypt_ledger(
&blockstore,
0,
slots_per_segment as u64,
&out_path,
&mut key,
)
.unwrap();
let mut out_file = File::open(&out_path).unwrap();
let mut buf = vec![];
let size = out_file.read_to_end(&mut buf).unwrap();


@ -1,7 +1,7 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU
use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_perf::perf_libs;
use solana_sdk::hash::Hash;
use std::io;
@ -13,7 +13,7 @@ use std::sync::Arc;
// Then sample each block at the offsets provided by samples argument with sha256
// and return the vec of sha states
pub fn chacha_cbc_encrypt_file_many_keys(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
segment: u64,
slots_per_segment: u64,
ivecs: &mut [u8],
@ -46,7 +46,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
(api.chacha_init_sha_state)(int_sha_states.as_mut_ptr(), num_keys as u32);
}
loop {
match blocktree.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
match blockstore.get_data_shreds(current_slot, start_index, std::u64::MAX, &mut buffer) {
Ok((last_index, mut size)) => {
debug!(
"chacha_cuda: encrypting segment: {} num_shreds: {} data_len: {}",
@ -134,9 +134,9 @@ mod tests {
let entries = create_ticks(slots_per_segment, 0, Hash::default());
let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blocktree
blockstore
.write_entries(
0,
0,
@ -160,7 +160,7 @@ mod tests {
let mut cpu_iv = ivecs.clone();
chacha_cbc_encrypt_ledger(
&blocktree,
&blockstore,
0,
slots_per_segment as u64,
out_path,
@ -171,7 +171,7 @@ mod tests {
let ref_hash = sample_file(&out_path, &samples).unwrap();
let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
&blockstore,
0,
slots_per_segment as u64,
&mut ivecs,
@ -196,8 +196,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
let ticks_per_slot = 90;
let entries = create_ticks(2 * ticks_per_slot, 0, Hash::default());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blockstore
.write_entries(
0,
0,
@ -224,7 +224,7 @@ mod tests {
ivec[0] = i;
ivecs.extend(ivec.clone().iter());
chacha_cbc_encrypt_ledger(
&blocktree.clone(),
&blockstore.clone(),
0,
DEFAULT_SLOTS_PER_SEGMENT,
out_path,
@ -242,7 +242,7 @@ mod tests {
}
let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
&blockstore,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut ivecs,
@ -267,9 +267,9 @@ mod tests {
let mut keys = hex!("abc123");
let ledger_path = get_tmp_ledger_path!();
let samples = [0];
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
assert!(chacha_cbc_encrypt_file_many_keys(
&blocktree,
&blockstore,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut keys,


@ -30,7 +30,7 @@ use bincode::{serialize, serialized_size};
use core::cmp;
use itertools::Itertools;
use rand::{thread_rng, Rng};
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree, staking_utils};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore, staking_utils};
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_net_utils::{
@ -1113,12 +1113,12 @@ impl ClusterInfo {
}
fn get_data_shred_as_packet(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
slot: Slot,
shred_index: u64,
dest: &SocketAddr,
) -> Result<Option<Packet>> {
let data = blocktree.get_data_shred(slot, shred_index)?;
let data = blockstore.get_data_shred(slot, shred_index)?;
Ok(data.map(|data| {
let mut packet = Packet::default();
packet.meta.size = data.len();
@ -1132,14 +1132,14 @@ impl ClusterInfo {
recycler: &PacketsRecycler,
from: &ContactInfo,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
me: &ContactInfo,
slot: Slot,
shred_index: u64,
) -> Option<Packets> {
if let Some(blocktree) = blocktree {
if let Some(blockstore) = blockstore {
// Try to find the requested index in one of the slots
let packet = Self::get_data_shred_as_packet(blocktree, slot, shred_index, from_addr);
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
if let Ok(Some(packet)) = packet {
inc_new_counter_debug!("cluster_info-window-request-ledger", 1);
@ -1166,17 +1166,17 @@ impl ClusterInfo {
fn run_highest_window_request(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
slot: Slot,
highest_index: u64,
) -> Option<Packets> {
let blocktree = blocktree?;
let blockstore = blockstore?;
// Try to find the requested index in one of the slots
let meta = blocktree.meta(slot).ok()??;
let meta = blockstore.meta(slot).ok()??;
if meta.received > highest_index {
// meta.received must be at least 1 by this point
let packet =
Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr)
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
.ok()??;
return Some(Packets::new_with_recycler_data(
recycler,
@ -1190,19 +1190,19 @@ impl ClusterInfo {
fn run_orphan(
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
mut slot: Slot,
max_responses: usize,
) -> Option<Packets> {
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blocktree) = blocktree {
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blocktree.meta(slot) {
while let Ok(Some(meta)) = blockstore.meta(slot) {
if meta.received == 0 {
break;
}
let packet =
Self::get_data_shred_as_packet(blocktree, slot, meta.received - 1, from_addr);
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
if let Ok(Some(packet)) = packet {
res.packets.push(packet);
}
@ -1222,7 +1222,7 @@ impl ClusterInfo {
fn handle_packets(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
stakes: &HashMap<Pubkey, u64>,
packets: Packets,
response_sender: &PacketSender,
@ -1330,7 +1330,8 @@ impl ClusterInfo {
);
}
_ => {
let rsp = Self::handle_repair(me, recycler, &from_addr, blocktree, request);
let rsp =
Self::handle_repair(me, recycler, &from_addr, blockstore, request);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
@ -1475,7 +1476,7 @@ impl ClusterInfo {
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
request: Protocol,
) -> Option<Packets> {
let now = Instant::now();
@ -1511,7 +1512,7 @@ impl ClusterInfo {
recycler,
from,
&from_addr,
blocktree,
blockstore,
&my_info,
*slot,
*shred_index,
@ -1526,7 +1527,7 @@ impl ClusterInfo {
Self::run_highest_window_request(
recycler,
&from_addr,
blocktree,
blockstore,
*slot,
*highest_index,
),
@ -1539,7 +1540,7 @@ impl ClusterInfo {
Self::run_orphan(
recycler,
&from_addr,
blocktree,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
),
@ -1559,7 +1560,7 @@ impl ClusterInfo {
fn run_listen(
obj: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
blocktree: Option<&Arc<Blocktree>>,
blockstore: Option<&Arc<Blockstore>>,
bank_forks: Option<&Arc<RwLock<BankForks>>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
@ -1574,12 +1575,12 @@ impl ClusterInfo {
None => HashMap::new(),
};
Self::handle_packets(obj, &recycler, blocktree, &stakes, reqs, response_sender);
Self::handle_packets(obj, &recycler, blockstore, &stakes, reqs, response_sender);
Ok(())
}
pub fn listen(
me: Arc<RwLock<Self>>,
blocktree: Option<Arc<Blocktree>>,
blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
requests_receiver: PacketReceiver,
response_sender: PacketSender,
@ -1593,7 +1594,7 @@ impl ClusterInfo {
let e = Self::run_listen(
&me,
&recycler,
blocktree.as_ref(),
blockstore.as_ref(),
bank_forks.as_ref(),
&requests_receiver,
&response_sender,
@ -1916,9 +1917,9 @@ mod tests {
use crate::repair_service::RepairType;
use crate::result::Error;
use rayon::prelude::*;
use solana_ledger::blocktree::make_many_slot_entries;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blocktree_processor::fill_blocktree_slot_with_ticks;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_processor::fill_blockstore_slot_with_ticks;
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::shred::{
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
@ -2062,7 +2063,7 @@ mod tests {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let me = ContactInfo::new(
&Pubkey::new_rand(),
socketaddr!("127.0.0.1:1234"),
@ -2080,7 +2081,7 @@ mod tests {
&recycler,
&me,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
&me,
0,
0,
@ -2097,7 +2098,7 @@ mod tests {
CodingShredHeader::default(),
);
blocktree
blockstore
.insert_shreds(vec![shred_info], None, false)
.expect("Expect successful ledger write");
@ -2105,7 +2106,7 @@ mod tests {
&recycler,
&me,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
&me,
2,
1,
@ -2121,7 +2122,7 @@ mod tests {
assert_eq!(rv[0].slot(), 2);
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
/// test that window requests respond with the right shred, and do not overrun
@ -2131,18 +2132,18 @@ mod tests {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
0,
0,
);
assert!(rv.is_none());
let _ = fill_blocktree_slot_with_ticks(
&blocktree,
let _ = fill_blockstore_slot_with_ticks(
&blockstore,
max_ticks_per_n_shreds(1) + 1,
2,
1,
@ -2152,7 +2153,7 @@ mod tests {
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
2,
1,
);
@ -2163,21 +2164,21 @@ mod tests {
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
.collect();
assert!(!rv.is_empty());
let index = blocktree.meta(2).unwrap().unwrap().received - 1;
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
assert_eq!(rv[0].index(), index as u32);
assert_eq!(rv[0].slot(), 2);
let rv = ClusterInfo::run_highest_window_request(
&recycler,
&socketaddr_any!(),
Some(&blocktree),
Some(&blockstore),
2,
index + 1,
);
assert!(rv.is_none());
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
@ -2186,25 +2187,27 @@ mod tests {
let recycler = PacketsRecycler::default();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 2, 0);
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
assert!(rv.is_none());
// Create slots 1, 2, 3 with 5 shreds apiece
let (shreds, _) = make_many_slot_entries(1, 3, 5);
blocktree
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful ledger write");
// We don't have slot 4, so we don't know how to service this request
let rv = ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 4, 5);
let rv =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
assert!(rv.is_none());
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
// for this request
let rv: Vec<_> =
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blocktree), 3, 5)
ClusterInfo::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
.expect("run_orphan packets")
.packets
.iter()
@ -2213,9 +2216,9 @@ mod tests {
let expected: Vec<_> = (1..=3)
.rev()
.map(|slot| {
let index = blocktree.meta(slot).unwrap().unwrap().received - 1;
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
ClusterInfo::get_data_shred_as_packet(
&blocktree,
&blockstore,
slot,
index,
&socketaddr_any!(),
@ -2227,7 +2230,7 @@ mod tests {
assert_eq!(rv, expected)
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
fn assert_in_range(x: u16, range: (u16, u16)) {


@ -5,7 +5,7 @@ use byteorder::{ByteOrder, LittleEndian};
use rand::seq::SliceRandom;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::rooted_slot_iterator::RootedSlotIterator;
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
@ -89,13 +89,13 @@ pub struct ClusterInfoRepairListener {
impl ClusterInfoRepairListener {
pub fn new(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
cluster_info: Arc<RwLock<ClusterInfo>>,
epoch_schedule: EpochSchedule,
) -> Self {
let exit = exit.clone();
let blocktree = blocktree.clone();
let blockstore = blockstore.clone();
let thread = Builder::new()
.name("solana-cluster_info_repair_listener".to_string())
.spawn(move || {
@ -105,7 +105,7 @@ impl ClusterInfoRepairListener {
// 2) The latest root the peer gossiped
let mut peer_infos: HashMap<Pubkey, RepaireeInfo> = HashMap::new();
let _ = Self::recv_loop(
&blocktree,
&blockstore,
&mut peer_infos,
&exit,
&cluster_info,
@ -119,7 +119,7 @@ impl ClusterInfoRepairListener {
}
fn recv_loop(
blocktree: &Blocktree,
blockstore: &Blockstore,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
exit: &Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -134,7 +134,7 @@ impl ClusterInfoRepairListener {
return Ok(());
}
let lowest_slot = blocktree.lowest_slot();
let lowest_slot = blockstore.lowest_slot();
let peers = cluster_info.read().unwrap().gossip_peers();
let mut peers_needing_repairs: HashMap<Pubkey, EpochSlots> = HashMap::new();
@ -156,7 +156,7 @@ impl ClusterInfoRepairListener {
// After updating all the peers, send out repairs to those that need it
let _ = Self::serve_repairs(
&my_pubkey,
blocktree,
blockstore,
peer_infos,
&peers_needing_repairs,
&socket,
@ -219,7 +219,7 @@ impl ClusterInfoRepairListener {
fn serve_repairs(
my_pubkey: &Pubkey,
blocktree: &Blocktree,
blockstore: &Blockstore,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
repairees: &HashMap<Pubkey, EpochSlots>,
socket: &UdpSocket,
@ -258,7 +258,7 @@ impl ClusterInfoRepairListener {
my_pubkey,
repairee_pubkey,
my_root,
blocktree,
blockstore,
&repairee_epoch_slots,
&eligible_repairmen,
socket,
@ -286,7 +286,7 @@ impl ClusterInfoRepairListener {
my_pubkey: &Pubkey,
repairee_pubkey: &Pubkey,
my_root: Slot,
blocktree: &Blocktree,
blockstore: &Blockstore,
repairee_epoch_slots: &EpochSlots,
eligible_repairmen: &[&Pubkey],
socket: &UdpSocket,
@ -295,7 +295,7 @@ impl ClusterInfoRepairListener {
epoch_schedule: &EpochSchedule,
last_repaired_slot_and_ts: (u64, u64),
) -> Result<Option<Slot>> {
let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blocktree);
let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blockstore);
if slot_iter.is_err() {
info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
@ -366,17 +366,17 @@ impl ClusterInfoRepairListener {
// a database iterator over the slots because by the time this node is
// sending the shreds in this slot for repair, we expect these slots
// to be full.
if let Some(shred_data) = blocktree
if let Some(shred_data) = blockstore
.get_data_shred(slot, shred_index as u64)
.expect("Failed to read data shred from blocktree")
.expect("Failed to read data shred from blockstore")
{
socket.send_to(&shred_data[..], repairee_addr)?;
total_data_shreds_sent += 1;
}
if let Some(coding_bytes) = blocktree
if let Some(coding_bytes) = blockstore
.get_coding_shred(slot, shred_index as u64)
.expect("Failed to read coding shred from blocktree")
.expect("Failed to read coding shred from blockstore")
{
socket.send_to(&coding_bytes[..], repairee_addr)?;
total_coding_shreds_sent += 1;
@ -550,7 +550,7 @@ mod tests {
use crate::packet::Packets;
use crate::streamer;
use crate::streamer::PacketReceiver;
use solana_ledger::blocktree::make_many_slot_entries;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use solana_perf::recycler::Recycler;
use std::collections::BTreeSet;
@ -699,16 +699,16 @@ mod tests {
#[test]
fn test_serve_same_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 2;
let (shreds, _) = make_many_slot_entries(0, num_slots, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let last_root = num_slots - 1;
let roots: Vec<_> = (0..=last_root).collect();
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();
@ -729,7 +729,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&eligible_repairmen,
&my_socket,
@ -749,7 +749,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&eligible_repairmen,
&my_socket,
@ -765,20 +765,20 @@ mod tests {
#[test]
fn test_serve_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let entries_per_slot = 5;
let num_slots = 10;
assert_eq!(num_slots % 2, 0);
let (shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
let num_shreds_per_slot = shreds.len() as u64 / num_slots;
// Write slots in the range [0, num_slots) to blocktree
blocktree.insert_shreds(shreds, None, false).unwrap();
// Write slots in the range [0, num_slots) to blockstore
blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=num_slots - 1).collect();
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();
@ -809,7 +809,7 @@ mod tests {
&repairman_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&eligible_repairmen_refs,
&my_socket,
@ -848,26 +848,26 @@ mod tests {
// Shutdown
mock_repairee.close().unwrap();
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_no_repair_past_confirmed_epoch() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let stakers_slot_offset = 16;
let slots_per_epoch = stakers_slot_offset * 2;
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, false);
// Create shreds for first two epochs and write them to blocktree
// Create shreds for first two epochs and write them to blockstore
let total_slots = slots_per_epoch * 2;
let (shreds, _) = make_many_slot_entries(0, total_slots, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
// Set up my information
let my_pubkey = Pubkey::new_rand();
@ -896,7 +896,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
total_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&vec![&my_pubkey],
&my_socket,
@ -919,7 +919,7 @@ mod tests {
&my_pubkey,
&mock_repairee.id,
total_slots - 1,
&blocktree,
&blockstore,
&repairee_epoch_slots,
&vec![&my_pubkey],
&my_socket,
@ -936,8 +936,8 @@ mod tests {
// Shutdown
mock_repairee.close().unwrap();
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]

View File

@ -6,7 +6,7 @@ use crate::streamer;
use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient};
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
@ -24,7 +24,7 @@ pub struct GossipService {
impl GossipService {
pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: Option<Arc<Blocktree>>,
blockstore: Option<Arc<Blockstore>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
gossip_socket: UdpSocket,
exit: &Arc<AtomicBool>,
@ -47,7 +47,7 @@ impl GossipService {
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
let t_listen = ClusterInfo::listen(
cluster_info.clone(),
blocktree,
blockstore,
bank_forks.clone(),
request_receiver,
response_sender.clone(),

View File

@ -1,6 +1,6 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
@ -27,7 +27,7 @@ pub struct LedgerCleanupService {
impl LedgerCleanupService {
pub fn new(
new_root_receiver: Receiver<Slot>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
max_ledger_slots: u64,
exit: &Arc<AtomicBool>,
) -> Self {
@ -45,7 +45,7 @@ impl LedgerCleanupService {
}
if let Err(e) = Self::cleanup_ledger(
&new_root_receiver,
&blocktree,
&blockstore,
max_ledger_slots,
&mut next_purge_batch,
) {
@ -61,20 +61,20 @@ impl LedgerCleanupService {
fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
max_ledger_slots: u64,
next_purge_batch: &mut u64,
) -> Result<(), RecvTimeoutError> {
let disk_utilization_pre = blocktree.storage_size();
let disk_utilization_pre = blockstore.storage_size();
let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
if root > *next_purge_batch {
// Cleanup
blocktree.purge_slots(0, Some(root - max_ledger_slots));
blockstore.purge_slots(0, Some(root - max_ledger_slots));
*next_purge_batch += DEFAULT_PURGE_BATCH_SIZE;
}
let disk_utilization_post = blocktree.storage_size();
let disk_utilization_post = blockstore.storage_size();
if let (Ok(disk_utilization_pre), Ok(disk_utilization_post)) =
(disk_utilization_pre, disk_utilization_post)
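
A hedged sketch of the purge-window arithmetic above: everything at or below `root - max_ledger_slots` becomes eligible for purging once the root advances past `next_purge_batch`.

fn purge_upper_bound(root: u64, max_ledger_slots: u64) -> Option<u64> {
    // None while the ledger is still shorter than the retention target
    root.checked_sub(max_ledger_slots)
}

fn main() {
    // With root = 50 and max_ledger_slots = 10 (the test_cleanup values below),
    // slots 0..=40 become eligible for purging.
    assert_eq!(purge_upper_bound(50, 10), Some(40));
}
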
@ -101,39 +101,39 @@ impl LedgerCleanupService {
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blocktree::make_many_slot_entries;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use std::sync::mpsc::channel;
#[test]
fn test_cleanup() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
blocktree.insert_shreds(shreds, None, false).unwrap();
let blocktree = Arc::new(blocktree);
blockstore.insert_shreds(shreds, None, false).unwrap();
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
// Send a signal to purge slots 0-40
let mut next_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blocktree, 10, &mut next_purge_slot)
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 10, &mut next_purge_slot)
.unwrap();
//check that 0-40 don't exist
blocktree
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, _)| assert!(slot > 40));
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_compaction() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let n = 10_000;
let batch_size = 100;
@ -142,10 +142,10 @@ mod tests {
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(i * batch_size, batch_size, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
}
let u1 = blocktree.storage_size().unwrap() as f64;
let u1 = blockstore.storage_size().unwrap() as f64;
// send signal to cleanup slots
let (sender, receiver) = channel();
@ -153,7 +153,7 @@ mod tests {
let mut next_purge_batch = 0;
LedgerCleanupService::cleanup_ledger(
&receiver,
&blocktree,
&blockstore,
max_ledger_slots,
&mut next_purge_batch,
)
@ -161,18 +161,18 @@ mod tests {
thread::sleep(Duration::from_secs(2));
let u2 = blocktree.storage_size().unwrap() as f64;
let u2 = blockstore.storage_size().unwrap() as f64;
assert!(u2 < u1, "insufficient compaction! pre={},post={}", u1, u2,);
// check that early slots don't exist
let max_slot = n - max_ledger_slots;
blocktree
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, _)| assert!(slot > max_slot));
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
}

View File

@ -10,7 +10,7 @@
//! For Entries:
//! * a recorded entry's tick height must satisfy WorkingBank::min_tick_height <= tick_height < WorkingBank::max_tick_height
//!
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blockstore::Blockstore;
use solana_ledger::entry::Entry;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::poh::Poh;
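
A hedged sketch of the record window documented in the module comment above; WorkingBankWindow is hypothetical scaffolding standing in for the min/max tick heights carried by WorkingBank.

struct WorkingBankWindow {
    min_tick_height: u64,
    max_tick_height: u64,
}

fn can_record(w: &WorkingBankWindow, tick_height: u64) -> bool {
    // An entry is recordable only while the working bank is inside its window
    tick_height >= w.min_tick_height && tick_height < w.max_tick_height
}
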
@ -70,7 +70,7 @@ pub struct PohRecorder {
leader_last_tick_height: u64, // zero if none
grace_ticks: u64,
id: Pubkey,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
poh_config: Arc<PohConfig>,
ticks_per_slot: u64,
@ -84,7 +84,7 @@ impl PohRecorder {
&self.id,
bank.slot(),
&bank,
Some(&self.blocktree),
Some(&self.blockstore),
);
assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
let (leader_first_tick_height, leader_last_tick_height, grace_ticks) =
@ -407,7 +407,7 @@ impl PohRecorder {
next_leader_slot: Option<(Slot, Slot)>,
ticks_per_slot: u64,
id: &Pubkey,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
clear_bank_signal: Option<SyncSender<bool>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>,
@ -433,7 +433,7 @@ impl PohRecorder {
leader_last_tick_height,
grace_ticks,
id: *id,
blocktree: blocktree.clone(),
blockstore: blockstore.clone(),
leader_schedule_cache: leader_schedule_cache.clone(),
ticks_per_slot,
poh_config: poh_config.clone(),
@ -452,7 +452,7 @@ impl PohRecorder {
next_leader_slot: Option<(Slot, Slot)>,
ticks_per_slot: u64,
id: &Pubkey,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
poh_config: &Arc<PohConfig>,
) -> (Self, Receiver<WorkingBankEntry>) {
@ -463,7 +463,7 @@ impl PohRecorder {
next_leader_slot,
ticks_per_slot,
id,
blocktree,
blockstore,
None,
leader_schedule_cache,
poh_config,
@ -475,7 +475,7 @@ impl PohRecorder {
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::hash;
@ -486,8 +486,8 @@ mod tests {
let prev_hash = Hash::default();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
@ -496,7 +496,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -505,7 +505,7 @@ mod tests {
assert_eq!(poh_recorder.tick_cache[0].1, 1);
assert_eq!(poh_recorder.tick_height, 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -513,8 +513,8 @@ mod tests {
let prev_hash = Hash::default();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
@ -523,7 +523,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -533,15 +533,15 @@ mod tests {
assert_eq!(poh_recorder.tick_cache[1].1, 2);
assert_eq!(poh_recorder.tick_height, 2);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_reset_clears_cache() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -549,7 +549,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -558,15 +558,15 @@ mod tests {
poh_recorder.reset(Hash::default(), 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_clear() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -577,7 +577,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -592,15 +592,15 @@ mod tests {
poh_recorder.clear_bank();
assert!(poh_recorder.working_bank.is_none());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_tick_sent_after_min() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -611,7 +611,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -641,15 +641,15 @@ mod tests {
assert_eq!(num_entries, 3);
assert!(poh_recorder.working_bank.is_none());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_tick_sent_upto_and_including_max() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -660,7 +660,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -688,15 +688,15 @@ mod tests {
}
assert_eq!(num_entries, 3);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_to_early() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -707,7 +707,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -726,15 +726,15 @@ mod tests {
.is_err());
assert!(entry_receiver.try_recv().is_err());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_bad_slot() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -745,7 +745,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -766,15 +766,15 @@ mod tests {
Err(PohRecorderError::MaxHeightReached)
);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_at_min_passes() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -785,7 +785,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -812,15 +812,15 @@ mod tests {
let (_bank, (e, _tick_height)) = entry_receiver.recv().expect("recv 2");
assert!(!e.is_tick());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_recorder_record_at_max_fails() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -831,7 +831,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -856,15 +856,15 @@ mod tests {
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert!(entry.is_tick());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_poh_cache_on_disconnect() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -875,7 +875,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -894,15 +894,15 @@ mod tests {
assert!(poh_recorder.working_bank.is_none());
assert_eq!(poh_recorder.tick_cache.len(), 3);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reset_current() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -910,7 +910,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -921,15 +921,15 @@ mod tests {
poh_recorder.reset(hash, 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reset_with_cached() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -937,7 +937,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -947,7 +947,7 @@ mod tests {
poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4)));
assert_eq!(poh_recorder.tick_cache.len(), 0);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -956,8 +956,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
Hash::default(),
@ -965,7 +965,7 @@ mod tests {
Some((4, 4)),
DEFAULT_TICKS_PER_SLOT,
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
);
@ -980,15 +980,15 @@ mod tests {
poh_recorder.tick();
assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reset_clear_bank() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
@ -998,7 +998,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1011,15 +1011,15 @@ mod tests {
poh_recorder.reset(hash(b"hello"), 0, Some((4, 4)));
assert!(poh_recorder.working_bank.is_none());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
pub fn test_clear_signal() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let (sender, receiver) = sync_channel(1);
@ -1030,7 +1030,7 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
Some(sender),
&Arc::new(LeaderScheduleCache::default()),
&Arc::new(PohConfig::default()),
@ -1039,7 +1039,7 @@ mod tests {
poh_recorder.clear_bank();
assert!(receiver.try_recv().is_ok());
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1047,8 +1047,8 @@ mod tests {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let ticks_per_slot = 5;
let GenesisConfigInfo {
mut genesis_config, ..
@ -1064,7 +1064,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1091,7 +1091,7 @@ mod tests {
// Make sure the starting slot is updated
assert_eq!(poh_recorder.start_slot, end_slot);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
@ -1100,8 +1100,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -1112,7 +1112,7 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1213,15 +1213,15 @@ mod tests {
assert_eq!(grace_ticks, overshoot_factor * bank.ticks_per_slot());
assert_eq!(leader_slot, 9);
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_would_be_leader_soon() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
@ -1232,7 +1232,7 @@ mod tests {
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
@ -1287,8 +1287,8 @@ mod tests {
let ledger_path = get_tmp_ledger_path!();
{
// test that virtual ticks are flushed into a newly set bank asap
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let genesis_hash = bank.last_blockhash();
@ -1300,7 +1300,7 @@ mod tests {
Some((2, 2)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);

View File

@ -123,7 +123,7 @@ mod tests {
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::poh_recorder::WorkingBank;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_sdk::hash::hash;
@ -137,8 +137,8 @@ mod tests {
let prev_hash = bank.last_blockhash();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let poh_config = Arc::new(PohConfig {
hashes_per_tick: Some(2),
target_tick_duration: Duration::from_millis(42),
@ -151,7 +151,7 @@ mod tests {
Some((4, 4)),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&poh_config,
);
@ -230,6 +230,6 @@ mod tests {
let _ = poh_service.join().unwrap();
let _ = entry_producer.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
}

View File

@ -6,7 +6,7 @@ use crate::{
};
use solana_ledger::{
bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta},
blockstore::{Blockstore, CompletedSlotsReceiver, SlotMeta},
};
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
@ -71,7 +71,7 @@ pub struct RepairService {
impl RepairService {
pub fn new(
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
exit: Arc<AtomicBool>,
repair_socket: Arc<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>,
@ -81,7 +81,7 @@ impl RepairService {
RepairStrategy::RepairAll {
ref epoch_schedule, ..
} => Some(ClusterInfoRepairListener::new(
&blocktree,
&blockstore,
&exit,
cluster_info.clone(),
*epoch_schedule,
@ -94,7 +94,7 @@ impl RepairService {
.name("solana-repair-service".to_string())
.spawn(move || {
Self::run(
&blocktree,
&blockstore,
&exit,
&repair_socket,
&cluster_info,
@ -110,7 +110,7 @@ impl RepairService {
}
fn run(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
repair_socket: &Arc<UdpSocket>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -123,10 +123,10 @@ impl RepairService {
ref epoch_schedule, ..
} = repair_strategy
{
current_root = blocktree.last_root();
current_root = blockstore.last_root();
Self::initialize_epoch_slots(
id,
blocktree,
blockstore,
&mut epoch_slots,
current_root,
epoch_schedule,
@ -143,7 +143,7 @@ impl RepairService {
RepairStrategy::RepairRange(ref repair_slot_range) => {
// Strategy used by archivers
Self::generate_repairs_in_range(
blocktree,
blockstore,
MAX_REPAIR_LENGTH,
repair_slot_range,
)
@ -153,8 +153,8 @@ impl RepairService {
ref completed_slots_receiver,
..
} => {
let new_root = blocktree.last_root();
let lowest_slot = blocktree.lowest_slot();
let new_root = blockstore.last_root();
let lowest_slot = blockstore.lowest_slot();
Self::update_epoch_slots(
id,
new_root,
@ -164,7 +164,7 @@ impl RepairService {
&cluster_info,
completed_slots_receiver,
);
Self::generate_repairs(blocktree, new_root, MAX_REPAIR_LENGTH)
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
}
}
};
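
A simplified sketch, as if written inside repair_service.rs, of the dispatch the run() loop performs each iteration; RepairAll fields are elided here, and the helpers called are private in the real module.

fn plan_repairs(
    strategy: &RepairStrategy,
    blockstore: &Blockstore,
) -> Result<Vec<RepairType>> {
    match strategy {
        // Archivers only repair the slot window of the segment they serve
        RepairStrategy::RepairRange(range) => {
            RepairService::generate_repairs_in_range(blockstore, MAX_REPAIR_LENGTH, range)
        }
        // Validators repair everything past the last root
        RepairStrategy::RepairAll { .. } => {
            RepairService::generate_repairs(blockstore, blockstore.last_root(), MAX_REPAIR_LENGTH)
        }
    }
}
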
@ -195,7 +195,7 @@ impl RepairService {
// Generate repairs for all slots `x` in the range repair_range.start <= x <= repair_range.end
pub fn generate_repairs_in_range(
blocktree: &Blocktree,
blockstore: &Blockstore,
max_repairs: usize,
repair_range: &RepairSlotRange,
) -> Result<Vec<RepairType>> {
@ -206,7 +206,7 @@ impl RepairService {
break;
}
let meta = blocktree
let meta = blockstore
.meta(slot)
.expect("Unable to lookup slot meta")
.unwrap_or(SlotMeta {
@ -215,7 +215,7 @@ impl RepairService {
});
let new_repairs = Self::generate_repairs_for_slot(
blocktree,
blockstore,
slot,
&meta,
max_repairs - repairs.len(),
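
A hedged usage sketch mirroring the tests further down; RepairSlotRange is assumed to expose start/end fields, as its construction elsewhere in this file suggests.

fn segment_repairs(blockstore: &Blockstore, start: u64, end: u64) -> Vec<RepairType> {
    let range = RepairSlotRange { start, end };
    RepairService::generate_repairs_in_range(blockstore, std::usize::MAX, &range)
        .expect("generate repairs for range")
}
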
@ -227,18 +227,18 @@ impl RepairService {
}
fn generate_repairs(
blocktree: &Blocktree,
blockstore: &Blockstore,
root: Slot,
max_repairs: usize,
) -> Result<Vec<RepairType>> {
// Slot height and shred indexes for shreds we want to repair
let mut repairs: Vec<RepairType> = vec![];
Self::generate_repairs_for_fork(blocktree, &mut repairs, max_repairs, root);
Self::generate_repairs_for_fork(blockstore, &mut repairs, max_repairs, root);
// TODO: Incorporate gossip to determine priorities for repair?
// Try to resolve orphans in blocktree
let mut orphans = blocktree.get_orphans(Some(MAX_ORPHANS));
// Try to resolve orphans in blockstore
let mut orphans = blockstore.get_orphans(Some(MAX_ORPHANS));
orphans.retain(|x| *x > root);
Self::generate_repairs_for_orphans(&orphans[..], &mut repairs);
@ -246,7 +246,7 @@ impl RepairService {
}
fn generate_repairs_for_slot(
blocktree: &Blocktree,
blockstore: &Blockstore,
slot: Slot,
slot_meta: &SlotMeta,
max_repairs: usize,
@ -256,7 +256,7 @@ impl RepairService {
} else if slot_meta.consumed == slot_meta.received {
vec![RepairType::HighestShred(slot, slot_meta.received)]
} else {
let reqs = blocktree.find_missing_data_indexes(
let reqs = blockstore.find_missing_data_indexes(
slot,
slot_meta.first_shred_timestamp,
slot_meta.consumed,
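
A hedged sketch of the per-slot decision above (the first branch, elided by the hunk, is assumed to skip already-full slots): a slot with no holes gets a single HighestShred request, otherwise one Shred request per missing data-shred index.

fn slot_repairs(
    slot: u64,
    is_full: bool,
    consumed: u64,
    received: u64,
    missing_indexes: Vec<u64>, // as returned by find_missing_data_indexes
) -> Vec<RepairType> {
    if is_full {
        vec![]
    } else if consumed == received {
        vec![RepairType::HighestShred(slot, received)]
    } else {
        missing_indexes
            .into_iter()
            .map(|i| RepairType::Shred(slot, i))
            .collect()
    }
}
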
@ -275,7 +275,7 @@ impl RepairService {
/// Repairs any fork starting at the input slot
fn generate_repairs_for_fork(
blocktree: &Blocktree,
blockstore: &Blockstore,
repairs: &mut Vec<RepairType>,
max_repairs: usize,
slot: Slot,
@ -283,9 +283,9 @@ impl RepairService {
let mut pending_slots = vec![slot];
while repairs.len() < max_repairs && !pending_slots.is_empty() {
let slot = pending_slots.pop().unwrap();
if let Some(slot_meta) = blocktree.meta(slot).unwrap() {
if let Some(slot_meta) = blockstore.meta(slot).unwrap() {
let new_repairs = Self::generate_repairs_for_slot(
blocktree,
blockstore,
slot,
&slot_meta,
max_repairs - repairs.len(),
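
A minimal sketch of the depth-first fork walk above, with SlotMeta reduced to its next_slots links (a hypothetical simplification) and the repair budget reduced to a visit count.

use std::collections::HashMap;

fn walk_fork(children: &HashMap<u64, Vec<u64>>, start: u64, max: usize) -> Vec<u64> {
    let mut visited = Vec::new();
    let mut pending = vec![start];
    while visited.len() < max && !pending.is_empty() {
        let slot = pending.pop().unwrap();
        visited.push(slot);
        if let Some(next_slots) = children.get(&slot) {
            pending.extend(next_slots.iter().copied());
        }
    }
    visited
}
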
@ -300,7 +300,7 @@ impl RepairService {
}
fn get_completed_slots_past_root(
blocktree: &Blocktree,
blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>,
root: Slot,
epoch_schedule: &EpochSchedule,
@ -308,7 +308,7 @@ impl RepairService {
let last_confirmed_epoch = epoch_schedule.get_leader_schedule_epoch(root);
let last_epoch_slot = epoch_schedule.get_last_slot_in_epoch(last_confirmed_epoch);
let meta_iter = blocktree
let meta_iter = blockstore
.slot_meta_iterator(root + 1)
.expect("Couldn't get db iterator");
@ -324,22 +324,22 @@ impl RepairService {
fn initialize_epoch_slots(
id: Pubkey,
blocktree: &Blocktree,
blockstore: &Blockstore,
slots_in_gossip: &mut BTreeSet<Slot>,
root: Slot,
epoch_schedule: &EpochSchedule,
cluster_info: &RwLock<ClusterInfo>,
) {
Self::get_completed_slots_past_root(blocktree, slots_in_gossip, root, epoch_schedule);
Self::get_completed_slots_past_root(blockstore, slots_in_gossip, root, epoch_schedule);
// Safe to set into gossip because by this time, the leader schedule cache should
// also be updated with the latest root (done in blocktree_processor) and thus
// also be updated with the latest root (done in blockstore_processor) and thus
// will provide a schedule to window_service for any incoming shreds up to the
// last_confirmed_epoch.
cluster_info.write().unwrap().push_epoch_slots(
id,
root,
blocktree.lowest_slot(),
blockstore.lowest_slot(),
slots_in_gossip.clone(),
);
}
@ -409,60 +409,60 @@ mod test {
use itertools::Itertools;
use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use solana_ledger::blocktree::{
use solana_ledger::blockstore::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use std::sync::mpsc::channel;
use std::thread::Builder;
#[test]
pub fn test_repair_orphan() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create some orphan slots
let (mut shreds, _) = make_slot_entries(1, 0, 1);
let (shreds2, _) = make_slot_entries(5, 2, 1);
shreds.extend(shreds2);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, 2).unwrap(),
RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0), RepairType::Orphan(2)]
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_repair_empty_slot() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_slot_entries(2, 0, 1);
// Write these shreds to slot 2; they should chain to slot 0, for which we
// haven't received any shreds yet
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// Check that repair tries to patch the empty slot
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, 2).unwrap(),
RepairService::generate_repairs(&blockstore, 0, 2).unwrap(),
vec![RepairType::HighestShred(0, 0)]
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_generate_repairs() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let nth = 3;
let num_slots = 2;
@ -483,7 +483,7 @@ mod test {
missing_indexes_per_slot.insert(0, index);
}
}
blocktree
blockstore
.insert_shreds(shreds_to_write, None, false)
.unwrap();
// sleep so that the holes are ready for repair
@ -497,23 +497,23 @@ mod test {
.collect();
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(),
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected
);
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, expected.len() - 2).unwrap()[..],
RepairService::generate_repairs(&blockstore, 0, expected.len() - 2).unwrap()[..],
expected[0..expected.len() - 2]
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_generate_highest_repair() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 100;
@ -524,25 +524,25 @@ mod test {
// Remove last shred (which is also last in slot) so that slot is not complete
shreds.pop();
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
// We didn't get the last shred for this slot, so ask for the highest shred for that slot
let expected: Vec<RepairType> =
vec![RepairType::HighestShred(0, num_shreds_per_slot - 1)];
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, std::usize::MAX).unwrap(),
RepairService::generate_repairs(&blockstore, 0, std::usize::MAX).unwrap(),
expected
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_repair_range() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
@ -550,7 +550,7 @@ mod test {
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() {
slot_shreds.remove(0);
blocktree.insert_shreds(slot_shreds, None, false).unwrap();
blockstore.insert_shreds(slot_shreds, None, false).unwrap();
}
// sleep to make slot eligible for repair
sleep(Duration::from_secs(1));
@ -574,7 +574,7 @@ mod test {
assert_eq!(
RepairService::generate_repairs_in_range(
&blocktree,
&blockstore,
std::usize::MAX,
&repair_slot_range
)
@ -584,14 +584,14 @@ mod test {
}
}
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_repair_range_highest() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 10;
@ -603,7 +603,7 @@ mod test {
let parent = if i > 0 { i - 1 } else { 0 };
let (shreds, _) = make_slot_entries(i, parent, num_entries_per_slot as u64);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
}
let end = 4;
@ -619,7 +619,7 @@ mod test {
assert_eq!(
RepairService::generate_repairs_in_range(
&blocktree,
&blockstore,
std::usize::MAX,
&repair_slot_range
)
@ -627,14 +627,14 @@ mod test {
expected
);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_get_completed_slots_past_root() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_entries_per_slot = 10;
let root = 10;
@ -656,8 +656,8 @@ mod test {
.collect();
let mut full_slots = BTreeSet::new();
blocktree.insert_shreds(fork1_shreds, None, false).unwrap();
blocktree
blockstore.insert_shreds(fork1_shreds, None, false).unwrap();
blockstore
.insert_shreds(fork2_incomplete_shreds, None, false)
.unwrap();
@ -665,7 +665,7 @@ mod test {
let epoch_schedule = EpochSchedule::custom(32, 32, false);
RepairService::get_completed_slots_past_root(
&blocktree,
&blockstore,
&mut full_slots,
root,
&epoch_schedule,
@ -682,9 +682,9 @@ mod test {
.into_iter()
.flat_map(|(shreds, _)| shreds)
.collect();
blocktree.insert_shreds(fork3_shreds, None, false).unwrap();
blockstore.insert_shreds(fork3_shreds, None, false).unwrap();
RepairService::get_completed_slots_past_root(
&blocktree,
&blockstore,
&mut full_slots,
root,
&epoch_schedule,
@ -692,25 +692,25 @@ mod test {
expected.insert(last_slot);
assert_eq!(full_slots, expected);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_update_epoch_slots() {
let blocktree_path = get_tmp_ledger_path!();
let blockstore_path = get_tmp_ledger_path!();
{
// Create blocktree
let (blocktree, _, completed_slots_receiver) =
Blocktree::open_with_signal(&blocktree_path).unwrap();
// Create blockstore
let (blockstore, _, completed_slots_receiver) =
Blockstore::open_with_signal(&blockstore_path).unwrap();
let blocktree = Arc::new(blocktree);
let blockstore = Arc::new(blockstore);
let mut root = 0;
let num_slots = 100;
let entries_per_slot = 5;
let blocktree_ = blocktree.clone();
let blockstore_ = blockstore.clone();
// Spin up thread to write to blocktree
// Spin up thread to write to blockstore
let writer = Builder::new()
.name("writer".to_string())
.spawn(move || {
@ -729,7 +729,7 @@ mod test {
let step = rng.gen_range(1, max_step + 1) as usize;
let step = std::cmp::min(step, num_shreds - i);
let shreds_to_insert = shreds.drain(..step).collect_vec();
blocktree_
blockstore_
.insert_shreds(shreds_to_insert, None, false)
.unwrap();
sleep(Duration::from_millis(repair_interval_ms));
@ -748,7 +748,7 @@ mod test {
RepairService::update_epoch_slots(
Pubkey::default(),
root,
blocktree.lowest_slot(),
blockstore.lowest_slot(),
&mut root.clone(),
&mut completed_slots,
&cluster_info,
@ -762,7 +762,7 @@ mod test {
// Update with new root, should filter out the slots <= root
root = num_slots / 2;
let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
RepairService::update_epoch_slots(
Pubkey::default(),
root,
@ -777,7 +777,7 @@ mod test {
assert_eq!(completed_slots, expected);
writer.join().unwrap();
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]

View File

@ -12,8 +12,8 @@ use solana_ledger::entry::EntryVerificationStatus;
use solana_ledger::{
bank_forks::BankForks,
block_error::BlockError,
blocktree::{Blocktree, BlocktreeError},
blocktree_processor::{self, TransactionStatusSender},
blockstore::{Blockstore, BlockstoreError},
blockstore_processor::{self, TransactionStatusSender},
entry::{Entry, EntrySlice, VerifyRecyclers},
leader_schedule_cache::LeaderScheduleCache,
snapshot_package::SnapshotPackageSender,
@ -180,7 +180,7 @@ impl ReplayStage {
#[allow(clippy::new_ret_no_self)]
pub fn new(
config: ReplayStageConfig,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<RwLock<ClusterInfo>>,
ledger_signal_receiver: Receiver<bool>,
@ -237,7 +237,7 @@ impl ReplayStage {
let start = allocated.get();
Self::generate_new_bank_forks(
&blocktree,
&blockstore,
&bank_forks,
&leader_schedule_cache,
&subscriptions,
@ -255,7 +255,7 @@ impl ReplayStage {
let start = allocated.get();
let did_complete_bank = Self::replay_active_banks(
&blocktree,
&blockstore,
&bank_forks,
&my_pubkey,
&mut progress,
@ -311,7 +311,7 @@ impl ReplayStage {
&vote_account,
&voting_keypair,
&cluster_info,
&blocktree,
&blockstore,
&leader_schedule_cache,
&root_bank_sender,
stats.total_staked,
@ -328,7 +328,7 @@ impl ReplayStage {
if last_reset != bank.last_blockhash() {
Self::reset_poh_recorder(
&my_pubkey,
&blocktree,
&blockstore,
&bank,
&poh_recorder,
&leader_schedule_cache,
@ -409,7 +409,7 @@ impl ReplayStage {
match result {
Err(RecvTimeoutError::Timeout) => continue,
Err(_) => break,
Ok(_) => trace!("blocktree signal"),
Ok(_) => trace!("blockstore signal"),
};
}
Ok(())
@ -535,16 +535,16 @@ impl ReplayStage {
!Bank::can_commit(&tx_error)
}
Err(Error::BlockError(_)) => true,
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true,
Err(Error::BlocktreeError(BlocktreeError::DeadSlot)) => true,
Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_))) => true,
Err(Error::BlockstoreError(BlockstoreError::DeadSlot)) => true,
_ => false,
}
}
// Returns the replay result and the number of replayed transactions
fn replay_blocktree_into_bank(
fn replay_blockstore_into_bank(
bank: &Arc<Bank>,
blocktree: &Blocktree,
blockstore: &Blockstore,
bank_progress: &mut ForkProgress,
transaction_status_sender: Option<TransactionStatusSender>,
verify_recyclers: &VerifyRecyclers,
@ -552,7 +552,7 @@ impl ReplayStage {
let mut tx_count = 0;
let now = Instant::now();
let load_result =
Self::load_blocktree_entries_with_shred_info(bank, blocktree, bank_progress);
Self::load_blockstore_entries_with_shred_info(bank, blockstore, bank_progress);
let fetch_entries_elapsed = now.elapsed().as_micros();
if load_result.is_err() {
bank_progress.stats.fetch_entries_fail_elapsed += fetch_entries_elapsed as u64;
@ -591,17 +591,17 @@ impl ReplayStage {
("error", format!("error: {:?}", replay_result), String),
("slot", bank.slot(), i64)
);
Self::mark_dead_slot(bank.slot(), blocktree, bank_progress);
Self::mark_dead_slot(bank.slot(), blockstore, bank_progress);
}
(replay_result, tx_count)
}
fn mark_dead_slot(slot: Slot, blocktree: &Blocktree, bank_progress: &mut ForkProgress) {
fn mark_dead_slot(slot: Slot, blockstore: &Blockstore, bank_progress: &mut ForkProgress) {
bank_progress.is_dead = true;
blocktree
blockstore
.set_dead_slot(slot)
.expect("Failed to mark slot as dead in blocktree");
.expect("Failed to mark slot as dead in blockstore");
}
#[allow(clippy::too_many_arguments)]
@ -613,7 +613,7 @@ impl ReplayStage {
vote_account: &Pubkey,
voting_keypair: &Option<Arc<Keypair>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
total_staked: u64,
@ -637,12 +637,12 @@ impl ReplayStage {
let mut rooted_banks = root_bank.parents();
rooted_banks.push(root_bank);
let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect();
// Call leader schedule_cache.set_root() before blocktree.set_root() because
// Call leader schedule_cache.set_root() before blockstore.set_root() because
// bank_forks.root is consumed by repair_service to update gossip, so we don't want
// gossip to request repair shreds before the leader schedule is updated; otherwise
// they may get dropped.
leader_schedule_cache.set_root(rooted_banks.last().unwrap());
blocktree
blockstore
.set_roots(&rooted_slots)
.expect("Ledger set roots failed");
bank_forks
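
A hedged ordering sketch of the comment above; Rooter is hypothetical scaffolding standing in for the leader-schedule cache, the blockstore, and bank_forks.

trait Rooter {
    fn set_root(&mut self, root: u64);
}

fn advance_root(schedule: &mut dyn Rooter, ledger: &mut dyn Rooter, forks: &mut dyn Rooter, root: u64) {
    schedule.set_root(root); // 1. schedule first, so repair peers can resolve it
    ledger.set_root(root);   // 2. blockstore roots next
    forks.set_root(root);    // 3. bank_forks last; repair_service/gossip read this
}
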
@ -699,13 +699,17 @@ impl ReplayStage {
fn reset_poh_recorder(
my_pubkey: &Pubkey,
blocktree: &Blocktree,
blockstore: &Blockstore,
bank: &Arc<Bank>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
) {
let next_leader_slot =
leader_schedule_cache.next_leader_slot(&my_pubkey, bank.slot(), &bank, Some(blocktree));
let next_leader_slot = leader_schedule_cache.next_leader_slot(
&my_pubkey,
bank.slot(),
&bank,
Some(blockstore),
);
poh_recorder
.lock()
.unwrap()
@ -727,7 +731,7 @@ impl ReplayStage {
}
fn replay_active_banks(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
bank_forks: &Arc<RwLock<BankForks>>,
my_pubkey: &Pubkey,
progress: &mut HashMap<u64, ForkProgress>,
@ -756,9 +760,9 @@ impl ReplayStage {
.entry(bank.slot())
.or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash()));
if bank.collector_id() != my_pubkey {
let (replay_result, replay_tx_count) = Self::replay_blocktree_into_bank(
let (replay_result, replay_tx_count) = Self::replay_blockstore_into_bank(
&bank,
&blocktree,
&blockstore,
bank_progress,
transaction_status_sender.clone(),
verify_recyclers,
@ -959,12 +963,12 @@ impl ReplayStage {
}
}
fn load_blocktree_entries_with_shred_info(
fn load_blockstore_entries_with_shred_info(
bank: &Bank,
blocktree: &Blocktree,
blockstore: &Blockstore,
bank_progress: &mut ForkProgress,
) -> Result<(Vec<Entry>, usize, bool)> {
blocktree
blockstore
.get_slot_entries_with_shred_info(bank.slot(), bank_progress.num_shreds as u64)
.map_err(|err| err.into())
}
@ -1078,7 +1082,7 @@ impl ReplayStage {
let mut replay_elapsed = Measure::start("replay_elapsed");
let res =
blocktree_processor::process_entries(bank, entries, true, transaction_status_sender);
blockstore_processor::process_entries(bank, entries, true, transaction_status_sender);
replay_elapsed.stop();
bank_progress.stats.replay_elapsed += replay_elapsed.as_us();
@ -1116,7 +1120,7 @@ impl ReplayStage {
}
fn generate_new_bank_forks(
blocktree: &Blocktree,
blockstore: &Blockstore,
forks_lock: &RwLock<BankForks>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
subscriptions: &Arc<RpcSubscriptions>,
@ -1125,7 +1129,7 @@ impl ReplayStage {
let forks = forks_lock.read().unwrap();
let frozen_banks = forks.frozen_banks();
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
let next_slots = blocktree
let next_slots = blockstore
.get_slots_since(&frozen_bank_slots)
.expect("Db error");
// Filter out what we've already seen
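
A minimal sketch of the frontier computation above: the children of frozen banks for which no bank exists yet, with a HashMap/HashSet standing in for the blockstore index and BankForks.

use std::collections::{HashMap, HashSet};

fn new_fork_tips(
    children: &HashMap<u64, Vec<u64>>, // blockstore next_slots per frozen slot
    frozen: &HashSet<u64>,
) -> Vec<u64> {
    frozen
        .iter()
        .flat_map(|slot| children.get(slot).cloned().unwrap_or_default())
        .filter(|child| !frozen.contains(child))
        .collect()
}
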
@ -1188,8 +1192,8 @@ pub(crate) mod tests {
use crossbeam_channel::unbounded;
use solana_client::rpc_request::RpcEncodedTransaction;
use solana_ledger::{
blocktree::make_slot_entries,
blocktree::{entries_to_test_shreds, BlocktreeError},
blockstore::make_slot_entries,
blockstore::{entries_to_test_shreds, BlockstoreError},
create_new_tmp_ledger,
entry::{self, next_entry},
get_tmp_ledger_path,
@ -1499,8 +1503,9 @@ pub(crate) mod tests {
fn test_child_slots_of_same_parent() {
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let genesis_config = create_genesis_config(10_000).genesis_config;
@ -1512,11 +1517,11 @@ pub(crate) mod tests {
// Insert shred for slot 1, generate new forks, check result
let (shreds, _) = make_slot_entries(1, 0, 8);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.get(1).is_none());
let bank_forks = RwLock::new(bank_forks);
ReplayStage::generate_new_bank_forks(
&blocktree,
&blockstore,
&bank_forks,
&leader_schedule_cache,
&subscriptions,
@ -1525,10 +1530,10 @@ pub(crate) mod tests {
// Insert shred for slot 3, generate new forks, check result
let (shreds, _) = make_slot_entries(2, 0, 8);
blocktree.insert_shreds(shreds, None, false).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.read().unwrap().get(2).is_none());
ReplayStage::generate_new_bank_forks(
&blocktree,
&blockstore,
&bank_forks,
&leader_schedule_cache,
&subscriptions,
@ -1750,7 +1755,7 @@ pub(crate) mod tests {
assert_matches!(
res,
Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_)))
Err(Error::BlockstoreError(BlockstoreError::InvalidShredData(_)))
);
}
@ -1762,8 +1767,9 @@ pub(crate) mod tests {
{
let ledger_path = get_tmp_ledger_path!();
let res = {
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let GenesisConfigInfo {
mut genesis_config,
@ -1778,10 +1784,10 @@ pub(crate) mod tests {
.entry(bank0.slot())
.or_insert_with(|| ForkProgress::new(0, last_blockhash));
let shreds = shred_to_insert(&mint_keypair, bank0.clone());
blocktree.insert_shreds(shreds, None, false).unwrap();
let (res, _tx_count) = ReplayStage::replay_blocktree_into_bank(
blockstore.insert_shreds(shreds, None, false).unwrap();
let (res, _tx_count) = ReplayStage::replay_blockstore_into_bank(
&bank0,
&blocktree,
&blockstore,
&mut bank0_progress,
None,
&VerifyRecyclers::default(),
@ -1793,8 +1799,8 @@ pub(crate) mod tests {
.map(|b| b.is_dead)
.unwrap_or(false));
// Check that the erroring bank was marked as dead in blocktree
assert!(blocktree.is_dead(bank0.slot()));
// Check that the erroring bank was marked as dead in blockstore
assert!(blockstore.is_dead(bank0.slot()));
res
};
let _ignored = remove_dir_all(&ledger_path);
@ -1902,11 +1908,11 @@ pub(crate) mod tests {
);
}
pub fn create_test_transactions_and_populate_blocktree(
pub fn create_test_transactions_and_populate_blockstore(
keypairs: Vec<&Keypair>,
previous_slot: Slot,
bank: Arc<Bank>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
) -> Vec<Signature> {
let mint_keypair = keypairs[0];
let keypair1 = keypairs[1];
@ -1933,19 +1939,19 @@ pub(crate) mod tests {
let entries = vec![entry_1, entry_2, entry_3];
let shreds = entries_to_test_shreds(entries.clone(), slot, previous_slot, true, 0);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[slot]).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(&[slot]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
blocktree.clone(),
blockstore.clone(),
&Arc::new(AtomicBool::new(false)),
);
// Check that process_entries successfully writes the statuses of can_commit
// transactions, and that get_confirmed_block matches them properly
let _result = blocktree_processor::process_entries(
let _result = blockstore_processor::process_entries(
&bank,
&entries,
true,
@ -1966,9 +1972,9 @@ pub(crate) mod tests {
} = create_genesis_config(1000);
let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config);
{
let blocktree = Blocktree::open(&ledger_path)
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to successfully open database ledger");
let blocktree = Arc::new(blocktree);
let blockstore = Arc::new(blockstore);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
@ -1982,14 +1988,14 @@ pub(crate) mod tests {
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
let slot = bank1.slot();
let signatures = create_test_transactions_and_populate_blocktree(
let signatures = create_test_transactions_and_populate_blockstore(
vec![&mint_keypair, &keypair1, &keypair2, &keypair3],
bank0.slot(),
bank1,
blocktree.clone(),
blockstore.clone(),
);
let confirmed_block = blocktree.get_confirmed_block(slot, None).unwrap();
let confirmed_block = blockstore.get_confirmed_block(slot, None).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for (transaction, result) in confirmed_block.transactions.into_iter() {
@ -2010,6 +2016,6 @@ pub(crate) mod tests {
}
}
}
Blocktree::destroy(&ledger_path).unwrap();
Blockstore::destroy(&ledger_path).unwrap();
}
}
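
The test above exercises the rule that a bank whose replay fails is marked dead in the blockstore, so the slot is never replayed again. A minimal standalone sketch of that bookkeeping, with a HashSet standing in for the blockstore's dead-slots bookkeeping (the real store keeps this in a database column) and a hypothetical `replay` helper:

use std::collections::HashSet;

#[derive(Default)]
struct DeadSlots(HashSet<u64>);

impl DeadSlots {
    fn set_dead_slot(&mut self, slot: u64) {
        self.0.insert(slot);
    }
    fn is_dead(&self, slot: u64) -> bool {
        self.0.contains(&slot)
    }
}

fn replay(dead: &mut DeadSlots, slot: u64, result: Result<(), ()>) -> Result<(), ()> {
    if result.is_err() {
        // A failed replay permanently poisons the slot.
        dead.set_dead_slot(slot);
    }
    result
}

fn main() {
    let mut dead = DeadSlots::default();
    assert!(replay(&mut dead, 0, Err(())).is_err());
    assert!(dead.is_dead(0)); // mirrors assert!(blockstore.is_dead(bank0.slot()))
}
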

View File

@ -3,7 +3,7 @@
use crate::cluster_info;
use crate::poh_recorder;
use solana_ledger::block_error;
use solana_ledger::blocktree;
use solana_ledger::blockstore;
use solana_ledger::snapshot_utils;
use solana_sdk::transaction;
use std::any::Any;
@ -27,7 +27,7 @@ pub enum Error {
SendError,
PohRecorderError(poh_recorder::PohRecorderError),
BlockError(block_error::BlockError),
BlocktreeError(blocktree::BlocktreeError),
BlockstoreError(blockstore::BlockstoreError),
FsExtra(fs_extra::error::Error),
SnapshotError(snapshot_utils::SnapshotError),
}
@ -127,9 +127,9 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
Error::PohRecorderError(e)
}
}
impl std::convert::From<blocktree::BlocktreeError> for Error {
fn from(e: blocktree::BlocktreeError) -> Error {
Error::BlocktreeError(e)
impl std::convert::From<blockstore::BlockstoreError> for Error {
fn from(e: blockstore::BlockstoreError) -> Error {
Error::BlockstoreError(e)
}
}
impl std::convert::From<snapshot_utils::SnapshotError> for Error {
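
These `From` conversions are what let functions returning this crate's `Result` apply the `?` operator directly to blockstore calls. A minimal standalone sketch of the pattern, with simplified enums and a hypothetical `read_shred` helper (not the real API):

#[derive(Debug)]
enum BlockstoreError {
    InvalidShredData(String),
}

#[derive(Debug)]
enum Error {
    BlockstoreError(BlockstoreError),
}

impl From<BlockstoreError> for Error {
    fn from(e: BlockstoreError) -> Error {
        Error::BlockstoreError(e)
    }
}

// Hypothetical helper; stands in for any blockstore call that can fail.
fn read_shred(ok: bool) -> Result<(), BlockstoreError> {
    if ok {
        Ok(())
    } else {
        Err(BlockstoreError::InvalidShredData("bad payload".into()))
    }
}

fn process() -> Result<(), Error> {
    read_shred(true)?; // `?` applies From<BlockstoreError> for Error automatically
    Ok(())
}

fn main() {
    assert!(process().is_ok());
}
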

View File

@ -12,7 +12,7 @@ use crate::{
use crossbeam_channel::Receiver as CrossbeamReceiver;
use solana_ledger::{
bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver},
blockstore::{Blockstore, CompletedSlotsReceiver},
leader_schedule_cache::LeaderScheduleCache,
staking_utils,
};
@ -205,7 +205,7 @@ impl RetransmitStage {
pub fn new(
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
@ -234,7 +234,7 @@ impl RetransmitStage {
};
let leader_schedule_cache = leader_schedule_cache.clone();
let window_service = WindowService::new(
blocktree,
blockstore,
cluster_info.clone(),
verified_receiver,
retransmit_sender,
@ -281,7 +281,7 @@ mod tests {
use crate::contact_info::ContactInfo;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use crate::packet::{self, Meta, Packet, Packets};
use solana_ledger::blocktree_processor::{process_blocktree, ProcessOptions};
use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions};
use solana_ledger::create_new_tmp_ledger;
use solana_net_utils::find_available_port_in_range;
use solana_sdk::pubkey::Pubkey;
@ -290,13 +290,13 @@ mod tests {
fn test_skip_repair() {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions {
full_leader_cache: true,
..ProcessOptions::default()
};
let (bank_forks, _, cached_leader_schedule) =
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
process_blockstore(&genesis_config, &blockstore, Vec::new(), opts).unwrap();
let leader_schedule_cache = Arc::new(cached_leader_schedule);
let bank_forks = Arc::new(RwLock::new(bank_forks));
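
The `..ProcessOptions::default()` form in the test above is Rust's struct-update syntax: name the fields you care about and fill the rest from another value. A small self-contained illustration (the field names here are examples, not the full ProcessOptions definition):

#[derive(Default, Debug)]
struct Options {
    full_leader_cache: bool,
    poh_verify: bool,
    dev_halt_at_slot: Option<u64>,
}

fn main() {
    // Override one field; the remaining fields come from Options::default().
    let opts = Options {
        full_leader_cache: true,
        ..Options::default()
    };
    assert!(opts.full_leader_cache);
    assert!(!opts.poh_verify);
    assert_eq!(opts.dev_halt_at_slot, None);
    println!("{:?}", opts);
}
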

View File

@ -18,7 +18,7 @@ use solana_client::rpc_request::{
};
use solana_faucet::faucet::request_airdrop_transaction;
use solana_ledger::{
bank_forks::BankForks, blocktree::Blocktree, rooted_slot_iterator::RootedSlotIterator,
bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
@ -69,7 +69,7 @@ impl Default for JsonRpcConfig {
pub struct JsonRpcRequestProcessor {
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
config: JsonRpcConfig,
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
@ -94,7 +94,7 @@ impl JsonRpcRequestProcessor {
config: JsonRpcConfig,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
storage_state: StorageState,
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
) -> Self {
@ -102,7 +102,7 @@ impl JsonRpcRequestProcessor {
config,
bank_forks,
block_commitment_cache,
blocktree,
blockstore,
storage_state,
validator_exit,
}
@ -318,7 +318,7 @@ impl JsonRpcRequestProcessor {
slot: Slot,
encoding: Option<RpcTransactionEncoding>,
) -> Result<Option<RpcConfirmedBlock>> {
Ok(self.blocktree.get_confirmed_block(slot, encoding).ok())
Ok(self.blockstore.get_confirmed_block(slot, encoding).ok())
}
pub fn get_confirmed_blocks(
@ -331,9 +331,9 @@ impl JsonRpcRequestProcessor {
return Ok(vec![]);
}
let start_slot = (start_slot..end_slot).find(|&slot| self.blocktree.is_root(slot));
let start_slot = (start_slot..end_slot).find(|&slot| self.blockstore.is_root(slot));
if let Some(start_slot) = start_slot {
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blocktree)
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blockstore)
.unwrap()
.map(|(slot, _)| slot)
.collect();
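
The scan above first finds the lowest rooted slot in the requested range, then walks forward collecting roots. A hedged sketch of that control flow, with a plain closure standing in for Blockstore::is_root and a filtered range standing in for RootedSlotIterator:

fn confirmed_blocks(start_slot: u64, end_slot: u64, is_root: impl Fn(u64) -> bool) -> Vec<u64> {
    // Mirror of the find() above: locate the first root at or after start_slot.
    match (start_slot..end_slot).find(|&slot| is_root(slot)) {
        // Walk forward from the first root, keeping only rooted slots.
        Some(first_root) => (first_root..end_slot).filter(|&s| is_root(s)).collect(),
        None => vec![],
    }
}

fn main() {
    let roots = [0u64, 1, 3, 4, 8];
    let is_root = |slot: u64| roots.contains(&slot);
    assert_eq!(confirmed_blocks(0, 5, is_root), vec![0, 1, 3, 4]);
}
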
@ -349,14 +349,14 @@ impl JsonRpcRequestProcessor {
// genesis (i.e. that this bank's slots_per_year will be applicable to any rooted slot being
// queried). If these values become variable in the future, those timing parameters will
// need to be stored persistently, and the slot_duration calculation will likely need to be
// moved upstream into blocktree. Also, an explicit commitment level will need to be set.
// moved upstream into blockstore. Also, an explicit commitment level will need to be set.
let bank = self.bank(None);
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let epoch = bank.epoch_schedule().get_epoch(slot);
let stakes = HashMap::new();
let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
Ok(self.blocktree.get_block_time(slot, slot_duration, stakes))
Ok(self.blockstore.get_block_time(slot, slot_duration, stakes))
}
}
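
The block-time estimate rests on a fixed slot duration derived from slots_per_year. A back-of-envelope sketch of that derivation, assuming the duration is simply a year divided evenly by the slot count; the real slot_duration_from_slots_per_year may differ in detail, and the slot count below is illustrative:

use std::time::Duration;

fn slot_duration_from_slots_per_year(slots_per_year: f64) -> Duration {
    let seconds_per_year = 365.25 * 24.0 * 60.0 * 60.0; // 31,557,600 s
    Duration::from_nanos((seconds_per_year * 1e9 / slots_per_year) as u64)
}

fn main() {
    // ~78.9M slots/year corresponds to a ~400ms slot.
    let d = slot_duration_from_slots_per_year(78_894_000.0);
    assert_eq!(d.as_millis(), 400);
    println!("approx slot duration: {:?}", d);
}
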
@ -1068,13 +1068,13 @@ pub mod tests {
use crate::{
contact_info::ContactInfo,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
replay_stage::tests::create_test_transactions_and_populate_blocktree,
replay_stage::tests::create_test_transactions_and_populate_blockstore,
};
use bincode::deserialize;
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use solana_client::rpc_request::RpcEncodedTransaction;
use solana_ledger::{
blocktree::entries_to_test_shreds, blocktree_processor::fill_blocktree_slot_with_ticks,
blockstore::entries_to_test_shreds, blockstore_processor::fill_blockstore_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path,
};
use solana_sdk::{
@ -1112,12 +1112,12 @@ pub mod tests {
}
fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
start_rpc_handler_with_tx_and_blocktree(pubkey, vec![], 0)
start_rpc_handler_with_tx_and_blockstore(pubkey, vec![], 0)
}
fn start_rpc_handler_with_tx_and_blocktree(
fn start_rpc_handler_with_tx_and_blockstore(
pubkey: &Pubkey,
blocktree_roots: Vec<Slot>,
blockstore_roots: Vec<Slot>,
default_timestamp: i64,
) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
@ -1135,21 +1135,21 @@ pub mod tests {
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blocktree = Arc::new(blocktree);
let blockstore = Blockstore::open(&ledger_path).unwrap();
let blockstore = Arc::new(blockstore);
let keypair1 = Keypair::new();
let keypair2 = Keypair::new();
let keypair3 = Keypair::new();
bank.transfer(4, &alice, &keypair2.pubkey()).unwrap();
let confirmed_block_signatures = create_test_transactions_and_populate_blocktree(
let confirmed_block_signatures = create_test_transactions_and_populate_blockstore(
vec![&alice, &keypair1, &keypair2, &keypair3],
0,
bank.clone(),
blocktree.clone(),
blockstore.clone(),
);
// Add timestamp vote to blocktree
// Add timestamp vote to blockstore
let vote = Vote {
slots: vec![1],
hash: Hash::default(),
@ -1172,10 +1172,10 @@ pub mod tests {
true,
0,
);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[1]).unwrap();
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(&[1]).unwrap();
let mut roots = blocktree_roots.clone();
let mut roots = blockstore_roots.clone();
if !roots.is_empty() {
roots.retain(|&x| x > 1);
let mut parent_bank = bank;
@ -1186,9 +1186,9 @@ pub mod tests {
parent_bank.squash();
bank_forks.write().unwrap().set_root(*root, &None);
let parent = if i > 0 { roots[i - 1] } else { 1 };
fill_blocktree_slot_with_ticks(&blocktree, 5, *root, parent, Hash::default());
fill_blockstore_slot_with_ticks(&blockstore, 5, *root, parent, Hash::default());
}
blocktree.set_roots(&roots).unwrap();
blockstore.set_roots(&roots).unwrap();
let new_bank = Bank::new_from_parent(
&parent_bank,
parent_bank.collector_id(),
@ -1214,7 +1214,7 @@ pub mod tests {
JsonRpcConfig::default(),
bank_forks.clone(),
block_commitment_cache.clone(),
blocktree,
blockstore,
StorageState::default(),
validator_exit,
)));
@ -1261,12 +1261,12 @@ pub mod tests {
let bank = bank_forks.read().unwrap().working_bank();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let request_processor = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
bank_forks,
block_commitment_cache,
Arc::new(blocktree),
Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@ -1752,7 +1752,7 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut io = MetaIoHandler::default();
let rpc = RpcSolImpl;
@ -1763,7 +1763,7 @@ pub mod tests {
JsonRpcConfig::default(),
new_bank_forks().0,
block_commitment_cache,
Arc::new(blocktree),
Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@ -1856,12 +1856,12 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let request_processor = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
new_bank_forks().0,
block_commitment_cache,
Arc::new(blocktree),
Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@ -1875,14 +1875,14 @@ pub mod tests {
let validator_exit = create_validator_exit(&exit);
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut config = JsonRpcConfig::default();
config.enable_validator_exit = true;
let request_processor = JsonRpcRequestProcessor::new(
config,
new_bank_forks().0,
block_commitment_cache,
Arc::new(blocktree),
Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@ -1927,7 +1927,7 @@ pub mod tests {
let block_commitment_cache =
Arc::new(RwLock::new(BlockCommitmentCache::new(block_commitment, 42)));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut config = JsonRpcConfig::default();
config.enable_validator_exit = true;
@ -1935,7 +1935,7 @@ pub mod tests {
config,
new_bank_forks().0,
block_commitment_cache,
Arc::new(blocktree),
Arc::new(blockstore),
StorageState::default(),
validator_exit,
);
@ -2082,7 +2082,7 @@ pub mod tests {
let bob_pubkey = Pubkey::new_rand();
let roots = vec![0, 1, 3, 4, 8];
let RpcHandler { io, meta, .. } =
start_rpc_handler_with_tx_and_blocktree(&bob_pubkey, roots.clone(), 0);
start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone(), 0);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#);
@ -2129,7 +2129,7 @@ pub mod tests {
fn test_get_block_time() {
let bob_pubkey = Pubkey::new_rand();
let base_timestamp = 1576183541;
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blocktree(
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blockstore(
&bob_pubkey,
vec![1, 2, 3, 4, 5, 6, 7],
base_timestamp,

View File

@ -9,7 +9,7 @@ use jsonrpc_http_server::{
hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, RequestMiddleware,
RequestMiddlewareAction, ServerBuilder,
};
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_sdk::hash::Hash;
use std::{
net::SocketAddr,
@ -91,7 +91,7 @@ impl JsonRpcService {
config: JsonRpcConfig,
bank_forks: Arc<RwLock<BankForks>>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
genesis_hash: Hash,
ledger_path: &Path,
@ -104,7 +104,7 @@ impl JsonRpcService {
config,
bank_forks,
block_commitment_cache,
blocktree,
blockstore,
storage_state,
validator_exit.clone(),
)));
@ -204,13 +204,13 @@ mod tests {
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
let blockstore = Blockstore::open(&ledger_path).unwrap();
let mut rpc_service = JsonRpcService::new(
rpc_addr,
JsonRpcConfig::default(),
bank_forks,
block_commitment_cache,
Arc::new(blocktree),
Arc::new(blockstore),
cluster_info,
Hash::default(),
&PathBuf::from("farf"),

View File

@ -2,7 +2,7 @@
use crate::packet::{Packet, PacketsRecycler};
use crate::streamer::{self, PacketReceiver, PacketSender};
use solana_ledger::blocktree::MAX_DATA_SHREDS_PER_SLOT;
use solana_ledger::blockstore::MAX_DATA_SHREDS_PER_SLOT;
use solana_ledger::shred::{OFFSET_OF_SHRED_INDEX, SIZE_OF_SHRED_INDEX};
use solana_perf::cuda_runtime::PinnedVec;
use solana_perf::packet::limited_deserialize;

View File

@ -10,7 +10,7 @@ use crate::{
};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
use solana_sdk::{
account::Account,
@ -177,7 +177,7 @@ impl StorageStage {
pub fn new(
storage_state: &StorageState,
bank_receiver: Receiver<Vec<Arc<Bank>>>,
blocktree: Option<Arc<Blocktree>>,
blockstore: Option<Arc<Blockstore>>,
keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
@ -197,12 +197,12 @@ impl StorageStage {
let mut current_key = 0;
let mut storage_slots = StorageSlots::default();
loop {
if let Some(ref some_blocktree) = blocktree {
if let Some(ref some_blockstore) = blockstore {
if let Err(e) = Self::process_entries(
&storage_keypair,
&storage_state_inner,
&bank_receiver,
&some_blocktree,
&some_blockstore,
&mut storage_slots,
&mut current_key,
slots_per_turn,
@ -368,7 +368,7 @@ impl StorageStage {
fn process_turn(
storage_keypair: &Arc<Keypair>,
state: &Arc<RwLock<StorageStateInner>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
blockhash: Hash,
slot: Slot,
slots_per_segment: u64,
@ -431,7 +431,7 @@ impl StorageStage {
let mut statew = state.write().unwrap();
match chacha_cbc_encrypt_file_many_keys(
blocktree,
blockstore,
segment as u64,
statew.slots_per_segment,
&mut statew.storage_keys,
@ -502,7 +502,7 @@ impl StorageStage {
storage_keypair: &Arc<Keypair>,
storage_state: &Arc<RwLock<StorageStateInner>>,
bank_receiver: &Receiver<Vec<Arc<Bank>>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
storage_slots: &mut StorageSlots,
current_key_idx: &mut usize,
slots_per_turn: u64,
@ -541,7 +541,7 @@ impl StorageStage {
let _ignored = Self::process_turn(
&storage_keypair,
&storage_state,
&blocktree,
&blockstore,
bank.last_blockhash(),
bank.slot(),
bank.slots_per_segment(),

View File

@ -12,7 +12,7 @@ use crate::{
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
};
use crossbeam_channel::unbounded;
use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusSender};
use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender};
use std::{
net::UdpSocket,
sync::{
@ -42,7 +42,7 @@ impl Tpu {
broadcast_sockets: Vec<UdpSocket>,
sigverify_disabled: bool,
transaction_status_sender: Option<TransactionStatusSender>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
broadcast_type: &BroadcastStageType,
exit: &Arc<AtomicBool>,
shred_version: u16,
@ -87,7 +87,7 @@ impl Tpu {
cluster_info.clone(),
entry_receiver,
&exit,
blocktree,
blockstore,
shred_version,
);

View File

@ -1,6 +1,6 @@
use crossbeam_channel::{Receiver, RecvTimeoutError};
use solana_client::rpc_request::RpcTransactionStatus;
use solana_ledger::{blocktree::Blocktree, blocktree_processor::TransactionStatusBatch};
use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusBatch};
use solana_runtime::bank::{Bank, HashAgeKind};
use std::{
sync::{
@ -19,7 +19,7 @@ impl TransactionStatusService {
#[allow(clippy::new_ret_no_self)]
pub fn new(
write_transaction_status_receiver: Receiver<TransactionStatusBatch>,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
exit: &Arc<AtomicBool>,
) -> Self {
let exit = exit.clone();
@ -31,7 +31,7 @@ impl TransactionStatusService {
}
if let Err(RecvTimeoutError::Disconnected) = Self::write_transaction_status_batch(
&write_transaction_status_receiver,
&blocktree,
&blockstore,
) {
break;
}
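
The service wraps a single long-running thread around the pattern shown above: poll a shared exit flag, receive with a timeout so the flag is re-checked periodically, and stop when the sender side disconnects. A generic sketch of that loop using std::sync::mpsc (the diff uses a crossbeam receiver, which behaves the same here); the item type and handler are stand-ins, not the real batch types:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use std::time::Duration;

fn spawn_service<T: Send + 'static>(
    receiver: Receiver<T>,
    exit: Arc<AtomicBool>,
    mut handle: impl FnMut(T) + Send + 'static,
) -> JoinHandle<()> {
    thread::spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            break; // cooperative shutdown via the shared flag
        }
        match receiver.recv_timeout(Duration::from_secs(1)) {
            Ok(item) => handle(item),
            Err(RecvTimeoutError::Timeout) => continue, // re-check the exit flag
            Err(RecvTimeoutError::Disconnected) => break, // all senders dropped
        }
    })
}

fn main() {
    let (tx, rx) = channel();
    let exit = Arc::new(AtomicBool::new(false));
    let worker = spawn_service(rx, exit.clone(), |n: u32| println!("got {}", n));
    tx.send(7).unwrap();
    drop(tx); // disconnecting the sender ends the loop
    worker.join().unwrap();
}
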
@ -42,7 +42,7 @@ impl TransactionStatusService {
fn write_transaction_status_batch(
write_transaction_status_receiver: &Receiver<TransactionStatusBatch>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
) -> Result<(), RecvTimeoutError> {
let TransactionStatusBatch {
bank,
@ -68,7 +68,7 @@ impl TransactionStatusService {
.get_fee_calculator(&fee_hash)
.expect("FeeCalculator must exist");
let fee = fee_calculator.calculate_fee(transaction.message());
blocktree
blockstore
.write_transaction_status(
(slot, transaction.signatures[0]),
&RpcTransactionStatus {
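
The fee lookup above pairs each transaction with the fee calculator for the blockhash it referenced. A hedged sketch of a purely signature-based fee, which is roughly how fees worked in this era of the codebase; the field and method signatures are simplified stand-ins, not the real FeeCalculator API:

struct FeeCalculator {
    lamports_per_signature: u64,
}

impl FeeCalculator {
    // Simplification: fee scales with the number of required signatures.
    fn calculate_fee(&self, num_required_signatures: u64) -> u64 {
        self.lamports_per_signature * num_required_signatures
    }
}

fn main() {
    let calc = FeeCalculator { lamports_per_signature: 5_000 };
    assert_eq!(calc.calculate_fee(2), 10_000);
}
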

View File

@ -21,8 +21,8 @@ use crossbeam_channel::unbounded;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::{
bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver},
blocktree_processor::TransactionStatusSender,
blockstore::{Blockstore, CompletedSlotsReceiver},
blockstore_processor::TransactionStatusSender,
};
use solana_sdk::{
pubkey::Pubkey,
@ -63,7 +63,7 @@ impl Tvu {
/// # Arguments
/// * `cluster_info` - The cluster_info state.
/// * `sockets` - fetch, repair, and retransmit sockets
/// * `blocktree` - the ledger itself
/// * `blockstore` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new(
vote_account: &Pubkey,
@ -72,7 +72,7 @@ impl Tvu {
bank_forks: &Arc<RwLock<BankForks>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sockets: Sockets,
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
storage_state: &StorageState,
blockstream_unix_socket: Option<&PathBuf>,
max_ledger_slots: Option<u64>,
@ -133,7 +133,7 @@ impl Tvu {
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
blocktree.clone(),
blockstore.clone(),
&cluster_info,
Arc::new(retransmit_sockets),
repair_socket,
@ -175,7 +175,7 @@ impl Tvu {
let (replay_stage, root_bank_receiver) = ReplayStage::new(
replay_stage_config,
blocktree.clone(),
blockstore.clone(),
bank_forks.clone(),
cluster_info.clone(),
ledger_signal_receiver,
@ -185,7 +185,7 @@ impl Tvu {
let blockstream_service = if let Some(blockstream_unix_socket) = blockstream_unix_socket {
let blockstream_service = BlockstreamService::new(
blockstream_slot_receiver,
blocktree.clone(),
blockstore.clone(),
blockstream_unix_socket,
&exit,
);
@ -197,7 +197,7 @@ impl Tvu {
let ledger_cleanup_service = max_ledger_slots.map(|max_ledger_slots| {
LedgerCleanupService::new(
ledger_cleanup_slot_receiver,
blocktree.clone(),
blockstore.clone(),
max_ledger_slots,
&exit,
)
@ -206,7 +206,7 @@ impl Tvu {
let storage_stage = StorageStage::new(
storage_state,
root_bank_receiver,
Some(blocktree),
Some(blockstore),
&keypair,
storage_keypair,
&exit,
@ -272,14 +272,14 @@ pub mod tests {
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let (blocktree_path, _) = create_new_tmp_ledger!(&genesis_config);
let (blocktree, l_receiver, completed_slots_receiver) =
Blocktree::open_with_signal(&blocktree_path)
let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
let (blockstore, l_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(&blockstore_path)
.expect("Expected to successfully open ledger");
let blocktree = Arc::new(blocktree);
let blockstore = Arc::new(blockstore);
let bank = bank_forks.working_bank();
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blocktree, None);
create_test_recorder(&bank, &blockstore, None);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
@ -298,7 +298,7 @@ pub mod tests {
forwards: target1.sockets.tvu_forwards,
}
},
blocktree,
blockstore,
&StorageState::default(),
None,
None,

View File

@ -23,8 +23,8 @@ use crossbeam_channel::unbounded;
use solana_ledger::{
bank_forks::{BankForks, SnapshotConfig},
bank_forks_utils,
blocktree::{Blocktree, CompletedSlotsReceiver},
blocktree_processor::{self, BankForksInfo},
blockstore::{Blockstore, CompletedSlotsReceiver},
blockstore_processor::{self, BankForksInfo},
create_new_tmp_ledger,
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
@ -156,12 +156,12 @@ impl Validator {
genesis_hash,
bank_forks,
bank_forks_info,
blocktree,
blockstore,
ledger_signal_receiver,
completed_slots_receiver,
leader_schedule_cache,
poh_config,
) = new_banks_from_blocktree(
) = new_banks_from_blockstore(
config.expected_genesis_hash,
ledger_path,
config.account_paths.clone(),
@ -197,7 +197,7 @@ impl Validator {
bank.slots_per_segment(),
);
let blocktree = Arc::new(blocktree);
let blockstore = Arc::new(blockstore);
let rpc_service = if node.info.rpc.port() == 0 {
None
@ -207,7 +207,7 @@ impl Validator {
config.rpc_config.clone(),
bank_forks.clone(),
block_commitment_cache.clone(),
blocktree.clone(),
blockstore.clone(),
cluster_info.clone(),
genesis_hash,
ledger_path,
@ -237,7 +237,7 @@ impl Validator {
Some(transaction_status_sender),
Some(TransactionStatusService::new(
transaction_status_receiver,
blocktree.clone(),
blockstore.clone(),
&exit,
)),
)
@ -265,11 +265,11 @@ impl Validator {
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)),
leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blockstore)),
bank.ticks_per_slot(),
&id,
&blocktree,
blocktree.new_shreds_signals.first().cloned(),
&blockstore,
blockstore.new_shreds_signals.first().cloned(),
&leader_schedule_cache,
&poh_config,
);
@ -282,7 +282,7 @@ impl Validator {
let gossip_service = GossipService::new(
&cluster_info,
Some(blocktree.clone()),
Some(blockstore.clone()),
Some(bank_forks.clone()),
node.sockets.gossip,
&exit,
@ -347,7 +347,7 @@ impl Validator {
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!(
blocktree.new_shreds_signals.len(),
blockstore.new_shreds_signals.len(),
1,
"New shred signal for the TVU should be the same as the clear bank signal."
);
@ -359,7 +359,7 @@ impl Validator {
&bank_forks,
&cluster_info,
sockets,
blocktree.clone(),
blockstore.clone(),
&storage_state,
config.blockstream_unix_socket.as_ref(),
config.max_ledger_slots,
@ -389,7 +389,7 @@ impl Validator {
node.sockets.broadcast,
config.dev_sigverify_disabled,
transaction_status_sender,
&blocktree,
&blockstore,
&config.broadcast_stage_type,
&exit,
shred_version,
@ -470,9 +470,9 @@ impl Validator {
}
}
pub fn new_banks_from_blocktree(
pub fn new_banks_from_blockstore(
expected_genesis_hash: Option<Hash>,
blocktree_path: &Path,
blockstore_path: &Path,
account_paths: Vec<PathBuf>,
snapshot_config: Option<SnapshotConfig>,
poh_verify: bool,
@ -482,14 +482,14 @@ pub fn new_banks_from_blocktree(
Hash,
BankForks,
Vec<BankForksInfo>,
Blocktree,
Blockstore,
Receiver<bool>,
CompletedSlotsReceiver,
LeaderScheduleCache,
PohConfig,
) {
let genesis_config = GenesisConfig::load(blocktree_path).unwrap_or_else(|err| {
error!("Failed to load genesis from {:?}: {}", blocktree_path, err);
let genesis_config = GenesisConfig::load(blockstore_path).unwrap_or_else(|err| {
error!("Failed to load genesis from {:?}: {}", blockstore_path, err);
process::exit(1);
});
let genesis_hash = genesis_config.hash();
@ -500,24 +500,24 @@ pub fn new_banks_from_blocktree(
error!("genesis hash mismatch: expected {}", expected_genesis_hash);
error!(
"Delete the ledger directory to continue: {:?}",
blocktree_path
blockstore_path
);
process::exit(1);
}
}
let (blocktree, ledger_signal_receiver, completed_slots_receiver) =
Blocktree::open_with_signal(blocktree_path).expect("Failed to open ledger database");
let (blockstore, ledger_signal_receiver, completed_slots_receiver) =
Blockstore::open_with_signal(blockstore_path).expect("Failed to open ledger database");
let process_options = blocktree_processor::ProcessOptions {
let process_options = blockstore_processor::ProcessOptions {
poh_verify,
dev_halt_at_slot,
..blocktree_processor::ProcessOptions::default()
..blockstore_processor::ProcessOptions::default()
};
let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load(
&genesis_config,
&blocktree,
&blockstore,
account_paths,
snapshot_config.as_ref(),
process_options,
@ -535,7 +535,7 @@ pub fn new_banks_from_blocktree(
genesis_hash,
bank_forks,
bank_forks_info,
blocktree,
blockstore,
ledger_signal_receiver,
completed_slots_receiver,
leader_schedule_cache,

View File

@ -1,5 +1,5 @@
//! `window_service` handles incoming shreds on the data plane, storing them in
//! blocktree and retransmitting where required
//! blockstore and retransmitting where required
//!
use crate::cluster_info::ClusterInfo;
use crate::packet::Packets;
@ -13,7 +13,7 @@ use rayon::iter::IntoParallelRefMutIterator;
use rayon::iter::ParallelIterator;
use rayon::ThreadPool;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree::{self, Blocktree, MAX_DATA_SHREDS_PER_SLOT};
use solana_ledger::blockstore::{self, Blockstore, MAX_DATA_SHREDS_PER_SLOT};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
@ -30,7 +30,7 @@ use std::time::{Duration, Instant};
fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
if shred.is_data() {
// Only data shreds have parent information
blocktree::verify_shred_slots(shred.slot(), shred.parent(), root)
blockstore::verify_shred_slots(shred.slot(), shred.parent(), root)
} else {
// Filter out outdated coding shreds
shred.slot() >= root
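
For intuition, a hedged sketch of the two-branch filter above with an invented MockShred type: data shreds must chain from a parent at or above the last root, while coding shreds, which carry no parent, only need a slot at or above the root. The exact rule lives in blockstore::verify_shred_slots and may differ in edge cases:

struct MockShred {
    slot: u64,
    parent: u64,
    is_data: bool,
}

fn verify_shred_slot(shred: &MockShred, root: u64) -> bool {
    if shred.is_data {
        // Parent must be rooted-or-newer and strictly older than the shred's slot.
        root <= shred.parent && shred.parent < shred.slot
    } else {
        // Coding shreds: just drop anything older than the root.
        shred.slot >= root
    }
}

fn main() {
    assert!(verify_shred_slot(&MockShred { slot: 10, parent: 9, is_data: true }, 8));
    assert!(!verify_shred_slot(&MockShred { slot: 7, parent: 6, is_data: true }, 8));
    assert!(verify_shred_slot(&MockShred { slot: 8, parent: 0, is_data: false }, 8));
}
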
@ -75,7 +75,7 @@ pub fn should_retransmit_and_persist(
fn run_insert(
shred_receiver: &CrossbeamReceiver<Vec<Shred>>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> Result<()> {
let timer = Duration::from_millis(200);
@ -85,15 +85,15 @@ fn run_insert(
shreds.append(&mut more_shreds)
}
let blocktree_insert_metrics =
blocktree.insert_shreds(shreds, Some(leader_schedule_cache), false)?;
blocktree_insert_metrics.report_metrics("recv-window-insert-shreds");
let blockstore_insert_metrics =
blockstore.insert_shreds(shreds, Some(leader_schedule_cache), false)?;
blockstore_insert_metrics.report_metrics("recv-window-insert-shreds");
Ok(())
}
fn recv_window<F>(
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
insert_shred_sender: &CrossbeamSender<Vec<Shred>>,
my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
@ -117,7 +117,7 @@ where
let now = Instant::now();
inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
let last_root = blocktree.last_root();
let last_root = blockstore.last_root();
let shreds: Vec<_> = thread_pool.install(|| {
packets
.par_iter_mut()
@ -138,7 +138,7 @@ where
// get retransmitted. It'll allow peer nodes to see this shred
// and trigger them to mark the slot as dead.
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
let _ = blocktree.set_dead_slot(shred.slot());
let _ = blockstore.set_dead_slot(shred.slot());
}
packet.meta.slot = shred.slot();
packet.meta.seed = shred.seed();
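
The guard above fires on the last admissible shred index: once a slot hits the per-slot data-shred cap it can never complete, so it is marked dead, and the shred is still retransmitted so peers reach the same conclusion. A tiny sketch of the predicate; the constant's value matches what the ledger crate used around this time, but treat it as illustrative:

const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768; // illustrative value

fn exceeds_slot_capacity(shred_index: u32) -> bool {
    // Matches the check above: index at or beyond the final admissible index.
    shred_index >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32
}

fn main() {
    assert!(!exceeds_slot_capacity(0));
    assert!(exceeds_slot_capacity(32_767));
}
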
@ -205,7 +205,7 @@ pub struct WindowService {
impl WindowService {
#[allow(clippy::too_many_arguments)]
pub fn new<F>(
blocktree: Arc<Blocktree>,
blockstore: Arc<Blockstore>,
cluster_info: Arc<RwLock<ClusterInfo>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
retransmit: PacketSender,
@ -227,7 +227,7 @@ impl WindowService {
};
let repair_service = RepairService::new(
blocktree.clone(),
blockstore.clone(),
exit.clone(),
repair_socket,
cluster_info.clone(),
@ -238,7 +238,7 @@ impl WindowService {
let t_insert = Self::start_window_insert_thread(
exit,
&blocktree,
&blockstore,
leader_schedule_cache,
insert_receiver,
);
@ -246,7 +246,7 @@ impl WindowService {
let t_window = Self::start_recv_window_thread(
cluster_info.read().unwrap().id(),
exit,
&blocktree,
&blockstore,
insert_sender,
verified_receiver,
shred_filter,
@ -263,12 +263,12 @@ impl WindowService {
fn start_window_insert_thread(
exit: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
insert_receiver: CrossbeamReceiver<Vec<Shred>>,
) -> JoinHandle<()> {
let exit = exit.clone();
let blocktree = blocktree.clone();
let blockstore = blockstore.clone();
let leader_schedule_cache = leader_schedule_cache.clone();
let mut handle_timeout = || {};
let handle_error = || {
@ -281,7 +281,7 @@ impl WindowService {
break;
}
if let Err(e) = run_insert(&insert_receiver, &blocktree, &leader_schedule_cache) {
if let Err(e) = run_insert(&insert_receiver, &blockstore, &leader_schedule_cache) {
if Self::should_exit_on_error(e, &mut handle_timeout, &handle_error) {
break;
}
@ -293,7 +293,7 @@ impl WindowService {
fn start_recv_window_thread<F>(
id: Pubkey,
exit: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
blockstore: &Arc<Blockstore>,
insert_sender: CrossbeamSender<Vec<Shred>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
shred_filter: F,
@ -307,7 +307,7 @@ impl WindowService {
+ std::marker::Sync,
{
let exit = exit.clone();
let blocktree = blocktree.clone();
let blockstore = blockstore.clone();
Builder::new()
.name("solana-window".to_string())
.spawn(move || {
@ -334,7 +334,7 @@ impl WindowService {
}
};
if let Err(e) = recv_window(
&blocktree,
&blockstore,
&insert_sender,
&id,
&verified_receiver,
@ -401,7 +401,7 @@ mod test {
use rand::thread_rng;
use solana_ledger::shred::DataShredHeader;
use solana_ledger::{
blocktree::{make_many_slot_entries, Blocktree},
blockstore::{make_many_slot_entries, Blockstore},
entry::{create_ticks, Entry},
get_tmp_ledger_path,
shred::Shredder,
@ -434,23 +434,23 @@ mod test {
#[test]
fn test_process_shred() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&blockstore_path).unwrap());
let num_entries = 10;
let original_entries = create_ticks(num_entries, 0, Hash::default());
let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new()));
shreds.reverse();
blocktree
blockstore
.insert_shreds(shreds, None, false)
.expect("Expect successful processing of shred");
assert_eq!(
blocktree.get_slot_entries(0, 0, None).unwrap(),
blockstore.get_slot_entries(0, 0, None).unwrap(),
original_entries
);
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
@ -529,18 +529,18 @@ mod test {
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
exit: Arc<AtomicBool>,
) -> WindowService {
let blocktree_path = get_tmp_ledger_path!();
let (blocktree, _, _) = Blocktree::open_with_signal(&blocktree_path)
let blockstore_path = get_tmp_ledger_path!();
let (blockstore, _, _) = Blockstore::open_with_signal(&blockstore_path)
.expect("Expected to be able to open database ledger");
let blocktree = Arc::new(blocktree);
let blockstore = Arc::new(blockstore);
let (retransmit_sender, _retransmit_receiver) = channel();
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
ContactInfo::new_localhost(&Pubkey::default(), 0),
)));
let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
let window = WindowService::new(
blocktree,
blockstore,
cluster_info,
verified_receiver,
retransmit_sender,