Rename db_ledger to blocktree (#2698)

Author:    carllin
Date:      2019-02-07 20:52:39 -08:00
Committed: GitHub
Parent:    e7df3cfe22
Commit:    6e7c5f205b

26 changed files with 526 additions and 526 deletions


@ -73,7 +73,7 @@ name = "bank"
name = "banking_stage"
[[bench]]
name = "db_ledger"
name = "blocktree"
[[bench]]
name = "ledger"


@ -5,15 +5,15 @@ extern crate test;
use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use solana::db_ledger::{get_tmp_ledger_path, DbLedger};
use solana::blocktree::{get_tmp_ledger_path, Blocktree};
use solana::entry::{make_large_test_entries, make_tiny_test_entries, EntrySlice};
use solana::packet::{Blob, BLOB_HEADER_SIZE};
use test::Bencher;
// Given some blobs and a ledger at ledger_path, benchmark writing the blobs to the ledger
fn bench_write_blobs(bench: &mut Bencher, blobs: &mut Vec<Blob>, ledger_path: &str) {
let db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_blobs = blobs.len();
@ -21,7 +21,7 @@ fn bench_write_blobs(bench: &mut Bencher, blobs: &mut Vec<Blob>, ledger_path: &s
for blob in blobs.iter_mut() {
let index = blob.index();
db_ledger
blocktree
.put_data_blob_bytes(
blob.slot(),
index,
@ -33,12 +33,12 @@ fn bench_write_blobs(bench: &mut Bencher, blobs: &mut Vec<Blob>, ledger_path: &s
}
});
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
// Insert some blobs into the ledger in preparation for read benchmarks
fn setup_read_bench(
db_ledger: &mut DbLedger,
blocktree: &mut Blocktree,
num_small_blobs: u64,
num_large_blobs: u64,
slot: u64,
@ -53,7 +53,7 @@ fn setup_read_bench(
b.set_index(index as u64);
b.set_slot(slot);
}
db_ledger
blocktree
.write_blobs(&blobs)
.expect("Expectd successful insertion of blobs into ledger");
}
@ -91,15 +91,15 @@ fn bench_write_big(bench: &mut Bencher) {
#[ignore]
fn bench_read_sequential(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_read_sequential");
let mut db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let mut blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small blobs into the ledger
let num_small_blobs = 32 * 1024;
let num_large_blobs = 32 * 1024;
let total_blobs = num_small_blobs + num_large_blobs;
let slot = 0;
setup_read_bench(&mut db_ledger, num_small_blobs, num_large_blobs, slot);
setup_read_bench(&mut blocktree, num_small_blobs, num_large_blobs, slot);
let num_reads = total_blobs / 15;
let mut rng = rand::thread_rng();
@ -107,26 +107,26 @@ fn bench_read_sequential(bench: &mut Bencher) {
// Generate random starting point in the range [0, total_blobs - 1], read num_reads blobs sequentially
let start_index = rng.gen_range(0, num_small_blobs + num_large_blobs);
for i in start_index..start_index + num_reads {
let _ = db_ledger.get_data_blob(slot, i as u64 % total_blobs);
let _ = blocktree.get_data_blob(slot, i as u64 % total_blobs);
}
});
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_read_random(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_read_random");
let mut db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let mut blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small blobs into the ledger
let num_small_blobs = 32 * 1024;
let num_large_blobs = 32 * 1024;
let total_blobs = num_small_blobs + num_large_blobs;
let slot = 0;
setup_read_bench(&mut db_ledger, num_small_blobs, num_large_blobs, slot);
setup_read_bench(&mut blocktree, num_small_blobs, num_large_blobs, slot);
let num_reads = total_blobs / 15;
@ -138,19 +138,19 @@ fn bench_read_random(bench: &mut Bencher) {
.collect();
bench.iter(move || {
for i in indexes.iter() {
let _ = db_ledger.get_data_blob(slot, *i as u64);
let _ = blocktree.get_data_blob(slot, *i as u64);
}
});
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_insert_data_blob_small(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_insert_data_blob_small");
let db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = make_tiny_test_entries(num_entries);
let mut blobs = entries.to_blobs();
@ -162,18 +162,18 @@ fn bench_insert_data_blob_small(bench: &mut Bencher) {
let index = blob.index();
blob.set_index(index + num_entries as u64);
}
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
});
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_insert_data_blob_big(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_insert_data_blob_big");
let db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = make_large_test_entries(num_entries);
let mut shared_blobs = entries.to_shared_blobs();
@ -182,10 +182,10 @@ fn bench_insert_data_blob_big(bench: &mut Bencher) {
bench.iter(move || {
for blob in shared_blobs.iter_mut() {
let index = blob.read().unwrap().index();
db_ledger.write_shared_blobs(vec![blob.clone()]).unwrap();
blocktree.write_shared_blobs(vec![blob.clone()]).unwrap();
blob.write().unwrap().set_index(index + num_entries as u64);
}
});
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}


@ -1,7 +1,7 @@
//! A command-line executable for generating the chain's genesis block.
use clap::{crate_version, value_t_or_exit, App, Arg};
use solana::db_ledger::create_new_ledger;
use solana::blocktree::create_new_ledger;
use solana::genesis_block::GenesisBlock;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
use std::error;
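
A minimal sketch (not from the diff) of the call this binary is built around,
using only signatures visible in this commit; the ledger paths are
hypothetical:

    use solana::blocktree::create_new_ledger;
    use solana::genesis_block::GenesisBlock;

    // create_new_ledger destroys any Blocktree at the target path, writes the
    // genesis block, and appends one bootstrap tick linked back to it,
    // returning (entry_height, last_id).
    let genesis_block = GenesisBlock::load("old-ledger").unwrap();
    let (entry_height, last_id) =
        create_new_ledger("new-ledger", &genesis_block).unwrap();
    println!("entry_height={} last_id={:?}", entry_height, last_id);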


@ -1,6 +1,6 @@
use clap::{crate_version, App, Arg, SubCommand};
use solana::bank::Bank;
use solana::db_ledger::DbLedger;
use solana::blocktree::Blocktree;
use solana::genesis_block::GenesisBlock;
use std::io::{stdout, Write};
use std::process::exit;
@ -55,15 +55,15 @@ fn main() {
exit(1);
});
let db_ledger = match DbLedger::open(ledger_path) {
Ok(db_ledger) => db_ledger,
let blocktree = match Blocktree::open(ledger_path) {
Ok(blocktree) => blocktree,
Err(err) => {
eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
exit(1);
}
};
let entries = match db_ledger.read_ledger() {
let entries = match blocktree.read_ledger() {
Ok(entries) => entries,
Err(err) => {
eprintln!("Failed to read ledger at {}: {}", ledger_path, err);


@ -1,4 +1,4 @@
use solana::db_ledger::create_tmp_sample_ledger;
use solana::blocktree::create_tmp_sample_ledger;
use solana_sdk::signature::{Keypair, KeypairUtil};
use assert_cmd::prelude::*;


@ -1,4 +1,4 @@
//! The `db_ledger` module provides functions for parallel verification of the
//! The `blocktree` module provides functions for parallel verification of the
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.
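
A minimal sketch (assuming slot 0 and the helpers exported by this module) of
the renamed API's round trip; the tests below exercise the same calls:

    use solana::blocktree::{get_tmp_ledger_path, Blocktree, DEFAULT_SLOT_HEIGHT};
    use solana::entry::create_ticks;
    use solana_sdk::hash::Hash;

    let ledger_path = get_tmp_ledger_path("blocktree_round_trip");
    let blocktree = Blocktree::open(&ledger_path).expect("open");
    let entries = create_ticks(4, Hash::default());
    blocktree
        .write_entries(DEFAULT_SLOT_HEIGHT, 0, &entries)
        .expect("write");
    assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), entries);
    // Close before destroying; destroying an open database is undefined
    // behavior (see the tests below).
    drop(blocktree);
    Blocktree::destroy(&ledger_path).expect("destroy");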
@ -29,15 +29,15 @@ use std::rc::Rc;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::Arc;
pub type DbLedgerRawIterator = rocksdb::DBRawIterator;
pub type BlocktreeRawIterator = rocksdb::DBRawIterator;
pub const DB_LEDGER_DIRECTORY: &str = "rocksdb";
pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";
// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: usize = 512 * 1024 * 1024;
#[derive(Debug)]
pub enum DbLedgerError {
pub enum BlocktreeError {
BlobForIndexExists,
InvalidBlobData,
RocksDb(rocksdb::Error),
@ -45,7 +45,7 @@ pub enum DbLedgerError {
impl std::convert::From<rocksdb::Error> for Error {
fn from(e: rocksdb::Error) -> Error {
Error::DbLedgerError(DbLedgerError::RocksDb(e))
Error::BlocktreeError(BlocktreeError::RocksDb(e))
}
}
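
Caller-side sketch (hypothetical; whether the duplicate-index error surfaces
from a given wrapper depends on code outside this hunk) of matching the
renamed error variants:

    match blocktree.insert_data_blobs(vec![&blob]) {
        // The index was already consumed or inserted; treat as stored.
        Err(Error::BlocktreeError(BlocktreeError::BlobForIndexExists)) => (),
        Ok(_entries) => (),
        Err(e) => return Err(e),
    }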
@ -112,7 +112,7 @@ pub trait LedgerColumnFamilyRaw {
Ok(())
}
fn raw_iterator(&self) -> DbLedgerRawIterator {
fn raw_iterator(&self) -> BlocktreeRawIterator {
let db = self.db();
db.raw_iterator_cf(self.handle())
.expect("Expected to be able to open database iterator")
@ -145,13 +145,13 @@ pub struct SlotMeta {
}
impl SlotMeta {
pub fn contains_all_ticks(&self, db_ledger: &DbLedger) -> bool {
pub fn contains_all_ticks(&self, blocktree: &Blocktree) -> bool {
if self.num_blocks == 0 {
// A placeholder slot does not contain all the ticks
false
} else {
let num_expected_ticks = {
let num = self.num_expected_ticks(db_ledger);
let num = self.num_expected_ticks(blocktree);
if self.slot_height == 0 {
num - 1
} else {
@ -162,8 +162,8 @@ impl SlotMeta {
}
}
pub fn num_expected_ticks(&self, db_ledger: &DbLedger) -> u64 {
db_ledger.ticks_per_slot * self.num_blocks
pub fn num_expected_ticks(&self, blocktree: &Blocktree) -> u64 {
blocktree.ticks_per_slot * self.num_blocks
}
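// Worked example of the two methods above: with ticks_per_slot = 10 and
// num_blocks = 2, num_expected_ticks() is 20, and contains_all_ticks()
// expects one tick fewer (19) when slot_height == 0, per the branch above.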
fn new(slot_height: u64, num_blocks: u64) -> Self {
@ -331,24 +331,24 @@ impl LedgerColumnFamilyRaw for ErasureCf {
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub struct DbLedgerConfig {
pub struct BlocktreeConfig {
pub ticks_per_slot: u64,
}
impl DbLedgerConfig {
impl BlocktreeConfig {
pub fn new(ticks_per_slot: u64) -> Self {
DbLedgerConfig { ticks_per_slot }
BlocktreeConfig { ticks_per_slot }
}
}
impl Default for DbLedgerConfig {
impl Default for BlocktreeConfig {
fn default() -> Self {
Self::new(DEFAULT_TICKS_PER_SLOT)
}
}
// ledger window
pub struct DbLedger {
pub struct Blocktree {
// Underlying database is automatically closed in the Drop implementation of DB
db: Arc<DB>,
meta_cf: MetaCf,
@ -369,11 +369,11 @@ pub const DATA_CF: &str = "data";
// Column family for erasure data
pub const ERASURE_CF: &str = "erasure";
impl DbLedger {
impl Blocktree {
// Opens a Ledger in directory, provides "infinite" window of blobs
pub fn open(ledger_path: &str) -> Result<Self> {
fs::create_dir_all(&ledger_path)?;
let ledger_path = Path::new(ledger_path).join(DB_LEDGER_DIRECTORY);
let ledger_path = Path::new(ledger_path).join(BLOCKTREE_DIRECTORY);
// Use default database options
let db_options = Self::get_db_options();
@ -403,7 +403,7 @@ impl DbLedger {
// TODO: make these constructor arguments
// Issue: https://github.com/solana-labs/solana/issues/2458
let ticks_per_slot = DEFAULT_TICKS_PER_SLOT;
Ok(DbLedger {
Ok(Blocktree {
db,
meta_cf,
data_cf,
@ -414,29 +414,29 @@ impl DbLedger {
}
pub fn open_with_signal(ledger_path: &str) -> Result<(Self, SyncSender<bool>, Receiver<bool>)> {
let mut db_ledger = Self::open(ledger_path)?;
let mut blocktree = Self::open(ledger_path)?;
let (signal_sender, signal_receiver) = sync_channel(1);
db_ledger.new_blobs_signals = vec![signal_sender.clone()];
blocktree.new_blobs_signals = vec![signal_sender.clone()];
Ok((db_ledger, signal_sender, signal_receiver))
Ok((blocktree, signal_sender, signal_receiver))
}
pub fn open_config(ledger_path: &str, config: DbLedgerConfig) -> Result<Self> {
let mut db_ledger = Self::open(ledger_path)?;
db_ledger.ticks_per_slot = config.ticks_per_slot;
Ok(db_ledger)
pub fn open_config(ledger_path: &str, config: BlocktreeConfig) -> Result<Self> {
let mut blocktree = Self::open(ledger_path)?;
blocktree.ticks_per_slot = config.ticks_per_slot;
Ok(blocktree)
}
pub fn open_with_config_signal(
ledger_path: &str,
config: DbLedgerConfig,
config: BlocktreeConfig,
) -> Result<(Self, SyncSender<bool>, Receiver<bool>)> {
let mut db_ledger = Self::open(ledger_path)?;
let mut blocktree = Self::open(ledger_path)?;
let (signal_sender, signal_receiver) = sync_channel(1);
db_ledger.new_blobs_signals = vec![signal_sender.clone()];
db_ledger.ticks_per_slot = config.ticks_per_slot;
blocktree.new_blobs_signals = vec![signal_sender.clone()];
blocktree.ticks_per_slot = config.ticks_per_slot;
Ok((db_ledger, signal_sender, signal_receiver))
Ok((blocktree, signal_sender, signal_receiver))
}
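// Usage sketch (mirrors test_new_blobs_signal below): pair a custom tick
// count with the blob-arrival channel --
//   let config = BlocktreeConfig::new(ticks_per_slot);
//   let (blocktree, _sender, receiver) =
//       Blocktree::open_with_config_signal(&ledger_path, config)?;
// receiver then blocks in recv() until new blobs are inserted.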
pub fn meta(&self, slot_height: u64) -> Result<Option<SlotMeta>> {
@ -446,7 +446,7 @@ impl DbLedger {
pub fn destroy(ledger_path: &str) -> Result<()> {
// DB::destroy() fails if `ledger_path` doesn't exist
fs::create_dir_all(&ledger_path)?;
let ledger_path = Path::new(ledger_path).join(DB_LEDGER_DIRECTORY);
let ledger_path = Path::new(ledger_path).join(BLOCKTREE_DIRECTORY);
DB::destroy(&Options::default(), &ledger_path)?;
Ok(())
}
@ -599,7 +599,7 @@ impl DbLedger {
}
}
// TODO: Delete returning these entries and instead have replay_stage query db_ledger
// TODO: Delete returning these entries and instead have replay_stage query blocktree
// for updates. Returning these entries temporarily supports the current API so as
// not to break functionality in db_window.
// Issue: https://github.com/solana-labs/solana/issues/2444
@ -731,7 +731,7 @@ impl DbLedger {
// indexes in the ledger in the range [start_index, end_index)
// for the slot with slot_height == slot
fn find_missing_indexes(
db_iterator: &mut DbLedgerRawIterator,
db_iterator: &mut BlocktreeRawIterator,
slot: u64,
start_index: u64,
end_index: u64,
@ -1108,7 +1108,7 @@ impl DbLedger {
if blob_index < slot_meta.consumed
|| prev_inserted_blob_datas.contains_key(&(blob_slot, blob_index))
{
return Err(Error::DbLedgerError(DbLedgerError::BlobForIndexExists));
return Err(Error::BlocktreeError(BlocktreeError::BlobForIndexExists));
}
let (new_consumed, new_consumed_ticks, blob_datas) = {
@ -1162,7 +1162,7 @@ impl DbLedger {
slot_meta.received = cmp::max(blob_index + 1, slot_meta.received);
slot_meta.consumed = new_consumed;
slot_meta.consumed_ticks += new_consumed_ticks;
// TODO: Remove returning these entries and instead have replay_stage query db_ledger
// TODO: Remove returning these entries and instead have replay_stage query blocktree
// for updates. Returning these entries temporarily supports the current API so as
// not to break functionality in db_window.
// Issue: https://github.com/solana-labs/solana/issues/2444
@ -1245,12 +1245,12 @@ struct EntryIterator {
// can do this in parallel
last_id: Option<Hash>,
// https://github.com/rust-rocksdb/rust-rocksdb/issues/234
// rocksdb issue: the _db_ledger member must be lower in the struct to prevent a crash
// rocksdb issue: the _blocktree member must be lower in the struct to prevent a crash
// when the db_iterator member above is dropped.
// _db_ledger is unused, but dropping _db_ledger results in a broken db_iterator
// _blocktree is unused, but dropping _blocktree results in a broken db_iterator
// you have to hold the database open in order to iterate over it, and in order
// for db_iterator to be able to run Drop
// _db_ledger: DbLedger,
// _blocktree: Blocktree,
}
impl Iterator for EntryIterator {
@ -1276,13 +1276,13 @@ impl Iterator for EntryIterator {
}
pub fn create_new_ledger(ledger_path: &str, genesis_block: &GenesisBlock) -> Result<(u64, Hash)> {
DbLedger::destroy(ledger_path)?;
Blocktree::destroy(ledger_path)?;
genesis_block.write(&ledger_path)?;
// Add a single tick linked back to the genesis_block to bootstrap the ledger
let db_ledger = DbLedger::open(ledger_path)?;
let blocktree = Blocktree::open(ledger_path)?;
let entries = crate::entry::create_ticks(1, genesis_block.last_id());
db_ledger.write_entries(DEFAULT_SLOT_HEIGHT, 0, &entries)?;
blocktree.write_entries(DEFAULT_SLOT_HEIGHT, 0, &entries)?;
Ok((1, entries[0].id))
}
@ -1291,7 +1291,7 @@ pub fn genesis<'a, I>(ledger_path: &str, keypair: &Keypair, entries: I) -> Resul
where
I: IntoIterator<Item = &'a Entry>,
{
let db_ledger = DbLedger::open(ledger_path)?;
let blocktree = Blocktree::open(ledger_path)?;
// TODO sign these blobs with keypair
let blobs: Vec<_> = entries
@ -1306,7 +1306,7 @@ where
})
.collect();
db_ledger.write_genesis_blobs(&blobs[..])?;
blocktree.write_genesis_blobs(&blobs[..])?;
Ok(())
}
@ -1344,8 +1344,8 @@ pub fn create_tmp_sample_ledger(
if num_extra_ticks > 0 {
let entries = crate::entry::create_ticks(num_extra_ticks, last_id);
let db_ledger = DbLedger::open(&ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&ledger_path).unwrap();
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, entry_height, &entries)
.unwrap();
entry_height += entries.len() as u64;
@ -1357,13 +1357,13 @@ pub fn create_tmp_sample_ledger(
pub fn tmp_copy_ledger(from: &str, name: &str) -> String {
let path = get_tmp_ledger_path(name);
let db_ledger = DbLedger::open(from).unwrap();
let blobs = db_ledger.read_ledger_blobs();
let blocktree = Blocktree::open(from).unwrap();
let blobs = blocktree.read_ledger_blobs();
let genesis_block = GenesisBlock::load(from).unwrap();
DbLedger::destroy(&path).expect("Expected successful database destruction");
let db_ledger = DbLedger::open(&path).unwrap();
db_ledger.write_blobs(blobs).unwrap();
Blocktree::destroy(&path).expect("Expected successful database destruction");
let blocktree = Blocktree::open(&path).unwrap();
blocktree.write_blobs(blobs).unwrap();
genesis_block.write(&path).unwrap();
path
@ -1382,7 +1382,7 @@ mod tests {
#[test]
fn test_put_get_simple() {
let ledger_path = get_tmp_ledger_path("test_put_get_simple");
let ledger = DbLedger::open(&ledger_path).unwrap();
let ledger = Blocktree::open(&ledger_path).unwrap();
// Test meta column family
let meta = SlotMeta::new(DEFAULT_SLOT_HEIGHT, 1);
@ -1424,7 +1424,7 @@ mod tests {
// Destroying database without closing it first is undefined behavior
drop(ledger);
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
@ -1437,7 +1437,7 @@ mod tests {
let blobs: Vec<&Blob> = blob_locks.iter().map(|b| &**b).collect();
let ledger_path = get_tmp_ledger_path("test_read_blobs_bytes");
let ledger = DbLedger::open(&ledger_path).unwrap();
let ledger = Blocktree::open(&ledger_path).unwrap();
ledger.write_blobs(blobs.clone()).unwrap();
let mut buf = [0; 1024];
@ -1489,7 +1489,7 @@ mod tests {
// Destroying database without closing it first is undefined behavior
drop(ledger);
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
@ -1506,7 +1506,7 @@ mod tests {
let blobs: Vec<&Blob> = blob_locks.iter().map(|b| &**b).collect();
let ledger_path = get_tmp_ledger_path("test_insert_data_blobs_basic");
let ledger = DbLedger::open(&ledger_path).unwrap();
let ledger = Blocktree::open(&ledger_path).unwrap();
// Insert second blob, we're missing the first blob, so no consecutive
// blobs starting from slot 0, index 0 should exist.
@ -1542,7 +1542,7 @@ mod tests {
// Destroying database without closing it first is undefined behavior
drop(ledger);
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
@ -1558,7 +1558,7 @@ mod tests {
let blobs: Vec<&Blob> = blob_locks.iter().map(|b| &**b).collect();
let ledger_path = get_tmp_ledger_path("test_insert_data_blobs_multiple");
let ledger = DbLedger::open(&ledger_path).unwrap();
let ledger = Blocktree::open(&ledger_path).unwrap();
// Insert blobs in reverse, check for consecutive returned blobs
for i in (0..num_blobs).rev() {
@ -1581,7 +1581,7 @@ mod tests {
// Destroying database without closing it first is undefined behavior
drop(ledger);
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
@ -1593,9 +1593,9 @@ mod tests {
#[test]
pub fn test_iteration_order() {
let slot = 0;
let db_ledger_path = get_tmp_ledger_path("test_iteration_order");
let blocktree_path = get_tmp_ledger_path("test_iteration_order");
{
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let num_entries = 8;
@ -1608,13 +1608,13 @@ mod tests {
w_b.set_slot(DEFAULT_SLOT_HEIGHT);
}
db_ledger
blocktree
.write_shared_blobs(&shared_blobs)
.expect("Expected successful write of blobs");
let mut db_iterator = db_ledger
let mut db_iterator = blocktree
.db
.raw_iterator_cf(db_ledger.data_cf.handle())
.raw_iterator_cf(blocktree.data_cf.handle())
.expect("Expected to be able to open database iterator");
db_iterator.seek(&DataCf::key(slot, 1));
@ -1629,14 +1629,14 @@ mod tests {
db_iterator.next();
}
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_get_slot_entries1() {
let db_ledger_path = get_tmp_ledger_path("test_get_slot_entries1");
let blocktree_path = get_tmp_ledger_path("test_get_slot_entries1");
{
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let entries = make_tiny_test_entries(8);
let mut blobs = entries.clone().to_blobs();
for (i, b) in blobs.iter_mut().enumerate() {
@ -1647,28 +1647,28 @@ mod tests {
b.set_index(8 + i as u64);
}
}
db_ledger
blocktree
.write_blobs(&blobs)
.expect("Expected successful write of blobs");
assert_eq!(
db_ledger.get_slot_entries(1, 2, None).unwrap()[..],
blocktree.get_slot_entries(1, 2, None).unwrap()[..],
entries[2..4],
);
assert_eq!(
db_ledger.get_slot_entries(1, 12, None).unwrap()[..],
blocktree.get_slot_entries(1, 12, None).unwrap()[..],
entries[4..],
);
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_get_slot_entries2() {
let db_ledger_path = get_tmp_ledger_path("test_get_slot_entries2");
let blocktree_path = get_tmp_ledger_path("test_get_slot_entries2");
{
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let num_slots = 5 as u64;
@ -1682,25 +1682,25 @@ mod tests {
b.set_slot(slot_height as u64);
index += 1;
}
db_ledger
blocktree
.write_blobs(&blobs)
.expect("Expected successful write of blobs");
assert_eq!(
db_ledger
blocktree
.get_slot_entries(slot_height, index - 1, None)
.unwrap(),
vec![last_entry],
);
}
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_insert_data_blobs_consecutive() {
let db_ledger_path = get_tmp_ledger_path("test_insert_data_blobs_consecutive");
let blocktree_path = get_tmp_ledger_path("test_insert_data_blobs_consecutive");
{
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let slot = 0;
// Write entries
@ -1713,43 +1713,43 @@ mod tests {
w_b.set_slot(slot);
}
db_ledger
blocktree
.write_shared_blobs(shared_blobs.iter().skip(1).step_by(2))
.unwrap();
assert_eq!(db_ledger.get_slot_entries(0, 0, None).unwrap(), vec![]);
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), vec![]);
let meta_key = MetaCf::key(slot);
let meta = db_ledger.meta_cf.get(&meta_key).unwrap().unwrap();
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
assert_eq!(meta.received, num_entries);
assert_eq!(meta.consumed, 0);
assert_eq!(meta.consumed_ticks, 0);
db_ledger
blocktree
.write_shared_blobs(shared_blobs.iter().step_by(2))
.unwrap();
assert_eq!(
db_ledger.get_slot_entries(0, 0, None).unwrap(),
blocktree.get_slot_entries(0, 0, None).unwrap(),
original_entries,
);
let meta_key = MetaCf::key(slot);
let meta = db_ledger.meta_cf.get(&meta_key).unwrap().unwrap();
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
assert_eq!(meta.received, num_entries);
assert_eq!(meta.consumed, num_entries);
assert_eq!(meta.consumed_ticks, num_entries);
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_insert_data_blobs_duplicate() {
// Create RocksDb ledger
let db_ledger_path = get_tmp_ledger_path("test_insert_data_blobs_duplicate");
let blocktree_path = get_tmp_ledger_path("test_insert_data_blobs_duplicate");
{
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let num_entries = 10 as u64;
@ -1766,7 +1766,7 @@ mod tests {
w_b.set_index(index);
}
db_ledger
blocktree
.write_shared_blobs(
shared_blobs
.iter()
@ -1775,9 +1775,9 @@ mod tests {
)
.unwrap();
assert_eq!(db_ledger.get_slot_entries(0, 0, None).unwrap(), vec![]);
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), vec![]);
db_ledger
blocktree
.write_shared_blobs(shared_blobs.iter().step_by(num_duplicates * 2))
.unwrap();
@ -1786,14 +1786,14 @@ mod tests {
.step_by(num_duplicates)
.collect();
assert_eq!(db_ledger.get_slot_entries(0, 0, None).unwrap(), expected,);
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), expected,);
let meta_key = MetaCf::key(DEFAULT_SLOT_HEIGHT);
let meta = db_ledger.meta_cf.get(&meta_key).unwrap().unwrap();
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
assert_eq!(meta.consumed, num_entries);
assert_eq!(meta.received, num_entries);
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
@ -1804,7 +1804,7 @@ mod tests {
{
genesis(&ledger_path, &Keypair::new(), &entries).unwrap();
let ledger = DbLedger::open(&ledger_path).expect("open failed");
let ledger = Blocktree::open(&ledger_path).expect("open failed");
let read_entries: Vec<Entry> =
ledger.read_ledger().expect("read_ledger failed").collect();
@ -1812,7 +1812,7 @@ mod tests {
assert_eq!(entries, read_entries);
}
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_entry_iterator_up_to_consumed() {
@ -1822,7 +1822,7 @@ mod tests {
// put entries except last 2 into ledger
genesis(&ledger_path, &Keypair::new(), &entries[..entries.len() - 2]).unwrap();
let ledger = DbLedger::open(&ledger_path).expect("open failed");
let ledger = Blocktree::open(&ledger_path).expect("open failed");
// now write the last entry, ledger has a hole in it one before the end
// +-+-+-+-+-+-+-+ +-+
@ -1844,7 +1844,7 @@ mod tests {
assert_eq!(entries[..entries.len() - 2].to_vec(), read_entries);
}
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
@ -1852,8 +1852,8 @@ mod tests {
// Initialize ledger
let ledger_path = get_tmp_ledger_path("test_new_blobs_signal");
let ticks_per_slot = 10;
let config = DbLedgerConfig::new(ticks_per_slot);
let (ledger, _, recvr) = DbLedger::open_with_config_signal(&ledger_path, config).unwrap();
let config = BlocktreeConfig::new(ticks_per_slot);
let (ledger, _, recvr) = Blocktree::open_with_config_signal(&ledger_path, config).unwrap();
let ledger = Arc::new(ledger);
// Create ticks for slot 0
@ -1951,16 +1951,16 @@ mod tests {
// Destroying database without closing it first is undefined behavior
drop(ledger);
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_handle_chaining_basic() {
let db_ledger_path = get_tmp_ledger_path("test_handle_chaining_basic");
let blocktree_path = get_tmp_ledger_path("test_handle_chaining_basic");
{
let ticks_per_slot = 2;
let config = DbLedgerConfig::new(ticks_per_slot);
let db_ledger = DbLedger::open_config(&db_ledger_path, config).unwrap();
let config = BlocktreeConfig::new(ticks_per_slot);
let blocktree = Blocktree::open_config(&blocktree_path, config).unwrap();
let entries = create_ticks(6, Hash::default());
let mut blobs = entries.to_blobs();
@ -1970,10 +1970,10 @@ mod tests {
}
// 1) Write to the first slot
db_ledger
blocktree
.write_blobs(&blobs[ticks_per_slot as usize..2 * ticks_per_slot as usize])
.unwrap();
let s1 = db_ledger.meta_cf.get_slot_meta(1).unwrap().unwrap();
let s1 = blocktree.meta_cf.get_slot_meta(1).unwrap().unwrap();
assert!(s1.next_slots.is_empty());
// Slot 1 is not trunk because slot 0 hasn't been inserted yet
assert!(!s1.is_trunk);
@ -1981,10 +1981,10 @@ mod tests {
assert_eq!(s1.consumed_ticks, ticks_per_slot);
// 2) Write to the second slot
db_ledger
blocktree
.write_blobs(&blobs[2 * ticks_per_slot as usize..3 * ticks_per_slot as usize])
.unwrap();
let s2 = db_ledger.meta_cf.get_slot_meta(2).unwrap().unwrap();
let s2 = blocktree.meta_cf.get_slot_meta(2).unwrap().unwrap();
assert!(s2.next_slots.is_empty());
// Slot 2 is not trunk because slot 0 hasn't been inserted yet
assert!(!s2.is_trunk);
@ -1993,18 +1993,18 @@ mod tests {
// Check the first slot again, it should chain to the second slot,
// but still isn't part of the trunk
let s1 = db_ledger.meta_cf.get_slot_meta(1).unwrap().unwrap();
let s1 = blocktree.meta_cf.get_slot_meta(1).unwrap().unwrap();
assert_eq!(s1.next_slots, vec![2]);
assert!(!s1.is_trunk);
assert_eq!(s1.consumed_ticks, ticks_per_slot);
// 3) Write to the zeroth slot, check that every slot
// is now part of the trunk
db_ledger
blocktree
.write_blobs(&blobs[0..ticks_per_slot as usize])
.unwrap();
for i in 0..3 {
let s = db_ledger.meta_cf.get_slot_meta(i).unwrap().unwrap();
let s = blocktree.meta_cf.get_slot_meta(i).unwrap().unwrap();
// The last slot will not chain to any other slots
if i != 2 {
assert_eq!(s.next_slots, vec![i + 1]);
@ -2018,17 +2018,17 @@ mod tests {
assert!(s.is_trunk);
}
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_handle_chaining_missing_slots() {
let db_ledger_path = get_tmp_ledger_path("test_handle_chaining_missing_slots");
let blocktree_path = get_tmp_ledger_path("test_handle_chaining_missing_slots");
{
let mut db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let mut blocktree = Blocktree::open(&blocktree_path).unwrap();
let num_slots = 30;
let ticks_per_slot = 2;
db_ledger.ticks_per_slot = ticks_per_slot as u64;
blocktree.ticks_per_slot = ticks_per_slot as u64;
let ticks = create_ticks((num_slots / 2) * ticks_per_slot, Hash::default());
let mut blobs = ticks.to_blobs();
@ -2039,7 +2039,7 @@ mod tests {
b.set_slot(((i / 2) * 2 + 1) as u64);
}
db_ledger.write_blobs(&blobs[..]).unwrap();
blocktree.write_blobs(&blobs[..]).unwrap();
// Check metadata
for i in 0..num_slots {
@ -2047,7 +2047,7 @@ mod tests {
// because no slots chain to it yet because we left a gap. However, if it's
// a slot we haven't inserted, aka one of the gaps, then one of the slots
// we just inserted will chain to that gap
let s = db_ledger.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
let s = blocktree.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
if i % 2 == 0 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
assert_eq!(s.consumed_ticks, 0);
@ -2069,12 +2069,12 @@ mod tests {
b.set_slot(((i / 2) * 2) as u64);
}
db_ledger.write_blobs(&blobs[..]).unwrap();
blocktree.write_blobs(&blobs[..]).unwrap();
for i in 0..num_slots {
// Check that all the slots chain correctly once the missing slots
// have been filled
let s = db_ledger.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
let s = blocktree.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
if i != num_slots - 1 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
} else {
@ -2089,17 +2089,17 @@ mod tests {
}
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_forward_chaining_is_trunk() {
let db_ledger_path = get_tmp_ledger_path("test_forward_chaining_is_trunk");
let blocktree_path = get_tmp_ledger_path("test_forward_chaining_is_trunk");
{
let mut db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let mut blocktree = Blocktree::open(&blocktree_path).unwrap();
let num_slots = 15;
let ticks_per_slot = 2;
db_ledger.ticks_per_slot = ticks_per_slot as u64;
blocktree.ticks_per_slot = ticks_per_slot as u64;
let entries = create_ticks(num_slots * ticks_per_slot, Hash::default());
let mut blobs = entries.to_blobs();
@ -2111,11 +2111,11 @@ mod tests {
// Write the blobs such that every 3rd block has a gap in the beginning
for (slot_index, slot_ticks) in blobs.chunks(ticks_per_slot as usize).enumerate() {
if slot_index % 3 == 0 {
db_ledger
blocktree
.write_blobs(&slot_ticks[1..ticks_per_slot as usize])
.unwrap();
} else {
db_ledger
blocktree
.write_blobs(&slot_ticks[..ticks_per_slot as usize])
.unwrap();
}
@ -2123,7 +2123,7 @@ mod tests {
// Check metadata
for i in 0..num_slots {
let s = db_ledger.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
let s = blocktree.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
// The last slot will not chain to any other slots
if i as u64 != num_slots - 1 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
@ -2149,10 +2149,10 @@ mod tests {
// slot_index + 3 become part of the trunk
for (slot_index, slot_ticks) in blobs.chunks(ticks_per_slot as usize).enumerate() {
if slot_index % 3 == 0 {
db_ledger.write_blobs(&slot_ticks[0..1]).unwrap();
blocktree.write_blobs(&slot_ticks[0..1]).unwrap();
for i in 0..num_slots {
let s = db_ledger.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
let s = blocktree.meta_cf.get_slot_meta(i as u64).unwrap().unwrap();
if i != num_slots - 1 {
assert_eq!(s.next_slots, vec![i as u64 + 1]);
} else {
@ -2167,47 +2167,47 @@ mod tests {
}
}
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_get_slots_since() {
let db_ledger_path = get_tmp_ledger_path("test_get_slots_since");
let blocktree_path = get_tmp_ledger_path("test_get_slots_since");
{
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Slot doesn't exist
assert!(db_ledger.get_slots_since(&vec![0]).unwrap().is_empty());
assert!(blocktree.get_slots_since(&vec![0]).unwrap().is_empty());
let mut meta0 = SlotMeta::new(0, 1);
db_ledger.meta_cf.put_slot_meta(0, &meta0).unwrap();
blocktree.meta_cf.put_slot_meta(0, &meta0).unwrap();
// Slot exists, chains to nothing
assert!(db_ledger.get_slots_since(&vec![0]).unwrap().is_empty());
assert!(blocktree.get_slots_since(&vec![0]).unwrap().is_empty());
meta0.next_slots = vec![1, 2];
db_ledger.meta_cf.put_slot_meta(0, &meta0).unwrap();
blocktree.meta_cf.put_slot_meta(0, &meta0).unwrap();
// Slot exists, chains to some other slots
assert_eq!(db_ledger.get_slots_since(&vec![0]).unwrap(), vec![1, 2]);
assert_eq!(db_ledger.get_slots_since(&vec![0, 1]).unwrap(), vec![1, 2]);
assert_eq!(blocktree.get_slots_since(&vec![0]).unwrap(), vec![1, 2]);
assert_eq!(blocktree.get_slots_since(&vec![0, 1]).unwrap(), vec![1, 2]);
let mut meta3 = SlotMeta::new(3, 1);
meta3.next_slots = vec![10, 5];
db_ledger.meta_cf.put_slot_meta(3, &meta3).unwrap();
blocktree.meta_cf.put_slot_meta(3, &meta3).unwrap();
assert_eq!(
db_ledger.get_slots_since(&vec![0, 1, 3]).unwrap(),
blocktree.get_slots_since(&vec![0, 1, 3]).unwrap(),
vec![1, 2, 10, 5]
);
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
fn test_insert_data_blobs_slots(name: &str, should_bulk_write: bool) {
let db_ledger_path = get_tmp_ledger_path(name);
let blocktree_path = get_tmp_ledger_path(name);
{
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let num_entries = 20 as u64;
@ -2220,11 +2220,11 @@ mod tests {
}
if should_bulk_write {
db_ledger.write_shared_blobs(shared_blobs.iter()).unwrap();
blocktree.write_shared_blobs(shared_blobs.iter()).unwrap();
} else {
for i in 0..num_entries {
let i = i as usize;
db_ledger
blocktree
.write_shared_blobs(&shared_blobs[i..i + 1])
.unwrap();
}
@ -2232,12 +2232,12 @@ mod tests {
for i in 0..num_entries - 1 {
assert_eq!(
db_ledger.get_slot_entries(i, i, None).unwrap()[0],
blocktree.get_slot_entries(i, i, None).unwrap()[0],
original_entries[i as usize]
);
let meta_key = MetaCf::key(i);
let meta = db_ledger.meta_cf.get(&meta_key).unwrap().unwrap();
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
assert_eq!(meta.received, i + 1);
if i != 0 {
assert!(meta.consumed == 0);
@ -2246,6 +2246,6 @@ mod tests {
}
}
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
}


@ -291,9 +291,9 @@ impl Service for BroadcastService {
#[cfg(test)]
mod test {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, Node};
use crate::db_ledger::get_tmp_ledger_path;
use crate::db_ledger::DbLedger;
use crate::entry::create_ticks;
use crate::service::Service;
use solana_sdk::hash::Hash;
@ -305,7 +305,7 @@ mod test {
use std::time::Duration;
struct MockBroadcastService {
db_ledger: Arc<DbLedger>,
blocktree: Arc<Blocktree>,
broadcast_service: BroadcastService,
}
@ -318,7 +318,7 @@ mod test {
max_tick_height: u64,
) -> MockBroadcastService {
// Make the database ledger
let db_ledger = Arc::new(DbLedger::open(ledger_path).unwrap());
let blocktree = Arc::new(Blocktree::open(ledger_path).unwrap());
// Make the leader node and scheduler
let leader_info = Node::new_localhost_with_pubkey(leader_pubkey);
@ -351,7 +351,7 @@ mod test {
);
MockBroadcastService {
db_ledger,
blocktree,
broadcast_service,
}
}
@ -392,7 +392,7 @@ mod test {
}
sleep(Duration::from_millis(2000));
let db_ledger = broadcast_service.db_ledger;
let blocktree = broadcast_service.blocktree;
let mut blob_index = 0;
for i in 0..max_tick_height - start_tick_height {
let slot = leader_scheduler
@ -400,7 +400,7 @@ mod test {
.unwrap()
.tick_height_to_slot(start_tick_height + i + 1);
let result = db_ledger.get_data_blob(slot, blob_index).unwrap();
let result = blocktree.get_data_blob(slot, blob_index).unwrap();
blob_index += 1;
assert!(result.is_some());
@ -413,6 +413,6 @@ mod test {
.expect("Expect successful join of broadcast service");
}
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
}


@ -1,4 +1,4 @@
use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
use crate::blocktree::{Blocktree, DEFAULT_SLOT_HEIGHT};
use std::fs::File;
use std::io;
use std::io::{BufWriter, Write};
@ -34,7 +34,7 @@ pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mu
}
pub fn chacha_cbc_encrypt_ledger(
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
slice: u64,
out_path: &Path,
ivec: &mut [u8; CHACHA_BLOCK_SIZE],
@ -50,7 +50,7 @@ pub fn chacha_cbc_encrypt_ledger(
let mut entry = slice;
loop {
match db_ledger.read_blobs_bytes(
match blocktree.read_blobs_bytes(
entry,
ENTRIES_PER_SEGMENT - total_entries,
&mut buffer,
@ -94,9 +94,9 @@ pub fn chacha_cbc_encrypt_ledger(
#[cfg(test)]
mod tests {
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::{Blocktree, DEFAULT_SLOT_HEIGHT};
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::db_ledger::get_tmp_ledger_path;
use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
use crate::entry::Entry;
use ring::signature::Ed25519KeyPair;
use solana_sdk::budget_transaction::BudgetTransaction;
@ -144,11 +144,11 @@ mod tests {
solana_logger::setup();
let ledger_dir = "chacha_test_encrypt_file";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let db_ledger = Arc::new(DbLedger::open(&ledger_path).unwrap());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let out_path = Path::new("test_chacha_encrypt_file_output.txt.enc");
let entries = make_tiny_deterministic_test_entries(32);
db_ledger
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, 0, &entries)
.unwrap();
@ -156,7 +156,7 @@ mod tests {
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
);
chacha_cbc_encrypt_ledger(&db_ledger, 0, out_path, &mut key).unwrap();
chacha_cbc_encrypt_ledger(&blocktree, 0, out_path, &mut key).unwrap();
let mut out_file = File::open(out_path).unwrap();
let mut buf = vec![];
let size = out_file.read_to_end(&mut buf).unwrap();


@ -1,8 +1,8 @@
// Module used by validators to approve storage mining proofs
// in parallel using the GPU
use crate::blocktree::{Blocktree, DEFAULT_SLOT_HEIGHT};
use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
use crate::sigverify::{
chacha_cbc_encrypt_many_sample, chacha_end_sha_state, chacha_init_sha_state,
};
@ -18,7 +18,7 @@ use crate::storage_stage::ENTRIES_PER_SEGMENT;
// Then sample each block at the offsets provided by samples argument with sha256
// and return the vec of sha states
pub fn chacha_cbc_encrypt_file_many_keys(
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
segment: u64,
ivecs: &mut [u8],
samples: &[u64],
@ -47,7 +47,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
chacha_init_sha_state(int_sha_states.as_mut_ptr(), num_keys as u32);
}
loop {
match db_ledger.read_blobs_bytes(
match blocktree.read_blobs_bytes(
entry,
ENTRIES_PER_SEGMENT - total_entries,
&mut buffer,
@ -109,10 +109,10 @@ pub fn chacha_cbc_encrypt_file_many_keys(
#[cfg(test)]
mod tests {
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::{Blocktree, DEFAULT_SLOT_HEIGHT};
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use crate::db_ledger::get_tmp_ledger_path;
use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
use crate::entry::make_tiny_test_entries;
use crate::replicator::sample_file;
use solana_sdk::hash::Hash;
@ -127,8 +127,8 @@ mod tests {
let entries = make_tiny_test_entries(32);
let ledger_dir = "test_encrypt_file_many_keys_single";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let db_ledger = Arc::new(DbLedger::open(&ledger_path).unwrap());
db_ledger
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, 0, &entries)
.unwrap();
@ -141,12 +141,12 @@ mod tests {
);
let mut cpu_iv = ivecs.clone();
chacha_cbc_encrypt_ledger(&db_ledger, 0, out_path, &mut cpu_iv).unwrap();
chacha_cbc_encrypt_ledger(&blocktree, 0, out_path, &mut cpu_iv).unwrap();
let ref_hash = sample_file(&out_path, &samples).unwrap();
let hashes =
chacha_cbc_encrypt_file_many_keys(&db_ledger, 0, &mut ivecs, &samples).unwrap();
chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut ivecs, &samples).unwrap();
assert_eq!(hashes[0], ref_hash);
@ -161,8 +161,8 @@ mod tests {
let entries = make_tiny_test_entries(32);
let ledger_dir = "test_encrypt_file_many_keys_multiple";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let db_ledger = Arc::new(DbLedger::open(&ledger_path).unwrap());
db_ledger
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, 0, &entries)
.unwrap();
@ -178,7 +178,7 @@ mod tests {
);
ivec[0] = i;
ivecs.extend(ivec.clone().iter());
chacha_cbc_encrypt_ledger(&db_ledger.clone(), 0, out_path, &mut ivec).unwrap();
chacha_cbc_encrypt_ledger(&blocktree.clone(), 0, out_path, &mut ivec).unwrap();
ref_hashes.push(sample_file(&out_path, &samples).unwrap());
info!(
@ -190,7 +190,7 @@ mod tests {
}
let hashes =
chacha_cbc_encrypt_file_many_keys(&db_ledger, 0, &mut ivecs, &samples).unwrap();
chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut ivecs, &samples).unwrap();
assert_eq!(hashes, ref_hashes);
@ -204,7 +204,7 @@ mod tests {
let ledger_dir = "test_encrypt_file_many_keys_bad_key_length";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let samples = [0];
let db_ledger = Arc::new(DbLedger::open(&ledger_path).unwrap());
assert!(chacha_cbc_encrypt_file_many_keys(&db_ledger, 0, &mut keys, &samples,).is_err());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
assert!(chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut keys, &samples,).is_err());
}
}


@ -13,6 +13,7 @@
//!
//! Bank needs to provide an interface for us to query the stake weight
use crate::bank::Bank;
use crate::blocktree::Blocktree;
use crate::bloom::Bloom;
use crate::contact_info::ContactInfo;
use crate::counter::Counter;
@ -20,7 +21,6 @@ use crate::crds_gossip::CrdsGossip;
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use crate::crds_value::{CrdsValue, CrdsValueLabel, LeaderId, Vote};
use crate::db_ledger::DbLedger;
use crate::packet::{to_shared_blob, Blob, SharedBlob, BLOB_SIZE};
use crate::result::Result;
use crate::rpc::RPC_PORT;
@ -846,14 +846,14 @@ impl ClusterInfo {
fn run_window_request(
from: &NodeInfo,
from_addr: &SocketAddr,
db_ledger: Option<&Arc<DbLedger>>,
blocktree: Option<&Arc<Blocktree>>,
me: &NodeInfo,
slot_height: u64,
blob_index: u64,
) -> Vec<SharedBlob> {
if let Some(db_ledger) = db_ledger {
if let Some(blocktree) = blocktree {
// Try to find the requested index in one of the slots
let blob = db_ledger.get_data_blob(slot_height, blob_index);
let blob = blocktree.get_data_blob(slot_height, blob_index);
if let Ok(Some(mut blob)) = blob {
inc_new_counter_info!("cluster_info-window-request-ledger", 1);
@ -878,13 +878,13 @@ impl ClusterInfo {
//TODO we should first coalesce all the requests
fn handle_blob(
obj: &Arc<RwLock<Self>>,
db_ledger: Option<&Arc<DbLedger>>,
blocktree: Option<&Arc<Blocktree>>,
blob: &Blob,
) -> Vec<SharedBlob> {
deserialize(&blob.data[..blob.meta.size])
.into_iter()
.flat_map(|request| {
ClusterInfo::handle_protocol(obj, &blob.meta.addr(), db_ledger, request)
ClusterInfo::handle_protocol(obj, &blob.meta.addr(), blocktree, request)
})
.collect()
}
@ -994,7 +994,7 @@ impl ClusterInfo {
fn handle_request_window_index(
me: &Arc<RwLock<Self>>,
from: &ContactInfo,
db_ledger: Option<&Arc<DbLedger>>,
blocktree: Option<&Arc<Blocktree>>,
slot_height: u64,
blob_index: u64,
from_addr: &SocketAddr,
@ -1028,7 +1028,7 @@ impl ClusterInfo {
let res = Self::run_window_request(
&from,
&from_addr,
db_ledger,
blocktree,
&my_info,
slot_height,
blob_index,
@ -1043,7 +1043,7 @@ impl ClusterInfo {
fn handle_protocol(
me: &Arc<RwLock<Self>>,
from_addr: &SocketAddr,
db_ledger: Option<&Arc<DbLedger>>,
blocktree: Option<&Arc<Blocktree>>,
request: Protocol,
) -> Vec<SharedBlob> {
match request {
@ -1102,7 +1102,7 @@ impl ClusterInfo {
Self::handle_request_window_index(
me,
&from,
db_ledger,
blocktree,
slot_height,
blob_index,
from_addr,
@ -1114,7 +1114,7 @@ impl ClusterInfo {
/// Process messages from the network
fn run_listen(
obj: &Arc<RwLock<Self>>,
db_ledger: Option<&Arc<DbLedger>>,
blocktree: Option<&Arc<Blocktree>>,
requests_receiver: &BlobReceiver,
response_sender: &BlobSender,
) -> Result<()> {
@ -1126,7 +1126,7 @@ impl ClusterInfo {
}
let mut resps = Vec::new();
for req in reqs {
let mut resp = Self::handle_blob(obj, db_ledger, &req.read().unwrap());
let mut resp = Self::handle_blob(obj, blocktree, &req.read().unwrap());
resps.append(&mut resp);
}
response_sender.send(resps)?;
@ -1134,7 +1134,7 @@ impl ClusterInfo {
}
pub fn listen(
me: Arc<RwLock<Self>>,
db_ledger: Option<Arc<DbLedger>>,
blocktree: Option<Arc<Blocktree>>,
requests_receiver: BlobReceiver,
response_sender: BlobSender,
exit: Arc<AtomicBool>,
@ -1144,7 +1144,7 @@ impl ClusterInfo {
.spawn(move || loop {
let e = Self::run_listen(
&me,
db_ledger.as_ref(),
blocktree.as_ref(),
&requests_receiver,
&response_sender,
);
@ -1302,9 +1302,9 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::Blocktree;
use crate::crds_value::CrdsValueLabel;
use crate::db_ledger::get_tmp_ledger_path;
use crate::db_ledger::DbLedger;
use crate::packet::BLOB_HEADER_SIZE;
use crate::result::Error;
use crate::test_tx::test_tx;
@ -1406,7 +1406,7 @@ mod tests {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path("run_window_request");
{
let db_ledger = Arc::new(DbLedger::open(&ledger_path).unwrap());
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let me = NodeInfo::new(
Keypair::new().pubkey(),
socketaddr!("127.0.0.1:1234"),
@ -1420,7 +1420,7 @@ mod tests {
let rv = ClusterInfo::run_window_request(
&me,
&socketaddr_any!(),
Some(&db_ledger),
Some(&blocktree),
&me,
0,
0,
@ -1436,14 +1436,14 @@ mod tests {
w_blob.meta.size = data_size + BLOB_HEADER_SIZE;
}
db_ledger
blocktree
.write_shared_blobs(vec![&blob])
.expect("Expect successful ledger write");
let rv = ClusterInfo::run_window_request(
&me,
&socketaddr_any!(),
Some(&db_ledger),
Some(&blocktree),
&me,
2,
1,
@ -1455,7 +1455,7 @@ mod tests {
assert_eq!(v.read().unwrap().meta.size, BLOB_HEADER_SIZE + data_size);
}
DbLedger::destroy(&ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]


@ -1,7 +1,7 @@
//! Set of functions for emulating windowing functions from a database ledger implementation
use crate::blocktree::*;
use crate::cluster_info::ClusterInfo;
use crate::counter::Counter;
use crate::db_ledger::*;
use crate::entry::Entry;
#[cfg(feature = "erasure")]
use crate::erasure;
@ -21,25 +21,25 @@ use std::sync::{Arc, RwLock};
pub const MAX_REPAIR_LENGTH: usize = 128;
pub fn generate_repairs(db_ledger: &DbLedger, max_repairs: usize) -> Result<Vec<(u64, u64)>> {
pub fn generate_repairs(blocktree: &Blocktree, max_repairs: usize) -> Result<Vec<(u64, u64)>> {
// Slot height and blob indexes for blobs we want to repair
let mut repairs: Vec<(u64, u64)> = vec![];
let mut slots = vec![0];
while repairs.len() < max_repairs && !slots.is_empty() {
let slot_height = slots.pop().unwrap();
let slot = db_ledger.meta(slot_height)?;
let slot = blocktree.meta(slot_height)?;
if slot.is_none() {
continue;
}
let slot = slot.unwrap();
slots.extend(slot.next_slots.clone());
if slot.contains_all_ticks(db_ledger) {
if slot.contains_all_ticks(blocktree) {
continue;
} else {
let num_unreceived_ticks = {
if slot.consumed == slot.received {
slot.num_expected_ticks(db_ledger) - slot.consumed_ticks
slot.num_expected_ticks(blocktree) - slot.consumed_ticks
} else {
0
}
@ -47,7 +47,7 @@ pub fn generate_repairs(db_ledger: &DbLedger, max_repairs: usize) -> Result<Vec<
let upper = slot.received + num_unreceived_ticks;
let reqs = db_ledger.find_missing_data_indexes(
let reqs = blocktree.find_missing_data_indexes(
0,
slot.consumed,
upper,
@ -62,7 +62,7 @@ pub fn generate_repairs(db_ledger: &DbLedger, max_repairs: usize) -> Result<Vec<
}
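// Usage sketch (as in test_generate_repairs below): collect every missing
// (slot_height, blob_index) pair, or cap the list via max_repairs --
//   let repairs = generate_repairs(&blocktree, std::usize::MAX).unwrap();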
pub fn repair(
db_ledger: &DbLedger,
blocktree: &Blocktree,
slot_index: u64,
cluster_info: &Arc<RwLock<ClusterInfo>>,
id: &Pubkey,
@ -73,7 +73,7 @@ pub fn repair(
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
let rcluster_info = cluster_info.read().unwrap();
let is_next_leader = false;
let meta = db_ledger.meta(slot_index)?;
let meta = blocktree.meta(slot_index)?;
if meta.is_none() {
return Ok(vec![]);
}
@ -121,7 +121,7 @@ pub fn repair(
max_entry_height + 2
};
let idxs = db_ledger.find_missing_data_indexes(
let idxs = blocktree.find_missing_data_indexes(
DEFAULT_SLOT_HEIGHT,
consumed,
max_repair_entry_height - 1,
@ -212,7 +212,7 @@ pub fn add_blob_to_retransmit_queue(
/// range of blobs to a queue to be sent on to the next stage.
pub fn process_blob(
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
blob: &SharedBlob,
max_ix: u64,
consume_queue: &mut Vec<Entry>,
@ -241,10 +241,10 @@ pub fn process_blob(
// Insert the new blob into the window
let mut consumed_entries = if is_coding {
let blob = &blob.read().unwrap();
db_ledger.put_coding_blob_bytes(slot, pix, &blob.data[..BLOB_HEADER_SIZE + blob.size()])?;
blocktree.put_coding_blob_bytes(slot, pix, &blob.data[..BLOB_HEADER_SIZE + blob.size()])?;
vec![]
} else {
db_ledger.insert_data_blobs(vec![(*blob.read().unwrap()).borrow()])?
blocktree.insert_data_blobs(vec![(*blob.read().unwrap()).borrow()])?
};
#[cfg(feature = "erasure")]
@ -254,7 +254,7 @@ pub fn process_blob(
// be updated. Hopefully we can recover these blobs next time successfully.
// TODO: Support per-slot erasure. Issue: https://github.com/solana-labs/solana/issues/2441
if let Err(e) = try_erasure(db_ledger, &mut consumed_entries, 0) {
if let Err(e) = try_erasure(blocktree, &mut consumed_entries, 0) {
trace!(
"erasure::recover failed to write recovered coding blobs. Err: {:?}",
e
@ -270,7 +270,7 @@ pub fn process_blob(
// we only want up to a certain index
// then stop
if max_ix != 0 && !consumed_entries.is_empty() {
let meta = db_ledger
let meta = blocktree
.meta(0)?
.expect("Expect metadata to exist if consumed entries is nonzero");
@ -312,24 +312,24 @@ pub fn calculate_max_repair_entry_height(
#[cfg(feature = "erasure")]
fn try_erasure(
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
consume_queue: &mut Vec<Entry>,
slot_index: u64,
) -> Result<()> {
let meta = db_ledger.meta(slot_index)?;
let meta = blocktree.meta(slot_index)?;
if let Some(meta) = meta {
let (data, coding) = erasure::recover(db_ledger, slot_index, meta.consumed)?;
let (data, coding) = erasure::recover(blocktree, slot_index, meta.consumed)?;
for c in coding {
let c = c.read().unwrap();
db_ledger.put_coding_blob_bytes(
blocktree.put_coding_blob_bytes(
0,
c.index(),
&c.data[..BLOB_HEADER_SIZE + c.size()],
)?;
}
let entries = db_ledger.write_shared_blobs(data)?;
let entries = blocktree.write_shared_blobs(data)?;
consume_queue.extend(entries);
}
@ -339,12 +339,12 @@ fn try_erasure(
#[cfg(test)]
mod test {
use super::*;
use crate::db_ledger::get_tmp_ledger_path;
use crate::blocktree::get_tmp_ledger_path;
#[cfg(all(feature = "erasure", test))]
use crate::entry::reconstruct_entries_from_blobs;
use crate::entry::{make_tiny_test_entries, EntrySlice};
#[cfg(all(feature = "erasure", test))]
use crate::erasure::test::{generate_db_ledger_from_window, setup_window_ledger};
use crate::erasure::test::{generate_blocktree_from_window, setup_window_ledger};
#[cfg(all(feature = "erasure", test))]
use crate::erasure::{NUM_CODING, NUM_DATA};
use crate::packet::{index_blobs, Blob, Packet, Packets, SharedBlob, PACKET_DATA_SIZE};
@ -485,10 +485,10 @@ mod test {
#[test]
pub fn test_generate_repairs() {
let db_ledger_path = get_tmp_ledger_path("test_generate_repairs");
let blocktree_path = get_tmp_ledger_path("test_generate_repairs");
let num_ticks_per_slot = 10;
let db_ledger_config = DbLedgerConfig::new(num_ticks_per_slot);
let db_ledger = DbLedger::open_config(&db_ledger_path, db_ledger_config).unwrap();
let blocktree_config = BlocktreeConfig::new(num_ticks_per_slot);
let blocktree = Blocktree::open_config(&blocktree_path, blocktree_config).unwrap();
let num_entries_per_slot = 10;
let num_slots = 2;
@ -501,7 +501,7 @@ mod test {
b.set_slot((i / num_entries_per_slot) as u64);
}
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
let missing_indexes_per_slot: Vec<u64> = (0..num_entries_per_slot - 1)
.flat_map(|x| ((nth * x + 1) as u64..(nth * x + nth) as u64))
@ -517,12 +517,12 @@ mod test {
// Across all slots, find all missing indexes in the range [0, num_entries_per_slot * nth]
assert_eq!(
generate_repairs(&db_ledger, std::usize::MAX).unwrap(),
generate_repairs(&blocktree, std::usize::MAX).unwrap(),
expected
);
assert_eq!(
generate_repairs(&db_ledger, expected.len() - 2).unwrap()[..],
generate_repairs(&blocktree, expected.len() - 2).unwrap()[..],
expected[0..expected.len() - 2]
);
@ -533,7 +533,7 @@ mod test {
let mut b = make_tiny_test_entries(1).to_blobs().pop().unwrap();
b.set_index(blob_index);
b.set_slot(slot_height);
db_ledger.write_blobs(&vec![b]).unwrap();
blocktree.write_blobs(&vec![b]).unwrap();
}
let last_index_per_slot = ((num_entries_per_slot - 1) * nth) as u64;
@ -547,11 +547,11 @@ mod test {
})
.collect();
assert_eq!(
generate_repairs(&db_ledger, std::usize::MAX).unwrap(),
generate_repairs(&blocktree, std::usize::MAX).unwrap(),
expected
);
assert_eq!(
generate_repairs(&db_ledger, expected.len() - 2).unwrap()[..],
generate_repairs(&blocktree, expected.len() - 2).unwrap()[..],
expected[0..expected.len() - 2]
);
}
@ -560,15 +560,15 @@ mod test {
pub fn test_find_missing_data_indexes_sanity() {
let slot = DEFAULT_SLOT_HEIGHT;
let db_ledger_path = get_tmp_ledger_path("test_find_missing_data_indexes_sanity");
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree_path = get_tmp_ledger_path("test_find_missing_data_indexes_sanity");
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Early exit conditions
let empty: Vec<u64> = vec![];
assert_eq!(db_ledger.find_missing_data_indexes(slot, 0, 0, 1), empty);
assert_eq!(db_ledger.find_missing_data_indexes(slot, 5, 5, 1), empty);
assert_eq!(db_ledger.find_missing_data_indexes(slot, 4, 3, 1), empty);
assert_eq!(db_ledger.find_missing_data_indexes(slot, 1, 2, 0), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 0, 0, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 5, 5, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 4, 3, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 1, 2, 0), empty);
let mut blobs = make_tiny_test_entries(2).to_blobs();
@ -579,7 +579,7 @@ mod test {
blobs[1].set_index(OTHER);
// Insert one blob at index = first_index
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
const STARTS: u64 = OTHER * 2;
const END: u64 = OTHER * 3;
@ -588,7 +588,7 @@ mod test {
// given the input range of [i, first_index], the missing indexes should be
// [i, first_index - 1]
for start in 0..STARTS {
let result = db_ledger.find_missing_data_indexes(
let result = blocktree.find_missing_data_indexes(
slot, start, // start
END, //end
MAX, //max
@ -597,15 +597,15 @@ mod test {
assert_eq!(result, expected);
}
drop(db_ledger);
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_find_missing_data_indexes() {
let slot = DEFAULT_SLOT_HEIGHT;
let db_ledger_path = get_tmp_ledger_path("test_find_missing_data_indexes");
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree_path = get_tmp_ledger_path("test_find_missing_data_indexes");
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let gap = 10;
@ -616,7 +616,7 @@ mod test {
b.set_index(i as u64 * gap);
b.set_slot(slot);
}
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
// Index of the first blob is 0
// Index of the second blob is "gap"
@ -624,27 +624,27 @@ mod test {
// range of [0, gap)
let expected: Vec<u64> = (1..gap).collect();
assert_eq!(
db_ledger.find_missing_data_indexes(slot, 0, gap, gap as usize),
blocktree.find_missing_data_indexes(slot, 0, gap, gap as usize),
expected
);
assert_eq!(
db_ledger.find_missing_data_indexes(slot, 1, gap, (gap - 1) as usize),
blocktree.find_missing_data_indexes(slot, 1, gap, (gap - 1) as usize),
expected,
);
assert_eq!(
db_ledger.find_missing_data_indexes(slot, 0, gap - 1, (gap - 1) as usize),
blocktree.find_missing_data_indexes(slot, 0, gap - 1, (gap - 1) as usize),
&expected[..expected.len() - 1],
);
assert_eq!(
db_ledger.find_missing_data_indexes(slot, gap - 2, gap, gap as usize),
blocktree.find_missing_data_indexes(slot, gap - 2, gap, gap as usize),
vec![gap - 2, gap - 1],
);
assert_eq!(
db_ledger.find_missing_data_indexes(slot, gap - 2, gap, 1),
blocktree.find_missing_data_indexes(slot, gap - 2, gap, 1),
vec![gap - 2],
);
assert_eq!(
db_ledger.find_missing_data_indexes(slot, 0, gap, 1),
blocktree.find_missing_data_indexes(slot, 0, gap, 1),
vec![1],
);
@ -652,11 +652,11 @@ mod test {
let mut expected: Vec<u64> = (1..gap).collect();
expected.push(gap + 1);
assert_eq!(
db_ledger.find_missing_data_indexes(slot, 0, gap + 2, (gap + 2) as usize),
blocktree.find_missing_data_indexes(slot, 0, gap + 2, (gap + 2) as usize),
expected,
);
assert_eq!(
db_ledger.find_missing_data_indexes(slot, 0, gap + 2, (gap - 1) as usize),
blocktree.find_missing_data_indexes(slot, 0, gap + 2, (gap - 1) as usize),
&expected[..expected.len() - 1],
);
@ -670,7 +670,7 @@ mod test {
})
.collect();
assert_eq!(
db_ledger.find_missing_data_indexes(
blocktree.find_missing_data_indexes(
slot,
j * gap,
i * gap,
@ -681,14 +681,14 @@ mod test {
}
}
drop(db_ledger);
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
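The two tests above exercise one lifecycle end to end; condensed here as a hedged sketch that uses only calls shown in this diff (the test name and path label are hypothetical):

    #[test]
    fn test_missing_index_sketch() {
        let path = get_tmp_ledger_path("missing_index_sketch");
        let blocktree = Blocktree::open(&path).unwrap();

        // Write blobs at indexes 0 and 2, deliberately skipping index 1.
        let mut blobs = make_tiny_test_entries(2).to_blobs();
        blobs[0].set_index(0);
        blobs[1].set_index(2);
        for b in blobs.iter_mut() {
            b.set_slot(DEFAULT_SLOT_HEIGHT);
        }
        blocktree.write_blobs(&blobs).unwrap();

        // Arguments are (slot, start, end, max); the hole at index 1 is reported back.
        assert_eq!(
            blocktree.find_missing_data_indexes(DEFAULT_SLOT_HEIGHT, 0, 3, 10),
            vec![1]
        );

        drop(blocktree);
        Blocktree::destroy(&path).expect("Expected successful database destruction");
    }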
#[test]
pub fn test_find_missing_data_indexes_slots() {
let db_ledger_path = get_tmp_ledger_path("test_find_missing_data_indexes_slots");
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree_path = get_tmp_ledger_path("test_find_missing_data_indexes_slots");
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let num_entries_per_slot = 10;
let num_slots = 2;
@ -701,7 +701,7 @@ mod test {
b.set_slot((i / num_entries_per_slot) as u64);
}
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
let mut expected: Vec<u64> = (0..num_entries_per_slot)
.flat_map(|x| ((nth * x + 1) as u64..(nth * x + nth) as u64))
@ -710,7 +710,7 @@ mod test {
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
for slot_height in 0..num_slots {
assert_eq!(
db_ledger.find_missing_data_indexes(
blocktree.find_missing_data_indexes(
slot_height as u64,
0,
(num_entries_per_slot * nth) as u64,
@ -723,7 +723,7 @@ mod test {
// Test with a limit on the number of returned entries
for slot_height in 0..num_slots {
assert_eq!(
db_ledger.find_missing_data_indexes(
blocktree.find_missing_data_indexes(
slot_height as u64,
0,
(num_entries_per_slot * nth) as u64,
@ -742,7 +742,7 @@ mod test {
// For each slot, find all missing indexes in the range [0, num_entries_per_slot * nth]
for slot_height in 0..num_slots {
assert_eq!(
db_ledger.find_missing_data_indexes(
blocktree.find_missing_data_indexes(
slot_height as u64,
0,
(num_entries_per_slot * (nth + 1)) as u64,
@ -756,8 +756,8 @@ mod test {
#[test]
pub fn test_no_missing_blob_indexes() {
let slot = DEFAULT_SLOT_HEIGHT;
let db_ledger_path = get_tmp_ledger_path("test_find_missing_data_indexes");
let db_ledger = DbLedger::open(&db_ledger_path).unwrap();
let blocktree_path = get_tmp_ledger_path("test_find_missing_data_indexes");
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Write entries
let num_entries = 10;
@ -772,20 +772,20 @@ mod test {
let blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.read().unwrap()).collect();
let blobs: Vec<&Blob> = blob_locks.iter().map(|b| &**b).collect();
db_ledger.write_blobs(blobs).unwrap();
blocktree.write_blobs(blobs).unwrap();
let empty: Vec<u64> = vec![];
for i in 0..num_entries as u64 {
for j in 0..i {
assert_eq!(
db_ledger.find_missing_data_indexes(slot, j, i, (i - j) as usize),
blocktree.find_missing_data_indexes(slot, j, i, (i - j) as usize),
empty
);
}
}
drop(db_ledger);
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[cfg(all(feature = "erasure", test))]
@ -809,12 +809,12 @@ mod test {
window[erased_index].data = None;
window[erased_index].coding = None;
// Generate the db_ledger from the window
// Generate the blocktree from the window
let ledger_path = get_tmp_ledger_path("test_try_erasure");
let db_ledger = Arc::new(generate_db_ledger_from_window(&ledger_path, &window, false));
let blocktree = Arc::new(generate_blocktree_from_window(&ledger_path, &window, false));
let mut consume_queue = vec![];
try_erasure(&db_ledger, &mut consume_queue, DEFAULT_SLOT_HEIGHT)
try_erasure(&blocktree, &mut consume_queue, DEFAULT_SLOT_HEIGHT)
.expect("Expected successful erasure attempt");
window[erased_index].data = erased_data;
@ -834,7 +834,7 @@ mod test {
let erased_coding_l = erased_coding.read().unwrap();
assert_eq!(
&db_ledger
&blocktree
.get_coding_blob_bytes(slot_height, erased_index as u64)
.unwrap()
.unwrap()[BLOB_HEADER_SIZE..],
@ -847,8 +847,8 @@ mod test {
let mut leader_scheduler = LeaderScheduler::default();
leader_scheduler.set_leader_schedule(vec![Keypair::new().pubkey()]);
let db_ledger_path = get_tmp_ledger_path("test_process_blob");
let db_ledger = Arc::new(DbLedger::open(&db_ledger_path).unwrap());
let blocktree_path = get_tmp_ledger_path("test_process_blob");
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
let num_entries = 10;
@ -869,7 +869,7 @@ mod test {
for blob in shared_blobs.iter().rev() {
process_blob(
&leader_scheduler,
&db_ledger,
&blocktree,
blob,
0,
&mut consume_queue,
@ -881,7 +881,7 @@ mod test {
assert_eq!(consume_queue, original_entries);
drop(db_ledger);
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
}

View File

@ -1,5 +1,5 @@
// Support erasure coding
use crate::db_ledger::DbLedger;
use crate::blocktree::Blocktree;
use crate::packet::{Blob, SharedBlob, BLOB_DATA_SIZE, BLOB_HEADER_SIZE, BLOB_SIZE};
use crate::result::{Error, Result};
use std::cmp;
@ -364,7 +364,7 @@ impl CodingGenerator {
// Recover the missing data and coding blobs from the input ledger. Returns a vector
// of the recovered missing data blobs and a vector of the recovered coding blobs
pub fn recover(
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
slot: u64,
start_idx: u64,
) -> Result<(Vec<SharedBlob>, Vec<SharedBlob>)> {
@ -380,10 +380,10 @@ pub fn recover(
block_end_idx
);
let data_missing = db_ledger
let data_missing = blocktree
.find_missing_data_indexes(slot, block_start_idx, block_end_idx, NUM_DATA)
.len();
let coding_missing = db_ledger
let coding_missing = blocktree
.find_missing_coding_indexes(slot, coding_start_idx, block_end_idx, NUM_CODING)
.len();
@ -418,7 +418,7 @@ pub fn recover(
// Add the data blobs we have into the recovery vector, mark the missing ones
for i in block_start_idx..block_end_idx {
let result = db_ledger.get_data_blob_bytes(slot, i)?;
let result = blocktree.get_data_blob_bytes(slot, i)?;
categorize_blob(
&result,
@ -432,7 +432,7 @@ pub fn recover(
let mut size = None;
// Add the coding blobs we have into the recovery vector, mark the missing ones
for i in coding_start_idx..block_end_idx {
let result = db_ledger.get_coding_blob_bytes(slot, i)?;
let result = blocktree.get_coding_blob_bytes(slot, i)?;
categorize_blob(
&result,
@ -464,7 +464,7 @@ pub fn recover(
// Remove the corrupted coding blobs so there's no effort wasted in trying to
// reconstruct the blobs again
for i in coding_start_idx..block_end_idx {
db_ledger.delete_coding_blob(slot, i)?;
blocktree.delete_coding_blob(slot, i)?;
}
return Ok((vec![], vec![]));
}
@ -501,8 +501,8 @@ fn categorize_blob(
#[cfg(test)]
pub mod test {
use super::*;
use crate::db_ledger::get_tmp_ledger_path;
use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::{Blocktree, DEFAULT_SLOT_HEIGHT};
use crate::entry::{make_tiny_test_entries, EntrySlice};
use crate::packet::{index_blobs, SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
@ -632,22 +632,22 @@ pub mod test {
// TODO: Temporary function used in tests to generate a database ledger
// from the window (which is used to generate the erasure coding)
// until we also transition generate_coding() and BroadcastStage to use DbLedger
// until we also transition generate_coding() and BroadcastStage to use Blocktree
// Github issue: https://github.com/solana-labs/solana/issues/1899.
pub fn generate_db_ledger_from_window(
pub fn generate_blocktree_from_window(
ledger_path: &str,
window: &[WindowSlot],
use_random: bool,
) -> DbLedger {
let db_ledger =
DbLedger::open(ledger_path).expect("Expected to be able to open database ledger");
) -> Blocktree {
let blocktree =
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
for slot in window {
if let Some(ref data) = slot.data {
// If we're using gibberish blobs, skip validation checks and insert
// directly into the ledger
if use_random {
let data = data.read().unwrap();
db_ledger
blocktree
.put_data_blob_bytes(
data.slot(),
data.index(),
@ -655,7 +655,7 @@ pub mod test {
)
.expect("Expected successful put into data column of ledger");
} else {
db_ledger
blocktree
.write_shared_blobs(vec![data].into_iter())
.unwrap();
}
@ -668,7 +668,7 @@ pub mod test {
let data_size = coding_lock.size();
db_ledger
blocktree
.put_coding_blob_bytes(
coding_lock.slot(),
index,
@ -678,7 +678,7 @@ pub mod test {
}
}
db_ledger
blocktree
}
fn generate_coding(
@ -970,12 +970,12 @@ pub mod test {
let refwindow = window[erase_offset].data.clone();
window[erase_offset].data = None;
// Generate the db_ledger from the window
// Generate the blocktree from the window
let ledger_path = get_tmp_ledger_path("test_window_recover_basic");
let db_ledger = Arc::new(generate_db_ledger_from_window(&ledger_path, &window, true));
let blocktree = Arc::new(generate_blocktree_from_window(&ledger_path, &window, true));
// Recover it from coding
let (recovered_data, recovered_coding) = recover(&db_ledger, 0, offset as u64)
let (recovered_data, recovered_coding) = recover(&blocktree, 0, offset as u64)
.expect("Expected successful recovery of erased blobs");
assert!(recovered_coding.is_empty());
@ -996,8 +996,8 @@ pub mod test {
assert_eq!(result.index(), offset as u64);
assert_eq!(result.slot(), DEFAULT_SLOT_HEIGHT as u64);
}
drop(db_ledger);
DbLedger::destroy(&ledger_path)
drop(blocktree);
Blocktree::destroy(&ledger_path)
.expect("Expected successful destruction of database ledger");
}
@ -1021,10 +1021,10 @@ pub mod test {
window[erase_offset].data = None;
window[erase_offset].coding = None;
let ledger_path = get_tmp_ledger_path("test_window_recover_basic2");
let db_ledger = Arc::new(generate_db_ledger_from_window(&ledger_path, &window, true));
let blocktree = Arc::new(generate_blocktree_from_window(&ledger_path, &window, true));
// Recover it from coding
let (recovered_data, recovered_coding) = recover(&db_ledger, 0, offset as u64)
let (recovered_data, recovered_coding) = recover(&blocktree, 0, offset as u64)
.expect("Expected successful recovery of erased blobs");
{
@ -1062,8 +1062,8 @@ pub mod test {
assert_eq!(result.index(), coding_start as u64);
assert_eq!(result.slot(), DEFAULT_SLOT_HEIGHT as u64);
}
drop(db_ledger);
DbLedger::destroy(&ledger_path)
drop(blocktree);
Blocktree::destroy(&ledger_path)
.expect("Expected successful destruction of database ledger");
}
}
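Both recovery tests above share one shape; condensed here as a hedged sketch that reuses this module's helpers (`window` comes from setup_window_ledger with a blob erased at `offset`; the function name and path label are hypothetical):

    fn recover_sketch(window: &[WindowSlot], offset: usize) {
        let ledger_path = get_tmp_ledger_path("recover_sketch");
        let blocktree = Arc::new(generate_blocktree_from_window(&ledger_path, window, true));

        // Reconstruct the erased blobs from the surviving data and coding blobs.
        // recovered_data holds the rebuilt data blobs; recovered_coding is empty
        // when only data blobs were erased, as in the first test above.
        let (recovered_data, recovered_coding) = recover(&blocktree, 0, offset as u64)
            .expect("Expected successful recovery of erased blobs");

        drop(blocktree);
        Blocktree::destroy(&ledger_path)
            .expect("Expected successful destruction of database ledger");
    }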

View File

@ -1,9 +1,9 @@
//! The `fullnode` module hosts all the fullnode microservices.
use crate::bank::Bank;
use crate::blocktree::{Blocktree, BlocktreeConfig};
use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
use crate::counter::Counter;
use crate::db_ledger::{DbLedger, DbLedgerConfig};
use crate::genesis_block::GenesisBlock;
use crate::gossip_service::GossipService;
use crate::leader_scheduler::LeaderSchedulerConfig;
@ -64,7 +64,7 @@ pub struct FullnodeConfig {
pub entry_stream: Option<String>,
pub storage_rotate_count: u64,
pub leader_scheduler_config: LeaderSchedulerConfig,
pub ledger_config: DbLedgerConfig,
pub ledger_config: BlocktreeConfig,
}
impl Default for FullnodeConfig {
fn default() -> Self {
@ -125,7 +125,7 @@ impl Fullnode {
bank,
entry_height,
last_entry_id,
db_ledger,
blocktree,
ledger_signal_sender,
ledger_signal_receiver,
) = new_bank_from_ledger(
@ -143,7 +143,7 @@ impl Fullnode {
let exit = Arc::new(AtomicBool::new(false));
let bank = Arc::new(bank);
let db_ledger = Arc::new(db_ledger);
let blocktree = Arc::new(blocktree);
node.info.wallclock = timestamp();
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_keypair(
@ -184,7 +184,7 @@ impl Fullnode {
let gossip_service = GossipService::new(
&cluster_info,
Some(db_ledger.clone()),
Some(blocktree.clone()),
node.sockets.gossip,
exit.clone(),
);
@ -251,7 +251,7 @@ impl Fullnode {
let (to_leader_sender, to_leader_receiver) = channel();
let (to_validator_sender, to_validator_receiver) = channel();
let blob_index = Self::get_consumed_for_slot(&db_ledger, slot_height);
let blob_index = Self::get_consumed_for_slot(&blocktree, slot_height);
let (tvu, blob_sender) = Tvu::new(
voting_keypair_option,
@ -261,7 +261,7 @@ impl Fullnode {
last_entry_id,
&cluster_info,
sockets,
db_ledger.clone(),
blocktree.clone(),
config.storage_rotate_count,
to_leader_sender,
&storage_state,
@ -490,8 +490,8 @@ impl Fullnode {
self.join()
}
fn get_consumed_for_slot(db_ledger: &DbLedger, slot_index: u64) -> u64 {
let meta = db_ledger.meta(slot_index).expect("Database error");
fn get_consumed_for_slot(blocktree: &Blocktree, slot_index: u64) -> u64 {
let meta = blocktree.meta(slot_index).expect("Database error");
if let Some(meta) = meta {
meta.consumed
} else {
@ -502,18 +502,18 @@ impl Fullnode {
pub fn new_bank_from_ledger(
ledger_path: &str,
ledger_config: DbLedgerConfig,
ledger_config: BlocktreeConfig,
leader_scheduler_config: &LeaderSchedulerConfig,
) -> (Bank, u64, Hash, DbLedger, SyncSender<bool>, Receiver<bool>) {
let (db_ledger, ledger_signal_sender, ledger_signal_receiver) =
DbLedger::open_with_config_signal(ledger_path, ledger_config)
) -> (Bank, u64, Hash, Blocktree, SyncSender<bool>, Receiver<bool>) {
let (blocktree, ledger_signal_sender, ledger_signal_receiver) =
Blocktree::open_with_config_signal(ledger_path, ledger_config)
.expect("Expected to successfully open database ledger");
let genesis_block =
GenesisBlock::load(ledger_path).expect("Expected to successfully open genesis block");
let mut bank = Bank::new_with_leader_scheduler_config(&genesis_block, leader_scheduler_config);
let now = Instant::now();
let entries = db_ledger.read_ledger().expect("opening ledger");
let entries = blocktree.read_ledger().expect("opening ledger");
info!("processing ledger...");
let (entry_height, last_entry_id) = bank.process_ledger(entries).expect("process_ledger");
info!(
@ -527,7 +527,7 @@ pub fn new_bank_from_ledger(
bank,
entry_height,
last_entry_id,
db_ledger,
blocktree,
ledger_signal_sender,
ledger_signal_receiver,
)
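Call sites update mechanically with the renamed config type; a hedged sketch of the common pattern (helper name hypothetical; `ledger_path` assumed to point at an existing ledger with a genesis block):

    fn open_default_bank(ledger_path: &str) -> (Arc<Bank>, Arc<Blocktree>) {
        // Tuple order matches the signature above: bank, entry height,
        // last entry id, blocktree, and the ledger signal channel halves.
        let (bank, _entry_height, _last_entry_id, blocktree, _l_sender, _l_receiver) =
            new_bank_from_ledger(
                ledger_path,
                BlocktreeConfig::default(),
                &LeaderSchedulerConfig::default(),
            );
        (Arc::new(bank), Arc::new(blocktree))
    }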
@ -553,7 +553,7 @@ impl Service for Fullnode {
#[cfg(test)]
mod tests {
use super::*;
use crate::db_ledger::{create_tmp_sample_ledger, tmp_copy_ledger};
use crate::blocktree::{create_tmp_sample_ledger, tmp_copy_ledger};
use crate::entry::make_consecutive_blobs;
use crate::leader_scheduler::make_active_set_entries;
use crate::streamer::responder;
@ -743,7 +743,7 @@ mod tests {
.expect("Expected validator node to close");
}
for path in ledger_paths {
DbLedger::destroy(&path).expect("Expected successful database destruction");
Blocktree::destroy(&path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&path);
}
}
@ -837,7 +837,7 @@ mod tests {
validator_exit();
let (bank, entry_height, _, _, _, _) = new_bank_from_ledger(
&validator_ledger_path,
DbLedgerConfig::default(),
BlocktreeConfig::default(),
&LeaderSchedulerConfig::default(),
);
@ -847,7 +847,7 @@ mod tests {
// Shut down
t_responder.join().expect("responder thread join");
DbLedger::destroy(&validator_ledger_path)
Blocktree::destroy(&validator_ledger_path)
.expect("Expected successful database destruction");
let _ignored = remove_dir_all(&validator_ledger_path).unwrap();
}
@ -942,7 +942,7 @@ mod tests {
info!("Shut down");
leader_exit();
DbLedger::destroy(&leader_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&leader_ledger_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&leader_ledger_path).unwrap();
}
@ -994,7 +994,7 @@ mod tests {
.chunks(ticks_per_block as usize)
.collect();
let db_ledger = DbLedger::open(&ledger_path).unwrap();
let blocktree = Blocktree::open(&ledger_path).unwrap();
// Iterate writing slots through 0..entry_chunks.len()
for i in 0..entry_chunks.len() + 1 {
@ -1009,7 +1009,7 @@ mod tests {
}
};
db_ledger
blocktree
.write_entries(i as u64, start_height, entries)
.unwrap();
}
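The write path used throughout these tests, pulled into one hedged sketch (variable names assumed from the surrounding test; `create_ticks(count, last_id)` from crate::entry, as used elsewhere in this diff):

    let blocktree = Blocktree::open(&ledger_path).unwrap();
    // Append `num_ticks` tick entries to `slot_height`, starting at `start_height`.
    let entries = create_ticks(num_ticks, last_id);
    blocktree
        .write_entries(slot_height, start_height, &entries)
        .unwrap();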

View File

@ -1,7 +1,7 @@
//! The `gossip_service` module implements the network control plane.
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::db_ledger::DbLedger;
use crate::service::Service;
use crate::streamer;
use std::net::UdpSocket;
@ -18,7 +18,7 @@ pub struct GossipService {
impl GossipService {
pub fn new(
cluster_info: &Arc<RwLock<ClusterInfo>>,
db_ledger: Option<Arc<DbLedger>>,
blocktree: Option<Arc<Blocktree>>,
gossip_socket: UdpSocket,
exit: Arc<AtomicBool>,
) -> Self {
@ -35,7 +35,7 @@ impl GossipService {
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
let t_listen = ClusterInfo::listen(
cluster_info.clone(),
db_ledger,
blocktree,
request_receiver,
response_sender.clone(),
exit.clone(),

View File

@ -29,9 +29,9 @@ pub mod crds_gossip_push;
pub mod crds_value;
#[macro_use]
pub mod contact_info;
pub mod blocktree;
pub mod cluster_info;
pub mod compute_leader_confirmation_service;
pub mod db_ledger;
pub mod db_window;
pub mod entry;
pub mod entry_stream;

View File

@ -1,8 +1,8 @@
//! The `repair_service` module implements the tools necessary to generate a thread which
//! regularly finds missing blobs in the ledger and sends repair requests for those blobs
use crate::blocktree::{Blocktree, SlotMeta};
use crate::cluster_info::ClusterInfo;
use crate::db_ledger::{DbLedger, SlotMeta};
use crate::result::Result;
use crate::service::Service;
use solana_metrics::{influxdb, submit};
@ -38,7 +38,7 @@ pub struct RepairService {
impl RepairService {
fn run(
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
exit: &Arc<AtomicBool>,
repair_socket: &Arc<UdpSocket>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
@ -50,7 +50,7 @@ impl RepairService {
break;
}
let repairs = Self::generate_repairs(db_ledger, MAX_REPAIR_LENGTH, &mut repair_info);
let repairs = Self::generate_repairs(blocktree, MAX_REPAIR_LENGTH, &mut repair_info);
if let Ok(repairs) = repairs {
let reqs: Vec<_> = repairs
@ -99,31 +99,31 @@ impl RepairService {
}
pub fn new(
db_ledger: Arc<DbLedger>,
blocktree: Arc<Blocktree>,
exit: Arc<AtomicBool>,
repair_socket: Arc<UdpSocket>,
cluster_info: Arc<RwLock<ClusterInfo>>,
) -> Self {
let t_repair = Builder::new()
.name("solana-repair-service".to_string())
.spawn(move || Self::run(&db_ledger, &exit, &repair_socket, &cluster_info))
.spawn(move || Self::run(&blocktree, &exit, &repair_socket, &cluster_info))
.unwrap();
RepairService { t_repair }
}
fn process_slot(
db_ledger: &DbLedger,
blocktree: &Blocktree,
slot_height: u64,
slot: &SlotMeta,
max_repairs: usize,
) -> Result<Vec<(u64, u64)>> {
if slot.contains_all_ticks(db_ledger) {
if slot.contains_all_ticks(blocktree) {
Ok(vec![])
} else {
let num_unreceived_ticks = {
if slot.consumed == slot.received {
let num_expected_ticks = slot.num_expected_ticks(db_ledger);
let num_expected_ticks = slot.num_expected_ticks(blocktree);
if num_expected_ticks == 0 {
// This signals that we have received nothing for this slot; try to get at least the
// first entry
@ -146,14 +146,14 @@ impl RepairService {
let upper = slot.received + num_unreceived_ticks;
let reqs =
db_ledger.find_missing_data_indexes(slot_height, slot.consumed, upper, max_repairs);
blocktree.find_missing_data_indexes(slot_height, slot.consumed, upper, max_repairs);
Ok(reqs.into_iter().map(|i| (slot_height, i)).collect())
}
}
fn generate_repairs(
db_ledger: &DbLedger,
blocktree: &Blocktree,
max_repairs: usize,
repair_info: &mut RepairInfo,
) -> Result<Vec<(u64, u64)>> {
@ -166,20 +166,20 @@ impl RepairService {
repair_info.max_slot = current_slot_height.unwrap();
}
let slot = db_ledger.meta(current_slot_height.unwrap())?;
let slot = blocktree.meta(current_slot_height.unwrap())?;
if slot.is_none() {
current_slot_height = db_ledger.get_next_slot(current_slot_height.unwrap())?;
current_slot_height = blocktree.get_next_slot(current_slot_height.unwrap())?;
continue;
}
let slot = slot.unwrap();
let new_repairs = Self::process_slot(
db_ledger,
blocktree,
current_slot_height.unwrap(),
&slot,
max_repairs - repairs.len(),
)?;
repairs.extend(new_repairs);
current_slot_height = db_ledger.get_next_slot(current_slot_height.unwrap())?;
current_slot_height = blocktree.get_next_slot(current_slot_height.unwrap())?;
}
// Only increment repair_tries if the ledger contains every blob for every slot
@ -208,24 +208,24 @@ impl Service for RepairService {
#[cfg(test)]
mod test {
use super::*;
use crate::db_ledger::{get_tmp_ledger_path, DbLedger, DbLedgerConfig};
use crate::blocktree::{get_tmp_ledger_path, Blocktree, BlocktreeConfig};
use crate::entry::create_ticks;
use crate::entry::{make_tiny_test_entries, EntrySlice};
use solana_sdk::hash::Hash;
#[test]
pub fn test_repair_missed_future_slot() {
let db_ledger_path = get_tmp_ledger_path("test_repair_missed_future_slot");
let blocktree_path = get_tmp_ledger_path("test_repair_missed_future_slot");
{
let num_ticks_per_slot = 1;
let db_ledger_config = DbLedgerConfig::new(num_ticks_per_slot);
let db_ledger = DbLedger::open_config(&db_ledger_path, db_ledger_config).unwrap();
let blocktree_config = BlocktreeConfig::new(num_ticks_per_slot);
let blocktree = Blocktree::open_config(&blocktree_path, blocktree_config).unwrap();
let mut blobs = create_ticks(1, Hash::default()).to_blobs();
blobs[0].set_index(0);
blobs[0].set_slot(0);
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
let mut repair_info = RepairInfo::new();
// We have all the blobs for all the slots in the ledger, wait for optimistic
@ -240,7 +240,7 @@ mod test {
vec![]
};
assert_eq!(
RepairService::generate_repairs(&db_ledger, 2, &mut repair_info).unwrap(),
RepairService::generate_repairs(&blocktree, 2, &mut repair_info).unwrap(),
expected
);
}
@ -250,25 +250,25 @@ mod test {
blobs[0].set_index(0);
blobs[0].set_slot(1);
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
assert_eq!(
RepairService::generate_repairs(&db_ledger, 2, &mut repair_info).unwrap(),
RepairService::generate_repairs(&blocktree, 2, &mut repair_info).unwrap(),
vec![]
);
assert_eq!(repair_info.repair_tries, 1);
assert_eq!(repair_info.max_slot, 1);
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_repair_empty_slot() {
let db_ledger_path = get_tmp_ledger_path("test_repair_empty_slot");
let blocktree_path = get_tmp_ledger_path("test_repair_empty_slot");
{
let num_ticks_per_slot = 10;
let db_ledger_config = DbLedgerConfig::new(num_ticks_per_slot);
let db_ledger = DbLedger::open_config(&db_ledger_path, db_ledger_config).unwrap();
let blocktree_config = BlocktreeConfig::new(num_ticks_per_slot);
let blocktree = Blocktree::open_config(&blocktree_path, blocktree_config).unwrap();
let mut blobs = make_tiny_test_entries(1).to_blobs();
blobs[0].set_index(1);
@ -278,23 +278,23 @@ mod test {
// Write this blob to slot 2; it should chain to slot 1, which we haven't
// received any blobs for
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
// Check that repair tries to patch the empty slot
assert_eq!(
RepairService::generate_repairs(&db_ledger, 2, &mut repair_info).unwrap(),
RepairService::generate_repairs(&blocktree, 2, &mut repair_info).unwrap(),
vec![(1, 0), (2, 0)]
);
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_generate_repairs() {
let db_ledger_path = get_tmp_ledger_path("test_generate_repairs");
let blocktree_path = get_tmp_ledger_path("test_generate_repairs");
{
let num_ticks_per_slot = 10;
let db_ledger_config = DbLedgerConfig::new(num_ticks_per_slot);
let db_ledger = DbLedger::open_config(&db_ledger_path, db_ledger_config).unwrap();
let blocktree_config = BlocktreeConfig::new(num_ticks_per_slot);
let blocktree = Blocktree::open_config(&blocktree_path, blocktree_config).unwrap();
let num_entries_per_slot = 10;
let num_slots = 2;
@ -309,7 +309,7 @@ mod test {
b.set_slot((i / num_entries_per_slot) as u64);
}
db_ledger.write_blobs(&blobs).unwrap();
blocktree.write_blobs(&blobs).unwrap();
let missing_indexes_per_slot: Vec<u64> = (0..num_entries_per_slot - 1)
.flat_map(|x| ((nth * x + 1) as u64..(nth * x + nth) as u64))
@ -325,13 +325,13 @@ mod test {
// Across all slots, find all missing indexes in the range [0, num_entries_per_slot * nth]
assert_eq!(
RepairService::generate_repairs(&db_ledger, std::usize::MAX, &mut repair_info)
RepairService::generate_repairs(&blocktree, std::usize::MAX, &mut repair_info)
.unwrap(),
expected
);
assert_eq!(
RepairService::generate_repairs(&db_ledger, expected.len() - 2, &mut repair_info)
RepairService::generate_repairs(&blocktree, expected.len() - 2, &mut repair_info)
.unwrap()[..],
expected[0..expected.len() - 2]
);
@ -343,7 +343,7 @@ mod test {
let mut b = make_tiny_test_entries(1).to_blobs().pop().unwrap();
b.set_index(blob_index);
b.set_slot(slot_height);
db_ledger.write_blobs(&vec![b]).unwrap();
blocktree.write_blobs(&vec![b]).unwrap();
}
let last_index_per_slot = ((num_entries_per_slot - 1) * nth) as u64;
@ -357,16 +357,16 @@ mod test {
})
.collect();
assert_eq!(
RepairService::generate_repairs(&db_ledger, std::usize::MAX, &mut repair_info)
RepairService::generate_repairs(&blocktree, std::usize::MAX, &mut repair_info)
.unwrap(),
expected
);
assert_eq!(
RepairService::generate_repairs(&db_ledger, expected.len() - 2, &mut repair_info)
RepairService::generate_repairs(&blocktree, expected.len() - 2, &mut repair_info)
.unwrap()[..],
expected[0..expected.len() - 2]
);
}
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
}
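Outside of tests, the constructor above owns its thread; a hedged wiring sketch mirroring how the other services in this diff are started (helper name and bind address hypothetical; imports as at the top of this module):

    fn spawn_repair(
        blocktree: Arc<Blocktree>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
    ) -> (Arc<AtomicBool>, RepairService) {
        let exit = Arc::new(AtomicBool::new(false));
        // Any local UDP socket works for sending repair requests in this sketch.
        let repair_socket = Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap());
        let service = RepairService::new(blocktree, exit.clone(), repair_socket, cluster_info);
        (exit, service)
    }

Shutdown follows the same pattern as the window service later in this diff: store `true` into `exit`, then join the service.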

View File

@ -1,9 +1,9 @@
//! The `replay_stage` replays transactions broadcast by the leader.
use crate::bank::Bank;
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::counter::Counter;
use crate::db_ledger::DbLedger;
use crate::entry::{Entry, EntryReceiver, EntrySender, EntrySlice};
#[cfg(not(test))]
use crate::entry_stream::EntryStream;
@ -175,7 +175,7 @@ impl ReplayStage {
pub fn new(
my_id: Pubkey,
voting_keypair: Option<Arc<VotingKeypair>>,
db_ledger: Arc<DbLedger>,
blocktree: Arc<Blocktree>,
bank: Arc<Bank>,
cluster_info: Arc<RwLock<ClusterInfo>>,
exit: Arc<AtomicBool>,
@ -208,7 +208,7 @@ impl ReplayStage {
)
};
// Loop through db_ledger MAX_ENTRY_RECV_PER_ITER entries at a time for each
// Loop through blocktree MAX_ENTRY_RECV_PER_ITER entries at a time for each
// relevant slot to see if there are any available updates
loop {
// Stop getting entries if we get exit signal
@ -218,7 +218,7 @@ impl ReplayStage {
if current_slot.is_none() {
let new_slot = Self::get_next_slot(
&db_ledger,
&blocktree,
prev_slot.expect("prev_slot must exist"),
);
if new_slot.is_some() {
@ -236,7 +236,7 @@ impl ReplayStage {
let entries = {
if let Some(slot) = current_slot {
if let Ok(entries) = db_ledger.get_slot_entries(
if let Ok(entries) = blocktree.get_slot_entries(
slot,
current_blob_index,
Some(MAX_ENTRY_RECV_PER_ITER as u64),
@ -333,9 +333,9 @@ impl ReplayStage {
.expect("Scheduled leader should be calculated by this point")
}
fn get_next_slot(db_ledger: &DbLedger, slot_index: u64) -> Option<u64> {
fn get_next_slot(blocktree: &Blocktree, slot_index: u64) -> Option<u64> {
// Find the next slot that chains to the old slot
let next_slots = db_ledger.get_slots_since(&[slot_index]).expect("Db error");
let next_slots = blocktree.get_slots_since(&[slot_index]).expect("Db error");
next_slots.first().cloned()
}
}
@ -352,10 +352,10 @@ impl Service for ReplayStage {
mod test {
use super::*;
use crate::bank::Bank;
use crate::cluster_info::{ClusterInfo, Node};
use crate::db_ledger::{
create_tmp_sample_ledger, DbLedger, DbLedgerConfig, DEFAULT_SLOT_HEIGHT,
use crate::blocktree::{
create_tmp_sample_ledger, Blocktree, BlocktreeConfig, DEFAULT_SLOT_HEIGHT,
};
use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::create_ticks;
use crate::entry::Entry;
use crate::fullnode::new_bank_from_ledger;
@ -417,8 +417,8 @@ mod test {
last_id = active_set_entries.last().unwrap().id;
{
let db_ledger = DbLedger::open(&my_ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&my_ledger_path).unwrap();
blocktree
.write_entries(
DEFAULT_SLOT_HEIGHT,
genesis_entry_height,
@ -429,20 +429,20 @@ mod test {
{
// Set up the bank
let db_ledger_config = DbLedgerConfig::new(ticks_per_slot);
let (bank, _entry_height, last_entry_id, db_ledger, l_sender, l_receiver) =
new_bank_from_ledger(&my_ledger_path, db_ledger_config, &leader_scheduler_config);
let blocktree_config = BlocktreeConfig::new(ticks_per_slot);
let (bank, _entry_height, last_entry_id, blocktree, l_sender, l_receiver) =
new_bank_from_ledger(&my_ledger_path, blocktree_config, &leader_scheduler_config);
// Set up the replay stage
let (rotation_sender, rotation_receiver) = channel();
let meta = db_ledger.meta(0).unwrap().unwrap();
let meta = blocktree.meta(0).unwrap().unwrap();
let exit = Arc::new(AtomicBool::new(false));
let bank = Arc::new(bank);
let db_ledger = Arc::new(db_ledger);
let blocktree = Arc::new(blocktree);
let (replay_stage, ledger_writer_recv) = ReplayStage::new(
my_id,
Some(Arc::new(voting_keypair)),
db_ledger.clone(),
blocktree.clone(),
bank.clone(),
Arc::new(RwLock::new(cluster_info_me)),
exit.clone(),
@ -465,7 +465,7 @@ mod test {
let expected_last_id = entries_to_send.last().unwrap().id;
// Write the entries to the ledger, replay_stage should get notified of changes
db_ledger
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, meta.consumed, &entries_to_send)
.unwrap();
@ -532,19 +532,19 @@ mod test {
let voting_keypair = Arc::new(VotingKeypair::new_local(&my_keypair));
let (to_leader_sender, _) = channel();
{
let (bank, entry_height, last_entry_id, db_ledger, l_sender, l_receiver) =
let (bank, entry_height, last_entry_id, blocktree, l_sender, l_receiver) =
new_bank_from_ledger(
&my_ledger_path,
DbLedgerConfig::default(),
BlocktreeConfig::default(),
&LeaderSchedulerConfig::default(),
);
let bank = Arc::new(bank);
let db_ledger = Arc::new(db_ledger);
let blocktree = Arc::new(blocktree);
let (replay_stage, ledger_writer_recv) = ReplayStage::new(
my_keypair.pubkey(),
Some(voting_keypair.clone()),
db_ledger.clone(),
blocktree.clone(),
bank.clone(),
cluster_info_me.clone(),
exit.clone(),
@ -563,7 +563,7 @@ mod test {
// Send ReplayStage an entry, should see it on the ledger writer receiver
let next_tick = create_ticks(1, last_entry_id);
db_ledger
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, entry_height, next_tick.clone())
.unwrap();
@ -613,8 +613,8 @@ mod test {
let initial_tick_height = genesis_entry_height;
{
let db_ledger = DbLedger::open(&my_ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&my_ledger_path).unwrap();
blocktree
.write_entries(
DEFAULT_SLOT_HEIGHT,
genesis_entry_height,
@ -636,22 +636,22 @@ mod test {
let (rotation_tx, rotation_rx) = channel();
let exit = Arc::new(AtomicBool::new(false));
{
let db_ledger_config = DbLedgerConfig::new(ticks_per_slot);
let (bank, _entry_height, last_entry_id, db_ledger, l_sender, l_receiver) =
new_bank_from_ledger(&my_ledger_path, db_ledger_config, &leader_scheduler_config);
let blocktree_config = BlocktreeConfig::new(ticks_per_slot);
let (bank, _entry_height, last_entry_id, blocktree, l_sender, l_receiver) =
new_bank_from_ledger(&my_ledger_path, blocktree_config, &leader_scheduler_config);
let meta = db_ledger
let meta = blocktree
.meta(0)
.unwrap()
.expect("First slot metadata must exist");
let voting_keypair = Arc::new(voting_keypair);
let bank = Arc::new(bank);
let db_ledger = Arc::new(db_ledger);
let blocktree = Arc::new(blocktree);
let (replay_stage, ledger_writer_recv) = ReplayStage::new(
my_keypair.pubkey(),
Some(voting_keypair.clone()),
db_ledger.clone(),
blocktree.clone(),
bank.clone(),
cluster_info_me.clone(),
exit.clone(),
@ -677,7 +677,7 @@ mod test {
for i in 0..total_entries_to_send {
let entry = Entry::new(&mut last_id, 0, num_hashes, vec![]);
last_id = entry.id;
db_ledger
blocktree
.write_entries(
DEFAULT_SLOT_HEIGHT,
meta.consumed + i as u64,

View File

@ -1,9 +1,9 @@
use crate::blob_fetch_stage::BlobFetchStage;
use crate::blocktree::Blocktree;
#[cfg(feature = "chacha")]
use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use crate::client::mk_client;
use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
use crate::db_ledger::DbLedger;
use crate::gossip_service::GossipService;
use crate::leader_scheduler::LeaderScheduler;
use crate::result::{self, Result};
@ -130,19 +130,19 @@ impl Replicator {
cluster_info_w.set_leader(leader_pubkey);
}
// Create DbLedger, eventually will simply repurpose the input
// ledger path as the DbLedger path once we replace the ledger with
// DbLedger. Note for now, this ledger will not contain any of the existing entries
// Create Blocktree, eventually will simply repurpose the input
// ledger path as the Blocktree path once we replace the ledger with
// Blocktree. Note for now, this ledger will not contain any of the existing entries
// in the ledger located at ledger_path, and will only append on newly received
// entries after being passed to window_service
let db_ledger =
DbLedger::open(ledger_path).expect("Expected to be able to open database ledger");
let blocktree =
Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
let db_ledger = Arc::new(db_ledger);
let blocktree = Arc::new(blocktree);
let gossip_service = GossipService::new(
&cluster_info,
Some(db_ledger.clone()),
Some(blocktree.clone()),
node.sockets.gossip,
exit.clone(),
);
@ -173,7 +173,7 @@ impl Replicator {
let (retransmit_sender, retransmit_receiver) = channel();
let window_service = WindowService::new(
db_ledger.clone(),
blocktree.clone(),
cluster_info.clone(),
0,
max_entry_height,
@ -234,7 +234,7 @@ impl Replicator {
ivec.copy_from_slice(signature.as_ref());
let num_encrypted_bytes = chacha_cbc_encrypt_ledger(
&db_ledger,
&blocktree,
entry_height,
&ledger_data_file_encrypted,
&mut ivec,

View File

@ -1,8 +1,8 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.
use crate::bank;
use crate::blocktree;
use crate::cluster_info;
use crate::db_ledger;
#[cfg(feature = "erasure")]
use crate::erasure;
use crate::packet;
@ -28,7 +28,7 @@ pub enum Error {
ErasureError(erasure::ErasureError),
SendError,
PohRecorderError(poh_recorder::PohRecorderError),
DbLedgerError(db_ledger::DbLedgerError),
BlocktreeError(blocktree::BlocktreeError),
}
pub type Result<T> = std::result::Result<T, Error>;
@ -102,9 +102,9 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
Error::PohRecorderError(e)
}
}
impl std::convert::From<db_ledger::DbLedgerError> for Error {
fn from(e: db_ledger::DbLedgerError) -> Error {
Error::DbLedgerError(e)
impl std::convert::From<blocktree::BlocktreeError> for Error {
fn from(e: blocktree::BlocktreeError) -> Error {
Error::BlocktreeError(e)
}
}
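The conversion above is what lets `?` cross module boundaries; a one-function demonstration (name hypothetical):

    fn lift(e: blocktree::BlocktreeError) -> Error {
        // Same effect as the From impl above: wraps the error as Error::BlocktreeError.
        Error::from(e)
    }

Any function returning the crate-wide Result<T> can therefore propagate blocktree errors with `?` instead of matching on them.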

View File

@ -1,9 +1,9 @@
//! The `retransmit_stage` retransmits blobs between validators
use crate::bank::Bank;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, DATA_PLANE_FANOUT, GROW_LAYER_CAPACITY, NEIGHBORHOOD_SIZE};
use crate::counter::Counter;
use crate::db_ledger::DbLedger;
use crate::leader_scheduler::LeaderScheduler;
use crate::result::{Error, Result};
use crate::service::Service;
@ -126,7 +126,7 @@ impl RetransmitStage {
#[allow(clippy::new_ret_no_self)]
pub fn new(
bank: &Arc<Bank>,
db_ledger: Arc<DbLedger>,
blocktree: Arc<Blocktree>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
tick_height: u64,
retransmit_socket: Arc<UdpSocket>,
@ -145,7 +145,7 @@ impl RetransmitStage {
);
let done = Arc::new(AtomicBool::new(false));
let window_service = WindowService::new(
db_ledger,
blocktree,
cluster_info.clone(),
tick_height,
0,

View File

@ -2,11 +2,11 @@
// for storage mining. Replicators submit storage proofs; the validator then
// bundles them to submit its proof for mining to be rewarded.
use crate::blocktree::Blocktree;
#[cfg(all(feature = "chacha", feature = "cuda"))]
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use crate::client::mk_client;
use crate::cluster_info::ClusterInfo;
use crate::db_ledger::DbLedger;
use crate::entry::EntryReceiver;
use crate::result::{Error, Result};
use crate::service::Service;
@ -140,7 +140,7 @@ impl StorageStage {
pub fn new(
storage_state: &StorageState,
storage_entry_receiver: EntryReceiver,
db_ledger: Option<Arc<DbLedger>>,
blocktree: Option<Arc<Blocktree>>,
keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
entry_height: u64,
@ -162,12 +162,12 @@ impl StorageStage {
let mut current_key = 0;
let mut entry_height = entry_height;
loop {
if let Some(ref some_db_ledger) = db_ledger {
if let Some(ref some_blocktree) = blocktree {
if let Err(e) = Self::process_entries(
&keypair0,
&storage_state_inner,
&storage_entry_receiver,
&some_db_ledger,
&some_blocktree,
&mut poh_height,
&mut entry_height,
&mut current_key,
@ -264,7 +264,7 @@ impl StorageStage {
pub fn process_entry_crossing(
state: &Arc<RwLock<StorageStateInner>>,
keypair: &Arc<Keypair>,
_db_ledger: &Arc<DbLedger>,
_blocktree: &Arc<Blocktree>,
entry_id: Hash,
entry_height: u64,
tx_sender: &TransactionSender,
@ -318,7 +318,7 @@ impl StorageStage {
let mut statew = state.write().unwrap();
match chacha_cbc_encrypt_file_many_keys(
_db_ledger,
_blocktree,
segment as u64,
&mut statew.storage_keys,
&samples,
@ -342,7 +342,7 @@ impl StorageStage {
keypair: &Arc<Keypair>,
storage_state: &Arc<RwLock<StorageStateInner>>,
entry_receiver: &EntryReceiver,
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
poh_height: &mut u64,
entry_height: &mut u64,
current_key_idx: &mut usize,
@ -408,7 +408,7 @@ impl StorageStage {
Self::process_entry_crossing(
&storage_state,
&keypair,
&db_ledger,
&blocktree,
entry.id,
*entry_height,
tx_sender,
@ -432,8 +432,8 @@ impl Service for StorageStage {
#[cfg(test)]
mod tests {
use crate::db_ledger::create_tmp_sample_ledger;
use crate::db_ledger::{DbLedger, DEFAULT_SLOT_HEIGHT};
use crate::blocktree::create_tmp_sample_ledger;
use crate::blocktree::{Blocktree, DEFAULT_SLOT_HEIGHT};
use crate::entry::{make_tiny_test_entries, Entry};
use crate::cluster_info::{ClusterInfo, NodeInfo};
@ -501,8 +501,8 @@ mod tests {
);
let entries = make_tiny_test_entries(64);
let db_ledger = DbLedger::open(&ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&ledger_path).unwrap();
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, genesis_entry_height, &entries)
.unwrap();
@ -513,7 +513,7 @@ mod tests {
let storage_stage = StorageStage::new(
&storage_state,
storage_entry_receiver,
Some(Arc::new(db_ledger)),
Some(Arc::new(blocktree)),
&keypair,
&exit.clone(),
0,
@ -569,8 +569,8 @@ mod tests {
);
let entries = make_tiny_test_entries(128);
let db_ledger = DbLedger::open(&ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&ledger_path).unwrap();
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, genesis_entry_height, &entries)
.unwrap();
@ -581,7 +581,7 @@ mod tests {
let storage_stage = StorageStage::new(
&storage_state,
storage_entry_receiver,
Some(Arc::new(db_ledger)),
Some(Arc::new(blocktree)),
&keypair,
&exit.clone(),
0,

View File

@ -454,8 +454,8 @@ pub fn retry_get_balance(
}
pub fn new_fullnode(ledger_name: &'static str) -> (Fullnode, NodeInfo, Keypair, String) {
use crate::blocktree::create_tmp_sample_ledger;
use crate::cluster_info::Node;
use crate::db_ledger::create_tmp_sample_ledger;
use crate::fullnode::Fullnode;
use crate::voting_keypair::VotingKeypair;
use solana_sdk::signature::KeypairUtil;

View File

@ -14,8 +14,8 @@
use crate::bank::Bank;
use crate::blob_fetch_stage::BlobFetchStage;
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::db_ledger::DbLedger;
use crate::replay_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::service::Service;
@ -63,7 +63,7 @@ impl Tvu {
/// * `last_entry_id` - Hash of the last entry
/// * `cluster_info` - The cluster_info state.
/// * `sockets` - My fetch, repair, and retransmit sockets
/// * `db_ledger` - the ledger itself
/// * `blocktree` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new(
voting_keypair: Option<Arc<VotingKeypair>>,
@ -73,7 +73,7 @@ impl Tvu {
last_entry_id: Hash,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sockets: Sockets,
db_ledger: Arc<DbLedger>,
blocktree: Arc<Blocktree>,
storage_rotate_count: u64,
to_leader_sender: TvuRotationSender,
storage_state: &StorageState,
@ -108,7 +108,7 @@ impl Tvu {
//then sent to the window, which does the erasure coding reconstruction
let retransmit_stage = RetransmitStage::new(
bank,
db_ledger.clone(),
blocktree.clone(),
&cluster_info,
bank.tick_height(),
Arc::new(retransmit_socket),
@ -123,7 +123,7 @@ impl Tvu {
let (replay_stage, ledger_entry_receiver) = ReplayStage::new(
keypair.pubkey(),
voting_keypair,
db_ledger.clone(),
blocktree.clone(),
bank.clone(),
cluster_info.clone(),
exit.clone(),
@ -138,7 +138,7 @@ impl Tvu {
let storage_stage = StorageStage::new(
storage_state,
ledger_entry_receiver,
Some(db_ledger),
Some(blocktree),
&keypair,
&exit.clone(),
entry_height,
@ -196,9 +196,9 @@ impl Service for Tvu {
#[cfg(test)]
pub mod tests {
use crate::bank::Bank;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, Node};
use crate::db_ledger::get_tmp_ledger_path;
use crate::db_ledger::DbLedger;
use crate::entry::Entry;
use crate::genesis_block::GenesisBlock;
use crate::gossip_service::GossipService;
@ -245,8 +245,8 @@ pub mod tests {
let cref1 = Arc::new(RwLock::new(cluster_info1));
let cur_hash = Hash::default();
let db_ledger_path = get_tmp_ledger_path("test_replay");
let (db_ledger, l_sender, l_receiver) = DbLedger::open_with_signal(&db_ledger_path)
let blocktree_path = get_tmp_ledger_path("test_replay");
let (blocktree, l_sender, l_receiver) = Blocktree::open_with_signal(&blocktree_path)
.expect("Expected to successfully open ledger");
let vote_account_keypair = Arc::new(Keypair::new());
let voting_keypair = VotingKeypair::new_local(&vote_account_keypair);
@ -265,7 +265,7 @@ pub mod tests {
fetch: target1.sockets.tvu,
}
},
Arc::new(db_ledger),
Arc::new(blocktree),
STORAGE_ROTATE_TEST_COUNT,
sender,
&StorageState::default(),
@ -332,8 +332,8 @@ pub mod tests {
let dr_1 = new_gossip(cref1.clone(), target1.sockets.gossip, exit.clone());
let mut cur_hash = Hash::default();
let db_ledger_path = get_tmp_ledger_path("test_replay");
let (db_ledger, l_sender, l_receiver) = DbLedger::open_with_signal(&db_ledger_path)
let blocktree_path = get_tmp_ledger_path("test_replay");
let (blocktree, l_sender, l_receiver) = Blocktree::open_with_signal(&blocktree_path)
.expect("Expected to successfully open ledger");
let vote_account_keypair = Arc::new(Keypair::new());
let voting_keypair = VotingKeypair::new_local(&vote_account_keypair);
@ -352,7 +352,7 @@ pub mod tests {
fetch: target1.sockets.tvu,
}
},
Arc::new(db_ledger),
Arc::new(blocktree),
STORAGE_ROTATE_TEST_COUNT,
sender,
&StorageState::default(),
@ -432,7 +432,7 @@ pub mod tests {
dr_1.join().expect("join");
t_receiver.join().expect("join");
t_responder.join().expect("join");
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&db_ledger_path);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}
}
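Both tests above lean on the signal-returning constructor; the setup and teardown around it, condensed as a hedged sketch (path label hypothetical; channel halves elided):

    let blocktree_path = get_tmp_ledger_path("test_replay_sketch");
    let (blocktree, _l_sender, _l_receiver) =
        Blocktree::open_with_signal(&blocktree_path).expect("Expected to successfully open ledger");
    let blocktree = Arc::new(blocktree);
    // ... hand blocktree.clone() to Tvu::new as shown above ...
    drop(blocktree);
    Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");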

View File

@ -1,8 +1,8 @@
//! The `window_service` provides a thread for maintaining a window (tail of the ledger).
//!
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::counter::Counter;
use crate::db_ledger::DbLedger;
use crate::db_window::*;
use crate::leader_scheduler::LeaderScheduler;
use crate::repair_service::RepairService;
@ -29,7 +29,7 @@ pub enum WindowServiceReturnType {
#[allow(clippy::too_many_arguments)]
fn recv_window(
db_ledger: &Arc<DbLedger>,
blocktree: &Arc<Blocktree>,
id: &Pubkey,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
tick_height: &mut u64,
@ -70,7 +70,7 @@ fn recv_window(
let _ = process_blob(
leader_scheduler,
db_ledger,
blocktree,
&b,
max_ix,
&mut consume_queue,
@ -113,7 +113,7 @@ pub struct WindowService {
impl WindowService {
#[allow(clippy::too_many_arguments)]
pub fn new(
db_ledger: Arc<DbLedger>,
blocktree: Arc<Blocktree>,
cluster_info: Arc<RwLock<ClusterInfo>>,
tick_height: u64,
max_entry_height: u64,
@ -126,7 +126,7 @@ impl WindowService {
) -> WindowService {
let exit_ = exit.clone();
let repair_service = RepairService::new(
db_ledger.clone(),
blocktree.clone(),
exit.clone(),
repair_socket,
cluster_info.clone(),
@ -143,7 +143,7 @@ impl WindowService {
break;
}
if let Err(e) = recv_window(
&db_ledger,
&blocktree,
&id,
&leader_scheduler,
&mut tick_height_,
@ -183,9 +183,9 @@ impl Service for WindowService {
#[cfg(test)]
mod test {
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, Node};
use crate::db_ledger::get_tmp_ledger_path;
use crate::db_ledger::DbLedger;
use crate::entry::make_consecutive_blobs;
use crate::leader_scheduler::LeaderScheduler;
use crate::service::Service;
@ -218,14 +218,14 @@ mod test {
blob_receiver(Arc::new(leader_node.sockets.gossip), exit.clone(), s_reader);
let (s_retransmit, r_retransmit) = channel();
let done = Arc::new(AtomicBool::new(false));
let db_ledger_path = get_tmp_ledger_path("window_send_test");
let db_ledger = Arc::new(
DbLedger::open(&db_ledger_path).expect("Expected to be able to open database ledger"),
let blocktree_path = get_tmp_ledger_path("window_send_test");
let blocktree = Arc::new(
Blocktree::open(&blocktree_path).expect("Expected to be able to open database ledger"),
);
let mut leader_schedule = LeaderScheduler::default();
leader_schedule.set_leader_schedule(vec![me_id]);
let t_window = WindowService::new(
db_ledger,
blocktree,
subs,
0,
0,
@ -278,8 +278,8 @@ mod test {
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_window.join().expect("join");
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&db_ledger_path);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}
#[test]
@ -299,14 +299,14 @@ mod test {
blob_receiver(Arc::new(leader_node.sockets.gossip), exit.clone(), s_reader);
let (s_retransmit, r_retransmit) = channel();
let done = Arc::new(AtomicBool::new(false));
let db_ledger_path = get_tmp_ledger_path("window_send_late_leader_test");
let db_ledger = Arc::new(
DbLedger::open(&db_ledger_path).expect("Expected to be able to open database ledger"),
let blocktree_path = get_tmp_ledger_path("window_send_late_leader_test");
let blocktree = Arc::new(
Blocktree::open(&blocktree_path).expect("Expected to be able to open database ledger"),
);
let mut leader_schedule = LeaderScheduler::default();
leader_schedule.set_leader_schedule(vec![me_id]);
let t_window = WindowService::new(
db_ledger,
blocktree,
subs.clone(),
0,
0,
@ -355,7 +355,7 @@ mod test {
t_receiver.join().expect("join");
t_responder.join().expect("join");
t_window.join().expect("join");
DbLedger::destroy(&db_ledger_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&db_ledger_path);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}
}

View File

@ -1,9 +1,9 @@
use log::*;
use solana::blob_fetch_stage::BlobFetchStage;
use solana::blocktree::{create_tmp_sample_ledger, tmp_copy_ledger};
use solana::blocktree::{Blocktree, BlocktreeConfig, DEFAULT_SLOT_HEIGHT};
use solana::client::mk_client;
use solana::cluster_info::{ClusterInfo, Node, NodeInfo};
use solana::db_ledger::{create_tmp_sample_ledger, tmp_copy_ledger};
use solana::db_ledger::{DbLedger, DbLedgerConfig, DEFAULT_SLOT_HEIGHT};
use solana::entry::{reconstruct_entries_from_blobs, Entry};
use solana::fullnode::{new_bank_from_ledger, Fullnode, FullnodeConfig, FullnodeReturnType};
use solana::gossip_service::GossipService;
@ -27,7 +27,7 @@ use std::thread::{sleep, Builder};
use std::time::{Duration, Instant};
fn read_ledger(ledger_path: &str) -> Vec<Entry> {
let ledger = DbLedger::open(&ledger_path).expect("Unable to open ledger");
let ledger = Blocktree::open(&ledger_path).expect("Unable to open ledger");
ledger
.read_ledger()
.expect("Unable to read ledger")
@ -138,13 +138,13 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
// and force it to respond to repair from the ledger window
// TODO: write out more than slot 0
{
let db_ledger = DbLedger::open(&leader_ledger_path).unwrap();
let blocktree = Blocktree::open(&leader_ledger_path).unwrap();
let entries = solana::entry::create_ticks(
fullnode_config.leader_scheduler_config.ticks_per_slot - last_entry_height - 2,
last_entry_id,
);
db_ledger
blocktree
.write_entries(0, last_entry_height, &entries)
.unwrap();
@ -959,8 +959,8 @@ fn test_leader_to_validator_transition() {
0,
);
{
let db_ledger = DbLedger::open(&leader_ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&leader_ledger_path).unwrap();
blocktree
.write_entries(
DEFAULT_SLOT_HEIGHT,
genesis_entry_height,
@ -1010,7 +1010,7 @@ fn test_leader_to_validator_transition() {
info!("Check the ledger to make sure it's the right height...");
let bank = new_bank_from_ledger(
&leader_ledger_path,
DbLedgerConfig::default(),
BlocktreeConfig::default(),
&LeaderSchedulerConfig::default(),
)
.0;
@ -1059,8 +1059,8 @@ fn test_leader_validator_basic() {
0,
);
{
let db_ledger = DbLedger::open(&leader_ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&leader_ledger_path).unwrap();
blocktree
.write_entries(
DEFAULT_SLOT_HEIGHT,
genesis_entry_height,
@ -1154,7 +1154,7 @@ fn test_leader_validator_basic() {
info!("done!");
for path in ledger_paths {
DbLedger::destroy(&path).expect("Expected successful database destruction");
Blocktree::destroy(&path).expect("Expected successful database destruction");
remove_dir_all(path).unwrap();
}
}
@ -1217,8 +1217,8 @@ fn test_dropped_handoff_recovery() {
// Write the entries
{
let db_ledger = DbLedger::open(&genesis_ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&genesis_ledger_path).unwrap();
blocktree
.write_entries(
DEFAULT_SLOT_HEIGHT,
genesis_entry_height,
@ -1381,8 +1381,8 @@ fn test_full_leader_validator_network() {
.expect("expected at least one genesis entry")
.id;
{
let db_ledger = DbLedger::open(&bootstrap_leader_ledger_path).unwrap();
db_ledger
let blocktree = Blocktree::open(&bootstrap_leader_ledger_path).unwrap();
blocktree
.write_entries(DEFAULT_SLOT_HEIGHT, index, &bootstrap_entries)
.unwrap();
index += bootstrap_entries.len() as u64;
@ -1533,7 +1533,7 @@ fn test_full_leader_validator_network() {
assert!(shortest >= fullnode_config.leader_scheduler_config.ticks_per_slot * 3,);
for path in ledger_paths {
DbLedger::destroy(&path).expect("Expected successful database destruction");
Blocktree::destroy(&path).expect("Expected successful database destruction");
remove_dir_all(path).unwrap();
}
}

View File

@ -6,12 +6,12 @@ extern crate log;
extern crate serde_json;
use bincode::deserialize;
use solana::client::mk_client;
use solana::cluster_info::{ClusterInfo, Node, NodeInfo};
use solana::db_ledger::DbLedger;
use solana::db_ledger::{
use solana::blocktree::Blocktree;
use solana::blocktree::{
create_tmp_sample_ledger, get_tmp_ledger_path, tmp_copy_ledger, DEFAULT_SLOT_HEIGHT,
};
use solana::client::mk_client;
use solana::cluster_info::{ClusterInfo, Node, NodeInfo};
use solana::entry::Entry;
use solana::fullnode::{Fullnode, FullnodeConfig};
use solana::replicator::Replicator;
@ -220,8 +220,8 @@ fn test_replicator_startup_basic() {
}
info!("cleanup");
DbLedger::destroy(&leader_ledger_path).expect("Expected successful database destruction");
DbLedger::destroy(&replicator_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&leader_ledger_path).expect("Expected successful database destruction");
Blocktree::destroy(&replicator_ledger_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&leader_ledger_path);
let _ignored = remove_dir_all(&replicator_ledger_path);
}
@ -257,8 +257,8 @@ fn test_replicator_startup_leader_hang() {
assert!(replicator_res.is_err());
}
let _ignored = DbLedger::destroy(&leader_ledger_path);
let _ignored = DbLedger::destroy(&replicator_ledger_path);
let _ignored = Blocktree::destroy(&leader_ledger_path);
let _ignored = Blocktree::destroy(&replicator_ledger_path);
let _ignored = remove_dir_all(&leader_ledger_path);
let _ignored = remove_dir_all(&replicator_ledger_path);
}
@ -328,8 +328,8 @@ fn test_replicator_startup_ledger_hang() {
assert!(replicator_res.is_err());
}
let _ignored = DbLedger::destroy(&leader_ledger_path);
let _ignored = DbLedger::destroy(&replicator_ledger_path);
let _ignored = Blocktree::destroy(&leader_ledger_path);
let _ignored = Blocktree::destroy(&replicator_ledger_path);
let _ignored = remove_dir_all(&leader_ledger_path);
let _ignored = remove_dir_all(&replicator_ledger_path);
}