Add struct BlockstoreOptions (#22121)
commit e8b7f96a89 (parent 33ad74fbcd), committed via GitHub
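
The hunks below fold the `access_type`, `recovery_mode`, and `enforce_ulimit_nofile` parameters that were threaded through `Blockstore::open_with_access_type`, `Blockstore::open_with_signal`, `Database::open`, and `Rocks::open` into a single `BlockstoreOptions` struct with a `Default` impl. A minimal sketch of the new call-site shape (the wrapper function and its error handling are illustrative only, not part of the diff):

```rust
use {
    solana_ledger::{
        blockstore::Blockstore,
        blockstore_db::{AccessType, BlockstoreOptions},
    },
    std::path::Path,
};

// Illustrative wrapper: the three values that used to be positional arguments
// now travel as named fields of BlockstoreOptions.
fn open_primary_blockstore(ledger_path: &Path) -> Blockstore {
    Blockstore::open_with_access_type(
        ledger_path,
        BlockstoreOptions {
            access_type: AccessType::PrimaryOnly,
            recovery_mode: None,         // previously the Option<BlockstoreRecoveryMode> argument
            enforce_ulimit_nofile: true, // previously the trailing bool argument
        },
    )
    .expect("Failed to open ledger database")
}
```

These particular values equal `BlockstoreOptions::default()`, which is also what the unchanged `Blockstore::open(ledger_path)` convenience constructor uses.
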
@@ -398,6 +398,7 @@ pub mod tests {
         solana_gossip::cluster_info::{ClusterInfo, Node},
         solana_ledger::{
             blockstore::BlockstoreSignals,
+            blockstore_db::BlockstoreOptions,
             create_new_tmp_ledger,
             genesis_utils::{create_genesis_config, GenesisConfigInfo},
         },
@@ -438,7 +439,7 @@ pub mod tests {
             blockstore,
             ledger_signal_receiver,
             ..
-        } = Blockstore::open_with_signal(&blockstore_path, None, true)
+        } = Blockstore::open_with_signal(&blockstore_path, BlockstoreOptions::default())
             .expect("Expected to successfully open ledger");
         let blockstore = Arc::new(blockstore);
         let bank = bank_forks.working_bank();
@@ -36,7 +36,7 @@ use {
     solana_ledger::{
         bank_forks_utils,
         blockstore::{Blockstore, BlockstoreSignals, CompletedSlotsReceiver, PurgeType},
-        blockstore_db::BlockstoreRecoveryMode,
+        blockstore_db::{BlockstoreOptions, BlockstoreRecoveryMode},
         blockstore_processor::{self, TransactionStatusSender},
         leader_schedule::FixedSchedule,
         leader_schedule_cache::LeaderScheduleCache,
@@ -1265,8 +1265,11 @@ fn new_banks_from_ledger(
         ..
     } = Blockstore::open_with_signal(
         ledger_path,
-        config.wal_recovery_mode.clone(),
+        BlockstoreOptions {
+            recovery_mode: config.wal_recovery_mode.clone(),
             enforce_ulimit_nofile,
+            ..BlockstoreOptions::default()
+        },
     )
     .expect("Failed to open ledger database");
     blockstore.set_no_compaction(config.no_rocksdb_compaction);
@@ -22,7 +22,9 @@ use {
         ancestor_iterator::AncestorIterator,
         bank_forks_utils,
         blockstore::{create_new_ledger, Blockstore, PurgeType},
-        blockstore_db::{self, AccessType, BlockstoreRecoveryMode, Column, Database},
+        blockstore_db::{
+            self, AccessType, BlockstoreOptions, BlockstoreRecoveryMode, Column, Database,
+        },
         blockstore_processor::ProcessOptions,
         shred::Shred,
     },
@@ -676,7 +678,14 @@ fn open_blockstore(
     access_type: AccessType,
     wal_recovery_mode: Option<BlockstoreRecoveryMode>,
 ) -> Blockstore {
-    match Blockstore::open_with_access_type(ledger_path, access_type, wal_recovery_mode, true) {
+    match Blockstore::open_with_access_type(
+        ledger_path,
+        BlockstoreOptions {
+            access_type,
+            recovery_mode: wal_recovery_mode,
+            enforce_ulimit_nofile: true,
+        },
+    ) {
         Ok(blockstore) => blockstore,
         Err(err) => {
             eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);
@@ -686,7 +695,14 @@ fn open_blockstore(
 }
 
 fn open_database(ledger_path: &Path, access_type: AccessType) -> Database {
-    match Database::open(&ledger_path.join("rocksdb"), access_type, None) {
+    match Database::open(
+        &ledger_path.join("rocksdb"),
+        BlockstoreOptions {
+            access_type,
+            recovery_mode: None,
+            ..BlockstoreOptions::default()
+        },
+    ) {
         Ok(database) => database,
         Err(err) => {
             eprintln!("Unable to read the Ledger rocksdb: {:?}", err);
@@ -6,7 +6,7 @@ use {
     crate::{
         ancestor_iterator::AncestorIterator,
         blockstore_db::{
-            columns as cf, AccessType, BlockstoreRecoveryMode, Column, Database, IteratorDirection,
+            columns as cf, AccessType, BlockstoreOptions, Column, Database, IteratorDirection,
             IteratorMode, LedgerColumn, Result, WriteBatch,
         },
         blockstore_meta::*,
@@ -341,38 +341,26 @@ impl Blockstore {
 
     /// Opens a Ledger in directory, provides "infinite" window of shreds
     pub fn open(ledger_path: &Path) -> Result<Blockstore> {
-        Self::do_open(ledger_path, AccessType::PrimaryOnly, None, true)
+        Self::do_open(ledger_path, BlockstoreOptions::default())
     }
 
     pub fn open_with_access_type(
         ledger_path: &Path,
-        access_type: AccessType,
-        recovery_mode: Option<BlockstoreRecoveryMode>,
-        enforce_ulimit_nofile: bool,
+        options: BlockstoreOptions,
     ) -> Result<Blockstore> {
-        Self::do_open(
-            ledger_path,
-            access_type,
-            recovery_mode,
-            enforce_ulimit_nofile,
-        )
+        Self::do_open(ledger_path, options)
     }
 
-    fn do_open(
-        ledger_path: &Path,
-        access_type: AccessType,
-        recovery_mode: Option<BlockstoreRecoveryMode>,
-        enforce_ulimit_nofile: bool,
-    ) -> Result<Blockstore> {
+    fn do_open(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
         fs::create_dir_all(&ledger_path)?;
         let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
 
-        adjust_ulimit_nofile(enforce_ulimit_nofile)?;
+        adjust_ulimit_nofile(options.enforce_ulimit_nofile)?;
 
         // Open the database
         let mut measure = Measure::start("open");
         info!("Opening database at {:?}", blockstore_path);
-        let db = Database::open(&blockstore_path, access_type, recovery_mode)?;
+        let db = Database::open(&blockstore_path, options)?;
 
         // Create the metadata column family
         let meta_cf = db.column();
@@ -467,15 +455,9 @@ impl Blockstore {
 
     pub fn open_with_signal(
         ledger_path: &Path,
-        recovery_mode: Option<BlockstoreRecoveryMode>,
-        enforce_ulimit_nofile: bool,
+        options: BlockstoreOptions,
     ) -> Result<BlockstoreSignals> {
-        let mut blockstore = Self::open_with_access_type(
-            ledger_path,
-            AccessType::PrimaryOnly,
-            recovery_mode,
-            enforce_ulimit_nofile,
-        )?;
+        let mut blockstore = Self::open_with_access_type(ledger_path, options)?;
         let (ledger_signal_sender, ledger_signal_receiver) = sync_channel(1);
         let (completed_slots_sender, completed_slots_receiver) =
             sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL);
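
With the two hunks above, `open`, `open_with_access_type`, and `open_with_signal` all funnel into `do_open(ledger_path, options)`. A sketch of opening with signal channels while overriding only the WAL recovery mode (the wrapper function is illustrative; the types and fields are the ones introduced above):

```rust
use {
    solana_ledger::{
        blockstore::{Blockstore, BlockstoreSignals},
        blockstore_db::{BlockstoreOptions, BlockstoreRecoveryMode},
    },
    std::path::Path,
};

// Illustrative wrapper around the new open_with_signal signature.
fn open_with_signals(
    ledger_path: &Path,
    recovery_mode: Option<BlockstoreRecoveryMode>,
) -> BlockstoreSignals {
    Blockstore::open_with_signal(
        ledger_path,
        BlockstoreOptions {
            recovery_mode,
            // Everything else keeps the defaults: PrimaryOnly access and
            // enforce_ulimit_nofile == true.
            ..BlockstoreOptions::default()
        },
    )
    .expect("Expected to successfully open ledger")
}
```

Callers that destructure the result, e.g. `let BlockstoreSignals { blockstore, ledger_signal_receiver, .. } = ...`, are unaffected. Note that the old `open_with_signal` hard-coded `AccessType::PrimaryOnly`; the access type now comes from `options`, whose default is still `PrimaryOnly`.
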
@@ -3773,7 +3755,14 @@ pub fn create_new_ledger(
     genesis_config.write(ledger_path)?;
 
     // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
-    let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?;
+    let blockstore = Blockstore::open_with_access_type(
+        ledger_path,
+        BlockstoreOptions {
+            access_type,
+            recovery_mode: None,
+            enforce_ulimit_nofile: false,
+        },
+    )?;
     let ticks_per_slot = genesis_config.ticks_per_slot;
     let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
     let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
@@ -4716,7 +4705,7 @@ pub mod tests {
     fn test_data_set_completed_on_insert() {
         let ledger_path = get_tmp_ledger_path_auto_delete!();
         let BlockstoreSignals { blockstore, .. } =
-            Blockstore::open_with_signal(ledger_path.path(), None, true).unwrap();
+            Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();
 
         // Create enough entries to fill 2 shreds, only the later one is data complete
         let slot = 0;
@@ -4757,8 +4746,7 @@ pub mod tests {
             blockstore,
             ledger_signal_receiver: recvr,
             ..
-        } = Blockstore::open_with_signal(ledger_path.path(), None, true).unwrap();
-        //let blockstore = Arc::new(blockstore);
+        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();
 
         let entries_per_slot = 50;
         // Create entries for slot 0
@@ -4841,8 +4829,7 @@ pub mod tests {
             blockstore,
             completed_slots_receiver: recvr,
             ..
-        } = Blockstore::open_with_signal(ledger_path.path(), None, true).unwrap();
-        // let blockstore = Arc::new(blockstore);
+        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();
 
         let entries_per_slot = 10;
 
@@ -4867,8 +4854,7 @@ pub mod tests {
             blockstore,
             completed_slots_receiver: recvr,
             ..
-        } = Blockstore::open_with_signal(ledger_path.path(), None, true).unwrap();
-        // let blockstore = Arc::new(blockstore);
+        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();
 
         let entries_per_slot = 10;
         let slots = vec![2, 5, 10];
@@ -4913,8 +4899,7 @@ pub mod tests {
             blockstore,
             completed_slots_receiver: recvr,
             ..
-        } = Blockstore::open_with_signal(ledger_path.path(), None, true).unwrap();
-        // let blockstore = Arc::new(blockstore);
+        } = Blockstore::open_with_signal(ledger_path.path(), BlockstoreOptions::default()).unwrap();
 
         let entries_per_slot = 10;
         let mut slots = vec![2, 5, 10];
@@ -270,12 +270,10 @@ impl OldestSlot {
 struct Rocks(rocksdb::DB, ActualAccessType, OldestSlot);
 
 impl Rocks {
-    fn open(
-        path: &Path,
-        access_type: AccessType,
-        recovery_mode: Option<BlockstoreRecoveryMode>,
-    ) -> Result<Rocks> {
+    fn open(path: &Path, options: BlockstoreOptions) -> Result<Rocks> {
         use columns::*;
+        let access_type = options.access_type;
+        let recovery_mode = options.recovery_mode;
 
         fs::create_dir_all(&path)?;
 
@@ -1016,13 +1014,26 @@ pub struct WriteBatch<'a> {
     map: HashMap<&'static str, &'a ColumnFamily>,
 }
 
+pub struct BlockstoreOptions {
+    pub access_type: AccessType,
+    pub recovery_mode: Option<BlockstoreRecoveryMode>,
+    pub enforce_ulimit_nofile: bool,
+}
+
+impl Default for BlockstoreOptions {
+    /// The default options are the values used by [`Blockstore::open`].
+    fn default() -> Self {
+        Self {
+            access_type: AccessType::PrimaryOnly,
+            recovery_mode: None,
+            enforce_ulimit_nofile: true,
+        }
+    }
+}
+
 impl Database {
-    pub fn open(
-        path: &Path,
-        access_type: AccessType,
-        recovery_mode: Option<BlockstoreRecoveryMode>,
-    ) -> Result<Self> {
-        let backend = Arc::new(Rocks::open(path, access_type, recovery_mode)?);
+    pub fn open(path: &Path, options: BlockstoreOptions) -> Result<Self> {
+        let backend = Arc::new(Rocks::open(path, options)?);
 
         Ok(Database {
             backend,
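
The struct and `Default` impl added in this hunk are the core of the change; the defaults reproduce the values `Blockstore::open` used to pass positionally. A small standalone check of that correspondence (not part of the diff; `matches!` is used because the enum is not assumed to derive `PartialEq`):

```rust
use solana_ledger::blockstore_db::{AccessType, BlockstoreOptions};

fn main() {
    let options = BlockstoreOptions::default();
    // Defaults documented above: primary access, no WAL recovery mode,
    // and ulimit enforcement enabled.
    assert!(matches!(options.access_type, AccessType::PrimaryOnly));
    assert!(options.recovery_mode.is_none());
    assert!(options.enforce_ulimit_nofile);
}
```

Since the fields are public and there is no constructor, call sites that only care about one field can use struct-update syntax, as the replica-node hunk near the end does with `enforce_ulimit_nofile: false, ..BlockstoreOptions::default()`.
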
@@ -31,7 +31,7 @@ use {
     solana_ledger::{
         ancestor_iterator::AncestorIterator,
         blockstore::{Blockstore, PurgeType},
-        blockstore_db::AccessType,
+        blockstore_db::{AccessType, BlockstoreOptions},
         leader_schedule::{FixedSchedule, LeaderSchedule},
     },
     solana_local_cluster::{
@@ -3043,7 +3043,14 @@ fn test_validator_saves_tower() {
 }
 
 fn open_blockstore(ledger_path: &Path) -> Blockstore {
-    Blockstore::open_with_access_type(ledger_path, AccessType::TryPrimaryThenSecondary, None, true)
+    Blockstore::open_with_access_type(
+        ledger_path,
+        BlockstoreOptions {
+            access_type: AccessType::TryPrimaryThenSecondary,
+            recovery_mode: None,
+            enforce_ulimit_nofile: true,
+        },
+    )
     .unwrap_or_else(|e| {
         panic!("Failed to open ledger at {:?}, err: {}", ledger_path, e);
     })
@@ -3402,9 +3409,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
             a_votes.push(last_vote);
             let blockstore = Blockstore::open_with_access_type(
                 &val_a_ledger_path,
-                AccessType::TryPrimaryThenSecondary,
-                None,
-                true,
+                BlockstoreOptions {
+                    access_type: AccessType::TryPrimaryThenSecondary,
+                    recovery_mode: None,
+                    enforce_ulimit_nofile: true,
+                },
             )
             .unwrap();
             let mut ancestors = AncestorIterator::new(last_vote, &blockstore);
@@ -6,7 +6,7 @@ use {
     solana_genesis_utils::download_then_check_genesis_hash,
     solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo},
     solana_ledger::{
-        blockstore::Blockstore, blockstore_db::AccessType, blockstore_processor,
+        blockstore::Blockstore, blockstore_db::BlockstoreOptions, blockstore_processor,
         leader_schedule_cache::LeaderScheduleCache,
     },
     solana_replica_lib::accountsdb_repl_client::AccountsDbReplClientServiceConfig,
@@ -177,9 +177,10 @@ fn start_client_rpc_services(
     let blockstore = Arc::new(
         Blockstore::open_with_access_type(
             &replica_config.ledger_path,
-            AccessType::PrimaryOnly,
-            None,
-            false,
+            BlockstoreOptions {
+                enforce_ulimit_nofile: false,
+                ..BlockstoreOptions::default()
+            },
         )
         .unwrap(),
     );