accounts_index_bins to AccountsIndexConfig (#19257)
* accounts_index_bins to AccountsIndexConfig
* rename param bins -> config
* rename BINS_FOR* to ACCOUNTS_INDEX_CONFIG_FOR*
committed by GitHub
parent 2c648cc6b6
commit 7c70f2158b
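The change replaces the bare `Option<usize>` bin count that was threaded through `AccountsDb`, `Accounts`, `Bank`, and snapshot loading with a small `AccountsIndexConfig` struct, so additional index settings can be added later without touching every signature again. Below is a minimal sketch of the call-site shape before and after; it only uses the fields introduced in this diff and is not a complete API.

```rust
// Sketch only: mirrors the config type introduced in this diff.
#[derive(Debug, Default, Clone, Copy)]
pub struct AccountsIndexConfig {
    pub bins: Option<usize>,
}

// Before: callers passed a bare bin count, e.g.
//     AccountsIndex::<AccountInfo>::new(Some(BINS_FOR_TESTING))
// After: callers pass a config, leaving room for more fields later, e.g.
//     AccountsIndex::<AccountInfo>::new(Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING))

fn main() {
    // A CLI-style bin count maps into the config the same way the
    // ledger-tool and validator call sites do in this commit.
    let bins_from_cli: Option<usize> = Some(16);
    let config = bins_from_cli.map(|bins| AccountsIndexConfig { bins: Some(bins) });
    println!("{:?}", config);
}
```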
@@ -58,7 +58,7 @@ use {
     },
     solana_runtime::{
         accounts_db::AccountShrinkThreshold,
-        accounts_index::AccountSecondaryIndexes,
+        accounts_index::{AccountSecondaryIndexes, AccountsIndexConfig},
         bank::Bank,
         bank_forks::BankForks,
         commitment::BlockCommitmentCache,
@@ -144,7 +144,7 @@ pub struct ValidatorConfig {
     pub poh_hashes_per_batch: u64,
     pub account_indexes: AccountSecondaryIndexes,
     pub accounts_db_caching_enabled: bool,
-    pub accounts_index_bins: Option<usize>,
+    pub accounts_index_config: Option<AccountsIndexConfig>,
     pub warp_slot: Option<Slot>,
     pub accounts_db_test_hash_calculation: bool,
     pub accounts_db_skip_shrink: bool,
@@ -210,7 +210,7 @@ impl Default for ValidatorConfig {
             validator_exit: Arc::new(RwLock::new(Exit::default())),
             no_wait_for_vote_to_start_leader: true,
             accounts_shrink_ratio: AccountShrinkThreshold::default(),
-            accounts_index_bins: None,
+            accounts_index_config: None,
         }
     }
 }
@@ -1139,7 +1139,7 @@ fn new_banks_from_ledger(
         debug_keys: config.debug_keys.clone(),
         account_indexes: config.account_indexes.clone(),
         accounts_db_caching_enabled: config.accounts_db_caching_enabled,
-        accounts_index_bins: config.accounts_index_bins,
+        accounts_index_config: config.accounts_index_config,
         shrink_ratio: config.accounts_shrink_ratio,
         accounts_db_test_hash_calculation: config.accounts_db_test_hash_calculation,
         accounts_db_skip_shrink: config.accounts_db_skip_shrink,

@@ -203,7 +203,7 @@ mod tests {
             check_hash_calculation,
             false,
             false,
-            Some(solana_runtime::accounts_index::BINS_FOR_TESTING),
+            Some(solana_runtime::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();

@@ -831,7 +831,7 @@ mod tests {
             false,
             false,
             false,
-            Some(solana_runtime::accounts_index::BINS_FOR_TESTING),
+            Some(solana_runtime::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )?;

         assert_eq!(bank, &deserialized_bank);
@@ -1011,7 +1011,7 @@ mod tests {
             false,
             false,
             false,
-            Some(solana_runtime::accounts_index::BINS_FOR_TESTING),
+            Some(solana_runtime::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();

@@ -26,6 +26,7 @@ use solana_ledger::{
     shred::Shred,
 };
 use solana_runtime::{
+    accounts_index::AccountsIndexConfig,
     bank::{Bank, RewardCalculationEvent},
     bank_forks::BankForks,
     hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
@@ -1863,6 +1864,10 @@ fn main() {
             }
         }
         ("verify", Some(arg_matches)) => {
+            let accounts_index_config = value_t!(arg_matches, "accounts_index_bins", usize)
+                .ok()
+                .map(|bins| AccountsIndexConfig { bins: Some(bins) });
+
             let process_options = ProcessOptions {
                 dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
                 new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
@@ -1875,7 +1880,7 @@ fn main() {
                     usize
                 )
                 .ok(),
-                accounts_index_bins: value_t!(arg_matches, "accounts_index_bins", usize).ok(),
+                accounts_index_config,
                 verify_index: arg_matches.is_present("verify_accounts_index"),
                 allow_dead_slots: arg_matches.is_present("allow_dead_slots"),
                 accounts_db_test_hash_calculation: arg_matches

@@ -132,7 +132,7 @@ fn load_from_snapshot(
         process_options.accounts_db_test_hash_calculation,
         process_options.accounts_db_skip_shrink,
         process_options.verify_index,
-        process_options.accounts_index_bins,
+        process_options.accounts_index_config,
     )
     .expect("Load from snapshot failed");

@@ -17,7 +17,7 @@ use solana_metrics::{datapoint_error, inc_new_counter_debug};
 use solana_rayon_threadlimit::get_thread_count;
 use solana_runtime::{
     accounts_db::AccountShrinkThreshold,
-    accounts_index::AccountSecondaryIndexes,
+    accounts_index::{AccountSecondaryIndexes, AccountsIndexConfig},
     bank::{
         Bank, ExecuteTimings, InnerInstructionsList, RentDebits, TransactionBalancesSet,
         TransactionExecutionResult, TransactionLogMessages, TransactionResults,
@@ -461,7 +461,7 @@ pub struct ProcessOptions {
     pub allow_dead_slots: bool,
     pub accounts_db_test_hash_calculation: bool,
     pub accounts_db_skip_shrink: bool,
-    pub accounts_index_bins: Option<usize>,
+    pub accounts_index_config: Option<AccountsIndexConfig>,
     pub verify_index: bool,
     pub shrink_ratio: AccountShrinkThreshold,
 }
@@ -493,7 +493,7 @@ pub fn process_blockstore(
         opts.accounts_db_caching_enabled,
         opts.shrink_ratio,
         false,
-        opts.accounts_index_bins,
+        opts.accounts_index_config,
     );
     let bank0 = Arc::new(bank0);
     info!("processing ledger for slot 0...");

@@ -57,7 +57,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig {
         poh_hashes_per_batch: config.poh_hashes_per_batch,
         no_wait_for_vote_to_start_leader: config.no_wait_for_vote_to_start_leader,
         accounts_shrink_ratio: config.accounts_shrink_ratio,
-        accounts_index_bins: config.accounts_index_bins,
+        accounts_index_config: config.accounts_index_config,
     }
 }

@@ -127,7 +127,7 @@ fn initialize_from_snapshot(
         process_options.accounts_db_test_hash_calculation,
         false,
         process_options.verify_index,
-        process_options.accounts_index_bins,
+        process_options.accounts_index_config,
     )
     .unwrap();

@@ -5,7 +5,9 @@ extern crate test;
 use rand::{thread_rng, Rng};
 use solana_runtime::{
     accounts_db::AccountInfo,
-    accounts_index::{AccountSecondaryIndexes, AccountsIndex, BINS_FOR_BENCHMARKS},
+    accounts_index::{
+        AccountSecondaryIndexes, AccountsIndex, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS,
+    },
 };
 use solana_sdk::pubkey::{self, Pubkey};
 use test::Bencher;
@@ -18,7 +20,7 @@ fn bench_accounts_index(bencher: &mut Bencher) {
     const NUM_FORKS: u64 = 16;

     let mut reclaims = vec![];
-    let index = AccountsIndex::<AccountInfo>::new(Some(BINS_FOR_BENCHMARKS));
+    let index = AccountsIndex::<AccountInfo>::new(Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS));
     for f in 0..NUM_FORKS {
         for pubkey in pubkeys.iter().take(NUM_PUBKEYS) {
             index.upsert(

@@ -4,7 +4,8 @@ use crate::{
         ScanStorageResult,
     },
     accounts_index::{
-        AccountSecondaryIndexes, IndexKey, ScanResult, BINS_FOR_BENCHMARKS, BINS_FOR_TESTING,
+        AccountSecondaryIndexes, AccountsIndexConfig, IndexKey, ScanResult,
+        ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING,
     },
     ancestors::Ancestors,
     bank::{
@@ -139,7 +140,7 @@ impl Accounts {
             account_indexes,
             caching_enabled,
             shrink_ratio,
-            Some(BINS_FOR_TESTING),
+            Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
     }

@@ -156,7 +157,7 @@ impl Accounts {
             account_indexes,
             caching_enabled,
             shrink_ratio,
-            Some(BINS_FOR_BENCHMARKS),
+            Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS),
         )
     }

@@ -166,7 +167,7 @@ impl Accounts {
         account_indexes: AccountSecondaryIndexes,
         caching_enabled: bool,
         shrink_ratio: AccountShrinkThreshold,
-        accounts_index_bins: Option<usize>,
+        accounts_index_config: Option<AccountsIndexConfig>,
     ) -> Self {
         Self {
             accounts_db: Arc::new(AccountsDb::new_with_config(
@@ -175,7 +176,7 @@ impl Accounts {
                 account_indexes,
                 caching_enabled,
                 shrink_ratio,
-                accounts_index_bins,
+                accounts_index_config,
             )),
             account_locks: Mutex::new(AccountLocks::default()),
         }

@@ -23,9 +23,9 @@ use crate::{
     accounts_cache::{AccountsCache, CachedAccount, SlotCache},
     accounts_hash::{AccountsHash, CalculateHashIntermediate, HashStats, PreviousPass},
     accounts_index::{
-        AccountIndexGetResult, AccountSecondaryIndexes, AccountsIndex, AccountsIndexRootsStats,
-        IndexKey, IsCached, RefCount, ScanResult, SlotList, SlotSlice, ZeroLamport,
-        BINS_FOR_TESTING,
+        AccountIndexGetResult, AccountSecondaryIndexes, AccountsIndex, AccountsIndexConfig,
+        AccountsIndexRootsStats, IndexKey, IsCached, RefCount, ScanResult, SlotList, SlotSlice,
+        ZeroLamport, ACCOUNTS_INDEX_CONFIG_FOR_TESTING,
     },
     ancestors::Ancestors,
     append_vec::{AppendVec, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion},
@@ -1426,7 +1426,7 @@ impl AccountsDb {
             AccountSecondaryIndexes::default(),
             false,
             AccountShrinkThreshold::default(),
-            Some(BINS_FOR_TESTING),
+            Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
     }

@@ -1436,9 +1436,9 @@ impl AccountsDb {
         account_indexes: AccountSecondaryIndexes,
         caching_enabled: bool,
         shrink_ratio: AccountShrinkThreshold,
-        accounts_index_bins: Option<usize>,
+        accounts_index_config: Option<AccountsIndexConfig>,
     ) -> Self {
-        let accounts_index = AccountsIndex::new(accounts_index_bins);
+        let accounts_index = AccountsIndex::new(accounts_index_config);
         let mut new = if !paths.is_empty() {
             Self {
                 paths,
@@ -6327,7 +6327,7 @@ impl AccountsDb {
             account_indexes,
             caching_enabled,
             shrink_ratio,
-            Some(BINS_FOR_TESTING),
+            Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
     }

@@ -33,8 +33,12 @@ use thiserror::Error;

 pub const ITER_BATCH_SIZE: usize = 1000;
 pub const BINS_DEFAULT: usize = 16;
-pub const BINS_FOR_TESTING: usize = BINS_DEFAULT;
-pub const BINS_FOR_BENCHMARKS: usize = BINS_DEFAULT;
+pub const ACCOUNTS_INDEX_CONFIG_FOR_TESTING: AccountsIndexConfig = AccountsIndexConfig {
+    bins: Some(BINS_DEFAULT),
+};
+pub const ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS: AccountsIndexConfig = AccountsIndexConfig {
+    bins: Some(BINS_DEFAULT),
+};
 pub type ScanResult<T> = Result<T, ScanError>;
 pub type SlotList<T> = Vec<(Slot, T)>;
 pub type SlotSlice<'s, T> = &'s [(Slot, T)];
@@ -78,6 +82,11 @@ pub struct AccountSecondaryIndexesIncludeExclude {
     pub keys: HashSet<Pubkey>,
 }

+#[derive(Debug, Default, Clone, Copy)]
+pub struct AccountsIndexConfig {
+    pub bins: Option<usize>,
+}
+
 #[derive(Debug, Default, Clone)]
 pub struct AccountSecondaryIndexes {
     pub keys: Option<AccountSecondaryIndexesIncludeExclude>,
@@ -761,11 +770,11 @@ pub struct AccountsIndex<T> {

 impl<T: IsCached> AccountsIndex<T> {
     pub fn default_for_tests() -> Self {
-        Self::new(Some(BINS_FOR_TESTING))
+        Self::new(Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING))
     }

-    pub fn new(bins: Option<usize>) -> Self {
-        let (account_maps, bin_calculator) = Self::allocate_accounts_index(bins);
+    pub fn new(config: Option<AccountsIndexConfig>) -> Self {
+        let (account_maps, bin_calculator) = Self::allocate_accounts_index(config);
         Self {
             account_maps,
             bin_calculator,
@@ -784,8 +793,12 @@ impl<T: IsCached> AccountsIndex<T> {
         }
     }

-    fn allocate_accounts_index(bins: Option<usize>) -> (LockMapType<T>, PubkeyBinCalculator16) {
-        let bins = bins.unwrap_or(BINS_DEFAULT);
+    fn allocate_accounts_index(
+        config: Option<AccountsIndexConfig>,
+    ) -> (LockMapType<T>, PubkeyBinCalculator16) {
+        let bins = config
+            .and_then(|config| config.bins)
+            .unwrap_or(BINS_DEFAULT);
         let account_maps = (0..bins)
             .into_iter()
             .map(|_| RwLock::new(AccountMap::default()))
@@ -4024,6 +4037,6 @@ pub mod tests {
     #[test]
     #[should_panic(expected = "bins.is_power_of_two()")]
     fn test_illegal_bins() {
-        AccountsIndex::<bool>::new(Some(3));
+        AccountsIndex::<bool>::new(Some(AccountsIndexConfig { bins: Some(3) }));
     }
 }
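The index still resolves its bin count the same way as before; the config only adds an extra layer of `Option`. Below is a standalone sketch of that resolution, assuming only `BINS_DEFAULT` and the config struct from this diff; the helper name `resolve_bins` is illustrative and not part of the crate.

```rust
const BINS_DEFAULT: usize = 16;

#[derive(Debug, Default, Clone, Copy)]
struct AccountsIndexConfig {
    bins: Option<usize>,
}

// Illustrative helper: mirrors the logic in allocate_accounts_index above.
fn resolve_bins(config: Option<AccountsIndexConfig>) -> usize {
    config
        .and_then(|config| config.bins)
        .unwrap_or(BINS_DEFAULT)
}

fn main() {
    assert_eq!(resolve_bins(None), 16); // no config at all
    assert_eq!(resolve_bins(Some(AccountsIndexConfig { bins: None })), 16); // config without bins
    assert_eq!(resolve_bins(Some(AccountsIndexConfig { bins: Some(64) })), 64); // explicit bin count
    println!("ok");
}
```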
@@ -40,7 +40,8 @@ use crate::{
     },
     accounts_db::{AccountShrinkThreshold, ErrorCounters, SnapshotStorages},
     accounts_index::{
-        AccountSecondaryIndexes, IndexKey, ScanResult, BINS_FOR_BENCHMARKS, BINS_FOR_TESTING,
+        AccountSecondaryIndexes, AccountsIndexConfig, IndexKey, ScanResult,
+        ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING,
     },
     ancestors::{Ancestors, AncestorsForSerialization},
     blockhash_queue::BlockhashQueue,
@@ -1190,7 +1191,7 @@ impl Bank {
             accounts_db_caching_enabled,
             shrink_ratio,
             debug_do_not_add_builtins,
-            Some(BINS_FOR_TESTING),
+            Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
     }

@@ -1215,7 +1216,7 @@ impl Bank {
             accounts_db_caching_enabled,
             shrink_ratio,
             debug_do_not_add_builtins,
-            Some(BINS_FOR_BENCHMARKS),
+            Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS),
         )
     }

@@ -1230,7 +1231,7 @@ impl Bank {
         accounts_db_caching_enabled: bool,
         shrink_ratio: AccountShrinkThreshold,
         debug_do_not_add_builtins: bool,
-        accounts_index_bins: Option<usize>,
+        accounts_index_config: Option<AccountsIndexConfig>,
     ) -> Self {
         let accounts = Accounts::new_with_config(
             paths,
@@ -1238,7 +1239,7 @@ impl Bank {
             account_indexes,
             accounts_db_caching_enabled,
             shrink_ratio,
-            accounts_index_bins,
+            accounts_index_config,
         );
         let mut bank = Self::default_with_accounts(accounts);
         bank.ancestors = Ancestors::from(vec![bank.slot()]);

runtime/src/hybrid_btree_map.rs (new file, 458 lines)
@@ -0,0 +1,458 @@
use crate::accounts_index::AccountMapEntry;
use crate::accounts_index::{IsCached, RefCount, SlotList, ACCOUNTS_INDEX_CONFIG_FOR_TESTING};
use crate::bucket_map_holder::{BucketMapWriteHolder};

use crate::pubkey_bins::PubkeyBinCalculator16;
use solana_bucket_map::bucket_map::BucketMap;
use solana_sdk::clock::Slot;
use solana_sdk::pubkey::Pubkey;
use std::collections::btree_map::BTreeMap;
use std::fmt::Debug;
use std::ops::Bound;
use std::ops::{Range, RangeBounds};
use std::sync::Arc;

type K = Pubkey;

#[derive(Clone, Debug)]
pub struct HybridAccountEntry<V: Clone + Debug> {
    entry: V,
    //exists_on_disk: bool,
}
//type V2<T: Clone + Debug> = HybridAccountEntry<T>;
pub type V2<T> = AccountMapEntry<T>;
/*
trait RealEntry<T: Clone + Debug> {
    fn real_entry(&self) -> T;
}

impl<T:Clone + Debug> RealEntry<T> for T {
    fn real_entry(&self) -> T
    {
        self
    }
}
*/
pub type SlotT<T> = (Slot, T);

#[derive(Debug)]
pub struct HybridBTreeMap<V: 'static + Clone + IsCached + Debug> {
    in_memory: BTreeMap<K, V2<V>>,
    disk: Arc<BucketMapWriteHolder<V>>,
    bin_index: usize,
    bins: usize,
}

// TODO: we need a bit for 'exists on disk' for updates
/*
impl<V: Clone + Debug> Default for HybridBTreeMap<V> {
    /// Creates an empty `BTreeMap`.
    fn default() -> HybridBTreeMap<V> {
        Self {
            in_memory: BTreeMap::default(),
            disk: BucketMap::new_buckets(PubkeyBinCalculator16::log_2(BINS as u32) as u8),
        }
    }
}
*/

/*
impl<'a, K: 'a, V: 'a> Iterator for HybridBTreeMap<'a, V> {
    type Item = (&'a K, &'a V);

    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        if self.length == 0 {
            None
        } else {
            self.length -= 1;
            Some(unsafe { self.range.inner.next_unchecked() })
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.length, Some(self.length))
    }

    fn last(mut self) -> Option<(&'a K, &'a V)> {
        self.next_back()
    }

    fn min(mut self) -> Option<(&'a K, &'a V)> {
        self.next()
    }

    fn max(mut self) -> Option<(&'a K, &'a V)> {
        self.next_back()
    }
}
*/

pub enum HybridEntry<'a, V: 'static + Clone + IsCached + Debug> {
    /// A vacant entry.
    Vacant(HybridVacantEntry<'a, V>),

    /// An occupied entry.
    Occupied(HybridOccupiedEntry<'a, V>),
}

pub struct Keys {
    keys: Vec<K>,
    index: usize,
}

impl Keys {
    pub fn len(&self) -> usize {
        self.keys.len()
    }
}

impl Iterator for Keys {
    type Item = Pubkey;
    fn next(&mut self) -> Option<Self::Item> {
        if self.index >= self.keys.len() {
            None
        } else {
            let r = Some(self.keys[self.index]);
            self.index += 1;
            r
        }
    }
}

pub struct Values<V: Clone + std::fmt::Debug> {
    values: Vec<SlotList<V>>,
    index: usize,
}

impl<V: Clone + std::fmt::Debug> Iterator for Values<V> {
    type Item = V2<V>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.index >= self.values.len() {
            None
        } else {
            let r = Some(AccountMapEntry {
                slot_list: self.values[self.index].clone(),
                ref_count: RefCount::MAX, // todo: no clone
            });
            self.index += 1;
            r
        }
    }
}

pub struct HybridOccupiedEntry<'a, V: 'static + Clone + IsCached + Debug> {
    pubkey: Pubkey,
    entry: V2<V>,
    map: &'a HybridBTreeMap<V>,
}
pub struct HybridVacantEntry<'a, V: 'static + Clone + IsCached + Debug> {
    pubkey: Pubkey,
    map: &'a HybridBTreeMap<V>,
}

impl<'a, V: 'a + Clone + IsCached + Debug> HybridOccupiedEntry<'a, V> {
    pub fn get(&self) -> &V2<V> {
        &self.entry
    }
    pub fn update(&mut self, new_data: &SlotList<V>, new_rc: Option<RefCount>) {
        //error!("update: {}", self.pubkey);
        self.map.disk.update(
            &self.pubkey,
            |previous| {
                if previous.is_some() {
                    //error!("update {} to {:?}", self.pubkey, new_data);
                }
                Some((new_data.clone(), new_rc.unwrap_or(self.entry.ref_count)))
                // TODO no clone here
            },
            Some(&self.entry),
        );
        let g = self.map.disk.get(&self.pubkey).unwrap();
        assert_eq!(format!("{:?}", g.1), format!("{:?}", new_data));
    }
    pub fn addref(&mut self) {
        self.entry.ref_count += 1;

        self.map
            .disk
            .addref(&self.pubkey, self.entry.ref_count, &self.entry.slot_list);
        //error!("addref: {}, {}, {:?}", self.pubkey, self.entry.ref_count(), result);
    }
    pub fn unref(&mut self) {
        self.entry.ref_count -= 1;
        self.map
            .disk
            .unref(&self.pubkey, self.entry.ref_count, &self.entry.slot_list);
        //error!("addref: {}, {}, {:?}", self.pubkey, self.entry.ref_count(), result);
    }
    /*
    pub fn get_mut(&mut self) -> &mut V2<V> {
        self.entry.get_mut()
    }
    */
    pub fn key(&self) -> &K {
        &self.pubkey
    }
    pub fn remove(self) {
        self.map.disk.delete_key(&self.pubkey)
    }
}

impl<'a, V: 'a + Clone + Debug + IsCached> HybridVacantEntry<'a, V> {
    pub fn insert(self, value: V2<V>) {
        // -> &'a mut V2<V> {
        /*
        let value = V2::<V> {
            entry: value,
            //exists_on_disk: false,
        };
        */
        //let mut sl = SlotList::default();
        //std::mem::swap(&mut sl, &mut value.slot_list);
        self.map.disk.update(
            &self.pubkey,
            |_previous| {
                Some((value.slot_list.clone() /* todo bad */, value.ref_count))
            },
            None,
        );
    }
}

impl<V: IsCached> HybridBTreeMap<V> {
    /// Creates an empty `BTreeMap`.
    pub fn new2(bucket_map: &Arc<BucketMapWriteHolder<V>>, bin_index: usize, bins: usize) -> Self {
        Self {
            in_memory: BTreeMap::default(),
            disk: bucket_map.clone(),
            bin_index,
            bins: bins, //bucket_map.num_buckets(),
        }
    }

    pub fn new_for_testing() -> Self {
        let map = Self::new_bucket_map(ACCOUNTS_INDEX_CONFIG_FOR_TESTING);
        Self::new2(&map, 0, 1)
    }

    pub fn new_bucket_map(bins: usize) -> Arc<BucketMapWriteHolder<V>> {
        let buckets = PubkeyBinCalculator16::log_2(bins as u32) as u8; // make more buckets to try to spread things out
        // 15 hopefully avoids too many files open problem
        //buckets = std::cmp::min(buckets + 11, 15); // max # that works with open file handles and such
        //buckets =
        //error!("creating: {} for {}", buckets, BUCKET_BINS);
        Arc::new(BucketMapWriteHolder::new(BucketMap::new_buckets(buckets)))
    }

    pub fn flush(&self) -> usize {
        let num_buckets = self.disk.num_buckets();
        let mystart = num_buckets * self.bin_index / self.bins;
        let myend = num_buckets * (self.bin_index + 1) / self.bins;
        assert_eq!(myend - mystart, 1, "{}", self.bin_index);
        (mystart..myend)
            .map(|ix| self.disk.flush(ix, false, None).1)
            .sum()

        /*
        {
            // put entire contents of this map into the disk backing
            let mut keys = Vec::with_capacity(self.in_memory.len());
            for k in self.in_memory.keys() {
                keys.push(k);
            }
            self.disk.update_batch(&keys[..], |previous, key, orig_i| {
                let item = self.in_memory.get(key);
                item.map(|item| (item.slot_list.clone(), item.ref_count()))
            });
            self.in_memory.clear();
        }*/
    }
    pub fn distribution(&self) {
        self.disk.distribution();
    }
    fn bound<'a, T>(bound: Bound<&'a T>, unbounded: &'a T) -> &'a T {
        match bound {
            Bound::Included(b) | Bound::Excluded(b) => b,
            _ => unbounded,
        }
    }
    pub fn range<R>(&self, range: Option<R>) -> Vec<(Pubkey, SlotList<V>)>
    where
        R: RangeBounds<Pubkey>,
    {
        //self.disk.range.fetch_add(1, Ordering::Relaxed);

        let num_buckets = self.disk.num_buckets();
        if self.bin_index != 0 && self.disk.unified_backing {
            return vec![];
        }
        let mut start = 0;
        let mut end = num_buckets;
        if let Some(range) = &range {
            let default = Pubkey::default();
            let max = Pubkey::new(&[0xff; 32]);
            let start_bound = Self::bound(range.start_bound(), &default);
            start = self.disk.bucket_ix(start_bound);
            // end is exclusive, so it is end + 1 we care about here
            let end_bound = Self::bound(range.end_bound(), &max);
            end = std::cmp::min(num_buckets, 1 + self.disk.bucket_ix(end_bound)); // ugly
            assert!(
                start_bound <= end_bound,
                "range start is greater than range end"
            );
        }
        let len = (start..end)
            .into_iter()
            .map(|ix| self.disk.bucket_len(ix) as usize)
            .sum::<usize>();

        let mystart = num_buckets * self.bin_index / self.bins;
        let myend = num_buckets * (self.bin_index + 1) / self.bins;
        start = std::cmp::max(start, mystart);
        end = std::cmp::min(end, myend);
        let mut keys = Vec::with_capacity(len);
        (start..end).into_iter().for_each(|ix| {
            let mut ks = self.disk.range(ix, range.as_ref());
            keys.append(&mut ks);
        });
        keys.sort_unstable_by(|a, b| a.0.cmp(&b.0));
        keys
    }

    pub fn keys2(&self) -> Keys {
        // used still?
        let num_buckets = self.disk.num_buckets();
        let start = num_buckets * self.bin_index / self.bins;
        let end = num_buckets * (self.bin_index + 1) / self.bins;
        let len = (start..end)
            .into_iter()
            .map(|ix| self.disk.bucket_len(ix) as usize)
            .sum::<usize>();
        let mut keys = Vec::with_capacity(len);
        let _len = (start..end).into_iter().for_each(|ix| {
            keys.append(
                &mut self
                    .disk
                    .keys3(ix, None::<&Range<Pubkey>>)
                    .unwrap_or_default(),
            )
        });
        keys.sort_unstable();
        Keys { keys, index: 0 }
    }
    pub fn values(&self) -> Values<V> {
        let num_buckets = self.disk.num_buckets();
        if self.bin_index != 0 && self.disk.unified_backing {
            return Values {
                values: vec![],
                index: 0,
            };
        }
        // todo: this may be unsafe if we are asking for things with an update cache active. thankfully, we only call values at startup right now
        let start = num_buckets * self.bin_index / self.bins;
        let end = num_buckets * (self.bin_index + 1) / self.bins;
        let len = (start..end)
            .into_iter()
            .map(|ix| self.disk.bucket_len(ix) as usize)
            .sum::<usize>();
        let mut values = Vec::with_capacity(len);
        (start..end).into_iter().for_each(|ix| {
            values.append(
                &mut self
                    .disk
                    .values(ix, None::<&Range<Pubkey>>)
                    .unwrap_or_default(),
            )
        });
        //error!("getting values: {}, bin: {}, bins: {}, start: {}, end: {}", values.len(), self.bin_index, self.bins, start, end);
        //keys.sort_unstable();
        if self.bin_index == 0 {
            //error!("getting values: {}, {}, {}", values.len(), start, end);
        }
        Values { values, index: 0 }
    }

    pub fn upsert(
        &self,
        pubkey: &Pubkey,
        new_value: AccountMapEntry<V>,
        reclaims: &mut SlotList<V>,
        reclaims_must_be_empty: bool,
    ) {
        self.disk
            .upsert(pubkey, new_value, reclaims, reclaims_must_be_empty);
    }

    pub fn entry(&mut self, key: K) -> HybridEntry<'_, V> {
        match self.disk.get(&key) {
            Some(entry) => HybridEntry::Occupied(HybridOccupiedEntry {
                pubkey: key,
                entry: AccountMapEntry::<V> {
                    slot_list: entry.1,
                    ref_count: entry.0,
                },
                map: self,
            }),
            None => HybridEntry::Vacant(HybridVacantEntry {
                pubkey: key,
                map: self,
            }),
        }
    }

    pub fn insert2(&mut self, key: K, value: V2<V>) {
        match self.entry(key) {
            HybridEntry::Occupied(_occupied) => {
                panic!("");
            }
            HybridEntry::Vacant(vacant) => vacant.insert(value),
        }
    }

    pub fn get(&self, key: &K) -> Option<V2<V>> {
        let lookup = || {
            let disk = self.disk.get(key);
            disk.map(|disk| AccountMapEntry {
                ref_count: disk.0,
                slot_list: disk.1,
            })
        };

        if true {
            lookup()
        } else {
            let in_mem = self.in_memory.get(key);
            match in_mem {
                Some(in_mem) => Some(in_mem.clone()),
                None => {
                    // we have to load this into the in-mem cache so we can get a ref_count, if nothing else
                    lookup()
                    /*
                    disk.map(|item| {
                        self.in_memory.entry(*key).map(|entry| {

                        }
                    })*/
                }
            }
        }
    }
    pub fn remove(&mut self, key: &K) {
        self.disk.delete_key(key); //.map(|x| x.entry)
    }
    pub fn len(&self) -> usize {
        self.disk.keys3(self.bin_index, None::<&Range<Pubkey>>).map(|x| x.len()).unwrap_or_default()
    }

    pub fn set_startup(&self, startup: bool) {
        self.disk.set_startup(startup);
    }

    pub fn update_or_insert_async(&self, pubkey: Pubkey, new_entry: AccountMapEntry<V>) {
        self.disk
            .update_or_insert_async(self.bin_index, pubkey, new_entry);
    }
    pub fn dump_metrics(&self) {
        self.disk.dump_metrics();
    }
}

@@ -4,7 +4,7 @@ use {
         accounts_db::{
             AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AppendVecId, BankHashInfo,
         },
-        accounts_index::AccountSecondaryIndexes,
+        accounts_index::{AccountSecondaryIndexes, AccountsIndexConfig},
         ancestors::Ancestors,
         append_vec::{AppendVec, StoredMetaWriteVersion},
         bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins},
@@ -197,7 +197,7 @@ pub(crate) fn bank_from_streams<R>(
     limit_load_slot_count_from_snapshot: Option<usize>,
     shrink_ratio: AccountShrinkThreshold,
     verify_index: bool,
-    accounts_index_bins: Option<usize>,
+    accounts_index_config: Option<AccountsIndexConfig>,
 ) -> std::result::Result<Bank, Error>
 where
     R: Read,
@@ -235,7 +235,7 @@ where
             limit_load_slot_count_from_snapshot,
             shrink_ratio,
             verify_index,
-            accounts_index_bins,
+            accounts_index_config,
         )?;
         Ok(bank)
     }};
@@ -328,7 +328,7 @@ fn reconstruct_bank_from_fields<E>(
     limit_load_slot_count_from_snapshot: Option<usize>,
     shrink_ratio: AccountShrinkThreshold,
     verify_index: bool,
-    accounts_index_bins: Option<usize>,
+    accounts_index_config: Option<AccountsIndexConfig>,
 ) -> Result<Bank, Error>
 where
     E: SerializableStorage + std::marker::Sync,
@@ -343,7 +343,7 @@ where
         limit_load_slot_count_from_snapshot,
         shrink_ratio,
         verify_index,
-        accounts_index_bins,
+        accounts_index_config,
     )?;
     accounts_db.freeze_accounts(
         &Ancestors::from(&bank_fields.ancestors),
@@ -395,7 +395,7 @@ fn reconstruct_accountsdb_from_fields<E>(
     limit_load_slot_count_from_snapshot: Option<usize>,
     shrink_ratio: AccountShrinkThreshold,
     verify_index: bool,
-    accounts_index_bins: Option<usize>,
+    accounts_index_config: Option<AccountsIndexConfig>,
 ) -> Result<AccountsDb, Error>
 where
     E: SerializableStorage + std::marker::Sync,
@@ -406,7 +406,7 @@ where
         account_secondary_indexes,
         caching_enabled,
         shrink_ratio,
-        accounts_index_bins,
+        accounts_index_config,
     );

     let AccountsDbFields(

@@ -81,7 +81,7 @@ where
         None,
         AccountShrinkThreshold::default(),
         false,
-        Some(crate::accounts_index::BINS_FOR_TESTING),
+        Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
     )
 }

@@ -243,7 +243,7 @@ fn test_bank_serialize_style(serde_style: SerdeStyle) {
         None,
         AccountShrinkThreshold::default(),
         false,
-        Some(crate::accounts_index::BINS_FOR_TESTING),
+        Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
     )
     .unwrap();
     dbank.src = ref_sc;

@@ -1,7 +1,7 @@
 use {
     crate::{
         accounts_db::{AccountShrinkThreshold, AccountsDb},
-        accounts_index::AccountSecondaryIndexes,
+        accounts_index::{AccountSecondaryIndexes, AccountsIndexConfig},
         bank::{Bank, BankSlotDelta, Builtins},
         hardened_unpack::{unpack_snapshot, ParallelSelector, UnpackError, UnpackedAppendVecMap},
         serde_snapshot::{
@@ -728,7 +728,7 @@ pub fn bank_from_snapshot_archives(
     test_hash_calculation: bool,
     accounts_db_skip_shrink: bool,
     verify_index: bool,
-    accounts_index_bins: Option<usize>,
+    accounts_index_config: Option<AccountsIndexConfig>,
 ) -> Result<(Bank, BankFromArchiveTimings)> {
     check_are_snapshots_compatible(
         full_snapshot_archive_info,
@@ -792,7 +792,7 @@ pub fn bank_from_snapshot_archives(
         limit_load_slot_count_from_snapshot,
         shrink_ratio,
         verify_index,
-        accounts_index_bins,
+        accounts_index_config,
     )?;
     measure_rebuild.stop();
     info!("{}", measure_rebuild);
@@ -838,7 +838,7 @@ pub fn bank_from_latest_snapshot_archives(
     test_hash_calculation: bool,
     accounts_db_skip_shrink: bool,
     verify_index: bool,
-    accounts_index_bins: Option<usize>,
+    accounts_index_config: Option<AccountsIndexConfig>,
 ) -> Result<(Bank, BankFromArchiveTimings)> {
     let full_snapshot_archive_info = get_highest_full_snapshot_archive_info(&snapshot_archives_dir)
         .ok_or(SnapshotError::NoSnapshotArchives)?;
@@ -876,7 +876,7 @@ pub fn bank_from_latest_snapshot_archives(
         test_hash_calculation,
         accounts_db_skip_shrink,
         verify_index,
-        accounts_index_bins,
+        accounts_index_config,
     )?;

     verify_bank_against_expected_slot_hash(
@@ -1376,7 +1376,7 @@ fn rebuild_bank_from_snapshots(
     limit_load_slot_count_from_snapshot: Option<usize>,
     shrink_ratio: AccountShrinkThreshold,
     verify_index: bool,
-    accounts_index_bins: Option<usize>,
+    accounts_index_config: Option<AccountsIndexConfig>,
 ) -> Result<Bank> {
     let (full_snapshot_version, full_snapshot_root_paths) =
         verify_unpacked_snapshots_dir_and_version(
@@ -1424,7 +1424,7 @@ fn rebuild_bank_from_snapshots(
             limit_load_slot_count_from_snapshot,
             shrink_ratio,
             verify_index,
-            accounts_index_bins,
+            accounts_index_config,
         ),
     }?,
     )
@@ -2524,7 +2524,7 @@ mod tests {
             false,
             false,
             false,
-            Some(crate::accounts_index::BINS_FOR_TESTING),
+            Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();

@@ -2615,7 +2615,7 @@ mod tests {
             false,
             false,
             false,
-            Some(crate::accounts_index::BINS_FOR_TESTING),
+            Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();

@@ -2725,7 +2725,7 @@ mod tests {
             false,
             false,
             false,
-            Some(crate::accounts_index::BINS_FOR_TESTING),
+            Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();

@@ -2824,7 +2824,7 @@ mod tests {
             false,
             false,
             false,
-            Some(crate::accounts_index::BINS_FOR_TESTING),
+            Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();

@@ -2964,7 +2964,7 @@ mod tests {
             false,
             false,
             false,
-            Some(crate::accounts_index::BINS_FOR_TESTING),
+            Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();
         assert_eq!(
@@ -3026,7 +3026,7 @@ mod tests {
             false,
             false,
             false,
-            Some(crate::accounts_index::BINS_FOR_TESTING),
+            Some(crate::accounts_index::ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
         )
         .unwrap();
         assert_eq!(

@@ -45,6 +45,7 @@ use {
         },
         accounts_index::{
             AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude,
+            AccountsIndexConfig,
         },
         hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
         snapshot_archive_info::SnapshotArchiveInfoGetter,
@@ -2387,6 +2388,10 @@ pub fn main() {
         _ => unreachable!(),
     };

+    let accounts_index_config = value_t!(matches, "accounts_index_bins", usize)
+        .ok()
+        .map(|bins| AccountsIndexConfig { bins: Some(bins) });
+
     let mut validator_config = ValidatorConfig {
         require_tower: matches.is_present("require_tower"),
         tower_storage,
@@ -2484,7 +2489,7 @@ pub fn main() {
         account_indexes,
         accounts_db_caching_enabled: !matches.is_present("no_accounts_db_caching"),
         accounts_db_test_hash_calculation: matches.is_present("accounts_db_test_hash_calculation"),
-        accounts_index_bins: value_t!(matches, "accounts_index_bins", usize).ok(),
+        accounts_index_config,
         accounts_db_skip_shrink: matches.is_present("accounts_db_skip_shrink"),
        accounts_db_use_index_hash_calculation: matches.is_present("accounts_db_index_hashing"),
        tpu_coalesce_ms,

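Both ledger-tool and the validator map the existing `accounts_index_bins` argument into the new config rather than adding a new flag. Below is a standalone sketch of that mapping, assuming clap 2.x and a hypothetical `demo` binary; only the argument id matches the diff, and the long-flag spelling is an assumption.

```rust
use clap::{value_t, App, Arg};

// Sketch of the config type from this diff, defined locally for a runnable example.
#[derive(Debug, Default, Clone, Copy)]
struct AccountsIndexConfig {
    bins: Option<usize>,
}

fn main() {
    // Hypothetical CLI: only the "accounts_index_bins" argument id mirrors the diff.
    let matches = App::new("demo")
        .arg(
            Arg::with_name("accounts_index_bins")
                .long("accounts-index-bins") // assumed spelling of the long flag
                .takes_value(true),
        )
        .get_matches();

    // Same shape as the ledger-tool and validator call sites: an absent or
    // unparsable value leaves the config as None, so defaults apply downstream.
    let accounts_index_config = value_t!(matches, "accounts_index_bins", usize)
        .ok()
        .map(|bins| AccountsIndexConfig { bins: Some(bins) });

    println!("{:?}", accounts_index_config);
}
```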