commit 7143aaa89b (parent 0d139d7ef3)
Author: Michael Vines
Date:   2020-12-13 17:26:34 -08:00

102 changed files with 543 additions and 499 deletions

View File

@@ -107,7 +107,7 @@ pub async fn upload_confirmed_blocks(
.difference(&bigtable_slots)
.cloned()
.collect::<Vec<_>>();
blocks_to_upload.sort();
blocks_to_upload.sort_unstable();
blocks_to_upload
};
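The sort() to sort_unstable() changes here and in the later hunks all follow the same reasoning as clippy's stable_sort_primitive lint: for slices of primitives such as Slot (a u64 alias in this codebase), equal elements are indistinguishable, so an unstable sort gives the same result while sorting in place without the stable sort's temporary allocation. A minimal sketch with made-up slot numbers, not taken from this commit:

```rust
fn main() {
    // Hypothetical slot list; Slot is a u64 alias in this codebase.
    let mut slots: Vec<u64> = vec![9, 2, 7, 2, 5];
    // Unstable sort: same result for primitives, no extra allocation.
    slots.sort_unstable();
    assert_eq!(slots, vec![2, 2, 5, 7, 9]);
}
```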

View File

@@ -441,10 +441,8 @@ impl Blockstore {
}
pub fn is_full(&self, slot: Slot) -> bool {
if let Ok(meta) = self.meta_cf.get(slot) {
if let Some(meta) = meta {
return meta.is_full();
}
if let Ok(Some(meta)) = self.meta_cf.get(slot) {
return meta.is_full();
}
false
}
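The is_full rewrite collapses two nested if let bindings into a single match on the nested Ok(Some(..)) pattern; behavior is unchanged, since any Err or Ok(None) still falls through to false. A standalone sketch of the same shape, using a hypothetical fallible lookup in place of the real meta_cf column:

```rust
// Hypothetical lookup: may fail (Err) or find nothing (Ok(None)).
fn lookup(slot: u64) -> Result<Option<&'static str>, &'static str> {
    match slot {
        0 => Ok(Some("meta")),
        1 => Ok(None),
        _ => Err("db error"),
    }
}

fn is_present(slot: u64) -> bool {
    // One pattern replaces the nested `if let Ok(meta)` / `if let Some(meta)`.
    if let Ok(Some(_meta)) = lookup(slot) {
        return true;
    }
    false
}

fn main() {
    assert!(is_present(0));
    assert!(!is_present(1));
    assert!(!is_present(2));
}
```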
@@ -467,10 +465,10 @@ impl Blockstore {
.unwrap_or(0)
}
pub fn slot_meta_iterator<'a>(
&'a self,
pub fn slot_meta_iterator(
&self,
slot: Slot,
) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + 'a> {
) -> Result<impl Iterator<Item = (Slot, SlotMeta)> + '_> {
let meta_iter = self
.db
.iter::<cf::SlotMeta>(IteratorMode::From(slot, IteratorDirection::Forward))?;
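This is the first of many signatures in the file where a named lifetime <'a> tying &'a self to the returned impl Iterator + 'a is replaced by the anonymous lifetime '_, which borrows from &self implicitly; borrow checking is unchanged, only the signature noise goes away. A reduced sketch with a hypothetical Store type standing in for Blockstore:

```rust
struct Store {
    items: Vec<(u64, String)>,
}

impl Store {
    // Before: fn iter_from<'a>(&'a self, start: u64) -> impl Iterator<Item = &'a str> + 'a
    // After: '_ ties the iterator's borrow to &self without naming a lifetime.
    fn iter_from(&self, start: u64) -> impl Iterator<Item = &str> + '_ {
        self.items
            .iter()
            .filter(move |(key, _)| *key >= start)
            .map(|(_, value)| value.as_str())
    }
}

fn main() {
    let store = Store {
        items: vec![(1, "a".to_string()), (3, "b".to_string())],
    };
    assert_eq!(store.iter_from(2).count(), 1);
}
```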
@@ -484,21 +482,18 @@ impl Blockstore {
}
#[allow(dead_code)]
pub fn live_slots_iterator<'a>(
&'a self,
root: Slot,
) -> impl Iterator<Item = (Slot, SlotMeta)> + 'a {
pub fn live_slots_iterator(&self, root: Slot) -> impl Iterator<Item = (Slot, SlotMeta)> + '_ {
let root_forks = NextSlotsIterator::new(root, self);
let orphans_iter = self.orphans_iterator(root + 1).unwrap();
root_forks.chain(orphans_iter.flat_map(move |orphan| NextSlotsIterator::new(orphan, self)))
}
pub fn slot_data_iterator<'a>(
&'a self,
pub fn slot_data_iterator(
&self,
slot: Slot,
index: u64,
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + 'a> {
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
let slot_iterator = self.db.iter::<cf::ShredData>(IteratorMode::From(
(slot, index),
IteratorDirection::Forward,
@@ -506,11 +501,11 @@ impl Blockstore {
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
}
pub fn slot_coding_iterator<'a>(
&'a self,
pub fn slot_coding_iterator(
&self,
slot: Slot,
index: u64,
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + 'a> {
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + '_> {
let slot_iterator = self.db.iter::<cf::ShredCode>(IteratorMode::From(
(slot, index),
IteratorDirection::Forward,
@@ -518,10 +513,7 @@ impl Blockstore {
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
}
pub fn rooted_slot_iterator<'a>(
&'a self,
slot: Slot,
) -> Result<impl Iterator<Item = u64> + 'a> {
pub fn rooted_slot_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
let slot_iterator = self
.db
.iter::<cf::Root>(IteratorMode::From(slot, IteratorDirection::Forward))?;
@@ -929,7 +921,7 @@ impl Blockstore {
&self.completed_slots_senders,
should_signal,
newly_completed_slots,
)?;
);
total_start.stop();
@@ -1690,7 +1682,7 @@ impl Blockstore {
.map(|(iter_slot, _)| iter_slot)
.take(timestamp_sample_range)
.collect();
timestamp_slots.sort();
timestamp_slots.sort_unstable();
get_slots.stop();
datapoint_info!(
"blockstore-get-timestamp-slots",
@@ -2746,17 +2738,14 @@ impl Blockstore {
.is_some()
}
pub fn orphans_iterator<'a>(&'a self, slot: Slot) -> Result<impl Iterator<Item = u64> + 'a> {
pub fn orphans_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = u64> + '_> {
let orphans_iter = self
.db
.iter::<cf::Orphans>(IteratorMode::From(slot, IteratorDirection::Forward))?;
Ok(orphans_iter.map(|(slot, _)| slot))
}
pub fn dead_slots_iterator<'a>(
&'a self,
slot: Slot,
) -> Result<impl Iterator<Item = Slot> + 'a> {
pub fn dead_slots_iterator(&self, slot: Slot) -> Result<impl Iterator<Item = Slot> + '_> {
let dead_slots_iterator = self
.db
.iter::<cf::DeadSlots>(IteratorMode::From(slot, IteratorDirection::Forward))?;
@@ -2981,7 +2970,7 @@ fn send_signals(
completed_slots_senders: &[SyncSender<Vec<u64>>],
should_signal: bool,
newly_completed_slots: Vec<u64>,
) -> Result<()> {
) {
if should_signal {
for signal in new_shreds_signals {
let _ = signal.try_send(true);
@@ -3009,8 +2998,6 @@ fn send_signals(
}
}
}
Ok(())
}
fn commit_slot_meta_working_set(
@@ -3051,7 +3038,7 @@ fn find_slot_meta_else_create<'a>(
chained_slots: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
slot_index: u64,
) -> Result<Rc<RefCell<SlotMeta>>> {
let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index)?;
let result = find_slot_meta_in_cached_state(working_set, chained_slots, slot_index);
if let Some(slot) = result {
Ok(slot)
} else {
@@ -3061,10 +3048,10 @@ fn find_slot_meta_else_create<'a>(
// Search the database for that slot metadata. If still no luck, then
// create a dummy orphan slot in the database
fn find_slot_meta_in_db_else_create<'a>(
fn find_slot_meta_in_db_else_create(
db: &Database,
slot: Slot,
insert_map: &'a mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
insert_map: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
) -> Result<Rc<RefCell<SlotMeta>>> {
if let Some(slot_meta) = db.column::<cf::SlotMeta>().get(slot)? {
insert_map.insert(slot, Rc::new(RefCell::new(slot_meta)));
@@ -3083,13 +3070,13 @@ fn find_slot_meta_in_cached_state<'a>(
working_set: &'a HashMap<u64, SlotMetaWorkingSetEntry>,
chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
slot: Slot,
) -> Result<Option<Rc<RefCell<SlotMeta>>>> {
) -> Option<Rc<RefCell<SlotMeta>>> {
if let Some(entry) = working_set.get(&slot) {
Ok(Some(entry.new_slot_meta.clone()))
Some(entry.new_slot_meta.clone())
} else if let Some(entry) = chained_slots.get(&slot) {
Ok(Some(entry.clone()))
Some(entry.clone())
} else {
Ok(None)
None
}
}
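send_signals, find_slot_meta_in_cached_state, and later hunks (process_bank_0, the Rocks helpers) all get the same treatment: a function that can no longer fail stops returning Result, and its callers drop the ? or unwrap(). A sketch of the before/after shape, with hypothetical names rather than the real blockstore functions:

```rust
use std::collections::HashMap;

// Before (sketch): a cache lookup that never actually failed still
// forced every caller to write `?` or `.unwrap()`:
//
//   fn find_cached(cache: &HashMap<u64, String>, key: u64) -> Result<Option<&String>, String> {
//       Ok(cache.get(&key))
//   }

// After: the signature states the truth, and callers just match on Option.
fn find_cached(cache: &HashMap<u64, String>, key: u64) -> Option<&String> {
    cache.get(&key)
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(3_u64, "meta".to_string());
    // No `?` and no error path to thread through.
    assert!(find_cached(&cache, 3).is_some());
    assert!(find_cached(&cache, 4).is_none());
}
```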
@@ -3596,7 +3583,7 @@ pub mod tests {
use solana_storage_proto::convert::generated;
use solana_transaction_status::{InnerInstructions, Reward, Rewards};
use solana_vote_program::{vote_instruction, vote_state::Vote};
use std::{iter::FromIterator, time::Duration};
use std::time::Duration;
// used for tests only
pub(crate) fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
@@ -4062,7 +4049,7 @@ pub mod tests {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Write entries
let num_slots = 5 as u64;
let num_slots = 5_u64;
let mut index = 0;
for slot in 0..num_slots {
let entries = create_ticks(slot + 1, 0, Hash::default());
@@ -4094,8 +4081,8 @@ pub mod tests {
let blockstore_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 5 as u64;
let shreds_per_slot = 5 as u64;
let num_slots = 5_u64;
let shreds_per_slot = 5_u64;
let entry_serialized_size =
bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
let entries_per_slot =
@@ -4437,9 +4424,9 @@ pub mod tests {
all_shreds.shuffle(&mut thread_rng());
ledger.insert_shreds(all_shreds, None, false).unwrap();
let mut result = recvr.try_recv().unwrap();
result.sort();
result.sort_unstable();
slots.push(disconnected_slot);
slots.sort();
slots.sort_unstable();
assert_eq!(result, slots);
}
@@ -4799,23 +4786,22 @@ pub mod tests {
blockstore.meta_cf.put(0, &meta0).unwrap();
// Slot exists, chains to nothing
let expected: HashMap<u64, Vec<u64>> =
HashMap::from_iter(vec![(0, vec![])].into_iter());
let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![])].into_iter().collect();
assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
meta0.next_slots = vec![1, 2];
blockstore.meta_cf.put(0, &meta0).unwrap();
// Slot exists, chains to some other slots
let expected: HashMap<u64, Vec<u64>> =
HashMap::from_iter(vec![(0, vec![1, 2])].into_iter());
let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2])].into_iter().collect();
assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected);
assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected);
let mut meta3 = SlotMeta::new(3, 1);
meta3.next_slots = vec![10, 5];
blockstore.meta_cf.put(3, &meta3).unwrap();
let expected: HashMap<u64, Vec<u64>> =
HashMap::from_iter(vec![(0, vec![1, 2]), (3, vec![10, 5])].into_iter());
let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])]
.into_iter()
.collect();
assert_eq!(blockstore.get_slots_since(&[0, 1, 3]).unwrap(), expected);
}
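These test cleanups replace HashMap::from_iter(vec![..].into_iter()) with the equivalent vec![..].into_iter().collect(), which reads in the usual builder direction and needs no explicit import of std::iter::FromIterator (hence the trimmed use line in the test imports above). A small sketch of the same construction:

```rust
use std::collections::HashMap;

fn main() {
    // Equivalent to HashMap::from_iter(vec![...].into_iter()), but without
    // importing std::iter::FromIterator explicitly.
    let expected: HashMap<u64, Vec<u64>> = vec![(0, vec![1, 2]), (3, vec![10, 5])]
        .into_iter()
        .collect();
    assert_eq!(expected.len(), 2);
    assert_eq!(expected[&0], vec![1, 2]);
}
```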
@@ -4902,7 +4888,7 @@ pub mod tests {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create shreds and entries
let num_entries = 20 as u64;
let num_entries = 20_u64;
let mut entries = vec![];
let mut shreds = vec![];
let mut num_shreds_per_slot = 0;
@@ -5807,8 +5793,10 @@ pub mod tests {
ledger.insert_shreds(more_shreds, None, false).unwrap();
ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap();
let mut parent_meta = SlotMeta::default();
parent_meta.parent_slot = std::u64::MAX;
let parent_meta = SlotMeta {
parent_slot: std::u64::MAX,
..SlotMeta::default()
};
ledger
.put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap())
.unwrap();
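Here a default-then-mutate construction is replaced by struct update syntax, which is what clippy::field_reassign_with_default asks for; the same lint is explicitly allowed rather than fixed in a blockstore_processor test later in this commit. A sketch with a hypothetical Meta type, since SlotMeta's full field list is not shown in the diff:

```rust
#[derive(Default, Debug)]
struct Meta {
    parent_slot: u64,
    is_connected: bool,
}

fn main() {
    // Before: let mut meta = Meta::default(); meta.parent_slot = std::u64::MAX;
    // After: struct update syntax fills the remaining fields from Default.
    let meta = Meta {
        parent_slot: std::u64::MAX,
        ..Meta::default()
    };
    assert_eq!(meta.parent_slot, std::u64::MAX);
    assert!(!meta.is_connected);
}
```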

View File

@@ -95,7 +95,7 @@ impl Blockstore {
.batch()
.expect("Database Error: Failed to get write batch");
// delete range cf is not inclusive
let to_slot = to_slot.checked_add(1).unwrap_or_else(|| std::u64::MAX);
let to_slot = to_slot.checked_add(1).unwrap_or(std::u64::MAX);
let mut delete_range_timer = Measure::start("delete_range");
let mut columns_purged = self
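Because std::u64::MAX is a plain constant, there is nothing worth deferring behind a closure, so unwrap_or is the simpler spelling (the sort of pattern clippy's unnecessary_lazy_evaluations lint flags); the checked_add(1) keeps the exclusive upper bound from wrapping when purging up to the last possible slot. A small sketch of the saturation behavior:

```rust
fn main() {
    // The delete range's upper bound is exclusive, so purging through
    // `to_slot` means deleting up to `to_slot + 1`; saturate instead of
    // wrapping if `to_slot` is already the maximum slot.
    let to_slot: u64 = std::u64::MAX;
    let upper = to_slot.checked_add(1).unwrap_or(std::u64::MAX);
    assert_eq!(upper, std::u64::MAX);

    let to_slot: u64 = 10;
    assert_eq!(to_slot.checked_add(1).unwrap_or(std::u64::MAX), 11);
}
```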

View File

@@ -360,11 +360,7 @@ impl Rocks {
Ok(())
}
fn iterator_cf<C>(
&self,
cf: &ColumnFamily,
iterator_mode: IteratorMode<C::Index>,
) -> Result<DBIterator>
fn iterator_cf<C>(&self, cf: &ColumnFamily, iterator_mode: IteratorMode<C::Index>) -> DBIterator
where
C: Column,
{
@@ -377,18 +373,15 @@ impl Rocks {
IteratorMode::Start => RocksIteratorMode::Start,
IteratorMode::End => RocksIteratorMode::End,
};
let iter = self.0.iterator_cf(cf, iterator_mode);
Ok(iter)
self.0.iterator_cf(cf, iterator_mode)
}
fn raw_iterator_cf(&self, cf: &ColumnFamily) -> Result<DBRawIterator> {
let raw_iter = self.0.raw_iterator_cf(cf);
Ok(raw_iter)
fn raw_iterator_cf(&self, cf: &ColumnFamily) -> DBRawIterator {
self.0.raw_iterator_cf(cf)
}
fn batch(&self) -> Result<RWriteBatch> {
Ok(RWriteBatch::default())
fn batch(&self) -> RWriteBatch {
RWriteBatch::default()
}
fn write(&self, batch: RWriteBatch) -> Result<()> {
@@ -766,15 +759,15 @@ impl Database {
}
}
pub fn iter<'a, C>(
&'a self,
pub fn iter<C>(
&self,
iterator_mode: IteratorMode<C::Index>,
) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + 'a>
) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + '_>
where
C: Column + ColumnName,
{
let cf = self.cf_handle::<C>();
let iter = self.backend.iterator_cf::<C>(cf, iterator_mode)?;
let iter = self.backend.iterator_cf::<C>(cf, iterator_mode);
Ok(iter.map(|(key, value)| (C::index(&key), value)))
}
@@ -798,11 +791,11 @@ impl Database {
#[inline]
pub fn raw_iterator_cf(&self, cf: &ColumnFamily) -> Result<DBRawIterator> {
self.backend.raw_iterator_cf(cf)
Ok(self.backend.raw_iterator_cf(cf))
}
pub fn batch(&self) -> Result<WriteBatch> {
let write_batch = self.backend.batch()?;
let write_batch = self.backend.batch();
let map = self
.backend
.columns()
@@ -845,12 +838,12 @@ where
self.backend.get_cf(self.handle(), &C::key(key))
}
pub fn iter<'a>(
&'a self,
pub fn iter(
&self,
iterator_mode: IteratorMode<C::Index>,
) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + 'a> {
) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)> + '_> {
let cf = self.handle();
let iter = self.backend.iterator_cf::<C>(cf, iterator_mode)?;
let iter = self.backend.iterator_cf::<C>(cf, iterator_mode);
Ok(iter.map(|(key, value)| (C::index(&key), value)))
}
@@ -906,7 +899,7 @@ where
#[cfg(test)]
pub fn is_empty(&self) -> Result<bool> {
let mut iter = self.backend.raw_iterator_cf(self.handle())?;
let mut iter = self.backend.raw_iterator_cf(self.handle());
iter.seek_to_first();
Ok(!iter.valid())
}
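The Rocks helpers stop returning a Result they could never produce; where a public Database method still advertises a Result (as raw_iterator_cf does above, presumably to keep its signature unchanged for callers), the Ok(..) wrapping moves out to that boundary. A sketch of the shape, with hypothetical Inner/Outer types:

```rust
// Hypothetical error type standing in for the blockstore's Result alias.
type Result<T> = std::result::Result<T, String>;

struct Inner;

impl Inner {
    // Infallible helper: no Result, nothing for internal callers to unwrap.
    fn batch(&self) -> Vec<u8> {
        Vec::new()
    }
}

struct Outer {
    backend: Inner,
}

impl Outer {
    // Public wrapper keeps its Result signature; the Ok(..) now lives here,
    // at the boundary, instead of inside the helper.
    fn batch(&self) -> Result<Vec<u8>> {
        Ok(self.backend.batch())
    }
}

fn main() {
    let db = Outer { backend: Inner };
    assert!(db.batch().unwrap().is_empty());
}
```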

View File

@@ -375,7 +375,7 @@ pub fn process_blockstore(
let bank0 = Arc::new(bank0);
info!("processing ledger for slot 0...");
let recyclers = VerifyRecyclers::default();
process_bank_0(&bank0, blockstore, &opts, &recyclers)?;
process_bank_0(&bank0, blockstore, &opts, &recyclers);
do_process_blockstore_from_root(blockstore, bank0, &opts, &recyclers, None)
}
@@ -738,7 +738,7 @@ fn process_bank_0(
blockstore: &Blockstore,
opts: &ProcessOptions,
recyclers: &VerifyRecyclers,
) -> result::Result<(), BlockstoreProcessorError> {
) {
assert_eq!(bank0.slot(), 0);
let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
confirm_full_slot(
@@ -752,7 +752,6 @@ fn process_bank_0(
)
.expect("processing for bank 0 must succeed");
bank0.freeze();
Ok(())
}
// Given a bank, add its children to the pending slots queue if those children slots are
@@ -2715,7 +2714,7 @@ pub mod tests {
..ProcessOptions::default()
};
let recyclers = VerifyRecyclers::default();
process_bank_0(&bank0, &blockstore, &opts, &recyclers).unwrap();
process_bank_0(&bank0, &blockstore, &opts, &recyclers);
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
confirm_full_slot(
&blockstore,
@@ -2901,7 +2900,7 @@ pub mod tests {
fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> {
let mut slots: Vec<_> = bank_forks.frozen_banks().keys().cloned().collect();
slots.sort();
slots.sort_unstable();
slots
}
@@ -3210,6 +3209,7 @@ pub mod tests {
}
#[test]
#[allow(clippy::field_reassign_with_default)]
fn test_supermajority_root_from_vote_accounts() {
let convert_to_vote_accounts =
|roots_stakes: Vec<(Slot, u64)>| -> Vec<(Pubkey, (u64, ArcVoteAccount))> {

View File

@@ -322,7 +322,7 @@ mod tests {
LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order, MAX_SCHEDULES);
assert_eq!(cached_schedules.len(), MAX_SCHEDULES);
let mut keys: Vec<_> = cached_schedules.keys().cloned().collect();
keys.sort();
keys.sort_unstable();
let expected: Vec<_> = (1..=MAX_SCHEDULES as u64).collect();
let expected_order: VecDeque<_> = (1..=MAX_SCHEDULES as u64).collect();
assert_eq!(expected, keys);

View File

@@ -500,6 +500,7 @@ impl Shredder {
reference_tick: u8,
version: u16,
) -> Result<Self> {
#[allow(clippy::manual_range_contains)]
if fec_rate > 1.0 || fec_rate < 0.0 {
Err(ShredError::InvalidFecRate(fec_rate))
} else if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) {
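The new allow acknowledges clippy's manual_range_contains lint, which would rather see the fec_rate bounds check written with RangeInclusive::contains; the commit keeps the explicit comparisons and silences the lint instead. For reference, a sketch of the form the lint suggests (the validation logic itself is unchanged by this commit):

```rust
fn fec_rate_is_invalid(fec_rate: f32) -> bool {
    // The form clippy::manual_range_contains suggests in place of
    // `fec_rate > 1.0 || fec_rate < 0.0`.
    !(0.0..=1.0).contains(&fec_rate)
}

fn main() {
    assert!(fec_rate_is_invalid(1.5));
    assert!(fec_rate_is_invalid(-0.1));
    assert!(!fec_rate_is_invalid(0.5));
}
```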

View File

@@ -37,7 +37,7 @@ fn test_multiple_threads_insert_shred() {
// Check slot 0 has the correct children
let mut meta0 = blockstore.meta(0).unwrap().unwrap();
meta0.next_slots.sort();
meta0.next_slots.sort_unstable();
let expected_next_slots: Vec<_> = (1..num_threads + 1).collect();
assert_eq!(meta0.next_slots, expected_next_slots);