chore: cargo +nightly clippy --fix -Z unstable-options
committed by Michael Vines
parent 3570b00560
commit 6514096a67
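Every change in this diff comes from one lint, `clippy::needless_borrow`: it fires when `&expr` is written where `expr` is already a reference (or where the compiler would auto-borrow anyway), leaving a `&&T` that rustc has to peel back off. A minimal sketch of the pattern, with hypothetical names not taken from this commit:

```rust
fn byte_len(s: &str) -> usize {
    s.len()
}

fn main() {
    let owned = String::from("ledger");
    let borrowed: &String = &owned;
    // Pre-fix style: `byte_len(&borrowed)` passes a `&&String`, which only
    // reaches `&str` through an extra dereference. clippy::needless_borrow
    // (applied here by `cargo clippy --fix`) drops the redundant `&`.
    assert_eq!(byte_len(borrowed), 6);
}
```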
```diff
@@ -58,8 +58,8 @@ pub fn load(
         )
         {
             return load_from_snapshot(
-                &genesis_config,
-                &blockstore,
+                genesis_config,
+                blockstore,
                 account_paths,
                 shrink_paths,
                 snapshot_config,
@@ -79,8 +79,8 @@ pub fn load(
     }

     load_from_genesis(
-        &genesis_config,
-        &blockstore,
+        genesis_config,
+        blockstore,
         account_paths,
         process_options,
         cache_block_meta_sender,
@@ -97,8 +97,8 @@ fn load_from_genesis(
     info!("Processing ledger from genesis");
     to_loadresult(
         blockstore_processor::process_blockstore(
-            &genesis_config,
-            &blockstore,
+            genesis_config,
+            blockstore,
             account_paths,
             process_options,
             cache_block_meta_sender,
```
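All three hunks above are the same shape: inside `load` and `load_from_genesis` the arguments are already `&GenesisConfig` and `&Blockstore`, so re-borrowing at each call site only added a layer. A simplified sketch of that call chain (the signatures here are stand-ins, not the real ones):

```rust
struct GenesisConfig;
struct Blockstore;

fn load_from_genesis(_genesis_config: &GenesisConfig, _blockstore: &Blockstore) {}

// `genesis_config` and `blockstore` are already references, so they can be
// passed straight through; `&genesis_config` would be a `&&GenesisConfig`.
fn load(genesis_config: &GenesisConfig, blockstore: &Blockstore) {
    load_from_genesis(genesis_config, blockstore);
}

fn main() {
    load(&GenesisConfig, &Blockstore);
}
```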
```diff
@@ -705,7 +705,7 @@ impl Blockstore {
         for (&(slot, set_index), erasure_meta) in erasure_metas.iter() {
             let index_meta_entry = index_working_set.get_mut(&slot).expect("Index");
             let index = &mut index_meta_entry.index;
-            match erasure_meta.status(&index) {
+            match erasure_meta.status(index) {
                 ErasureMetaStatus::CanRecover => {
                     Self::recover_shreds(
                         index,
@@ -838,7 +838,7 @@ impl Blockstore {
         let mut num_recovered_exists = 0;
         if let Some(leader_schedule_cache) = leader_schedule {
             let recovered_data = Self::try_shred_recovery(
-                &db,
+                db,
                 &erasure_metas,
                 &mut index_working_set,
                 &mut just_inserted_data_shreds,
@@ -1135,14 +1135,14 @@ impl Blockstore {
             let maybe_shred = self.get_coding_shred(slot, coding_index);
             if let Ok(Some(shred_data)) = maybe_shred {
                 let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap();
-                if Self::erasure_mismatch(&potential_shred, &shred) {
+                if Self::erasure_mismatch(&potential_shred, shred) {
                     conflicting_shred = Some(potential_shred.payload);
                 }
                 break;
             } else if let Some(potential_shred) =
                 just_received_coding_shreds.get(&(slot, coding_index))
             {
-                if Self::erasure_mismatch(&potential_shred, &shred) {
+                if Self::erasure_mismatch(potential_shred, shred) {
                     conflicting_shred = Some(potential_shred.payload.clone());
                 }
                 break;
```
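Worth noting in the hunk above: the lint keeps `&potential_shred` in the first branch, where `potential_shred` is an owned `Shred`, but strips every borrow in the second branch, where it comes out of a map lookup already as `&Shred`. A small sketch of that asymmetry with a hypothetical `Shred` type:

```rust
use std::collections::HashMap;

#[derive(PartialEq)]
struct Shred(u32);

fn mismatch(a: &Shred, b: &Shred) -> bool {
    a != b
}

fn check(shred: &Shred, map: &HashMap<u64, Shred>) -> bool {
    let owned = Shred(7);
    // `owned` is a value, so its borrow is required; `shred` is already a
    // reference, so `&shred` would be a needless `&&Shred`.
    if mismatch(&owned, shred) {
        return true;
    }
    if let Some(found) = map.get(&0) {
        // `found` is already `&Shred`: no borrows needed at all.
        return mismatch(found, shred);
    }
    false
}

fn main() {
    let mut map = HashMap::new();
    map.insert(0u64, Shred(7));
    assert!(check(&Shred(9), &map));
}
```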
```diff
@@ -1183,7 +1183,7 @@ impl Blockstore {
         let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();

         if !is_trusted {
-            if Self::is_data_shred_present(&shred, slot_meta, &index_meta.data()) {
+            if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) {
                 handle_duplicate(shred);
                 return Err(InsertDataShredError::Exists);
             }
@@ -1474,7 +1474,7 @@ impl Blockstore {
             index as u32,
             new_consumed,
             shred.reference_tick(),
-            &data_index,
+            data_index,
         );
         if slot_meta.is_full() {
             datapoint_info!(
@@ -1689,7 +1689,7 @@ impl Blockstore {
             }
             break;
         }
-        let (current_slot, index) = C::index(&db_iterator.key().expect("Expect a valid key"));
+        let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key"));

        let current_index = {
            if current_slot > slot {
@@ -1702,7 +1702,7 @@ impl Blockstore {
            let upper_index = cmp::min(current_index, end_index);
            // the tick that will be used to figure out the timeout for this hole
            let reference_tick = u64::from(Shred::reference_tick_from_data(
-                &db_iterator.value().expect("couldn't read value"),
+                db_iterator.value().expect("couldn't read value"),
            ));

            if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS {
@@ -2437,7 +2437,7 @@ impl Blockstore {
                 address_signatures.extend(
                     signatures
                         .into_iter()
-                        .filter(|(_, signature)| !excluded_signatures.contains(&signature)),
+                        .filter(|(_, signature)| !excluded_signatures.contains(signature)),
                 )
             } else {
                 address_signatures.append(&mut signatures);
@@ -2520,7 +2520,7 @@ impl Blockstore {
         next_primary_index_iter_timer.stop();
         let mut address_signatures: Vec<(Slot, Signature)> = address_signatures
             .into_iter()
-            .filter(|(_, signature)| !until_excluded_signatures.contains(&signature))
+            .filter(|(_, signature)| !until_excluded_signatures.contains(signature))
             .collect();
         address_signatures.truncate(limit);
```
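The two `filter` fixes are the closure flavor of the lint: `Iterator::filter` hands its closure a `&Item`, so the pattern `|(_, signature)|` already binds `signature` as a reference, and `contains(&signature)` was passing a `&&Signature`. A minimal sketch with simplified types:

```rust
use std::collections::HashSet;

fn main() {
    let excluded: HashSet<u64> = vec![2u64, 4].into_iter().collect();
    let signatures = vec![(0u64, 1u64), (1, 2), (2, 3), (3, 4)];

    // `filter` receives `&(u64, u64)`; match ergonomics make `signature` a
    // `&u64`, which is exactly what `HashSet::contains` expects.
    let kept: Vec<(u64, u64)> = signatures
        .into_iter()
        .filter(|(_, signature)| !excluded.contains(signature))
        .collect();

    assert_eq!(kept, vec![(0, 1), (2, 3)]);
}
```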
```diff
@@ -2993,7 +2993,7 @@ impl Blockstore {
     }

     pub fn scan_and_fix_roots(&self, exit: &Arc<AtomicBool>) -> Result<()> {
-        let ancestor_iterator = AncestorIterator::new(self.last_root(), &self)
+        let ancestor_iterator = AncestorIterator::new(self.last_root(), self)
             .take_while(|&slot| slot >= self.lowest_cleanup_slot());

         let mut find_missing_roots = Measure::start("find_missing_roots");
@@ -3278,8 +3278,8 @@ fn commit_slot_meta_working_set(
         }
         // Check if the working copy of the metadata has changed
         if Some(meta) != meta_backup.as_ref() {
-            should_signal = should_signal || slot_has_updates(meta, &meta_backup);
-            write_batch.put::<cf::SlotMeta>(*slot, &meta)?;
+            should_signal = should_signal || slot_has_updates(meta, meta_backup);
+            write_batch.put::<cf::SlotMeta>(*slot, meta)?;
         }
     }

@@ -3430,7 +3430,7 @@ fn handle_chaining_for_slot(
     traverse_children_mut(
         db,
         slot,
-        &meta,
+        meta,
         working_set,
         new_chained_slots,
         slot_function,
@@ -3520,7 +3520,7 @@ pub fn create_new_ledger(
     access_type: AccessType,
 ) -> Result<Hash> {
     Blockstore::destroy(ledger_path)?;
-    genesis_config.write(&ledger_path)?;
+    genesis_config.write(ledger_path)?;

     // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
     let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?;
@@ -112,7 +112,7 @@ fn execute_batch(
     let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();

     let pre_token_balances = if record_token_balances {
-        collect_token_balances(&bank, &batch, &mut mint_decimals)
+        collect_token_balances(bank, batch, &mut mint_decimals)
     } else {
         vec![]
     };
@@ -139,7 +139,7 @@ fn execute_batch(
     if let Some(transaction_status_sender) = transaction_status_sender {
         let txs = batch.transactions_iter().cloned().collect();
         let post_token_balances = if record_token_balances {
-            collect_token_balances(&bank, &batch, &mut mint_decimals)
+            collect_token_balances(bank, batch, &mut mint_decimals)
         } else {
             vec![]
         };
@@ -327,7 +327,7 @@ fn process_entries_with_callback(
         timings,
     )?;
     for hash in tick_hashes {
-        bank.register_tick(&hash);
+        bank.register_tick(hash);
     }
     Ok(())
 }
@@ -396,7 +396,7 @@ pub fn process_blockstore(

     // Setup bank for slot 0
     let bank0 = Bank::new_with_paths(
-        &genesis_config,
+        genesis_config,
         account_paths,
         &opts.frozen_accounts,
         opts.debug_keys.clone(),
@@ -896,9 +896,9 @@ fn process_next_slots(
         // handles any partials
         if next_meta.is_full() {
             let next_bank = Arc::new(Bank::new_from_parent(
-                &bank,
+                bank,
                 &leader_schedule_cache
-                    .slot_leader_at(*next_slot, Some(&bank))
+                    .slot_leader_at(*next_slot, Some(bank))
                     .unwrap(),
                 *next_slot,
             ));
@@ -1048,7 +1048,7 @@ fn load_frozen_forks(
                     *root = new_root_bank.slot();
                     last_root = new_root_bank.slot();

-                    leader_schedule_cache.set_root(&new_root_bank);
+                    leader_schedule_cache.set_root(new_root_bank);
                     new_root_bank.squash();

                     if last_free.elapsed() > Duration::from_secs(10) {
@@ -3093,7 +3093,7 @@ pub mod tests {
         account_paths: Vec<PathBuf>,
     ) -> EpochSchedule {
         let bank = Bank::new_with_paths(
-            &genesis_config,
+            genesis_config,
             account_paths,
             &[],
             None,
@@ -3274,7 +3274,7 @@ pub mod tests {
         slot_leader_keypair: &Arc<Keypair>,
     ) {
         // Add votes to `last_slot` so that `root` will be confirmed
-        let vote_entry = next_entry(&parent_blockhash, 1, vec![vote_tx]);
+        let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]);
         let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash);
         entries.insert(0, vote_entry);
         blockstore
@@ -3285,7 +3285,7 @@ pub mod tests {
                 ticks_per_slot,
                 Some(parent_slot),
                 true,
-                &slot_leader_keypair,
+                slot_leader_keypair,
                 entries,
                 0,
             )
@@ -682,7 +682,7 @@ impl EntrySlice for [Entry] {
 }

 pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
-    let entry = Entry::new(&start, num_hashes, transactions);
+    let entry = Entry::new(start, num_hashes, transactions);
     *start = entry.hash;
     entry
 }
```
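In `next_entry_mut` the parameter `start` is already a `&mut Hash`, so `Entry::new(&start, ...)` built a `&&mut Hash` for a callee that only wants a shared borrow; passing `start` lets it reborrow as `&Hash` implicitly. A sketch of that shape with hypothetical `Entry` and `Hash` types:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
struct Hash(u64);

struct Entry {
    hash: Hash,
}

impl Entry {
    // Only needs a shared borrow of the starting hash.
    fn new(start: &Hash, num_hashes: u64) -> Self {
        Entry {
            hash: Hash(start.0.wrapping_add(num_hashes)),
        }
    }
}

// `start` is already a reference; `&start` would be `&&mut Hash`, while a
// bare `start` reborrows to `&Hash` at the call site.
fn next_entry_mut(start: &mut Hash, num_hashes: u64) -> Entry {
    let entry = Entry::new(start, num_hashes);
    *start = entry.hash;
    entry
}

fn main() {
    let mut h = Hash(0);
    let e = next_entry_mut(&mut h, 3);
    assert_eq!(h, Hash(3));
    assert_eq!(e.hash, h);
}
```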
```diff
@@ -737,7 +737,7 @@ mod tests {
     #[test]
     fn test_entry_verify() {
         let zero = Hash::default();
-        let one = hash(&zero.as_ref());
+        let one = hash(zero.as_ref());
         assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used
         assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
         assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
@@ -826,7 +826,7 @@ mod tests {
     fn test_verify_slice1() {
         solana_logger::setup();
         let zero = Hash::default();
-        let one = hash(&zero.as_ref());
+        let one = hash(zero.as_ref());
         assert!(vec![][..].verify(&zero)); // base case
         assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
         assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
@@ -841,8 +841,8 @@ mod tests {
     fn test_verify_slice_with_hashes1() {
         solana_logger::setup();
         let zero = Hash::default();
-        let one = hash(&zero.as_ref());
-        let two = hash(&one.as_ref());
+        let one = hash(zero.as_ref());
+        let two = hash(one.as_ref());
         assert!(vec![][..].verify(&one)); // base case
         assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1
         assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad
@@ -861,8 +861,8 @@ mod tests {
     fn test_verify_slice_with_hashes_and_transactions() {
         solana_logger::setup();
         let zero = Hash::default();
-        let one = hash(&zero.as_ref());
-        let two = hash(&one.as_ref());
+        let one = hash(zero.as_ref());
+        let two = hash(one.as_ref());
         let alice_keypair = Keypair::new();
         let bob_keypair = Keypair::new();
         let tx0 = system_transaction::transfer(&alice_keypair, &bob_keypair.pubkey(), 1, one);
@@ -63,9 +63,9 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
     // Note: Use unstable sort, because we dedup right after to remove the equal elements.
     stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| {
         if r_stake == l_stake {
-            r_pubkey.cmp(&l_pubkey)
+            r_pubkey.cmp(l_pubkey)
         } else {
-            r_stake.cmp(&l_stake)
+            r_stake.cmp(l_stake)
         }
     });
```
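The comment in `sort_stakes` carries the real argument: an unstable sort is safe because equal elements are deduplicated immediately afterwards, so their relative order can never be observed. A runnable sketch of that sort-then-dedup shape, with pubkeys and stakes reduced to plain integers:

```rust
// (pubkey, stake) pairs, highest stake first; ties broken by pubkey so the
// ordering is total. Unstable sort is fine: equal elements get deduped.
fn sort_stakes(stakes: &mut Vec<(u64, u64)>) {
    stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| {
        if r_stake == l_stake {
            r_pubkey.cmp(l_pubkey)
        } else {
            r_stake.cmp(l_stake)
        }
    });
    stakes.dedup();
}

fn main() {
    let mut stakes = vec![(1, 10), (3, 30), (1, 10), (2, 30)];
    sort_stakes(&mut stakes);
    // Descending by stake, ties descending by pubkey; the duplicate (1, 10)
    // collapses to a single entry.
    assert_eq!(stakes, vec![(3, 30), (2, 30), (1, 10)]);
}
```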
```diff
@@ -63,7 +63,7 @@ impl Poh {
         let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes);

         for _ in 0..num_hashes {
-            self.hash = hash(&self.hash.as_ref());
+            self.hash = hash(self.hash.as_ref());
         }
         self.num_hashes += num_hashes;
         self.remaining_hashes -= num_hashes;
@@ -77,7 +77,7 @@ impl Poh {
             return None; // Caller needs to `tick()` first
         }

-        self.hash = hashv(&[&self.hash.as_ref(), &mixin.as_ref()]);
+        self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]);
         let num_hashes = self.num_hashes + 1;
         self.num_hashes = 0;
         self.remaining_hashes -= 1;
@@ -89,7 +89,7 @@ impl Poh {
     }

     pub fn tick(&mut self) -> Option<PohEntry> {
-        self.hash = hash(&self.hash.as_ref());
+        self.hash = hash(self.hash.as_ref());
         self.num_hashes += 1;
         self.remaining_hashes -= 1;

@@ -115,7 +115,7 @@ pub fn compute_hash_time_ns(hashes_sample_size: u64) -> u64 {
     let mut v = Hash::default();
     let start = Instant::now();
     for _ in 0..hashes_sample_size {
-        v = hash(&v.as_ref());
+        v = hash(v.as_ref());
     }
     start.elapsed().as_nanos() as u64
 }
```
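All the `Poh` fixes touch the same inner loop: proof of history is a hash chain where each state is the hash of the previous one, and `record` folds a mixin in by hashing the current state together with it. A hedged sketch of that chain using the `hash`/`hashv` helpers the diff itself calls (assumes a dependency on the `solana-sdk` crate; this is not the real `Poh` struct):

```rust
use solana_sdk::hash::{hash, hashv, Hash};

fn main() {
    // tick(): each step hashes the previous state.
    let zero = Hash::default();
    let one = hash(zero.as_ref());
    let two = hash(one.as_ref());

    // record(): a mixin (e.g. a transaction hash) is folded into the chain
    // by hashing the current state together with it.
    let mixin = hash(b"transaction");
    let one_with_mixin = hashv(&[zero.as_ref(), mixin.as_ref()]);

    // Verification just replays the same operations.
    assert_eq!(two, hash(hash(zero.as_ref()).as_ref()));
    assert_ne!(one, one_with_mixin);
}
```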
```diff
@@ -139,11 +139,11 @@ mod tests {
         assert_ne!(entry.num_hashes, 0);

         for _ in 1..entry.num_hashes {
-            current_hash = hash(&current_hash.as_ref());
+            current_hash = hash(current_hash.as_ref());
         }
         current_hash = match mixin {
-            Some(mixin) => hashv(&[&current_hash.as_ref(), &mixin.as_ref()]),
-            None => hash(&current_hash.as_ref()),
+            Some(mixin) => hashv(&[current_hash.as_ref(), mixin.as_ref()]),
+            None => hash(current_hash.as_ref()),
         };
         if current_hash != entry.hash {
             return false;
@@ -192,9 +192,9 @@ mod tests {
     #[test]
     fn test_poh_verify() {
         let zero = Hash::default();
-        let one = hash(&zero.as_ref());
-        let two = hash(&one.as_ref());
-        let one_with_zero = hashv(&[&zero.as_ref(), &zero.as_ref()]);
+        let one = hash(zero.as_ref());
+        let two = hash(one.as_ref());
+        let one_with_zero = hashv(&[zero.as_ref(), zero.as_ref()]);

         let mut poh = Poh::new(zero, None);
         assert!(verify(
@@ -262,7 +262,7 @@ mod tests {
             (
                 PohEntry {
                     num_hashes: 1,
-                    hash: hash(&one_with_zero.as_ref()),
+                    hash: hash(one_with_zero.as_ref()),
                 },
                 None
             )
@@ -840,7 +840,7 @@ impl Shredder {
         first_index: usize,
         slot: Slot,
     ) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
-        Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?;
+        Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?;
         let mut recovered_data = vec![];
         let fec_set_size = num_data + num_coding;

@@ -933,7 +933,7 @@ impl Shredder {
     pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
         use reed_solomon_erasure::Error::TooFewDataShards;
         const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
-        Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?;
+        Self::verify_consistent_shred_payload_sizes("deshred()", shreds)?;
         let index = shreds.first().ok_or(TooFewDataShards)?.index();
         let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i);
         let data_complete = {
```
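The two `verify_consistent_shred_payload_sizes` fixes are a slightly different flavor: a string literal is already a `&'static str`, so `&"deshred()"` handed the callee a `&&str`. A minimal sketch:

```rust
fn log_caller(caller: &str) {
    eprintln!("called from {}", caller);
}

fn main() {
    // A string literal already has type `&'static str`; borrowing it again
    // produces the `&&str` that clippy::needless_borrow flags.
    log_caller("deshred()"); // was: log_caller(&"deshred()");
}
```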
```diff
@@ -312,7 +312,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) {
     );
     let signature = keypair.sign_message(&packet.data[msg_start..msg_end]);
     trace!("signature {:?}", signature);
-    packet.data[0..sig_end].copy_from_slice(&signature.as_ref());
+    packet.data[0..sig_end].copy_from_slice(signature.as_ref());
 }

 pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) {
@@ -364,7 +364,7 @@ pub fn sign_shreds_gpu(

     let mut elems = Vec::new();
     let offset: usize = pinned_keypair.len();
-    let num_keypair_packets = vec_size_in_packets(&pinned_keypair);
+    let num_keypair_packets = vec_size_in_packets(pinned_keypair);
     let mut num_packets = num_keypair_packets;

     //should be zero
```