Cleanup nightly warnings (#7055)
@@ -1,10 +1,8 @@
 #![feature(test)]
-use rand;
-
-extern crate test;
 
 #[macro_use]
 extern crate solana_ledger;
+extern crate test;
 
 use rand::Rng;
 use solana_ledger::{
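This first hunk only tidies the benchmark preamble: the standalone use rand; import is dropped and extern crate test; is grouped with the other extern crate items. For context, a minimal nightly benchmark file in this style looks like the sketch below (the benchmark body is hypothetical and not taken from the commit):

#![feature(test)]

// The test crate is still unstable, so the feature gate and an explicit
// extern crate test; are required even on the 2018 edition.
extern crate test;

use test::Bencher;

#[bench]
fn bench_sum(b: &mut Bencher) {
    // Hypothetical workload, just to exercise the bencher.
    b.iter(|| (0..1_000u64).sum::<u64>());
}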
@@ -450,7 +450,7 @@ impl Archiver {
         repair_socket: Arc<UdpSocket>,
         shred_fetch_receiver: PacketReceiver,
         slot_sender: Sender<u64>,
-    ) -> Result<(WindowService)> {
+    ) -> Result<WindowService> {
         let slots_per_segment =
             match Self::get_segment_config(&cluster_info, meta.client_commitment.clone()) {
                 Ok(slots_per_segment) => slots_per_segment,
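The Result<(WindowService)> to Result<WindowService> change here, and the matching edits in the Archiver, ClusterInfo, and RepairService hunks below, all silence the same warning: newer nightly toolchains also flag parentheses around a single type through the unused_parens lint. A minimal standalone sketch of that warning (illustrative only, not code from this repository):

// Warns on a recent nightly: unnecessary parentheses around type.
fn parenthesized() -> Result<(u64), String> {
    Ok(1)
}

// Equivalent signature with the parentheses removed, as done throughout this commit.
fn plain() -> Result<u64, String> {
    Ok(1)
}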
@@ -598,7 +598,7 @@ impl Archiver {
     fn sample_file_to_create_mining_hash(
         enc_file_path: &Path,
         sampling_offsets: &[u64],
-    ) -> Result<(Hash)> {
+    ) -> Result<Hash> {
         let sha_state = sample_file(enc_file_path, sampling_offsets)?;
         info!("sampled sha_state: {}", sha_state);
         Ok(sha_state)
@@ -809,7 +809,7 @@ impl Archiver {
                 Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))
             })?;
         let (storage_blockhash, turn_slot) =
-            serde_json::from_value::<((String, u64))>(response).map_err(|err| {
+            serde_json::from_value::<(String, u64)>(response).map_err(|err| {
                 io::Error::new(
                     io::ErrorKind::Other,
                     format!("Couldn't parse response: {:?}", err),
@@ -851,7 +851,7 @@ impl Archiver {
         archiver_info: &ContactInfo,
         blocktree: &Arc<Blocktree>,
         slots_per_segment: u64,
-    ) -> Result<(u64)> {
+    ) -> Result<u64> {
         // Create a client which downloads from the archiver and see that it
         // can respond with shreds.
         let start_slot = Self::get_archiver_segment_slot(archiver_info.storage_addr);
@@ -637,7 +637,7 @@ impl ClusterInfo {
         fanout: usize,
         select_index: usize,
         curr_index: usize,
-    ) -> Option<(Locality)> {
+    ) -> Option<Locality> {
         let end = layer_indices.len() - 1;
         let next = min(end, curr_index + 1);
         let layer_start = layer_indices[curr_index];
@@ -791,7 +791,7 @@ mod test {
         let mut account = Account::default();
         account.lamports = 1;
         let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
-        let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].into_iter().cloned().collect();
+        let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
         Tower::update_ancestor_stakes(&mut stake_lockouts, 2, account.lamports, &ancestors);
         assert_eq!(stake_lockouts[&0].stake, 1);
         assert_eq!(stake_lockouts[&1].stake, 1);
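The consensus test change addresses a different nightly warning: calling .into_iter() on an array auto-references through the slice implementation and yields references rather than owned values, which nightlies at the time flagged with the array_into_iter lint. Switching to .iter().cloned() keeps the same behavior without the warning. A small self-contained sketch of the pattern (illustrative only, not code from the commit):

use std::collections::{HashMap, HashSet};

fn main() {
    let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
    // On an array, .into_iter() resolved to the slice iterator and yielded
    // &(u64, HashSet<u64>), which nightly warned about via array_into_iter;
    // .iter().cloned() states the by-reference-then-clone intent explicitly.
    let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
    assert_eq!(ancestors[&2].len(), 2);
}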
@@ -186,7 +186,7 @@ impl RepairService {
         blocktree: &Blocktree,
         max_repairs: usize,
         repair_range: &RepairSlotRange,
-    ) -> Result<(Vec<RepairType>)> {
+    ) -> Result<Vec<RepairType>> {
         // Slot height and shred indexes for shreds we want to repair
         let mut repairs: Vec<RepairType> = vec![];
         for slot in repair_range.start..=repair_range.end {
@@ -218,7 +218,7 @@ impl RepairService {
         blocktree: &Blocktree,
         root: u64,
         max_repairs: usize,
-    ) -> Result<(Vec<RepairType>)> {
+    ) -> Result<Vec<RepairType>> {
         // Slot height and shred indexes for shreds we want to repair
         let mut repairs: Vec<RepairType> = vec![];
         Self::generate_repairs_for_fork(blocktree, &mut repairs, max_repairs, root);