Pacify clippy

Author: Greg Fitzgerald
Date:   2018-09-24 13:26:47 -06:00
Parent: ea37d29d3a
Commit: 95677a81c5
9 changed files with 21 additions and 20 deletions

@@ -503,7 +503,7 @@ impl Bank {
         res
     }
-    pub fn process_entry(&self, entry: Entry) -> Result<()> {
+    pub fn process_entry(&self, entry: &Entry) -> Result<()> {
         if !entry.transactions.is_empty() {
             for result in self.process_transactions(&entry.transactions) {
                 result?;
@@ -532,16 +532,16 @@ impl Bank {
             *tail_idx = (*tail_idx + 1) % WINDOW_SIZE as usize;
             entry_count += 1;
-            self.process_entry(entry)?;
+            self.process_entry(&entry)?;
         }
         Ok(entry_count)
     }
     /// Process an ordered list of entries.
-    pub fn process_entries(&self, entries: Vec<Entry>) -> Result<()> {
+    pub fn process_entries(&self, entries: &[Entry]) -> Result<()> {
         for entry in entries {
-            self.process_entry(entry)?;
+            self.process_entry(&entry)?;
         }
         Ok(())
     }
@@ -915,7 +915,7 @@ mod tests {
         );
         // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
-        bank.process_entries(vec![entry]).unwrap();
+        bank.process_entries(&[entry]).unwrap();
         assert!(bank.process_transaction(&tx).is_ok());
     }

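The Bank changes above follow clippy's needless_pass_by_value advice: process_entry and process_entries only read their arguments, so they now borrow (&Entry, &[Entry]) instead of taking ownership, and callers pass references rather than moving or cloning their data. A minimal standalone sketch of the pattern (the Item type and function names are illustrative, not from this codebase):

    struct Item {
        value: u64,
    }

    // Before: taking ownership forces callers to move or clone their Vec.
    // Clippy reports this as needless_pass_by_value when the data is only read.
    fn total_owned(items: Vec<Item>) -> u64 {
        items.iter().map(|i| i.value).sum()
    }

    // After: a borrowed slice accepts a Vec, an array, or a sub-slice,
    // and the caller keeps its data.
    fn total(items: &[Item]) -> u64 {
        items.iter().map(|i| i.value).sum()
    }

    fn main() {
        let items = vec![Item { value: 1 }, Item { value: 2 }];
        assert_eq!(total(&items), 3);      // items is still usable afterwards
        assert_eq!(total_owned(items), 3); // items is moved here
    }
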
@@ -82,7 +82,7 @@ impl BankingStage {
     fn process_transactions(
         bank: &Arc<Bank>,
-        transactions: Vec<Transaction>,
+        transactions: &[Transaction],
         hash_sender: &Sender<Hash>,
         poh_receiver: &Receiver<PohEntry>,
         entry_sender: &Sender<Vec<Entry>>,
@@ -93,7 +93,7 @@ impl BankingStage {
         let mut chunk_start = 0;
         while chunk_start != transactions.len() {
-            let chunk_end = chunk_start + Entry::num_will_fit(transactions[chunk_start..].to_vec());
+            let chunk_end = chunk_start + Entry::num_will_fit(&transactions[chunk_start..]);
             let results = bank.process_transactions(&transactions[chunk_start..chunk_end]);
             debug!("results: {}", results.len());
@@ -120,7 +120,7 @@ impl BankingStage {
             let hash = hasher.result();
-            if processed_transactions.len() != 0 {
+            if !processed_transactions.is_empty() {
                 hash_sender.send(hash)?;
                 let mut answered = false;
@@ -155,7 +155,8 @@ impl BankingStage {
         debug!("done process_transactions, {} entries", entries.len());
-        Ok(entry_sender.send(entries)?)
+        entry_sender.send(entries)?;
+        Ok(())
     }
     /// Process the incoming packets and send output `Signal` messages to `signal_sender`.
@@ -200,7 +201,7 @@ impl BankingStage {
             Self::process_transactions(
                 bank,
-                transactions,
+                &transactions,
                 hash_sender,
                 poh_receiver,
                 entry_sender,

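Two of the BankingStage edits are pure clippy style: processed_transactions.len() != 0 becomes !processed_transactions.is_empty() (the len_zero lint), and Ok(entry_sender.send(entries)?) is split into an explicit send(...)? followed by Ok(()) for clarity. A small illustrative sketch of the is_empty preference, using a plain Vec:

    fn main() {
        let pending: Vec<u64> = vec![3, 7];

        // Flagged by clippy: comparing a length against zero.
        if pending.len() != 0 {
            println!("{} transactions pending", pending.len());
        }

        // Preferred: is_empty states the intent directly.
        if !pending.is_empty() {
            println!("{} transactions pending", pending.len());
        }
    }
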
@@ -122,7 +122,7 @@ fn main() {
         if i >= head {
             break;
         }
-        if let Err(e) = bank.process_entry(entry) {
+        if let Err(e) = bank.process_entry(&entry) {
            eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
            exit(1);
        }

@@ -80,7 +80,7 @@ fn broadcast(
     {
         let mut win = window.write().unwrap();
         assert!(blobs.len() <= win.len());
-        for b in blobs.iter() {
+        for b in &blobs {
             let ix = b.read().get_index().expect("blob index");
             let pos = (ix % WINDOW_SIZE) as usize;
             if let Some(x) = win[pos].data.take() {
@@ -92,7 +92,7 @@ fn broadcast(
                 trace!("{} null {}", id, pos);
             }
-        for b in blobs.iter() {
+        for b in &blobs {
             let ix = b.read().get_index().expect("blob index");
             let pos = (ix % WINDOW_SIZE) as usize;
             trace!("{} caching {} at {}", id, ix, pos);

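The broadcast loops switch from blobs.iter() to iterating over &blobs. Clippy's explicit_iter_loop lint prefers the borrowed form; both desugar to the same iterator and yield references to the elements. A minimal sketch:

    fn main() {
        let blobs = vec!["a", "b", "c"];

        // Explicit .iter() call; clippy suggests borrowing the collection instead.
        for b in blobs.iter() {
            println!("{}", b);
        }

        // Equivalent: iterating over &blobs yields references to the elements.
        for b in &blobs {
            println!("{}", b);
        }
    }
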
@@ -326,7 +326,7 @@ impl Crdt {
     // TODO: Dummy leader scheduler, need to implement actual leader scheduling.
     pub fn get_scheduled_leader(&self, entry_height: u64) -> Option<Pubkey> {
         match self.scheduled_leaders.get(&entry_height) {
-            Some(x) => Some(x.clone()),
+            Some(x) => Some(*x),
             None => Some(self.my_data().leader_id),
         }
     }

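In Crdt, Some(x.clone()) becomes Some(*x). The map value (Pubkey) implements Copy, so dereferencing copies it; clippy's clone_on_copy lint flags the clone call as noise. A sketch with a stand-in Copy key type (Key here is hypothetical, not the real Pubkey):

    use std::collections::HashMap;

    // A stand-in for Pubkey: a small value that implements Copy.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Key([u8; 4]);

    fn main() {
        let mut leaders: HashMap<u64, Key> = HashMap::new();
        leaders.insert(0, Key([1, 2, 3, 4]));

        // get returns Option<&Key>; clippy flags .clone() on a Copy type.
        let cloned = leaders.get(&0).map(|x| x.clone());
        // Dereferencing copies the value and reads more plainly.
        let copied = leaders.get(&0).map(|x| *x);

        assert_eq!(cloned, copied);
    }
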
@@ -107,8 +107,8 @@ impl Entry {
             <= BLOB_DATA_SIZE as u64
     }
-    pub fn num_will_fit(transactions: Vec<Transaction>) -> usize {
-        if transactions.len() == 0 {
+    pub fn num_will_fit(transactions: &[Transaction]) -> usize {
+        if transactions.is_empty() {
             return 0;
         }
         let mut num = transactions.len();
@@ -196,7 +196,7 @@ impl Entry {
 /// the signature. If num_hashes is zero and there's no transaction data,
 /// start_hash is returned.
 fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
-    if num_hashes == 0 && transactions.len() == 0 {
+    if num_hashes == 0 && transactions.is_empty() {
         return *start_hash;
     }

@@ -386,7 +386,7 @@ impl Fullnode {
         // Make a new RPU to serve requests out of the new bank we've created
         // instead of the old one
-        if !self.rpu.is_none() {
+        if self.rpu.is_some() {
             let old_rpu = self.rpu.take().unwrap();
             old_rpu.close()?;
             self.rpu = Some(Rpu::new(

@@ -40,7 +40,7 @@ impl ReplicateStage {
             entries.append(&mut more);
         }
-        let res = bank.process_entries(entries.clone());
+        let res = bank.process_entries(&entries);
         {
             let mut wcrdt = crdt.write().unwrap();

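The ReplicateStage call previously cloned the whole entry vector just to hand it to Bank::process_entries; with the slice-taking signature it can simply borrow. Cloning a Vec clones every element, while borrowing passes a pointer and a length. A self-contained sketch (the Entry type and clone counter here are illustrative only):

    use std::sync::atomic::{AtomicUsize, Ordering};

    static CLONES: AtomicUsize = AtomicUsize::new(0);

    struct Entry(u64);

    impl Clone for Entry {
        fn clone(&self) -> Self {
            // Count how many element clones actually happen.
            CLONES.fetch_add(1, Ordering::SeqCst);
            Entry(self.0)
        }
    }

    fn process_entries(entries: &[Entry]) -> u64 {
        entries.iter().map(|e| e.0).sum()
    }

    fn main() {
        let entries = vec![Entry(1), Entry(2), Entry(3)];

        // Cloning the Vec first clones all three elements.
        let _ = process_entries(&entries.clone());
        assert_eq!(CLONES.load(Ordering::SeqCst), 3);

        // Borrowing copies nothing.
        let _ = process_entries(&entries);
        assert_eq!(CLONES.load(Ordering::SeqCst), 3);
    }
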
@@ -211,7 +211,7 @@ impl JsonRpcRequestProcessor {
     fn get_account_info(&self, pubkey: Pubkey) -> Result<Account> {
         self.bank
             .get_account(&pubkey)
-            .ok_or(Error::invalid_request())
+            .ok_or_else(Error::invalid_request)
     }
     fn get_balance(&self, pubkey: Pubkey) -> Result<i64> {
         let val = self.bank.get_balance(&pubkey);
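The last change swaps ok_or for ok_or_else. With ok_or, the error value is built eagerly even when the Option is Some; ok_or_else takes a function (here Error::invalid_request itself) and only calls it on the None path, which is what clippy's or_fun_call lint asks for. A minimal sketch with a hypothetical expensive_error constructor:

    fn expensive_error() -> String {
        // Imagine this formats or allocates a detailed error message.
        println!("building error value");
        "account not found".to_string()
    }

    fn main() {
        let found: Option<i64> = Some(42);

        // Eager: expensive_error() runs even though the value is Some.
        let _ = found.ok_or(expensive_error());

        // Lazy: the function is only called if the Option is None.
        let _ = found.ok_or_else(expensive_error);
    }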