Pacify clippy

Michael Vines
2021-02-18 23:42:09 -08:00
parent fd3b71a2c6
commit 5df36aec7d
40 changed files with 446 additions and 451 deletions
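
The commit message doesn't name the lint, but the rename pattern throughout this diff (AccountsDB → AccountsDb, ABSRequestSender → AbsRequestSender, IO → Io, NEWER → Newer) matches what clippy's upper_case_acronyms check flags: in Rust's UpperCamelCase convention for types, traits, and enum variants, acronyms are capitalized like ordinary words. A minimal sketch of the warning and the fix — the names mirror the renames below, but the code itself is illustrative, not taken from this commit:

    // Illustrative only; `IoError` and `SerdeStyle` stand in for the
    // many types and variants renamed in this commit.

    // Before: clippy warns that the name `IOError` contains a
    // capitalized acronym and suggests `IoError`.
    // struct IOError { message: String }

    // After: only the first letter of the acronym stays uppercase.
    struct IoError {
        message: String,
    }

    enum SerdeStyle {
        Newer, // was `NEWER`; variants use UpperCamelCase, not all-caps
    }

    fn main() {
        let err = IoError { message: String::from("disk full") };
        let style = SerdeStyle::Newer;
        println!("{} ({})", err.message, matches!(style, SerdeStyle::Newer));
    }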

View File

@@ -1,5 +1,5 @@
use crate::{
- accounts_db::{AccountsDB, BankHashInfo, ErrorCounters, LoadedAccount, ScanStorageResult},
+ accounts_db::{AccountsDb, BankHashInfo, ErrorCounters, LoadedAccount, ScanStorageResult},
accounts_index::{AccountIndex, Ancestors, IndexKey},
bank::{
NonceRollbackFull, NonceRollbackInfo, TransactionCheckResult, TransactionExecutionResult,
@@ -51,8 +51,8 @@ pub struct Accounts {
/// my epoch
pub epoch: Epoch,
- /// Single global AccountsDB
- pub accounts_db: Arc<AccountsDB>,
+ /// Single global AccountsDb
+ pub accounts_db: Arc<AccountsDb>,
/// set of writable accounts which are currently in the pipeline
pub(crate) account_locks: Mutex<HashSet<Pubkey>>,
@@ -93,7 +93,7 @@ impl Accounts {
caching_enabled: bool,
) -> Self {
Self {
- accounts_db: Arc::new(AccountsDB::new_with_config(
+ accounts_db: Arc::new(AccountsDb::new_with_config(
paths,
cluster_type,
account_indexes,
@@ -117,7 +117,7 @@ impl Accounts {
}
}
- pub(crate) fn new_empty(accounts_db: AccountsDB) -> Self {
+ pub(crate) fn new_empty(accounts_db: AccountsDb) -> Self {
Self {
accounts_db: Arc::new(accounts_db),
account_locks: Mutex::new(HashSet::new()),
@@ -553,14 +553,14 @@ impl Accounts {
|total_capitalization: &mut u64, (_pubkey, loaded_account, _slot)| {
let lamports = loaded_account.lamports();
if Self::is_loadable(lamports) {
- let account_cap = AccountsDB::account_balance_for_capitalization(
+ let account_cap = AccountsDb::account_balance_for_capitalization(
lamports,
&loaded_account.owner(),
loaded_account.executable(),
simple_capitalization_enabled,
);
- *total_capitalization = AccountsDB::checked_iterative_sum_for_capitalization(
+ *total_capitalization = AccountsDb::checked_iterative_sum_for_capitalization(
*total_capitalization,
account_cap,
);
@@ -1624,7 +1624,7 @@ mod tests {
let accounts =
Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
- // Load accounts owned by various programs into AccountsDB
+ // Load accounts owned by various programs into AccountsDb
let pubkey0 = solana_sdk::pubkey::new_rand();
let account0 = Account::new(1, 0, &Pubkey::new(&[2; 32]));
accounts.store_slow_uncached(0, &pubkey0, &account0);

View File

@@ -205,13 +205,13 @@ impl SnapshotRequestHandler {
}
#[derive(Default)]
- pub struct ABSRequestSender {
+ pub struct AbsRequestSender {
snapshot_request_sender: Option<SnapshotRequestSender>,
}
- impl ABSRequestSender {
+ impl AbsRequestSender {
pub fn new(snapshot_request_sender: Option<SnapshotRequestSender>) -> Self {
- ABSRequestSender {
+ AbsRequestSender {
snapshot_request_sender,
}
}
@@ -232,12 +232,12 @@ impl ABSRequestSender {
}
}
- pub struct ABSRequestHandler {
+ pub struct AbsRequestHandler {
pub snapshot_request_handler: Option<SnapshotRequestHandler>,
pub pruned_banks_receiver: DroppedSlotsReceiver,
}
- impl ABSRequestHandler {
+ impl AbsRequestHandler {
// Returns the latest requested snapshot block height, if one exists
pub fn handle_snapshot_requests(
&self,
@@ -275,7 +275,7 @@ impl AccountsBackgroundService {
pub fn new(
bank_forks: Arc<RwLock<BankForks>>,
exit: &Arc<AtomicBool>,
- request_handler: ABSRequestHandler,
+ request_handler: AbsRequestHandler,
accounts_db_caching_enabled: bool,
test_hash_calculation: bool,
use_index_hash_calculation: bool,
@@ -378,7 +378,7 @@ impl AccountsBackgroundService {
fn remove_dead_slots(
bank: &Bank,
- request_handler: &ABSRequestHandler,
+ request_handler: &AbsRequestHandler,
removed_slots_count: &mut usize,
total_remove_slots_time: &mut u64,
) {
@@ -411,7 +411,7 @@ mod test {
let genesis = create_genesis_config(10);
let bank0 = Arc::new(Bank::new(&genesis.genesis_config));
let (pruned_banks_sender, pruned_banks_receiver) = unbounded();
- let request_handler = ABSRequestHandler {
+ let request_handler = AbsRequestHandler {
snapshot_request_handler: None,
pruned_banks_receiver,
};

File diff suppressed because it is too large

View File

@@ -1067,7 +1067,7 @@ impl Bank {
new.ancestors.insert(p.slot(), i + 1);
});
- // Following code may touch AccountsDB, requiring proper ancestors
+ // Following code may touch AccountsDb, requiring proper ancestors
let parent_epoch = parent.epoch();
if parent_epoch < new.epoch() {
new.apply_feature_activations(false);
@@ -2169,7 +2169,7 @@ impl Bank {
self.capitalization.fetch_sub(account.lamports, Relaxed);
- // Resetting account balance to 0 is needed to really purge from AccountsDB and
+ // Resetting account balance to 0 is needed to really purge from AccountsDb and
// flush the Stakes cache
account.lamports = 0;
self.store_account(&program_id, &account);
@@ -2189,7 +2189,7 @@ impl Bank {
),
Some(account) => {
if *name == String::from_utf8_lossy(&account.data) {
- // nop; it seems that already AccountsDB is updated.
+ // nop; it seems that already AccountsDb is updated.
return;
}
// continue to replace account

View File

@@ -1,7 +1,7 @@
//! The `bank_forks` module implements BankForks a DAG of checkpointed Banks
use crate::{
- accounts_background_service::{ABSRequestSender, SnapshotRequest},
+ accounts_background_service::{AbsRequestSender, SnapshotRequest},
bank::Bank,
};
use log::*;
@@ -187,7 +187,7 @@ impl BankForks {
pub fn set_root(
&mut self,
root: Slot,
- accounts_background_request_sender: &ABSRequestSender,
+ accounts_background_request_sender: &AbsRequestSender,
highest_confirmed_root: Option<Slot>,
) {
let old_epoch = self.root_bank().epoch();
@@ -428,7 +428,7 @@ mod tests {
let bank0 = Bank::new(&genesis_config);
let mut bank_forks0 = BankForks::new(bank0);
- bank_forks0.set_root(0, &ABSRequestSender::default(), None);
+ bank_forks0.set_root(0, &AbsRequestSender::default(), None);
let bank1 = Bank::new(&genesis_config);
let mut bank_forks1 = BankForks::new(bank1);
@@ -460,7 +460,7 @@
// Set root in bank_forks0 to truncate the ancestor history
bank_forks0.insert(child1);
- bank_forks0.set_root(slot, &ABSRequestSender::default(), None);
+ bank_forks0.set_root(slot, &AbsRequestSender::default(), None);
// Don't set root in bank_forks1 to keep the ancestor history
bank_forks1.insert(child2);
@@ -514,7 +514,7 @@
);
bank_forks.set_root(
2,
- &ABSRequestSender::default(),
+ &AbsRequestSender::default(),
None, // highest confirmed root
);
banks[2].squash();
@@ -573,7 +573,7 @@
);
bank_forks.set_root(
2,
- &ABSRequestSender::default(),
+ &AbsRequestSender::default(),
Some(1), // highest confirmed root
);
banks[2].squash();

View File

@@ -19,7 +19,7 @@ use thiserror::Error;
#[derive(Error, Debug)]
pub enum UnpackError {
#[error("IO error: {0}")]
- IO(#[from] std::io::Error),
+ Io(#[from] std::io::Error),
#[error("Archive error: {0}")]
Archive(String),
}
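
For context on the `#[error]`/`#[from]` attributes in this enum: they come from the thiserror crate, and `#[from]` is what makes the variant rename safe for `?`-based callers, since the generated `From` impl follows the variant automatically. A self-contained sketch assuming thiserror as a dependency (`open_archive` is a hypothetical caller, not code from this file):

    use thiserror::Error;

    #[derive(Error, Debug)]
    pub enum UnpackError {
        // `#[from]` generates `impl From<std::io::Error> for UnpackError`,
        // so `?` converts io errors into this variant whatever its name is.
        #[error("IO error: {0}")]
        Io(#[from] std::io::Error),
        #[error("Archive error: {0}")]
        Archive(String),
    }

    // The rename from `IO` to `Io` only breaks code that pattern-matches
    // the variant by name, not conversions like this one.
    fn open_archive(path: &str) -> Result<std::fs::File, UnpackError> {
        Ok(std::fs::File::open(path)?)
    }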

View File

@@ -1,7 +1,7 @@
use {
crate::{
accounts::Accounts,
- accounts_db::{AccountStorageEntry, AccountsDB, AppendVecId, BankHashInfo},
+ accounts_db::{AccountStorageEntry, AccountsDb, AppendVecId, BankHashInfo},
accounts_index::{AccountIndex, Ancestors},
append_vec::AppendVec,
bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins},
@@ -59,7 +59,7 @@ pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
- NEWER,
+ Newer,
}
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
@@ -82,7 +82,7 @@ trait TypeContext<'a> {
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
- serializable_db: &SerializableAccountsDB<'a, Self>,
+ serializable_db: &SerializableAccountsDb<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
@@ -155,7 +155,7 @@ where
}};
}
match serde_style {
- SerdeStyle::NEWER => INTO!(TypeContextFuture),
+ SerdeStyle::Newer => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_from_stream error: {:?}", err);
@@ -185,7 +185,7 @@ where
};
}
match serde_style {
- SerdeStyle::NEWER => INTO!(TypeContextFuture),
+ SerdeStyle::Newer => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_to_stream error: {:?}", err);
@@ -208,14 +208,14 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
}
}
- struct SerializableAccountsDB<'a, C> {
- accounts_db: &'a AccountsDB,
+ struct SerializableAccountsDb<'a, C> {
+ accounts_db: &'a AccountsDb,
slot: Slot,
account_storage_entries: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
- impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
+ impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDb<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
@@ -225,7 +225,7 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> {
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
- impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {}
+ impl<'a, C> IgnoreAsHelper for SerializableAccountsDb<'a, C> {}
#[allow(clippy::too_many_arguments)]
fn reconstruct_bank_from_fields<E, P>(
@@ -273,12 +273,12 @@ fn reconstruct_accountsdb_from_fields<E, P>(
cluster_type: &ClusterType,
account_indexes: HashSet<AccountIndex>,
caching_enabled: bool,
- ) -> Result<AccountsDB, Error>
+ ) -> Result<AccountsDb, Error>
where
E: SerializableStorage,
P: AsRef<Path>,
{
- let mut accounts_db = AccountsDB::new_with_config(
+ let mut accounts_db = AccountsDb::new_with_config(
account_paths.to_vec(),
cluster_type,
account_indexes,

View File

@@ -215,7 +215,7 @@ impl<'a> TypeContext<'a> for Context {
{
(
SerializableVersionedBank::from(serializable_bank.bank.get_fields_to_serialize()),
- SerializableAccountsDB::<'a, Self> {
+ SerializableAccountsDb::<'a, Self> {
accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db,
slot: serializable_bank.bank.rc.slot,
account_storage_entries: serializable_bank.snapshot_storages,
@@ -227,7 +227,7 @@
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
- serializable_db: &SerializableAccountsDB<'a, Self>,
+ serializable_db: &SerializableAccountsDb<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized,

View File

@@ -21,7 +21,7 @@ use {
#[cfg(test)]
fn copy_append_vecs<P: AsRef<Path>>(
- accounts_db: &AccountsDB,
+ accounts_db: &AccountsDb,
output_dir: P,
) -> std::io::Result<()> {
let storage_entries = accounts_db.get_snapshot_storages(Slot::max_value());
@@ -57,7 +57,7 @@ fn context_accountsdb_from_stream<'a, C, R, P>(
stream: &mut BufReader<R>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
- ) -> Result<AccountsDB, Error>
+ ) -> Result<AccountsDb, Error>
where
C: TypeContext<'a>,
R: Read,
@@ -80,13 +80,13 @@ fn accountsdb_from_stream<R, P>(
stream: &mut BufReader<R>,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
- ) -> Result<AccountsDB, Error>
+ ) -> Result<AccountsDb, Error>
where
R: Read,
P: AsRef<Path>,
{
match serde_style {
- SerdeStyle::NEWER => context_accountsdb_from_stream::<TypeContextFuture, R, P>(
+ SerdeStyle::Newer => context_accountsdb_from_stream::<TypeContextFuture, R, P>(
stream,
account_paths,
stream_append_vecs_path,
@@ -98,7 +98,7 @@ where
fn accountsdb_to_stream<W>(
serde_style: SerdeStyle,
stream: &mut W,
- accounts_db: &AccountsDB,
+ accounts_db: &AccountsDb,
slot: Slot,
account_storage_entries: &[SnapshotStorage],
) -> Result<(), Error>
@@ -106,9 +106,9 @@ where
W: Write,
{
match serde_style {
- SerdeStyle::NEWER => serialize_into(
+ SerdeStyle::Newer => serialize_into(
stream,
- &SerializableAccountsDB::<TypeContextFuture> {
+ &SerializableAccountsDb::<TypeContextFuture> {
accounts_db,
slot,
account_storage_entries,
@@ -230,13 +230,13 @@ fn test_bank_serialize_style(serde_style: SerdeStyle) {
#[cfg(test)]
pub(crate) fn reconstruct_accounts_db_via_serialization(
- accounts: &AccountsDB,
+ accounts: &AccountsDb,
slot: Slot,
- ) -> AccountsDB {
+ ) -> AccountsDb {
let mut writer = Cursor::new(vec![]);
let snapshot_storages = accounts.get_snapshot_storages(slot);
accountsdb_to_stream(
- SerdeStyle::NEWER,
+ SerdeStyle::Newer,
&mut writer,
&accounts,
slot,
@@ -249,17 +249,17 @@ pub(crate) fn reconstruct_accounts_db_via_serialization(
let copied_accounts = TempDir::new().unwrap();
// Simulate obtaining a copy of the AppendVecs from a tarball
copy_append_vecs(&accounts, copied_accounts.path()).unwrap();
- accountsdb_from_stream(SerdeStyle::NEWER, &mut reader, &[], copied_accounts.path()).unwrap()
+ accountsdb_from_stream(SerdeStyle::Newer, &mut reader, &[], copied_accounts.path()).unwrap()
}
#[test]
fn test_accounts_serialize_newer() {
- test_accounts_serialize_style(SerdeStyle::NEWER)
+ test_accounts_serialize_style(SerdeStyle::Newer)
}
#[test]
fn test_bank_serialize_newer() {
- test_bank_serialize_style(SerdeStyle::NEWER)
+ test_bank_serialize_style(SerdeStyle::Newer)
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
@@ -268,7 +268,7 @@ mod test_bank_serialize {
// These some what long test harness is required to freeze the ABI of
// Bank's serialization due to versioned nature
- #[frozen_abi(digest = "9CqwEeiVycBp9wVDLz19XUJXRMZ68itGfYVEe29S8JmA")]
+ #[frozen_abi(digest = "DuRGntVwLGNAv5KooafUSpxk67BPAx2yC7Z8A9c8wr2G")]
#[derive(Serialize, AbiExample)]
pub struct BankAbiTestWrapperFuture {
#[serde(serialize_with = "wrapper_future")]

View File

@@ -1,5 +1,5 @@
use crate::{
- accounts_db::AccountsDB,
+ accounts_db::AccountsDb,
accounts_index::AccountIndex,
bank::{Bank, BankSlotDelta, Builtins},
bank_forks::ArchiveFormat,
@@ -25,7 +25,7 @@ use std::{
cmp::Ordering,
fmt,
fs::{self, File},
- io::{self, BufReader, BufWriter, Error as IOError, ErrorKind, Read, Seek, SeekFrom, Write},
+ io::{self, BufReader, BufWriter, Error as IoError, ErrorKind, Read, Seek, SeekFrom, Write},
path::{Path, PathBuf},
process::{self, ExitStatus},
str::FromStr,
@@ -108,7 +108,7 @@ pub struct SlotSnapshotPaths {
#[derive(Error, Debug)]
pub enum SnapshotError {
#[error("I/O error: {0}")]
- IO(#[from] std::io::Error),
+ Io(#[from] std::io::Error),
#[error("serialization error: {0}")]
Serialize(#[from] bincode::Error),
@@ -319,7 +319,7 @@ pub fn archive_snapshot_package(snapshot_package: &AccountsPackage) -> Result<()
match &mut tar.stdout {
None => {
- return Err(SnapshotError::IO(IOError::new(
+ return Err(SnapshotError::Io(IoError::new(
ErrorKind::Other,
"tar stdout unavailable".to_string(),
)));
@@ -521,7 +521,7 @@ pub fn add_snapshot<P: AsRef<Path>>(
let mut bank_serialize = Measure::start("bank-serialize-ms");
let bank_snapshot_serializer = move |stream: &mut BufWriter<File>| -> Result<()> {
let serde_style = match snapshot_version {
- SnapshotVersion::V1_2_0 => SerdeStyle::NEWER,
+ SnapshotVersion::V1_2_0 => SerdeStyle::Newer,
};
bank_to_stream(serde_style, stream.by_ref(), bank, snapshot_storages)?;
Ok(())
@@ -797,7 +797,7 @@ where
let bank = deserialize_snapshot_data_file(&root_paths.snapshot_file_path, |mut stream| {
Ok(match snapshot_version_enum {
SnapshotVersion::V1_2_0 => bank_from_stream(
- SerdeStyle::NEWER,
+ SerdeStyle::Newer,
&mut stream,
&append_vecs_path,
account_paths,
@@ -841,7 +841,7 @@ fn get_bank_snapshot_dir<P: AsRef<Path>>(path: P, slot: Slot) -> PathBuf {
fn get_io_error(error: &str) -> SnapshotError {
warn!("Snapshot Error: {:?}", error);
- SnapshotError::IO(IOError::new(ErrorKind::Other, error))
+ SnapshotError::Io(IoError::new(ErrorKind::Other, error))
}
pub fn verify_snapshot_archive<P, Q, R>(
@@ -968,7 +968,7 @@ pub fn process_accounts_package_pre(
let hash = accounts_package.hash; // temporarily remaining here
if let Some(expected_hash) = accounts_package.hash_for_testing {
- let (hash, lamports) = AccountsDB::calculate_accounts_hash_without_index(
+ let (hash, lamports) = AccountsDb::calculate_accounts_hash_without_index(
&accounts_package.storages,
accounts_package.simple_capitalization_testing,
thread_pool,
@@ -1039,7 +1039,7 @@ mod tests {
Ok(())
},
);
- assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("too large snapshot data file to serialize"));
+ assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to serialize"));
}
#[test]
@@ -1088,7 +1088,7 @@ mod tests {
expected_consumed_size - 1,
|stream| Ok(deserialize_from::<_, u32>(stream)?),
);
- assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("too large snapshot data file to deserialize"));
+ assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to deserialize"));
}
#[test]
@@ -1113,7 +1113,7 @@ mod tests {
expected_consumed_size * 2,
|stream| Ok(deserialize_from::<_, u32>(stream)?),
);
- assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("invalid snapshot data file"));
+ assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("invalid snapshot data file"));
}
#[test]

View File

@@ -1,7 +1,7 @@
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
- use solana_runtime::{accounts_db::AccountsDB, accounts_index::Ancestors};
+ use solana_runtime::{accounts_db::AccountsDb, accounts_index::Ancestors};
use solana_sdk::genesis_config::ClusterType;
use solana_sdk::{account::Account, clock::Slot, pubkey::Pubkey};
use std::collections::HashSet;
@@ -15,7 +15,7 @@ fn test_shrink_and_clean() {
// repeat the whole test scenario
for _ in 0..5 {
- let accounts = Arc::new(AccountsDB::new_single());
+ let accounts = Arc::new(AccountsDb::new_single());
let accounts_for_shrink = accounts.clone();
// spawn the slot shrinking background thread
@@ -31,7 +31,7 @@
let mut alive_accounts = vec![];
let owner = Pubkey::default();
- // populate the AccountsDB with plenty of food for slot shrinking
+ // populate the AccountsDb with plenty of food for slot shrinking
// also this simulates realistic some heavy spike account updates in the wild
for current_slot in 0..100 {
while alive_accounts.len() <= 10 {
@@ -66,7 +66,7 @@
fn test_bad_bank_hash() {
solana_logger::setup();
use solana_sdk::signature::{Keypair, Signer};
- let db = AccountsDB::new(Vec::new(), &ClusterType::Development);
+ let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let some_slot: Slot = 0;
let ancestors: Ancestors = [(some_slot, 0)].iter().copied().collect();
@@ -113,7 +113,7 @@ fn test_bad_bank_hash() {
for (key, account) in &account_refs {
assert_eq!(
db.load_account_hash(&ancestors, &key),
- AccountsDB::hash_account(some_slot, &account, &key, &ClusterType::Development)
+ AccountsDb::hash_account(some_slot, &account, &key, &ClusterType::Development)
);
}
existing.clear();