From 671fb3519d8d76f94cba032b3ee78f52bf3bf1b4 Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Fri, 19 Feb 2021 10:38:16 -0800 Subject: [PATCH] Pacify clippy --- banking-bench/src/main.rs | 4 +- cli/src/program.rs | 4 +- core/src/banking_stage.rs | 2 +- core/src/broadcast_stage.rs | 2 +- core/src/cluster_info.rs | 2 +- core/src/commitment_service.rs | 8 +- core/src/consensus.rs | 17 +- core/src/non_circulating_supply.rs | 2 +- .../optimistically_confirmed_bank_tracker.rs | 4 +- core/src/poh_service.rs | 8 +- core/src/replay_stage.rs | 22 +- core/src/result.rs | 10 +- core/src/rpc.rs | 10 +- core/src/tvu.rs | 6 +- core/src/validator.rs | 2 +- core/tests/snapshots.rs | 6 +- frozen-abi/src/abi_digester.rs | 32 +- ledger/src/blockstore.rs | 10 +- ledger/src/blockstore_db.rs | 2 +- ledger/src/entry.rs | 16 +- log-analyzer/src/main.rs | 6 +- merkle-root-bench/src/main.rs | 4 +- program-test/src/lib.rs | 2 +- programs/bpf/benches/bpf_loader.rs | 8 +- programs/bpf_loader/src/allocator_bump.rs | 6 +- programs/bpf_loader/src/bpf_verifier.rs | 16 +- programs/bpf_loader/src/lib.rs | 32 +- programs/bpf_loader/src/syscalls.rs | 222 ++++++------- remote-wallet/src/ledger_error.rs | 2 +- runtime/src/accounts.rs | 16 +- runtime/src/accounts_background_service.rs | 16 +- runtime/src/accounts_db.rs | 310 +++++++++--------- runtime/src/bank.rs | 6 +- runtime/src/bank_forks.rs | 12 +- runtime/src/hardened_unpack.rs | 2 +- runtime/src/serde_snapshot.rs | 22 +- runtime/src/serde_snapshot/future.rs | 4 +- runtime/src/serde_snapshot/tests.rs | 28 +- runtime/src/snapshot_utils.rs | 22 +- runtime/tests/accounts.rs | 10 +- streamer/src/streamer.rs | 2 +- 41 files changed, 455 insertions(+), 462 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 0b06715121..8ff0f3d3ce 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -19,7 +19,7 @@ use solana_ledger::{ use solana_measure::measure::Measure; use solana_perf::packet::to_packets_chunked; use solana_runtime::{ - accounts_background_service::ABSRequestSender, bank::Bank, bank_forks::BankForks, + accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, }; use solana_sdk::{ hash::Hash, @@ -326,7 +326,7 @@ fn main() { poh_recorder.lock().unwrap().set_bank(&bank); assert!(poh_recorder.lock().unwrap().bank().is_some()); if bank.slot() > 32 { - bank_forks.set_root(root, &ABSRequestSender::default(), None); + bank_forks.set_root(root, &AbsRequestSender::default(), None); root += 1; } debug!( diff --git a/cli/src/program.rs b/cli/src/program.rs index 58cdd36ad7..c4e640f53b 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -11,7 +11,7 @@ use bip39::{Language, Mnemonic, MnemonicType, Seed}; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; use log::*; use serde_json::{self, json, Value}; -use solana_bpf_loader_program::{bpf_verifier, BPFError, ThisInstructionMeter}; +use solana_bpf_loader_program::{bpf_verifier, BpfError, ThisInstructionMeter}; use solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}; use solana_cli_output::{ display::new_spinner_progress_bar, CliUpgradeableBuffer, CliUpgradeableProgram, @@ -1362,7 +1362,7 @@ fn read_and_verify_elf(program_location: &str) -> Result, Box::from_elf( + Executable::::from_elf( &program_data, Some(|x| bpf_verifier::check(x, false)), Config::default(), diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index a8227ff054..2fd71f72c3 100644 --- a/core/src/banking_stage.rs +++ 
b/core/src/banking_stage.rs @@ -513,7 +513,7 @@ impl BankingStage { processed_transactions_indexes, ); } - Err(e) => panic!(format!("Poh recorder returned unexpected error: {:?}", e)), + Err(e) => panic!("Poh recorder returned unexpected error: {:?}", e), } poh_record.stop(); } diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 2aa6a10cda..ca5f3e6db8 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -402,7 +402,7 @@ pub fn broadcast_shreds( match send_mmsg(s, &packets[sent..]) { Ok(n) => sent += n, Err(e) => { - return Err(Error::IO(e)); + return Err(Error::Io(e)); } } } diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs index 09db308b38..c94886120b 100644 --- a/core/src/cluster_info.rs +++ b/core/src/cluster_info.rs @@ -1496,7 +1496,7 @@ impl ClusterInfo { 1 ); error!("retransmit result {:?}", e); - return Err(Error::IO(e)); + return Err(Error::Io(e)); } } } diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 6249d1f57b..d597385ad6 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -249,7 +249,7 @@ mod tests { use super::*; use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use solana_runtime::{ - accounts_background_service::ABSRequestSender, + accounts_background_service::AbsRequestSender, bank_forks::BankForks, genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs}, }; @@ -534,7 +534,7 @@ mod tests { &working_bank, ); for x in 0..root { - bank_forks.set_root(x, &ABSRequestSender::default(), None); + bank_forks.set_root(x, &AbsRequestSender::default(), None); } // Add an additional bank/vote that will root slot 2 @@ -573,7 +573,7 @@ mod tests { .highest_confirmed_root(); bank_forks.set_root( root, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(highest_confirmed_root), ); let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root); @@ -642,7 +642,7 @@ mod tests { .highest_confirmed_root(); bank_forks.set_root( root, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(highest_confirmed_root), ); let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 75fbb8a098..9efb151a60 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -845,10 +845,9 @@ impl Tower { assert!( self.last_vote == Vote::default() && self.lockouts.votes.is_empty() || self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(), - format!( - "last vote: {:?} lockouts.votes: {:?}", - self.last_vote, self.lockouts.votes - ) + "last vote: {:?} lockouts.votes: {:?}", + self.last_vote, + self.lockouts.votes ); if let Some(last_voted_slot) = self.last_voted_slot() { @@ -1131,7 +1130,7 @@ impl Tower { #[derive(Error, Debug)] pub enum TowerError { #[error("IO Error: {0}")] - IOError(#[from] std::io::Error), + IoError(#[from] std::io::Error), #[error("Serialization Error: {0}")] SerializeError(#[from] bincode::Error), @@ -1157,7 +1156,7 @@ pub enum TowerError { impl TowerError { pub fn is_file_missing(&self) -> bool { - if let TowerError::IOError(io_err) = &self { + if let TowerError::IoError(io_err) = &self { io_err.kind() == std::io::ErrorKind::NotFound } else { false @@ -1246,7 +1245,7 @@ pub mod test { }; use solana_ledger::{blockstore::make_slot_entries, get_tmp_ledger_path}; use solana_runtime::{ - accounts_background_service::ABSRequestSender, + 
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, genesis_utils::{ @@ -1417,7 +1416,7 @@ pub mod test { new_root, &self.bank_forks, &mut self.progress, - &ABSRequestSender::default(), + &AbsRequestSender::default(), None, &mut self.heaviest_subtree_fork_choice, ) @@ -2704,7 +2703,7 @@ pub mod test { remove_file(path).unwrap(); }, ); - assert_matches!(loaded, Err(TowerError::IOError(_))) + assert_matches!(loaded, Err(TowerError::IoError(_))) } #[test] diff --git a/core/src/non_circulating_supply.rs b/core/src/non_circulating_supply.rs index bebd2ac6e2..df4a0ae466 100644 --- a/core/src/non_circulating_supply.rs +++ b/core/src/non_circulating_supply.rs @@ -31,7 +31,7 @@ pub fn calculate_non_circulating_supply(bank: &Arc) -> NonCirculatingSuppl bank.get_filtered_indexed_accounts( &IndexKey::ProgramId(solana_stake_program::id()), // The program-id account index checks for Account owner on inclusion. However, due to - // the current AccountsDB implementation, an account may remain in storage as a + // the current AccountsDb implementation, an account may remain in storage as a // zero-lamport Account::Default() after being wiped and reinitialized in later // updates. We include the redundant filter here to avoid returning these accounts. |account| account.owner == solana_stake_program::id(), diff --git a/core/src/optimistically_confirmed_bank_tracker.rs b/core/src/optimistically_confirmed_bank_tracker.rs index 523712dc34..b8f54e9e36 100644 --- a/core/src/optimistically_confirmed_bank_tracker.rs +++ b/core/src/optimistically_confirmed_bank_tracker.rs @@ -168,7 +168,7 @@ mod tests { use super::*; use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo}; use solana_runtime::{ - accounts_background_service::ABSRequestSender, commitment::BlockCommitmentCache, + accounts_background_service::AbsRequestSender, commitment::BlockCommitmentCache, }; use solana_sdk::pubkey::Pubkey; @@ -284,7 +284,7 @@ mod tests { bank_forks .write() .unwrap() - .set_root(7, &ABSRequestSender::default(), None); + .set_root(7, &AbsRequestSender::default(), None); OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(6), &bank_forks, diff --git a/core/src/poh_service.rs b/core/src/poh_service.rs index 8d94fbeb59..9dc915749a 100644 --- a/core/src/poh_service.rs +++ b/core/src/poh_service.rs @@ -248,11 +248,9 @@ mod tests { if entry.is_tick() { assert!( entry.num_hashes <= poh_config.hashes_per_tick.unwrap(), - format!( - "{} <= {}", - entry.num_hashes, - poh_config.hashes_per_tick.unwrap() - ) + "{} <= {}", + entry.num_hashes, + poh_config.hashes_per_tick.unwrap() ); if entry.num_hashes == poh_config.hashes_per_tick.unwrap() { diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index ab2905ec37..98bcdcfc23 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -28,7 +28,7 @@ use solana_ledger::{ use solana_measure::{measure::Measure, thread_mem_usage}; use solana_metrics::inc_new_counter_info; use solana_runtime::{ - accounts_background_service::ABSRequestSender, bank::Bank, bank_forks::BankForks, + accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache, vote_sender_types::ReplayVoteSender, }; use solana_sdk::{ @@ -98,7 +98,7 @@ pub struct ReplayStageConfig { pub subscriptions: Arc, pub leader_schedule_cache: Arc, pub latest_root_senders: Vec>, - pub accounts_background_request_sender: ABSRequestSender, + pub 
accounts_background_request_sender: AbsRequestSender, pub block_commitment_cache: Arc>, pub transaction_status_sender: Option, pub rewards_recorder_sender: Option, @@ -1069,7 +1069,7 @@ impl ReplayStage { blockstore: &Arc, leader_schedule_cache: &Arc, lockouts_sender: &Sender, - accounts_background_request_sender: &ABSRequestSender, + accounts_background_request_sender: &AbsRequestSender, latest_root_senders: &[Sender], subscriptions: &Arc, block_commitment_cache: &Arc>, @@ -1834,7 +1834,7 @@ impl ReplayStage { new_root: Slot, bank_forks: &RwLock, progress: &mut ProgressMap, - accounts_background_request_sender: &ABSRequestSender, + accounts_background_request_sender: &AbsRequestSender, highest_confirmed_root: Option, heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, ) { @@ -2012,7 +2012,7 @@ pub(crate) mod tests { }, }; use solana_runtime::{ - accounts_background_service::ABSRequestSender, + accounts_background_service::AbsRequestSender, commitment::BlockCommitment, genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs}, }; @@ -2248,7 +2248,7 @@ pub(crate) mod tests { root, &bank_forks, &mut progress, - &ABSRequestSender::default(), + &AbsRequestSender::default(), None, &mut heaviest_subtree_fork_choice, ); @@ -2292,7 +2292,7 @@ pub(crate) mod tests { root, &bank_forks, &mut progress, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(confirmed_root), &mut heaviest_subtree_fork_choice, ); @@ -3244,7 +3244,7 @@ pub(crate) mod tests { bank_forks.insert(Bank::new_from_parent(&bank0, &Pubkey::default(), 9)); let bank9 = bank_forks.get(9).unwrap().clone(); bank_forks.insert(Bank::new_from_parent(&bank9, &Pubkey::default(), 10)); - bank_forks.set_root(9, &ABSRequestSender::default(), None); + bank_forks.set_root(9, &AbsRequestSender::default(), None); let total_epoch_stake = bank0.total_epoch_stake(); // Insert new ForkProgress for slot 10 and its @@ -3335,7 +3335,7 @@ pub(crate) mod tests { .get_propagated_stats_mut(0) .unwrap() .is_leader_slot = true; - bank_forks.set_root(0, &ABSRequestSender::default(), None); + bank_forks.set_root(0, &AbsRequestSender::default(), None); let total_epoch_stake = bank_forks.root_bank().total_epoch_stake(); // Insert new ForkProgress representing a slot for all slots 1..=num_banks. 
Only @@ -3415,7 +3415,7 @@ pub(crate) mod tests { .get_propagated_stats_mut(0) .unwrap() .is_leader_slot = true; - bank_forks.set_root(0, &ABSRequestSender::default(), None); + bank_forks.set_root(0, &AbsRequestSender::default(), None); let total_epoch_stake = num_validators as u64 * stake_per_validator; @@ -3760,7 +3760,7 @@ pub(crate) mod tests { bank_forks .write() .unwrap() - .set_root(3, &ABSRequestSender::default(), None); + .set_root(3, &AbsRequestSender::default(), None); let mut descendants = bank_forks.read().unwrap().descendants().clone(); let mut ancestors = bank_forks.read().unwrap().ancestors(); let slot_3_descendants = descendants.get(&3).unwrap().clone(); diff --git a/core/src/result.rs b/core/src/result.rs index bca8f79544..ac9907bfbc 100644 --- a/core/src/result.rs +++ b/core/src/result.rs @@ -10,8 +10,8 @@ use std::any::Any; #[derive(Debug)] pub enum Error { - IO(std::io::Error), - JSON(serde_json::Error), + Io(std::io::Error), + Json(serde_json::Error), AddrParse(std::net::AddrParseError), JoinError(Box), RecvError(std::sync::mpsc::RecvError), @@ -108,7 +108,7 @@ impl std::convert::From> for Error { } impl std::convert::From for Error { fn from(e: std::io::Error) -> Error { - Error::IO(e) + Error::Io(e) } } impl std::convert::From for Error { @@ -118,7 +118,7 @@ impl std::convert::From for Error { } impl std::convert::From for Error { fn from(e: serde_json::Error) -> Error { - Error::JSON(e) + Error::Json(e) } } impl std::convert::From for Error { @@ -199,7 +199,7 @@ mod tests { assert_matches!(send_error(), Err(Error::SendError)); assert_matches!(join_error(), Err(Error::JoinError(_))); let ioe = io::Error::new(io::ErrorKind::NotFound, "hi"); - assert_matches!(Error::from(ioe), Error::IO(_)); + assert_matches!(Error::from(ioe), Error::Io(_)); } #[test] fn fmt_test() { diff --git a/core/src/rpc.rs b/core/src/rpc.rs index b412f5d2a6..11cd5c2d8c 100644 --- a/core/src/rpc.rs +++ b/core/src/rpc.rs @@ -1402,7 +1402,7 @@ impl JsonRpcRequestProcessor { { bank.get_filtered_indexed_accounts(&IndexKey::ProgramId(*program_id), |account| { // The program-id account index checks for Account owner on inclusion. However, due - // to the current AccountsDB implementation, an account may remain in storage as a + // to the current AccountsDb implementation, an account may remain in storage as a // zero-lamport Account::Default() after being wiped and reinitialized in later // updates. We include the redundant filters here to avoid returning these // accounts. @@ -1421,7 +1421,7 @@ impl JsonRpcRequestProcessor { mut filters: Vec, ) -> Vec<(Pubkey, Account)> { // The by-owner accounts index checks for Token Account state and Owner address on - // inclusion. However, due to the current AccountsDB implementation, an account may remain + // inclusion. However, due to the current AccountsDb implementation, an account may remain // in storage as a zero-lamport Account::Default() after being wiped and reinitialized in // later updates. We include the redundant filters here to avoid returning these accounts. // @@ -1461,7 +1461,7 @@ impl JsonRpcRequestProcessor { mut filters: Vec, ) -> Vec<(Pubkey, Account)> { // The by-mint accounts index checks for Token Account state and Mint address on inclusion. - // However, due to the current AccountsDB implementation, an account may remain in storage + // However, due to the current AccountsDb implementation, an account may remain in storage // as be zero-lamport Account::Default() after being wiped and reinitialized in later // updates. 
We include the redundant filters here to avoid returning these accounts. // @@ -3037,7 +3037,7 @@ pub mod tests { genesis_utils::{create_genesis_config, GenesisConfigInfo}, }; use solana_runtime::{ - accounts_background_service::ABSRequestSender, commitment::BlockCommitment, + accounts_background_service::AbsRequestSender, commitment::BlockCommitment, }; use solana_sdk::{ clock::MAX_RECENT_BLOCKHASHES, @@ -3154,7 +3154,7 @@ pub mod tests { bank_forks .write() .unwrap() - .set_root(*root, &ABSRequestSender::default(), Some(0)); + .set_root(*root, &AbsRequestSender::default(), Some(0)); let mut stakes = HashMap::new(); stakes.insert(leader_vote_keypair.pubkey(), (1, Account::default())); let block_time = bank_forks diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 6074a01118..0e7e939dac 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -30,7 +30,7 @@ use solana_ledger::{ }; use solana_runtime::{ accounts_background_service::{ - ABSRequestHandler, ABSRequestSender, AccountsBackgroundService, SendDroppedBankCallback, + AbsRequestHandler, AbsRequestSender, AccountsBackgroundService, SendDroppedBankCallback, SnapshotRequestHandler, }, bank_forks::{BankForks, SnapshotConfig}, @@ -228,9 +228,9 @@ impl Tvu { )))); } - let accounts_background_request_sender = ABSRequestSender::new(snapshot_request_sender); + let accounts_background_request_sender = AbsRequestSender::new(snapshot_request_sender); - let accounts_background_request_handler = ABSRequestHandler { + let accounts_background_request_handler = AbsRequestHandler { snapshot_request_handler, pruned_banks_receiver, }; diff --git a/core/src/validator.rs b/core/src/validator.rs index 3bcaad773d..933a85489e 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -1048,7 +1048,7 @@ fn new_banks_from_ledger( )); bank_forks.set_root( warp_slot, - &solana_runtime::accounts_background_service::ABSRequestSender::default(), + &solana_runtime::accounts_background_service::AbsRequestSender::default(), Some(warp_slot), ); leader_schedule_cache.set_root(&bank_forks.root_bank()); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 9196437b3d..5a8be3eae0 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -45,7 +45,7 @@ mod tests { snapshot_packager_service::{PendingSnapshotPackage, SnapshotPackagerService}, }; use solana_runtime::{ - accounts_background_service::{ABSRequestSender, SnapshotRequestHandler}, + accounts_background_service::{AbsRequestSender, SnapshotRequestHandler}, accounts_db, bank::{Bank, BankSlotDelta}, bank_forks::{ArchiveFormat, BankForks, SnapshotConfig}, @@ -203,7 +203,7 @@ mod tests { let (s, snapshot_request_receiver) = unbounded(); let (accounts_package_sender, _r) = channel(); - let request_sender = ABSRequestSender::new(Some(s)); + let request_sender = AbsRequestSender::new(Some(s)); let snapshot_request_handler = SnapshotRequestHandler { snapshot_config: snapshot_test_config.snapshot_config.clone(), snapshot_request_receiver, @@ -518,7 +518,7 @@ mod tests { (*add_root_interval * num_set_roots * 2) as u64, ); let mut current_bank = snapshot_test_config.bank_forks[0].clone(); - let request_sender = ABSRequestSender::new(Some(snapshot_sender)); + let request_sender = AbsRequestSender::new(Some(snapshot_sender)); for _ in 0..num_set_roots { for _ in 0..*add_root_interval { let new_slot = current_bank.slot() + 1; diff --git a/frozen-abi/src/abi_digester.rs b/frozen-abi/src/abi_digester.rs index 7365d195b8..c233a29867 100644 --- a/frozen-abi/src/abi_digester.rs +++ 
b/frozen-abi/src/abi_digester.rs @@ -1,15 +1,11 @@ use crate::abi_example::{normalize_type_name, AbiEnumVisitor}; use crate::hash::{Hash, Hasher}; - use log::*; - use serde::ser::Error as SerdeError; use serde::ser::*; use serde::{Serialize, Serializer}; - use std::any::type_name; use std::io::Write; - use thiserror::Error; #[derive(Debug)] @@ -561,21 +557,21 @@ mod tests { #[frozen_abi(digest = "GttWH8FAY3teUjTaSds9mL3YbiDQ7qWw7WAvDXKd4ZzX")] type TestUnitStruct = std::marker::PhantomData; - #[frozen_abi(digest = "2zvXde11f8sNnFbc9E6ZZeFxV7D2BTVLKEZmNTsCDBpS")] + #[frozen_abi(digest = "6kj3mPXbzWTwZho48kZWxZjuseLU2oiqhbpqca4DmcRq")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestEnum { - VARIANT1, - VARIANT2, + Variant1, + Variant2, } - #[frozen_abi(digest = "6keb3v7GXLahhL6zoinzCWwSvB3KhmvZMB3tN2mamAm3")] + #[frozen_abi(digest = "3WqYwnbQEdu6iPZi5LJa2b5kw55hxBtZdqFqiViFCKPo")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestTupleVariant { - VARIANT1(u8, u16), - VARIANT2(u8, u16), + Variant1(u8, u16), + Variant2(u8, u16), } - #[frozen_abi(digest = "DywMfwKq8HZCbUfTwnemHWMN8LvMZCvipQuLddQ2ywwG")] + #[frozen_abi(digest = "4E9gJjvKiETBeZ8dybZPAQ7maaHTHFucmLqgX2m6yrBh")] #[derive(Serialize, AbiExample)] struct TestVecEnum { enums: Vec, @@ -642,21 +638,21 @@ mod tests { _skipped_test_field: i8, } - #[frozen_abi(digest = "2zvXde11f8sNnFbc9E6ZZeFxV7D2BTVLKEZmNTsCDBpS")] + #[frozen_abi(digest = "6kj3mPXbzWTwZho48kZWxZjuseLU2oiqhbpqca4DmcRq")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestEnum { - VARIANT1, - VARIANT2, + Variant1, + Variant2, #[serde(skip)] #[allow(dead_code)] - VARIANT3, + Variant3, } - #[frozen_abi(digest = "6keb3v7GXLahhL6zoinzCWwSvB3KhmvZMB3tN2mamAm3")] + #[frozen_abi(digest = "3WqYwnbQEdu6iPZi5LJa2b5kw55hxBtZdqFqiViFCKPo")] #[derive(Serialize, AbiExample, AbiEnumVisitor)] enum TestTupleVariant { - VARIANT1(u8, u16), - VARIANT2(u8, u16, #[serde(skip)] u32), + Variant1(u8, u16), + Variant2(u8, u16, #[serde(skip)] u32), } } } diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 45e6b8f102..fdb87f95fe 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -45,7 +45,7 @@ use std::{ cmp, collections::{HashMap, HashSet}, fs, - io::{Error as IOError, ErrorKind}, + io::{Error as IoError, ErrorKind}, path::{Path, PathBuf}, rc::Rc, sync::{ @@ -2082,7 +2082,7 @@ impl Blockstore { Some((slot, _)) => { let confirmed_block = self.get_confirmed_block(slot, false).map_err(|err| { - BlockstoreError::IO(IOError::new( + BlockstoreError::Io(IoError::new( ErrorKind::Other, format!("Unable to get confirmed block: {}", err), )) @@ -2133,7 +2133,7 @@ impl Blockstore { Some((slot, _)) => { let confirmed_block = self.get_confirmed_block(slot, false).map_err(|err| { - BlockstoreError::IO(IOError::new( + BlockstoreError::Io(IoError::new( ErrorKind::Other, format!("Unable to get confirmed block: {}", err), )) @@ -3256,7 +3256,7 @@ pub fn create_new_ledger( error!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?")); error!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?")); - return Err(BlockstoreError::IO(IOError::new( + return Err(BlockstoreError::Io(IoError::new( ErrorKind::Other, format!( "Error trying to generate snapshot archive: {}", @@ -3303,7 +3303,7 @@ pub fn create_new_ledger( error_messages += &format!("/failed to stash problematic rocksdb: {}", e) }); - return Err(BlockstoreError::IO(IOError::new( + return Err(BlockstoreError::Io(IoError::new( ErrorKind::Other, format!( "Error checking to unpack genesis 
archive: {}{}", diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index b7e8c238f9..14466a2333 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -64,7 +64,7 @@ pub enum BlockstoreError { RocksDb(#[from] rocksdb::Error), SlotNotRooted, DeadSlot, - IO(#[from] std::io::Error), + Io(#[from] std::io::Error), Serialize(#[from] Box), FsExtraError(#[from] fs_extra::error::Error), SlotCleanedUp, diff --git a/ledger/src/entry.rs b/ledger/src/entry.rs index 6a03b42e01..1cde091107 100644 --- a/ledger/src/entry.rs +++ b/ledger/src/entry.rs @@ -214,8 +214,8 @@ pub struct GpuVerificationData { } pub enum DeviceVerificationData { - CPU(), - GPU(GpuVerificationData), + Cpu(), + Gpu(GpuVerificationData), } pub struct EntryVerificationState { @@ -257,7 +257,7 @@ impl EntryVerificationState { pub fn finish_verify(&mut self, entries: &[Entry]) -> bool { match &mut self.device_verification_data { - DeviceVerificationData::GPU(verification_state) => { + DeviceVerificationData::Gpu(verification_state) => { let gpu_time_us = verification_state.thread_h.take().unwrap().join().unwrap(); let mut verify_check_time = Measure::start("verify_check"); @@ -297,7 +297,7 @@ impl EntryVerificationState { }; res } - DeviceVerificationData::CPU() => { + DeviceVerificationData::Cpu() => { self.verification_status == EntryVerificationStatus::Success } } @@ -380,7 +380,7 @@ impl EntrySlice for [Entry] { }, poh_duration_us, transaction_duration_us: 0, - device_verification_data: DeviceVerificationData::CPU(), + device_verification_data: DeviceVerificationData::Cpu(), } } @@ -464,7 +464,7 @@ impl EntrySlice for [Entry] { }, poh_duration_us, transaction_duration_us: 0, - device_verification_data: DeviceVerificationData::CPU(), + device_verification_data: DeviceVerificationData::Cpu(), } } @@ -527,7 +527,7 @@ impl EntrySlice for [Entry] { verification_status: EntryVerificationStatus::Failure, transaction_duration_us, poh_duration_us: 0, - device_verification_data: DeviceVerificationData::CPU(), + device_verification_data: DeviceVerificationData::Cpu(), }; } @@ -607,7 +607,7 @@ impl EntrySlice for [Entry] { }) }); - let device_verification_data = DeviceVerificationData::GPU(GpuVerificationData { + let device_verification_data = DeviceVerificationData::Gpu(GpuVerificationData { thread_h: Some(gpu_verify_thread), tx_hashes, hashes: Some(hashes), diff --git a/log-analyzer/src/main.rs b/log-analyzer/src/main.rs index 8d0852b02a..b9ce45dd64 100644 --- a/log-analyzer/src/main.rs +++ b/log-analyzer/src/main.rs @@ -11,7 +11,7 @@ use std::ops::Sub; use std::path::PathBuf; #[derive(Deserialize, Serialize, Debug)] -struct IPAddrMapping { +struct IAddrMapping { private: String, public: String, } @@ -90,7 +90,7 @@ impl Sub for &LogLine { } } -fn map_ip_address(mappings: &[IPAddrMapping], target: String) -> String { +fn map_ip_address(mappings: &[IAddrMapping], target: String) -> String { for mapping in mappings { if target.contains(&mapping.private) { return target.replace(&mapping.private, mapping.public.as_str()); @@ -100,7 +100,7 @@ fn map_ip_address(mappings: &[IPAddrMapping], target: String) -> String { } fn process_iftop_logs(matches: &ArgMatches) { - let mut map_list: Vec = vec![]; + let mut map_list: Vec = vec![]; if let ("map-IP", Some(args_matches)) = matches.subcommand() { let mut list = args_matches .value_of("list") diff --git a/merkle-root-bench/src/main.rs b/merkle-root-bench/src/main.rs index 4beb9515cf..a0a91fe7b4 100644 --- a/merkle-root-bench/src/main.rs +++ 
b/merkle-root-bench/src/main.rs @@ -1,7 +1,7 @@ extern crate log; use clap::{crate_description, crate_name, value_t, App, Arg}; use solana_measure::measure::Measure; -use solana_runtime::accounts_db::AccountsDB; +use solana_runtime::accounts_db::AccountsDb; use solana_sdk::{hash::Hash, pubkey::Pubkey}; fn main() { @@ -36,7 +36,7 @@ fn main() { let hashes = hashes.clone(); // done outside timing let mut time = Measure::start("compute_merkle_root_and_capitalization"); let fanout = 16; - AccountsDB::compute_merkle_root_and_capitalization(hashes, fanout); + AccountsDb::compute_merkle_root_and_capitalization(hashes, fanout); time.stop(); time.as_us() }) diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index ad93ec9ad4..e55fe3cb91 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -873,7 +873,7 @@ impl ProgramTestContext { )); bank_forks.set_root( pre_warp_slot, - &solana_runtime::accounts_background_service::ABSRequestSender::default(), + &solana_runtime::accounts_background_service::AbsRequestSender::default(), Some(warp_slot), ); diff --git a/programs/bpf/benches/bpf_loader.rs b/programs/bpf/benches/bpf_loader.rs index 97bcbc9ea5..23e784c9d1 100644 --- a/programs/bpf/benches/bpf_loader.rs +++ b/programs/bpf/benches/bpf_loader.rs @@ -7,7 +7,7 @@ extern crate solana_bpf_loader_program; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; use solana_bpf_loader_program::{ - create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BPFError, + create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BpfError, ThisInstructionMeter, }; use solana_measure::measure::Measure; @@ -76,7 +76,7 @@ fn bench_program_create_executable(bencher: &mut Bencher) { bencher.iter(|| { let _ = - Executable::::from_elf(&elf, None, Config::default()) + Executable::::from_elf(&elf, None, Config::default()) .unwrap(); }); } @@ -95,7 +95,7 @@ fn bench_program_alu(bencher: &mut Bencher) { let elf = load_elf("bench_alu").unwrap(); let mut executable = - Executable::::from_elf(&elf, None, Config::default()) + Executable::::from_elf(&elf, None, Config::default()) .unwrap(); executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap()); executable.jit_compile().unwrap(); @@ -221,7 +221,7 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let elf = load_elf("tuner").unwrap(); let mut executable = - Executable::::from_elf(&elf, None, Config::default()) + Executable::::from_elf(&elf, None, Config::default()) .unwrap(); executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap()); let compute_meter = invoke_context.get_compute_meter(); diff --git a/programs/bpf_loader/src/allocator_bump.rs b/programs/bpf_loader/src/allocator_bump.rs index 406bb1481d..05d68ba912 100644 --- a/programs/bpf_loader/src/allocator_bump.rs +++ b/programs/bpf_loader/src/allocator_bump.rs @@ -4,14 +4,14 @@ use alloc::{Alloc, AllocErr}; use std::alloc::Layout; #[derive(Debug)] -pub struct BPFAllocator { +pub struct BpfAllocator { heap: Vec, start: u64, len: u64, pos: u64, } -impl BPFAllocator { +impl BpfAllocator { pub fn new(heap: Vec, virtual_address: u64) -> Self { let len = heap.len() as u64; Self { @@ -23,7 +23,7 @@ impl BPFAllocator { } } -impl Alloc for BPFAllocator { +impl Alloc for BpfAllocator { fn alloc(&mut self, layout: Layout) -> Result { let bytes_to_align = (self.pos as *const u8).align_offset(layout.align()) as u64; if self diff --git a/programs/bpf_loader/src/bpf_verifier.rs b/programs/bpf_loader/src/bpf_verifier.rs 
index c42e522282..7038cd14c7 100644 --- a/programs/bpf_loader/src/bpf_verifier.rs +++ b/programs/bpf_loader/src/bpf_verifier.rs @@ -1,4 +1,4 @@ -use crate::BPFError; +use crate::BpfError; use solana_rbpf::ebpf; use thiserror::Error; @@ -58,7 +58,7 @@ fn adj_insn_ptr(insn_ptr: usize) -> usize { insn_ptr + ebpf::ELF_INSN_DUMP_OFFSET } -fn check_prog_len(prog: &[u8], is_program_size_cap: bool) -> Result<(), BPFError> { +fn check_prog_len(prog: &[u8], is_program_size_cap: bool) -> Result<(), BpfError> { if prog.len() % ebpf::INSN_SIZE != 0 { return Err(VerifierError::ProgramLengthNotMultiple.into()); } @@ -72,21 +72,21 @@ fn check_prog_len(prog: &[u8], is_program_size_cap: bool) -> Result<(), BPFError Ok(()) } -fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BPFError> { +fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BpfError> { if insn.imm == 0 { return Err(VerifierError::DivisionByZero(adj_insn_ptr(insn_ptr)).into()); } Ok(()) } -fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BPFError> { +fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), BpfError> { match insn.imm { 16 | 32 | 64 => Ok(()), _ => Err(VerifierError::UnsupportedLEBEArgument(adj_insn_ptr(insn_ptr)).into()), } } -fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { +fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), BpfError> { if insn_ptr + 1 >= (prog.len() / ebpf::INSN_SIZE) { // Last instruction cannot be LD_DW because there would be no 2nd DW return Err(VerifierError::LDDWCannotBeLast.into()); @@ -98,7 +98,7 @@ fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { Ok(()) } -fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { +fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), BpfError> { let insn = ebpf::get_insn(prog, insn_ptr); // if insn.off == -1 { // return Err(VerifierError::InfiniteLoop(adj_insn_ptr(insn_ptr)).into()); @@ -121,7 +121,7 @@ fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), BPFError> { Ok(()) } -fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), BPFError> { +fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), BpfError> { if insn.src > 10 { return Err(VerifierError::InvalidSourceRegister(adj_insn_ptr(insn_ptr)).into()); } @@ -149,7 +149,7 @@ fn check_imm_register(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Verifier } #[rustfmt::skip] -pub fn check(prog: &[u8], is_program_size_cap: bool) -> Result<(), BPFError> { +pub fn check(prog: &[u8], is_program_size_cap: bool) -> Result<(), BpfError> { check_prog_len(prog, is_program_size_cap)?; let mut insn_ptr: usize = 0; diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 9eae2fe3e9..acdafe4f08 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -53,17 +53,17 @@ solana_sdk::declare_builtin!( /// Errors returned by functions the BPF Loader registers with the VM #[derive(Debug, Error, PartialEq)] -pub enum BPFError { +pub enum BpfError { #[error("{0}")] VerifierError(#[from] VerifierError), #[error("{0}")] SyscallError(#[from] SyscallError), } -impl UserDefinedError for BPFError {} +impl UserDefinedError for BpfError {} fn map_ebpf_error( invoke_context: &mut dyn InvokeContext, - e: EbpfError<BPFError>, + e: EbpfError<BpfError>, ) -> InstructionError { ic_msg!(invoke_context, "{}", e); InstructionError::InvalidAccountData @@ -74,9 +74,9 @@ pub fn create_and_cache_executor(
data: &[u8], invoke_context: &mut dyn InvokeContext, use_jit: bool, -) -> Result, InstructionError> { +) -> Result, InstructionError> { let bpf_compute_budget = invoke_context.get_bpf_compute_budget(); - let mut program = Executable::::from_elf( + let mut program = Executable::::from_elf( data, None, Config { @@ -106,7 +106,7 @@ pub fn create_and_cache_executor( return Err(InstructionError::ProgramFailedToCompile); } } - let executor = Arc::new(BPFExecutor { program }); + let executor = Arc::new(BpfExecutor { program }); invoke_context.add_executor(key, executor.clone()); Ok(executor) } @@ -144,11 +144,11 @@ const DEFAULT_HEAP_SIZE: usize = 32 * 1024; /// Create the BPF virtual machine pub fn create_vm<'a>( loader_id: &'a Pubkey, - program: &'a dyn Executable, + program: &'a dyn Executable, parameter_bytes: &mut [u8], parameter_accounts: &'a [KeyedAccount<'a>], invoke_context: &'a mut dyn InvokeContext, -) -> Result, EbpfError> { +) -> Result, EbpfError> { let heap = vec![0_u8; DEFAULT_HEAP_SIZE]; let heap_region = MemoryRegion::new_from_slice(&heap, MM_HEAP_START, 0, true); let mut vm = EbpfVm::new(program, parameter_bytes, &[heap_region])?; @@ -751,18 +751,18 @@ impl InstructionMeter for ThisInstructionMeter { } /// BPF Loader's Executor implementation -pub struct BPFExecutor { - program: Box>, +pub struct BpfExecutor { + program: Box>, } // Well, implement Debug for solana_rbpf::vm::Executable in solana-rbpf... -impl Debug for BPFExecutor { +impl Debug for BpfExecutor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "BPFExecutor({:p})", self) + write!(f, "BpfExecutor({:p})", self) } } -impl Executor for BPFExecutor { +impl Executor for BpfExecutor { fn execute( &self, loader_id: &Pubkey, @@ -822,7 +822,7 @@ impl Executor for BPFExecutor { } Err(error) => { let error = match error { - EbpfError::UserError(BPFError::SyscallError( + EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(error), )) => error, err => { @@ -892,14 +892,14 @@ mod tests { ]; let input = &mut [0x00]; - let program = Executable::::from_text_bytes( + let program = Executable::::from_text_bytes( program, None, Config::default(), ) .unwrap(); let mut vm = - EbpfVm::::new(program.as_ref(), input, &[]).unwrap(); + EbpfVm::::new(program.as_ref(), input, &[]).unwrap(); let mut instruction_meter = TestInstructionMeter { remaining: 10 }; vm.execute_program_interpreted(&mut instruction_meter) .unwrap(); diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 6958e5cc67..c0291c1c4d 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -1,4 +1,4 @@ -use crate::{alloc, BPFError}; +use crate::{alloc, BpfError}; use alloc::Alloc; use curve25519_dalek::{ristretto::RistrettoPoint, scalar::Scalar}; use solana_rbpf::{ @@ -73,17 +73,17 @@ pub enum SyscallError { #[error("Too many accounts passed to inner instruction")] TooManyAccounts, } -impl From for EbpfError { +impl From for EbpfError { fn from(error: SyscallError) -> Self { EbpfError::UserError(error.into()) } } trait SyscallConsume { - fn consume(&mut self, amount: u64) -> Result<(), EbpfError>; + fn consume(&mut self, amount: u64) -> Result<(), EbpfError>; } impl SyscallConsume for Rc> { - fn consume(&mut self, amount: u64) -> Result<(), EbpfError> { + fn consume(&mut self, amount: u64) -> Result<(), EbpfError> { self.try_borrow_mut() .map_err(|_| SyscallError::InvokeContextBorrowFailed)? 
.consume(amount) @@ -98,11 +98,11 @@ impl SyscallConsume for Rc> { /// Only one allocator is currently supported /// Simple bump allocator, never frees -use crate::allocator_bump::BPFAllocator; +use crate::allocator_bump::BpfAllocator; pub fn register_syscalls( invoke_context: &mut dyn InvokeContext, -) -> Result> { +) -> Result> { let mut syscall_registry = SyscallRegistry::default(); syscall_registry.register_syscall_by_name(b"abort", SyscallAbort::call)?; @@ -162,11 +162,11 @@ macro_rules! bind_feature_gated_syscall_context_object { pub fn bind_syscall_context_objects<'a>( loader_id: &'a Pubkey, - vm: &mut EbpfVm<'a, BPFError, crate::ThisInstructionMeter>, + vm: &mut EbpfVm<'a, BpfError, crate::ThisInstructionMeter>, callers_keyed_accounts: &'a [KeyedAccount<'a>], invoke_context: &'a mut dyn InvokeContext, heap: Vec, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { let bpf_compute_budget = invoke_context.get_bpf_compute_budget(); // Syscall functions common across languages @@ -293,7 +293,7 @@ pub fn bind_syscall_context_objects<'a>( vm.bind_syscall_context_object( Box::new(SyscallAllocFree { aligned: *loader_id != bpf_loader_deprecated::id(), - allocator: BPFAllocator::new(heap, MM_HEAP_START), + allocator: BpfAllocator::new(heap, MM_HEAP_START), }), None, )?; @@ -306,8 +306,8 @@ fn translate( access_type: AccessType, vm_addr: u64, len: u64, -) -> Result> { - memory_mapping.map::(access_type, vm_addr, len) +) -> Result> { + memory_mapping.map::(access_type, vm_addr, len) } fn translate_type_inner<'a, T>( @@ -315,7 +315,7 @@ fn translate_type_inner<'a, T>( access_type: AccessType, vm_addr: u64, loader_id: &Pubkey, -) -> Result<&'a mut T, EbpfError> { +) -> Result<&'a mut T, EbpfError> { if loader_id != &bpf_loader_deprecated::id() && (vm_addr as u64 as *mut T).align_offset(align_of::()) != 0 { @@ -333,14 +333,14 @@ fn translate_type_mut<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, loader_id: &Pubkey, -) -> Result<&'a mut T, EbpfError> { +) -> Result<&'a mut T, EbpfError> { translate_type_inner::(memory_mapping, AccessType::Store, vm_addr, loader_id) } fn translate_type<'a, T>( memory_mapping: &MemoryMapping, vm_addr: u64, loader_id: &Pubkey, -) -> Result<&'a T, EbpfError> { +) -> Result<&'a T, EbpfError> { match translate_type_inner::(memory_mapping, AccessType::Load, vm_addr, loader_id) { Ok(value) => Ok(&*value), Err(e) => Err(e), @@ -353,7 +353,7 @@ fn translate_slice_inner<'a, T>( vm_addr: u64, len: u64, loader_id: &Pubkey, -) -> Result<&'a mut [T], EbpfError> { +) -> Result<&'a mut [T], EbpfError> { if loader_id != &bpf_loader_deprecated::id() && (vm_addr as u64 as *mut T).align_offset(align_of::()) != 0 { @@ -377,7 +377,7 @@ fn translate_slice_mut<'a, T>( vm_addr: u64, len: u64, loader_id: &Pubkey, -) -> Result<&'a mut [T], EbpfError> { +) -> Result<&'a mut [T], EbpfError> { translate_slice_inner::(memory_mapping, AccessType::Store, vm_addr, len, loader_id) } fn translate_slice<'a, T>( @@ -385,7 +385,7 @@ fn translate_slice<'a, T>( vm_addr: u64, len: u64, loader_id: &Pubkey, -) -> Result<&'a [T], EbpfError> { +) -> Result<&'a [T], EbpfError> { match translate_slice_inner::(memory_mapping, AccessType::Load, vm_addr, len, loader_id) { Ok(value) => Ok(&*value), Err(e) => Err(e), @@ -399,8 +399,8 @@ fn translate_string_and_do( addr: u64, len: u64, loader_id: &Pubkey, - work: &mut dyn FnMut(&str) -> Result>, -) -> Result> { + work: &mut dyn FnMut(&str) -> Result>, +) -> Result> { let buf = translate_slice::(memory_mapping, addr, len, loader_id)?; let i = match 
buf.iter().position(|byte| *byte == 0) { Some(i) => i, @@ -417,7 +417,7 @@ fn translate_string_and_do( /// `abort()` is not intended to be called explicitly by the program. /// Causes the BPF program to be halted immediately pub struct SyscallAbort {} -impl SyscallObject for SyscallAbort { +impl SyscallObject for SyscallAbort { fn call( &mut self, _arg1: u64, @@ -426,7 +426,7 @@ impl SyscallObject for SyscallAbort { _arg4: u64, _arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { *result = Err(SyscallError::Abort.into()); } @@ -439,7 +439,7 @@ pub struct SyscallPanic<'a> { compute_meter: Option>>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallPanic<'a> { +impl<'a> SyscallObject for SyscallPanic<'a> { fn call( &mut self, file: u64, @@ -448,7 +448,7 @@ impl<'a> SyscallObject for SyscallPanic<'a> { column: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { if let Some(ref mut compute_meter) = self.compute_meter { question_mark!(compute_meter.consume(len), result); @@ -471,7 +471,7 @@ pub struct SyscallLog<'a> { logger: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallLog<'a> { +impl<'a> SyscallObject for SyscallLog<'a> { fn call( &mut self, addr: u64, @@ -480,7 +480,7 @@ impl<'a> SyscallObject for SyscallLog<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { if self.per_byte_cost { question_mark!(self.compute_meter.consume(len), result); @@ -510,7 +510,7 @@ pub struct SyscallLogU64 { compute_meter: Rc>, logger: Rc>, } -impl SyscallObject for SyscallLogU64 { +impl SyscallObject for SyscallLogU64 { fn call( &mut self, arg1: u64, @@ -519,7 +519,7 @@ impl SyscallObject for SyscallLogU64 { arg4: u64, arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); stable_log::program_log( @@ -539,7 +539,7 @@ pub struct SyscallLogBpfComputeUnits { compute_meter: Rc>, logger: Rc>, } -impl SyscallObject for SyscallLogBpfComputeUnits { +impl SyscallObject for SyscallLogBpfComputeUnits { fn call( &mut self, _arg1: u64, @@ -548,7 +548,7 @@ impl SyscallObject for SyscallLogBpfComputeUnits { _arg4: u64, _arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); let logger = question_mark!( @@ -574,7 +574,7 @@ pub struct SyscallLogPubkey<'a> { logger: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallLogPubkey<'a> { +impl<'a> SyscallObject for SyscallLogPubkey<'a> { fn call( &mut self, pubkey_addr: u64, @@ -583,7 +583,7 @@ impl<'a> SyscallObject for SyscallLogPubkey<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); let pubkey = question_mark!( @@ -603,9 +603,9 @@ impl<'a> SyscallObject for SyscallLogPubkey<'a> { /// to the VM to use for enforcement. 
pub struct SyscallAllocFree { aligned: bool, - allocator: BPFAllocator, + allocator: BpfAllocator, } -impl SyscallObject for SyscallAllocFree { +impl SyscallObject for SyscallAllocFree { fn call( &mut self, size: u64, @@ -614,7 +614,7 @@ impl SyscallObject for SyscallAllocFree { _arg4: u64, _arg5: u64, _memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { let align = if self.aligned { align_of::() @@ -646,7 +646,7 @@ fn translate_program_address_inputs<'a>( program_id_addr: u64, memory_mapping: &MemoryMapping, loader_id: &Pubkey, -) -> Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError> { +) -> Result<(Vec<&'a [u8]>, &'a Pubkey), EbpfError> { let untranslated_seeds = translate_slice::<&[&u8]>(memory_mapping, seeds_addr, seeds_len, loader_id)?; if untranslated_seeds.len() > MAX_SEEDS { @@ -662,7 +662,7 @@ fn translate_program_address_inputs<'a>( loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let program_id = translate_type::(memory_mapping, program_id_addr, loader_id)?; Ok((seeds, program_id)) } @@ -673,7 +673,7 @@ struct SyscallCreateProgramAddress<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { +impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { fn call( &mut self, seeds_addr: u64, @@ -682,7 +682,7 @@ impl<'a> SyscallObject for SyscallCreateProgramAddress<'a> { address_addr: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { let (seeds, program_id) = question_mark!( translate_program_address_inputs( @@ -718,7 +718,7 @@ struct SyscallTryFindProgramAddress<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { +impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { fn call( &mut self, seeds_addr: u64, @@ -727,7 +727,7 @@ impl<'a> SyscallObject for SyscallTryFindProgramAddress<'a> { address_addr: u64, bump_seed_addr: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { let (seeds, program_id) = question_mark!( translate_program_address_inputs( @@ -777,7 +777,7 @@ pub struct SyscallSha256<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallSha256<'a> { +impl<'a> SyscallObject for SyscallSha256<'a> { fn call( &mut self, vals_addr: u64, @@ -786,7 +786,7 @@ impl<'a> SyscallObject for SyscallSha256<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.sha256_base_cost), result); let hash_result = question_mark!( @@ -833,7 +833,7 @@ pub struct SyscallRistrettoMul<'a> { compute_meter: Rc>, loader_id: &'a Pubkey, } -impl<'a> SyscallObject for SyscallRistrettoMul<'a> { +impl<'a> SyscallObject for SyscallRistrettoMul<'a> { fn call( &mut self, point_addr: u64, @@ -842,7 +842,7 @@ impl<'a> SyscallObject for SyscallRistrettoMul<'a> { _arg4: u64, _arg5: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { question_mark!(self.compute_meter.consume(self.cost), result); @@ -882,14 +882,14 @@ type TranslatedAccounts<'a> = ( /// Implemented by language specific data structure translators trait SyscallInvokeSigned<'a> { - fn get_context_mut(&self) -> Result, EbpfError>; - fn get_context(&self) -> Result, EbpfError>; + fn get_context_mut(&self) -> Result, EbpfError>; + fn get_context(&self) -> Result, EbpfError>; fn get_callers_keyed_accounts(&self) -> &'a 
[KeyedAccount<'a>]; fn translate_instruction( &self, addr: u64, memory_mapping: &MemoryMapping, - ) -> Result>; + ) -> Result>; fn translate_accounts( &self, account_keys: &[Pubkey], @@ -897,14 +897,14 @@ trait SyscallInvokeSigned<'a> { account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError>; + ) -> Result, EbpfError>; fn translate_signers( &self, program_id: &Pubkey, signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError>; + ) -> Result, EbpfError>; } /// Cross-program invocation called from Rust @@ -914,12 +914,12 @@ pub struct SyscallInvokeSignedRust<'a> { loader_id: &'a Pubkey, } impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { - fn get_context_mut(&self) -> Result, EbpfError> { + fn get_context_mut(&self) -> Result, EbpfError> { self.invoke_context .try_borrow_mut() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) } - fn get_context(&self) -> Result, EbpfError> { + fn get_context(&self) -> Result, EbpfError> { self.invoke_context .try_borrow() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) @@ -931,7 +931,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { &self, addr: u64, memory_mapping: &MemoryMapping, - ) -> Result> { + ) -> Result> { let ix = translate_type::(memory_mapping, addr, self.loader_id)?; check_instruction_size( @@ -968,7 +968,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let invoke_context = self.invoke_context.borrow(); let account_infos = translate_slice::( @@ -987,7 +987,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { self.loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let translate = |account_info: &AccountInfo| { // Translate the account from user space @@ -1075,7 +1075,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let mut signers = Vec::new(); if signers_seeds_len > 0 { let signers_seeds = translate_slice::<&[&[u8]]>( @@ -1110,7 +1110,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { self.loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let signer = Pubkey::create_program_address(&seeds, program_id) .map_err(SyscallError::BadSeeds)?; signers.push(signer); @@ -1121,7 +1121,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> { } } } -impl<'a> SyscallObject for SyscallInvokeSignedRust<'a> { +impl<'a> SyscallObject for SyscallInvokeSignedRust<'a> { fn call( &mut self, instruction_addr: u64, @@ -1130,7 +1130,7 @@ impl<'a> SyscallObject for SyscallInvokeSignedRust<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { *result = call( self, @@ -1197,12 +1197,12 @@ pub struct SyscallInvokeSignedC<'a> { loader_id: &'a Pubkey, } impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { - fn get_context_mut(&self) -> Result, EbpfError> { + fn get_context_mut(&self) -> Result, EbpfError> { self.invoke_context .try_borrow_mut() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) } - fn get_context(&self) -> Result, EbpfError> { + fn get_context(&self) -> Result, EbpfError> { 
self.invoke_context .try_borrow() .map_err(|_| SyscallError::InvokeContextBorrowFailed.into()) @@ -1216,7 +1216,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { &self, addr: u64, memory_mapping: &MemoryMapping, - ) -> Result> { + ) -> Result> { let ix_c = translate_type::(memory_mapping, addr, self.loader_id)?; check_instruction_size( @@ -1250,7 +1250,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { is_writable: meta_c.is_writable, }) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; Ok(Instruction { program_id: *program_id, @@ -1266,7 +1266,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { account_infos_addr: u64, account_infos_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { let invoke_context = self.invoke_context.borrow(); let account_infos = translate_slice::( @@ -1281,7 +1281,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { .map(|account_info| { translate_type::(memory_mapping, account_info.key_addr, self.loader_id) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; let translate = |account_info: &SolAccountInfo| { // Translate the account from user space @@ -1358,7 +1358,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - ) -> Result, EbpfError> { + ) -> Result, EbpfError> { if signers_seeds_len > 0 { let signers_seeds = translate_slice::( memory_mapping, @@ -1394,17 +1394,17 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> { self.loader_id, ) }) - .collect::, EbpfError>>()?; + .collect::, EbpfError>>()?; Pubkey::create_program_address(&seeds_bytes, program_id) .map_err(|err| SyscallError::BadSeeds(err).into()) }) - .collect::, EbpfError>>()?) + .collect::, EbpfError>>()?) 
} else { Ok(vec![]) } } } -impl<'a> SyscallObject for SyscallInvokeSignedC<'a> { +impl<'a> SyscallObject for SyscallInvokeSignedC<'a> { fn call( &mut self, instruction_addr: u64, @@ -1413,7 +1413,7 @@ impl<'a> SyscallObject for SyscallInvokeSignedC<'a> { signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, - result: &mut Result>, + result: &mut Result>, ) { *result = call( self, @@ -1434,9 +1434,9 @@ fn get_translated_accounts<'a, T, F>( account_infos: &[T], invoke_context: &Ref<&mut dyn InvokeContext>, do_translate: F, -) -> Result, EbpfError> +) -> Result, EbpfError> where - F: Fn(&T) -> Result, EbpfError>, + F: Fn(&T) -> Result, EbpfError>, { let mut accounts = Vec::with_capacity(account_keys.len()); let mut refs = Vec::with_capacity(account_keys.len()); @@ -1490,7 +1490,7 @@ fn check_instruction_size( num_accounts: usize, data_len: usize, invoke_context: &Ref<&mut dyn InvokeContext>, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { let size = num_accounts .saturating_mul(size_of::()) .saturating_add(data_len); @@ -1506,7 +1506,7 @@ fn check_instruction_size( fn check_account_infos( len: usize, invoke_context: &Ref<&mut dyn InvokeContext>, -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { if len * size_of::() > invoke_context .get_bpf_compute_budget() @@ -1523,7 +1523,7 @@ fn check_account_infos( fn check_authorized_program( program_id: &Pubkey, instruction_data: &[u8], -) -> Result<(), EbpfError> { +) -> Result<(), EbpfError> { if native_loader::check_id(program_id) || bpf_loader::check_id(program_id) || bpf_loader_deprecated::check_id(program_id) @@ -1539,7 +1539,7 @@ fn get_upgradeable_executable( callee_program_id: &Pubkey, program_account: &RefCell, invoke_context: &Ref<&mut dyn InvokeContext>, -) -> Result)>, EbpfError> { +) -> Result)>, EbpfError> { if program_account.borrow().owner == bpf_loader_upgradeable::id() { match program_account.borrow().state() { Ok(UpgradeableLoaderState::Program { @@ -1579,7 +1579,7 @@ fn call<'a>( signers_seeds_addr: u64, signers_seeds_len: u64, memory_mapping: &MemoryMapping, -) -> Result> { +) -> Result> { let ( message, executables, @@ -1854,7 +1854,7 @@ mod tests { let translated_instruction = translate_type::(&memory_mapping, 96, &bpf_loader::id()).unwrap(); assert_eq!(instruction, *translated_instruction); - memory_mapping.resize_region::(0, 1).unwrap(); + memory_mapping.resize_region::(0, 1).unwrap(); assert!(translate_type::(&memory_mapping, 100, &bpf_loader::id()).is_err()); } @@ -1992,7 +1992,7 @@ mod tests { #[should_panic(expected = "UserError(SyscallError(Abort))")] fn test_syscall_abort() { let memory_mapping = MemoryMapping::new(vec![MemoryRegion::default()], &DEFAULT_CONFIG); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); SyscallAbort::call( &mut SyscallAbort {}, 0, @@ -2030,7 +2030,7 @@ mod tests { compute_meter: Some(compute_meter), loader_id: &bpf_loader::id(), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_panic.call( 100, string.len() as u64, @@ -2041,7 +2041,7 @@ mod tests { &mut result, ); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2051,7 +2051,7 @@ mod tests { compute_meter: None, loader_id: &bpf_loader::id(), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_panic.call( 100, string.len() as u64, @@ -2092,7 +2092,7 @@ mod tests { 
&DEFAULT_CONFIG, ); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2106,7 +2106,7 @@ mod tests { assert_eq!(log.borrow().len(), 1); assert_eq!(log.borrow()[0], "Program log: Gaggablaghblagh!"); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 101, // AccessViolation string.len() as u64, @@ -2117,7 +2117,7 @@ mod tests { &mut result, ); assert_access_violation!(result, 101, string.len() as u64); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64 * 2, // AccessViolation @@ -2128,7 +2128,7 @@ mod tests { &mut result, ); assert_access_violation!(result, 100, string.len() as u64 * 2); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2139,7 +2139,7 @@ mod tests { &mut result, ); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2157,7 +2157,7 @@ mod tests { logger, loader_id: &bpf_loader::id(), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2168,7 +2168,7 @@ mod tests { &mut result, ); result.unwrap(); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log.call( 100, string.len() as u64, @@ -2179,7 +2179,7 @@ mod tests { &mut result, ); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2202,7 +2202,7 @@ mod tests { }; let memory_mapping = MemoryMapping::new(vec![], &DEFAULT_CONFIG); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_log_u64.call(1, 2, 3, 4, 5, &memory_mapping, &mut result); result.unwrap(); @@ -2237,7 +2237,7 @@ mod tests { &DEFAULT_CONFIG, ); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_pubkey.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); result.unwrap(); assert_eq!(log.borrow().len(), 1); @@ -2245,7 +2245,7 @@ mod tests { log.borrow()[0], "Program log: MoqiU1vryuCGQSxFKA1SZ316JdLEFFhoAu6cKUNk7dN" ); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_pubkey.call( 101, // AccessViolation 32, @@ -2256,10 +2256,10 @@ mod tests { &mut result, ); assert_access_violation!(result, 101, 32); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall_sol_pubkey.call(100, 32, 0, 0, 0, &memory_mapping, &mut result); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result @@ -2277,15 +2277,15 @@ mod tests { ); let mut syscall = SyscallAllocFree { aligned: true, - allocator: BPFAllocator::new(heap, MM_HEAP_START), + allocator: BpfAllocator::new(heap, MM_HEAP_START), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); assert_ne!(result.unwrap(), 0); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); assert_eq!(result.unwrap(), 0); - let mut result: Result> = Ok(0); + let mut result: Result> = 
Ok(0); syscall.call(u64::MAX, 0, 0, 0, 0, &memory_mapping, &mut result); assert_eq!(result.unwrap(), 0); } @@ -2298,14 +2298,14 @@ mod tests { ); let mut syscall = SyscallAllocFree { aligned: false, - allocator: BPFAllocator::new(heap, MM_HEAP_START), + allocator: BpfAllocator::new(heap, MM_HEAP_START), }; for _ in 0..100 { - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(1, 0, 0, 0, 0, &memory_mapping, &mut result); assert_ne!(result.unwrap(), 0); } - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); assert_eq!(result.unwrap(), 0); } @@ -2318,14 +2318,14 @@ mod tests { ); let mut syscall = SyscallAllocFree { aligned: true, - allocator: BPFAllocator::new(heap, MM_HEAP_START), + allocator: BpfAllocator::new(heap, MM_HEAP_START), }; for _ in 0..12 { - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(1, 0, 0, 0, 0, &memory_mapping, &mut result); assert_ne!(result.unwrap(), 0); } - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(100, 0, 0, 0, 0, &memory_mapping, &mut result); assert_eq!(result.unwrap(), 0); } @@ -2339,9 +2339,9 @@ mod tests { ); let mut syscall = SyscallAllocFree { aligned: true, - allocator: BPFAllocator::new(heap, MM_HEAP_START), + allocator: BpfAllocator::new(heap, MM_HEAP_START), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call( size_of::() as u64, 0, @@ -2428,13 +2428,13 @@ mod tests { loader_id: &bpf_loader_deprecated::id(), }; - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call(ro_va, ro_len, rw_va, 0, 0, &memory_mapping, &mut result); result.unwrap(); let hash_local = hashv(&[bytes1.as_ref(), bytes2.as_ref()]).to_bytes(); assert_eq!(hash_result, hash_local); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call( ro_va - 1, // AccessViolation ro_len, @@ -2445,7 +2445,7 @@ mod tests { &mut result, ); assert_access_violation!(result, ro_va - 1, ro_len); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call( ro_va, ro_len + 1, // AccessViolation @@ -2456,7 +2456,7 @@ mod tests { &mut result, ); assert_access_violation!(result, ro_va, ro_len + 1); - let mut result: Result> = Ok(0); + let mut result: Result> = Ok(0); syscall.call( ro_va, ro_len, @@ -2470,7 +2470,7 @@ mod tests { syscall.call(ro_va, ro_len, rw_va, 0, 0, &memory_mapping, &mut result); assert_eq!( - Err(EbpfError::UserError(BPFError::SyscallError( + Err(EbpfError::UserError(BpfError::SyscallError( SyscallError::InstructionError(InstructionError::ComputationalBudgetExceeded) ))), result diff --git a/remote-wallet/src/ledger_error.rs b/remote-wallet/src/ledger_error.rs index 6c714cb9c1..6e5b6e2296 100644 --- a/remote-wallet/src/ledger_error.rs +++ b/remote-wallet/src/ledger_error.rs @@ -37,7 +37,7 @@ pub enum LedgerError { SdkTimeout = 0x6810, #[error("Ledger PIC exception")] - SdkExceptionPIC = 0x6811, + SdkExceptionPic = 0x6811, #[error("Ledger app exit exception")] SdkExceptionAppExit = 0x6812, diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 035e1c2414..a98cf97af0 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -1,5 +1,5 @@ use crate::{ - accounts_db::{AccountsDB, BankHashInfo, ErrorCounters, LoadedAccount, ScanStorageResult}, + accounts_db::{AccountsDb, BankHashInfo, ErrorCounters, LoadedAccount, ScanStorageResult}, accounts_index::{AccountIndex, Ancestors, IndexKey}, 
bank::{ NonceRollbackFull, NonceRollbackInfo, TransactionCheckResult, TransactionExecutionResult, @@ -51,8 +51,8 @@ pub struct Accounts { /// my epoch pub epoch: Epoch, - /// Single global AccountsDB - pub accounts_db: Arc, + /// Single global AccountsDb + pub accounts_db: Arc, /// set of writable accounts which are currently in the pipeline pub(crate) account_locks: Mutex>, @@ -94,7 +94,7 @@ impl Accounts { caching_enabled: bool, ) -> Self { Self { - accounts_db: Arc::new(AccountsDB::new_with_config( + accounts_db: Arc::new(AccountsDb::new_with_config( paths, cluster_type, account_indexes, @@ -118,7 +118,7 @@ impl Accounts { } } - pub(crate) fn new_empty(accounts_db: AccountsDB) -> Self { + pub(crate) fn new_empty(accounts_db: AccountsDb) -> Self { Self { accounts_db: Arc::new(accounts_db), account_locks: Mutex::new(HashSet::new()), @@ -560,14 +560,14 @@ impl Accounts { |total_capitalization: &mut u64, (_pubkey, loaded_account, _slot)| { let lamports = loaded_account.lamports(); if Self::is_loadable(lamports) { - let account_cap = AccountsDB::account_balance_for_capitalization( + let account_cap = AccountsDb::account_balance_for_capitalization( lamports, &loaded_account.owner(), loaded_account.executable(), simple_capitalization_enabled, ); - *total_capitalization = AccountsDB::checked_iterative_sum_for_capitalization( + *total_capitalization = AccountsDb::checked_iterative_sum_for_capitalization( *total_capitalization, account_cap, ); @@ -1647,7 +1647,7 @@ mod tests { let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); - // Load accounts owned by various programs into AccountsDB + // Load accounts owned by various programs into AccountsDb let pubkey0 = solana_sdk::pubkey::new_rand(); let account0 = Account::new(1, 0, &Pubkey::new(&[2; 32])); accounts.store_slow_uncached(0, &pubkey0, &account0); diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 5cb74a8856..c784f1c351 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -195,13 +195,13 @@ impl SnapshotRequestHandler { } #[derive(Default)] -pub struct ABSRequestSender { +pub struct AbsRequestSender { snapshot_request_sender: Option, } -impl ABSRequestSender { +impl AbsRequestSender { pub fn new(snapshot_request_sender: Option) -> Self { - ABSRequestSender { + AbsRequestSender { snapshot_request_sender, } } @@ -222,12 +222,12 @@ impl ABSRequestSender { } } -pub struct ABSRequestHandler { +pub struct AbsRequestHandler { pub snapshot_request_handler: Option, pub pruned_banks_receiver: DroppedSlotsReceiver, } -impl ABSRequestHandler { +impl AbsRequestHandler { // Returns the latest requested snapshot block height, if one exists pub fn handle_snapshot_requests( &self, @@ -265,7 +265,7 @@ impl AccountsBackgroundService { pub fn new( bank_forks: Arc>, exit: &Arc, - request_handler: ABSRequestHandler, + request_handler: AbsRequestHandler, accounts_db_caching_enabled: bool, test_hash_calculation: bool, use_index_hash_calculation: bool, @@ -368,7 +368,7 @@ impl AccountsBackgroundService { fn remove_dead_slots( bank: &Bank, - request_handler: &ABSRequestHandler, + request_handler: &AbsRequestHandler, removed_slots_count: &mut usize, total_remove_slots_time: &mut u64, ) { @@ -401,7 +401,7 @@ mod test { let genesis = create_genesis_config(10); let bank0 = Arc::new(Bank::new(&genesis.genesis_config)); let (pruned_banks_sender, pruned_banks_receiver) = unbounded(); - let request_handler = 
ABSRequestHandler { + let request_handler = AbsRequestHandler { snapshot_request_handler: None, pruned_banks_receiver, }; diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 1097d11b43..01fe2535fb 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -14,7 +14,7 @@ //! //! AppendVec's only store accounts for single slots. To bootstrap the //! index from a persistent store of AppendVec's, the entries include -//! a "write_version". A single global atomic `AccountsDB::write_version` +//! a "write_version". A single global atomic `AccountsDb::write_version` //! tracks the number of commits to the entire data store. So the latest //! commit for each slot entry would be indexed. @@ -52,7 +52,7 @@ use std::{ boxed::Box, collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, convert::{TryFrom, TryInto}, - io::{Error as IOError, Result as IOResult}, + io::{Error as IoError, Result as IoResult}, ops::RangeBounds, path::{Path, PathBuf}, sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, @@ -97,7 +97,7 @@ const MERKLE_FANOUT: usize = 16; type DashMapVersionHash = DashMap; lazy_static! { - // FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDB panic has occurred, + // FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDb panic has occurred, // as |cargo test| cannot observe panics in other threads pub static ref FROZEN_ACCOUNT_PANIC: Arc = Arc::new(AtomicBool::new(false)); } @@ -153,7 +153,7 @@ impl ZeroLamport for AccountInfo { } } -/// An offset into the AccountsDB::storage vector +/// An offset into the AccountsDb::storage vector pub type AppendVecId = usize; pub type SnapshotStorage = Vec>; pub type SnapshotStorages = Vec; @@ -282,10 +282,10 @@ impl<'a> LoadedAccount<'a> { pub fn compute_hash(&self, slot: Slot, cluster_type: &ClusterType, pubkey: &Pubkey) -> Hash { match self { LoadedAccount::Stored(stored_account_meta) => { - AccountsDB::hash_stored_account(slot, &stored_account_meta, cluster_type) + AccountsDb::hash_stored_account(slot, &stored_account_meta, cluster_type) } LoadedAccount::Cached((_, cached_account)) => { - AccountsDB::hash_account(slot, &cached_account.account, pubkey, cluster_type) + AccountsDb::hash_account(slot, &cached_account.account, pubkey, cluster_type) } } } @@ -505,7 +505,7 @@ impl AccountStorageEntry { self.id.load(Ordering::Relaxed) } - pub fn flush(&self) -> Result<(), IOError> { + pub fn flush(&self) -> Result<(), IoError> { self.accounts.flush() } @@ -580,8 +580,8 @@ impl AccountStorageEntry { } } -pub fn get_temp_accounts_paths(count: u32) -> IOResult<(Vec, Vec)> { - let temp_dirs: IOResult> = (0..count).map(|_| TempDir::new()).collect(); +pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec, Vec)> { + let temp_dirs: IoResult> = (0..count).map(|_| TempDir::new()).collect(); let temp_dirs = temp_dirs?; let paths: Vec = temp_dirs.iter().map(|t| t.path().to_path_buf()).collect(); Ok((temp_dirs, paths)) @@ -679,7 +679,7 @@ impl RecycleStores { // This structure handles the load/store of the accounts #[derive(Debug)] -pub struct AccountsDB { +pub struct AccountsDb { /// Keeps tracks of index into AppendVec on a per slot basis pub accounts_index: AccountsIndex, @@ -1031,9 +1031,9 @@ pub fn make_min_priority_thread_pool() -> ThreadPool { } #[cfg(all(test, RUSTC_WITH_SPECIALIZATION))] -impl solana_frozen_abi::abi_example::AbiExample for AccountsDB { +impl solana_frozen_abi::abi_example::AbiExample for AccountsDb { fn example() -> Self { - let accounts_db = 
AccountsDB::new_single(); + let accounts_db = AccountsDb::new_single(); let key = Pubkey::default(); let some_data_len = 5; let some_slot: Slot = 0; @@ -1045,13 +1045,13 @@ impl solana_frozen_abi::abi_example::AbiExample for AccountsDB { } } -impl Default for AccountsDB { +impl Default for AccountsDb { fn default() -> Self { let num_threads = get_thread_count(); let mut bank_hashes = HashMap::new(); bank_hashes.insert(0, BankHashInfo::default()); - AccountsDB { + AccountsDb { accounts_index: AccountsIndex::default(), storage: AccountStorage::default(), accounts_cache: AccountsCache::default(), @@ -1085,9 +1085,9 @@ impl Default for AccountsDB { } } -impl AccountsDB { +impl AccountsDb { pub fn new(paths: Vec, cluster_type: &ClusterType) -> Self { - AccountsDB::new_with_config(paths, cluster_type, HashSet::new(), false) + AccountsDb::new_with_config(paths, cluster_type, HashSet::new(), false) } pub fn new_with_config( @@ -1140,9 +1140,9 @@ impl AccountsDB { } pub fn new_single() -> Self { - AccountsDB { + AccountsDb { min_num_stores: 0, - ..AccountsDB::new(Vec::new(), &ClusterType::Development) + ..AccountsDb::new(Vec::new(), &ClusterType::Development) } } @@ -1155,7 +1155,7 @@ impl AccountsDB { ) } - // Reclaim older states of rooted accounts for AccountsDB bloat mitigation + // Reclaim older states of rooted accounts for AccountsDb bloat mitigation fn clean_old_rooted_accounts( &self, purges_in_root: Vec, @@ -4722,19 +4722,19 @@ impl AccountsDB { } #[cfg(test)] -impl AccountsDB { +impl AccountsDb { pub fn new_sized(paths: Vec, file_size: u64) -> Self { - AccountsDB { + AccountsDb { file_size, - ..AccountsDB::new(paths, &ClusterType::Development) + ..AccountsDb::new(paths, &ClusterType::Development) } } pub fn new_sized_no_extra_stores(paths: Vec, file_size: u64) -> Self { - AccountsDB { + AccountsDb { file_size, min_num_stores: 0, - ..AccountsDB::new(paths, &ClusterType::Development) + ..AccountsDb::new(paths, &ClusterType::Development) } } @@ -4754,7 +4754,7 @@ impl AccountsDB { /// Legacy shrink functions to support non-cached path. /// Should be able to be deleted after cache path is the only path. -impl AccountsDB { +impl AccountsDb { // Reads all accounts in given slot's AppendVecs and filter only to alive, // then create a minimum AppendVec filled with the alive. 
// v1 path shrinks all stores in the slot @@ -5157,7 +5157,7 @@ pub mod tests { account_maps.insert(key, val); let result = - AccountsDB::rest_of_hash_calculation((account_maps.clone(), Measure::start(""))); + AccountsDb::rest_of_hash_calculation((account_maps.clone(), Measure::start(""))); let expected_hash = Hash::from_str("8j9ARGFv4W2GfML7d3sVJK2MePwrikqYnu6yqer28cCa").unwrap(); assert_eq!(result, (expected_hash, 88)); @@ -5167,7 +5167,7 @@ pub mod tests { let val = CalculateHashIntermediate::new(0, hash, 20, Slot::default()); account_maps.insert(key, val); - let result = AccountsDB::rest_of_hash_calculation((account_maps, Measure::start(""))); + let result = AccountsDb::rest_of_hash_calculation((account_maps, Measure::start(""))); let expected_hash = Hash::from_str("EHv9C5vX7xQjjMpsJMzudnDTzoTSRwYkqLzY8tVMihGj").unwrap(); assert_eq!(result, (expected_hash, 108)); } @@ -5181,31 +5181,31 @@ pub mod tests { let hash = Hash::new_unique(); let val = CalculateHashIntermediate::new(1, hash, 1, 1); - AccountsDB::handle_one_loaded_account(&key, val.clone(), &account_maps); + AccountsDb::handle_one_loaded_account(&key, val.clone(), &account_maps); assert_eq!(*account_maps.get(&key).unwrap(), val); // slot same, version < let hash2 = Hash::new_unique(); let val2 = CalculateHashIntermediate::new(0, hash2, 4, 1); - AccountsDB::handle_one_loaded_account(&key, val2, &account_maps); + AccountsDb::handle_one_loaded_account(&key, val2, &account_maps); assert_eq!(*account_maps.get(&key).unwrap(), val); // slot same, vers = let hash3 = Hash::new_unique(); let val3 = CalculateHashIntermediate::new(1, hash3, 2, 1); - AccountsDB::handle_one_loaded_account(&key, val3.clone(), &account_maps); + AccountsDb::handle_one_loaded_account(&key, val3.clone(), &account_maps); assert_eq!(*account_maps.get(&key).unwrap(), val3); // slot same, vers > let hash4 = Hash::new_unique(); let val4 = CalculateHashIntermediate::new(2, hash4, 6, 1); - AccountsDB::handle_one_loaded_account(&key, val4.clone(), &account_maps); + AccountsDb::handle_one_loaded_account(&key, val4.clone(), &account_maps); assert_eq!(*account_maps.get(&key).unwrap(), val4); // slot >, version < let hash5 = Hash::new_unique(); let val5 = CalculateHashIntermediate::new(0, hash5, 8, 2); - AccountsDB::handle_one_loaded_account(&key, val5.clone(), &account_maps); + AccountsDb::handle_one_loaded_account(&key, val5.clone(), &account_maps); assert_eq!(*account_maps.get(&key).unwrap(), val5); } @@ -5219,7 +5219,7 @@ pub mod tests { let val = CalculateHashIntermediate::new(0, hash, 1, Slot::default()); account_maps.insert(key, val.clone()); - let result = AccountsDB::remove_zero_balance_accounts(account_maps); + let result = AccountsDb::remove_zero_balance_accounts(account_maps); assert_eq!(result, vec![(key, val.hash, val.lamports)]); // zero original lamports @@ -5228,7 +5228,7 @@ pub mod tests { CalculateHashIntermediate::new(0, hash, ZERO_RAW_LAMPORTS_SENTINEL, Slot::default()); account_maps.insert(key, val); - let result = AccountsDB::remove_zero_balance_accounts(account_maps); + let result = AccountsDb::remove_zero_balance_accounts(account_maps); assert_eq!(result, vec![]); } @@ -5237,7 +5237,7 @@ pub mod tests { solana_logger::setup(); let (storages, _size, _slot_expected) = sample_storage(); - let result = AccountsDB::calculate_accounts_hash_without_index(&storages, true, None); + let result = AccountsDb::calculate_accounts_hash_without_index(&storages, true, None); let expected_hash = Hash::from_str("GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn").unwrap(); 
assert_eq!(result, (expected_hash, 0)); } @@ -5282,7 +5282,7 @@ pub mod tests { .append_accounts(&[(sm, &acc)], &[Hash::default()]); let calls = AtomicU64::new(0); - let result = AccountsDB::scan_account_storage_no_bank( + let result = AccountsDb::scan_account_storage_no_bank( &storages, |loaded_account: LoadedAccount, accum: &mut Vec, slot: Slot| { calls.fetch_add(1, Ordering::Relaxed); @@ -5344,23 +5344,23 @@ pub mod tests { let result; if pass == 0 { result = - AccountsDB::compute_merkle_root_and_capitalization(input.clone(), fanout); + AccountsDb::compute_merkle_root_and_capitalization(input.clone(), fanout); } else { - result = AccountsDB::accumulate_account_hashes_and_capitalization( + result = AccountsDb::accumulate_account_hashes_and_capitalization( input.clone(), Slot::default(), false, ) .0; assert_eq!( - AccountsDB::accumulate_account_hashes( + AccountsDb::accumulate_account_hashes( input.clone(), Slot::default(), false ), result.0 ); - AccountsDB::sort_hashes_by_pubkey(&mut input); + AccountsDb::sort_hashes_by_pubkey(&mut input); } let mut expected = 0; if count > 0 { @@ -5398,13 +5398,13 @@ pub mod tests { (Pubkey::new_unique(), Hash::new_unique(), u64::MAX), (Pubkey::new_unique(), Hash::new_unique(), 1), ]; - AccountsDB::compute_merkle_root_and_capitalization(input, fanout); + AccountsDb::compute_merkle_root_and_capitalization(input, fanout); } #[test] fn test_accountsdb_add_root() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5417,7 +5417,7 @@ pub mod tests { #[test] fn test_accountsdb_latest_ancestor() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5442,7 +5442,7 @@ pub mod tests { #[test] fn test_accountsdb_latest_ancestor_with_root() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5462,7 +5462,7 @@ pub mod tests { #[test] fn test_accountsdb_root_one_slot() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5503,7 +5503,7 @@ pub mod tests { #[test] fn test_accountsdb_add_root_many() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 100, 0, 0); @@ -5539,7 +5539,7 @@ pub mod tests { #[test] fn test_accountsdb_count_stores() { solana_logger::setup(); - let db = AccountsDB::new_single(); + let db = AccountsDb::new_single(); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0); @@ -5601,7 +5601,7 @@ pub mod tests { let key = Pubkey::default(); // 1 token in the "root", i.e. 
db zero - let db0 = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db0 = AccountsDb::new(Vec::new(), &ClusterType::Development); let account0 = Account::new(1, 0, &key); db0.store_uncached(0, &[(&key, &account0)]); @@ -5620,7 +5620,7 @@ pub mod tests { #[test] fn test_remove_unrooted_slot() { let unrooted_slot = 9; - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -5661,7 +5661,7 @@ pub mod tests { fn test_remove_unrooted_slot_snapshot() { solana_logger::setup(); let unrooted_slot = 9; - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = solana_sdk::pubkey::new_rand(); let account0 = Account::new(1, 0, &key); db.store_uncached(unrooted_slot, &[(&key, &account0)]); @@ -5687,7 +5687,7 @@ pub mod tests { } fn create_account( - accounts: &AccountsDB, + accounts: &AccountsDb, pubkeys: &mut Vec, slot: Slot, num: usize, @@ -5712,7 +5712,7 @@ pub mod tests { } } - fn update_accounts(accounts: &AccountsDB, pubkeys: &[Pubkey], slot: Slot, range: usize) { + fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) { for _ in 1..1000 { let idx = thread_rng().gen_range(0, range); let ancestors = vec![(slot, 0)].into_iter().collect(); @@ -5733,7 +5733,7 @@ pub mod tests { } } - fn check_storage(accounts: &AccountsDB, slot: Slot, count: usize) -> bool { + fn check_storage(accounts: &AccountsDb, slot: Slot, count: usize) -> bool { assert_eq!( accounts .storage @@ -5767,7 +5767,7 @@ pub mod tests { } fn check_accounts( - accounts: &AccountsDB, + accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, num: usize, @@ -5787,7 +5787,7 @@ pub mod tests { #[allow(clippy::needless_range_loop)] fn modify_accounts( - accounts: &AccountsDB, + accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, num: usize, @@ -5802,7 +5802,7 @@ pub mod tests { #[test] fn test_account_one() { let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap(); - let db = AccountsDB::new(paths, &ClusterType::Development); + let db = AccountsDb::new(paths, &ClusterType::Development); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 1, 0, 0); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -5817,7 +5817,7 @@ pub mod tests { #[test] fn test_account_many() { let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap(); - let db = AccountsDB::new(paths, &ClusterType::Development); + let db = AccountsDb::new(paths, &ClusterType::Development); let mut pubkeys: Vec = vec![]; create_account(&db, &mut pubkeys, 0, 100, 0, 0); check_accounts(&db, &pubkeys, 0, 100, 1); @@ -5825,7 +5825,7 @@ pub mod tests { #[test] fn test_account_update() { - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut pubkeys: Vec = vec![]; create_account(&accounts, &mut pubkeys, 0, 100, 0, 0); update_accounts(&accounts, &pubkeys, 0, 99); @@ -5836,7 +5836,7 @@ pub mod tests { fn test_account_grow_many() { let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap(); let size = 4096; - let accounts = AccountsDB::new_sized(paths, size); + let accounts = AccountsDb::new_sized(paths, size); let mut keys = vec![]; for i in 0..9 { let key = solana_sdk::pubkey::new_rand(); @@ -5867,7 +5867,7 @@ pub mod tests { #[test] fn test_account_grow() { - let accounts = AccountsDB::new_single(); 
+ let accounts = AccountsDb::new_single(); let status = [AccountStorageStatus::Available, AccountStorageStatus::Full]; let pubkey1 = solana_sdk::pubkey::new_rand(); @@ -5932,7 +5932,7 @@ pub mod tests { //This test is pedantic //A slot is purged when a non root bank is cleaned up. If a slot is behind root but it is //not root, it means we are retaining dead banks. - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account @@ -5974,7 +5974,7 @@ pub mod tests { assert_eq!(accounts.load_slow(&ancestors, &pubkey), Some((account, 1))); } - impl AccountsDB { + impl AccountsDb { fn all_account_count_in_append_vec(&self, slot: Slot) -> usize { let slot_storage = self.storage.get_slot_stores(slot); if let Some(slot_storage) = slot_storage { @@ -6003,7 +6003,7 @@ pub mod tests { fn test_clean_zero_lamport_and_dead_slot() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 1, &Account::default().owner); @@ -6059,7 +6059,7 @@ pub mod tests { fn test_clean_zero_lamport_and_old_roots() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); let zero_lamport_account = Account::new(0, 0, &Account::default().owner); @@ -6098,7 +6098,7 @@ pub mod tests { fn test_clean_old_with_normal_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account @@ -6126,7 +6126,7 @@ pub mod tests { fn test_clean_old_with_zero_lamport_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let normal_account = Account::new(1, 0, &Account::default().owner); @@ -6160,7 +6160,7 @@ pub mod tests { fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { solana_logger::setup(); - let accounts = AccountsDB::new_with_config( + let accounts = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, spl_token_mint_index_enabled(), @@ -6243,7 +6243,7 @@ pub mod tests { fn test_clean_max_slot_zero_lamport_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); let zero_account = Account::new(0, 0, &Account::default().owner); @@ -6280,7 +6280,7 @@ pub mod tests { fn test_uncleaned_roots_with_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let 
pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); //store an account @@ -6300,7 +6300,7 @@ pub mod tests { fn test_uncleaned_roots_with_no_account() { solana_logger::setup(); - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); @@ -6316,7 +6316,7 @@ pub mod tests { #[test] fn test_accounts_db_serialize1() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut pubkeys: Vec = vec![]; // Create 100 accounts in slot 0 @@ -6418,7 +6418,7 @@ pub mod tests { } fn assert_load_account( - accounts: &AccountsDB, + accounts: &AccountsDb, slot: Slot, pubkey: Pubkey, expected_lamports: u64, @@ -6428,19 +6428,19 @@ pub mod tests { assert_eq!((account.lamports, slot), (expected_lamports, slot)); } - fn assert_not_load_account(accounts: &AccountsDB, slot: Slot, pubkey: Pubkey) { + fn assert_not_load_account(accounts: &AccountsDb, slot: Slot, pubkey: Pubkey) { let ancestors = vec![(slot, 0)].into_iter().collect(); assert!(accounts.load_slow(&ancestors, &pubkey).is_none()); } - fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDB, slot: Slot) -> AccountsDB { + fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDb, slot: Slot) -> AccountsDb { let daccounts = crate::serde_snapshot::reconstruct_accounts_db_via_serialization(accounts, slot); daccounts.print_count_and_status("daccounts"); daccounts } - fn assert_no_stores(accounts: &AccountsDB, slot: Slot) { + fn assert_no_stores(accounts: &AccountsDb, slot: Slot) { let slot_stores = accounts.storage.get_slot_stores(slot); let r_slot_stores = slot_stores.as_ref().map(|slot_stores| { let r_slot_stores = slot_stores.read().unwrap(); @@ -6466,7 +6466,7 @@ pub mod tests { let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); accounts.add_root(0); // Step A @@ -6542,7 +6542,7 @@ pub mod tests { let zero_lamport_account = Account::new(zero_lamport, no_data, &owner); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); accounts.add_root(0); let mut current_slot = 1; @@ -6607,7 +6607,7 @@ pub mod tests { let filler_account = Account::new(some_lamport, no_data, &owner); let filler_account_pubkey = solana_sdk::pubkey::new_rand(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut current_slot = 1; accounts.store_uncached(current_slot, &[(&pubkey, &account)]); @@ -6647,7 +6647,7 @@ pub mod tests { fn with_chained_zero_lamport_accounts(f: F) where - F: Fn(AccountsDB, Slot) -> AccountsDB, + F: Fn(AccountsDb, Slot) -> AccountsDb, { let some_lamport = 223; let zero_lamport = 0; @@ -6667,7 +6667,7 @@ pub mod tests { let dummy_account = Account::new(dummy_lamport, no_data, &owner); let dummy_pubkey = Pubkey::default(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut current_slot = 1; accounts.store_uncached(current_slot, &[(&pubkey, &account)]); @@ -6733,7 +6733,7 @@ pub mod tests { let min_file_bytes = std::mem::size_of::() + std::mem::size_of::(); - let db = Arc::new(AccountsDB::new_sized(Vec::new(), min_file_bytes as u64)); + let db = Arc::new(AccountsDb::new_sized(Vec::new(), min_file_bytes as u64)); db.add_root(slot); let thread_hdls: Vec<_> = (0..num_threads) 
@@ -6771,7 +6771,7 @@ pub mod tests { #[test] fn test_accountsdb_scan_accounts() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let key0 = solana_sdk::pubkey::new_rand(); let account0 = Account::new(1, 0, &key); @@ -6800,7 +6800,7 @@ pub mod tests { #[test] fn test_cleanup_key_not_removed() { solana_logger::setup(); - let db = AccountsDB::new_single(); + let db = AccountsDb::new_single(); let key = Pubkey::default(); let key0 = solana_sdk::pubkey::new_rand(); @@ -6829,7 +6829,7 @@ pub mod tests { #[test] fn test_store_large_account() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let data_len = DEFAULT_FILE_SIZE as usize + 7; @@ -6846,7 +6846,7 @@ pub mod tests { fn test_hash_frozen_account_data() { let account = Account::new(1, 42, &Pubkey::default()); - let hash = AccountsDB::hash_frozen_account_data(&account); + let hash = AccountsDb::hash_frozen_account_data(&account); assert_ne!(hash, Hash::default()); // Better not be the default Hash // Lamports changes to not affect the hash @@ -6854,7 +6854,7 @@ pub mod tests { account_modified.lamports -= 1; assert_eq!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Rent epoch may changes to not affect the hash @@ -6862,7 +6862,7 @@ pub mod tests { account_modified.rent_epoch += 1; assert_eq!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Account data may not be modified @@ -6870,7 +6870,7 @@ pub mod tests { account_modified.data[0] = 42; assert_ne!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Owner may not be modified @@ -6879,7 +6879,7 @@ pub mod tests { Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); assert_ne!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); // Executable may not be modified @@ -6887,7 +6887,7 @@ pub mod tests { account_modified.executable = true; assert_ne!( hash, - AccountsDB::hash_frozen_account_data(&account_modified) + AccountsDb::hash_frozen_account_data(&account_modified) ); } @@ -6895,7 +6895,7 @@ pub mod tests { fn test_frozen_account_lamport_increase() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); @@ -6930,7 +6930,7 @@ pub mod tests { fn test_frozen_account_lamport_decrease() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); @@ -6950,7 +6950,7 @@ pub mod tests { fn test_frozen_account_nonexistent() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = 
AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let ancestors = vec![(0, 0)].into_iter().collect(); db.freeze_accounts(&ancestors, &[frozen_pubkey]); @@ -6963,7 +6963,7 @@ pub mod tests { fn test_frozen_account_data_modified() { let frozen_pubkey = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); let mut account = Account::new(1, 42, &frozen_pubkey); db.store_uncached(0, &[(&frozen_pubkey, &account)]); @@ -7017,12 +7017,12 @@ pub mod tests { Hash::from_str("4StuvYHFd7xuShVXB94uHHvpqGMCaacdZnYB74QQkPA1").unwrap(); assert_eq!( - AccountsDB::hash_stored_account(slot, &stored_account, &ClusterType::Development), + AccountsDb::hash_stored_account(slot, &stored_account, &ClusterType::Development), expected_account_hash, "StoredAccountMeta's data layout might be changed; update hashing if needed." ); assert_eq!( - AccountsDB::hash_account( + AccountsDb::hash_account( slot, &account, &stored_account.meta.pubkey, @@ -7036,7 +7036,7 @@ pub mod tests { #[test] fn test_bank_hash_stats() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let some_data_len = 5; @@ -7064,7 +7064,7 @@ pub mod tests { fn test_verify_bank_hash() { use BankHashVerificationError::*; solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -7106,7 +7106,7 @@ pub mod tests { fn test_verify_bank_capitalization() { use BankHashVerificationError::*; solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = solana_sdk::pubkey::new_rand(); let some_data_len = 0; @@ -7149,7 +7149,7 @@ pub mod tests { #[test] fn test_verify_bank_hash_no_account() { solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let some_slot: Slot = 0; let ancestors = vec![(some_slot, 0)].into_iter().collect(); @@ -7170,7 +7170,7 @@ pub mod tests { fn test_verify_bank_hash_bad_account_hash() { use BankHashVerificationError::*; solana_logger::setup(); - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let some_data_len = 0; @@ -7179,7 +7179,7 @@ pub mod tests { let ancestors = vec![(some_slot, 0)].into_iter().collect(); let accounts = &[(&key, &account)]; - // update AccountsDB's bank hash but discard real account hashes + // update AccountsDb's bank hash but discard real account hashes db.hash_accounts(some_slot, accounts, &ClusterType::Development); // provide bogus account hashes let some_hash = Hash::new(&[0xca; HASH_BYTES]); @@ -7195,7 +7195,7 @@ pub mod tests { fn test_bad_bank_hash() { solana_logger::setup(); use solana_sdk::signature::{Keypair, Signer}; - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let some_slot: Slot = 0; let ancestors: Ancestors = [(some_slot, 0)].iter().copied().collect(); @@ -7242,7 
+7242,7 @@ pub mod tests { for (key, account) in &account_refs { assert_eq!( db.load_account_hash(&ancestors, &key), - AccountsDB::hash_account(some_slot, &account, &key, &ClusterType::Development) + AccountsDb::hash_account(some_slot, &account, &key, &ClusterType::Development) ); } existing.clear(); @@ -7252,7 +7252,7 @@ pub mod tests { #[test] fn test_storage_finder() { solana_logger::setup(); - let db = AccountsDB::new_sized(Vec::new(), 16 * 1024); + let db = AccountsDb::new_sized(Vec::new(), 16 * 1024); let key = solana_sdk::pubkey::new_rand(); let lamports = 100; let data_len = 8190; @@ -7264,13 +7264,13 @@ pub mod tests { #[test] fn test_get_snapshot_storages_empty() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); assert!(db.get_snapshot_storages(0).is_empty()); } #[test] fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7288,7 +7288,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_only_non_empty() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7311,7 +7311,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_only_roots() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7327,7 +7327,7 @@ pub mod tests { #[test] fn test_get_snapshot_storages_exclude_empty() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let key = Pubkey::default(); let account = Account::new(1, 0, &key); @@ -7353,7 +7353,7 @@ pub mod tests { #[test] #[should_panic(expected = "double remove of account in slot: 0/store: 0!!")] fn test_storage_remove_account_double_remove() { - let accounts = AccountsDB::new(Vec::new(), &ClusterType::Development); + let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development); let pubkey = solana_sdk::pubkey::new_rand(); let account = Account::new(1, 0, &Account::default().owner); accounts.store_uncached(0, &[(&pubkey, &account)]); @@ -7391,7 +7391,7 @@ pub mod tests { let purged_pubkey2 = solana_sdk::pubkey::new_rand(); let mut current_slot = 0; - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); // create intermediate updates to purged_pubkey1 so that // generate_index must add slots as root last at once @@ -7453,9 +7453,9 @@ pub mod tests { let zero_lamport_account = Account::new(zero_lamport, data_size, &owner); let mut current_slot = 0; - let accounts = AccountsDB::new_sized_no_extra_stores(Vec::new(), store_size); + let accounts = AccountsDb::new_sized_no_extra_stores(Vec::new(), store_size); - // A: Initialize AccountsDB with pubkey1 and pubkey2 + // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; if store1_first { accounts.store_uncached(current_slot, &[(&pubkey1, &account)]); @@ -7581,9 +7581,9 @@ pub mod tests { let dummy_pubkey = solana_sdk::pubkey::new_rand(); let mut current_slot = 0; - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); - // A: 
Initialize AccountsDB with pubkey1 and pubkey2 + // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; accounts.store_uncached(current_slot, &[(&pubkey1, &account)]); accounts.store_uncached(current_slot, &[(&pubkey2, &account)]); @@ -7667,7 +7667,7 @@ pub mod tests { #[test] fn test_clean_stored_dead_slots_empty() { - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let mut dead_slots = HashSet::new(); dead_slots.insert(10); accounts.clean_stored_dead_slots(&dead_slots, None); @@ -7675,7 +7675,7 @@ pub mod tests { #[test] fn test_shrink_all_slots_none() { - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); for _ in 0..10 { accounts.shrink_candidate_slots(); @@ -7686,7 +7686,7 @@ pub mod tests { #[test] fn test_shrink_next_slots() { - let mut accounts = AccountsDB::new_single(); + let mut accounts = AccountsDb::new_single(); accounts.caching_enabled = false; let mut current_slot = 7; @@ -7725,7 +7725,7 @@ pub mod tests { #[test] fn test_shrink_reset_uncleaned_roots() { - let mut accounts = AccountsDB::new_single(); + let mut accounts = AccountsDb::new_single(); accounts.caching_enabled = false; accounts.reset_uncleaned_roots_v1(); @@ -7762,7 +7762,7 @@ pub mod tests { fn test_shrink_stale_slots_processed() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let pubkey_count = 100; let pubkeys: Vec<_> = (0..pubkey_count) @@ -7830,7 +7830,7 @@ pub mod tests { fn test_shrink_candidate_slots() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let pubkey_count = 30000; let pubkeys: Vec<_> = (0..pubkey_count) @@ -7889,7 +7889,7 @@ pub mod tests { fn test_shrink_stale_slots_skipped() { solana_logger::setup(); - let mut accounts = AccountsDB::new_single(); + let mut accounts = AccountsDb::new_single(); accounts.caching_enabled = false; let pubkey_count = 30000; @@ -8054,7 +8054,7 @@ pub mod tests { store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1]))); store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2]))); store_counts.insert(3, (1, HashSet::from_iter(vec![key2]))); - AccountsDB::calc_delete_dependencies(&purges, &mut store_counts); + AccountsDb::calc_delete_dependencies(&purges, &mut store_counts); let mut stores: Vec<_> = store_counts.keys().cloned().collect(); stores.sort_unstable(); for store in &stores { @@ -8076,7 +8076,7 @@ pub mod tests { // repeat the whole test scenario for _ in 0..5 { - let accounts = Arc::new(AccountsDB::new_single()); + let accounts = Arc::new(AccountsDb::new_single()); let accounts_for_shrink = accounts.clone(); // spawn the slot shrinking background thread @@ -8092,7 +8092,7 @@ pub mod tests { let mut alive_accounts = vec![]; let owner = Pubkey::default(); - // populate the AccountsDB with plenty of food for slot shrinking + // populate the AccountsDb with plenty of food for slot shrinking // also this simulates realistic some heavy spike account updates in the wild for current_slot in 0..1000 { while alive_accounts.len() <= 10 { @@ -8127,12 +8127,12 @@ pub mod tests { fn test_account_balance_for_capitalization_normal() { // system accounts assert_eq!( - AccountsDB::account_balance_for_capitalization(10, &Pubkey::default(), false, true), + AccountsDb::account_balance_for_capitalization(10, &Pubkey::default(), false, true), 10 ); // any random program data accounts assert_eq!( - AccountsDB::account_balance_for_capitalization( + 
AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::pubkey::new_rand(), false, @@ -8141,7 +8141,7 @@ pub mod tests { 10 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::pubkey::new_rand(), false, @@ -8158,7 +8158,7 @@ pub mod tests { 1, ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_sysvar.lamports, &normal_sysvar.owner, normal_sysvar.executable, @@ -8167,7 +8167,7 @@ pub mod tests { 0 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_sysvar.lamports, &normal_sysvar.owner, normal_sysvar.executable, @@ -8178,7 +8178,7 @@ pub mod tests { // currently transactions can send any lamports to sysvars although this is not sensible. assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::sysvar::id(), false, @@ -8187,7 +8187,7 @@ pub mod tests { 9 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 10, &solana_sdk::sysvar::id(), false, @@ -8201,7 +8201,7 @@ pub mod tests { fn test_account_balance_for_capitalization_native_program() { let normal_native_program = solana_sdk::native_loader::create_loadable_account("foo", 1); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_native_program.lamports, &normal_native_program.owner, normal_native_program.executable, @@ -8210,7 +8210,7 @@ pub mod tests { 0 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( normal_native_program.lamports, &normal_native_program.owner, normal_native_program.executable, @@ -8221,7 +8221,7 @@ pub mod tests { // test maliciously assigned bogus native loader account assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 1, &solana_sdk::native_loader::id(), false, @@ -8230,7 +8230,7 @@ pub mod tests { 1 ); assert_eq!( - AccountsDB::account_balance_for_capitalization( + AccountsDb::account_balance_for_capitalization( 1, &solana_sdk::native_loader::id(), false, @@ -8243,7 +8243,7 @@ pub mod tests { #[test] fn test_checked_sum_for_capitalization_normal() { assert_eq!( - AccountsDB::checked_sum_for_capitalization(vec![1, 2].into_iter()), + AccountsDb::checked_sum_for_capitalization(vec![1, 2].into_iter()), 3 ); } @@ -8252,7 +8252,7 @@ pub mod tests { #[should_panic(expected = "overflow is detected while summing capitalization")] fn test_checked_sum_for_capitalization_overflow() { assert_eq!( - AccountsDB::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()), + AccountsDb::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()), 3 ); } @@ -8260,7 +8260,7 @@ pub mod tests { #[test] fn test_store_overhead() { solana_logger::setup(); - let accounts = AccountsDB::new_single(); + let accounts = AccountsDb::new_single(); let account = Account::default(); let pubkey = solana_sdk::pubkey::new_rand(); accounts.store_uncached(0, &[(&pubkey, &account)]); @@ -8276,7 +8276,7 @@ pub mod tests { #[test] fn test_store_reuse() { solana_logger::setup(); - let accounts = AccountsDB::new_sized(vec![], 4096); + let accounts = AccountsDb::new_sized(vec![], 4096); let size = 100; let num_accounts: usize = 100; @@ -8325,7 +8325,7 @@ pub mod tests { #[test] fn 
test_zero_lamport_new_root_not_cleaned() { - let db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let account_key = Pubkey::new_unique(); let zero_lamport_account = Account::new(0, 0, &Account::default().owner); @@ -8349,7 +8349,7 @@ pub mod tests { #[test] fn test_store_load_cached() { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -8377,7 +8377,7 @@ pub mod tests { #[test] fn test_store_flush_load_cached() { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let key = Pubkey::default(); let account0 = Account::new(1, 0, &key); @@ -8402,7 +8402,7 @@ pub mod tests { #[test] fn test_flush_accounts_cache() { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let account0 = Account::new(1, 0, &Pubkey::default()); @@ -8461,7 +8461,7 @@ pub mod tests { } fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) { - let mut db = AccountsDB::new(Vec::new(), &ClusterType::Development); + let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development); db.caching_enabled = true; let account0 = Account::new(1, 0, &Pubkey::default()); let mut keys = vec![]; @@ -8510,7 +8510,7 @@ pub mod tests { } } - fn slot_stores(db: &AccountsDB, slot: Slot) -> Vec> { + fn slot_stores(db: &AccountsDb, slot: Slot) -> Vec> { db.storage .get_slot_storage_entries(slot) .unwrap_or_default() @@ -8519,7 +8519,7 @@ pub mod tests { #[test] fn test_flush_cache_clean() { let caching_enabled = true; - let db = Arc::new(AccountsDB::new_with_config( + let db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -8564,7 +8564,7 @@ pub mod tests { } fn setup_scan( - db: Arc, + db: Arc, scan_ancestors: Arc, stall_key: Pubkey, ) -> ScanTracker { @@ -8607,7 +8607,7 @@ pub mod tests { #[test] fn test_scan_flush_accounts_cache_then_clean_drop() { let caching_enabled = true; - let db = Arc::new(AccountsDB::new_with_config( + let db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -8690,7 +8690,7 @@ pub mod tests { #[test] fn test_alive_bytes() { let caching_enabled = true; - let accounts_db = AccountsDB::new_with_config( + let accounts_db = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -8742,9 +8742,9 @@ pub mod tests { fn setup_accounts_db_cache_clean( num_slots: usize, scan_slot: Option, - ) -> (Arc, Vec, Vec, Option) { + ) -> (Arc, Vec, Vec, Option) { let caching_enabled = true; - let accounts_db = Arc::new(AccountsDB::new_with_config( + let accounts_db = Arc::new(AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::new(), @@ -9126,7 +9126,7 @@ pub mod tests { // Enable caching so that we use the straightforward implementation // of shrink that will shrink all candidate slots let caching_enabled = true; - let db = AccountsDB::new_with_config( + let db = AccountsDb::new_with_config( Vec::new(), &ClusterType::Development, HashSet::default(), @@ -9209,7 +9209,7 @@ pub mod tests { #[test] fn test_partial_clean() { solana_logger::setup(); - let db = 
AccountsDB::new(Vec::new(), &ClusterType::Development); + let db = AccountsDb::new(Vec::new(), &ClusterType::Development); let account_key1 = Pubkey::new_unique(); let account_key2 = Pubkey::new_unique(); let account1 = Account::new(1, 0, &Account::default().owner); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 22e7025ec6..51928dd804 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1067,7 +1067,7 @@ impl Bank { new.ancestors.insert(p.slot(), i + 1); }); - // Following code may touch AccountsDB, requiring proper ancestors + // Following code may touch AccountsDb, requiring proper ancestors let parent_epoch = parent.epoch(); if parent_epoch < new.epoch() { new.apply_feature_activations(false); @@ -2169,7 +2169,7 @@ impl Bank { self.capitalization.fetch_sub(account.lamports, Relaxed); - // Resetting account balance to 0 is needed to really purge from AccountsDB and + // Resetting account balance to 0 is needed to really purge from AccountsDb and // flush the Stakes cache account.lamports = 0; self.store_account(&program_id, &account); @@ -2189,7 +2189,7 @@ impl Bank { ), Some(account) => { if *name == String::from_utf8_lossy(&account.data) { - // nop; it seems that already AccountsDB is updated. + // nop; it seems that already AccountsDb is updated. return; } // continue to replace account diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 07970f3dec..f01ebabd13 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -1,7 +1,7 @@ //! The `bank_forks` module implements BankForks a DAG of checkpointed Banks use crate::{ - accounts_background_service::{ABSRequestSender, SnapshotRequest}, + accounts_background_service::{AbsRequestSender, SnapshotRequest}, bank::Bank, }; use log::*; @@ -187,7 +187,7 @@ impl BankForks { pub fn set_root( &mut self, root: Slot, - accounts_background_request_sender: &ABSRequestSender, + accounts_background_request_sender: &AbsRequestSender, highest_confirmed_root: Option, ) { let old_epoch = self.root_bank().epoch(); @@ -428,7 +428,7 @@ mod tests { let bank0 = Bank::new(&genesis_config); let mut bank_forks0 = BankForks::new(bank0); - bank_forks0.set_root(0, &ABSRequestSender::default(), None); + bank_forks0.set_root(0, &AbsRequestSender::default(), None); let bank1 = Bank::new(&genesis_config); let mut bank_forks1 = BankForks::new(bank1); @@ -460,7 +460,7 @@ mod tests { // Set root in bank_forks0 to truncate the ancestor history bank_forks0.insert(child1); - bank_forks0.set_root(slot, &ABSRequestSender::default(), None); + bank_forks0.set_root(slot, &AbsRequestSender::default(), None); // Don't set root in bank_forks1 to keep the ancestor history bank_forks1.insert(child2); @@ -514,7 +514,7 @@ mod tests { ); bank_forks.set_root( 2, - &ABSRequestSender::default(), + &AbsRequestSender::default(), None, // highest confirmed root ); banks[2].squash(); @@ -573,7 +573,7 @@ mod tests { ); bank_forks.set_root( 2, - &ABSRequestSender::default(), + &AbsRequestSender::default(), Some(1), // highest confirmed root ); banks[2].squash(); diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index a221aacff5..d35d33db8a 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -19,7 +19,7 @@ use thiserror::Error; #[derive(Error, Debug)] pub enum UnpackError { #[error("IO error: {0}")] - IO(#[from] std::io::Error), + Io(#[from] std::io::Error), #[error("Archive error: {0}")] Archive(String), } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs 
index 799959feb2..ef952fb445 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -1,7 +1,7 @@ use { crate::{ accounts::Accounts, - accounts_db::{AccountStorageEntry, AccountsDB, AppendVecId, BankHashInfo}, + accounts_db::{AccountStorageEntry, AccountsDb, AppendVecId, BankHashInfo}, accounts_index::{AccountIndex, Ancestors}, append_vec::AppendVec, bank::{Bank, BankFieldsToDeserialize, BankRc, Builtins}, @@ -59,7 +59,7 @@ pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages}; #[derive(Copy, Clone, Eq, PartialEq)] pub(crate) enum SerdeStyle { - NEWER, + Newer, } const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; @@ -82,7 +82,7 @@ trait TypeContext<'a> { fn serialize_accounts_db_fields( serializer: S, - serializable_db: &SerializableAccountsDB<'a, Self>, + serializable_db: &SerializableAccountsDb<'a, Self>, ) -> std::result::Result where Self: std::marker::Sized; @@ -155,7 +155,7 @@ where }}; } match serde_style { - SerdeStyle::NEWER => INTO!(TypeContextFuture), + SerdeStyle::Newer => INTO!(TypeContextFuture), } .map_err(|err| { warn!("bankrc_from_stream error: {:?}", err); @@ -185,7 +185,7 @@ where }; } match serde_style { - SerdeStyle::NEWER => INTO!(TypeContextFuture), + SerdeStyle::Newer => INTO!(TypeContextFuture), } .map_err(|err| { warn!("bankrc_to_stream error: {:?}", err); @@ -208,14 +208,14 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> { } } -struct SerializableAccountsDB<'a, C> { - accounts_db: &'a AccountsDB, +struct SerializableAccountsDb<'a, C> { + accounts_db: &'a AccountsDb, slot: Slot, account_storage_entries: &'a [SnapshotStorage], phantom: std::marker::PhantomData, } -impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> { +impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDb<'a, C> { fn serialize(&self, serializer: S) -> std::result::Result where S: serde::ser::Serializer, @@ -225,7 +225,7 @@ impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDB<'a, C> { } #[cfg(RUSTC_WITH_SPECIALIZATION)] -impl<'a, C> IgnoreAsHelper for SerializableAccountsDB<'a, C> {} +impl<'a, C> IgnoreAsHelper for SerializableAccountsDb<'a, C> {} #[allow(clippy::too_many_arguments)] fn reconstruct_bank_from_fields( @@ -273,12 +273,12 @@ fn reconstruct_accountsdb_from_fields( cluster_type: &ClusterType, account_indexes: HashSet, caching_enabled: bool, -) -> Result +) -> Result where E: SerializableStorage, P: AsRef, { - let mut accounts_db = AccountsDB::new_with_config( + let mut accounts_db = AccountsDb::new_with_config( account_paths.to_vec(), cluster_type, account_indexes, diff --git a/runtime/src/serde_snapshot/future.rs b/runtime/src/serde_snapshot/future.rs index e61faf53b1..2bf612fba1 100644 --- a/runtime/src/serde_snapshot/future.rs +++ b/runtime/src/serde_snapshot/future.rs @@ -215,7 +215,7 @@ impl<'a> TypeContext<'a> for Context { { ( SerializableVersionedBank::from(serializable_bank.bank.get_fields_to_serialize()), - SerializableAccountsDB::<'a, Self> { + SerializableAccountsDb::<'a, Self> { accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, @@ -227,7 +227,7 @@ impl<'a> TypeContext<'a> for Context { fn serialize_accounts_db_fields( serializer: S, - serializable_db: &SerializableAccountsDB<'a, Self>, + serializable_db: &SerializableAccountsDb<'a, Self>, ) -> std::result::Result where Self: std::marker::Sized, diff --git a/runtime/src/serde_snapshot/tests.rs 
index 14671d827a..d29d57b7ed 100644
--- a/runtime/src/serde_snapshot/tests.rs
+++ b/runtime/src/serde_snapshot/tests.rs
@@ -21,7 +21,7 @@ use {
#[cfg(test)]
fn copy_append_vecs>(
- accounts_db: &AccountsDB,
+ accounts_db: &AccountsDb,
output_dir: P,
) -> std::io::Result<()> {
let storage_entries = accounts_db.get_snapshot_storages(Slot::max_value());
@@ -57,7 +57,7 @@ fn context_accountsdb_from_stream<'a, C, R, P>(
stream: &mut BufReader,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
-) -> Result
+) -> Result
where
C: TypeContext<'a>,
R: Read,
@@ -80,13 +80,13 @@ fn accountsdb_from_stream(
stream: &mut BufReader,
account_paths: &[PathBuf],
stream_append_vecs_path: P,
-) -> Result
+) -> Result
where
R: Read,
P: AsRef,
{
match serde_style {
- SerdeStyle::NEWER => context_accountsdb_from_stream::(
+ SerdeStyle::Newer => context_accountsdb_from_stream::(
stream,
account_paths,
stream_append_vecs_path,
@@ -98,7 +98,7 @@ where
fn accountsdb_to_stream(
serde_style: SerdeStyle,
stream: &mut W,
- accounts_db: &AccountsDB,
+ accounts_db: &AccountsDb,
slot: Slot,
account_storage_entries: &[SnapshotStorage],
) -> Result<(), Error>
@@ -106,9 +106,9 @@ where
W: Write,
{
match serde_style {
- SerdeStyle::NEWER => serialize_into(
+ SerdeStyle::Newer => serialize_into(
stream,
- &SerializableAccountsDB:: {
+ &SerializableAccountsDb:: {
accounts_db,
slot,
account_storage_entries,
@@ -230,13 +230,13 @@ fn test_bank_serialize_style(serde_style: SerdeStyle) {
#[cfg(test)]
pub(crate) fn reconstruct_accounts_db_via_serialization(
- accounts: &AccountsDB,
+ accounts: &AccountsDb,
slot: Slot,
-) -> AccountsDB {
+) -> AccountsDb {
let mut writer = Cursor::new(vec![]);
let snapshot_storages = accounts.get_snapshot_storages(slot);
accountsdb_to_stream(
- SerdeStyle::NEWER,
+ SerdeStyle::Newer,
&mut writer,
&accounts,
slot,
@@ -249,17 +249,17 @@ pub(crate) fn reconstruct_accounts_db_via_serialization(
let copied_accounts = TempDir::new().unwrap();
// Simulate obtaining a copy of the AppendVecs from a tarball
copy_append_vecs(&accounts, copied_accounts.path()).unwrap();
- accountsdb_from_stream(SerdeStyle::NEWER, &mut reader, &[], copied_accounts.path()).unwrap()
+ accountsdb_from_stream(SerdeStyle::Newer, &mut reader, &[], copied_accounts.path()).unwrap()
}
#[test]
fn test_accounts_serialize_newer() {
- test_accounts_serialize_style(SerdeStyle::NEWER)
+ test_accounts_serialize_style(SerdeStyle::Newer)
}
#[test]
fn test_bank_serialize_newer() {
- test_bank_serialize_style(SerdeStyle::NEWER)
+ test_bank_serialize_style(SerdeStyle::Newer)
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
@@ -268,7 +268,7 @@ mod test_bank_serialize {
// These some what long test harness is required to freeze the ABI of
// Bank's serialization due to versioned nature
- #[frozen_abi(digest = "9CqwEeiVycBp9wVDLz19XUJXRMZ68itGfYVEe29S8JmA")]
+ #[frozen_abi(digest = "DuRGntVwLGNAv5KooafUSpxk67BPAx2yC7Z8A9c8wr2G")]
#[derive(Serialize, AbiExample)]
pub struct BankAbiTestWrapperFuture {
#[serde(serialize_with = "wrapper_future")]
diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 57dfc67e13..c4d01ef6a8 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -1,5 +1,5 @@
use crate::{
- accounts_db::AccountsDB,
+ accounts_db::AccountsDb,
accounts_index::AccountIndex,
bank::{Bank, BankSlotDelta, Builtins},
bank_forks::ArchiveFormat,
@@ -25,7 +25,7 @@ use std::{
cmp::Ordering,
fmt,
fs::{self, File},
- io::{self, BufReader, BufWriter, Error as IOError, ErrorKind, Read, Seek, SeekFrom, Write},
+ io::{self, BufReader, BufWriter, Error as IoError, ErrorKind, Read, Seek, SeekFrom, Write},
path::{Path, PathBuf},
process::{self, ExitStatus},
str::FromStr,
@@ -108,7 +108,7 @@ pub struct SlotSnapshotPaths {
#[derive(Error, Debug)]
pub enum SnapshotError {
#[error("I/O error: {0}")]
- IO(#[from] std::io::Error),
+ Io(#[from] std::io::Error),
#[error("serialization error: {0}")]
Serialize(#[from] bincode::Error),
@@ -319,7 +319,7 @@ pub fn archive_snapshot_package(snapshot_package: &AccountsPackage) -> Result<()
match &mut tar.stdout {
None => {
- return Err(SnapshotError::IO(IOError::new(
+ return Err(SnapshotError::Io(IoError::new(
ErrorKind::Other,
"tar stdout unavailable".to_string(),
)));
@@ -521,7 +521,7 @@ pub fn add_snapshot>(
let mut bank_serialize = Measure::start("bank-serialize-ms");
let bank_snapshot_serializer = move |stream: &mut BufWriter| -> Result<()> {
let serde_style = match snapshot_version {
- SnapshotVersion::V1_2_0 => SerdeStyle::NEWER,
+ SnapshotVersion::V1_2_0 => SerdeStyle::Newer,
};
bank_to_stream(serde_style, stream.by_ref(), bank, snapshot_storages)?;
Ok(())
@@ -797,7 +797,7 @@ where
let bank = deserialize_snapshot_data_file(&root_paths.snapshot_file_path, |mut stream| {
Ok(match snapshot_version_enum {
SnapshotVersion::V1_2_0 => bank_from_stream(
- SerdeStyle::NEWER,
+ SerdeStyle::Newer,
&mut stream,
&append_vecs_path,
account_paths,
@@ -841,7 +841,7 @@ fn get_bank_snapshot_dir>(path: P, slot: Slot) -> PathBuf {
fn get_io_error(error: &str) -> SnapshotError {
warn!("Snapshot Error: {:?}", error);
- SnapshotError::IO(IOError::new(ErrorKind::Other, error))
+ SnapshotError::Io(IoError::new(ErrorKind::Other, error))
}
pub fn verify_snapshot_archive(
@@ -968,7 +968,7 @@ pub fn process_accounts_package_pre(
let hash = accounts_package.hash; // temporarily remaining here
if let Some(expected_hash) = accounts_package.hash_for_testing {
- let (hash, lamports) = AccountsDB::calculate_accounts_hash_without_index(
+ let (hash, lamports) = AccountsDb::calculate_accounts_hash_without_index(
&accounts_package.storages,
accounts_package.simple_capitalization_testing,
thread_pool,
@@ -1039,7 +1039,7 @@ mod tests {
Ok(())
},
);
- assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("too large snapshot data file to serialize"));
+ assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to serialize"));
}
#[test]
@@ -1088,7 +1088,7 @@ mod tests {
expected_consumed_size - 1,
|stream| Ok(deserialize_from::<_, u32>(stream)?),
);
- assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("too large snapshot data file to deserialize"));
+ assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("too large snapshot data file to deserialize"));
}
#[test]
@@ -1113,7 +1113,7 @@ mod tests {
expected_consumed_size * 2,
|stream| Ok(deserialize_from::<_, u32>(stream)?),
);
- assert_matches!(result, Err(SnapshotError::IO(ref message)) if message.to_string().starts_with("invalid snapshot data file"));
+ assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("invalid snapshot data file"));
}
#[test]
diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs
index 5593feae07..7a0b50fcd5 100644
--- a/runtime/tests/accounts.rs
+++ b/runtime/tests/accounts.rs
@@ -1,7 +1,7 @@
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
-use solana_runtime::{accounts_db::AccountsDB, accounts_index::Ancestors};
+use solana_runtime::{accounts_db::AccountsDb, accounts_index::Ancestors};
use solana_sdk::genesis_config::ClusterType;
use solana_sdk::{account::Account, clock::Slot, pubkey::Pubkey};
use std::collections::HashSet;
@@ -15,7 +15,7 @@ fn test_shrink_and_clean() {
// repeat the whole test scenario
for _ in 0..5 {
- let accounts = Arc::new(AccountsDB::new_single());
+ let accounts = Arc::new(AccountsDb::new_single());
let accounts_for_shrink = accounts.clone();
// spawn the slot shrinking background thread
@@ -31,7 +31,7 @@ fn test_shrink_and_clean() {
let mut alive_accounts = vec![];
let owner = Pubkey::default();
- // populate the AccountsDB with plenty of food for slot shrinking
+ // populate the AccountsDb with plenty of food for slot shrinking
// also this simulates realistic some heavy spike account updates in the wild
for current_slot in 0..100 {
while alive_accounts.len() <= 10 {
@@ -66,7 +66,7 @@ fn test_shrink_and_clean() {
fn test_bad_bank_hash() {
solana_logger::setup();
use solana_sdk::signature::{Keypair, Signer};
- let db = AccountsDB::new(Vec::new(), &ClusterType::Development);
+ let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let some_slot: Slot = 0;
let ancestors: Ancestors = [(some_slot, 0)].iter().copied().collect();
@@ -113,7 +113,7 @@ fn test_bad_bank_hash() {
for (key, account) in &account_refs {
assert_eq!(
db.load_account_hash(&ancestors, &key),
- AccountsDB::hash_account(some_slot, &account, &key, &ClusterType::Development)
+ AccountsDb::hash_account(some_slot, &account, &key, &ClusterType::Development)
);
}
existing.clear();
diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs
index 6bbdb9fcf0..926342b876 100644
--- a/streamer/src/streamer.rs
+++ b/streamer/src/streamer.rs
@@ -19,7 +19,7 @@ pub type PacketSender = Sender;
#[derive(Error, Debug)]
pub enum StreamerError {
#[error("I/O error")]
- IO(#[from] std::io::Error),
+ Io(#[from] std::io::Error),
#[error("receive timeout error")]
RecvTimeoutError(#[from] RecvTimeoutError),