chore: cargo +nightly clippy --fix -Z unstable-options
Commit 6514096a67 (parent 3570b00560), committed by Michael Vines
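Almost every hunk in this diff is the same mechanical change: clippy's needless_borrow lint drops a redundant & from an argument that is already a reference (or already coerces to one), which is what the command in the title rewrites automatically. Below is a minimal, self-contained sketch of that before/after shape; the Pubkey struct and lamports_for function are toy stand-ins, not the real solana_sdk API.

// Toy illustration of clippy::needless_borrow, the lint behind most hunks below.
// `Pubkey` and `lamports_for` are hypothetical stand-ins, not the real Solana types.
struct Pubkey([u8; 32]);

fn lamports_for(key: &Pubkey) -> u64 {
    key.0[0] as u64 // placeholder for a real lookup
}

fn main() {
    let keys = vec![Pubkey([1; 32]), Pubkey([2; 32])];
    for key in &keys {
        // `key` is already `&Pubkey`, so `&key` is `&&Pubkey`; clippy flags the extra borrow.
        let _before = lamports_for(&key);
        // What `cargo +nightly clippy --fix -Z unstable-options` rewrites it to:
        let _after = lamports_for(key);
    }
}

In the hunks that follow, the changed lines therefore appear in pairs: the line carrying the extra & is the pre-fix version and the line without it is what the fix produced.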
@@ -240,7 +240,7 @@ fn store_accounts_with_possible_contention<F: 'static>(
// Write to a different slot than the one being read from. Because
// there's a new account pubkey being written to every time, will
// compete for the accounts index lock on every store
-accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), &account);
+accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), account);
}
})
}

@@ -50,7 +50,7 @@ pub fn create_builtin_transactions(
// Seed the signer account
let rando0 = Keypair::new();
bank_client
-.transfer_and_confirm(10_000, &mint_keypair, &rando0.pubkey())
+.transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey())
.unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));

let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);

@@ -72,7 +72,7 @@ pub fn create_native_loader_transactions(
// Seed the signer account
let rando0 = Keypair::new();
bank_client
-.transfer_and_confirm(10_000, &mint_keypair, &rando0.pubkey())
+.transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey())
.unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));

let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);

@@ -94,7 +94,7 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Tra
}
for _ in 0..1_000_000_000_u64 {
if bank
-.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
+.get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap())
.is_some()
{
break;

@@ -102,13 +102,13 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Tra
sleep(Duration::from_nanos(1));
}
if bank
-.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
+.get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap())
.unwrap()
.is_err()
{
error!(
"transaction failed: {:?}",
-bank.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
+bank.get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap())
.unwrap()
);
panic!();

@@ -215,7 +215,7 @@ impl Accounts {
let mut account_deps = Vec::with_capacity(message.account_keys.len());
let demote_sysvar_write_locks =
feature_set.is_active(&feature_set::demote_sysvar_write_locks::id());
-let mut key_check = MessageProgramIdsCache::new(&message);
+let mut key_check = MessageProgramIdsCache::new(message);
let mut rent_debits = RentDebits::default();
for (i, key) in message.account_keys.iter().enumerate() {
let account = if key_check.is_non_loader_key(key, i) {

@@ -237,7 +237,7 @@ impl Accounts {
.map(|(mut account, _)| {
if message.is_writable(i, demote_sysvar_write_locks) {
let rent_due = rent_collector
-.collect_from_existing_account(&key, &mut account);
+.collect_from_existing_account(key, &mut account);
(account, rent_due)
} else {
(account, 0)

@@ -1029,7 +1029,7 @@ impl Accounts {
}
}
if account.rent_epoch() == INITIAL_RENT_EPOCH {
-let rent = rent_collector.collect_from_created_account(&key, account);
+let rent = rent_collector.collect_from_created_account(key, account);
loaded_transaction.rent += rent;
loaded_transaction
.rent_debits

@@ -1106,7 +1106,7 @@ pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64)
for pubkey in pubkeys {
let amount = thread_rng().gen_range(0, 10);
let account = AccountSharedData::new(amount, 0, AccountSharedData::default().owner());
-accounts.store_slow_uncached(slot, &pubkey, &account);
+accounts.store_slow_uncached(slot, pubkey, &account);
}
}

@@ -1140,7 +1140,7 @@ mod tests {
error_counters: &mut ErrorCounters,
) -> Vec<TransactionLoadResult> {
let mut hash_queue = BlockhashQueue::new(100);
-hash_queue.register_hash(&tx.message().recent_blockhash, &fee_calculator);
+hash_queue.register_hash(&tx.message().recent_blockhash, fee_calculator);
let accounts = Accounts::new_with_config(
Vec::new(),
&ClusterType::Development,

@@ -420,7 +420,7 @@ impl AccountsBackgroundService {
total_remove_slots_time: &mut u64,
) {
let mut remove_slots_time = Measure::start("remove_slots_time");
-*removed_slots_count += request_handler.handle_pruned_banks(&bank, true);
+*removed_slots_count += request_handler.handle_pruned_banks(bank, true);
remove_slots_time.stop();
*total_remove_slots_time += remove_slots_time.as_us();

@@ -371,7 +371,7 @@ impl<'a> LoadedAccount<'a> {
pub fn owner(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.account_meta.owner,
-LoadedAccount::Cached((_, cached_account)) => &cached_account.account.owner(),
+LoadedAccount::Cached((_, cached_account)) => cached_account.account.owner(),
}
}

@@ -394,7 +394,7 @@ impl<'a> LoadedAccount<'a> {
pub fn pubkey(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.meta.pubkey,
-LoadedAccount::Cached((pubkey, _)) => &pubkey,
+LoadedAccount::Cached((pubkey, _)) => pubkey,
}
}

@@ -408,7 +408,7 @@ impl<'a> LoadedAccount<'a> {
pub fn compute_hash(&self, slot: Slot, pubkey: &Pubkey) -> Hash {
match self {
LoadedAccount::Stored(stored_account_meta) => {
-AccountsDb::hash_stored_account(slot, &stored_account_meta)
+AccountsDb::hash_stored_account(slot, stored_account_meta)
}
LoadedAccount::Cached((_, cached_account)) => {
AccountsDb::hash_account(slot, &cached_account.account, pubkey)

@@ -1496,7 +1496,7 @@ impl AccountsDb {
let mut reclaims = Vec::new();
for pubkey in pubkeys {
self.accounts_index.clean_rooted_entries(
-&pubkey,
+pubkey,
&mut reclaims,
max_clean_root,
);

@@ -1588,7 +1588,7 @@ impl AccountsDb {

let affected_pubkeys = &store_counts.get(&id).unwrap().1;
for key in affected_pubkeys {
-for (_slot, account_info) in &purges.get(&key).unwrap().0 {
+for (_slot, account_info) in &purges.get(key).unwrap().0 {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}

@@ -1641,7 +1641,7 @@ impl AccountsDb {
for (pubkey, slots_set) in pubkey_to_slot_set {
let is_empty = self
.accounts_index
-.purge_exact(&pubkey, slots_set, &mut reclaims);
+.purge_exact(pubkey, slots_set, &mut reclaims);
if is_empty {
dead_keys.push(pubkey);
}

@@ -1862,12 +1862,12 @@ impl AccountsDb {
// Then purge if we can
let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new();
for (key, (account_infos, ref_count)) in purges_zero_lamports.iter_mut() {
-if purged_account_slots.contains_key(&key) {
-*ref_count = self.accounts_index.ref_count_from_storage(&key);
+if purged_account_slots.contains_key(key) {
+*ref_count = self.accounts_index.ref_count_from_storage(key);
}
account_infos.retain(|(slot, account_info)| {
let was_slot_purged = purged_account_slots
-.get(&key)
+.get(key)
.map(|slots_removed| slots_removed.contains(slot))
.unwrap_or(false);
if was_slot_purged {

@@ -2061,7 +2061,7 @@ impl AccountsDb {
return;
}
let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots");
-self.clean_stored_dead_slots(&dead_slots, purged_account_slots);
+self.clean_stored_dead_slots(dead_slots, purged_account_slots);
clean_dead_slots.stop();

let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots");

@@ -3289,7 +3289,7 @@ impl AccountsDb {
let path_index = thread_rng().gen_range(0, paths.len());
let store = Arc::new(self.new_storage_entry(
slot,
-&Path::new(&paths[path_index]),
+Path::new(&paths[path_index]),
Self::page_align(size),
));

@@ -3491,7 +3491,7 @@ impl AccountsDb {
let mut remove_storage_entries_elapsed = Measure::start("remove_storage_entries_elapsed");
for remove_slot in removed_slots {
// Remove the storage entries and collect some metrics
-if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(&remove_slot) {
+if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(remove_slot) {
{
let r_slot_removed_storages = slot_storages_to_be_removed.read().unwrap();
total_removed_storage_entries += r_slot_removed_storages.len();

@@ -3761,10 +3761,10 @@ impl AccountsDb {
Self::hash_account_data(
slot,
account.lamports(),
-&account.owner(),
+account.owner(),
account.executable(),
account.rent_epoch(),
-&account.data(),
+account.data(),
pubkey,
)
}

@@ -3772,8 +3772,8 @@ impl AccountsDb {
fn hash_frozen_account_data(account: &AccountSharedData) -> Hash {
let mut hasher = Hasher::default();

-hasher.hash(&account.data());
-hasher.hash(&account.owner().as_ref());
+hasher.hash(account.data());
+hasher.hash(account.owner().as_ref());

if account.executable() {
hasher.hash(&[1u8; 1]);

@@ -3805,7 +3805,7 @@ impl AccountsDb {

hasher.update(&rent_epoch.to_le_bytes());

-hasher.update(&data);
+hasher.update(data);

if executable {
hasher.update(&[1u8; 1]);

@@ -3813,8 +3813,8 @@ impl AccountsDb {
hasher.update(&[0u8; 1]);
}

-hasher.update(&owner.as_ref());
-hasher.update(&pubkey.as_ref());
+hasher.update(owner.as_ref());
+hasher.update(pubkey.as_ref());

Hash(<[u8; solana_sdk::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap())
}

@@ -4834,7 +4834,7 @@ impl AccountsDb {
);

if check_hash {
-let computed_hash = loaded_account.compute_hash(slot, &pubkey);
+let computed_hash = loaded_account.compute_hash(slot, pubkey);
if computed_hash != source_item.hash {
info!(
"hash mismatch found: computed: {}, loaded: {}, pubkey: {}",

@@ -4905,7 +4905,7 @@ impl AccountsDb {
};

let result = Self::scan_snapshot_stores_with_cache(
-&storages,
+storages,
&mut stats,
PUBKEY_BINS_FOR_CALCULATING_HASHES,
&bounds,

@@ -5100,8 +5100,8 @@ impl AccountsDb {
self.accounts_index.upsert(
slot,
pubkey,
-&pubkey_account.1.owner(),
-&pubkey_account.1.data(),
+pubkey_account.1.owner(),
+pubkey_account.1.data(),
&self.account_indexes,
info,
&mut reclaims,

@@ -5353,7 +5353,7 @@ impl AccountsDb {

pub(crate) fn freeze_accounts(&mut self, ancestors: &Ancestors, account_pubkeys: &[Pubkey]) {
for account_pubkey in account_pubkeys {
-if let Some((account, _slot)) = self.load_without_fixed_root(ancestors, &account_pubkey)
+if let Some((account, _slot)) = self.load_without_fixed_root(ancestors, account_pubkey)
{
let frozen_account_info = FrozenAccountInfo {
hash: Self::hash_frozen_account_data(&account),

@@ -5391,7 +5391,7 @@ impl AccountsDb {
)
}

-let hash = Self::hash_frozen_account_data(&account);
+let hash = Self::hash_frozen_account_data(account);
if hash != frozen_account_info.hash {
FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
panic!(

@@ -5735,10 +5735,10 @@ impl AccountsDb {
if *slot <= snapshot_slot
&& (self.accounts_index.is_root(*slot)
|| ancestors
-.map(|ancestors| ancestors.contains_key(&slot))
+.map(|ancestors| ancestors.contains_key(slot))
.unwrap_or_default())
{
-self.storage.0.get(&slot).map_or_else(
+self.storage.0.get(slot).map_or_else(
|| None,
|item| {
let storages = item
@ -5891,9 +5891,9 @@ impl AccountsDb {
|
||||
if !self.account_indexes.is_empty() {
|
||||
for (pubkey, (_, _store_id, stored_account)) in accounts_map.iter() {
|
||||
self.accounts_index.update_secondary_indexes(
|
||||
&pubkey,
|
||||
pubkey,
|
||||
&stored_account.account_meta.owner,
|
||||
&stored_account.data,
|
||||
stored_account.data,
|
||||
&self.account_indexes,
|
||||
);
|
||||
}
|
||||
@ -5929,7 +5929,7 @@ impl AccountsDb {
|
||||
for (id, store) in slot_stores.value().read().unwrap().iter() {
|
||||
// Should be default at this point
|
||||
assert_eq!(store.alive_bytes(), 0);
|
||||
if let Some((stored_size, count)) = stored_sizes_and_counts.get(&id) {
|
||||
if let Some((stored_size, count)) = stored_sizes_and_counts.get(id) {
|
||||
trace!("id: {} setting count: {} cur: {}", id, count, store.count(),);
|
||||
store.count_and_status.write().unwrap().0 = *count;
|
||||
store.alive_bytes.store(*stored_size, Ordering::SeqCst);
|
||||
@ -6020,7 +6020,7 @@ impl AccountsDb {
|
||||
|
||||
pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option<AppendVecId> {
|
||||
let ancestors = vec![(slot, 1)].into_iter().collect();
|
||||
let result = self.accounts_index.get(&pubkey, Some(&ancestors), None);
|
||||
let result = self.accounts_index.get(pubkey, Some(&ancestors), None);
|
||||
result.map(|(list, index)| list.slot_list()[index].1.store_id)
|
||||
}
|
||||
|
||||
@ -7296,7 +7296,7 @@ pub mod tests {
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
assert_eq!(
|
||||
accounts
|
||||
.load_without_fixed_root(&ancestors, &key)
|
||||
.load_without_fixed_root(&ancestors, key)
|
||||
.unwrap()
|
||||
.0
|
||||
.lamports(),
|
||||
@ -7462,7 +7462,7 @@ pub mod tests {
|
||||
}
|
||||
|
||||
fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
|
||||
self.accounts_index.ref_count_from_storage(&pubkey)
|
||||
self.accounts_index.ref_count_from_storage(pubkey)
|
||||
}
|
||||
}
|
||||
|
||||
@ -9390,7 +9390,7 @@ pub mod tests {
|
||||
|
||||
current_slot += 1;
|
||||
for pubkey in &pubkeys {
|
||||
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
|
||||
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
|
||||
}
|
||||
let shrink_slot = current_slot;
|
||||
accounts.get_accounts_delta_hash(current_slot);
|
||||
@ -9401,7 +9401,7 @@ pub mod tests {
|
||||
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
|
||||
|
||||
for pubkey in updated_pubkeys {
|
||||
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
|
||||
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
|
||||
}
|
||||
accounts.get_accounts_delta_hash(current_slot);
|
||||
accounts.add_root(current_slot);
|
||||
@ -9459,7 +9459,7 @@ pub mod tests {
|
||||
|
||||
current_slot += 1;
|
||||
for pubkey in &pubkeys {
|
||||
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
|
||||
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
|
||||
}
|
||||
let shrink_slot = current_slot;
|
||||
accounts.get_accounts_delta_hash(current_slot);
|
||||
@ -9470,7 +9470,7 @@ pub mod tests {
|
||||
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
|
||||
|
||||
for pubkey in updated_pubkeys {
|
||||
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
|
||||
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
|
||||
}
|
||||
accounts.get_accounts_delta_hash(current_slot);
|
||||
accounts.add_root(current_slot);
|
||||
@ -9517,7 +9517,7 @@ pub mod tests {
|
||||
|
||||
let dummy_id1 = 22;
|
||||
let entry1 = Arc::new(AccountStorageEntry::new(
|
||||
&dummy_path,
|
||||
dummy_path,
|
||||
dummy_slot,
|
||||
dummy_id1,
|
||||
dummy_size,
|
||||
@ -9531,7 +9531,7 @@ pub mod tests {
|
||||
|
||||
let dummy_id2 = 44;
|
||||
let entry2 = Arc::new(AccountStorageEntry::new(
|
||||
&dummy_path,
|
||||
dummy_path,
|
||||
dummy_slot,
|
||||
dummy_id2,
|
||||
dummy_size,
|
||||
@ -9553,7 +9553,7 @@ pub mod tests {
|
||||
let dummy_size = 4 * PAGE_SIZE;
|
||||
let dummy_id1 = 22;
|
||||
let entry1 = Arc::new(AccountStorageEntry::new(
|
||||
&dummy_path,
|
||||
dummy_path,
|
||||
dummy_slot,
|
||||
dummy_id1,
|
||||
dummy_size,
|
||||
@ -9568,7 +9568,7 @@ pub mod tests {
|
||||
let dummy_id2 = 44;
|
||||
let dummy_slot2 = 44;
|
||||
let entry2 = Arc::new(AccountStorageEntry::new(
|
||||
&dummy_path,
|
||||
dummy_path,
|
||||
dummy_slot2,
|
||||
dummy_id2,
|
||||
dummy_size,
|
||||
@ -9612,7 +9612,7 @@ pub mod tests {
|
||||
|
||||
current_slot += 1;
|
||||
for pubkey in &pubkeys {
|
||||
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
|
||||
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
|
||||
}
|
||||
let shrink_slot = current_slot;
|
||||
accounts.get_accounts_delta_hash(current_slot);
|
||||
@ -9623,7 +9623,7 @@ pub mod tests {
|
||||
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
|
||||
|
||||
for pubkey in updated_pubkeys {
|
||||
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
|
||||
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
|
||||
}
|
||||
accounts.get_accounts_delta_hash(current_slot);
|
||||
accounts.add_root(current_slot);
|
||||
@ -9766,7 +9766,7 @@ pub mod tests {
|
||||
info!(
|
||||
"store: {:?} : {:?}",
|
||||
store,
|
||||
store_counts.get(&store).unwrap()
|
||||
store_counts.get(store).unwrap()
|
||||
);
|
||||
}
|
||||
for x in 0..3 {
|
||||
@ -11037,7 +11037,7 @@ pub mod tests {
|
||||
|
||||
let dummy_id1 = 22;
|
||||
let entry1 = Arc::new(AccountStorageEntry::new(
|
||||
&dummy_path,
|
||||
dummy_path,
|
||||
dummy_slot,
|
||||
dummy_id1,
|
||||
dummy_size,
|
||||
@ -11045,7 +11045,7 @@ pub mod tests {
|
||||
|
||||
let dummy_id2 = 44;
|
||||
let entry2 = Arc::new(AccountStorageEntry::new(
|
||||
&dummy_path,
|
||||
dummy_path,
|
||||
dummy_slot,
|
||||
dummy_id2,
|
||||
dummy_size,
|
||||
@ -11584,7 +11584,7 @@ pub mod tests {
|
||||
let mut accounts = AccountsDb::new_single();
|
||||
let dummy_path = Path::new("");
|
||||
let dummy_size = 2 * PAGE_SIZE;
|
||||
let entry = Arc::new(AccountStorageEntry::new(&dummy_path, 0, 1, dummy_size));
|
||||
let entry = Arc::new(AccountStorageEntry::new(dummy_path, 0, 1, dummy_size));
|
||||
match accounts.shrink_ratio {
|
||||
AccountShrinkThreshold::TotalSpace { shrink_ratio } => {
|
||||
assert_eq!(
|
||||
|
@ -255,7 +255,7 @@ impl AccountsHash {
|
||||
|
||||
let mut hasher = Hasher::default();
|
||||
for item in hashes.iter().take(end_index).skip(start_index) {
|
||||
let h = extractor(&item);
|
||||
let h = extractor(item);
|
||||
hasher.hash(h.as_ref());
|
||||
}
|
||||
|
||||
|
@ -923,7 +923,7 @@ impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> {
|
||||
read_lock_timer.stop();
|
||||
read_lock_elapsed += read_lock_timer.as_us();
|
||||
let mut latest_slot_timer = Measure::start("latest_slot");
|
||||
if let Some(index) = self.latest_slot(Some(ancestors), &list_r, max_root) {
|
||||
if let Some(index) = self.latest_slot(Some(ancestors), list_r, max_root) {
|
||||
latest_slot_timer.stop();
|
||||
latest_slot_elapsed += latest_slot_timer.as_us();
|
||||
let mut load_account_timer = Measure::start("load_account");
|
||||
@ -1157,7 +1157,7 @@ impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> {
|
||||
max: Option<Slot>,
|
||||
) -> (SlotList<T>, RefCount) {
|
||||
(
|
||||
self.get_rooted_entries(&locked_account_entry.slot_list(), max),
|
||||
self.get_rooted_entries(locked_account_entry.slot_list(), max),
|
||||
locked_account_entry.ref_count().load(Ordering::Relaxed),
|
||||
)
|
||||
}
|
||||
@ -1174,7 +1174,7 @@ impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> {
|
||||
if let Some(mut write_account_map_entry) = self.get_account_write_entry(pubkey) {
|
||||
write_account_map_entry.slot_list_mut(|slot_list| {
|
||||
slot_list.retain(|(slot, item)| {
|
||||
let should_purge = slots_to_purge.contains(&slot);
|
||||
let should_purge = slots_to_purge.contains(slot);
|
||||
if should_purge {
|
||||
reclaims.push((*slot, item.clone()));
|
||||
false
|
||||
@ -1228,7 +1228,7 @@ impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> {
|
||||
Some(inner) => inner,
|
||||
None => self.roots_tracker.read().unwrap(),
|
||||
};
|
||||
if lock.roots.contains(&slot) {
|
||||
if lock.roots.contains(slot) {
|
||||
rv = Some(i);
|
||||
current_max = *slot;
|
||||
}
|
||||
@ -1483,7 +1483,7 @@ impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> {
|
||||
) {
|
||||
let roots_tracker = &self.roots_tracker.read().unwrap();
|
||||
let newest_root_in_slot_list =
|
||||
Self::get_newest_root_in_slot_list(&roots_tracker.roots, &slot_list, max_clean_root);
|
||||
Self::get_newest_root_in_slot_list(&roots_tracker.roots, slot_list, max_clean_root);
|
||||
let max_clean_root = max_clean_root.unwrap_or(roots_tracker.max_root);
|
||||
|
||||
let mut purged_slots: HashSet<Slot> = HashSet::new();
|
||||
@ -1946,7 +1946,7 @@ pub mod tests {
|
||||
fn remove(&mut self, slot: &u64) -> bool {
|
||||
let result = self.bitfield.remove(slot);
|
||||
assert_eq!(result, self.hash_set.remove(slot));
|
||||
assert!(!self.bitfield.contains(&slot));
|
||||
assert!(!self.bitfield.contains(slot));
|
||||
self.compare();
|
||||
result
|
||||
}
|
||||
@ -2211,7 +2211,7 @@ pub mod tests {
|
||||
compare_internal(hashset, bitfield);
|
||||
let clone = bitfield.clone();
|
||||
compare_internal(hashset, &clone);
|
||||
assert!(clone.eq(&bitfield));
|
||||
assert!(clone.eq(bitfield));
|
||||
assert_eq!(clone, *bitfield);
|
||||
}
|
||||
|
||||
@ -2262,8 +2262,8 @@ pub mod tests {
|
||||
|
||||
// remove the rest, including a call that removes slot again
|
||||
for item in all.iter() {
|
||||
assert!(tester.remove(&item));
|
||||
assert!(!tester.remove(&item));
|
||||
assert!(tester.remove(item));
|
||||
assert!(!tester.remove(item));
|
||||
}
|
||||
|
||||
let min = max + ((width * 2) as u64) + 3;
|
||||
@ -2538,15 +2538,15 @@ pub mod tests {
|
||||
assert!(index.zero_lamport_pubkeys().is_empty());
|
||||
|
||||
let mut ancestors = Ancestors::default();
|
||||
assert!(index.get(&pubkey, Some(&ancestors), None).is_none());
|
||||
assert!(index.get(&pubkey, None, None).is_none());
|
||||
assert!(index.get(pubkey, Some(&ancestors), None).is_none());
|
||||
assert!(index.get(pubkey, None, None).is_none());
|
||||
|
||||
let mut num = 0;
|
||||
index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1);
|
||||
assert_eq!(num, 0);
|
||||
ancestors.insert(slot, 0);
|
||||
assert!(index.get(&pubkey, Some(&ancestors), None).is_some());
|
||||
assert_eq!(index.ref_count_from_storage(&pubkey), 1);
|
||||
assert!(index.get(pubkey, Some(&ancestors), None).is_some());
|
||||
assert_eq!(index.ref_count_from_storage(pubkey), 1);
|
||||
index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1);
|
||||
assert_eq!(num, 1);
|
||||
|
||||
@ -2559,15 +2559,15 @@ pub mod tests {
|
||||
assert!(!index.zero_lamport_pubkeys().is_empty());
|
||||
|
||||
let mut ancestors = Ancestors::default();
|
||||
assert!(index.get(&pubkey, Some(&ancestors), None).is_none());
|
||||
assert!(index.get(&pubkey, None, None).is_none());
|
||||
assert!(index.get(pubkey, Some(&ancestors), None).is_none());
|
||||
assert!(index.get(pubkey, None, None).is_none());
|
||||
|
||||
let mut num = 0;
|
||||
index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1);
|
||||
assert_eq!(num, 0);
|
||||
ancestors.insert(slot, 0);
|
||||
assert!(index.get(&pubkey, Some(&ancestors), None).is_some());
|
||||
assert_eq!(index.ref_count_from_storage(&pubkey), 0); // cached, so 0
|
||||
assert!(index.get(pubkey, Some(&ancestors), None).is_some());
|
||||
assert_eq!(index.ref_count_from_storage(pubkey), 0); // cached, so 0
|
||||
index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1);
|
||||
assert_eq!(num, 1);
|
||||
}
|
||||
@ -3681,7 +3681,7 @@ pub mod tests {
|
||||
|
||||
// Both pubkeys will now be present in the index
|
||||
check_secondary_index_mapping_correct(
|
||||
&secondary_index,
|
||||
secondary_index,
|
||||
&[secondary_key1, secondary_key2],
|
||||
&account_key,
|
||||
);
|
||||
|
@ -170,7 +170,7 @@ pub mod tests {
|
||||
let key = item.0;
|
||||
min = std::cmp::min(min, *key);
|
||||
max = std::cmp::max(max, *key);
|
||||
assert!(ancestors.get(&key));
|
||||
assert!(ancestors.get(key));
|
||||
}
|
||||
for slot in min - 1..max + 2 {
|
||||
assert_eq!(ancestors.get(&slot), hashset.contains(&slot));
|
||||
|
@ -778,7 +778,7 @@ pub mod tests {
|
||||
fn test_new_from_file_crafted_zero_lamport_account() {
|
||||
let file = get_append_vec_path("test_append");
|
||||
let path = &file.path;
|
||||
let mut av = AppendVec::new(&path, true, 1024 * 1024);
|
||||
let mut av = AppendVec::new(path, true, 1024 * 1024);
|
||||
av.set_no_remove_on_drop();
|
||||
|
||||
let pubkey = solana_sdk::pubkey::new_rand();
|
||||
@ -806,7 +806,7 @@ pub mod tests {
|
||||
fn test_new_from_file_crafted_data_len() {
|
||||
let file = get_append_vec_path("test_new_from_file_crafted_data_len");
|
||||
let path = &file.path;
|
||||
let mut av = AppendVec::new(&path, true, 1024 * 1024);
|
||||
let mut av = AppendVec::new(path, true, 1024 * 1024);
|
||||
av.set_no_remove_on_drop();
|
||||
|
||||
let crafted_data_len = 1;
|
||||
@ -834,7 +834,7 @@ pub mod tests {
|
||||
fn test_new_from_file_too_large_data_len() {
|
||||
let file = get_append_vec_path("test_new_from_file_too_large_data_len");
|
||||
let path = &file.path;
|
||||
let mut av = AppendVec::new(&path, true, 1024 * 1024);
|
||||
let mut av = AppendVec::new(path, true, 1024 * 1024);
|
||||
av.set_no_remove_on_drop();
|
||||
|
||||
let too_large_data_len = u64::max_value();
|
||||
@ -860,7 +860,7 @@ pub mod tests {
|
||||
fn test_new_from_file_crafted_executable() {
|
||||
let file = get_append_vec_path("test_new_from_crafted_executable");
|
||||
let path = &file.path;
|
||||
let mut av = AppendVec::new(&path, true, 1024 * 1024);
|
||||
let mut av = AppendVec::new(path, true, 1024 * 1024);
|
||||
av.set_no_remove_on_drop();
|
||||
av.append_account_test(&create_test_account(10)).unwrap();
|
||||
{
|
||||
|
@ -1007,7 +1007,7 @@ impl Default for BlockhashQueue {
|
||||
impl Bank {
|
||||
pub fn new(genesis_config: &GenesisConfig) -> Self {
|
||||
Self::new_with_paths(
|
||||
&genesis_config,
|
||||
genesis_config,
|
||||
Vec::new(),
|
||||
&[],
|
||||
None,
|
||||
@ -1021,7 +1021,7 @@ impl Bank {
|
||||
|
||||
pub fn new_no_wallclock_throttle(genesis_config: &GenesisConfig) -> Self {
|
||||
let mut bank = Self::new_with_paths(
|
||||
&genesis_config,
|
||||
genesis_config,
|
||||
Vec::new(),
|
||||
&[],
|
||||
None,
|
||||
@ -1044,7 +1044,7 @@ impl Bank {
|
||||
shrink_ratio: AccountShrinkThreshold,
|
||||
) -> Self {
|
||||
Self::new_with_paths(
|
||||
&genesis_config,
|
||||
genesis_config,
|
||||
Vec::new(),
|
||||
&[],
|
||||
None,
|
||||
@ -1708,7 +1708,7 @@ impl Bank {
|
||||
// if I'm the first Bank in an epoch, ensure stake_history is updated
|
||||
self.update_sysvar_account(&sysvar::stake_history::id(), |account| {
|
||||
create_account::<sysvar::stake_history::StakeHistory>(
|
||||
&self.stakes.read().unwrap().history(),
|
||||
self.stakes.read().unwrap().history(),
|
||||
self.inherit_specially_retained_account_fields(account),
|
||||
)
|
||||
});
|
||||
@ -1766,7 +1766,7 @@ impl Bank {
|
||||
.feature_set
|
||||
.full_inflation_features_enabled()
|
||||
.iter()
|
||||
.filter_map(|id| self.feature_set.activated_slot(&id))
|
||||
.filter_map(|id| self.feature_set.activated_slot(id))
|
||||
.collect::<Vec<_>>();
|
||||
slots.sort_unstable();
|
||||
slots.get(0).cloned().unwrap_or_else(|| {
|
||||
@ -1913,7 +1913,7 @@ impl Bank {
|
||||
.iter()
|
||||
.for_each(|(stake_pubkey, delegation)| {
|
||||
match (
|
||||
self.get_account_with_fixed_root(&stake_pubkey),
|
||||
self.get_account_with_fixed_root(stake_pubkey),
|
||||
self.get_account_with_fixed_root(&delegation.voter_pubkey),
|
||||
) {
|
||||
(Some(stake_account), Some(vote_account)) => {
|
||||
@ -1979,8 +1979,8 @@ impl Bank {
|
||||
})
|
||||
.map(|(stake_account, vote_account)| {
|
||||
stake_state::calculate_points(
|
||||
&stake_account,
|
||||
&vote_account,
|
||||
stake_account,
|
||||
vote_account,
|
||||
Some(&stake_history),
|
||||
fix_stake_deactivate,
|
||||
)
|
||||
@ -2019,7 +2019,7 @@ impl Bank {
|
||||
fix_stake_deactivate,
|
||||
);
|
||||
if let Ok((stakers_reward, _voters_reward)) = redeemed {
|
||||
self.store_account(&stake_pubkey, &stake_account);
|
||||
self.store_account(stake_pubkey, stake_account);
|
||||
vote_account_changed = true;
|
||||
|
||||
if stakers_reward > 0 {
|
||||
@ -2053,7 +2053,7 @@ impl Bank {
|
||||
},
|
||||
));
|
||||
}
|
||||
self.store_account(&vote_pubkey, &vote_account);
|
||||
self.store_account(vote_pubkey, vote_account);
|
||||
}
|
||||
}
|
||||
self.rewards.write().unwrap().append(&mut rewards);
|
||||
@ -2296,7 +2296,7 @@ impl Bank {
|
||||
self.fee_calculator = self.fee_rate_governor.create_fee_calculator();
|
||||
|
||||
for (pubkey, account) in genesis_config.accounts.iter() {
|
||||
if self.get_account(&pubkey).is_some() {
|
||||
if self.get_account(pubkey).is_some() {
|
||||
panic!("{} repeated in genesis config", pubkey);
|
||||
}
|
||||
self.store_account(pubkey, &AccountSharedData::from(account.clone()));
|
||||
@ -2307,7 +2307,7 @@ impl Bank {
|
||||
self.update_fees();
|
||||
|
||||
for (pubkey, account) in genesis_config.rewards_pools.iter() {
|
||||
if self.get_account(&pubkey).is_some() {
|
||||
if self.get_account(pubkey).is_some() {
|
||||
panic!("{} repeated in genesis config", pubkey);
|
||||
}
|
||||
self.store_account(pubkey, &AccountSharedData::from(account.clone()));
|
||||
@ -2354,11 +2354,11 @@ impl Bank {
|
||||
// NOTE: must hold idempotent for the same set of arguments
|
||||
pub fn add_native_program(&self, name: &str, program_id: &Pubkey, must_replace: bool) {
|
||||
let existing_genuine_program =
|
||||
if let Some(mut account) = self.get_account_with_fixed_root(&program_id) {
|
||||
if let Some(mut account) = self.get_account_with_fixed_root(program_id) {
|
||||
// it's very unlikely to be squatted at program_id as non-system account because of burden to
|
||||
// find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's
|
||||
// safe to assume it's a genuine program.
|
||||
if native_loader::check_id(&account.owner()) {
|
||||
if native_loader::check_id(account.owner()) {
|
||||
Some(account)
|
||||
} else {
|
||||
// malicious account is pre-occupying at program_id
|
||||
@ -2369,7 +2369,7 @@ impl Bank {
|
||||
// Resetting account balance to 0 is needed to really purge from AccountsDb and
|
||||
// flush the Stakes cache
|
||||
account.set_lamports(0);
|
||||
self.store_account(&program_id, &account);
|
||||
self.store_account(program_id, &account);
|
||||
None
|
||||
}
|
||||
} else {
|
||||
@ -2385,7 +2385,7 @@ impl Bank {
|
||||
name, program_id
|
||||
),
|
||||
Some(account) => {
|
||||
if *name == String::from_utf8_lossy(&account.data()) {
|
||||
if *name == String::from_utf8_lossy(account.data()) {
|
||||
// nop; it seems that already AccountsDb is updated.
|
||||
return;
|
||||
}
|
||||
@ -2425,7 +2425,7 @@ impl Bank {
|
||||
name,
|
||||
self.inherit_specially_retained_account_fields(&existing_genuine_program),
|
||||
);
|
||||
self.store_account_and_update_capitalization(&program_id, &account);
|
||||
self.store_account_and_update_capitalization(program_id, &account);
|
||||
|
||||
debug!("Added native program {} under {:?}", name, program_id);
|
||||
}
|
||||
@ -2594,7 +2594,7 @@ impl Bank {
|
||||
hashed_txs.as_transactions_iter(),
|
||||
self.demote_sysvar_write_locks(),
|
||||
);
|
||||
TransactionBatch::new(lock_results, &self, Cow::Owned(hashed_txs))
|
||||
TransactionBatch::new(lock_results, self, Cow::Owned(hashed_txs))
|
||||
}
|
||||
|
||||
pub fn prepare_hashed_batch<'a, 'b>(
|
||||
@ -2605,7 +2605,7 @@ impl Bank {
|
||||
hashed_txs.as_transactions_iter(),
|
||||
self.demote_sysvar_write_locks(),
|
||||
);
|
||||
TransactionBatch::new(lock_results, &self, Cow::Borrowed(hashed_txs))
|
||||
TransactionBatch::new(lock_results, self, Cow::Borrowed(hashed_txs))
|
||||
}
|
||||
|
||||
pub(crate) fn prepare_simulation_batch<'a, 'b>(
|
||||
@ -2614,7 +2614,7 @@ impl Bank {
|
||||
) -> TransactionBatch<'a, 'b> {
|
||||
let mut batch = TransactionBatch::new(
|
||||
vec![tx.sanitize().map_err(|e| e.into())],
|
||||
&self,
|
||||
self,
|
||||
Cow::Owned(vec![HashedTransaction::from(tx)]),
|
||||
);
|
||||
batch.needs_unlock = false;
|
||||
@ -2628,7 +2628,7 @@ impl Bank {
|
||||
) -> (Result<()>, TransactionLogMessages, Vec<AccountSharedData>) {
|
||||
assert!(self.is_frozen(), "simulation bank must be frozen");
|
||||
|
||||
let batch = self.prepare_simulation_batch(&transaction);
|
||||
let batch = self.prepare_simulation_batch(transaction);
|
||||
|
||||
let mut timings = ExecuteTimings::default();
|
||||
|
||||
@ -2703,7 +2703,7 @@ impl Bank {
|
||||
let hash_age = hash_queue.check_hash_age(&message.recent_blockhash, max_age);
|
||||
if hash_age == Some(true) {
|
||||
(Ok(()), None)
|
||||
} else if let Some((pubkey, acc)) = self.check_tx_durable_nonce(&tx) {
|
||||
} else if let Some((pubkey, acc)) = self.check_tx_durable_nonce(tx) {
|
||||
(Ok(()), Some(NonceRollbackPartial::new(pubkey, acc)))
|
||||
} else if hash_age == Some(false) {
|
||||
error_counters.blockhash_too_old += 1;
|
||||
@ -2794,10 +2794,10 @@ impl Bank {
|
||||
}
|
||||
|
||||
pub fn check_tx_durable_nonce(&self, tx: &Transaction) -> Option<(Pubkey, AccountSharedData)> {
|
||||
transaction::uses_durable_nonce(&tx)
|
||||
.and_then(|nonce_ix| transaction::get_nonce_pubkey_from_instruction(&nonce_ix, &tx))
|
||||
transaction::uses_durable_nonce(tx)
|
||||
.and_then(|nonce_ix| transaction::get_nonce_pubkey_from_instruction(nonce_ix, tx))
|
||||
.and_then(|nonce_pubkey| {
|
||||
self.get_account(&nonce_pubkey)
|
||||
self.get_account(nonce_pubkey)
|
||||
.map(|acc| (*nonce_pubkey, acc))
|
||||
})
|
||||
.filter(|(_pubkey, nonce_account)| {
|
||||
@ -3437,7 +3437,7 @@ impl Bank {
|
||||
hashed_txs.len()
|
||||
);
|
||||
timings.store_us += write_time.as_us();
|
||||
self.update_transaction_statuses(hashed_txs, &executed);
|
||||
self.update_transaction_statuses(hashed_txs, executed);
|
||||
let fee_collection_results =
|
||||
self.filter_program_errors_and_collect_fee(hashed_txs.as_transactions_iter(), executed);
|
||||
|
||||
@ -4195,7 +4195,7 @@ impl Bank {
|
||||
pubkey: &Pubkey,
|
||||
new_account: &AccountSharedData,
|
||||
) {
|
||||
if let Some(old_account) = self.get_account_with_fixed_root(&pubkey) {
|
||||
if let Some(old_account) = self.get_account_with_fixed_root(pubkey) {
|
||||
match new_account.lamports().cmp(&old_account.lamports()) {
|
||||
std::cmp::Ordering::Greater => {
|
||||
self.capitalization
|
||||
@ -5064,7 +5064,7 @@ impl Bank {
|
||||
|
||||
pub fn deactivate_feature(&mut self, id: &Pubkey) {
|
||||
let mut feature_set = Arc::make_mut(&mut self.feature_set).clone();
|
||||
feature_set.active.remove(&id);
|
||||
feature_set.active.remove(id);
|
||||
feature_set.inactive.insert(*id);
|
||||
self.feature_set = Arc::new(feature_set);
|
||||
}
|
||||
@ -5164,8 +5164,8 @@ impl Bank {
|
||||
) {
|
||||
let feature_builtins = self.feature_builtins.clone();
|
||||
for (builtin, feature, activation_type) in feature_builtins.iter() {
|
||||
let should_populate = init_or_warp && self.feature_set.is_active(&feature)
|
||||
|| !init_or_warp && new_feature_activations.contains(&feature);
|
||||
let should_populate = init_or_warp && self.feature_set.is_active(feature)
|
||||
|| !init_or_warp && new_feature_activations.contains(feature);
|
||||
if should_populate {
|
||||
match activation_type {
|
||||
ActivationType::NewProgram => self.add_builtin(
|
||||
@ -5267,10 +5267,10 @@ impl Bank {
|
||||
|
||||
if purge_window_epoch {
|
||||
for reward_pubkey in self.rewards_pool_pubkeys.iter() {
|
||||
if let Some(mut reward_account) = self.get_account_with_fixed_root(&reward_pubkey) {
|
||||
if let Some(mut reward_account) = self.get_account_with_fixed_root(reward_pubkey) {
|
||||
if reward_account.lamports() == u64::MAX {
|
||||
reward_account.set_lamports(0);
|
||||
self.store_account(&reward_pubkey, &reward_account);
|
||||
self.store_account(reward_pubkey, &reward_account);
|
||||
// Adjust capitalization.... it has been wrapping, reducing the real capitalization by 1-lamport
|
||||
self.capitalization.fetch_add(1, Relaxed);
|
||||
info!(
|
||||
@ -5313,7 +5313,7 @@ impl Drop for Bank {
|
||||
pub fn goto_end_of_slot(bank: &mut Bank) {
|
||||
let mut tick_hash = bank.last_blockhash();
|
||||
loop {
|
||||
tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]);
|
||||
tick_hash = hashv(&[tick_hash.as_ref(), &[42]]);
|
||||
bank.register_tick(&tick_hash);
|
||||
if tick_hash == bank.last_blockhash() {
|
||||
bank.freeze();
|
||||
@ -7092,7 +7092,7 @@ pub(crate) mod tests {
|
||||
.accounts
|
||||
.accounts_db
|
||||
.accounts_index
|
||||
.get(&pubkey, Some(&ancestors), None)
|
||||
.get(pubkey, Some(ancestors), None)
|
||||
.unwrap();
|
||||
locked_entry
|
||||
.slot_list()
|
||||
@ -7320,7 +7320,7 @@ pub(crate) mod tests {
|
||||
.map(move |(_stake_pubkey, stake_account)| (stake_account, vote_account))
|
||||
})
|
||||
.map(|(stake_account, vote_account)| {
|
||||
stake_state::calculate_points(&stake_account, &vote_account, None, true)
|
||||
stake_state::calculate_points(stake_account, vote_account, None, true)
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.sum();
|
||||
@ -9462,11 +9462,11 @@ pub(crate) mod tests {
|
||||
assert_eq!(bank.calculate_capitalization(true), bank.capitalization());
|
||||
assert_eq!(
|
||||
"mock_program1",
|
||||
String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data())
|
||||
String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data())
|
||||
);
|
||||
assert_eq!(
|
||||
"mock_program2",
|
||||
String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data())
|
||||
String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data())
|
||||
);
|
||||
|
||||
// Re-adding builtin programs should be no-op
|
||||
@ -9482,11 +9482,11 @@ pub(crate) mod tests {
|
||||
assert_eq!(bank.calculate_capitalization(true), bank.capitalization());
|
||||
assert_eq!(
|
||||
"mock_program1",
|
||||
String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data())
|
||||
String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data())
|
||||
);
|
||||
assert_eq!(
|
||||
"mock_program2",
|
||||
String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data())
|
||||
String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data())
|
||||
);
|
||||
}
|
||||
|
||||
@ -9623,7 +9623,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
fn get_nonce_account(bank: &Bank, nonce_pubkey: &Pubkey) -> Option<Hash> {
|
||||
bank.get_account(&nonce_pubkey).and_then(|acc| {
|
||||
bank.get_account(nonce_pubkey).and_then(|acc| {
|
||||
let state =
|
||||
StateMut::<nonce::state::Versions>::state(&acc).map(|v| v.convert_to_current());
|
||||
match state {
|
||||
@ -10114,7 +10114,7 @@ pub(crate) mod tests {
|
||||
let pubkey2 = solana_sdk::pubkey::new_rand();
|
||||
let keypair0_account = AccountSharedData::new(8, 0, &Pubkey::default());
|
||||
let keypair1_account = AccountSharedData::new(9, 0, &Pubkey::default());
|
||||
let account0 = AccountSharedData::new(11, 0, &&Pubkey::default());
|
||||
let account0 = AccountSharedData::new(11, 0, &Pubkey::default());
|
||||
bank0.store_account(&keypair0.pubkey(), &keypair0_account);
|
||||
bank0.store_account(&keypair1.pubkey(), &keypair1_account);
|
||||
bank0.store_account(&pubkey0, &account0);
|
||||
@ -11991,7 +11991,7 @@ pub(crate) mod tests {
|
||||
|
||||
// Write accounts to the store
|
||||
for key in &all_pubkeys {
|
||||
bank0.store_account(&key, &starting_account);
|
||||
bank0.store_account(key, &starting_account);
|
||||
}
|
||||
|
||||
// Set aside a subset of accounts to modify
|
||||
|
@ -516,7 +516,7 @@ mod tests {
|
||||
slot: child.slot(),
|
||||
timestamp: recent_timestamp + additional_timestamp_secs,
|
||||
},
|
||||
&child,
|
||||
child,
|
||||
&voting_keypair.pubkey(),
|
||||
);
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ impl EpochStakes {
|
||||
pub fn new(stakes: &Stakes, leader_schedule_epoch: Epoch) -> Self {
|
||||
let epoch_vote_accounts = Stakes::vote_accounts(stakes);
|
||||
let (total_stake, node_id_to_vote_accounts, epoch_authorized_voters) =
|
||||
Self::parse_epoch_vote_accounts(&epoch_vote_accounts, leader_schedule_epoch);
|
||||
Self::parse_epoch_vote_accounts(epoch_vote_accounts, leader_schedule_epoch);
|
||||
Self {
|
||||
stakes: Arc::new(stakes.clone()),
|
||||
total_stake,
|
||||
|
@ -191,15 +191,15 @@ pub fn create_genesis_config_with_leader_ex(
|
||||
mut initial_accounts: Vec<(Pubkey, AccountSharedData)>,
|
||||
) -> GenesisConfig {
|
||||
let validator_vote_account = vote_state::create_account(
|
||||
&validator_vote_account_pubkey,
|
||||
&validator_pubkey,
|
||||
validator_vote_account_pubkey,
|
||||
validator_pubkey,
|
||||
0,
|
||||
validator_stake_lamports,
|
||||
);
|
||||
|
||||
let validator_stake_account = stake_state::create_account(
|
||||
validator_stake_account_pubkey,
|
||||
&validator_vote_account_pubkey,
|
||||
validator_vote_account_pubkey,
|
||||
&validator_vote_account,
|
||||
&rent,
|
||||
validator_stake_lamports,
|
||||
|
@ -280,7 +280,7 @@ pub fn open_genesis_config(
|
||||
ledger_path: &Path,
|
||||
max_genesis_archive_unpacked_size: u64,
|
||||
) -> GenesisConfig {
|
||||
GenesisConfig::load(&ledger_path).unwrap_or_else(|load_err| {
|
||||
GenesisConfig::load(ledger_path).unwrap_or_else(|load_err| {
|
||||
let genesis_package = ledger_path.join(DEFAULT_GENESIS_ARCHIVE);
|
||||
unpack_genesis_archive(
|
||||
&genesis_package,
|
||||
@ -296,7 +296,7 @@ pub fn open_genesis_config(
|
||||
});
|
||||
|
||||
// loading must succeed at this moment
|
||||
GenesisConfig::load(&ledger_path).unwrap()
|
||||
GenesisConfig::load(ledger_path).unwrap()
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -70,7 +70,7 @@ pub fn load_buffer_account<T: Client>(
|
||||
|
||||
bank_client
|
||||
.send_and_confirm_message(
|
||||
&[from_keypair, &buffer_keypair],
|
||||
&[from_keypair, buffer_keypair],
|
||||
Message::new(
|
||||
&bpf_loader_upgradeable::create_buffer(
|
||||
&from_keypair.pubkey(),
|
||||
@ -102,7 +102,7 @@ pub fn load_buffer_account<T: Client>(
|
||||
Some(&from_keypair.pubkey()),
|
||||
);
|
||||
bank_client
|
||||
.send_and_confirm_message(&[from_keypair, &buffer_authority_keypair], message)
|
||||
.send_and_confirm_message(&[from_keypair, buffer_authority_keypair], message)
|
||||
.unwrap();
|
||||
offset += chunk_size as u32;
|
||||
}
|
||||
@ -121,7 +121,7 @@ pub fn load_upgradeable_program<T: Client>(
|
||||
|
||||
load_buffer_account(
|
||||
bank_client,
|
||||
&from_keypair,
|
||||
from_keypair,
|
||||
buffer_keypair,
|
||||
authority_keypair,
|
||||
&program,
|
||||
@ -147,7 +147,7 @@ pub fn load_upgradeable_program<T: Client>(
|
||||
);
|
||||
bank_client
|
||||
.send_and_confirm_message(
|
||||
&[from_keypair, &executable_keypair, &authority_keypair],
|
||||
&[from_keypair, executable_keypair, authority_keypair],
|
||||
message,
|
||||
)
|
||||
.unwrap();
|
||||
@ -163,15 +163,15 @@ pub fn upgrade_program<T: Client>(
|
||||
) {
|
||||
let message = Message::new(
|
||||
&[bpf_loader_upgradeable::upgrade(
|
||||
&program_pubkey,
|
||||
&buffer_pubkey,
|
||||
program_pubkey,
|
||||
buffer_pubkey,
|
||||
&authority_keypair.pubkey(),
|
||||
&spill_pubkey,
|
||||
spill_pubkey,
|
||||
)],
|
||||
Some(&from_keypair.pubkey()),
|
||||
);
|
||||
bank_client
|
||||
.send_and_confirm_message(&[from_keypair, &authority_keypair], message)
|
||||
.send_and_confirm_message(&[from_keypair, authority_keypair], message)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
@ -191,7 +191,7 @@ pub fn set_upgrade_authority<T: Client>(
|
||||
Some(&from_keypair.pubkey()),
|
||||
);
|
||||
bank_client
|
||||
.send_and_confirm_message(&[from_keypair, ¤t_authority_keypair], message)
|
||||
.send_and_confirm_message(&[from_keypair, current_authority_keypair], message)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
|
@ -122,7 +122,7 @@ impl PreAccount {
|
||||
&& (!is_writable // line coverage used to get branch coverage
|
||||
|| pre.executable()
|
||||
|| program_id != pre.owner()
|
||||
|| !Self::is_zeroed(&post.data()))
|
||||
|| !Self::is_zeroed(post.data()))
|
||||
{
|
||||
return Err(InstructionError::ModifiedProgramId);
|
||||
}
|
||||
@ -454,7 +454,7 @@ impl<'a> InvokeContext for ThisInvokeContext<'a> {
|
||||
self.executors.borrow_mut().insert(*pubkey, executor);
|
||||
}
|
||||
fn get_executor(&self, pubkey: &Pubkey) -> Option<Arc<dyn Executor>> {
|
||||
self.executors.borrow().get(&pubkey)
|
||||
self.executors.borrow().get(pubkey)
|
||||
}
|
||||
fn record_instruction(&self, instruction: &Instruction) {
|
||||
if let Some(recorder) = &self.instruction_recorder {
|
||||
@ -657,7 +657,7 @@ impl MessageProcessor {
|
||||
if id == root_id {
|
||||
invoke_context.remove_first_keyed_account()?;
|
||||
// Call the builtin program
|
||||
return process_instruction(&program_id, instruction_data, invoke_context);
|
||||
return process_instruction(program_id, instruction_data, invoke_context);
|
||||
}
|
||||
}
|
||||
// Call the program via the native loader
|
||||
@ -671,7 +671,7 @@ impl MessageProcessor {
|
||||
for (id, process_instruction) in &self.programs {
|
||||
if id == owner_id {
|
||||
// Call the program via a builtin loader
|
||||
return process_instruction(&program_id, instruction_data, invoke_context);
|
||||
return process_instruction(program_id, instruction_data, invoke_context);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -782,7 +782,7 @@ impl MessageProcessor {
|
||||
.map(|index| keyed_account_at_index(keyed_accounts, *index))
|
||||
.collect::<Result<Vec<&KeyedAccount>, InstructionError>>()?;
|
||||
let (message, callee_program_id, _) =
|
||||
Self::create_message(&instruction, &keyed_accounts, &signers, &invoke_context)?;
|
||||
Self::create_message(&instruction, &keyed_accounts, signers, &invoke_context)?;
|
||||
let keyed_accounts = invoke_context.get_keyed_accounts()?;
|
||||
let mut caller_write_privileges = keyed_account_indices
|
||||
.iter()
|
||||
@ -1036,7 +1036,7 @@ impl MessageProcessor {
|
||||
let account = accounts[account_index].borrow();
|
||||
pre_accounts[unique_index]
|
||||
.verify(
|
||||
&program_id,
|
||||
program_id,
|
||||
message.is_writable(account_index, demote_sysvar_write_locks),
|
||||
rent,
|
||||
&account,
|
||||
@ -1102,7 +1102,7 @@ impl MessageProcessor {
|
||||
}
|
||||
let account = account.borrow();
|
||||
pre_account
|
||||
.verify(&program_id, is_writable, &rent, &account, timings, false)
|
||||
.verify(program_id, is_writable, rent, &account, timings, false)
|
||||
.map_err(|err| {
|
||||
ic_logger_msg!(logger, "failed to verify account {}: {}", key, err);
|
||||
err
|
||||
|
@ -112,7 +112,7 @@ impl NativeLoader {
|
||||
if let Some(entrypoint) = cache.get(name) {
|
||||
Ok(entrypoint.clone())
|
||||
} else {
|
||||
match Self::library_open(&Self::create_path(&name)?) {
|
||||
match Self::library_open(&Self::create_path(name)?) {
|
||||
Ok(library) => {
|
||||
let result = unsafe { library.get::<T>(name.as_bytes()) };
|
||||
match result {
|
||||
|
@ -70,7 +70,7 @@ pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> ScanResult<NonCircu
|
||||
|
||||
let lamports = non_circulating_accounts_set
|
||||
.iter()
|
||||
.map(|pubkey| bank.get_balance(&pubkey))
|
||||
.map(|pubkey| bank.get_balance(pubkey))
|
||||
.sum();
|
||||
|
||||
Ok(NonCirculatingSupply {
|
||||
|
@ -134,10 +134,10 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
|
||||
.downgrade()
|
||||
});
|
||||
|
||||
let should_insert = !outer_keys.read().unwrap().contains(&key);
|
||||
let should_insert = !outer_keys.read().unwrap().contains(key);
|
||||
if should_insert {
|
||||
let mut w_outer_keys = outer_keys.write().unwrap();
|
||||
if !w_outer_keys.contains(&key) {
|
||||
if !w_outer_keys.contains(key) {
|
||||
w_outer_keys.push(*key);
|
||||
}
|
||||
}
|
||||
@ -175,11 +175,11 @@ impl<SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send>
|
||||
let is_outer_key_empty = {
|
||||
let inner_key_map = self
|
||||
.index
|
||||
.get_mut(&outer_key)
|
||||
.get_mut(outer_key)
|
||||
.expect("If we're removing a key, then it must have an entry in the map");
|
||||
// If we deleted a pubkey from the reverse_index, then the corresponding entry
|
||||
// better exist in this index as well or the two indexes are out of sync!
|
||||
assert!(inner_key_map.value().remove_inner_key(&removed_inner_key));
|
||||
assert!(inner_key_map.value().remove_inner_key(removed_inner_key));
|
||||
inner_key_map.is_empty()
|
||||
};
|
||||
|
||||
|
@ -165,7 +165,7 @@ impl<'a> From<crate::bank::BankFieldsToSerialize<'a>> for SerializableVersionedB
|
||||
}
|
||||
Self {
|
||||
blockhash_queue: rhs.blockhash_queue,
|
||||
ancestors: &rhs.ancestors,
|
||||
ancestors: rhs.ancestors,
|
||||
hash: rhs.hash,
|
||||
parent_hash: rhs.parent_hash,
|
||||
parent_slot: rhs.parent_slot,
|
||||
|
@ -250,7 +250,7 @@ pub(crate) fn reconstruct_accounts_db_via_serialization(
|
||||
accountsdb_to_stream(
|
||||
SerdeStyle::Newer,
|
||||
&mut writer,
|
||||
&accounts,
|
||||
accounts,
|
||||
slot,
|
||||
&snapshot_storages,
|
||||
)
|
||||
@ -261,7 +261,7 @@ pub(crate) fn reconstruct_accounts_db_via_serialization(
|
||||
let copied_accounts = TempDir::new().unwrap();
|
||||
|
||||
// Simulate obtaining a copy of the AppendVecs from a tarball
|
||||
let unpacked_append_vec_map = copy_append_vecs(&accounts, copied_accounts.path()).unwrap();
|
||||
let unpacked_append_vec_map = copy_append_vecs(accounts, copied_accounts.path()).unwrap();
|
||||
let mut accounts_db =
|
||||
accountsdb_from_stream(SerdeStyle::Newer, &mut reader, &[], unpacked_append_vec_map)
|
||||
.unwrap();
|
||||
|
@ -622,7 +622,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
|
||||
let mut untar = Measure::start("untar");
|
||||
let unpacked_append_vec_map = untar_snapshot_in(
|
||||
&snapshot_tar,
|
||||
&unpack_dir.as_ref(),
|
||||
unpack_dir.as_ref(),
|
||||
account_paths,
|
||||
archive_format,
|
||||
)?;
|
||||
@ -913,7 +913,7 @@ pub fn verify_snapshot_archive<P, Q, R>(
|
||||
let unpack_dir = temp_dir.path();
|
||||
untar_snapshot_in(
|
||||
snapshot_archive,
|
||||
&unpack_dir,
|
||||
unpack_dir,
|
||||
&[unpack_dir.to_path_buf()],
|
||||
archive_format,
|
||||
)
|
||||
@ -953,7 +953,7 @@ pub fn snapshot_bank(
|
||||
) -> Result<()> {
|
||||
let storages: Vec<_> = root_bank.get_snapshot_storages();
|
||||
let mut add_snapshot_time = Measure::start("add-snapshot-ms");
|
||||
add_snapshot(snapshot_path, &root_bank, &storages, snapshot_version)?;
|
||||
add_snapshot(snapshot_path, root_bank, &storages, snapshot_version)?;
|
||||
add_snapshot_time.stop();
|
||||
inc_new_counter_info!("add-snapshot-ms", add_snapshot_time.as_ms() as usize);
|
||||
|
||||
@ -964,7 +964,7 @@ pub fn snapshot_bank(
|
||||
.expect("no snapshots found in config snapshot_path");
|
||||
|
||||
let package = package_snapshot(
|
||||
&root_bank,
|
||||
root_bank,
|
||||
latest_slot_snapshot_paths,
|
||||
snapshot_path,
|
||||
status_cache_slot_deltas,
|
||||
@ -1003,9 +1003,9 @@ pub fn bank_to_snapshot_archive<P: AsRef<Path>, Q: AsRef<Path>>(
|
||||
let temp_dir = tempfile::tempdir_in(snapshot_path)?;
|
||||
|
||||
let storages: Vec<_> = bank.get_snapshot_storages();
|
||||
let slot_snapshot_paths = add_snapshot(&temp_dir, &bank, &storages, snapshot_version)?;
|
||||
let slot_snapshot_paths = add_snapshot(&temp_dir, bank, &storages, snapshot_version)?;
|
||||
let package = package_snapshot(
|
||||
&bank,
|
||||
bank,
|
||||
&slot_snapshot_paths,
|
||||
&temp_dir,
|
||||
bank.src.slot_deltas(&bank.src.roots()),
|
||||
|
@ -134,7 +134,7 @@ impl Stakes {
|
||||
// when account is removed (lamports == 0 or data uninitialized), don't read so that
|
||||
// given `pubkey` can be used for any owner in the future, while not affecting Stakes.
|
||||
if account.lamports() != 0
|
||||
&& !(check_vote_init && VoteState::is_uninitialized_no_deser(&account.data()))
|
||||
&& !(check_vote_init && VoteState::is_uninitialized_no_deser(account.data()))
|
||||
{
|
||||
let stake = old.as_ref().map_or_else(
|
||||
|| {
|
||||
@ -258,8 +258,8 @@ pub mod tests {
|
||||
stake_pubkey,
|
||||
stake_state::create_account(
|
||||
&stake_pubkey,
|
||||
&vote_pubkey,
|
||||
&vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1),
|
||||
vote_pubkey,
|
||||
&vote_state::create_account(vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1),
|
||||
&Rent::free(),
|
||||
stake,
|
||||
),
|
||||
@ -290,8 +290,8 @@ pub mod tests {
|
||||
stake_pubkey,
|
||||
stake_state::create_account_with_activation_epoch(
|
||||
&stake_pubkey,
|
||||
&vote_pubkey,
|
||||
&vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1),
|
||||
vote_pubkey,
|
||||
&vote_state::create_account(vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1),
|
||||
&Rent::free(),
|
||||
stake,
|
||||
epoch,
|
||||
|
@ -251,7 +251,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
|
||||
.iter()
|
||||
.for_each(|(tx_hash, (key_index, statuses))| {
|
||||
for (key_slice, res) in statuses.iter() {
|
||||
self.insert_with_slice(&tx_hash, *slot, *key_index, *key_slice, res.clone())
|
||||
self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone())
|
||||
}
|
||||
});
|
||||
if *is_root {
|
||||
|
@ -79,7 +79,7 @@ fn allocate(
|
||||
|
||||
// if it looks like the `to` account is already in use, bail
|
||||
// (note that the id check is also enforced by message_processor)
|
||||
if !account.data().is_empty() || !system_program::check_id(&account.owner()) {
|
||||
if !account.data().is_empty() || !system_program::check_id(account.owner()) {
|
||||
ic_msg!(
|
||||
invoke_context,
|
||||
"Allocate: account {:?} already in use",
|
||||
@ -115,13 +115,13 @@ fn assign(
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !address.is_signer(&signers) {
|
||||
if !address.is_signer(signers) {
|
||||
ic_msg!(invoke_context, "Assign: account {:?} must sign", address);
|
||||
return Err(InstructionError::MissingRequiredSignature);
|
||||
}
|
||||
|
||||
// guard against sysvars being made
|
||||
if sysvar::check_id(&owner) {
|
||||
if sysvar::check_id(owner) {
|
||||
ic_msg!(invoke_context, "Assign: cannot assign to sysvar, {}", owner);
|
||||
return Err(SystemError::InvalidProgramId.into());
|
||||
}
|
||||
@ -300,13 +300,13 @@ pub fn process_instruction(
|
||||
let from = keyed_account_at_index(keyed_accounts, 0)?;
|
||||
let to = keyed_account_at_index(keyed_accounts, 1)?;
|
||||
let to_address = Address::create(
|
||||
&to.unsigned_key(),
|
||||
to.unsigned_key(),
|
||||
Some((&base, &seed, &owner)),
|
||||
invoke_context,
|
||||
)?;
|
||||
create_account(
|
||||
from,
|
||||
&to,
|
||||
to,
|
||||
&to_address,
|
||||
lamports,
|
||||
space,
|
||||
@ -736,11 +736,11 @@ mod tests {
|
||||
let result = create_account(
|
||||
&KeyedAccount::new(&from, true, &from_account),
|
||||
&KeyedAccount::new(&to, false, &to_account),
|
||||
&address,
|
||||
address,
|
||||
50,
|
||||
MAX_PERMITTED_DATA_LENGTH + 1,
|
||||
&system_program::id(),
|
||||
&signers,
|
||||
signers,
|
||||
&MockInvokeContext::new(vec![]),
|
||||
);
|
||||
assert!(result.is_err());
|
||||
@ -753,11 +753,11 @@ mod tests {
|
||||
let result = create_account(
|
||||
&KeyedAccount::new(&from, true, &from_account),
|
||||
&KeyedAccount::new(&to, false, &to_account),
|
||||
&address,
|
||||
address,
|
||||
50,
|
||||
MAX_PERMITTED_DATA_LENGTH,
|
||||
&system_program::id(),
|
||||
&signers,
|
||||
signers,
|
||||
&MockInvokeContext::new(vec![]),
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
@ -790,7 +790,7 @@ mod tests {
|
||||
50,
|
||||
2,
|
||||
&new_owner,
|
||||
&signers,
|
||||
signers,
|
||||
&MockInvokeContext::new(vec![]),
|
||||
);
|
||||
assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into()));
|
||||
@ -809,7 +809,7 @@ mod tests {
|
||||
50,
|
||||
2,
|
||||
&new_owner,
|
||||
&signers,
|
||||
signers,
|
||||
&MockInvokeContext::new(vec![]),
|
||||
);
|
||||
assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into()));
|
||||
@ -827,7 +827,7 @@ mod tests {
|
||||
50,
|
||||
2,
|
||||
&new_owner,
|
||||
&signers,
|
||||
signers,
|
||||
&MockInvokeContext::new(vec![]),
|
||||
);
|
||||
assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into()));
|
||||
@ -1141,7 +1141,7 @@ mod tests {
|
||||
transfer_with_seed(
|
||||
&from_keyed_account,
|
||||
&from_base_keyed_account,
|
||||
&from_seed,
|
||||
from_seed,
|
||||
&from_owner,
|
||||
&to_keyed_account,
|
||||
50,
|
||||
@ -1158,7 +1158,7 @@ mod tests {
|
||||
let result = transfer_with_seed(
|
||||
&from_keyed_account,
|
||||
&from_base_keyed_account,
|
||||
&from_seed,
|
||||
from_seed,
|
||||
&from_owner,
|
||||
&to_keyed_account,
|
||||
100,
|
||||
@ -1173,7 +1173,7 @@ mod tests {
|
||||
assert!(transfer_with_seed(
|
||||
&from_keyed_account,
|
||||
&from_base_keyed_account,
|
||||
&from_seed,
|
||||
from_seed,
|
||||
&from_owner,
|
||||
&to_keyed_account,
|
||||
0,
|
||||
|
@ -52,7 +52,7 @@ fn test_shrink_and_clean() {
|
||||
|
||||
for (pubkey, account) in alive_accounts.iter_mut() {
|
||||
account.checked_sub_lamports(1).unwrap();
|
||||
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
|
||||
accounts.store_uncached(current_slot, &[(pubkey, account)]);
|
||||
}
|
||||
accounts.add_root(current_slot);
|
||||
}
|
||||
@ -121,9 +121,9 @@ fn test_bad_bank_hash() {
|
||||
|
||||
for (key, account) in &account_refs {
|
||||
assert_eq!(
|
||||
db.load_account_hash(&ancestors, &key, None, LoadHint::Unspecified)
|
||||
db.load_account_hash(&ancestors, key, None, LoadHint::Unspecified)
|
||||
.unwrap(),
|
||||
AccountsDb::hash_account(some_slot, *account, &key)
|
||||
AccountsDb::hash_account(some_slot, *account, key)
|
||||
);
|
||||
}
|
||||
existing.clear();
|
||||
|

@@ -28,7 +28,7 @@ fn next_epoch(bank: &Arc<Bank>) -> Arc<Bank> {
bank.squash();

Arc::new(Bank::new_from_parent(
-&bank,
+bank,
&Pubkey::default(),
bank.get_slots_in_epoch(bank.epoch()) + bank.slot(),
))