2019-02-16 20:58:07 -07:00
|
|
|
use crate::bank_forks::BankForks;
|
2019-02-16 03:26:21 -07:00
|
|
|
use crate::blocktree::Blocktree;
|
|
|
|
use crate::entry::{Entry, EntrySlice};
|
2019-04-19 02:39:44 -07:00
|
|
|
use crate::leader_schedule_cache::LeaderScheduleCache;
|
2019-02-16 14:17:37 -07:00
|
|
|
use rayon::prelude::*;
|
2019-02-18 23:26:22 -07:00
|
|
|
use solana_metrics::counter::Counter;
|
2019-04-05 10:42:54 -06:00
|
|
|
use solana_runtime::bank::Bank;
|
2019-04-02 03:55:42 -07:00
|
|
|
use solana_runtime::locked_accounts_results::LockedAccountsResults;
|
2019-02-19 18:31:56 -08:00
|
|
|
use solana_sdk::genesis_block::GenesisBlock;
|
|
|
|
use solana_sdk::timing::duration_as_ms;
|
2019-03-02 16:35:13 -08:00
|
|
|
use solana_sdk::timing::MAX_RECENT_BLOCKHASHES;
|
2019-04-25 13:37:30 -07:00
|
|
|
use solana_sdk::transaction::Result;
|
2019-03-13 13:47:09 -06:00
|
|
|
use std::result;
|
2019-02-28 12:09:19 -08:00
|
|
|
use std::sync::Arc;
|
2019-04-20 20:17:57 -07:00
|
|
|
use std::time::{Duration, Instant};
|
2019-02-16 03:26:21 -07:00
|
|
|
|
2019-02-16 14:17:37 -07:00
|
|
|
/// Returns a clone of the first `Err` in `results`, or `Ok(())` when every
/// result succeeded.
fn first_err(results: &[Result<()>]) -> Result<()> {
    match results.iter().find(|r| r.is_err()) {
        Some(err) => err.clone(),
        None => Ok(()),
    }
}
|
|
|
|
|
2019-04-02 03:55:42 -07:00
|
|
|
/// Executes a batch of entries in parallel against `bank` and commits the results.
///
/// Each element pairs an entry with the account locks the caller already took for
/// its transactions (`LockedAccountsResults`); the caller guarantees the entries
/// in one batch have no conflicting account locks, which is what makes the
/// parallel execution safe.
///
/// Returns the first per-transaction error encountered across the whole batch
/// (transactions after an error in the same entry are still executed), or
/// `Ok(())` if everything committed cleanly.
fn par_execute_entries(bank: &Bank, entries: &[(&Entry, LockedAccountsResults)]) -> Result<()> {
    inc_new_counter_info!("bank-par_execute_entries-count", entries.len());
    let results: Vec<Result<()>> = entries
        .into_par_iter()
        .map(|(e, locked_accounts)| {
            let results = bank.load_execute_and_commit_transactions(
                &e.transactions,
                locked_accounts,
                MAX_RECENT_BLOCKHASHES,
            );
            // Track only the first failure for this entry; note this local
            // shadows the `first_err` function inside the closure.
            let mut first_err = None;
            for r in results {
                if let Err(ref e) = r {
                    if first_err.is_none() {
                        first_err = Some(r.clone());
                    }
                    // Errors the bank cannot commit are unexpected during
                    // replay; surface them loudly via log + metrics.
                    if !Bank::can_commit(&r) {
                        warn!("Unexpected validator error: {:?}", e);
                        solana_metrics::submit(
                            solana_metrics::influxdb::Point::new("validator_process_entry_error")
                                .add_field(
                                    "error",
                                    solana_metrics::influxdb::Value::String(format!("{:?}", e)),
                                )
                                .to_owned(),
                        )
                    }
                }
            }
            first_err.unwrap_or(Ok(()))
        })
        .collect();

    first_err(&results)
}
|
|
|
|
|
2019-03-12 16:46:41 -07:00
|
|
|
/// Process an ordered list of entries in parallel
/// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry
/// 2. Process the locked group in parallel
/// 3. Register the `Tick` if it's available
/// 4. Update the leader scheduler, goto 1
pub fn process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
    // accumulator for entries that can be processed in parallel
    let mut mt_group = vec![];
    for entry in entries {
        if entry.is_tick() {
            // if its a tick, execute the group and register the tick
            par_execute_entries(bank, &mt_group)?;
            bank.register_tick(&entry.hash);
            mt_group = vec![];
            continue;
        }
        // try to lock the accounts
        let lock_results = bank.lock_accounts(&entry.transactions);
        // if any of the locks error out
        // execute the current group
        if first_err(lock_results.locked_accounts_results()).is_err() {
            // A lock conflict with an earlier entry in the group: flush the
            // group first, then retry the lock for this entry.
            par_execute_entries(bank, &mt_group)?;
            // Drop all the locks on accounts by clearing the LockedAccountsFinalizer's in the
            // mt_group
            mt_group = vec![];
            // NOTE: the explicit `drop` must precede the re-lock below — the
            // failed lock_results still hold whatever locks did succeed.
            drop(lock_results);
            let lock_results = bank.lock_accounts(&entry.transactions);
            mt_group.push((entry, lock_results));
        } else {
            // push the entry to the mt_group
            mt_group.push((entry, lock_results));
        }
    }
    // Flush whatever remains after the final entry (no trailing tick case).
    par_execute_entries(bank, &mt_group)?;
    Ok(())
}
|
|
|
|
|
2019-02-20 15:42:35 -08:00
|
|
|
/// Summary of one fork head discovered by `process_blocktree`.
#[derive(Debug, PartialEq)]
pub struct BankForksInfo {
    /// Slot of the last (head) bank on this fork
    pub bank_slot: u64,
    /// Number of entries replayed along this fork, including slot 0's
    /// pseudo-tick entry
    pub entry_height: u64,
}
|
|
|
|
|
2019-03-13 13:47:09 -06:00
|
|
|
/// Errors surfaced by `process_blocktree`.
#[derive(Debug)]
pub enum BlocktreeProcessorError {
    /// The ledger failed verification: missing slot metadata or entries,
    /// a broken proof-of-history chain, or a transaction replay failure.
    LedgerVerificationFailed,
}
|
|
|
|
|
2019-02-16 12:00:35 -07:00
|
|
|
/// Replays the ledger in `blocktree` from slot 0, verifying proof of history and
/// executing every full slot's transactions.
///
/// Walks the slot tree depth-first (lowest slot next, via the reverse-sorted
/// `pending_slots` stack), creating a child bank per fork point. Slots marked as
/// roots in the blocktree cause a squash and prune all other pending forks.
///
/// Returns the resulting `BankForks`, one `BankForksInfo` per surviving fork
/// head, and the `LeaderScheduleCache` rooted appropriately; fails with
/// `LedgerVerificationFailed` on any missing/invalid ledger data.
pub fn process_blocktree(
    genesis_block: &GenesisBlock,
    blocktree: &Blocktree,
    account_paths: Option<String>,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
    let now = Instant::now();
    info!("processing ledger...");
    // Setup bank for slot 0
    let mut pending_slots = {
        let slot = 0;
        let bank = Arc::new(Bank::new_with_paths(&genesis_block, account_paths));
        let entry_height = 0;
        let last_entry_hash = bank.last_blockhash();

        // Load the metadata for this slot
        let meta = blocktree
            .meta(slot)
            .map_err(|err| {
                warn!("Failed to load meta for slot {}: {:?}", slot, err);
                BlocktreeProcessorError::LedgerVerificationFailed
            })?
            .unwrap();

        vec![(slot, meta, bank, entry_height, last_entry_hash)]
    };

    // `.2` is the slot-0 bank created above.
    let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0);

    let mut fork_info = vec![];
    let mut last_status_report = Instant::now();
    while !pending_slots.is_empty() {
        let (slot, meta, bank, mut entry_height, mut last_entry_hash) =
            pending_slots.pop().unwrap();

        // Periodic progress logging for long replays.
        if last_status_report.elapsed() > Duration::from_secs(2) {
            info!("processing ledger...block {}", slot);
            last_status_report = Instant::now();
        }

        // Fetch all entries for this slot
        let mut entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
            warn!("Failed to load entries for slot {}: {:?}", slot, err);
            BlocktreeProcessorError::LedgerVerificationFailed
        })?;

        if slot == 0 {
            // The first entry in the ledger is a pseudo-tick used only to ensure the number of ticks
            // in slot 0 is the same as the number of ticks in all subsequent slots. It is not
            // processed by the bank, skip over it.
            if entries.is_empty() {
                warn!("entry0 not present");
                return Err(BlocktreeProcessorError::LedgerVerificationFailed);
            }
            let entry0 = entries.remove(0);
            if !(entry0.is_tick() && entry0.verify(&last_entry_hash)) {
                warn!("Ledger proof of history failed at entry0");
                return Err(BlocktreeProcessorError::LedgerVerificationFailed);
            }
            last_entry_hash = entry0.hash;
            entry_height += 1;
        }

        if !entries.is_empty() {
            // Verify the PoH chain for the whole slot before executing it.
            if !entries.verify(&last_entry_hash) {
                warn!(
                    "Ledger proof of history failed at slot: {}, entry: {}",
                    slot, entry_height
                );
                return Err(BlocktreeProcessorError::LedgerVerificationFailed);
            }

            process_entries(&bank, &entries).map_err(|err| {
                warn!("Failed to process entries for slot {}: {:?}", slot, err);
                BlocktreeProcessorError::LedgerVerificationFailed
            })?;

            last_entry_hash = entries.last().unwrap().hash;
            entry_height += entries.len() as u64;
        }

        bank.freeze(); // all banks handled by this routine are created from complete slots

        if blocktree.is_root(slot) {
            // A confirmed root: advance the schedule cache, squash state, and
            // discard every other fork accumulated so far.
            leader_schedule_cache.set_root(slot);
            bank.squash();
            pending_slots.clear();
            fork_info.clear();
        }

        if meta.next_slots.is_empty() {
            // Reached the end of this fork.  Record the final entry height and last entry.hash
            let bfi = BankForksInfo {
                bank_slot: slot,
                entry_height,
            };
            fork_info.push((bank, bfi));
            continue;
        }

        // This is a fork point, create a new child bank for each fork
        for next_slot in meta.next_slots {
            let next_meta = blocktree
                .meta(next_slot)
                .map_err(|err| {
                    warn!("Failed to load meta for slot {}: {:?}", slot, err);
                    BlocktreeProcessorError::LedgerVerificationFailed
                })?
                .unwrap();

            // only process full slots in blocktree_processor, replay_stage
            // handles any partials
            if next_meta.is_full() {
                let next_bank = Arc::new(Bank::new_from_parent(
                    &bank,
                    &leader_schedule_cache
                        .slot_leader_at(next_slot, Some(&bank))
                        .unwrap(),
                    next_slot,
                ));
                trace!("Add child bank for slot={}", next_slot);
                // bank_forks.insert(*next_slot, child_bank);
                pending_slots.push((
                    next_slot,
                    next_meta,
                    next_bank,
                    entry_height,
                    last_entry_hash,
                ));
            } else {
                // Child slot isn't full yet: the current bank is this fork's head.
                let bfi = BankForksInfo {
                    bank_slot: slot,
                    entry_height,
                };
                fork_info.push((bank.clone(), bfi));
            }
        }

        // reverse sort by slot, so the next slot to be processed can be pop()ed
        // TODO: remove me once leader_scheduler can hang with out-of-order slots?
        pending_slots.sort_by(|a, b| b.0.cmp(&a.0));
    }

    let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip();
    let bank_forks = BankForks::new_from_banks(&banks);
    info!(
        "processing ledger...complete in {}ms, forks={}...",
        duration_as_ms(&now.elapsed()),
        bank_forks_info.len(),
    );

    Ok((bank_forks, bank_forks_info, leader_schedule_cache))
}
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
|
|
|
use super::*;
|
2019-02-26 16:35:00 -08:00
|
|
|
use crate::blocktree::create_new_tmp_ledger;
|
2019-02-19 19:40:23 -08:00
|
|
|
use crate::blocktree::tests::entries_to_blobs;
|
2019-02-20 16:21:32 -07:00
|
|
|
use crate::entry::{create_ticks, next_entry, Entry};
|
2019-02-18 23:26:22 -07:00
|
|
|
use solana_sdk::genesis_block::GenesisBlock;
|
2019-02-28 12:59:26 -08:00
|
|
|
use solana_sdk::hash::Hash;
|
2019-04-11 11:51:34 -07:00
|
|
|
use solana_sdk::instruction::InstructionError;
|
2019-03-30 21:37:33 -06:00
|
|
|
use solana_sdk::pubkey::Pubkey;
|
2019-02-16 03:26:21 -07:00
|
|
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
2019-04-03 09:45:57 -06:00
|
|
|
use solana_sdk::system_transaction;
|
2019-03-13 14:37:24 -06:00
|
|
|
use solana_sdk::transaction::TransactionError;
|
2019-02-16 03:26:21 -07:00
|
|
|
|
2019-02-19 19:40:23 -08:00
|
|
|
fn fill_blocktree_slot_with_ticks(
|
|
|
|
blocktree: &Blocktree,
|
2019-02-21 19:46:04 -07:00
|
|
|
ticks_per_slot: u64,
|
2019-02-19 19:40:23 -08:00
|
|
|
slot: u64,
|
|
|
|
parent_slot: u64,
|
2019-03-01 08:26:47 -08:00
|
|
|
last_entry_hash: Hash,
|
2019-02-19 19:40:23 -08:00
|
|
|
) -> Hash {
|
2019-03-01 08:26:47 -08:00
|
|
|
let entries = create_ticks(ticks_per_slot, last_entry_hash);
|
2019-03-01 08:57:42 -08:00
|
|
|
let last_entry_hash = entries.last().unwrap().hash;
|
2019-02-19 19:40:23 -08:00
|
|
|
|
2019-03-04 19:22:23 -08:00
|
|
|
let blobs = entries_to_blobs(&entries, slot, parent_slot, true);
|
2019-02-19 19:40:23 -08:00
|
|
|
blocktree.insert_data_blobs(blobs.iter()).unwrap();
|
|
|
|
|
2019-03-01 08:26:47 -08:00
|
|
|
last_entry_hash
|
2019-02-19 19:40:23 -08:00
|
|
|
}
|
|
|
|
|
2019-02-26 09:18:24 -08:00
|
|
|
#[test]
fn test_process_blocktree_with_incomplete_slot() {
    solana_logger::setup();

    let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
    let ticks_per_slot = genesis_block.ticks_per_slot;

    /*
      Build a blocktree in the ledger with the following fork structure:

           slot 0 (all ticks)
             |
           slot 1 (all ticks but one)
             |
           slot 2 (all ticks)

       where slot 1 is incomplete (missing 1 tick at the end)
    */

    // Create a new ledger with slot 0 full of ticks
    let (ledger_path, mut blockhash) = create_new_tmp_ledger!(&genesis_block);
    debug!("ledger_path: {:?}", ledger_path);

    let blocktree =
        Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");

    // Write slot 1
    // slot 1, points at slot 0.  Missing one tick
    {
        let parent_slot = 0;
        let slot = 1;
        let mut entries = create_ticks(ticks_per_slot, blockhash);
        blockhash = entries.last().unwrap().hash;

        // throw away last one
        entries.pop();

        // `is_last == false`: slot 1 is deliberately left incomplete.
        let blobs = entries_to_blobs(&entries, slot, parent_slot, false);
        blocktree.insert_data_blobs(blobs.iter()).unwrap();
    }

    // slot 2, points at slot 1
    fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);

    let (mut _bank_forks, bank_forks_info, _) =
        process_blocktree(&genesis_block, &blocktree, None).unwrap();

    // Replay must stop before the incomplete slot: only the slot-0 fork exists.
    assert_eq!(bank_forks_info.len(), 1);
    assert_eq!(
        bank_forks_info[0],
        BankForksInfo {
            bank_slot: 0, // slot 1 isn't "full", we stop at slot zero
            entry_height: ticks_per_slot,
        }
    );
}
|
|
|
|
|
2019-04-29 12:29:14 -07:00
|
|
|
#[test]
fn test_process_blocktree_with_two_forks_and_squash() {
    solana_logger::setup();

    let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
    let ticks_per_slot = genesis_block.ticks_per_slot;

    // Create a new ledger with slot 0 full of ticks
    let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
    debug!("ledger_path: {:?}", ledger_path);
    let mut last_entry_hash = blockhash;

    /*
        Build a blocktree in the ledger with the following fork structure:

             slot 0
               |
             slot 1
             /   \
        slot 2    |
          |       |
        slot 3    |
                  |
                slot 4 <-- set_root(true)

    */
    let blocktree =
        Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");

    // Fork 1, ending at slot 3
    let last_slot1_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash);
    last_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash);
    let last_fork1_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash);

    // Fork 2, ending at slot 4
    let last_fork2_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash);

    info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
    info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

    // Rooting slot 4 should prune fork 1 during replay and squash fork 2.
    blocktree.set_root(4).unwrap();

    let (bank_forks, bank_forks_info, _) =
        process_blocktree(&genesis_block, &blocktree, None).unwrap();

    assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root

    assert_eq!(
        bank_forks_info[0],
        BankForksInfo {
            bank_slot: 4, // Fork 2's head is slot 4
            entry_height: ticks_per_slot * 3,
        }
    );
    // The squash collapsed all ancestors: the rooted bank has no parents left.
    assert!(&bank_forks[4]
        .parents()
        .iter()
        .map(|bank| bank.slot())
        .collect::<Vec<_>>()
        .is_empty());

    // Ensure bank_forks holds the right banks
    for info in bank_forks_info {
        assert_eq!(bank_forks[info.bank_slot].slot(), info.bank_slot);
        assert!(bank_forks[info.bank_slot].is_frozen());
    }
}
|
|
|
|
|
2019-02-19 19:40:23 -08:00
|
|
|
#[test]
fn test_process_blocktree_with_two_forks() {
    solana_logger::setup();

    let (genesis_block, _mint_keypair) = GenesisBlock::new(10_000);
    let ticks_per_slot = genesis_block.ticks_per_slot;

    // Create a new ledger with slot 0 full of ticks
    let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
    debug!("ledger_path: {:?}", ledger_path);
    let mut last_entry_hash = blockhash;

    /*
        Build a blocktree in the ledger with the following fork structure:

             slot 0
               |
             slot 1 <-- set_root(true)
             /   \
        slot 2    |
          |       |
        slot 3    |
                  |
                slot 4

    */
    let blocktree =
        Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");

    // Fork 1, ending at slot 3
    let last_slot1_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash);
    last_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash);
    let last_fork1_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash);

    // Fork 2, ending at slot 4
    let last_fork2_entry_hash =
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash);

    info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
    info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

    // Root slot 1 — both forks descend from it, so both should survive replay.
    blocktree.set_root(0).unwrap();
    blocktree.set_root(1).unwrap();

    let (bank_forks, bank_forks_info, _) =
        process_blocktree(&genesis_block, &blocktree, None).unwrap();

    assert_eq!(bank_forks_info.len(), 2); // There are two forks
    assert_eq!(
        bank_forks_info[0],
        BankForksInfo {
            bank_slot: 3, // Fork 1's head is slot 3
            entry_height: ticks_per_slot * 4,
        }
    );
    // The squash at the slot-1 root trimmed slot 0 from the parent chain.
    assert_eq!(
        &bank_forks[3]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .collect::<Vec<_>>(),
        &[2, 1]
    );
    assert_eq!(
        bank_forks_info[1],
        BankForksInfo {
            bank_slot: 4, // Fork 2's head is slot 4
            entry_height: ticks_per_slot * 3,
        }
    );
    assert_eq!(
        &bank_forks[4]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .collect::<Vec<_>>(),
        &[1]
    );

    // Ensure bank_forks holds the right banks
    for info in bank_forks_info {
        assert_eq!(bank_forks[info.bank_slot].slot(), info.bank_slot);
        assert!(bank_forks[info.bank_slot].is_frozen());
    }
}
|
|
|
|
|
2019-02-16 14:17:37 -07:00
|
|
|
#[test]
fn test_first_err() {
    use solana_sdk::transaction::TransactionError::{AccountInUse, DuplicateSignature};

    // All-Ok input aggregates to Ok.
    assert_eq!(first_err(&[Ok(())]), Ok(()));

    // Table of (input, expected): the earliest error wins regardless of what
    // follows it, and leading Ok entries are skipped over.
    let cases: Vec<(Vec<Result<()>>, Result<()>)> = vec![
        (
            vec![Ok(()), Err(DuplicateSignature)],
            Err(DuplicateSignature),
        ),
        (
            vec![Ok(()), Err(DuplicateSignature), Err(AccountInUse)],
            Err(DuplicateSignature),
        ),
        (
            vec![Ok(()), Err(AccountInUse), Err(DuplicateSignature)],
            Err(AccountInUse),
        ),
        (
            vec![Err(AccountInUse), Ok(()), Err(DuplicateSignature)],
            Err(AccountInUse),
        ),
    ];
    for (input, expected) in cases {
        assert_eq!(first_err(&input), expected);
    }
}
|
|
|
|
|
|
|
|
#[test]
fn test_process_empty_entry_is_registered() {
    solana_logger::setup();

    let (genesis_block, mint_keypair) = GenesisBlock::new(2);
    let bank = Bank::new(&genesis_block);
    let payee = Keypair::new();

    // A full slot's worth of ticks (minus one), ending in the blockhash the
    // transaction below will reference.
    let slot_entries = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash());
    let tx = system_transaction::create_user_account(
        &mint_keypair,
        &payee.pubkey(),
        1,
        slot_entries.last().unwrap().hash,
        0,
    );

    // First, ensure the TX is rejected because of the unregistered last ID
    assert_eq!(
        bank.process_transaction(&tx),
        Err(TransactionError::BlockhashNotFound)
    );

    // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
    process_entries(&bank, &slot_entries).unwrap();
    assert_eq!(bank.process_transaction(&tx), Ok(()));
}
|
|
|
|
|
2019-02-20 15:42:35 -08:00
|
|
|
#[test]
fn test_process_ledger_simple() {
    solana_logger::setup();
    let leader_pubkey = Pubkey::new_rand();
    let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(100, &leader_pubkey, 50);
    let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger!(&genesis_block);
    debug!("ledger_path: {:?}", ledger_path);

    // Build slot 1 by hand: 3 successful transfers, each followed by a
    // transfer that must fail (payer only received 1 lamport but sends 42).
    let mut entries = vec![];
    let blockhash = genesis_block.hash();
    for _ in 0..3 {
        // Transfer one token from the mint to a random account
        let keypair = Keypair::new();
        let tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair.pubkey(),
            1,
            blockhash,
            0,
        );
        let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
        last_entry_hash = entry.hash;
        entries.push(entry);

        // Add a second Transaction that will produce a
        // InstructionError<0, ResultWithNegativeLamports> error when processed
        let keypair2 = Keypair::new();
        let tx = system_transaction::create_user_account(
            &keypair,
            &keypair2.pubkey(),
            42,
            blockhash,
            0,
        );
        let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
        last_entry_hash = entry.hash;
        entries.push(entry);
    }

    // Fill up the rest of slot 1 with ticks
    entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash));

    let blocktree =
        Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
    blocktree
        .write_entries(1, 0, 0, genesis_block.ticks_per_slot, &entries)
        .unwrap();
    // slot 0 (ticks from the tmp ledger) + the hand-built slot 1 entries.
    let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
    let (bank_forks, bank_forks_info, _) =
        process_blocktree(&genesis_block, &blocktree, None).unwrap();

    assert_eq!(bank_forks_info.len(), 1);
    assert_eq!(
        bank_forks_info[0],
        BankForksInfo {
            bank_slot: 1,
            entry_height,
        }
    );

    let bank = bank_forks[1].clone();
    // Only the 3 one-lamport transfers commit; the failing ones don't debit.
    assert_eq!(bank.get_balance(&mint_keypair.pubkey()), 50 - 3);
    assert_eq!(bank.tick_height(), 2 * genesis_block.ticks_per_slot - 1);
    assert_eq!(bank.last_blockhash(), entries.last().unwrap().hash);
}
|
2019-02-16 14:17:37 -07:00
|
|
|
|
2019-02-28 12:09:19 -08:00
|
|
|
#[test]
fn test_process_ledger_with_one_tick_per_slot() {
    // A degenerate ledger: every slot holds exactly one tick.
    let (mut genesis_block, _mint_keypair) = GenesisBlock::new(123);
    genesis_block.ticks_per_slot = 1;
    let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);

    let blocktree = Blocktree::open(&ledger_path).unwrap();
    let (bank_forks, bank_forks_info, _) =
        process_blocktree(&genesis_block, &blocktree, None).unwrap();

    // Slot 0's single entry is the pseudo-tick, so nothing was replayed.
    assert_eq!(bank_forks_info.len(), 1);
    let expected = BankForksInfo {
        bank_slot: 0,
        entry_height: 1,
    };
    assert_eq!(bank_forks_info[0], expected);

    let slot0_bank = bank_forks[0].clone();
    assert_eq!(slot0_bank.tick_height(), 0);
}
|
|
|
|
|
2019-02-16 14:17:37 -07:00
|
|
|
#[test]
fn test_process_entries_tick() {
    let (genesis_block, _mint_keypair) = GenesisBlock::new(1000);
    let bank = Bank::new(&genesis_block);

    // A fresh bank starts at tick height zero.
    assert_eq!(bank.tick_height(), 0);

    // Processing a single tick entry registers it and bumps the tick height.
    let tick = next_entry(&genesis_block.hash(), 1, vec![]);
    let ticks = [tick.clone()];
    assert_eq!(process_entries(&bank, &ticks), Ok(()));
    assert_eq!(bank.tick_height(), 1);
}
|
|
|
|
|
|
|
|
#[test]
fn test_process_entries_2_entries_collision() {
    let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
    let bank = Bank::new(&genesis_block);
    let keypair1 = Keypair::new();
    let keypair2 = Keypair::new();

    let blockhash = bank.last_blockhash();

    // Two entries whose transactions share the mint account: both must be
    // processed, and since neither entry is a tick, no tick gets registered.
    let pay_two = |to: &Pubkey| {
        system_transaction::create_user_account(&mint_keypair, to, 2, bank.last_blockhash(), 0)
    };
    let entry_1 = next_entry(&blockhash, 1, vec![pay_two(&keypair1.pubkey())]);
    let entry_2 = next_entry(&entry_1.hash, 1, vec![pay_two(&keypair2.pubkey())]);
    assert_eq!(process_entries(&bank, &[entry_1, entry_2]), Ok(()));

    assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
    assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
    // No tick entries were processed, so the recent blockhash is unchanged.
    assert_eq!(bank.last_blockhash(), blockhash);
}
|
|
|
|
|
|
|
|
    #[test]
    fn test_process_entries_2_txes_collision() {
        // Verifies that process_entries handles an account-lock conflict
        // between two consecutive entries without losing any transaction.
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();

        // fund: put 4 in each of 1 and 2
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));

        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
        // Entry 1: keypair1 sends 1 back to the mint.
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![system_transaction::create_user_account(
                &keypair1,
                &mint_keypair.pubkey(),
                1,
                bank.last_blockhash(),
                0,
            )],
        );

        // Entry 2: first tx touches disjoint accounts (keypair2 -> keypair3),
        // second tx re-uses keypair1 and the mint, colliding with entry 1's locks.
        let entry_2_to_3_mint_to_1 = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::create_user_account(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                    0,
                ), // should be fine
                system_transaction::create_user_account(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                    0,
                ), // will collide
            ],
        );

        assert_eq!(
            process_entries(&bank, &[entry_1_to_mint, entry_2_to_3_mint_to_1]),
            Ok(())
        );

        // keypair1: 4 funded - 1 (entry 1) - 2 (entry 2) = 1;
        // keypair2: 4 funded - 2 sent = 2; keypair3 received 2.
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
    }
|
|
|
|
|
2019-04-02 03:55:42 -07:00
|
|
|
    #[test]
    fn test_process_entries_2_txes_collision_and_error() {
        // Like test_process_entries_2_txes_collision, but the first entry
        // also contains a transaction that fails (bad blockhash). Verifies
        // that processing reports the error AND releases all account locks.
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        // fund: put 4 in each of 1 and 2
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_));

        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![
                system_transaction::create_user_account(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    1,
                    bank.last_blockhash(),
                    0,
                ),
                system_transaction::transfer(
                    &keypair4,
                    &keypair4.pubkey(),
                    1,
                    Hash::default(), // Should cause a transaction failure with BlockhashNotFound
                    0,
                ),
            ],
        );

        let entry_2_to_3_mint_to_1 = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::create_user_account(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                    0,
                ), // should be fine
                system_transaction::create_user_account(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                    0,
                ), // will collide
            ],
        );

        // The failing tx in entry 1 must surface as an error from process_entries.
        assert!(process_entries(
            &bank,
            &[entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()]
        )
        .is_err());

        // First transaction in first entry succeeded, so keypair1 lost 1 lamport
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);

        // Check all accounts are unlocked
        let txs1 = &entry_1_to_mint.transactions[..];
        let txs2 = &entry_2_to_3_mint_to_1.transactions[..];
        let locked_accounts1 = bank.lock_accounts(txs1);
        for result in locked_accounts1.locked_accounts_results() {
            assert!(result.is_ok());
        }
        // txs1 and txs2 have accounts that conflict, so we must drop txs1 first
        drop(locked_accounts1);
        let locked_accounts2 = bank.lock_accounts(txs2);
        for result in locked_accounts2.locked_accounts_results() {
            assert!(result.is_ok());
        }
    }
|
|
|
|
|
2019-02-16 14:17:37 -07:00
|
|
|
    #[test]
    fn test_process_entries_2_entries_par() {
        // Verifies that two entries with fully disjoint account sets can be
        // processed (candidates for parallel execution) without a tick,
        // leaving the blockhash unchanged.
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        //load accounts
        let tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair1.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));
        let tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair2.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));

        // ensure bank can process 2 entries that do not have a common account and no tick is registered
        let blockhash = bank.last_blockhash();
        // entry 1: keypair1 -> keypair3; entry 2: keypair2 -> keypair4 (disjoint accounts)
        let tx = system_transaction::create_user_account(
            &keypair1,
            &keypair3.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
        let tx = system_transaction::create_user_account(
            &keypair2,
            &keypair4.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
        assert_eq!(process_entries(&bank, &[entry_1, entry_2]), Ok(()));
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
        // no tick was processed, so the blockhash must be unchanged
        assert_eq!(bank.last_blockhash(), blockhash);
    }
|
|
|
|
|
|
|
|
    #[test]
    fn test_process_entries_2_entries_tick() {
        // Verifies entry processing across a tick boundary: transactions
        // referencing the pre-tick blockhash must still land, and a
        // post-processing tx from a drained account must fail.
        let (genesis_block, mint_keypair) = GenesisBlock::new(1000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        //load accounts
        let tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair1.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));
        let tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair2.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        assert_eq!(bank.process_transaction(&tx), Ok(()));

        // register ticks until the bank rotates to a new blockhash, so that
        // `blockhash` below is a valid-but-no-longer-current recent hash
        let blockhash = bank.last_blockhash();
        while blockhash == bank.last_blockhash() {
            bank.register_tick(&Hash::default());
        }

        // ensure bank can process 2 entries that do not have a common account and tick is registered
        let tx =
            system_transaction::create_user_account(&keypair2, &keypair3.pubkey(), 1, blockhash, 0);
        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
        let tick = next_entry(&entry_1.hash, 1, vec![]);
        let tx = system_transaction::create_user_account(
            &keypair1,
            &keypair4.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
        assert_eq!(
            process_entries(&bank, &[entry_1.clone(), tick.clone(), entry_2.clone()]),
            Ok(())
        );
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);

        // ensure that an error is returned for an empty account (keypair2)
        let tx = system_transaction::create_user_account(
            &keypair2,
            &keypair3.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
        assert_eq!(
            process_entries(&bank, &[entry_3]),
            Err(TransactionError::AccountNotFound)
        );
    }
|
2019-04-11 11:51:34 -07:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn test_update_transaction_statuses() {
|
|
|
|
// Make sure instruction errors still update the signature cache
|
|
|
|
let (genesis_block, mint_keypair) = GenesisBlock::new(11_000);
|
|
|
|
let bank = Bank::new(&genesis_block);
|
|
|
|
let pubkey = Pubkey::new_rand();
|
|
|
|
bank.transfer(1_000, &mint_keypair, &pubkey).unwrap();
|
|
|
|
assert_eq!(bank.transaction_count(), 1);
|
|
|
|
assert_eq!(bank.get_balance(&pubkey), 1_000);
|
|
|
|
assert_eq!(
|
|
|
|
bank.transfer(10_001, &mint_keypair, &pubkey),
|
|
|
|
Err(TransactionError::InstructionError(
|
|
|
|
0,
|
|
|
|
InstructionError::new_result_with_negative_lamports(),
|
|
|
|
))
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
bank.transfer(10_001, &mint_keypair, &pubkey),
|
|
|
|
Err(TransactionError::DuplicateSignature)
|
|
|
|
);
|
|
|
|
|
|
|
|
// Make sure other errors don't update the signature cache
|
|
|
|
let tx = system_transaction::create_user_account(
|
|
|
|
&mint_keypair,
|
|
|
|
&pubkey,
|
|
|
|
1000,
|
|
|
|
Hash::default(),
|
|
|
|
0,
|
|
|
|
);
|
|
|
|
let signature = tx.signatures[0];
|
|
|
|
|
|
|
|
// Should fail with blockhash not found
|
|
|
|
assert_eq!(
|
|
|
|
bank.process_transaction(&tx).map(|_| signature),
|
|
|
|
Err(TransactionError::BlockhashNotFound)
|
|
|
|
);
|
|
|
|
|
|
|
|
// Should fail again with blockhash not found
|
|
|
|
assert_eq!(
|
|
|
|
bank.process_transaction(&tx).map(|_| signature),
|
|
|
|
Err(TransactionError::BlockhashNotFound)
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
    #[test]
    fn test_update_transaction_statuses_fail() {
        // Verifies that a transaction rejected for AccountInUse is NOT
        // recorded in the signature cache, so it can be replayed later.
        let (genesis_block, mint_keypair) = GenesisBlock::new(11_000);
        let bank = Bank::new(&genesis_block);
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let success_tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair1.pubkey(),
            1,
            bank.last_blockhash(),
            0,
        );
        let fail_tx = system_transaction::create_user_account(
            &mint_keypair,
            &keypair2.pubkey(),
            2,
            bank.last_blockhash(),
            0,
        );

        // Both transactions sign with the mint, so the second one in the same
        // entry conflicts on the mint account's lock.
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![
                success_tx,
                fail_tx.clone(), // will collide
            ],
        );

        assert_eq!(
            process_entries(&bank, &[entry_1_to_mint]),
            Err(TransactionError::AccountInUse)
        );

        // Should not see duplicate signature error
        assert_eq!(bank.process_transaction(&fail_tx), Ok(()));
    }
|
2019-02-16 03:26:21 -07:00
|
|
|
}
|