Verify number of hashes for each block of entries (#6262)

* Verify number of hashes for each block of entries

* Fix blocktree processor tick check

* Rebase once more
Justin Starry
2019-10-31 16:38:50 -04:00
committed by GitHub
parent 111942a47d
commit e8e5ddc55d
19 changed files with 537 additions and 139 deletions
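At a high level, this change makes the blocktree processor enforce four invariants before replaying a slot's entries: the slot must carry exactly the expected number of ticks, it must end with a tick entry, every tick must account for the bank's hashes_per_tick, and the PoH hash chain must verify. The sketch below is a simplified condensation of that flow, not the literal patched code; the helper name check_slot_invariants is hypothetical, while Entry, EntrySlice, BlockError, and the method names follow the diff below (crate paths assume solana_ledger).

use solana_ledger::block_error::BlockError;
use solana_ledger::entry::{Entry, EntrySlice};
use solana_sdk::hash::Hash;

// Hypothetical condensation of the checks added to verify_and_process_slot_entries.
fn check_slot_invariants(
    entries: &[Entry],            // all entries for the slot; assumed non-empty
    tick_height: u64,             // bank.tick_height() before this slot
    max_tick_height: u64,         // bank.max_tick_height()
    hashes_per_tick: Option<u64>, // from bank.hashes_per_tick(); None disables the check
    last_entry_hash: &Hash,       // PoH hash the slot must chain from
) -> Result<(), BlockError> {
    // 1. The slot must contain exactly the ticks needed to reach max_tick_height.
    if tick_height + entries.tick_count() != max_tick_height {
        return Err(BlockError::InvalidTickCount);
    }
    // 2. The slot must end with a tick entry; trailing transaction entries are rejected.
    if !entries.last().unwrap().is_tick() {
        return Err(BlockError::TrailingEntry);
    }
    // 3. Every tick must account for exactly hashes_per_tick hashes.
    if let Some(hashes_per_tick) = hashes_per_tick {
        if !entries.verify_tick_hash_count(&mut 0, hashes_per_tick) {
            return Err(BlockError::InvalidTickHashCount);
        }
    }
    // 4. The PoH hash chain itself must verify against the previous slot's last hash.
    if !entries.verify(last_entry_hash) {
        return Err(BlockError::InvalidEntryHash);
    }
    Ok(())
}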

15 ledger/src/block_error.rs Normal file
View File

@ -0,0 +1,15 @@
#[derive(Debug, PartialEq)]
pub enum BlockError {
/// Block entry hashes must all be valid
InvalidEntryHash,
/// Blocks cannot have extra or missing ticks
InvalidTickCount,
/// All ticks must contain the same number of hashes within a block
InvalidTickHashCount,
/// Blocks must end in a tick entry; trailing transaction entries are not allowed, to guarantee
/// that each block has the same number of hashes
TrailingEntry,
}
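These variants surface through BlocktreeProcessorError::InvalidBlock (see the From impl in the blocktree_processor diff below). A caller might report them roughly as follows; the describe helper is hypothetical and the solana_ledger module paths are assumptions, not part of the patch.

use solana_ledger::block_error::BlockError;
use solana_ledger::blocktree_processor::BlocktreeProcessorError;

// Hypothetical helper mapping each invariant violation to a human-readable reason.
fn describe(err: &BlocktreeProcessorError) -> &'static str {
    use BlockError::*;
    match err {
        BlocktreeProcessorError::InvalidBlock(InvalidEntryHash) => "PoH entry hash chain failed to verify",
        BlocktreeProcessorError::InvalidBlock(InvalidTickCount) => "slot contains too many or too few ticks",
        BlocktreeProcessorError::InvalidBlock(InvalidTickHashCount) => "a tick did not account for hashes_per_tick hashes",
        BlocktreeProcessorError::InvalidBlock(TrailingEntry) => "slot did not end with a tick entry",
        _ => "other blocktree processor error",
    }
}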

View File

@ -888,18 +888,12 @@ impl Blocktree {
keypair: &Arc<Keypair>,
entries: Vec<Entry>,
) -> Result<usize> {
assert!(num_ticks_in_start_slot < ticks_per_slot);
let mut remaining_ticks_in_slot = ticks_per_slot - num_ticks_in_start_slot;
let mut parent_slot = parent.map_or(start_slot.saturating_sub(1), |v| v);
let num_slots = (start_slot - parent_slot).max(1); // Note: slot 0 has parent slot 0
assert!(num_ticks_in_start_slot < num_slots * ticks_per_slot);
let mut remaining_ticks_in_slot = num_slots * ticks_per_slot - num_ticks_in_start_slot;
let mut current_slot = start_slot;
let mut parent_slot = parent.map_or(
if current_slot == 0 {
current_slot
} else {
current_slot - 1
},
|v| v,
);
let mut shredder = Shredder::new(current_slot, parent_slot, 0.0, keypair.clone())
.expect("Failed to create entry shredder");
let mut all_shreds = vec![];
@ -1686,14 +1680,14 @@ fn slot_has_updates(slot_meta: &SlotMeta, slot_meta_backup: &Option<SlotMeta>) -
//
// Returns the blockhash that can be used to append entries with.
pub fn create_new_ledger(ledger_path: &Path, genesis_block: &GenesisBlock) -> Result<Hash> {
let ticks_per_slot = genesis_block.ticks_per_slot;
Blocktree::destroy(ledger_path)?;
genesis_block.write(&ledger_path)?;
// Fill slot 0 with ticks that link back to the genesis_block to bootstrap the ledger.
let blocktree = Blocktree::open(ledger_path)?;
let entries = create_ticks(ticks_per_slot, genesis_block.hash());
let ticks_per_slot = genesis_block.ticks_per_slot;
let hashes_per_tick = genesis_block.poh_config.hashes_per_tick.unwrap_or(0);
let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_block.hash());
let last_hash = entries.last().unwrap().hash;
let shredder = Shredder::new(0, 0, 0.0, Arc::new(Keypair::new()))
@ -1787,16 +1781,18 @@ pub fn entries_to_test_shreds(
shredder.entries_to_shreds(&entries, is_full_slot, 0).0
}
// used for tests only
pub fn make_slot_entries(
slot: u64,
parent_slot: u64,
num_entries: u64,
) -> (Vec<Shred>, Vec<Entry>) {
let entries = create_ticks(num_entries, Hash::default());
let entries = create_ticks(num_entries, 0, Hash::default());
let shreds = entries_to_test_shreds(entries.clone(), slot, parent_slot, true);
(shreds, entries)
}
// used for tests only
pub fn make_many_slot_entries(
start_slot: u64,
num_slots: u64,
@ -1816,6 +1812,7 @@ pub fn make_many_slot_entries(
}
// Create shreds for slots that have a parent-child relationship defined by the input `chain`
// used for tests only
pub fn make_chaining_slot_entries(
chain: &[u64],
entries_per_slot: u64,
@ -1857,7 +1854,7 @@ pub mod tests {
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
let ledger = Blocktree::open(&ledger_path).unwrap();
let ticks = create_ticks(genesis_block.ticks_per_slot, genesis_block.hash());
let ticks = create_ticks(genesis_block.ticks_per_slot, 0, genesis_block.hash());
let entries = ledger.get_slot_entries(0, 0, None).unwrap();
assert_eq!(ticks, entries);
@ -1911,7 +1908,7 @@ pub mod tests {
let mut shreds_per_slot = vec![];
for i in 0..num_slots {
let mut new_ticks = create_ticks(ticks_per_slot, Hash::default());
let mut new_ticks = create_ticks(ticks_per_slot, 0, Hash::default());
let num_shreds = ledger
.write_entries(
i,
@ -2241,7 +2238,7 @@ pub mod tests {
let blocktree_path = get_tmp_ledger_path("test_get_slot_entries1");
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let entries = create_ticks(8, Hash::default());
let entries = create_ticks(8, 0, Hash::default());
let shreds = entries_to_test_shreds(entries[0..4].to_vec(), 1, 0, false);
blocktree
.insert_shreds(shreds, None)
@ -2276,7 +2273,7 @@ pub mod tests {
let num_slots = 5 as u64;
let mut index = 0;
for slot in 0..num_slots {
let entries = create_ticks(slot + 1, Hash::default());
let entries = create_ticks(slot + 1, 0, Hash::default());
let last_entry = entries.last().unwrap().clone();
let mut shreds =
entries_to_test_shreds(entries, slot, slot.saturating_sub(1), false);
@ -2308,13 +2305,13 @@ pub mod tests {
let num_slots = 5 as u64;
let shreds_per_slot = 5 as u64;
let entry_serialized_size =
bincode::serialized_size(&create_ticks(1, Hash::default())).unwrap();
bincode::serialized_size(&create_ticks(1, 0, Hash::default())).unwrap();
let entries_per_slot =
(shreds_per_slot * PACKET_DATA_SIZE as u64) / entry_serialized_size;
// Write entries
for slot in 0..num_slots {
let entries = create_ticks(entries_per_slot, Hash::default());
let entries = create_ticks(entries_per_slot, 0, Hash::default());
let shreds =
entries_to_test_shreds(entries.clone(), slot, slot.saturating_sub(1), false);
assert!(shreds.len() as u64 >= shreds_per_slot);
@ -3097,7 +3094,7 @@ pub mod tests {
assert!(gap > 3);
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1) + 1;
let entries = create_ticks(num_entries, Hash::default());
let entries = create_ticks(num_entries, 0, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true);
let num_shreds = shreds.len();
assert!(num_shreds > 1);
@ -3189,7 +3186,7 @@ pub mod tests {
assert_eq!(blocktree.find_missing_data_indexes(slot, 4, 3, 1), empty);
assert_eq!(blocktree.find_missing_data_indexes(slot, 1, 2, 0), empty);
let entries = create_ticks(100, Hash::default());
let entries = create_ticks(100, 0, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true);
assert!(shreds.len() > 2);
shreds.drain(2..);
@ -3231,7 +3228,7 @@ pub mod tests {
// Write entries
let num_entries = 10;
let entries = create_ticks(num_entries, Hash::default());
let entries = create_ticks(num_entries, 0, Hash::default());
let shreds = entries_to_test_shreds(entries, slot, 0, true);
let num_shreds = shreds.len();
@ -3746,7 +3743,7 @@ pub mod tests {
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let num_ticks = 8;
let entries = create_ticks(num_ticks, Hash::default());
let entries = create_ticks(num_ticks, 0, Hash::default());
let slot = 1;
let shreds = entries_to_test_shreds(entries, slot, 0, false);
let next_shred_index = shreds.len();

View File

@ -1,4 +1,5 @@
use crate::bank_forks::BankForks;
use crate::block_error::BlockError;
use crate::blocktree::Blocktree;
use crate::blocktree_meta::SlotMeta;
use crate::entry::{create_ticks, Entry, EntrySlice};
@ -173,9 +174,18 @@ pub struct BankForksInfo {
pub bank_slot: u64,
}
#[derive(Debug)]
#[derive(Debug, PartialEq)]
pub enum BlocktreeProcessorError {
LedgerVerificationFailed,
FailedToLoadEntries,
FailedToLoadMeta,
InvalidBlock(BlockError),
InvalidTransaction,
}
impl From<BlockError> for BlocktreeProcessorError {
fn from(block_error: BlockError) -> Self {
BlocktreeProcessorError::InvalidBlock(block_error)
}
}
/// Callback for accessing bank state while processing the blocktree
@ -277,7 +287,7 @@ pub fn process_blocktree_from_root(
Ok((bank_forks, bank_forks_info, leader_schedule_cache))
}
fn verify_and_process_entries(
fn verify_and_process_slot_entries(
bank: &Arc<Bank>,
entries: &[Entry],
last_entry_hash: Hash,
@ -285,9 +295,34 @@ fn verify_and_process_entries(
) -> result::Result<Hash, BlocktreeProcessorError> {
assert!(!entries.is_empty());
if opts.verify_ledger && !entries.verify(&last_entry_hash) {
warn!("Ledger proof of history failed at slot: {}", bank.slot());
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
if opts.verify_ledger {
let next_bank_tick_height = bank.tick_height() + entries.tick_count();
let max_bank_tick_height = bank.max_tick_height();
if next_bank_tick_height != max_bank_tick_height {
warn!(
"Invalid number of entry ticks found in slot: {}",
bank.slot()
);
return Err(BlockError::InvalidTickCount.into());
} else if !entries.last().unwrap().is_tick() {
warn!("Slot: {} did not end with a tick entry", bank.slot());
return Err(BlockError::TrailingEntry.into());
}
if let Some(hashes_per_tick) = bank.hashes_per_tick() {
if !entries.verify_tick_hash_count(&mut 0, *hashes_per_tick) {
warn!(
"Tick with invalid number of hashes found in slot: {}",
bank.slot()
);
return Err(BlockError::InvalidTickHashCount.into());
}
}
if !entries.verify(&last_entry_hash) {
warn!("Ledger proof of history failed at slot: {}", bank.slot());
return Err(BlockError::InvalidEntryHash.into());
}
}
process_entries_with_callback(bank, &entries, true, opts.entry_callback.as_ref()).map_err(
@ -297,7 +332,7 @@ fn verify_and_process_entries(
bank.slot(),
err
);
BlocktreeProcessorError::LedgerVerificationFailed
BlocktreeProcessorError::InvalidTransaction
},
)?;
@ -315,15 +350,10 @@ fn process_bank_0(
// Fetch all entries for this slot
let entries = blocktree.get_slot_entries(0, 0, None).map_err(|err| {
warn!("Failed to load entries for slot 0, err: {:?}", err);
BlocktreeProcessorError::LedgerVerificationFailed
BlocktreeProcessorError::FailedToLoadEntries
})?;
if entries.is_empty() {
warn!("entry0 not present");
return Err(BlocktreeProcessorError::LedgerVerificationFailed);
}
verify_and_process_entries(bank0, &entries, bank0.last_blockhash(), opts)?;
verify_and_process_slot_entries(bank0, &entries, bank0.last_blockhash(), opts)?;
bank0.freeze();
@ -355,7 +385,7 @@ fn process_next_slots(
.meta(*next_slot)
.map_err(|err| {
warn!("Failed to load meta for slot {}: {:?}", next_slot, err);
BlocktreeProcessorError::LedgerVerificationFailed
BlocktreeProcessorError::FailedToLoadMeta
})?
.unwrap();
@ -419,10 +449,10 @@ fn process_pending_slots(
// Fetch all entries for this slot
let entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
warn!("Failed to load entries for slot {}: {:?}", slot, err);
BlocktreeProcessorError::LedgerVerificationFailed
BlocktreeProcessorError::FailedToLoadEntries
})?;
verify_and_process_entries(&bank, &entries, last_entry_hash, opts)?;
verify_and_process_slot_entries(&bank, &entries, last_entry_hash, opts)?;
bank.freeze(); // all banks handled by this routine are created from complete slots
@ -463,7 +493,8 @@ pub fn fill_blocktree_slot_with_ticks(
parent_slot: u64,
last_entry_hash: Hash,
) -> Hash {
let entries = create_ticks(ticks_per_slot, last_entry_hash);
let num_slots = (slot - parent_slot).max(1); // Note: slot 0 has parent slot 0
let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
let last_entry_hash = entries.last().unwrap().hash;
blocktree
@ -486,7 +517,7 @@ pub fn fill_blocktree_slot_with_ticks(
pub mod tests {
use super::*;
use crate::blocktree::create_new_tmp_ledger;
use crate::entry::{create_ticks, next_entry, next_entry_mut, Entry};
use crate::entry::{create_ticks, next_entry, next_entry_mut};
use crate::genesis_utils::{
create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo,
};
@ -503,6 +534,140 @@ pub mod tests {
};
use std::sync::RwLock;
#[test]
fn test_process_blocktree_with_missing_hashes() {
solana_logger::setup();
let hashes_per_tick = 2;
let GenesisBlockInfo {
mut genesis_block, ..
} = create_genesis_block(10_000);
genesis_block.poh_config.hashes_per_tick = Some(hashes_per_tick);
let ticks_per_slot = genesis_block.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
let parent_slot = 0;
let slot = 1;
let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
blocktree
.write_entries(
slot,
0,
0,
ticks_per_slot,
Some(parent_slot),
true,
&Arc::new(Keypair::new()),
entries,
)
.expect("Expected to write shredded entries to blocktree");
let opts = ProcessOptions {
verify_ledger: true,
..ProcessOptions::default()
};
assert_eq!(
process_blocktree(&genesis_block, &blocktree, None, opts).err(),
Some(BlocktreeProcessorError::InvalidBlock(
BlockError::InvalidTickHashCount
)),
);
}
#[test]
fn test_process_blocktree_with_invalid_slot_tick_count() {
solana_logger::setup();
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let ticks_per_slot = genesis_block.ticks_per_slot;
// Create a new ledger with slot 0 full of ticks
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
let blocktree = Blocktree::open(&ledger_path).unwrap();
// Write slot 1 with one tick missing
let parent_slot = 0;
let slot = 1;
let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
blocktree
.write_entries(
slot,
0,
0,
ticks_per_slot,
Some(parent_slot),
true,
&Arc::new(Keypair::new()),
entries,
)
.expect("Expected to write shredded entries to blocktree");
let opts = ProcessOptions {
verify_ledger: true,
..ProcessOptions::default()
};
assert_eq!(
process_blocktree(&genesis_block, &blocktree, None, opts).err(),
Some(BlocktreeProcessorError::InvalidBlock(
BlockError::InvalidTickCount
)),
);
}
#[test]
fn test_process_blocktree_with_slot_with_trailing_entry() {
solana_logger::setup();
let GenesisBlockInfo {
mint_keypair,
genesis_block,
..
} = create_genesis_block(10_000);
let ticks_per_slot = genesis_block.ticks_per_slot;
let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
let blocktree = Blocktree::open(&ledger_path).unwrap();
let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
let trailing_entry = {
let keypair = Keypair::new();
let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
next_entry(&blockhash, 1, vec![tx])
};
entries.push(trailing_entry);
// Tricks blocktree into writing the trailing entry by lying that there is one more tick
// per slot.
let parent_slot = 0;
let slot = 1;
blocktree
.write_entries(
slot,
0,
0,
ticks_per_slot + 1,
Some(parent_slot),
true,
&Arc::new(Keypair::new()),
entries,
)
.expect("Expected to write shredded entries to blocktree");
let opts = ProcessOptions {
verify_ledger: true,
..ProcessOptions::default()
};
assert_eq!(
process_blocktree(&genesis_block, &blocktree, None, opts).err(),
Some(BlocktreeProcessorError::InvalidBlock(
BlockError::TrailingEntry
)),
);
}
#[test]
fn test_process_blocktree_with_incomplete_slot() {
solana_logger::setup();
@ -534,7 +699,7 @@ pub mod tests {
{
let parent_slot = 0;
let slot = 1;
let mut entries = create_ticks(ticks_per_slot, blockhash);
let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
blockhash = entries.last().unwrap().hash;
// throw away last one
@ -841,7 +1006,7 @@ pub mod tests {
} = create_genesis_block(2);
let bank = Arc::new(Bank::new(&genesis_block));
let keypair = Keypair::new();
let slot_entries = create_ticks(genesis_block.ticks_per_slot, genesis_block.hash());
let slot_entries = create_ticks(genesis_block.ticks_per_slot, 1, genesis_block.hash());
let tx = system_transaction::transfer(
&mint_keypair,
&keypair.pubkey(),
@ -865,11 +1030,13 @@ pub mod tests {
solana_logger::setup();
let leader_pubkey = Pubkey::new_rand();
let mint = 100;
let hashes_per_tick = 10;
let GenesisBlockInfo {
genesis_block,
mut genesis_block,
mint_keypair,
..
} = create_genesis_block_with_leader(mint, &leader_pubkey, 50);
genesis_block.poh_config.hashes_per_tick = Some(hashes_per_tick);
let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger!(&genesis_block);
debug!("ledger_path: {:?}", ledger_path);
@ -880,8 +1047,7 @@ pub mod tests {
// Transfer one token from the mint to a random account
let keypair = Keypair::new();
let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
last_entry_hash = entry.hash;
let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
entries.push(entry);
// Add a second Transaction that will produce a
@ -889,14 +1055,22 @@ pub mod tests {
let keypair2 = Keypair::new();
let tx =
system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 101, blockhash);
let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
last_entry_hash = entry.hash;
let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
entries.push(entry);
}
let remaining_hashes = hashes_per_tick - entries.len() as u64;
let tick_entry = next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]);
entries.push(tick_entry);
// Fill up the rest of slot 1 with ticks
entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash));
entries.extend(create_ticks(
genesis_block.ticks_per_slot - 1,
genesis_block.poh_config.hashes_per_tick.unwrap(),
last_entry_hash,
));
let last_blockhash = entries.last().unwrap().hash;
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
blocktree
@ -1004,7 +1178,11 @@ pub mod tests {
let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
let mut entries = vec![entry_1, entry_2];
entries.extend(create_ticks(genesis_block.ticks_per_slot, last_entry_hash));
entries.extend(create_ticks(
genesis_block.ticks_per_slot,
0,
last_entry_hash,
));
blocktree
.write_entries(
1,
@ -1683,7 +1861,8 @@ pub mod tests {
let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1));
bank1.squash();
let slot1_entries = blocktree.get_slot_entries(1, 0, None).unwrap();
verify_and_process_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts).unwrap();
verify_and_process_slot_entries(&bank1, &slot1_entries, bank0.last_blockhash(), &opts)
.unwrap();
// Test process_blocktree_from_root() from slot 1 onwards
let (bank_forks, bank_forks_info, _) =

View File

@ -61,32 +61,18 @@ pub struct Entry {
impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
if num_hashes == 0 && transactions.is_empty() {
Entry {
num_hashes: 0,
hash: *prev_hash,
transactions,
}
} else if num_hashes == 0 {
// If you passed in transactions, but passed in num_hashes == 0, then
// next_hash will generate the next hash and set num_hashes == 1
let hash = next_hash(prev_hash, 1, &transactions);
Entry {
num_hashes: 1,
hash,
transactions,
}
} else {
// Otherwise, the next Entry `num_hashes` after `start_hash`.
// If you wanted a tick for instance, then pass in num_hashes = 1
// and transactions = empty
let hash = next_hash(prev_hash, num_hashes, &transactions);
Entry {
num_hashes,
hash,
transactions,
}
pub fn new(prev_hash: &Hash, mut num_hashes: u64, transactions: Vec<Transaction>) -> Self {
// If you passed in transactions, but passed in num_hashes == 0, then
// next_hash will generate the next hash and set num_hashes == 1
if num_hashes == 0 && !transactions.is_empty() {
num_hashes = 1;
}
let hash = next_hash(prev_hash, num_hashes, &transactions);
Entry {
num_hashes,
hash,
transactions,
}
}
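A quick illustration of the num_hashes == 0 bump described in the comment above; the snippet is hypothetical (assuming solana_ledger crate paths and solana-sdk's system_transaction helper), not part of the patch.

use solana_ledger::entry::Entry;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;

fn zero_hash_transaction_entry_is_bumped() {
    let payer = Keypair::new();
    let tx = system_transaction::transfer(&payer, &Keypair::new().pubkey(), 1, Hash::default());

    // Passing num_hashes == 0 together with a transaction still registers one hash,
    // so the entry advances the PoH chain past prev_hash.
    let entry = Entry::new(&Hash::default(), 0, vec![tx]);
    assert_eq!(entry.num_hashes, 1);
    assert_ne!(entry.hash, Hash::default());
}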
@ -219,6 +205,12 @@ pub trait EntrySlice {
fn verify_cpu(&self, start_hash: &Hash) -> EntryVerifyState;
fn start_verify(&self, start_hash: &Hash) -> EntryVerifyState;
fn verify(&self, start_hash: &Hash) -> bool;
/// Checks that each entry tick has the correct number of hashes. Entry slices do not
/// necessarily end in a tick, so `tick_hash_count` is used to carry over the hash count
/// for the next entry slice.
fn verify_tick_hash_count(&self, tick_hash_count: &mut u64, hashes_per_tick: u64) -> bool;
/// Counts tick entries
fn tick_count(&self) -> u64;
}
impl EntrySlice for [Entry] {
@ -338,6 +330,34 @@ impl EntrySlice for [Entry] {
hashes: Some(hashes),
}
}
fn verify_tick_hash_count(&self, tick_hash_count: &mut u64, hashes_per_tick: u64) -> bool {
// When hashes_per_tick is 0, hashing is disabled.
if hashes_per_tick == 0 {
return true;
}
for entry in self {
*tick_hash_count += entry.num_hashes;
if entry.is_tick() {
if *tick_hash_count != hashes_per_tick {
warn!(
"invalid tick hash count!: entry: {:#?}, tick_hash_count: {}, hashes_per_tick: {}",
entry,
tick_hash_count,
hashes_per_tick
);
return false;
}
*tick_hash_count = 0;
}
}
*tick_hash_count < hashes_per_tick
}
fn tick_count(&self) -> u64 {
self.iter().filter(|e| e.is_tick()).count() as u64
}
}
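Because a batch of entries pulled from shreds does not necessarily end on a tick boundary, the running tick_hash_count is threaded between calls. Below is a minimal sketch of that carry-over, assuming it runs inside the ledger crate; the test name and setup are illustrative only.

use crate::entry::{next_entry, Entry, EntrySlice};
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;

#[test]
fn tick_hash_count_carries_across_slices() {
    let hashes_per_tick = 10;
    let start = Hash::default();

    // A transaction entry accounting for 1 hash, then a tick accounting for the rest.
    let payer = Keypair::new();
    let tx = system_transaction::transfer(&payer, &Keypair::new().pubkey(), 1, start);
    let tx_entry = next_entry(&start, 1, vec![tx]);
    let tick_entry = next_entry(&tx_entry.hash, hashes_per_tick - 1, vec![]);

    // The first slice ends mid-tick, so its partial count is carried into the second call.
    let mut tick_hash_count = 0;
    let first: &[Entry] = &[tx_entry];
    assert!(first.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
    assert_eq!(tick_hash_count, 1);

    let second: &[Entry] = &[tick_entry];
    assert!(second.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
    assert_eq!(tick_hash_count, 0);
}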
pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
@ -346,10 +366,10 @@ pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Trans
entry
}
pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
pub fn create_ticks(num_ticks: u64, hashes_per_tick: u64, mut hash: Hash) -> Vec<Entry> {
let mut ticks = Vec::with_capacity(num_ticks as usize);
for _ in 0..num_ticks {
let new_tick = next_entry_mut(&mut hash, 1, vec![]);
let new_tick = next_entry_mut(&mut hash, hashes_per_tick, vec![]);
ticks.push(new_tick);
}
@ -373,9 +393,11 @@ mod tests {
use chrono::prelude::Utc;
use solana_budget_api::budget_instruction;
use solana_sdk::{
hash::hash,
hash::{hash, Hash},
message::Message,
signature::{Keypair, KeypairUtil},
system_transaction,
transaction::Transaction,
};
fn create_sample_payment(keypair: &Keypair, hash: Hash) -> Transaction {
@ -528,4 +550,58 @@ mod tests {
bad_ticks[1].hash = one;
assert!(!bad_ticks.verify(&one)); // inductive step, bad
}
#[test]
fn test_verify_tick_hash_count() {
let hashes_per_tick = 10;
let keypairs: Vec<&Keypair> = Vec::new();
let tx: Transaction =
Transaction::new(&keypairs, Message::new(Vec::new()), Hash::default());
let tx_entry = Entry::new(&Hash::default(), 1, vec![tx]);
let full_tick_entry = Entry::new_tick(hashes_per_tick, &Hash::default());
let partial_tick_entry = Entry::new_tick(hashes_per_tick - 1, &Hash::default());
let no_hash_tick_entry = Entry::new_tick(0, &Hash::default());
let single_hash_tick_entry = Entry::new_tick(1, &Hash::default());
let no_ticks = vec![];
let mut tick_hash_count = 0;
assert!(no_ticks.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
assert_eq!(tick_hash_count, 0);
// validation is disabled when hashes_per_tick == 0
let no_hash_tick = vec![no_hash_tick_entry.clone()];
assert!(no_hash_tick.verify_tick_hash_count(&mut tick_hash_count, 0));
assert_eq!(tick_hash_count, 0);
// validation is disabled when hashes_per_tick == 0
let tx_and_no_hash_tick = vec![tx_entry.clone(), no_hash_tick_entry];
assert!(tx_and_no_hash_tick.verify_tick_hash_count(&mut tick_hash_count, 0));
assert_eq!(tick_hash_count, 0);
let single_tick = vec![full_tick_entry.clone()];
assert!(single_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
assert_eq!(tick_hash_count, 0);
assert!(!single_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick - 1));
assert_eq!(tick_hash_count, hashes_per_tick);
tick_hash_count = 0;
let ticks_and_txs = vec![tx_entry.clone(), partial_tick_entry.clone()];
assert!(ticks_and_txs.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
assert_eq!(tick_hash_count, 0);
let partial_tick = vec![partial_tick_entry.clone()];
assert!(!partial_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
assert_eq!(tick_hash_count, hashes_per_tick - 1);
tick_hash_count = 0;
let tx_entries: Vec<Entry> = (0..hashes_per_tick - 1).map(|_| tx_entry.clone()).collect();
let tx_entries_and_tick = [tx_entries, vec![single_hash_tick_entry]].concat();
assert!(tx_entries_and_tick.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
assert_eq!(tick_hash_count, 0);
let too_many_tx_entries: Vec<Entry> =
(0..hashes_per_tick).map(|_| tx_entry.clone()).collect();
assert!(!too_many_tx_entries.verify_tick_hash_count(&mut tick_hash_count, hashes_per_tick));
assert_eq!(tick_hash_count, hashes_per_tick);
}
}

View File

@ -1,4 +1,5 @@
pub mod bank_forks;
pub mod block_error;
#[macro_use]
pub mod blocktree;
mod blocktree_db;

View File

@ -715,7 +715,7 @@ impl Shredder {
}
pub fn max_ticks_per_n_shreds(num_shreds: u64) -> u64 {
let ticks = create_ticks(1, Hash::default());
let ticks = create_ticks(1, 0, Hash::default());
max_entries_per_n_shred(&ticks[0], num_shreds)
}

View File

@ -19,7 +19,7 @@ fn test_multiple_threads_insert_shred() {
// with parent = slot 0
let threads: Vec<_> = (0..num_threads)
.map(|i| {
let entries = entry::create_ticks(1, Hash::default());
let entries = entry::create_ticks(1, 0, Hash::default());
let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false);
let blocktree_ = blocktree.clone();
Builder::new()