Rename Entry.id to Entry.hash

Michael Vines
2019-03-01 08:57:42 -08:00
parent 7e7b79ef34
commit 7c4473e0aa
13 changed files with 96 additions and 96 deletions
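In short: the field on the `Entry` struct that carries the PoH result is renamed from `id` to `hash`, and helpers such as `make_tiny_test_entries_from_id` follow suit as `make_tiny_test_entries_from_hash`. A condensed sketch of the shape after this change, distilled from the diff below (doc comments abridged, surrounding code elided):

pub struct Entry {
    /// Number of hashes performed since the previous Entry.
    pub num_hashes: u64,
    /// The SHA-256 hash `num_hashes` after the previous Entry's hash (formerly `id`).
    pub hash: Hash,
    /// Transactions observed before `hash` was generated.
    pub transactions: Vec<Transaction>,
}

// Call sites change accordingly, e.g.:
// last_id = entry.id;   becomes   last_id = entry.hash;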

View File

@@ -127,7 +127,7 @@ fn main() {
 exit(1);
 }
 }
-last_id = entry.id;
+last_id = entry.hash;
 num_entries += 1;
 if let Err(e) = blocktree_processor::process_entry(&bank, &entry) {

View File

@@ -412,7 +412,7 @@ mod tests {
 .collect();
 assert!(entries.len() != 0);
 assert!(entries.verify(&start_hash));
-assert_eq!(entries[entries.len() - 1].id, bank.last_id());
+assert_eq!(entries[entries.len() - 1].hash, bank.last_id());
 banking_stage.join().unwrap();
 poh_service.close().unwrap();
 }
@@ -467,7 +467,7 @@ mod tests {
 entries.iter().for_each(|entries| {
 assert_eq!(entries.len(), 1);
 assert!(entries.verify(&last_id));
-last_id = entries.last().unwrap().id;
+last_id = entries.last().unwrap().hash;
 });
 drop(entry_receiver);
 banking_stage.join().unwrap();

View File

@@ -180,7 +180,7 @@ mod test {
 curr_slot += 1;
 }
 let entry = Entry::new(&mut last_id, 1, vec![]); // just ticks
-last_id = entry.id;
+last_id = entry.hash;
 blockstream
 .emit_entry_event(curr_slot, tick_height, leader_id, &entry)
 .unwrap();

View File

@@ -85,7 +85,7 @@ impl BlockstreamService {
 });
 if i == entries.len() - 1 {
 blockstream
-.emit_block_event(slot, tick_height, slot_leader, entry.id)
+.emit_block_event(slot, tick_height, slot_leader, entry.hash)
 .unwrap_or_else(|e| {
 debug!("Blockstream error: {:?}, {:?}", e, blockstream.output);
 });
@@ -138,10 +138,10 @@ mod test {
 let mut entries = create_ticks(4, Hash::default());
 let keypair = Keypair::new();
-let mut last_id = entries[3].id;
+let mut last_id = entries[3].hash;
 let tx = SystemTransaction::new_account(&keypair, keypair.pubkey(), 1, Hash::default(), 0);
 let entry = Entry::new(&mut last_id, 1, vec![tx]);
-last_id = entry.id;
+last_id = entry.hash;
 entries.push(entry);
 let final_tick = create_ticks(1, last_id);
 entries.extend_from_slice(&final_tick);

View File

@@ -1279,7 +1279,7 @@ impl Iterator for EntryIterator {
 }
 }
 self.db_iterator.next();
-self.last_id = Some(entry.id);
+self.last_id = Some(entry.hash);
 return Some(entry);
 }
 }
@@ -1301,7 +1301,7 @@ pub fn create_new_ledger(ledger_path: &str, genesis_block: &GenesisBlock) -> Res
 let entries = crate::entry::create_ticks(ticks_per_slot, genesis_block.last_id());
 blocktree.write_entries(0, 0, 0, &entries)?;
-Ok(entries.last().unwrap().id)
+Ok(entries.last().unwrap().hash)
 }
 pub fn genesis<'a, I>(ledger_path: &str, keypair: &Keypair, entries: I) -> Result<()>
@@ -1398,7 +1398,7 @@ pub fn tmp_copy_blocktree(from: &str, name: &str) -> String {
 pub mod tests {
 use super::*;
 use crate::entry::{
-create_ticks, make_tiny_test_entries, make_tiny_test_entries_from_id, Entry, EntrySlice,
+create_ticks, make_tiny_test_entries, make_tiny_test_entries_from_hash, Entry, EntrySlice,
 };
 use crate::packet::index_blobs;
 use rand::seq::SliceRandom;
@@ -1924,7 +1924,7 @@ pub mod tests {
 #[test]
 pub fn test_genesis_and_entry_iterator() {
-let entries = make_tiny_test_entries_from_id(&Hash::default(), 10);
+let entries = make_tiny_test_entries_from_hash(&Hash::default(), 10);
 let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
 {
@@ -1942,7 +1942,7 @@ pub mod tests {
 }
 #[test]
 pub fn test_entry_iterator_up_to_consumed() {
-let entries = make_tiny_test_entries_from_id(&Hash::default(), 3);
+let entries = make_tiny_test_entries_from_hash(&Hash::default(), 3);
 let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
 {
 // put entries except last 2 into ledger

View File

@@ -15,7 +15,7 @@ pub fn process_entry(bank: &Bank, entry: &Entry) -> Result<()> {
 if !entry.is_tick() {
 first_err(&bank.process_transactions(&entry.transactions))?;
 } else {
-bank.register_tick(&entry.id);
+bank.register_tick(&entry.hash);
 }
 Ok(())
 }
@@ -57,7 +57,7 @@ fn par_process_entries(bank: &Bank, entries: &[Entry]) -> Result<()> {
 if entry.is_tick() {
 // if its a tick, execute the group and register the tick
 par_execute_entries(bank, &mt_group)?;
-bank.register_tick(&entry.id);
+bank.register_tick(&entry.hash);
 mt_group = vec![];
 continue;
 }
@@ -159,7 +159,7 @@ pub fn process_blocktree(
 warn!("Ledger proof of history failed at entry0");
 return Err(BankError::LedgerVerificationFailed);
 }
-last_entry_hash = entry0.id;
+last_entry_hash = entry0.hash;
 entry_height += 1;
 entries = entries.drain(1..).collect();
 }
@@ -175,7 +175,7 @@ pub fn process_blocktree(
 BankError::LedgerVerificationFailed
 })?;
-last_entry_hash = entries.last().unwrap().id;
+last_entry_hash = entries.last().unwrap().hash;
 entry_height += entries.len() as u64;
 }
@@ -201,7 +201,7 @@ pub fn process_blocktree(
 bank.squash();
 if meta.next_slots.is_empty() {
-// Reached the end of this fork. Record the final entry height and last entry id
+// Reached the end of this fork. Record the final entry height and last entry.hash
 let bfi = BankForksInfo {
 bank_id: slot,
@@ -258,7 +258,7 @@ mod tests {
 last_entry_hash: Hash,
 ) -> Hash {
 let entries = create_ticks(ticks_per_slot, last_entry_hash);
-let last_entry_hash = entries.last().unwrap().id;
+let last_entry_hash = entries.last().unwrap().hash;
 let blobs = entries_to_blobs(&entries, slot, parent_slot);
 blocktree.insert_data_blobs(blobs.iter()).unwrap();
@@ -298,7 +298,7 @@ mod tests {
 let parent_slot = 0;
 let slot = 1;
 let mut entries = create_ticks(ticks_per_slot, last_id);
-last_id = entries.last().unwrap().id;
+last_id = entries.last().unwrap().hash;
 entries.pop();
@@ -352,19 +352,19 @@ mod tests {
 .expect("Expected to successfully open database ledger");
 // Fork 1, ending at slot 3
-let last_slot1_entry_id =
+let last_slot1_entry_hash =
 fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 1, 0, last_entry_hash);
 last_entry_hash =
-fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_id);
+fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, last_slot1_entry_hash);
-let last_fork1_entry_id =
+let last_fork1_entry_hash =
 fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 2, last_entry_hash);
 // Fork 2, ending at slot 4
-let last_fork2_entry_id =
+let last_fork2_entry_hash =
-fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_id);
+fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, 1, last_slot1_entry_hash);
-info!("last_fork1_entry_id: {:?}", last_fork1_entry_id);
+info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
-info!("last_fork2_entry_id: {:?}", last_fork2_entry_id);
+info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);
 let (bank_forks, bank_forks_info) =
 process_blocktree(&genesis_block, &blocktree, None).unwrap();
@@ -430,7 +430,7 @@ mod tests {
 let bank = Bank::new(&genesis_block);
 let keypair = Keypair::new();
 let entry = next_entry(&genesis_block.last_id(), 1, vec![]);
-let tx = SystemTransaction::new_account(&mint_keypair, keypair.pubkey(), 1, entry.id, 0);
+let tx = SystemTransaction::new_account(&mint_keypair, keypair.pubkey(), 1, entry.hash, 0);
 // First, ensure the TX is rejected because of the unregistered last ID
 assert_eq!(
@@ -457,7 +457,7 @@ mod tests {
 let keypair = Keypair::new();
 let tx = SystemTransaction::new_account(&mint_keypair, keypair.pubkey(), 1, last_id, 0);
 let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
-last_entry_hash = entry.id;
+last_entry_hash = entry.hash;
 entries.push(entry);
 // Add a second Transaction that will produce a
@@ -465,7 +465,7 @@ mod tests {
 let keypair2 = Keypair::new();
 let tx = SystemTransaction::new_account(&keypair, keypair2.pubkey(), 42, last_id, 0);
 let entry = Entry::new(&last_entry_hash, 1, vec![tx]);
-last_entry_hash = entry.id;
+last_entry_hash = entry.hash;
 entries.push(entry);
 }
@@ -491,7 +491,7 @@ mod tests {
 let bank = bank_forks[1].clone();
 assert_eq!(bank.get_balance(&mint_keypair.pubkey()), 50 - 3);
 assert_eq!(bank.tick_height(), 2 * genesis_block.ticks_per_slot - 1);
-assert_eq!(bank.last_id(), entries.last().unwrap().id);
+assert_eq!(bank.last_id(), entries.last().unwrap().hash);
 }
 #[test]
@@ -524,7 +524,7 @@ mod tests {
 // ensure bank can process a tick
 let tick = next_entry(&genesis_block.last_id(), 1, vec![]);
 assert_eq!(par_process_entries(&bank, &[tick.clone()]), Ok(()));
-assert_eq!(bank.last_id(), tick.id);
+assert_eq!(bank.last_id(), tick.hash);
 }
 #[test]
@@ -542,7 +542,7 @@ mod tests {
 let entry_1 = next_entry(&last_id, 1, vec![tx]);
 let tx =
 SystemTransaction::new_account(&mint_keypair, keypair2.pubkey(), 2, bank.last_id(), 0);
-let entry_2 = next_entry(&entry_1.id, 1, vec![tx]);
+let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
 assert_eq!(par_process_entries(&bank, &[entry_1, entry_2]), Ok(()));
 assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
 assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
@@ -581,7 +581,7 @@ mod tests {
 );
 let entry_2_to_3_mint_to_1 = next_entry(
-&entry_1_to_mint.id,
+&entry_1_to_mint.hash,
 1,
 vec![
 SystemTransaction::new_account(&keypair2, keypair3.pubkey(), 2, bank.last_id(), 0), // should be fine
@@ -627,7 +627,7 @@ mod tests {
 let tx = SystemTransaction::new_account(&keypair1, keypair3.pubkey(), 1, bank.last_id(), 0);
 let entry_1 = next_entry(&last_id, 1, vec![tx]);
 let tx = SystemTransaction::new_account(&keypair2, keypair4.pubkey(), 1, bank.last_id(), 0);
-let entry_2 = next_entry(&entry_1.id, 1, vec![tx]);
+let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
 assert_eq!(par_process_entries(&bank, &[entry_1, entry_2]), Ok(()));
 assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
 assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
@@ -656,19 +656,19 @@ mod tests {
 // ensure bank can process 2 entries that do not have a common account and tick is registered
 let tx = SystemTransaction::new_account(&keypair2, keypair3.pubkey(), 1, bank.last_id(), 0);
 let entry_1 = next_entry(&last_id, 1, vec![tx]);
-let tick = next_entry(&entry_1.id, 1, vec![]);
+let tick = next_entry(&entry_1.hash, 1, vec![]);
-let tx = SystemTransaction::new_account(&keypair1, keypair4.pubkey(), 1, tick.id, 0);
+let tx = SystemTransaction::new_account(&keypair1, keypair4.pubkey(), 1, tick.hash, 0);
-let entry_2 = next_entry(&tick.id, 1, vec![tx]);
+let entry_2 = next_entry(&tick.hash, 1, vec![tx]);
 assert_eq!(
 par_process_entries(&bank, &[entry_1.clone(), tick.clone(), entry_2.clone()]),
 Ok(())
 );
 assert_eq!(bank.get_balance(&keypair3.pubkey()), 1);
 assert_eq!(bank.get_balance(&keypair4.pubkey()), 1);
-assert_eq!(bank.last_id(), tick.id);
+assert_eq!(bank.last_id(), tick.hash);
 // ensure that an error is returned for an empty account (keypair2)
-let tx = SystemTransaction::new_account(&keypair2, keypair3.pubkey(), 1, tick.id, 0);
+let tx = SystemTransaction::new_account(&keypair2, keypair3.pubkey(), 1, tick.hash, 0);
-let entry_3 = next_entry(&entry_2.id, 1, vec![tx]);
+let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]);
 assert_eq!(
 par_process_entries(&bank, &[entry_3]),
 Err(BankError::AccountNotFound)

View File

@@ -9,7 +9,7 @@ use bincode::{deserialize, serialize_into, serialized_size};
 use chrono::prelude::Utc;
 use rayon::prelude::*;
 use solana_sdk::budget_transaction::BudgetTransaction;
-use solana_sdk::hash::{hash, Hash};
+use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::transaction::Transaction;
@@ -25,9 +25,9 @@ pub type EntrySender = Sender<Vec<Entry>>;
 pub type EntryReceiver = Receiver<Vec<Entry>>;
 /// Each Entry contains three pieces of data. The `num_hashes` field is the number
-/// of hashes performed since the previous entry. The `id` field is the result
+/// of hashes performed since the previous entry. The `hash` field is the result
-/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
+/// of hashing `hash` from the previous entry `num_hashes` times. The `transactions`
-/// field points to Transactions that took place shortly before `id` was generated.
+/// field points to Transactions that took place shortly before `hash` was generated.
 ///
 /// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
 /// get a duration estimate since the last Entry. Since processing power increases
@@ -45,7 +45,7 @@ pub struct Entry {
 pub num_hashes: u64,
 /// The SHA-256 hash `num_hashes` after the previous Entry ID.
-pub id: Hash,
+pub hash: Hash,
 /// An unordered list of transactions that were observed before the Entry ID was
 /// generated. They may have been observed before a previous Entry ID but were
@@ -60,26 +60,26 @@ impl Entry {
 if num_hashes == 0 && transactions.is_empty() {
 Entry {
 num_hashes: 0,
-id: *prev_hash,
+hash: *prev_hash,
 transactions,
 }
 } else if num_hashes == 0 {
 // If you passed in transactions, but passed in num_hashes == 0, then
 // next_hash will generate the next hash and set num_hashes == 1
-let id = next_hash(prev_hash, 1, &transactions);
+let hash = next_hash(prev_hash, 1, &transactions);
 Entry {
 num_hashes: 1,
-id,
+hash,
 transactions,
 }
 } else {
 // Otherwise, the next Entry `num_hashes` after `start_hash`.
 // If you wanted a tick for instance, then pass in num_hashes = 1
 // and transactions = empty
-let id = next_hash(prev_hash, num_hashes, &transactions);
+let hash = next_hash(prev_hash, num_hashes, &transactions);
 Entry {
 num_hashes,
-id,
+hash,
 transactions,
 }
 }
@@ -118,7 +118,7 @@ impl Entry {
 .iter()
 .map(|tx| tx.serialized_size().unwrap())
 .sum();
-// num_hashes + id + txs
+// num_hashes + hash + txs
 (2 * size_of::<u64>() + size_of::<Hash>()) as u64 + txs_size
 }
@@ -165,32 +165,32 @@ impl Entry {
 transactions: Vec<Transaction>,
 ) -> Self {
 let entry = Self::new(start_hash, *num_hashes, transactions);
-*start_hash = entry.id;
+*start_hash = entry.hash;
 *num_hashes = 0;
 assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
 entry
 }
 /// Creates a Entry from the number of hashes `num_hashes`
-/// since the previous transaction and that resulting `id`.
+/// since the previous transaction and that resulting `hash`.
 #[cfg(test)]
-pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
+pub fn new_tick(num_hashes: u64, hash: &Hash) -> Self {
 Entry {
 num_hashes,
-id: *id,
+hash: *hash,
 transactions: vec![],
 }
 }
-/// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
+/// Verifies self.hash is the result of hashing a `start_hash` `self.num_hashes` times.
 /// If the transaction is not a Tick, then hash that as well.
 pub fn verify(&self, start_hash: &Hash) -> bool {
 let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
-if self.id != ref_hash {
+if self.hash != ref_hash {
 warn!(
 "next_hash is invalid expected: {:?} actual: {:?}",
-self.id, ref_hash
+self.hash, ref_hash
 );
 return false;
 }
@@ -259,17 +259,17 @@ impl EntrySlice for [Entry] {
 fn verify(&self, start_hash: &Hash) -> bool {
 let genesis = [Entry {
 num_hashes: 0,
-id: *start_hash,
+hash: *start_hash,
 transactions: vec![],
 }];
 let entry_pairs = genesis.par_iter().chain(self).zip(self);
 entry_pairs.all(|(x0, x1)| {
-let r = x1.verify(&x0.id);
+let r = x1.verify(&x0.hash);
 if !r {
 warn!(
 "entry invalid!: x0: {:?}, x1: {:?} num txs: {}",
-x0.id,
+x0.hash,
-x1.id,
+x1.hash,
 x1.transactions.len()
 );
 }
@@ -299,12 +299,12 @@ impl EntrySlice for [Entry] {
 pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
 let entry = Entry::new(&start, num_hashes, transactions);
-*start = entry.id;
+*start = entry.hash;
 entry
 }
 /// Creates the next entries for given transactions, outputs
-/// updates start_hash to id of last Entry, sets num_hashes to 0
+/// updates start_hash to hash of last Entry, sets num_hashes to 0
 pub fn next_entries_mut(
 start_hash: &mut Hash,
 num_hashes: &mut u64,
@@ -373,9 +373,9 @@ pub fn next_entries(
 num_hashes: u64,
 transactions: Vec<Transaction>,
 ) -> Vec<Entry> {
-let mut id = *start_hash;
+let mut hash = *start_hash;
 let mut num_hashes = num_hashes;
-next_entries_mut(&mut id, &mut num_hashes, transactions)
+next_entries_mut(&mut hash, &mut num_hashes, transactions)
 }
 pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
@@ -388,15 +388,15 @@ pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
 ticks
 }
-pub fn make_tiny_test_entries_from_id(start: &Hash, num: usize) -> Vec<Entry> {
+pub fn make_tiny_test_entries_from_hash(start: &Hash, num: usize) -> Vec<Entry> {
 let keypair = Keypair::new();
-let mut id = *start;
+let mut hash = *start;
 let mut num_hashes = 0;
 (0..num)
 .map(|_| {
 Entry::new_mut(
-&mut id,
+&mut hash,
 &mut num_hashes,
 vec![BudgetTransaction::new_timestamp(
 &keypair,
@@ -412,13 +412,13 @@ pub fn make_tiny_test_entries_from_id(start: &Hash, num: usize) -> Vec<Entry> {
 pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
 let zero = Hash::default();
-let one = hash(&zero.as_ref());
+let one = solana_sdk::hash::hash(&zero.as_ref());
-make_tiny_test_entries_from_id(&one, num)
+make_tiny_test_entries_from_hash(&one, num)
 }
 pub fn make_large_test_entries(num_entries: usize) -> Vec<Entry> {
 let zero = Hash::default();
-let one = hash(&zero.as_ref());
+let one = solana_sdk::hash::hash(&zero.as_ref());
 let keypair = Keypair::new();
 let tx = BudgetTransaction::new_timestamp(
@@ -465,7 +465,7 @@ pub fn next_entry(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transacti
 assert!(num_hashes > 0 || transactions.is_empty());
 Entry {
 num_hashes,
-id: next_hash(prev_hash, num_hashes, &transactions),
+hash: next_hash(prev_hash, num_hashes, &transactions),
 transactions,
 }
 }
@@ -536,11 +536,11 @@ mod tests {
 let zero = Hash::default();
 let tick = next_entry(&zero, 1, vec![]);
 assert_eq!(tick.num_hashes, 1);
-assert_ne!(tick.id, zero);
+assert_ne!(tick.hash, zero);
 let tick = next_entry(&zero, 0, vec![]);
 assert_eq!(tick.num_hashes, 0);
-assert_eq!(tick.id, zero);
+assert_eq!(tick.hash, zero);
 let keypair = Keypair::new();
 let tx0 = BudgetTransaction::new_timestamp(
@@ -552,7 +552,7 @@ mod tests {
 );
 let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
 assert_eq!(entry0.num_hashes, 1);
-assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
+assert_eq!(entry0.hash, next_hash(&zero, 1, &vec![tx0]));
 }
 #[test]
@@ -587,7 +587,7 @@ mod tests {
 assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
 let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
-bad_ticks[1].id = one;
+bad_ticks[1].hash = one;
 assert!(!bad_ticks.verify(&zero)); // inductive step, bad
 }
@@ -640,8 +640,8 @@ mod tests {
 #[test]
 fn test_next_entries() {
 solana_logger::setup();
-let id = Hash::default();
+let hash = Hash::default();
-let next_hash = hash(&id.as_ref());
+let next_hash = solana_sdk::hash::hash(&hash.as_ref());
 let keypair = Keypair::new();
 let vote_account = Keypair::new();
 let tx_small = VoteTransaction::new_vote(&vote_account, 1, next_hash, 2);
@@ -651,7 +651,7 @@ mod tests {
 let tx_large_size = tx_large.serialized_size().unwrap() as usize;
 let entry_size = serialized_size(&Entry {
 num_hashes: 0,
-id: Hash::default(),
+hash: Hash::default(),
 transactions: vec![],
 })
 .unwrap() as usize;
@@ -662,15 +662,15 @@ mod tests {
 // verify no split
 let transactions = vec![tx_small.clone(); threshold];
-let entries0 = next_entries(&id, 0, transactions.clone());
+let entries0 = next_entries(&hash, 0, transactions.clone());
 assert_eq!(entries0.len(), 1);
-assert!(entries0.verify(&id));
+assert!(entries0.verify(&hash));
 // verify the split with uniform transactions
 let transactions = vec![tx_small.clone(); threshold * 2];
-let entries0 = next_entries(&id, 0, transactions.clone());
+let entries0 = next_entries(&hash, 0, transactions.clone());
 assert_eq!(entries0.len(), 2);
-assert!(entries0.verify(&id));
+assert!(entries0.verify(&hash));
 // verify the split with small transactions followed by large
 // transactions
@@ -679,9 +679,9 @@ mod tests {
 transactions.extend(large_transactions);
-let entries0 = next_entries(&id, 0, transactions.clone());
+let entries0 = next_entries(&hash, 0, transactions.clone());
 assert!(entries0.len() >= 2);
-assert!(entries0.verify(&id));
+assert!(entries0.verify(&hash));
 }
 }

View File

@@ -785,7 +785,7 @@ mod tests {
 );
 let blocktree = Blocktree::open_config(&ledger_path, ticks_per_slot).unwrap();
-let last_id = entries.last().unwrap().id;
+let last_id = entries.last().unwrap().hash;
 let entry_height = ticks_per_slot + entries.len() as u64;
 blocktree.write_entries(1, 0, 0, entries).unwrap();

View File

@@ -102,7 +102,7 @@ impl PohRecorder {
 );
 let cache = &self.tick_cache[..cnt];
 for t in cache {
-working_bank.bank.register_tick(&t.0.id);
+working_bank.bank.register_tick(&t.0.hash);
 }
 working_bank.sender.send(cache.to_vec())
 } else {
@@ -162,7 +162,7 @@ impl PohRecorder {
 assert!(!txs.is_empty(), "Entries without transactions are used to track real-time passing in the ledger and can only be generated with PohRecorder::tick function");
 let recorded_entry = Entry {
 num_hashes: entry.num_hashes,
-id: entry.id,
+hash: entry.id,
 transactions: txs,
 };
 trace!("sending entry {}", recorded_entry.is_tick());
@@ -178,7 +178,7 @@ impl PohRecorder {
 (
 Entry {
 num_hashes: tick.num_hashes,
-id: tick.id,
+hash: tick.id,
 transactions: vec![],
 },
 tick.tick_height,

View File

@@ -138,7 +138,7 @@ impl ReplayStage {
 *last_entry_hash = entries
 .last()
 .expect("Entries cannot be empty at this point")
-.id;
+.hash;
 inc_new_counter_info!(
 "replicate-transactions",
@@ -338,7 +338,7 @@ impl ReplayStage {
 let last_entry = blocktree
 .get_slot_entries(slot, meta.last_index, Some(1))
 .unwrap();
-last_entry_hash = last_entry[0].id;
+last_entry_hash = last_entry[0].hash;
 }
 let old_bank = bank.clone();

View File

@@ -421,7 +421,7 @@ impl StorageStage {
 &storage_state,
 &keypair,
 &blocktree,
-entry.id,
+entry.hash,
 *entry_height,
 tx_sender,
 )?;

View File

@@ -1313,7 +1313,7 @@ fn test_full_leader_validator_network() {
 &last_id,
 ticks_per_slot,
 );
-last_id = node_active_set_entries.last().unwrap().id;
+last_id = node_active_set_entries.last().unwrap().hash;
 active_set_entries.extend(node_active_set_entries);
 }
@@ -1702,7 +1702,7 @@ fn stake_fullnode(
 fn add_tick(last_id: &mut Hash, entries: &mut Vec<Entry>) -> Hash {
 let tick = solana::entry::create_ticks(1, *last_id);
-*last_id = tick[0].id;
+*last_id = tick[0].hash;
 entries.extend(tick);
 *last_id
 }

View File

@@ -177,7 +177,7 @@ fn test_replicator_startup_basic() {
 assert!(br.index() == repair_index);
 let entry: Entry = deserialize(&br.data()[..br.meta.size]).unwrap();
 info!("entry: {:?}", entry);
-assert_ne!(entry.id, Hash::default());
+assert_ne!(entry.hash, Hash::default());
 received_blob = true;
 }
 break;