add entry.has_more

* quick fix for really big genesis
* longer term fix for possible parallel verification over multiple Blobs/Entries
Rob Walker
2018-06-25 23:28:41 -07:00
committed by Grimes
parent b7ddefdbf9
commit 55ec7f9fe9
6 changed files with 34 additions and 11 deletions

View File

@@ -314,7 +314,12 @@ impl Bank {
                     result?;
                 }
             }
-            self.register_entry_id(&entry.id);
+            // TODO: verify this is ok in cases like:
+            //  1. an untrusted genesis or tx-<DATE>.log
+            //  2. a crazy leader..
+            if !entry.has_more {
+                self.register_entry_id(&entry.id);
+            }
         }
         Ok(self.entry_count())
     }
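
In isolation, the rule introduced above reads roughly as follows. This is a minimal self-contained sketch, assuming registered ids feed the bank's duplicate-rejection / last_id cache; the SketchEntry type and register_ids helper are illustrative stand-ins, not code from this commit.

// Minimal sketch: only an entry that closes its group (has_more == false)
// contributes its id to the entry_id cache.
struct SketchEntry {
    id: u64,        // stand-in for the real Hash
    has_more: bool, // continuation flag added in this commit
}

fn register_ids(entries: &[SketchEntry]) -> Vec<u64> {
    entries
        .iter()
        .filter(|e| !e.has_more) // mirrors `if !entry.has_more { ... }` above
        .map(|e| e.id)
        .collect()
}

fn main() {
    // A large genesis split across three blob-sized entries.
    let entries = [
        SketchEntry { id: 1, has_more: true },
        SketchEntry { id: 2, has_more: true },
        SketchEntry { id: 3, has_more: false },
    ];
    // Only the id of the final entry in the group gets registered.
    assert_eq!(register_ids(&entries), vec![3]);
}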

View File

@@ -35,17 +35,30 @@ pub struct Entry {
     /// generated. The may have been observed before a previous Entry ID but were
     /// pushed back into this list to ensure deterministic interpretation of the ledger.
     pub transactions: Vec<Transaction>,
+
+    /// Indication that:
+    ///  1. the next Entry in the ledger has transactions that can potentially
+    ///     be verified in parallel with these transactions
+    ///  2. this Entry can be left out of the bank's entry_id cache for
+    ///     purposes of duplicate rejection
+    pub has_more: bool,
 }
 
 impl Entry {
     /// Creates the next Entry `num_hashes` after `start_hash`.
-    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
+    pub fn new(
+        start_hash: &Hash,
+        cur_hashes: u64,
+        transactions: Vec<Transaction>,
+        has_more: bool,
+    ) -> Self {
         let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
         let id = next_hash(start_hash, 0, &transactions);
         let entry = Entry {
             num_hashes,
             id,
             transactions,
+            has_more,
         };
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
         entry
@@ -56,8 +69,9 @@ impl Entry {
         start_hash: &mut Hash,
         cur_hashes: &mut u64,
         transactions: Vec<Transaction>,
+        has_more: bool,
     ) -> Self {
-        let entry = Self::new(start_hash, *cur_hashes, transactions);
+        let entry = Self::new(start_hash, *cur_hashes, transactions, has_more);
         *start_hash = entry.id;
         *cur_hashes = 0;
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
@@ -71,6 +85,7 @@ impl Entry {
             num_hashes,
             id: *id,
             transactions: vec![],
+            has_more: false,
         }
     }
 
@@ -119,6 +134,7 @@ pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transact
         num_hashes,
         id: next_hash(start_hash, num_hashes, &transactions),
         transactions,
+        has_more: false,
     }
 }
 
@@ -149,7 +165,7 @@ mod tests {
         let keypair = KeyPair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
         let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
-        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
+        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
         assert!(e0.verify(&zero));
 
         // Next, swap two transactions and ensure verification fails.
@@ -166,7 +182,7 @@ mod tests {
         let keypair = KeyPair::new();
         let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
         let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
-        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
+        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
         assert!(e0.verify(&zero));
 
         // Next, swap two witness transactions and ensure verification fails.
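
Putting the new signature together, in the style of the tests above (a fragment, not part of the diff; it assumes zero is Hash::default() as in the surrounding tests, and that the intended convention is has_more = true on every entry of a group except the last):

// Two entries that form one group: e0 is a continuation, e1 closes it.
let zero = Hash::default();
let keypair = KeyPair::new();
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);

let e0 = Entry::new(&zero, 0, vec![tx0.clone()], true); // has_more: more to come
let e1 = Entry::new(&e0.id, 0, vec![tx0], false);       // last entry of the group

// The flag is not hashed into `id`, so verification is unchanged by it.
assert!(e0.verify(&zero));
assert!(e1.verify(&e0.id));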

View File

@@ -73,7 +73,7 @@ pub fn next_entries_mut(
     transactions: Vec<Transaction>,
 ) -> Vec<Entry> {
     if transactions.is_empty() {
-        vec![Entry::new_mut(start_hash, cur_hashes, transactions)]
+        vec![Entry::new_mut(start_hash, cur_hashes, transactions, false)]
     } else {
         let mut chunk_len = transactions.len();
 
@@ -82,6 +82,7 @@ pub fn next_entries_mut(
             num_hashes: 0,
             id: Hash::default(),
             transactions: transactions[0..chunk_len].to_vec(),
+            has_more: false,
         }).unwrap() > BLOB_DATA_SIZE as u64
         {
             chunk_len /= 2;
@@ -90,7 +91,7 @@ pub fn next_entries_mut(
 
         let mut entries = Vec::with_capacity(transactions.len() / chunk_len + 1);
         for chunk in transactions.chunks(chunk_len) {
-            entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec()));
+            entries.push(Entry::new_mut(start_hash, cur_hashes, chunk.to_vec(), true));
         }
         entries
     }
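
The while loop above halves chunk_len until a trial Entry built from transactions[0..chunk_len] serializes under BLOB_DATA_SIZE, and each resulting chunk then becomes its own Entry (all currently flagged has_more = true). A rough standalone sketch of that halving, with a made-up per-transaction size and budget in place of bincode's serialized_size() and the real BLOB_DATA_SIZE:

// Illustrative version of the size-bounded halving in next_entries_mut.
const BLOB_DATA_BUDGET: usize = 1024; // stand-in for BLOB_DATA_SIZE

fn chunk_len_that_fits(num_txs: usize, bytes_per_tx: usize) -> usize {
    let mut chunk_len = num_txs;
    // Mirrors `while serialized_size(&Entry { .. }) > BLOB_DATA_SIZE { chunk_len /= 2 }`,
    // but with a flat per-transaction size instead of real serialization.
    while chunk_len * bytes_per_tx > BLOB_DATA_BUDGET {
        chunk_len /= 2;
    }
    chunk_len.max(1) // guard for this sketch; an oversized single tx would otherwise yield 0
}

fn main() {
    // 1000 transactions at ~100 bytes each: 1000 -> 500 -> ... -> 7 per chunk.
    let chunk_len = chunk_len_that_fits(1000, 100);
    assert_eq!(chunk_len, 7);
    // The chunked batch would then span ceil(1000 / 7) = 143 entries.
    assert_eq!((1000 + chunk_len - 1) / chunk_len, 143);
}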

View File

@@ -53,8 +53,8 @@ impl Mint {
     }
 
     pub fn create_entries(&self) -> Vec<Entry> {
-        let e0 = Entry::new(&self.seed(), 0, vec![]);
-        let e1 = Entry::new(&e0.id, 0, self.create_transactions());
+        let e0 = Entry::new(&self.seed(), 0, vec![], false);
+        let e1 = Entry::new(&e0.id, 0, self.create_transactions(), false);
         vec![e0, e1]
     }
 }

View File

@@ -39,6 +39,7 @@ impl Recorder {
                 &mut self.last_hash,
                 &mut self.num_hashes,
                 vec![],
+                false,
             ))
         } else {
             None

View File

@@ -210,7 +210,7 @@ pub mod tests {
         let transfer_amount = 501;
         let bob_keypair = KeyPair::new();
         for i in 0..num_transfers {
-            let entry0 = Entry::new(&cur_hash, i, vec![]);
+            let entry0 = Entry::new(&cur_hash, i, vec![], false);
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);
 
@@ -222,7 +222,7 @@ pub mod tests {
             );
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);
 
-            let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0]);
+            let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0], false);
             bank.register_entry_id(&cur_hash);
             cur_hash = hash(&cur_hash);