From be0bcd85ed070b79c6a276d05c48f52b1972a3b2 Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Thu, 23 Dec 2021 19:38:50 +0000
Subject: [PATCH] tracks erasure coding shreds' indices explicitly (#21822) (#22094)

The indices for erasure coding shreds are tied to data shreds:
https://github.com/solana-labs/solana/blob/90f41fd9b/ledger/src/shred.rs#L921

However with the upcoming changes to erasure schema, there will be more
erasure coding shreds than data shreds and we can no longer infer coding
shreds indices from data shreds.

The commit adds constructs to track coding shreds indices explicitly.

(cherry picked from commit 65d59f4ef025410120e50129a4e97df2b6774f40)

Co-authored-by: behzad nouri
---
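Every BroadcastRun implementation touched below repeats the same caller-side
bookkeeping once entries_to_shreds takes the extra argument. A minimal sketch
of that pattern, assuming the imports already present in these files
(Shredder, Shred, Keypair, Entry); advance_indices is an illustrative name,
not a function in the patch:

    // Illustrative sketch of the bookkeeping each BroadcastRun repeats.
    fn advance_indices(
        shredder: &Shredder,
        keypair: &Keypair,
        entries: &[Entry],
        is_last_in_slot: bool,
        next_shred_index: &mut u32,
        next_code_index: &mut u32,
    ) {
        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
            keypair,
            entries,
            is_last_in_slot,
            *next_shred_index,
            *next_code_index,
        );
        // Data shred indices stay contiguous, so the count suffices.
        *next_shred_index += data_shreds.len() as u32;
        // Coding shreds may soon outnumber data shreds, so advance the
        // coding index from the generated shreds themselves instead of
        // inferring it from data_shreds.len().
        if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
            *next_code_index = index + 1;
        }
    }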
 core/benches/retransmit_stage.rs              |  6 +-
 core/benches/shredder.rs                      |  8 +-
 core/src/broadcast_stage.rs                   |  1 +
 .../broadcast_duplicates_run.rs               | 13 +-
 .../broadcast_fake_shreds_run.rs              | 13 ++
 core/src/broadcast_stage/broadcast_utils.rs   |  1 +
 .../fail_entry_verification_broadcast_run.rs  | 13 +-
 .../broadcast_stage/standard_broadcast_run.rs | 55 +++++---
 core/src/shred_fetch_stage.rs                 |  1 +
 core/src/window_service.rs                    |  7 +-
 gossip/src/duplicate_shred.rs                 |  1 +
 ledger/src/blockstore.rs                      | 69 ++++++++--
 ledger/src/shred.rs                           | 122 ++++++++++++++----
 ledger/tests/shred.rs                         | 12 +-
 14 files changed, 253 insertions(+), 69 deletions(-)

diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs
index 59afa0ada5..3ae1b60598 100644
--- a/core/benches/retransmit_stage.rs
+++ b/core/benches/retransmit_stage.rs
@@ -100,7 +100,11 @@ fn bench_retransmitter(bencher: &mut Bencher) {
     let slot = 0;
     let parent = 0;
     let shredder = Shredder::new(slot, parent, 0, 0).unwrap();
-    let mut data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
+    let (mut data_shreds, _) = shredder.entries_to_shreds(
+        &keypair, &entries, true, // is_last_in_slot
+        0, // next_shred_index
+        0, // next_code_index
+    );
 
     let num_packets = data_shreds.len();
 
diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs
index 53353b9b9d..565f8ced2d 100644
--- a/core/benches/shredder.rs
+++ b/core/benches/shredder.rs
@@ -74,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
     let entries = create_ticks(num_ticks, 0, Hash::default());
     bencher.iter(|| {
         let shredder = Shredder::new(1, 0, 0, 0).unwrap();
-        shredder.entries_to_shreds(&kp, &entries, true, 0);
+        shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
     })
 }
 
@@ -93,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
     // 1Mb
     bencher.iter(|| {
         let shredder = Shredder::new(1, 0, 0, 0).unwrap();
-        shredder.entries_to_shreds(&kp, &entries, true, 0);
+        shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
     })
 }
 
@@ -106,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
     let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
     let entries = create_ticks(num_ticks, 0, Hash::default());
     let shredder = Shredder::new(1, 0, 0, 0).unwrap();
-    let data_shreds = shredder.entries_to_shreds(&kp, &entries, true, 0).0;
+    let (data_shreds, _) = shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
     bencher.iter(|| {
         let raw = &mut Shredder::deshred(&data_shreds).unwrap();
         assert_ne!(raw.len(), 0);
@@ -133,6 +133,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
         Shredder::generate_coding_shreds(
             &data_shreds[..symbol_count],
             true, // is_last_in_slot
+            0, // next_code_index
         )
         .len();
     })
@@ -145,6 +146,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
     let coding_shreds = Shredder::generate_coding_shreds(
         &data_shreds[..symbol_count],
         true, // is_last_in_slot
+        0, // next_code_index
     );
     bencher.iter(|| {
         Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap();
diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs
index 388f3e6168..9fba19ba33 100644
--- a/core/src/broadcast_stage.rs
+++ b/core/src/broadcast_stage.rs
@@ -496,6 +496,7 @@ pub mod test {
                 &keypair,
                 &data_shreds[0..],
                 true, // is_last_in_slot
+                0, // next_code_index
                 &mut ProcessShredsStats::default(),
             )
             .unwrap();
diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs
index 02c42a7769..ea39323f96 100644
--- a/core/src/broadcast_stage/broadcast_duplicates_run.rs
+++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs
@@ -28,6 +28,7 @@ pub(super) struct BroadcastDuplicatesRun {
     config: BroadcastDuplicatesConfig,
     current_slot: Slot,
     next_shred_index: u32,
+    next_code_index: u32,
     shred_version: u16,
     recent_blockhash: Option<Hash>,
     prev_entry_hash: Option<Hash>,
@@ -46,6 +47,7 @@ impl BroadcastDuplicatesRun {
         Self {
             config,
             next_shred_index: u32::MAX,
+            next_code_index: 0,
             shred_version,
             current_slot: 0,
             recent_blockhash: None,
@@ -74,6 +76,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
 
         if bank.slot() != self.current_slot {
             self.next_shred_index = 0;
+            self.next_code_index = 0;
             self.current_slot = bank.slot();
             self.prev_entry_hash = None;
             self.num_slots_broadcasted += 1;
@@ -154,22 +157,26 @@ impl BroadcastRun for BroadcastDuplicatesRun {
             )
             .expect("Expected to create a new shredder");
 
-        let (data_shreds, _) = shredder.entries_to_shreds(
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
             keypair,
             &receive_results.entries,
             last_tick_height == bank.max_tick_height() && last_entries.is_none(),
             self.next_shred_index,
+            self.next_code_index,
         );
 
         self.next_shred_index += data_shreds.len() as u32;
+        if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
+            self.next_code_index = index + 1;
+        }
         let last_shreds = last_entries.map(|(original_last_entry, duplicate_extra_last_entries)| {
             let (original_last_data_shred, _) =
-                shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index);
+                shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index, self.next_code_index);
 
             let (partition_last_data_shred, _) =
                 // Don't mark the last shred as last so that validators won't know that
                 // they've gotten all the shreds, and will continue trying to repair
-                shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index);
+                shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index, self.next_code_index);
 
             let sigs: Vec<_> = partition_last_data_shred.iter().map(|s| (s.signature(), s.index())).collect();
             info!(
diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
index 7dd8b3322d..a0bf77153a 100644
--- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
+++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
@@ -10,6 +10,7 @@ pub(super) struct BroadcastFakeShredsRun {
     last_blockhash: Hash,
     partition: usize,
     shred_version: u16,
+    next_code_index: u32,
 }
 
 impl BroadcastFakeShredsRun {
@@ -18,6 +19,7 @@ impl BroadcastFakeShredsRun {
             last_blockhash: Hash::default(),
             partition,
             shred_version,
+            next_code_index: 0,
         }
     }
 }
@@ -57,6 +59,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
             &receive_results.entries,
             last_tick_height == bank.max_tick_height(),
             next_shred_index,
+            self.next_code_index,
         );
 
         // If the last blockhash is default, a new block is being created
@@ -74,8 +77,18 @@ impl BroadcastRun for BroadcastFakeShredsRun {
             &fake_entries,
             last_tick_height == bank.max_tick_height(),
             next_shred_index,
+            self.next_code_index,
         );
 
+        if let Some(index) = coding_shreds
+            .iter()
+            .chain(&fake_coding_shreds)
+            .map(Shred::index)
+            .max()
+        {
+            self.next_code_index = index + 1;
+        }
+
         // If it's the last tick, reset the last block hash to default
         // this will cause next run to grab last bank's blockhash
         if last_tick_height == bank.max_tick_height() {
diff --git a/core/src/broadcast_stage/broadcast_utils.rs b/core/src/broadcast_stage/broadcast_utils.rs
index 0e76de935b..902b5672d7 100644
--- a/core/src/broadcast_stage/broadcast_utils.rs
+++ b/core/src/broadcast_stage/broadcast_utils.rs
@@ -21,6 +21,7 @@ pub(super) struct ReceiveResults {
 #[derive(Clone)]
 pub struct UnfinishedSlotInfo {
     pub next_shred_index: u32,
+    pub(crate) next_code_index: u32,
     pub slot: Slot,
     pub parent: Slot,
     // Data shreds buffered to make a batch of size
diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
index b8fbcdc95b..5a65653d60 100644
--- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
+++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
@@ -15,6 +15,7 @@ pub(super) struct FailEntryVerificationBroadcastRun {
     good_shreds: Vec<Shred>,
     current_slot: Slot,
     next_shred_index: u32,
+    next_code_index: u32,
     cluster_nodes_cache: Arc<ClusterNodesCache<BroadcastStage>>,
 }
 
@@ -29,6 +30,7 @@ impl FailEntryVerificationBroadcastRun {
             good_shreds: vec![],
             current_slot: 0,
             next_shred_index: 0,
+            next_code_index: 0,
             cluster_nodes_cache,
         }
     }
@@ -50,6 +52,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
 
         if bank.slot() != self.current_slot {
             self.next_shred_index = 0;
+            self.next_code_index = 0;
             self.current_slot = bank.slot();
         }
 
@@ -83,22 +86,26 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
             )
             .expect("Expected to create a new shredder");
 
-        let (data_shreds, _) = shredder.entries_to_shreds(
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
             keypair,
             &receive_results.entries,
             last_tick_height == bank.max_tick_height() && last_entries.is_none(),
             self.next_shred_index,
+            self.next_code_index,
        );
 
         self.next_shred_index += data_shreds.len() as u32;
+        if let Some(index) = coding_shreds.iter().map(Shred::index).max() {
+            self.next_code_index = index + 1;
+        }
         let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| {
             let (good_last_data_shred, _) =
-                shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index);
+                shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index, self.next_code_index);
 
             let (bad_last_data_shred, _) =
                 // Don't mark the last shred as last so that validators won't know that
                 // they've gotten all the shreds, and will continue trying to repair
-                shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index);
+                shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index, self.next_code_index);
 
             self.next_shred_index += 1;
             (good_last_data_shred, bad_last_data_shred)
diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs
index 454f29c3de..1788b19e86 100644
--- a/core/src/broadcast_stage/standard_broadcast_run.rs
+++ b/core/src/broadcast_stage/standard_broadcast_run.rs
@@ -141,8 +141,13 @@ impl StandardBroadcastRun {
             Some(index) => index + 1,
             None => next_shred_index,
         };
+        let next_code_index = match &self.unfinished_slot {
+            Some(state) => state.next_code_index,
+            None => 0,
+        };
         self.unfinished_slot = Some(UnfinishedSlotInfo {
             next_shred_index,
+            next_code_index,
             slot,
             parent: parent_slot,
             data_shreds_buffer,
@@ -449,23 +454,40 @@ fn make_coding_shreds(
     is_slot_end: bool,
     stats: &mut ProcessShredsStats,
 ) -> Vec<Shred> {
-    let data_shreds = match unfinished_slot {
-        None => Vec::default(),
-        Some(unfinished_slot) => {
-            let size = unfinished_slot.data_shreds_buffer.len();
-            // Consume a multiple of 32, unless this is the slot end.
-            let offset = if is_slot_end {
-                0
-            } else {
-                size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize
-            };
-            unfinished_slot
-                .data_shreds_buffer
-                .drain(0..size - offset)
-                .collect()
-        }
+    let unfinished_slot = match unfinished_slot {
+        None => return Vec::default(),
+        Some(state) => state,
     };
-    Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, is_slot_end, stats).unwrap()
+    let data_shreds: Vec<_> = {
+        let size = unfinished_slot.data_shreds_buffer.len();
+        // Consume a multiple of 32, unless this is the slot end.
+        let offset = if is_slot_end {
+            0
+        } else {
+            size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize
+        };
+        unfinished_slot
+            .data_shreds_buffer
+            .drain(0..size - offset)
+            .collect()
+    };
+    let shreds = Shredder::data_shreds_to_coding_shreds(
+        keypair,
+        &data_shreds,
+        is_slot_end,
+        unfinished_slot.next_code_index,
+        stats,
+    )
+    .unwrap();
+    if let Some(index) = shreds
+        .iter()
+        .filter(|shred| shred.is_code())
+        .map(Shred::index)
+        .max()
+    {
+        unfinished_slot.next_code_index = unfinished_slot.next_code_index.max(index + 1);
+    }
+    shreds
 }
 
 impl BroadcastRun for StandardBroadcastRun {
@@ -582,6 +604,7 @@ mod test {
         let parent = 0;
         run.unfinished_slot = Some(UnfinishedSlotInfo {
             next_shred_index,
+            next_code_index: 17,
             slot,
             parent,
             data_shreds_buffer: Vec::default(),
diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs
index a580561870..8554116dda 100644
--- a/core/src/shred_fetch_stage.rs
+++ b/core/src/shred_fetch_stage.rs
@@ -270,6 +270,7 @@ mod tests {
         let coding = solana_ledger::shred::Shredder::generate_coding_shreds(
             &[shred],
             false, // is_last_in_slot
+            3, // next_code_index
         );
         coding[0].copy_to_packet(&mut packet);
         ShredFetchStage::process_packet(
diff --git a/core/src/window_service.rs b/core/src/window_service.rs
index fe41fb5ebe..47ae77ac7c 100644
--- a/core/src/window_service.rs
+++ b/core/src/window_service.rs
@@ -736,7 +736,12 @@ mod test {
         keypair: &Keypair,
     ) -> Vec<Shred> {
         let shredder = Shredder::new(slot, parent, 0, 0).unwrap();
-        shredder.entries_to_shreds(keypair, entries, true, 0).0
+        let (data_shreds, _) = shredder.entries_to_shreds(
+            keypair, entries, true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
+        data_shreds
     }
 
     #[test]
diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs
index d2da9dbe43..9a8d0437dc 100644
--- a/gossip/src/duplicate_shred.rs
+++ b/gossip/src/duplicate_shred.rs
@@ -341,6 +341,7 @@ pub(crate) mod tests {
             &entries,
             true, // is_last_in_slot
             next_shred_index,
+            next_shred_index, // next_code_index
         );
         data_shreds.swap_remove(0)
     }
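The subtlest consumer is make_coding_shreds in standard_broadcast_run.rs
above: mid-slot it only erasure-codes complete FEC sets of 32 data shreds and
leaves any remainder buffered, which is why next_code_index has to persist in
UnfinishedSlotInfo across batches. A condensed model of that drain rule,
assuming MAX_DATA_SHREDS_PER_FEC_BLOCK is 32 as in the patch; SlotState and
drain_len are illustrative names, not from the patch:

    // Condensed model of the data_shreds_buffer drain in make_coding_shreds.
    const FEC_BLOCK: usize = 32; // stand-in for MAX_DATA_SHREDS_PER_FEC_BLOCK

    struct SlotState {
        buffered: usize, // mirrors data_shreds_buffer.len()
    }

    impl SlotState {
        // How many buffered data shreds get erasure-coded right now.
        fn drain_len(&self, is_slot_end: bool) -> usize {
            let keep = if is_slot_end {
                0 // slot end: code everything, even a short final FEC set
            } else {
                self.buffered % FEC_BLOCK // hold back the partial FEC set
            };
            self.buffered - keep
        }
    }

Mid-slot with 75 buffered data shreds this drains 64 and keeps 11 for the
next batch; at slot end it drains all 75, which is why only the last FEC set
of a slot may be short.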
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 9e91d7b61a..9c3c527d3d 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -1652,8 +1652,13 @@ impl Blockstore {
                     0
                 }
             };
-            let (mut data_shreds, mut coding_shreds) =
-                shredder.entries_to_shreds(keypair, &current_entries, true, start_index);
+            let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds(
+                keypair,
+                &current_entries,
+                true, // is_last_in_slot
+                start_index, // next_shred_index
+                start_index, // next_code_index
+            );
             all_shreds.append(&mut data_shreds);
             all_shreds.append(&mut coding_shreds);
             shredder = Shredder::new(
@@ -1672,8 +1677,13 @@ impl Blockstore {
         }
 
         if !slot_entries.is_empty() {
-            let (mut data_shreds, mut coding_shreds) =
-                shredder.entries_to_shreds(keypair, &slot_entries, is_full_slot, 0);
+            let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds(
+                keypair,
+                &slot_entries,
+                is_full_slot,
+                0, // next_shred_index
+                0, // next_code_index
+            );
             all_shreds.append(&mut data_shreds);
             all_shreds.append(&mut coding_shreds);
         }
@@ -3572,7 +3582,13 @@ pub fn create_new_ledger(
 
     let shredder = Shredder::new(0, 0, 0, version).unwrap();
     let shreds = shredder
-        .entries_to_shreds(&Keypair::new(), &entries, true, 0)
+        .entries_to_shreds(
+            &Keypair::new(),
+            &entries,
+            true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        )
         .0;
     assert!(shreds.last().unwrap().last_in_slot());
 
@@ -3801,7 +3817,13 @@ pub fn entries_to_test_shreds(
 ) -> Vec<Shred> {
     Shredder::new(slot, parent_slot, 0, version)
         .unwrap()
-        .entries_to_shreds(&Keypair::new(), &entries, is_full_slot, 0)
+        .entries_to_shreds(
+            &Keypair::new(),
+            &entries,
+            is_full_slot,
+            0, // next_shred_index,
+            0, // next_code_index
+        )
         .0
 }
 
@@ -8013,8 +8035,13 @@ pub mod tests {
         let entries = make_slot_entries_with_transactions(num_entries);
         let leader_keypair = Arc::new(Keypair::new());
         let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap();
-        let (data_shreds, coding_shreds) =
-            shredder.entries_to_shreds(&leader_keypair, &entries, true, 0);
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &leader_keypair,
+            &entries,
+            true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
         let genesis_config = create_genesis_config(2).genesis_config;
         let bank = Arc::new(Bank::new_for_tests(&genesis_config));
 
@@ -8069,8 +8096,20 @@ pub mod tests {
         let entries2 = make_slot_entries_with_transactions(1);
         let leader_keypair = Arc::new(Keypair::new());
         let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
-        let (shreds, _) = shredder.entries_to_shreds(&leader_keypair, &entries1, true, 0);
-        let (duplicate_shreds, _) = shredder.entries_to_shreds(&leader_keypair, &entries2, true, 0);
+        let (shreds, _) = shredder.entries_to_shreds(
+            &leader_keypair,
+            &entries1,
+            true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index,
+        );
+        let (duplicate_shreds, _) = shredder.entries_to_shreds(
+            &leader_keypair,
+            &entries2,
+            true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
         let shred = shreds[0].clone();
         let duplicate_shred = duplicate_shreds[0].clone();
         let non_duplicate_shred = shred.clone();
@@ -8376,8 +8415,14 @@ pub mod tests {
         let ledger_path = get_tmp_ledger_path_auto_delete!();
         let blockstore = Blockstore::open(ledger_path.path()).unwrap();
 
-        let coding1 = Shredder::generate_coding_shreds(&shreds, false);
-        let coding2 = Shredder::generate_coding_shreds(&shreds, true);
+        let coding1 = Shredder::generate_coding_shreds(
+            &shreds, false, // is_last_in_slot
+            0, // next_code_index
+        );
+        let coding2 = Shredder::generate_coding_shreds(
+            &shreds, true, // is_last_in_slot
+            0, // next_code_index
+        );
         for shred in &shreds {
             info!("shred {:?}", shred);
         }
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index 976bf1d518..d984c850c9 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -69,11 +69,7 @@ use {
         pubkey::Pubkey,
         signature::{Keypair, Signature, Signer},
     },
-    std::{
-        cell::RefCell,
-        convert::{TryFrom, TryInto},
-        mem::size_of,
-    },
+    std::{cell::RefCell, convert::TryInto, mem::size_of},
     thiserror::Error,
 };
 
@@ -759,6 +755,7 @@ impl Shredder {
         entries: &[Entry],
         is_last_in_slot: bool,
         next_shred_index: u32,
+        next_code_index: u32,
     ) -> (
         Vec<Shred>, // data shreds
         Vec<Shred>, // coding shreds
@@ -772,9 +769,14 @@ impl Shredder {
             next_shred_index, // fec_set_offset
             &mut stats,
         );
-        let coding_shreds =
-            Self::data_shreds_to_coding_shreds(keypair, &data_shreds, is_last_in_slot, &mut stats)
-                .unwrap();
+        let coding_shreds = Self::data_shreds_to_coding_shreds(
+            keypair,
+            &data_shreds,
+            is_last_in_slot,
+            next_code_index,
+            &mut stats,
+        )
+        .unwrap();
         (data_shreds, coding_shreds)
     }
 
@@ -852,6 +854,7 @@ impl Shredder {
         keypair: &Keypair,
         data_shreds: &[Shred],
         is_last_in_slot: bool,
+        next_code_index: u32,
         process_stats: &mut ProcessShredsStats,
     ) -> Result<Vec<Shred>> {
         if data_shreds.is_empty() {
@@ -863,8 +866,26 @@ impl Shredder {
         thread_pool.borrow().install(|| {
             data_shreds
                 .par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
-                .flat_map(|shred_data_batch| {
-                    Shredder::generate_coding_shreds(shred_data_batch, is_last_in_slot)
+                .enumerate()
+                .flat_map(|(i, shred_data_batch)| {
+                    // Assumption here is that, for now, each fec block has
+                    // as many coding shreds as data shreds (except for the
+                    // last one in the slot).
+                    // TODO: tie this more closely with
+                    // generate_coding_shreds.
+                    let next_code_index = next_code_index
+                        .checked_add(
+                            u32::try_from(i)
+                                .unwrap()
+                                .checked_mul(MAX_DATA_SHREDS_PER_FEC_BLOCK)
+                                .unwrap(),
+                        )
+                        .unwrap();
+                    Shredder::generate_coding_shreds(
+                        shred_data_batch,
+                        is_last_in_slot,
+                        next_code_index,
+                    )
                 })
                 .collect()
         })
@@ -922,7 +943,11 @@ impl Shredder {
     }
 
     /// Generates coding shreds for the data shreds in the current FEC set
-    pub fn generate_coding_shreds(data: &[Shred], is_last_in_slot: bool) -> Vec<Shred> {
+    pub fn generate_coding_shreds(
+        data: &[Shred],
+        is_last_in_slot: bool,
+        next_code_index: u32,
+    ) -> Vec<Shred> {
         const PAYLOAD_ENCODE_SIZE: usize = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS;
         let ShredCommonHeader {
             slot,
@@ -958,9 +983,10 @@ impl Shredder {
             .iter()
             .enumerate()
             .map(|(i, parity)| {
+                let index = next_code_index + u32::try_from(i).unwrap();
                 let mut shred = Shred::new_empty_coding(
                     slot,
-                    fec_set_index + i as u32, // shred index
+                    index,
                     fec_set_index,
                     num_data,
                     num_coding,
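The checked arithmetic in data_shreds_to_coding_shreds above leans on the
stated assumption that every non-final FEC set yields exactly
MAX_DATA_SHREDS_PER_FEC_BLOCK coding shreds, so parallel chunk i can start
numbering at a fixed offset without seeing its neighbors. A worked example of
just that offset computation; chunk_code_index is a hypothetical helper, not
a function in the patch:

    // Reproduces the per-chunk offset computed in the flat_map above.
    const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32;

    fn chunk_code_index(next_code_index: u32, chunk: u32) -> u32 {
        next_code_index
            .checked_add(chunk.checked_mul(MAX_DATA_SHREDS_PER_FEC_BLOCK).unwrap())
            .unwrap()
    }

    fn main() {
        // With next_code_index = 64, chunks 0, 1, 2 hand their first
        // coding index to generate_coding_shreds as 64, 96, 128.
        assert_eq!(chunk_code_index(64, 0), 64);
        assert_eq!(chunk_code_index(64, 1), 96);
        assert_eq!(chunk_code_index(64, 2), 128);
    }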
@@ -1308,8 +1334,13 @@ pub mod tests {
             .saturating_sub(num_expected_data_shreds as usize)
             .max(num_expected_data_shreds as usize);
         let start_index = 0;
-        let (data_shreds, coding_shreds) =
-            shredder.entries_to_shreds(&keypair, &entries, true, start_index);
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &keypair,
+            &entries,
+            true, // is_last_in_slot
+            start_index, // next_shred_index
+            start_index, // next_code_index
+        );
         let next_index = data_shreds.last().unwrap().index() + 1;
         assert_eq!(next_index as u64, num_expected_data_shreds);
 
@@ -1379,8 +1410,11 @@ pub mod tests {
             })
             .collect();
 
-        let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
-
+        let (data_shreds, _) = shredder.entries_to_shreds(
+            &keypair, &entries, true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
         let deserialized_shred =
             Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
         assert_eq!(deserialized_shred, *data_shreds.last().unwrap());
@@ -1402,7 +1436,11 @@ pub mod tests {
             })
             .collect();
 
-        let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
+        let (data_shreds, _) = shredder.entries_to_shreds(
+            &keypair, &entries, true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
         data_shreds.iter().for_each(|s| {
             assert_eq!(s.reference_tick(), 5);
             assert_eq!(Shred::reference_tick_from_data(&s.payload), 5);
@@ -1429,7 +1467,11 @@ pub mod tests {
             })
             .collect();
 
-        let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
+        let (data_shreds, _) = shredder.entries_to_shreds(
+            &keypair, &entries, true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
         data_shreds.iter().for_each(|s| {
             assert_eq!(s.reference_tick(), SHRED_TICK_REFERENCE_MASK);
             assert_eq!(
@@ -1462,8 +1504,11 @@ pub mod tests {
             })
             .collect();
 
-        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 0);
-
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &keypair, &entries, true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
         for (i, s) in data_shreds.iter().enumerate() {
             verify_test_data_shred(
                 s,
@@ -1515,6 +1560,7 @@ pub mod tests {
             &entries,
             is_last_in_slot,
             0, // next_shred_index
+            0, // next_code_index
         );
         let num_coding_shreds = coding_shreds.len();
 
@@ -1641,7 +1687,11 @@ pub mod tests {
         // Test5: Try recovery/reassembly with non zero index full slot with 3 missing data shreds
         // and 2 missing coding shreds. Hint: should work
         let serialized_entries = bincode::serialize(&entries).unwrap();
-        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 25);
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &keypair, &entries, true, // is_last_in_slot
+            25, // next_shred_index,
+            25, // next_code_index
+        );
         // We should have 10 shreds now
         assert_eq!(data_shreds.len(), num_data_shreds);
 
@@ -1725,8 +1775,13 @@ pub mod tests {
         )
         .unwrap();
         let next_shred_index = rng.gen_range(1, 1024);
-        let (data_shreds, coding_shreds) =
-            shredder.entries_to_shreds(&keypair, &[entry], is_last_in_slot, next_shred_index);
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &keypair,
+            &[entry],
+            is_last_in_slot,
+            next_shred_index,
+            next_shred_index, // next_code_index
+        );
         let num_data_shreds = data_shreds.len();
         let mut shreds = coding_shreds;
         shreds.extend(data_shreds.iter().cloned());
@@ -1779,7 +1834,11 @@ pub mod tests {
             })
             .collect();
 
-        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 0);
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &keypair, &entries, true, // is_last_in_slot
+            0, // next_shred_index
+            0, // next_code_index
+        );
         assert!(!data_shreds
             .iter()
             .chain(coding_shreds.iter())
@@ -1827,9 +1886,13 @@ pub mod tests {
             .collect();
 
         let start_index = 0x12;
-        let (data_shreds, coding_shreds) =
-            shredder.entries_to_shreds(&keypair, &entries, true, start_index);
-
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &keypair,
+            &entries,
+            true, // is_last_in_slot
+            start_index, // next_shred_index
+            start_index, // next_code_index
+        );
         let max_per_block = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
         data_shreds.iter().enumerate().for_each(|(i, s)| {
             let expected_fec_set_index = start_index + ((i / max_per_block) * max_per_block) as u32;
@@ -1874,12 +1937,14 @@ pub mod tests {
         );
         assert!(data_shreds.len() > MAX_DATA_SHREDS_PER_FEC_BLOCK as usize);
 
+        let next_code_index = data_shreds[0].index();
         (1..=MAX_DATA_SHREDS_PER_FEC_BLOCK as usize).for_each(|count| {
             let coding_shreds = Shredder::data_shreds_to_coding_shreds(
                 &keypair,
                 &data_shreds[..count],
                 false, // is_last_in_slot
+                next_code_index,
                 &mut stats,
             )
             .unwrap();
@@ -1888,6 +1953,7 @@ pub mod tests {
                 &keypair,
                 &data_shreds[..count],
                 true, // is_last_in_slot
+                next_code_index,
                 &mut stats,
             )
             .unwrap();
@@ -1901,6 +1967,7 @@ pub mod tests {
             &keypair,
             &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1],
             false, // is_last_in_slot
+            next_code_index,
             &mut stats,
         )
         .unwrap();
@@ -1912,6 +1979,7 @@ pub mod tests {
             &keypair,
             &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1],
             true, // is_last_in_slot
+            next_code_index,
             &mut stats,
         )
         .unwrap();
diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs
index 6fcdc7443c..7a1cf976d8 100644
--- a/ledger/tests/shred.rs
+++ b/ledger/tests/shred.rs
@@ -48,7 +48,11 @@ fn test_multi_fec_block_coding() {
         .collect();
 
     let serialized_entries = bincode::serialize(&entries).unwrap();
-    let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 0);
+    let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+        &keypair, &entries, true, // is_last_in_slot
+        0, // next_shred_index
+        0, // next_code_index
+    );
     let next_index = data_shreds.last().unwrap().index() + 1;
     assert_eq!(next_index as usize, num_data_shreds);
     assert_eq!(data_shreds.len(), num_data_shreds);
@@ -218,8 +222,10 @@ fn setup_different_sized_fec_blocks(
     let total_num_data_shreds: usize = 2 * num_shreds_per_iter;
     for i in 0..2 {
         let is_last = i == 1;
-        let (data_shreds, coding_shreds) =
-            shredder.entries_to_shreds(&keypair, &entries, is_last, next_index);
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
+            &keypair, &entries, is_last, next_index, // next_shred_index
+            next_index, // next_code_index
+        );
         for shred in &data_shreds {
             if (shred.index() as usize) == total_num_data_shreds - 1 {
                 assert!(shred.data_complete());