removes next_shred_index from return value of entries to shreds api (#21961) (#21980)

next-shred-index is already readily available from the returned data shreds.
This commit simplifies the api in preparation for upcoming changes to the
erasure-coding schema, which will require explicitly tracking indices for
coding shreds as well as data shreds.

(cherry picked from commit 89d66c3210)

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
commit e572678176 (parent f4521002b9)
Author: mergify[bot] <mergify[bot]@users.noreply.github.com>
Date:   2021-12-17 17:57:57 +00:00 (committed by GitHub)

9 changed files with 57 additions and 56 deletions
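
Note: as the StandardBroadcastRun hunk below shows, a caller that previously
consumed the returned index can recover it from the data shreds themselves.
A minimal sketch of that pattern, assuming data_shreds: Vec<Shred> and the
caller's prior next_shred_index: u32 as in the diff:

    // One past the highest index produced, falling back to the previous
    // value when no shreds were generated.
    let next_shred_index = match data_shreds.iter().map(Shred::index).max() {
        Some(index) => index + 1,
        None => next_shred_index,
    };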

@@ -154,7 +154,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
         )
         .expect("Expected to create a new shredder");
-        let (data_shreds, _, _) = shredder.entries_to_shreds(
+        let (data_shreds, _) = shredder.entries_to_shreds(
             keypair,
             &receive_results.entries,
             last_tick_height == bank.max_tick_height() && last_entries.is_none(),
@@ -163,10 +163,10 @@ impl BroadcastRun for BroadcastDuplicatesRun {
         self.next_shred_index += data_shreds.len() as u32;
         let last_shreds = last_entries.map(|(original_last_entry, duplicate_extra_last_entries)| {
-            let (original_last_data_shred, _, _) =
+            let (original_last_data_shred, _) =
                 shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index);
-            let (partition_last_data_shred, _, _) =
+            let (partition_last_data_shred, _) =
                 // Don't mark the last shred as last so that validators won't know that
                 // they've gotten all the shreds, and will continue trying to repair
                 shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index);
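
The same mechanical change repeats across the broadcast runs in this commit:
destructure two return values instead of three, and advance the index
explicitly. A consolidated sketch of the updated call shape (argument names
other than keypair and next_shred_index are assumed for illustration):

    // entries_to_shreds now returns only (data_shreds, coding_shreds).
    let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
        keypair,
        &entries,
        is_last_in_slot, // assumed name for the is-last flag seen above
        next_shred_index,
    );
    // The caller tracks the next data-shred index itself.
    let next_shred_index = next_shred_index + data_shreds.len() as u32;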

@@ -52,7 +52,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
         )
         .expect("Expected to create a new shredder");
-        let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(
+        let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
             keypair,
             &receive_results.entries,
             last_tick_height == bank.max_tick_height(),
@@ -69,7 +69,7 @@ impl BroadcastRun for BroadcastFakeShredsRun {
             .map(|_| Entry::new(&self.last_blockhash, 0, vec![]))
             .collect();
-        let (fake_data_shreds, fake_coding_shreds, _) = shredder.entries_to_shreds(
+        let (fake_data_shreds, fake_coding_shreds) = shredder.entries_to_shreds(
             keypair,
             &fake_entries,
             last_tick_height == bank.max_tick_height(),

@@ -83,7 +83,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         )
         .expect("Expected to create a new shredder");
-        let (data_shreds, _, _) = shredder.entries_to_shreds(
+        let (data_shreds, _) = shredder.entries_to_shreds(
             keypair,
             &receive_results.entries,
             last_tick_height == bank.max_tick_height() && last_entries.is_none(),
@@ -92,10 +92,10 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         self.next_shred_index += data_shreds.len() as u32;
         let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| {
-            let (good_last_data_shred, _, _) =
+            let (good_last_data_shred, _) =
                 shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index);
-            let (bad_last_data_shred, _, _) =
+            let (bad_last_data_shred, _) =
                 // Don't mark the last shred as last so that validators won't know that
                 // they've gotten all the shreds, and will continue trying to repair
                 shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index);

@@ -119,17 +119,16 @@ impl StandardBroadcastRun {
                 None => (0, 0),
             },
         };
-        let (data_shreds, next_shred_index) =
-            Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
-                .unwrap()
-                .entries_to_data_shreds(
-                    keypair,
-                    entries,
-                    is_slot_end,
-                    next_shred_index,
-                    fec_set_offset,
-                    process_stats,
-                );
+        let data_shreds = Shredder::new(slot, parent_slot, reference_tick, self.shred_version)
+            .unwrap()
+            .entries_to_data_shreds(
+                keypair,
+                entries,
+                is_slot_end,
+                next_shred_index,
+                fec_set_offset,
+                process_stats,
+            );
         let mut data_shreds_buffer = match &mut self.unfinished_slot {
             Some(state) => {
                 assert_eq!(state.slot, slot);
@@ -138,6 +137,10 @@ impl StandardBroadcastRun {
             None => Vec::default(),
         };
         data_shreds_buffer.extend(data_shreds.clone());
+        let next_shred_index = match data_shreds.iter().map(Shred::index).max() {
+            Some(index) => index + 1,
+            None => next_shred_index,
+        };
         self.unfinished_slot = Some(UnfinishedSlotInfo {
             next_shred_index,
             slot,