The indices for erasure coding shreds are tied to data shreds:
https://github.com/solana-labs/solana/blob/90f41fd9b/ledger/src/shred.rs#L921
However with the upcoming changes to erasure schema, there will be more
erasure coding shreds than data shreds and we can no longer infer coding
shreds indices from data shreds.
The commit adds constructs to track coding shreds indices explicitly.
(cherry picked from commit 65d59f4ef0)
Co-authored-by: behzad nouri <behzadnouri@gmail.com>
This commit is contained in:
@ -1652,8 +1652,13 @@ impl Blockstore {
|
||||
0
|
||||
}
|
||||
};
|
||||
let (mut data_shreds, mut coding_shreds) =
|
||||
shredder.entries_to_shreds(keypair, &current_entries, true, start_index);
|
||||
let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds(
|
||||
keypair,
|
||||
&current_entries,
|
||||
true, // is_last_in_slot
|
||||
start_index, // next_shred_index
|
||||
start_index, // next_code_index
|
||||
);
|
||||
all_shreds.append(&mut data_shreds);
|
||||
all_shreds.append(&mut coding_shreds);
|
||||
shredder = Shredder::new(
|
||||
@ -1672,8 +1677,13 @@ impl Blockstore {
|
||||
}
|
||||
|
||||
if !slot_entries.is_empty() {
|
||||
let (mut data_shreds, mut coding_shreds) =
|
||||
shredder.entries_to_shreds(keypair, &slot_entries, is_full_slot, 0);
|
||||
let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds(
|
||||
keypair,
|
||||
&slot_entries,
|
||||
is_full_slot,
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
all_shreds.append(&mut data_shreds);
|
||||
all_shreds.append(&mut coding_shreds);
|
||||
}
|
||||
@ -3572,7 +3582,13 @@ pub fn create_new_ledger(
|
||||
|
||||
let shredder = Shredder::new(0, 0, 0, version).unwrap();
|
||||
let shreds = shredder
|
||||
.entries_to_shreds(&Keypair::new(), &entries, true, 0)
|
||||
.entries_to_shreds(
|
||||
&Keypair::new(),
|
||||
&entries,
|
||||
true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
)
|
||||
.0;
|
||||
assert!(shreds.last().unwrap().last_in_slot());
|
||||
|
||||
@ -3801,7 +3817,13 @@ pub fn entries_to_test_shreds(
|
||||
) -> Vec<Shred> {
|
||||
Shredder::new(slot, parent_slot, 0, version)
|
||||
.unwrap()
|
||||
.entries_to_shreds(&Keypair::new(), &entries, is_full_slot, 0)
|
||||
.entries_to_shreds(
|
||||
&Keypair::new(),
|
||||
&entries,
|
||||
is_full_slot,
|
||||
0, // next_shred_index,
|
||||
0, // next_code_index
|
||||
)
|
||||
.0
|
||||
}
|
||||
|
||||
@ -8013,8 +8035,13 @@ pub mod tests {
|
||||
let entries = make_slot_entries_with_transactions(num_entries);
|
||||
let leader_keypair = Arc::new(Keypair::new());
|
||||
let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap();
|
||||
let (data_shreds, coding_shreds) =
|
||||
shredder.entries_to_shreds(&leader_keypair, &entries, true, 0);
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&leader_keypair,
|
||||
&entries,
|
||||
true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
|
||||
let genesis_config = create_genesis_config(2).genesis_config;
|
||||
let bank = Arc::new(Bank::new_for_tests(&genesis_config));
|
||||
@ -8069,8 +8096,20 @@ pub mod tests {
|
||||
let entries2 = make_slot_entries_with_transactions(1);
|
||||
let leader_keypair = Arc::new(Keypair::new());
|
||||
let shredder = Shredder::new(slot, 0, 0, 0).unwrap();
|
||||
let (shreds, _) = shredder.entries_to_shreds(&leader_keypair, &entries1, true, 0);
|
||||
let (duplicate_shreds, _) = shredder.entries_to_shreds(&leader_keypair, &entries2, true, 0);
|
||||
let (shreds, _) = shredder.entries_to_shreds(
|
||||
&leader_keypair,
|
||||
&entries1,
|
||||
true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index,
|
||||
);
|
||||
let (duplicate_shreds, _) = shredder.entries_to_shreds(
|
||||
&leader_keypair,
|
||||
&entries2,
|
||||
true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
let shred = shreds[0].clone();
|
||||
let duplicate_shred = duplicate_shreds[0].clone();
|
||||
let non_duplicate_shred = shred.clone();
|
||||
@ -8376,8 +8415,14 @@ pub mod tests {
|
||||
let ledger_path = get_tmp_ledger_path_auto_delete!();
|
||||
let blockstore = Blockstore::open(ledger_path.path()).unwrap();
|
||||
|
||||
let coding1 = Shredder::generate_coding_shreds(&shreds, false);
|
||||
let coding2 = Shredder::generate_coding_shreds(&shreds, true);
|
||||
let coding1 = Shredder::generate_coding_shreds(
|
||||
&shreds, false, // is_last_in_slot
|
||||
0, // next_code_index
|
||||
);
|
||||
let coding2 = Shredder::generate_coding_shreds(
|
||||
&shreds, true, // is_last_in_slot
|
||||
0, // next_code_index
|
||||
);
|
||||
for shred in &shreds {
|
||||
info!("shred {:?}", shred);
|
||||
}
|
||||
|
@ -69,11 +69,7 @@ use {
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signature, Signer},
|
||||
},
|
||||
std::{
|
||||
cell::RefCell,
|
||||
convert::{TryFrom, TryInto},
|
||||
mem::size_of,
|
||||
},
|
||||
std::{cell::RefCell, convert::TryInto, mem::size_of},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
@ -759,6 +755,7 @@ impl Shredder {
|
||||
entries: &[Entry],
|
||||
is_last_in_slot: bool,
|
||||
next_shred_index: u32,
|
||||
next_code_index: u32,
|
||||
) -> (
|
||||
Vec<Shred>, // data shreds
|
||||
Vec<Shred>, // coding shreds
|
||||
@ -772,9 +769,14 @@ impl Shredder {
|
||||
next_shred_index, // fec_set_offset
|
||||
&mut stats,
|
||||
);
|
||||
let coding_shreds =
|
||||
Self::data_shreds_to_coding_shreds(keypair, &data_shreds, is_last_in_slot, &mut stats)
|
||||
.unwrap();
|
||||
let coding_shreds = Self::data_shreds_to_coding_shreds(
|
||||
keypair,
|
||||
&data_shreds,
|
||||
is_last_in_slot,
|
||||
next_code_index,
|
||||
&mut stats,
|
||||
)
|
||||
.unwrap();
|
||||
(data_shreds, coding_shreds)
|
||||
}
|
||||
|
||||
@ -852,6 +854,7 @@ impl Shredder {
|
||||
keypair: &Keypair,
|
||||
data_shreds: &[Shred],
|
||||
is_last_in_slot: bool,
|
||||
next_code_index: u32,
|
||||
process_stats: &mut ProcessShredsStats,
|
||||
) -> Result<Vec<Shred>> {
|
||||
if data_shreds.is_empty() {
|
||||
@ -863,8 +866,26 @@ impl Shredder {
|
||||
thread_pool.borrow().install(|| {
|
||||
data_shreds
|
||||
.par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
|
||||
.flat_map(|shred_data_batch| {
|
||||
Shredder::generate_coding_shreds(shred_data_batch, is_last_in_slot)
|
||||
.enumerate()
|
||||
.flat_map(|(i, shred_data_batch)| {
|
||||
// Assumption here is that, for now, each fec block has
|
||||
// as many coding shreds as data shreds (except for the
|
||||
// last one in the slot).
|
||||
// TODO: tie this more closely with
|
||||
// generate_coding_shreds.
|
||||
let next_code_index = next_code_index
|
||||
.checked_add(
|
||||
u32::try_from(i)
|
||||
.unwrap()
|
||||
.checked_mul(MAX_DATA_SHREDS_PER_FEC_BLOCK)
|
||||
.unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
Shredder::generate_coding_shreds(
|
||||
shred_data_batch,
|
||||
is_last_in_slot,
|
||||
next_code_index,
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
@ -922,7 +943,11 @@ impl Shredder {
|
||||
}
|
||||
|
||||
/// Generates coding shreds for the data shreds in the current FEC set
|
||||
pub fn generate_coding_shreds(data: &[Shred], is_last_in_slot: bool) -> Vec<Shred> {
|
||||
pub fn generate_coding_shreds(
|
||||
data: &[Shred],
|
||||
is_last_in_slot: bool,
|
||||
next_code_index: u32,
|
||||
) -> Vec<Shred> {
|
||||
const PAYLOAD_ENCODE_SIZE: usize = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS;
|
||||
let ShredCommonHeader {
|
||||
slot,
|
||||
@ -958,9 +983,10 @@ impl Shredder {
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, parity)| {
|
||||
let index = next_code_index + u32::try_from(i).unwrap();
|
||||
let mut shred = Shred::new_empty_coding(
|
||||
slot,
|
||||
fec_set_index + i as u32, // shred index
|
||||
index,
|
||||
fec_set_index,
|
||||
num_data,
|
||||
num_coding,
|
||||
@ -1308,8 +1334,13 @@ pub mod tests {
|
||||
.saturating_sub(num_expected_data_shreds as usize)
|
||||
.max(num_expected_data_shreds as usize);
|
||||
let start_index = 0;
|
||||
let (data_shreds, coding_shreds) =
|
||||
shredder.entries_to_shreds(&keypair, &entries, true, start_index);
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair,
|
||||
&entries,
|
||||
true, // is_last_in_slot
|
||||
start_index, // next_shred_index
|
||||
start_index, // next_code_index
|
||||
);
|
||||
let next_index = data_shreds.last().unwrap().index() + 1;
|
||||
assert_eq!(next_index as u64, num_expected_data_shreds);
|
||||
|
||||
@ -1379,8 +1410,11 @@ pub mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
|
||||
|
||||
let (data_shreds, _) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
let deserialized_shred =
|
||||
Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
|
||||
assert_eq!(deserialized_shred, *data_shreds.last().unwrap());
|
||||
@ -1402,7 +1436,11 @@ pub mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
|
||||
let (data_shreds, _) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
data_shreds.iter().for_each(|s| {
|
||||
assert_eq!(s.reference_tick(), 5);
|
||||
assert_eq!(Shred::reference_tick_from_data(&s.payload), 5);
|
||||
@ -1429,7 +1467,11 @@ pub mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0;
|
||||
let (data_shreds, _) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
data_shreds.iter().for_each(|s| {
|
||||
assert_eq!(s.reference_tick(), SHRED_TICK_REFERENCE_MASK);
|
||||
assert_eq!(
|
||||
@ -1462,8 +1504,11 @@ pub mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 0);
|
||||
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
for (i, s) in data_shreds.iter().enumerate() {
|
||||
verify_test_data_shred(
|
||||
s,
|
||||
@ -1515,6 +1560,7 @@ pub mod tests {
|
||||
&entries,
|
||||
is_last_in_slot,
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
let num_coding_shreds = coding_shreds.len();
|
||||
|
||||
@ -1641,7 +1687,11 @@ pub mod tests {
|
||||
// Test5: Try recovery/reassembly with non zero index full slot with 3 missing data shreds
|
||||
// and 2 missing coding shreds. Hint: should work
|
||||
let serialized_entries = bincode::serialize(&entries).unwrap();
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 25);
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
25, // next_shred_index,
|
||||
25, // next_code_index
|
||||
);
|
||||
// We should have 10 shreds now
|
||||
assert_eq!(data_shreds.len(), num_data_shreds);
|
||||
|
||||
@ -1725,8 +1775,13 @@ pub mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
let next_shred_index = rng.gen_range(1, 1024);
|
||||
let (data_shreds, coding_shreds) =
|
||||
shredder.entries_to_shreds(&keypair, &[entry], is_last_in_slot, next_shred_index);
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair,
|
||||
&[entry],
|
||||
is_last_in_slot,
|
||||
next_shred_index,
|
||||
next_shred_index, // next_code_index
|
||||
);
|
||||
let num_data_shreds = data_shreds.len();
|
||||
let mut shreds = coding_shreds;
|
||||
shreds.extend(data_shreds.iter().cloned());
|
||||
@ -1779,7 +1834,11 @@ pub mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 0);
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
assert!(!data_shreds
|
||||
.iter()
|
||||
.chain(coding_shreds.iter())
|
||||
@ -1827,9 +1886,13 @@ pub mod tests {
|
||||
.collect();
|
||||
|
||||
let start_index = 0x12;
|
||||
let (data_shreds, coding_shreds) =
|
||||
shredder.entries_to_shreds(&keypair, &entries, true, start_index);
|
||||
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair,
|
||||
&entries,
|
||||
true, // is_last_in_slot
|
||||
start_index, // next_shred_index
|
||||
start_index, // next_code_index
|
||||
);
|
||||
let max_per_block = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
|
||||
data_shreds.iter().enumerate().for_each(|(i, s)| {
|
||||
let expected_fec_set_index = start_index + ((i / max_per_block) * max_per_block) as u32;
|
||||
@ -1874,12 +1937,14 @@ pub mod tests {
|
||||
);
|
||||
|
||||
assert!(data_shreds.len() > MAX_DATA_SHREDS_PER_FEC_BLOCK as usize);
|
||||
let next_code_index = data_shreds[0].index();
|
||||
|
||||
(1..=MAX_DATA_SHREDS_PER_FEC_BLOCK as usize).for_each(|count| {
|
||||
let coding_shreds = Shredder::data_shreds_to_coding_shreds(
|
||||
&keypair,
|
||||
&data_shreds[..count],
|
||||
false, // is_last_in_slot
|
||||
next_code_index,
|
||||
&mut stats,
|
||||
)
|
||||
.unwrap();
|
||||
@ -1888,6 +1953,7 @@ pub mod tests {
|
||||
&keypair,
|
||||
&data_shreds[..count],
|
||||
true, // is_last_in_slot
|
||||
next_code_index,
|
||||
&mut stats,
|
||||
)
|
||||
.unwrap();
|
||||
@ -1901,6 +1967,7 @@ pub mod tests {
|
||||
&keypair,
|
||||
&data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1],
|
||||
false, // is_last_in_slot
|
||||
next_code_index,
|
||||
&mut stats,
|
||||
)
|
||||
.unwrap();
|
||||
@ -1912,6 +1979,7 @@ pub mod tests {
|
||||
&keypair,
|
||||
&data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1],
|
||||
true, // is_last_in_slot
|
||||
next_code_index,
|
||||
&mut stats,
|
||||
)
|
||||
.unwrap();
|
||||
|
@ -48,7 +48,11 @@ fn test_multi_fec_block_coding() {
|
||||
.collect();
|
||||
|
||||
let serialized_entries = bincode::serialize(&entries).unwrap();
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(&keypair, &entries, true, 0);
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, true, // is_last_in_slot
|
||||
0, // next_shred_index
|
||||
0, // next_code_index
|
||||
);
|
||||
let next_index = data_shreds.last().unwrap().index() + 1;
|
||||
assert_eq!(next_index as usize, num_data_shreds);
|
||||
assert_eq!(data_shreds.len(), num_data_shreds);
|
||||
@ -218,8 +222,10 @@ fn setup_different_sized_fec_blocks(
|
||||
let total_num_data_shreds: usize = 2 * num_shreds_per_iter;
|
||||
for i in 0..2 {
|
||||
let is_last = i == 1;
|
||||
let (data_shreds, coding_shreds) =
|
||||
shredder.entries_to_shreds(&keypair, &entries, is_last, next_index);
|
||||
let (data_shreds, coding_shreds) = shredder.entries_to_shreds(
|
||||
&keypair, &entries, is_last, next_index, // next_shred_index
|
||||
next_index, // next_code_index
|
||||
);
|
||||
for shred in &data_shreds {
|
||||
if (shred.index() as usize) == total_num_data_shreds - 1 {
|
||||
assert!(shred.data_complete());
|
||||
|
Reference in New Issue
Block a user