expands number of erasure coding shreds in the last batch in slots (#16484)

The number of parity coding shreds is always less than the number of data
shreds in FEC blocks:
https://github.com/solana-labs/solana/blob/6907a2366/ledger/src/shred.rs#L719

Data shreds are batched in chunks of 32 shreds each:
https://github.com/solana-labs/solana/blob/6907a2366/ledger/src/shred.rs#L714

However, the very last batch of data shreds in a slot can contain fewer than
32 shreds, in which case the impact of shred loss is exacerbated.

This commit expands the number of coding shreds in the last FEC block of a
slot to 64 minus the number of data shreds, so that each FEC block always
totals 64 data and parity coding shreds.

As a consequence, the last FEC block has more parity coding shreds than data
shreds, so for some shred indices there will be a coding shred but no
corresponding data shred. This should not cause overlapping FEC blocks as in:
https://github.com/solana-labs/solana/pull/10095
because the expansion applies only to the very last batch in a slot, and the
next slot resets the shred index.
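To make the arithmetic concrete, here is a minimal sketch of how the per-batch parity count could be derived. The constant mirrors MAX_DATA_SHREDS_PER_FEC_BLOCK (32), but the helper name and signature are illustrative only, not the actual code in ledger/src/shred.rs:

    // Illustrative sketch only; not the upstream shred.rs implementation.
    const MAX_DATA_SHREDS_PER_FEC_BLOCK: usize = 32;

    // Hypothetical helper: number of parity coding shreds for a batch of
    // `num_data` data shreds.
    fn num_coding_shreds(num_data: usize, is_last_in_slot: bool) -> usize {
        assert!(0 < num_data && num_data <= MAX_DATA_SHREDS_PER_FEC_BLOCK);
        if is_last_in_slot {
            // Pad the final batch so data + coding always totals 64 shreds.
            2 * MAX_DATA_SHREDS_PER_FEC_BLOCK - num_data
        } else {
            // Interior batches are full: 32 data + 32 coding shreds.
            num_data
        }
    }

For example, num_coding_shreds(10, true) yields 54, which is how the last FEC block can end up with more coding shreds than data shreds.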
Author:    behzad nouri
Date:      2021-04-21 12:47:50 +00:00
Committer: GitHub
Parent:    ba2be0ca34
Commit:    37b8587d4e
14 changed files with 243 additions and 262 deletions


@@ -81,8 +81,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
     let keypair = Arc::new(Keypair::new());
     let slot = 0;
     let parent = 0;
-    let shredder =
-        Shredder::new(slot, parent, 0.0, keypair, 0, 0).expect("Failed to create entry shredder");
+    let shredder = Shredder::new(slot, parent, keypair, 0, 0).unwrap();
     let mut data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
     let num_packets = data_shreds.len();


@@ -8,8 +8,8 @@ use raptorq::{Decoder, Encoder};
 use solana_ledger::entry::{create_ticks, Entry};
 use solana_ledger::shred::{
     max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, Shredder,
-    MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE, SHRED_PAYLOAD_SIZE,
-    SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_DATA_SHRED_PAYLOAD,
+    MAX_DATA_SHREDS_PER_FEC_BLOCK, SHRED_PAYLOAD_SIZE, SIZE_OF_CODING_SHRED_HEADERS,
+    SIZE_OF_DATA_SHRED_PAYLOAD,
 };
 use solana_perf::test_tx;
 use solana_sdk::hash::Hash;
@@ -39,8 +39,7 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
         Some(shred_size),
     );
     let entries = make_large_unchained_entries(txs_per_entry, num_entries);
-    let shredder =
-        Shredder::new(1, 0, RECOMMENDED_FEC_RATE, Arc::new(Keypair::new()), 0, 0).unwrap();
+    let shredder = Shredder::new(1, 0, Arc::new(Keypair::new()), 0, 0).unwrap();
     let data_shreds = shredder
         .entries_to_data_shreds(
             &entries,
@@ -75,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
     let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
     let entries = create_ticks(num_ticks, 0, Hash::default());
     bencher.iter(|| {
-        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
+        let shredder = Shredder::new(1, 0, kp.clone(), 0, 0).unwrap();
         shredder.entries_to_shreds(&entries, true, 0);
     })
 }
@@ -94,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
     let entries = make_large_unchained_entries(txs_per_entry, num_entries);
     // 1Mb
     bencher.iter(|| {
-        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
+        let shredder = Shredder::new(1, 0, kp.clone(), 0, 0).unwrap();
         shredder.entries_to_shreds(&entries, true, 0);
     })
 }
@@ -107,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
     let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
     let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
     let entries = create_ticks(num_ticks, 0, Hash::default());
-    let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
+    let shredder = Shredder::new(1, 0, kp, 0, 0).unwrap();
     let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
     bencher.iter(|| {
         let raw = &mut Shredder::deshred(&data_shreds).unwrap();
@@ -133,9 +132,8 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
     let data_shreds = make_shreds(symbol_count);
     bencher.iter(|| {
         Shredder::generate_coding_shreds(
-            RECOMMENDED_FEC_RATE,
             &data_shreds[..symbol_count],
-            symbol_count,
+            true, // is_last_in_slot
         )
         .len();
     })
@@ -146,9 +144,8 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
     let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
     let data_shreds = make_shreds(symbol_count);
     let coding_shreds = Shredder::generate_coding_shreds(
-        RECOMMENDED_FEC_RATE,
         &data_shreds[..symbol_count],
-        symbol_count,
+        true, // is_last_in_slot
     );
     bencher.iter(|| {
         Shredder::try_recovery(


@@ -447,7 +447,7 @@ pub mod test {
         entry::create_ticks,
         genesis_utils::{create_genesis_config, GenesisConfigInfo},
         get_tmp_ledger_path,
-        shred::{max_ticks_per_n_shreds, ProcessShredsStats, Shredder, RECOMMENDED_FEC_RATE},
+        shred::{max_ticks_per_n_shreds, ProcessShredsStats, Shredder},
     };
     use solana_runtime::bank::Bank;
     use solana_sdk::{
@@ -476,7 +476,7 @@ pub mod test {
         let coding_shreds = Shredder::data_shreds_to_coding_shreds(
             &keypair,
             &data_shreds[0..],
-            RECOMMENDED_FEC_RATE,
+            true, // is_last_in_slot
             &mut ProcessShredsStats::default(),
         )
         .unwrap();


@@ -1,6 +1,6 @@
 use super::*;
 use solana_ledger::entry::Entry;
-use solana_ledger::shred::{Shredder, RECOMMENDED_FEC_RATE};
+use solana_ledger::shred::Shredder;
 use solana_sdk::hash::Hash;
 use solana_sdk::signature::Keypair;
@@ -47,7 +47,6 @@ impl BroadcastRun for BroadcastFakeShredsRun {
         let shredder = Shredder::new(
             bank.slot(),
             bank.parent().unwrap().slot(),
-            RECOMMENDED_FEC_RATE,
             self.keypair.clone(),
             (bank.tick_height() % bank.ticks_per_slot()) as u8,
             self.shred_version,


@@ -71,7 +71,6 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         let shredder = Shredder::new(
             bank.slot(),
             bank.parent().unwrap().slot(),
-            0.0,
             self.keypair.clone(),
             (bank.tick_height() % bank.ticks_per_slot()) as u8,
             self.shred_version,


@@ -8,7 +8,7 @@ use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
 use solana_ledger::{
     entry::Entry,
     shred::{
-        ProcessShredsStats, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE,
+        ProcessShredsStats, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
         SHRED_TICK_REFERENCE_MASK,
     },
 };
@@ -121,7 +121,6 @@ impl StandardBroadcastRun {
         let (data_shreds, next_shred_index) = Shredder::new(
             slot,
             parent_slot,
-            RECOMMENDED_FEC_RATE,
             self.keypair.clone(),
             reference_tick,
             self.shred_version,
@@ -451,8 +450,7 @@ fn make_coding_shreds(
             .collect()
         }
     };
-    Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, RECOMMENDED_FEC_RATE, stats)
-        .unwrap()
+    Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, is_slot_end, stats).unwrap()
 }

 impl BroadcastRun for StandardBroadcastRun {


@@ -3809,17 +3809,9 @@ mod tests {
         let mut rng = rand::thread_rng();
         let leader = Arc::new(Keypair::new());
         let keypair = Keypair::new();
-        let (slot, parent_slot, fec_rate, reference_tick, version) =
-            (53084024, 53084023, 0.0, 0, 0);
-        let shredder = Shredder::new(
-            slot,
-            parent_slot,
-            fec_rate,
-            leader.clone(),
-            reference_tick,
-            version,
-        )
-        .unwrap();
+        let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
+        let shredder =
+            Shredder::new(slot, parent_slot, leader.clone(), reference_tick, version).unwrap();
         let next_shred_index = rng.gen();
         let shred = new_rand_shred(&mut rng, next_shred_index, &shredder);
         let other_payload = new_rand_shred(&mut rng, next_shred_index, &shredder).payload;


@@ -343,17 +343,9 @@ pub(crate) mod tests {
     fn test_duplicate_shred_round_trip() {
         let mut rng = rand::thread_rng();
         let leader = Arc::new(Keypair::new());
-        let (slot, parent_slot, fec_rate, reference_tick, version) =
-            (53084024, 53084023, 0.0, 0, 0);
-        let shredder = Shredder::new(
-            slot,
-            parent_slot,
-            fec_rate,
-            leader.clone(),
-            reference_tick,
-            version,
-        )
-        .unwrap();
+        let (slot, parent_slot, reference_tick, version) = (53084024, 53084023, 0, 0);
+        let shredder =
+            Shredder::new(slot, parent_slot, leader.clone(), reference_tick, version).unwrap();
         let next_shred_index = rng.gen();
         let shred1 = new_rand_shred(&mut rng, next_shred_index, &shredder);
         let shred2 = new_rand_shred(&mut rng, next_shred_index, &shredder);


@@ -262,8 +262,10 @@ mod tests {
             &hasher,
         );
         assert!(!packet.meta.discard);
-        let coding = solana_ledger::shred::Shredder::generate_coding_shreds(1.0f32, &[shred], 1);
+        let coding = solana_ledger::shred::Shredder::generate_coding_shreds(
+            &[shred],
+            false, // is_last_in_slot
+        );
         coding[0].copy_to_packet(&mut packet);
         ShredFetchStage::process_packet(
             &mut packet,


@@ -636,8 +636,7 @@ mod test {
         parent: Slot,
         keypair: &Arc<Keypair>,
     ) -> Vec<Shred> {
-        let shredder = Shredder::new(slot, parent, 0.0, keypair.clone(), 0, 0)
-            .expect("Failed to create entry shredder");
+        let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap();
         shredder.entries_to_shreds(&entries, true, 0).0
     }