expands number of erasure coding shreds in the last batch in slots (#16484)

The number of parity coding shreds is always less than the number of data
shreds in an FEC block:
https://github.com/solana-labs/solana/blob/6907a2366/ledger/src/shred.rs#L719

Data shreds are batched in chunks of 32 shreds each:
https://github.com/solana-labs/solana/blob/6907a2366/ledger/src/shred.rs#L714

However, the very last batch of data shreds in a slot can be much smaller
than 32, and since it then also gets fewer coding shreds, the impact of
shred loss on that batch is exacerbated.

This commit expands the number of coding shreds in the last FEC block of a
slot to 64 minus the number of data shreds, so that every FEC block always
contains 64 data and parity coding shreds in total.
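
As a rough illustration, below is a minimal sketch of the per-batch
coding-shred count described above. This is not the actual Shredder code;
it assumes MAX_DATA_SHREDS_PER_FEC_BLOCK = 32 and that non-final batches
keep one coding shred per data shred:

    // Sketch only: how many parity coding shreds a batch of data shreds gets,
    // depending on whether it is the last batch in the slot.
    const MAX_DATA_SHREDS_PER_FEC_BLOCK: usize = 32;

    fn num_coding_shreds(num_data_shreds: usize, is_last_in_slot: bool) -> usize {
        if is_last_in_slot {
            // Pad the final FEC block so that data + parity always totals 64 shreds.
            2 * MAX_DATA_SHREDS_PER_FEC_BLOCK - num_data_shreds
        } else {
            // Interior batches are full (32 data shreds) and get 32 parity shreds.
            num_data_shreds
        }
    }

    fn main() {
        assert_eq!(num_coding_shreds(32, false), 32); // full interior batch: 64 shreds total
        assert_eq!(num_coding_shreds(5, true), 59); // small final batch is padded up to 64 total
    }

So a final batch of, say, 5 data shreds now gets 59 parity shreds, instead
of fewer parity shreds than data shreds as before.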

As a consequence, the last FEC block has more parity coding shreds than
data shreds, so for some shred indices there will be a coding shred but no
data shred. This should not cause overlapping FEC blocks as in:
https://github.com/solana-labs/solana/pull/10095
since the expansion applies only to the very last batch in a slot, and the
next slot resets the shred index.
Author: behzad nouri
Date:   2021-04-21 12:47:50 +00:00 (committed by GitHub)
Parent: ba2be0ca34
Commit: 37b8587d4e
14 changed files with 243 additions and 262 deletions
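
The bench diff below reflects the accompanying API change: Shredder::new no
longer takes an FEC rate, and Shredder::generate_coding_shreds takes the
data-shred batch plus an is_last_in_slot flag instead of a rate and a symbol
count. The following is a condensed sketch of the new call shapes, pieced
together from the bench hunks shown below; the argument values and the
helper function are illustrative, and exact signatures depend on the crate
version at this commit:

    use std::sync::Arc;
    use solana_ledger::entry::create_ticks;
    use solana_ledger::shred::{Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK};
    use solana_sdk::{hash::Hash, signature::Keypair};

    // Sketch of the post-change call shapes, mirroring the benches below.
    fn example_shredding() {
        let entries = create_ticks(64, 0, Hash::default());
        // Shredder::new without the old RECOMMENDED_FEC_RATE argument.
        let shredder = Shredder::new(1, 0, Arc::new(Keypair::new()), 0, 0).unwrap();
        let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
        // Coding shreds are generated from (at most) one FEC block's worth of
        // data shreds plus a flag marking the last batch in the slot.
        let batch_len = data_shreds.len().min(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize);
        let _coding_shreds = Shredder::generate_coding_shreds(
            &data_shreds[..batch_len],
            true, // is_last_in_slot
        );
    }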


@@ -8,8 +8,8 @@ use raptorq::{Decoder, Encoder};
 use solana_ledger::entry::{create_ticks, Entry};
 use solana_ledger::shred::{
     max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, Shredder,
-    MAX_DATA_SHREDS_PER_FEC_BLOCK, RECOMMENDED_FEC_RATE, SHRED_PAYLOAD_SIZE,
-    SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_DATA_SHRED_PAYLOAD,
+    MAX_DATA_SHREDS_PER_FEC_BLOCK, SHRED_PAYLOAD_SIZE, SIZE_OF_CODING_SHRED_HEADERS,
+    SIZE_OF_DATA_SHRED_PAYLOAD,
 };
 use solana_perf::test_tx;
 use solana_sdk::hash::Hash;
@@ -39,8 +39,7 @@ fn make_shreds(num_shreds: usize) -> Vec<Shred> {
         Some(shred_size),
     );
     let entries = make_large_unchained_entries(txs_per_entry, num_entries);
-    let shredder =
-        Shredder::new(1, 0, RECOMMENDED_FEC_RATE, Arc::new(Keypair::new()), 0, 0).unwrap();
+    let shredder = Shredder::new(1, 0, Arc::new(Keypair::new()), 0, 0).unwrap();
     let data_shreds = shredder
         .entries_to_data_shreds(
             &entries,
@@ -75,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
     let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
     let entries = create_ticks(num_ticks, 0, Hash::default());
     bencher.iter(|| {
-        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
+        let shredder = Shredder::new(1, 0, kp.clone(), 0, 0).unwrap();
         shredder.entries_to_shreds(&entries, true, 0);
     })
 }
@@ -94,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
     let entries = make_large_unchained_entries(txs_per_entry, num_entries);
     // 1Mb
     bencher.iter(|| {
-        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
+        let shredder = Shredder::new(1, 0, kp.clone(), 0, 0).unwrap();
         shredder.entries_to_shreds(&entries, true, 0);
     })
 }
@@ -107,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
     let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
     let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
     let entries = create_ticks(num_ticks, 0, Hash::default());
-    let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
+    let shredder = Shredder::new(1, 0, kp, 0, 0).unwrap();
     let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
     bencher.iter(|| {
         let raw = &mut Shredder::deshred(&data_shreds).unwrap();
@@ -133,9 +132,8 @@ fn bench_shredder_coding(bencher: &mut Bencher) {
     let data_shreds = make_shreds(symbol_count);
     bencher.iter(|| {
         Shredder::generate_coding_shreds(
-            RECOMMENDED_FEC_RATE,
             &data_shreds[..symbol_count],
-            symbol_count,
+            true, // is_last_in_slot
         )
         .len();
     })
@@ -146,9 +144,8 @@ fn bench_shredder_decoding(bencher: &mut Bencher) {
     let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
     let data_shreds = make_shreds(symbol_count);
     let coding_shreds = Shredder::generate_coding_shreds(
-        RECOMMENDED_FEC_RATE,
         &data_shreds[..symbol_count],
-        symbol_count,
+        true, // is_last_in_slot
     );
     bencher.iter(|| {
         Shredder::try_recovery(