Upgrade Repair to be more intelligent and aggressive (bp #6789) (#6793)

automerge
This commit is contained in:
mergify[bot]
2019-11-07 21:36:53 -08:00
committed by Grimes
parent e599a90333
commit 80d780d666
12 changed files with 236 additions and 71 deletions

View File

@@ -35,7 +35,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let entries = create_ticks(num_ticks, Hash::default());
bencher.iter(|| {
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone()).unwrap();
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0).unwrap();
shredder.entries_to_shreds(&entries, true, 0);
})
}
@@ -50,7 +50,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
// 1Mb
bencher.iter(|| {
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone()).unwrap();
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0).unwrap();
shredder.entries_to_shreds(&entries, true, 0);
})
}
@@ -63,7 +63,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
let entries = create_ticks(num_ticks, Hash::default());
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp).unwrap();
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0).unwrap();
let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
bencher.iter(|| {
let raw = &mut Shredder::deshred(&data_shreds).unwrap();
@@ -75,7 +75,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
fn bench_deserialize_hdr(bencher: &mut Bencher) {
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true);
let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0);
bencher.iter(|| {
let payload = shred.payload.clone();

View File

@@ -44,6 +44,7 @@ impl BroadcastRun for BroadcastFakeBlobsRun {
bank.parent().unwrap().slot(),
RECOMMENDED_FEC_RATE,
keypair.clone(),
(bank.tick_height() % bank.ticks_per_slot()) as u8,
)
.expect("Expected to create a new shredder");

View File

@@ -42,6 +42,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
bank.parent().unwrap().slot(),
RECOMMENDED_FEC_RATE,
keypair.clone(),
(bank.tick_height() % bank.ticks_per_slot()) as u8,
)
.expect("Expected to create a new shredder");

View File

@@ -2,7 +2,7 @@ use super::broadcast_utils::{self, ReceiveResults};
use super::*;
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
use solana_ledger::entry::Entry;
use solana_ledger::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE};
use solana_ledger::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK};
use solana_sdk::signature::Keypair;
use solana_sdk::timing::duration_as_us;
use std::time::Duration;
@@ -46,7 +46,7 @@ impl StandardBroadcastRun {
}
}
fn check_for_interrupted_slot(&mut self) -> Option<Shred> {
fn check_for_interrupted_slot(&mut self, max_ticks_in_slot: u8) -> Option<Shred> {
let (slot, _) = self.current_slot_and_parent.unwrap();
let mut last_unfinished_slot_shred = self
.unfinished_slot
@@ -60,6 +60,7 @@ impl StandardBroadcastRun {
None,
true,
true,
max_ticks_in_slot & SHRED_TICK_REFERENCE_MASK,
))
} else {
None
@@ -102,6 +103,7 @@ impl StandardBroadcastRun {
blocktree: &Blocktree,
entries: &[Entry],
is_slot_end: bool,
reference_tick: u8,
) -> (Vec<Shred>, Vec<Shred>) {
let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
let shredder = Shredder::new(
@@ -109,6 +111,7 @@ impl StandardBroadcastRun {
parent_slot,
RECOMMENDED_FEC_RATE,
self.keypair.clone(),
reference_tick,
)
.expect("Expected to create a new shredder");
@@ -168,13 +171,15 @@ impl StandardBroadcastRun {
let to_shreds_start = Instant::now();
// 1) Check if slot was interrupted
let last_unfinished_slot_shred = self.check_for_interrupted_slot();
let last_unfinished_slot_shred =
self.check_for_interrupted_slot(bank.ticks_per_slot() as u8);
// 2) Convert entries to shreds and coding shreds
let (data_shreds, coding_shreds) = self.entries_to_shreds(
blocktree,
&receive_results.entries,
last_tick_height == bank.max_tick_height(),
(bank.tick_height() % bank.ticks_per_slot()) as u8,
);
let to_shreds_elapsed = to_shreds_start.elapsed();
@@ -362,7 +367,7 @@ mod test {
// Slot 2 interrupted slot 1
let shred = run
.check_for_interrupted_slot()
.check_for_interrupted_slot(0)
.expect("Expected a shred that signals an interrupt");
// Validate the shred

View File

@@ -165,7 +165,7 @@ mod tests {
hasher.hash(&buf[..size]);
// golden needs to be updated if blob stuff changes....
let golden: Hash = "BdmY3efqu7zbnFuGRAeFANwa35HkDdQ7hwhYez3xGXiM"
let golden: Hash = "HLzH7Nrh4q2K5WTh3e9vPNFZ1QVYhVDRMN9u5v51GqpJ"
.parse()
.unwrap();

View File

@@ -820,7 +820,7 @@ impl ClusterInfo {
}
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
match repair_request {
RepairType::Blob(slot, blob_index) => {
RepairType::Shred(slot, blob_index) => {
datapoint_debug!(
"cluster_info-repair",
("repair-slot", *slot, i64),
@@ -1882,7 +1882,7 @@ mod tests {
fn window_index_request() {
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(me);
let rv = cluster_info.repair_request(&RepairType::Blob(0, 0));
let rv = cluster_info.repair_request(&RepairType::Shred(0, 0));
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
let gossip_addr = socketaddr!([127, 0, 0, 1], 1234);
@@ -1901,7 +1901,7 @@ mod tests {
);
cluster_info.insert_info(nxt.clone());
let rv = cluster_info
.repair_request(&RepairType::Blob(0, 0))
.repair_request(&RepairType::Shred(0, 0))
.unwrap();
assert_eq!(nxt.gossip, gossip_addr);
assert_eq!(rv.0, nxt.gossip);
@@ -1926,7 +1926,7 @@ mod tests {
while !one || !two {
//this randomly picks an option, so eventually it should pick both
let rv = cluster_info
.repair_request(&RepairType::Blob(0, 0))
.repair_request(&RepairType::Shred(0, 0))
.unwrap();
if rv.0 == gossip_addr {
one = true;

View File

@@ -18,8 +18,8 @@ use std::{
time::Duration,
};
pub const MAX_REPAIR_LENGTH: usize = 16;
pub const REPAIR_MS: u64 = 100;
pub const MAX_REPAIR_LENGTH: usize = 1024;
pub const REPAIR_MS: u64 = 50;
pub const MAX_ORPHANS: usize = 5;
pub enum RepairStrategy {
@@ -35,7 +35,7 @@ pub enum RepairStrategy {
pub enum RepairType {
Orphan(u64),
HighestBlob(u64, u64),
Blob(u64, u64),
Shred(u64, u64),
}
pub struct RepairSlotRange {
@@ -252,13 +252,13 @@ impl RepairService {
} else {
let reqs = blocktree.find_missing_data_indexes(
slot,
slot_meta.first_shred_timestamp,
slot_meta.consumed,
slot_meta.received,
max_repairs,
);
reqs.into_iter()
.map(|i| RepairType::Blob(slot, i))
.map(|i| RepairType::Shred(slot, i))
.collect()
}
}
@@ -478,12 +478,13 @@ mod test {
}
}
blocktree.insert_shreds(shreds_to_write, None).unwrap();
// sleep so that the holes are ready for repair
sleep(Duration::from_secs(1));
let expected: Vec<RepairType> = (0..num_slots)
.flat_map(|slot| {
missing_indexes_per_slot
.iter()
.map(move |blob_index| RepairType::Blob(slot as u64, *blob_index))
.map(move |blob_index| RepairType::Shred(slot as u64, *blob_index))
})
.collect();
@@ -543,7 +544,8 @@ mod test {
slot_shreds.remove(0);
blocktree.insert_shreds(slot_shreds, None).unwrap();
}
// sleep to make slot eligible for repair
sleep(Duration::from_secs(1));
// Iterate through all possible combinations of start..end (inclusive on both
// sides of the range)
for start in 0..slots.len() {
@@ -555,7 +557,7 @@ mod test {
..=repair_slot_range.end)
.map(|slot_index| {
if slots.contains(&(slot_index as u64)) {
RepairType::Blob(slot_index as u64, 0)
RepairType::Shred(slot_index as u64, 0)
} else {
RepairType::HighestBlob(slot_index as u64, 0)
}

View File

@@ -1005,7 +1005,7 @@ mod test {
let payload_len = SIZE_OF_DATA_SHRED_PAYLOAD;
let gibberish = [0xa5u8; PACKET_DATA_SIZE];
let mut data_header = DataShredHeader::default();
data_header.flags = DATA_COMPLETE_SHRED;
data_header.flags |= DATA_COMPLETE_SHRED;
let mut shred = Shred::new_empty_from_header(
ShredCommonHeader::default(),
data_header,

View File

@@ -308,7 +308,7 @@ mod test {
parent: u64,
keypair: &Arc<Keypair>,
) -> Vec<Shred> {
let shredder = Shredder::new(slot, parent, 0.0, keypair.clone())
let shredder = Shredder::new(slot, parent, 0.0, keypair.clone(), 0)
.expect("Failed to create entry shredder");
shredder.entries_to_shreds(&entries, true, 0).0
}