Erasure statistics for shreds (#5676)

Pankaj Garg
2019-08-27 11:22:06 -07:00
committed by GitHub
parent 7aaf5bc02c
commit 12ad95eb5e

@@ -342,55 +342,83 @@ impl Blocktree {
        // 3. Before trying recovery, check if enough number of shreds have been received
        // 3a. Enough number of shreds = (#data + #coding shreds) > erasure.num_data
        for (&(slot, set_index), erasure_meta) in erasure_metas.iter() {
            let submit_metrics = |attempted: bool, status: String| {
                datapoint_info!(
                    "blocktree-erasure",
                    ("slot", slot as i64, i64),
                    ("start_index", set_index as i64, i64),
                    ("end_index", erasure_meta.end_indexes().0 as i64, i64),
                    ("recovery_attempted", attempted, bool),
                    ("recovery_status", status, String),
                );
            };

            let index = index_working_set.get(&slot).expect("Index");
            match erasure_meta.status(&index) {
                ErasureMetaStatus::CanRecover => {
                    // Find shreds for this erasure set and try recovery
                    let slot = index.slot;
                    let mut available_shreds = vec![];
                    (set_index..set_index + erasure_meta.config.num_data() as u64).for_each(|i| {
                        if index.data().is_present(i) {
                            if let Some(shred) =
                                prev_inserted_datas.remove(&(slot, i)).or_else(|| {
                                    let some_data = data_cf
                                        .get_bytes((slot, i))
                                        .expect("Database failure, could not fetch data shred");
                                    if let Some(data) = some_data {
                                        bincode::deserialize(&data).ok()
                                    } else {
                                        warn!("Data shred deleted while reading for recovery");
                                        None
                                    }
                                })
                            {
                                available_shreds.push(shred);
                            }
                        }
                    });
                    (set_index..set_index + erasure_meta.config.num_coding() as u64).for_each(
                        |i| {
                            if index.coding().is_present(i) {
                                if let Some(shred) =
                                    prev_inserted_codes.remove(&(slot, i)).or_else(|| {
                                        let some_code = code_cf
                                            .get_bytes((slot, i))
                                            .expect("Database failure, could not fetch code shred");
                                        if let Some(code) = some_code {
                                            bincode::deserialize(&code).ok()
                                        } else {
                                            warn!("Code shred deleted while reading for recovery");
                                            None
                                        }
                                    })
                                {
                                    available_shreds.push(shred);
                                }
                            }
                        },
                    );
                    if let Ok(mut result) = Shredder::try_recovery(
                        &available_shreds,
                        erasure_meta.config.num_data(),
                        erasure_meta.config.num_coding(),
                        set_index as usize,
                        slot,
                    ) {
                        submit_metrics(true, "complete".into());
                        recovered_data_shreds.append(&mut result.recovered_data);
                    } else {
                        submit_metrics(true, "incomplete".into());
                    }
                }
                ErasureMetaStatus::DataFull => {
                    submit_metrics(false, "complete".into());
                }
                ErasureMetaStatus::StillNeed(needed) => {
                    submit_metrics(false, format!("still need: {}", needed));
                }
            };
        }
        recovered_data_shreds
    }
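
The match above acts on the condition spelled out in the comments at the top of the loop: an erasure set becomes recoverable once the data and coding shreds on hand together reach the set's configured number of data shreds. Below is a minimal, self-contained sketch of that threshold, using hypothetical names (RecoveryStatus, recovery_status) rather than the real ErasureMeta::status, purely to illustrate how the three status arms are reached:

    // Illustrative only: simplified stand-in for ErasureMeta::status.
    #[derive(Debug, PartialEq)]
    enum RecoveryStatus {
        DataFull,
        CanRecover,
        StillNeed(usize),
    }

    fn recovery_status(data_present: usize, coding_present: usize, num_data: usize) -> RecoveryStatus {
        if data_present == num_data {
            // Every data shred is already on disk; nothing to recover.
            RecoveryStatus::DataFull
        } else if data_present + coding_present >= num_data {
            // Enough total shreds for Reed-Solomon to reconstruct the missing data.
            RecoveryStatus::CanRecover
        } else {
            // Report how many more shreds (of either kind) are still needed.
            RecoveryStatus::StillNeed(num_data - (data_present + coding_present))
        }
    }

    fn main() {
        // With 8 data shreds per erasure set, 5 data + 3 coding shreds are enough to recover.
        assert_eq!(recovery_status(5, 3, 8), RecoveryStatus::CanRecover);
        // 5 data + 1 coding shred still leaves the set 2 shreds short.
        assert_eq!(recovery_status(5, 1, 8), RecoveryStatus::StillNeed(2));
        // All 8 data shreds present: nothing to recover.
        assert_eq!(recovery_status(8, 0, 8), RecoveryStatus::DataFull);
    }
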
@@ -475,6 +503,10 @@ impl Blocktree {
            &mut write_batch,
        )?;

        for ((slot, set_index), erasure_meta) in erasure_metas {
            write_batch.put::<cf::ErasureMeta>((slot, set_index), &erasure_meta)?;
        }

        for (&slot, index) in index_working_set.iter() {
            write_batch.put::<cf::Index>(slot, index)?;
        }
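
The statistics themselves come from the submit_metrics closure added in the first hunk: it captures the erasure-set identifiers once, and each status arm then reports a single datapoint with the attempted flag and an outcome string. A standalone sketch of that capture-and-report pattern, with println! standing in for the datapoint_info! macro from solana-metrics and made-up values for illustration:

    fn main() {
        // Hypothetical values standing in for one erasure set.
        let slot: u64 = 42;
        let set_index: u64 = 0;
        let end_index: u64 = 7;

        // The closure captures the erasure-set identifiers once; each call site
        // only supplies whether recovery was attempted and how it turned out.
        let submit_metrics = |attempted: bool, status: String| {
            println!(
                "blocktree-erasure slot={} start_index={} end_index={} recovery_attempted={} recovery_status={}",
                slot, set_index, end_index, attempted, status
            );
        };

        submit_metrics(true, "complete".into());
        submit_metrics(false, format!("still need: {}", 3));
    }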