Remove obsolete references to Blob (#6957)

* Remove the name "blob" from archivers

* Remove the name "blob" from broadcast

* Remove the name "blob" from Cluster Info

* Remove the name "blob" from Repair

* Remove the name "blob" from a bunch more places

* Remove the name "blob" from tests and book

Author: Sagar Dhawan
Date: 2019-11-14 11:49:31 -08:00
Committed by: GitHub
Parent: e7f63cd336
Commit: 79d7090867
26 changed files with 258 additions and 259 deletions


@@ -790,7 +790,7 @@ impl Blocktree {
);
return false;
}
- // Check that we do not receive a blob with "last_index" true, but shred_index
+ // Check that we do not receive a shred with "last_index" true, but shred_index
// less than our current received
if last_in_slot && shred_index < slot_meta.received {
datapoint_error!(
@@ -1430,7 +1430,7 @@ fn get_slot_meta_entry<'a>(
) -> &'a mut SlotMetaWorkingSetEntry {
let meta_cf = db.column::<cf::SlotMeta>();
- // Check if we've already inserted the slot metadata for this blob's slot
+ // Check if we've already inserted the slot metadata for this shred's slot
slot_meta_working_set.entry(slot).or_insert_with(|| {
// Store a 2-tuple of the metadata (working copy, backup copy)
if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") {
@@ -1823,7 +1823,7 @@ pub fn verify_shred_slots(slot: Slot, parent_slot: Slot, last_root: u64) -> bool
return false;
}
- // Ignore blobs that chain to slots before the last root
+ // Ignore shreds that chain to slots before the last root
if parent_slot < last_root {
return false;
}
@@ -3602,7 +3602,7 @@ pub mod tests {
));
}
- // Trying to insert with set_index with num_coding that would imply the last blob
+ // Trying to insert with set_index with num_coding that would imply the last shred
// has index > u32::MAX should fail
{
let mut coding_shred = Shred::new_empty_from_header(

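The verify_shred_slots hunk above shows only fragments of the chaining check. As a rough, hypothetical distillation of the rule being described (not the real function, which also handles special cases such as writes to slot 0), the idea is that a shred is rejected unless its slot chains to an earlier parent that is not older than the last root:

// Hypothetical sketch of the chaining rule from the hunk above. The condition
// on the first check is an assumption; only the second check and its comment
// appear verbatim in the diff.
type Slot = u64;

fn chains_after_last_root(slot: Slot, parent_slot: Slot, last_root: u64) -> bool {
    // Assume a slot must chain to a strictly earlier parent.
    if parent_slot >= slot {
        return false;
    }
    // Ignore shreds that chain to slots before the last root.
    if parent_slot < last_root {
        return false;
    }
    true
}

fn main() {
    assert!(chains_after_last_root(10, 8, 5));
    assert!(!chains_after_last_root(10, 4, 5)); // parent older than the last root
    assert!(!chains_after_last_root(10, 12, 5)); // parent not earlier than the slot
}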

@@ -10,7 +10,7 @@ pub struct SlotMeta {
// The number of slots above the root (the genesis block). The first
// slot has slot 0.
pub slot: Slot,
- // The total number of consecutive blobs starting from index 0
+ // The total number of consecutive shreds starting from index 0
// we have received for this slot.
pub consumed: u64,
// The index *plus one* of the highest shred received for this slot. Useful

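For orientation, here is a minimal sketch of the SlotMeta fields the hunk above touches; the field set is trimmed to what the diff shows, and the real struct carries more bookkeeping:

// Minimal sketch of the fields discussed above, assuming u64 slot numbers.
struct SlotMeta {
    // The slot number, counted from the genesis block at slot 0.
    slot: u64,
    // The number of consecutive shreds, starting from index 0, received for this slot.
    consumed: u64,
    // The index *plus one* of the highest shred received for this slot.
    received: u64,
}

fn main() {
    // If shreds 0, 1, 2 and 5 have arrived, `consumed` stops at the first gap
    // while `received` tracks the highest index seen, plus one.
    let meta = SlotMeta { slot: 3, consumed: 3, received: 6 };
    assert!(meta.consumed <= meta.received);
    println!("slot {}: consumed {}, received {}", meta.slot, meta.consumed, meta.received);
}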

@@ -1,14 +1,14 @@
//! # Erasure Coding and Recovery
//!
- //! Blobs are logically grouped into erasure sets or blocks. Each set contains 16 sequential data
- //! blobs and 4 sequential coding blobs.
+ //! Shreds are logically grouped into erasure sets or blocks. Each set contains 16 sequential data
+ //! shreds and 4 sequential coding shreds.
//!
- //! Coding blobs in each set starting from `start_idx`:
+ //! Coding shreds in each set starting from `start_idx`:
//! For each erasure set:
- //! generate `NUM_CODING` coding_blobs.
- //! index the coding blobs from `start_idx` to `start_idx + NUM_CODING - 1`.
+ //! generate `NUM_CODING` coding_shreds.
+ //! index the coding shreds from `start_idx` to `start_idx + NUM_CODING - 1`.
//!
- //! model of an erasure set, with top row being data blobs and second being coding
+ //! model of an erasure set, with top row being data shreds and second being coding
//! |<======================= NUM_DATA ==============================>|
//! |<==== NUM_CODING ===>|
//! +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+
@@ -17,10 +17,10 @@
//! | C | | C | | C | | C | | | | | | | | | | | | |
//! +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+
//!
- //! blob structure for coding blobs
+ //! shred structure for coding shreds
//!
//! + ------- meta is set and used by transport, meta.size is actual length
- //! | of data in the byte array blob.data
+ //! | of data in the byte array shred.data
//! |
//! | + -- data is stuff shipped over the wire, and has an included
//! | | header
@@ -30,14 +30,14 @@
//! |+---+-- |+---+---+---+---+------------------------------------------+|
//! || s | . || i | | f | s | ||
//! || i | . || n | i | l | i | ||
- //! || z | . || d | d | a | z | blob.data(), or blob.data_mut() ||
+ //! || z | . || d | d | a | z | shred.data(), or shred.data_mut() ||
//! || e | || e | | g | e | ||
//! |+---+-- || x | | s | | ||
//! | |+---+---+---+---+------------------------------------------+|
//! +----------+------------------------------------------------------------+
- //! | |<=== coding blob part for "coding" =======>|
+ //! | |<=== coding shred part for "coding" =======>|
//! | |
- //! |<============== data blob part for "coding" ==============>|
+ //! |<============== data shred part for "coding" ==============>|
//!
//!
@@ -46,9 +46,9 @@ use reed_solomon_erasure::ReedSolomon;
use serde::{Deserialize, Serialize};
//TODO(sakridge) pick these values
- /// Number of data blobs
+ /// Number of data shreds
pub const NUM_DATA: usize = 8;
- /// Number of coding blobs; also the maximum number that can go missing.
+ /// Number of coding shreds; also the maximum number that can go missing.
pub const NUM_CODING: usize = 8;
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
@@ -86,7 +86,7 @@ impl ErasureConfig {
type Result<T> = std::result::Result<T, reed_solomon_erasure::Error>;
/// Represents an erasure "session" with a particular configuration and number of data and coding
- /// blobs
+ /// shreds
#[derive(Debug, Clone)]
pub struct Session(ReedSolomon<Field>);
@@ -134,11 +134,11 @@ pub mod test {
use log::*;
use solana_sdk::clock::Slot;
- /// Specifies the contents of a 16-data-blob and 4-coding-blob erasure set
+ /// Specifies the contents of a 16-data-shred and 4-coding-shred erasure set
/// Exists to be passed to `generate_blocktree_with_coding`
#[derive(Debug, Copy, Clone)]
pub struct ErasureSpec {
- /// Which 16-blob erasure set this represents
+ /// Which 16-shred erasure set this represents
pub set_index: u64,
pub num_data: usize,
pub num_coding: usize,

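Since the module above builds on the reed_solomon_erasure crate, a small standalone sketch may help illustrate the 8-data / 8-coding layout the NUM_DATA and NUM_CODING constants describe. The shard length, shard contents, and knocked-out indices below are made up, and depending on the crate version the import may be `reed_solomon_erasure::ReedSolomon` (as in the diff) rather than the `galois_8` path used here:

use reed_solomon_erasure::galois_8::ReedSolomon;

const NUM_DATA: usize = 8;
const NUM_CODING: usize = 8;

fn main() {
    // One erasure "session" for NUM_DATA data shreds and NUM_CODING coding shreds.
    let session = ReedSolomon::new(NUM_DATA, NUM_CODING).unwrap();

    // Eight data shards followed by eight (initially zeroed) coding shards,
    // all of equal length.
    let mut shards: Vec<Vec<u8>> = (0..NUM_DATA)
        .map(|i| vec![i as u8; 64])
        .chain((0..NUM_CODING).map(|_| vec![0u8; 64]))
        .collect();

    // Fill in the coding shards from the data shards.
    session.encode(&mut shards).unwrap();

    // Lose up to NUM_CODING shards (data or coding, in any mix)...
    let mut maybe_shards: Vec<Option<Vec<u8>>> =
        shards.iter().cloned().map(Some).collect();
    for &i in [0usize, 3, 9, 15].iter() {
        maybe_shards[i] = None;
    }

    // ...and recover them from what is left.
    session.reconstruct(&mut maybe_shards).unwrap();

    let recovered: Vec<Vec<u8>> = maybe_shards.into_iter().map(Option::unwrap).collect();
    assert_eq!(recovered, shards);
}

This mirrors the `Session(ReedSolomon<Field>)` wrapper in the hunk above: the session is configured once per erasure configuration and reused for both encoding and recovery.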

@@ -128,7 +128,7 @@ impl LeaderScheduleCache {
if *pubkey == leader_schedule[i] {
if let Some(blocktree) = blocktree {
if let Some(meta) = blocktree.meta(current_slot).unwrap() {
- // We have already sent a blob for this slot, so skip it
+ // We have already sent a shred for this slot, so skip it
if meta.received > 0 {
continue;
}
@@ -435,7 +435,7 @@ mod tests {
1
);
- // Write a blob into slot 2 that chains to slot 1,
+ // Write a shred into slot 2 that chains to slot 1,
// but slot 1 is empty so should not be skipped
let (shreds, _) = make_slot_entries(2, 1, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();
@@ -447,7 +447,7 @@ mod tests {
1
);
- // Write a blob into slot 1
+ // Write a shred into slot 1
let (shreds, _) = make_slot_entries(1, 0, 1);
// Check that slot 1 and 2 are skipped
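The skip rule touched by this file's first hunk can be boiled down to a tiny sketch, assuming the `SlotMeta.received` semantics from earlier in this commit; this is an illustration of the skip condition only, not the cache's actual lookup code:

// Sketch of the skip rule: a slot whose metadata already records received
// shreds is treated as produced and skipped when searching for this
// validator's next leader slot.
struct SlotMeta {
    received: u64,
}

fn already_produced(meta: Option<&SlotMeta>) -> bool {
    // We have already sent a shred for this slot, so skip it.
    meta.map_or(false, |m| m.received > 0)
}

fn main() {
    assert!(already_produced(Some(&SlotMeta { received: 3 })));
    assert!(!already_produced(Some(&SlotMeta { received: 0 })));
    assert!(!already_produced(None)); // no metadata yet: slot still available
}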