fix erasure, remove Entry "pad"

* fixes #997
* Entry pad is no longer required since erasure coding aligns data length
Rob Walker
2018-08-17 11:56:32 -07:00
parent 46016b8c7e
commit bc5f29150b
3 changed files with 24 additions and 21 deletions
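
Why the pad existed at all: the doc comment deleted below says erasure requires an Entry's serialized size to be a multiple of 4 bytes, and the 3-byte `pad` field forced that alignment into the struct itself (bincode serializes the `has_more` bool as 1 byte, so `has_more` plus `pad` fill an aligned 4). After this commit the erasure layer owns alignment. A hypothetical sketch of the round-up arithmetic involved, assuming only the 4-byte alignment named in that deleted comment:

// Hypothetical sketch, not code from this commit: round a payload length
// up to the 4-byte boundary the old `pad: [u8; 3]` field used to enforce.
const ERASURE_ALIGN: usize = 4; // from the deleted comment on the Entry struct

fn aligned_len(len: usize) -> usize {
    (len + ERASURE_ALIGN - 1) & !(ERASURE_ALIGN - 1)
}

fn main() {
    assert_eq!(aligned_len(13), 16); // a 13-byte payload occupies 16 aligned bytes
    assert_eq!(aligned_len(16), 16); // already-aligned lengths are unchanged
}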

src/entry.rs

@@ -45,9 +45,6 @@ pub struct Entry {
     /// 2. this Entry can be left out of the bank's entry_id cache for
     ///    purposes of duplicate rejection
     pub has_more: bool,
-
-    /// Erasure requires that Entry be a multiple of 4 bytes in size
-    pad: [u8; 3],
 }
 
 impl Entry {
@@ -65,7 +62,6 @@ impl Entry {
             id,
             transactions,
             has_more,
-            pad: [0, 0, 0],
         };
         let size = serialized_size(&entry).unwrap();
@@ -116,7 +112,6 @@ impl Entry {
             id: Hash::default(),
             transactions,
             has_more: false,
-            pad: [0, 0, 0],
         }).unwrap() <= BLOB_DATA_SIZE as u64
     }
@@ -142,7 +137,6 @@ impl Entry {
             id: *id,
             transactions: vec![],
             has_more: false,
-            pad: [0, 0, 0],
         }
     }
@@ -209,7 +203,6 @@ pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transact
         id: next_hash(start_hash, num_hashes, &transactions),
         transactions,
         has_more: false,
-        pad: [0, 0, 0],
     }
 }
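
With `pad` gone, the will-fit check in the `-116` hunk above compares an Entry's raw bincode size against BLOB_DATA_SIZE directly. A self-contained sketch of that predicate, using placeholder types for Hash and Transaction and an assumed BLOB_DATA_SIZE value (both are illustration-only assumptions; `num_hashes` is inferred from next_entry's signature):

// Sketch only: a stand-in Entry mirroring the post-commit field list.
use bincode::serialized_size; // bincode 1.x
use serde::Serialize;

const BLOB_DATA_SIZE: usize = 65_280; // assumed; see the packet.rs diff below

#[derive(Serialize)]
struct Entry {
    num_hashes: u64,       // inferred from next_entry()'s signature
    id: [u8; 32],          // placeholder for Hash
    transactions: Vec<u8>, // placeholder for Vec<Transaction>
    has_more: bool,
    // no more `pad: [u8; 3]`
}

fn will_fit(entry: &Entry) -> bool {
    serialized_size(entry).unwrap() <= BLOB_DATA_SIZE as u64
}

fn main() {
    let e = Entry { num_hashes: 1, id: [0u8; 32], transactions: vec![], has_more: false };
    assert!(will_fit(&e));
}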

src/erasure.rs

@@ -584,16 +584,35 @@ pub fn recover(
         let mut data_size;
         if n < NUM_DATA {
-            data_size = locks[n].get_data_size().unwrap();
-            data_size -= BLOB_HEADER_SIZE as u64;
+            data_size = locks[n].get_data_size().unwrap() as usize;
+            data_size -= BLOB_HEADER_SIZE;
+            if data_size > BLOB_DATA_SIZE {
+                trace!(
+                    "{:x} corrupt data blob[{}] data_size: {}",
+                    debug_id,
+                    idx,
+                    data_size
+                );
+                corrupt = true;
+            }
         } else {
-            data_size = size as u64;
+            data_size = size;
             idx -= NUM_CODING as u64;
             locks[n].set_index(idx).unwrap();
+            if data_size - BLOB_HEADER_SIZE > BLOB_DATA_SIZE {
+                trace!(
+                    "{:x} corrupt coding blob[{}] data_size: {}",
+                    debug_id,
+                    idx,
+                    data_size
+                );
+                corrupt = true;
+            }
         }
         locks[n].meta = meta.clone().unwrap();
-        locks[n].set_size(data_size as usize);
+        locks[n].set_size(data_size);
         trace!(
             "{:x} erasures[{}] ({}) size: {:x} data[0]: {}",
             debug_id,
@@ -602,15 +621,6 @@ pub fn recover(
             data_size,
             locks[n].data()[0]
         );
-        if data_size > BLOB_DATA_SIZE as u64 {
-            trace!(
-                "{:x} corrupt blob[{}] data_size: {}",
-                debug_id,
-                idx,
-                data_size
-            );
-            corrupt = true;
-        }
     }
     assert!(!corrupt, " {:x} ", debug_id);
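
The behavioral fix in this file: the old post-loop check (deleted in the second hunk) compared every blob's data_size against BLOB_DATA_SIZE, but for coding blobs data_size still includes the header, so the bound was too strict by BLOB_HEADER_SIZE. The new per-branch checks apply the right bound to each kind of blob. A self-contained sketch of the two predicates, with an assumed header size (the real constant lives in packet.rs):

// Sketch only; BLOB_HEADER_SIZE is an assumed value for illustration.
const BLOB_HEADER_SIZE: usize = 64;
const BLOB_SIZE: usize = 64 * 1024 - 128; // from packet.rs
const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE * 2;

/// Mirrors the per-branch checks in recover(): a data blob's size arrives
/// with the header already subtracted, a coding blob's does not.
fn is_corrupt(is_data_blob: bool, data_size: usize) -> bool {
    if is_data_blob {
        data_size > BLOB_DATA_SIZE
    } else {
        data_size - BLOB_HEADER_SIZE > BLOB_DATA_SIZE
    }
}

fn main() {
    assert!(!is_corrupt(true, BLOB_DATA_SIZE));
    assert!(is_corrupt(true, BLOB_DATA_SIZE + 1));
    // a maximal coding blob is legitimate once the header is accounted for
    assert!(!is_corrupt(false, BLOB_DATA_SIZE + BLOB_HEADER_SIZE));
}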

src/packet.rs

@@ -22,7 +22,7 @@ pub type BlobRecycler = Recycler<Blob>;
 pub const NUM_PACKETS: usize = 1024 * 8;
 pub const BLOB_SIZE: usize = (64 * 1024 - 128); // wikipedia says there should be 20b for ipv4 headers
-pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
+pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - (BLOB_HEADER_SIZE * 2);
 pub const PACKET_DATA_SIZE: usize = 256;
 pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
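
The constant change complements the header accounting above: BLOB_DATA_SIZE now subtracts two headers' worth from BLOB_SIZE, presumably so a maximal payload plus its header (and erasure alignment slack) still fits in one blob. Worked numbers under an assumed 64-byte header:

// Worked arithmetic for the new constant; 64 is an assumed header size,
// the real BLOB_HEADER_SIZE is defined elsewhere in packet.rs.
const BLOB_HEADER_SIZE: usize = 64;
const BLOB_SIZE: usize = 64 * 1024 - 128;                           // 65_408
const OLD_BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;     // 65_344
const NEW_BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE * 2; // 65_280

fn main() {
    // a full payload plus its header now fits with a header's worth to spare
    assert!(NEW_BLOB_DATA_SIZE + BLOB_HEADER_SIZE <= BLOB_SIZE);
    println!("{} {} {}", BLOB_SIZE, OLD_BLOB_DATA_SIZE, NEW_BLOB_DATA_SIZE);
}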