remove Result<> from Blob accessors, add parent (#2608)
* remove Result<> from Blob accessors, add parent

* update chacha's golden

* fixup benches
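In short: the Blob header accessors (slot(), index(), id(), size()) and their setters no longer return Result, so call sites drop their `if let Ok(..)`, `?`, and `.unwrap()` plumbing, and a parent field is added alongside slot. A minimal sketch of the idea, assuming a fixed-offset little-endian header layout; the struct, field offsets, and the parent offset below are illustrative assumptions, not the actual Blob implementation:

```rust
// Illustrative sketch only (assumed layout, not the real solana Blob):
// the point is the accessor change from Result<u64> to plain u64.
struct Blob {
    data: [u8; 64], // assumed header-sized buffer; real blobs are larger
}

impl Blob {
    // Before the commit, accessors were fallible, roughly:
    //     pub fn slot(&self) -> Result<u64> { ... }
    // which forced `?` / `.unwrap()` / `if let Ok(..)` at every call site.

    // After: the header bytes are always present, so the read is infallible.
    fn slot(&self) -> u64 {
        // try_into cannot fail for a fixed 8-byte slice
        u64::from_le_bytes(self.data[0..8].try_into().unwrap())
    }

    // "add parent": a new accessor next to slot (offset assumed here).
    fn parent(&self) -> u64 {
        u64::from_le_bytes(self.data[8..16].try_into().unwrap())
    }
}

fn main() {
    let blob = Blob { data: [0u8; 64] };
    // Call sites simplify exactly as in the hunks below:
    let slot = blob.slot(); // was: if let Ok(slot) = blob.slot() { ... }
    let parent = blob.parent();
    println!("slot {slot}, parent {parent}");
}
```

The apparent design point is that the header region is always present in a blob's buffer, so reading a field cannot fail; making the accessors infallible removes a layer of error handling at every call site, as the hunks below show.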
@@ -125,10 +125,9 @@ pub fn retransmit_all_leader_blocks(
     for b in dq {
         // Check if the blob is from the scheduled leader for its slot. If so,
         // add to the retransmit_queue
-        if let Ok(slot) = b.read().unwrap().slot() {
-            if let Some(leader_id) = leader_scheduler.read().unwrap().get_leader_for_slot(slot) {
-                add_blob_to_retransmit_queue(b, leader_id, &mut retransmit_queue);
-            }
+        let slot = b.read().unwrap().slot();
+        if let Some(leader_id) = leader_scheduler.read().unwrap().get_leader_for_slot(slot) {
+            add_blob_to_retransmit_queue(b, leader_id, &mut retransmit_queue);
         }
     }

@@ -154,7 +153,7 @@ pub fn add_blob_to_retransmit_queue(
     retransmit_queue: &mut Vec<SharedBlob>,
 ) {
     let p = b.read().unwrap();
-    if p.id().expect("get_id in fn add_block_to_retransmit_queue") == leader_id {
+    if p.id() == leader_id {
         let nv = SharedBlob::default();
         {
             let mut mnv = nv.write().unwrap();
@@ -186,7 +185,7 @@ pub fn process_blob(
     // Github issue: https://github.com/solana-labs/solana/issues/1899.
     let (slot, pix) = {
         let r_blob = blob.read().unwrap();
-        (r_blob.slot()?, r_blob.index()?)
+        (r_blob.slot(), r_blob.index())
     };
     let leader = leader_scheduler.read().unwrap().get_leader_for_slot(slot);

@@ -199,11 +198,7 @@ pub fn process_blob(
     // Insert the new blob into the window
     let mut consumed_entries = if is_coding {
         let blob = &blob.read().unwrap();
-        db_ledger.put_coding_blob_bytes(
-            slot,
-            pix,
-            &blob.data[..BLOB_HEADER_SIZE + blob.size().unwrap()],
-        )?;
+        db_ledger.put_coding_blob_bytes(slot, pix, &blob.data[..BLOB_HEADER_SIZE + blob.size()])?;
         vec![]
     } else {
         db_ledger.insert_data_blobs(vec![(*blob.read().unwrap()).borrow()])?
@@ -280,8 +275,8 @@ fn try_erasure(db_ledger: &Arc<DbLedger>, consume_queue: &mut Vec<Entry>) -> Res
         let c = c.read().unwrap();
         db_ledger.put_coding_blob_bytes(
             meta.consumed_slot,
-            c.index().unwrap(),
-            &c.data[..BLOB_HEADER_SIZE + c.size().unwrap()],
+            c.index(),
+            &c.data[..BLOB_HEADER_SIZE + c.size()],
         )?;
     }

@@ -402,7 +397,7 @@ mod test {
         let (blob_sender, blob_receiver) = channel();

         // Expect blob from leader to be retransmitted
-        blob.write().unwrap().set_id(&leader).unwrap();
+        blob.write().unwrap().set_id(&leader);
         retransmit_all_leader_blocks(&vec![blob.clone()], &leader_scheduler, &blob_sender)
             .expect("Expect successful retransmit");
         let output_blob = blob_receiver
@@ -412,11 +407,11 @@ mod test {
         // Retransmitted blob should be missing the leader id
         assert_ne!(*output_blob[0].read().unwrap(), *blob.read().unwrap());
         // Set the leader in the retransmitted blob, should now match the original
-        output_blob[0].write().unwrap().set_id(&leader).unwrap();
+        output_blob[0].write().unwrap().set_id(&leader);
         assert_eq!(*output_blob[0].read().unwrap(), *blob.read().unwrap());

         // Expect blob from nonleader to not be retransmitted
-        blob.write().unwrap().set_id(&nonleader).unwrap();
+        blob.write().unwrap().set_id(&nonleader);
         retransmit_all_leader_blocks(&vec![blob], &leader_scheduler, &blob_sender)
             .expect("Expect successful retransmit");
         assert!(blob_receiver.try_recv().is_err());
@@ -441,8 +436,8 @@ mod test {
         const ONE: u64 = 1;
         const OTHER: u64 = 4;

-        blobs[0].set_index(ONE).unwrap();
-        blobs[1].set_index(OTHER).unwrap();
+        blobs[0].set_index(ONE);
+        blobs[1].set_index(OTHER);

         // Insert one blob at index = first_index
         db_ledger.write_blobs(&blobs).unwrap();
@@ -479,8 +474,8 @@ mod test {
         let num_entries = 10;
         let mut blobs = make_tiny_test_entries(num_entries).to_blobs();
         for (i, b) in blobs.iter_mut().enumerate() {
-            b.set_index(i as u64 * gap).unwrap();
-            b.set_slot(slot).unwrap();
+            b.set_index(i as u64 * gap);
+            b.set_slot(slot);
         }
         db_ledger.write_blobs(&blobs).unwrap();

@@ -635,7 +630,7 @@ mod test {
                 .get_coding_blob_bytes(slot_height, erased_index as u64)
                 .unwrap()
                 .unwrap()[BLOB_HEADER_SIZE..],
-            &erased_coding_l.data()[..erased_coding_l.size().unwrap() as usize],
+            &erased_coding_l.data()[..erased_coding_l.size() as usize],
         );
     }