Implement new Index Column (#4827)

* Implement new Index Column

* Correct slicing of blobs

* Mark coding blobs as coding when they're recovered

* Prevent broadcast stages from mixing coding and data blobs in blocktree

* Mark recovered blobs as present in the index

* Fix indexing error in recovery

* Fix broken tests and several other bugs

* Increase minimum stack size for coverage runs
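
The first and fifth bullets above are the core of the change: the blocktree gains a per-slot index column that records which data and coding blob indexes are already stored, so recovery can mark reconstructed blobs as present without rescanning the blob columns themselves. A rough, purely illustrative sketch of that bookkeeping follows; the names SlotIndex, data_present, and coding_present are invented for this note and are not the commit's actual schema.

// Hypothetical sketch only; not the real blocktree index column.
use std::collections::BTreeSet;

#[derive(Default)]
struct SlotIndex {
    slot: u64,
    data_present: BTreeSet<u64>,   // indexes of data blobs stored for this slot
    coding_present: BTreeSet<u64>, // indexes of coding (erasure) blobs stored
}

impl SlotIndex {
    fn set_data_present(&mut self, index: u64) {
        self.data_present.insert(index);
    }
    fn set_coding_present(&mut self, index: u64) {
        self.coding_present.insert(index);
    }
    // Recovery can consult the index, rather than re-reading blobs, to decide
    // whether an erasure set has enough pieces to reconstruct missing data.
    fn is_data_present(&self, index: u64) -> bool {
        self.data_present.contains(&index)
    }
}

fn main() {
    let mut idx = SlotIndex { slot: 42, ..Default::default() };
    idx.set_data_present(0);
    idx.set_coding_present(0);
    assert!(idx.is_data_present(0));
}
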
Author: Mark E. Sinclair
Date: 2019-07-10 13:08:17 -05:00
Committed by: Pankaj Garg
Parent: b1a678b2db
Commit: a383ea532f
13 changed files with 888 additions and 586 deletions

@@ -4,7 +4,7 @@
 use crate::blocktree::Blocktree;
 use crate::cluster_info::ClusterInfo;
 use crate::leader_schedule_cache::LeaderScheduleCache;
-use crate::packet::{Blob, SharedBlob, BLOB_HEADER_SIZE};
+use crate::packet::{Blob, SharedBlob};
 use crate::repair_service::{RepairService, RepairStrategy};
 use crate::result::{Error, Result};
 use crate::service::Service;
@@ -28,11 +28,12 @@ pub const NUM_THREADS: u32 = 10;
 fn retransmit_blobs(blobs: &[SharedBlob], retransmit: &BlobSender, id: &Pubkey) -> Result<()> {
     let mut retransmit_queue: Vec<SharedBlob> = Vec::new();
     for blob in blobs {
+        let mut blob_guard = blob.write().unwrap();
         // Don't add blobs generated by this node to the retransmit queue
-        if blob.read().unwrap().id() != *id {
-            let mut w_blob = blob.write().unwrap();
-            w_blob.meta.forward = w_blob.should_forward();
-            w_blob.set_forwarded(false);
+        if blob_guard.id() != *id && !blob_guard.is_coding() {
+            //let mut w_blob = blob.write().unwrap();
+            blob_guard.meta.forward = blob_guard.should_forward();
+            blob_guard.set_forwarded(false);
             retransmit_queue.push(blob.clone());
         }
     }
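
The retransmit change above swaps a read lock for the id check plus a second write lock for the mutation with a single write guard taken once per blob, and it also keeps coding blobs out of the retransmit queue. Below is a self-contained sketch of that pattern with a stand-in Blob type; the real SharedBlob lives in crate::packet, and every name here is illustrative only.

use std::sync::{Arc, RwLock};

// Stand-in for packet::Blob with only the fields this sketch needs.
struct Blob {
    id: u64,
    coding: bool,
    forward: bool,
}

type SharedBlob = Arc<RwLock<Blob>>;

fn retransmit_filter(blobs: &[SharedBlob], self_id: u64) -> Vec<SharedBlob> {
    let mut queue = Vec::new();
    for blob in blobs {
        // One write guard up front replaces the old read-lock check followed
        // by a separate write lock for the mutation.
        let mut guard = blob.write().unwrap();
        if guard.id != self_id && !guard.coding {
            guard.forward = true;
            queue.push(blob.clone());
        }
    }
    queue
}

fn main() {
    let blobs = vec![
        Arc::new(RwLock::new(Blob { id: 1, coding: false, forward: false })),
        Arc::new(RwLock::new(Blob { id: 2, coding: true, forward: false })),
    ];
    let queue = retransmit_filter(&blobs, 9);
    // Only the non-coding blob from another node is queued, and it is marked
    // for forwarding.
    assert_eq!(queue.len(), 1);
    assert!(queue[0].read().unwrap().forward);
}
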
@@ -52,29 +53,17 @@ fn retransmit_blobs(blobs: &[SharedBlob], retransmit: &BlobSender, id: &Pubkey)
 /// Process a blob: Add blob to the ledger window.
 pub fn process_blobs(blobs: &[SharedBlob], blocktree: &Arc<Blocktree>) -> Result<()> {
     // make an iterator for insert_data_blobs()
-    let blobs: Vec<_> = blobs.iter().map(move |blob| blob.read().unwrap()).collect();
+    //let blobs: Vec<_> = blobs.iter().map(move |blob| blob.read().unwrap()).collect();
-    blocktree.insert_data_blobs(blobs.iter().filter_map(|blob| {
-        if !blob.is_coding() {
-            Some(&(**blob))
-        } else {
-            None
-        }
-    }))?;
+    blocktree.write_shared_blobs(
+        blobs
+            .iter()
+            .filter(|blob| !blob.read().unwrap().is_coding()),
+    )?;
-    for blob in blobs {
     // TODO: Once the original leader signature is added to the blob, make sure that
     // the blob was originally generated by the expected leader for this slot
+    blocktree
+        .put_shared_coding_blobs(blobs.iter().filter(|blob| blob.read().unwrap().is_coding()))?;
-        // Insert the new blob into block tree
-        if blob.is_coding() {
-            blocktree.put_coding_blob_bytes(
-                blob.slot(),
-                blob.index(),
-                &blob.data[..BLOB_HEADER_SIZE + blob.size()],
-            )?;
-        }
-    }
     Ok(())
 }
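
In process_blobs, the mixed batch is now routed through two filtered passes over the same slice: data blobs go to write_shared_blobs and coding blobs to put_shared_coding_blobs, which is what keeps broadcast from mixing the two kinds in a single insert path. The illustration below shows just that split; MockBlocktree and its method names are stand-ins for this note, not the real blocktree API.

// Illustrative only: MockBlocktree is a stand-in; the point is routing data
// and coding blobs through separate insert paths.
struct Blob {
    index: u64,
    coding: bool,
}

#[derive(Default)]
struct MockBlocktree {
    data: Vec<u64>,
    coding: Vec<u64>,
}

impl MockBlocktree {
    fn write_data_blobs<'a>(&mut self, blobs: impl Iterator<Item = &'a Blob>) {
        self.data.extend(blobs.map(|b| b.index));
    }
    fn put_coding_blobs<'a>(&mut self, blobs: impl Iterator<Item = &'a Blob>) {
        self.coding.extend(blobs.map(|b| b.index));
    }
}

fn process_blobs(blobs: &[Blob], blocktree: &mut MockBlocktree) {
    // Two passes over the same batch: data first, then coding.
    blocktree.write_data_blobs(blobs.iter().filter(|b| !b.coding));
    blocktree.put_coding_blobs(blobs.iter().filter(|b| b.coding));
}

fn main() {
    let blobs = vec![
        Blob { index: 0, coding: false },
        Blob { index: 0, coding: true },
        Blob { index: 1, coding: false },
    ];
    let mut blocktree = MockBlocktree::default();
    process_blobs(&blobs, &mut blocktree);
    assert_eq!((blocktree.data.len(), blocktree.coding.len()), (2, 1));
}
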
@@ -215,6 +204,8 @@ impl WindowService {
         let bank_forks = bank_forks.clone();
         let t_window = Builder::new()
             .name("solana-window".to_string())
+            // TODO: Mark: Why is it overflowing
+            .stack_size(8 * 1024 * 1024)
             .spawn(move || {
                 let _exit = Finalizer::new(exit.clone());
                 let id = cluster_info.read().unwrap().id();
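
The last hunk raises the window thread's stack to 8 MiB via std::thread::Builder, which the commit message ties to stack overflows seen in coverage runs, where instrumented builds use noticeably more stack. A minimal standalone example of the same std API; the thread body here is obviously a placeholder.

use std::thread::Builder;

fn main() {
    // Builder lets a named thread request an explicit stack size; 8 MiB
    // matches the value used in the diff above.
    let handle = Builder::new()
        .name("solana-window".to_string())
        .stack_size(8 * 1024 * 1024)
        .spawn(|| {
            // Placeholder body; in the real service this closure runs the
            // window loop, whose frames grow larger under coverage builds.
            println!("running on an 8 MiB stack");
        })
        .expect("failed to spawn thread");
    handle.join().unwrap();
}
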