v1.0: Add nonce to shreds repairs, add shred data size to header (#10110)
* Add nonce to shreds/repairs
* Add data shred size to header
* Align with future epoch

Co-authored-by: Carl <carl@solana.com>
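The core of the change is a slot-gated packet layout: once a hardcoded activation slot is passed, data shreds reserve four trailing bytes for a repair nonce and two bytes for a new `size` field in the data shred header, while earlier slots keep the legacy layout. Below is a condensed, free-standing sketch of that gating, restating constants and helpers from the diff that follows; the `solana_sdk` import paths are assumptions and the sketch is illustrative rather than part of the commit itself:

use solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE};

// Constants introduced in shred.rs by this change.
const SIZE_OF_NONCE: usize = 4; // trailing repair nonce
const NONCE_SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE;
const UNLOCK_NONCE_SLOT: Slot = 13_115_515; // nonce layout activates after this slot

// Mirrors Shred::is_nonce_unlocked: the nonce-aware layout applies only to
// slots strictly greater than the unlock slot.
fn is_nonce_unlocked(slot: Slot) -> bool {
    slot > UNLOCK_NONCE_SLOT
}

// Mirrors Shred::get_expected_payload_size_from_slot.
fn expected_payload_size(slot: Slot) -> usize {
    if is_nonce_unlocked(slot) {
        NONCE_SHRED_PAYLOAD_SIZE // 4 bytes kept free for the nonce
    } else {
        PACKET_DATA_SIZE // legacy shreds fill the whole packet
    }
}

fn main() {
    // Slots at or before the unlock slot keep the old size; later slots shrink by the nonce.
    assert_eq!(expected_payload_size(UNLOCK_NONCE_SLOT), PACKET_DATA_SIZE);
    assert_eq!(expected_payload_size(UNLOCK_NONCE_SLOT + 1), NONCE_SHRED_PAYLOAD_SIZE);
}

This is why most call sites in the diff now take an explicit payload size or an Option<usize> data-size hint, and why the tests are re-run at both UNLOCK_NONCE_SLOT and UNLOCK_NONCE_SLOT + 1.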
@@ -1982,10 +1982,11 @@ impl Blockstore {
let data_shreds = data_shreds?;
assert!(data_shreds.last().unwrap().data_complete());

-let deshred_payload = Shredder::deshred(&data_shreds).map_err(|_| {
-BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
-"Could not reconstruct data block from constituent shreds".to_string(),
-)))
+let deshred_payload = Shredder::deshred(&data_shreds).map_err(|e| {
+BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
+"Could not reconstruct data block from constituent shreds, error: {:?}",
+e
+))))
})?;

debug!("{:?} shreds in last FEC set", data_shreds.len(),);
@@ -2830,7 +2831,7 @@ pub mod tests {
entry::{next_entry, next_entry_mut},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
leader_schedule::{FixedSchedule, LeaderSchedule},
-shred::{max_ticks_per_n_shreds, DataShredHeader},
+shred::{max_ticks_per_n_shreds, DataShredHeader, NONCE_SHRED_PAYLOAD_SIZE},
};
use assert_matches::assert_matches;
use bincode::serialize;
@@ -2980,7 +2981,7 @@ pub mod tests {
#[test]
fn test_insert_get_bytes() {
// Create enough entries to ensure there are at least two shreds created
-let num_entries = max_ticks_per_n_shreds(1) + 1;
+let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);

let (mut shreds, _) = make_slot_entries(0, 0, num_entries);
@@ -3220,7 +3221,7 @@ pub mod tests {
#[test]
fn test_insert_data_shreds_basic() {
// Create enough entries to ensure there are at least two shreds created
-let num_entries = max_ticks_per_n_shreds(1) + 1;
+let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);

let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
@@ -3267,7 +3268,7 @@ pub mod tests {
#[test]
fn test_insert_data_shreds_reverse() {
let num_shreds = 10;
-let num_entries = max_ticks_per_n_shreds(num_shreds);
+let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
let num_shreds = shreds.len() as u64;

@@ -3444,7 +3445,7 @@ pub mod tests {
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create enough entries to ensure there are at least two shreds created
-let min_entries = max_ticks_per_n_shreds(1) + 1;
+let min_entries = max_ticks_per_n_shreds(1, None) + 1;
for i in 0..4 {
let slot = i;
let parent_slot = if i == 0 { 0 } else { i - 1 };
@@ -3871,7 +3872,7 @@ pub mod tests {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 15;
// Create enough entries to ensure there are at least two shreds created
-let entries_per_slot = max_ticks_per_n_shreds(1) + 1;
+let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
assert!(entries_per_slot > 1);

let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
@@ -4241,7 +4242,7 @@ pub mod tests {
let gap: u64 = 10;
assert!(gap > 3);
// Create enough entries to ensure there are at least two shreds created
-let num_entries = max_ticks_per_n_shreds(1) + 1;
+let num_entries = max_ticks_per_n_shreds(1, None) + 1;
let entries = create_ticks(num_entries, 0, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
let num_shreds = shreds.len();
@@ -4553,6 +4554,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
+NONCE_SHRED_PAYLOAD_SIZE,
);

// Insert a good coding shred
@@ -4585,6 +4587,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
+NONCE_SHRED_PAYLOAD_SIZE,
);
let index = index_cf.get(shred.slot).unwrap().unwrap();
assert!(Blockstore::should_insert_coding_shred(
@@ -4600,6 +4603,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
+NONCE_SHRED_PAYLOAD_SIZE,
);
let index = coding_shred.coding_header.position - 1;
coding_shred.set_index(index as u32);
@@ -4618,6 +4622,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
+NONCE_SHRED_PAYLOAD_SIZE,
);
coding_shred.coding_header.num_coding_shreds = 0;
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
@@ -4634,6 +4639,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
+NONCE_SHRED_PAYLOAD_SIZE,
);
coding_shred.coding_header.num_coding_shreds = coding_shred.coding_header.position;
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
@@ -4651,6 +4657,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
+NONCE_SHRED_PAYLOAD_SIZE,
);
coding_shred.common_header.fec_set_index = std::u32::MAX - 1;
coding_shred.coding_header.num_coding_shreds = 3;
@@ -4683,6 +4690,7 @@ pub mod tests {
shred.clone(),
DataShredHeader::default(),
coding.clone(),
+NONCE_SHRED_PAYLOAD_SIZE,
);
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
coding_shred.set_slot(*last_root.read().unwrap());
@@ -9,7 +9,7 @@ use rayon::{
slice::ParallelSlice,
ThreadPool,
};
-use serde::{Deserialize, Serialize};
+use serde::{Deserialize, Serialize, Serializer};
use solana_metrics::datapoint_debug;
use solana_perf::packet::Packet;
use solana_rayon_threadlimit::get_thread_count;
@@ -24,25 +24,33 @@ use std::mem::size_of;
use std::{sync::Arc, time::Instant};
use thiserror::Error;

+pub type Nonce = u32;
+
/// The following constants are computed by hand, and hardcoded.
/// `test_shred_constants` ensures that the values are correct.
/// Constants are used over lazy_static for performance reasons.
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
pub const SIZE_OF_DATA_SHRED_HEADER: usize = 3;
+pub const SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD: usize = 2;
pub const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
pub const SIZE_OF_SIGNATURE: usize = 64;
pub const SIZE_OF_SHRED_TYPE: usize = 1;
pub const SIZE_OF_SHRED_SLOT: usize = 8;
pub const SIZE_OF_SHRED_INDEX: usize = 4;
+pub const SIZE_OF_NONCE: usize = 4;
pub const SIZE_OF_DATA_SHRED_IGNORED_TAIL: usize =
SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER;
pub const SIZE_OF_DATA_SHRED_PAYLOAD: usize = PACKET_DATA_SIZE
- SIZE_OF_COMMON_SHRED_HEADER
- SIZE_OF_DATA_SHRED_HEADER
- SIZE_OF_DATA_SHRED_IGNORED_TAIL;
+pub const SIZE_OF_NONCE_DATA_SHRED_PAYLOAD: usize =
+SIZE_OF_DATA_SHRED_PAYLOAD - SIZE_OF_NONCE - SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD;

pub const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_TYPE;
pub const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT;
+pub const NONCE_SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE;
+pub const UNLOCK_NONCE_SLOT: Slot = 13_115_515;

thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
@@ -107,6 +115,20 @@ pub struct ShredCommonHeader {
pub struct DataShredHeader {
pub parent_offset: u16,
pub flags: u8,
+#[serde(skip_deserializing)]
+#[serde(skip_serializing_if = "Option::is_none")]
+#[serde(serialize_with = "option_as_u16_serialize")]
+pub size: Option<u16>,
}

+#[allow(clippy::trivially_copy_pass_by_ref)]
+fn option_as_u16_serialize<S>(x: &Option<u16>, s: S) -> std::result::Result<S::Ok, S::Error>
+where
+S: Serializer,
+{
+assert!(x.is_some());
+let num = x.unwrap();
+s.serialize_u16(num)
+}
+
/// The coding shred header has FEC information
@@ -168,7 +190,8 @@ impl Shred {
version: u16,
fec_set_index: u32,
) -> Self {
-let mut payload = vec![0; PACKET_DATA_SIZE];
+let payload_size = Self::get_expected_payload_size_from_slot(slot);
+let mut payload = vec![0; payload_size];
let common_header = ShredCommonHeader {
slot,
index,
@@ -177,9 +200,20 @@ impl Shred {
..ShredCommonHeader::default()
};

+let size = if Self::is_nonce_unlocked(slot) {
+Some(
+(data.map(|d| d.len()).unwrap_or(0)
++ SIZE_OF_DATA_SHRED_HEADER
++ SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD
++ SIZE_OF_COMMON_SHRED_HEADER) as u16,
+)
+} else {
+None
+};
let mut data_header = DataShredHeader {
parent_offset,
flags: reference_tick.min(SHRED_TICK_REFERENCE_MASK),
+size,
};

if is_last_data {
@@ -198,9 +232,10 @@ impl Shred {
&common_header,
)
.expect("Failed to write header into shred buffer");
+let size_of_data_shred_header = Shredder::get_expected_data_header_size_from_slot(slot);
Self::serialize_obj_into(
&mut start,
-SIZE_OF_DATA_SHRED_HEADER,
+size_of_data_shred_header,
&mut payload,
&data_header,
)
@@ -218,11 +253,21 @@ impl Shred {
}
}

-pub fn new_from_serialized_shred(payload: Vec<u8>) -> Result<Self> {
+pub fn new_from_serialized_shred(mut payload: Vec<u8>) -> Result<Self> {
let mut start = 0;
let common_header: ShredCommonHeader =
Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?;

+let slot = common_header.slot;
+let expected_data_size = Self::get_expected_payload_size_from_slot(slot);
+// Safe because any payload from the network must have passed through
+// window service, which implies payload wll be of size
+// PACKET_DATA_SIZE, and `expected_data_size` <= PACKET_DATA_SIZE.
+//
+// On the other hand, if this function is called locally, the payload size should match
+// the `expected_data_size`.
+assert!(payload.len() >= expected_data_size);
+payload.truncate(expected_data_size);
let shred = if common_header.shred_type == ShredType(CODING_SHRED) {
let coding_header: CodingShredHeader =
Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?;
@@ -233,11 +278,14 @@ impl Shred {
payload,
}
} else if common_header.shred_type == ShredType(DATA_SHRED) {
+// This doesn't need to change since we skip deserialization of the
+// "size" field in the header for now
+let size_of_data_shred_header = SIZE_OF_DATA_SHRED_HEADER;
let data_header: DataShredHeader =
-Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?;
+Self::deserialize_obj(&mut start, size_of_data_shred_header, &payload)?;
if u64::from(data_header.parent_offset) > common_header.slot {
return Err(ShredError::InvalidParentOffset {
-slot: common_header.slot,
+slot,
parent_offset: data_header.parent_offset,
});
}
@@ -258,8 +306,10 @@ impl Shred {
common_header: ShredCommonHeader,
data_header: DataShredHeader,
coding_header: CodingShredHeader,
+payload_size: usize,
) -> Self {
-let mut payload = vec![0; PACKET_DATA_SIZE];
+assert!(payload_size == NONCE_SHRED_PAYLOAD_SIZE || payload_size == PACKET_DATA_SIZE);
+let mut payload = vec![0; payload_size];
let mut start = 0;
Self::serialize_obj_into(
&mut start,
@@ -268,10 +318,15 @@ impl Shred {
&common_header,
)
.expect("Failed to write header into shred buffer");
+let expected_data_header_size = if payload_size == NONCE_SHRED_PAYLOAD_SIZE {
+SIZE_OF_DATA_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD
+} else {
+SIZE_OF_DATA_SHRED_HEADER
+};
if common_header.shred_type == ShredType(DATA_SHRED) {
Self::serialize_obj_into(
&mut start,
-SIZE_OF_DATA_SHRED_HEADER,
+expected_data_header_size,
&mut payload,
&data_header,
)
@@ -293,11 +348,13 @@ impl Shred {
}
}

-pub fn new_empty_data_shred() -> Self {
+pub fn new_empty_data_shred(payload_size: usize) -> Self {
+assert!(payload_size == NONCE_SHRED_PAYLOAD_SIZE || payload_size == PACKET_DATA_SIZE);
Self::new_empty_from_header(
ShredCommonHeader::default(),
DataShredHeader::default(),
CodingShredHeader::default(),
+payload_size,
)
}

@@ -403,6 +460,18 @@ impl Shred {
self.signature()
.verify(pubkey.as_ref(), &self.payload[SIZE_OF_SIGNATURE..])
}
+
+pub fn is_nonce_unlocked(slot: Slot) -> bool {
+slot > UNLOCK_NONCE_SLOT
+}
+
+fn get_expected_payload_size_from_slot(slot: Slot) -> usize {
+if Self::is_nonce_unlocked(slot) {
+NONCE_SHRED_PAYLOAD_SIZE
+} else {
+PACKET_DATA_SIZE
+}
+}
}

#[derive(Debug)]
@@ -467,7 +536,7 @@ impl Shredder {

let now = Instant::now();

-let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD;
+let no_header_size = Self::get_expected_data_shred_payload_size_from_slot(self.slot);
let num_shreds = (serialized_shreds.len() + no_header_size - 1) / no_header_size;
let last_shred_index = next_shred_index + num_shreds as u32 - 1;

@@ -628,7 +697,8 @@ impl Shredder {
let start_index = data_shred_batch[0].common_header.index;

// All information after coding shred field in a data shred is encoded
-let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
+let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
+let valid_data_len = expected_payload_size - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let data_ptrs: Vec<_> = data_shred_batch
.iter()
.map(|data| &data.payload[..valid_data_len])
@@ -646,8 +716,12 @@ impl Shredder {
i,
version,
);
-let shred =
-Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header);
+let shred = Shred::new_empty_from_header(
+header,
+DataShredHeader::default(),
+coding_header,
+expected_payload_size,
+);
coding_shreds.push(shred.payload);
});

@@ -701,7 +775,10 @@ impl Shredder {
expected_index: usize,
index_found: usize,
present: &mut [bool],
+payload_size: usize,
) -> Vec<Vec<u8>> {
+// Safe to assert because `new_from_serialized_shred` guarantees the size
+assert!(payload_size == NONCE_SHRED_PAYLOAD_SIZE || payload_size == PACKET_DATA_SIZE);
let end_index = index_found.saturating_sub(1);
// The index of current shred must be within the range of shreds that are being
// recovered
@@ -715,9 +792,9 @@ impl Shredder {
.map(|missing| {
present[missing.saturating_sub(first_index_in_fec_set)] = false;
if missing < first_index_in_fec_set + num_data {
-Shred::new_empty_data_shred().payload
+Shred::new_empty_data_shred(payload_size).payload
} else {
-vec![0; PACKET_DATA_SIZE]
+vec![0; payload_size]
}
})
.collect();
@@ -732,6 +809,8 @@ impl Shredder {
first_code_index: usize,
slot: Slot,
) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
+let expected_payload_size =
+Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?;
let mut recovered_data = vec![];
let fec_set_size = num_data + num_coding;

@@ -751,6 +830,7 @@ impl Shredder {
next_expected_index,
index,
&mut present,
+expected_payload_size,
);
blocks.push(shred.payload);
next_expected_index = index + 1;
@@ -767,6 +847,7 @@ impl Shredder {
next_expected_index,
first_index + fec_set_size,
&mut present,
+expected_payload_size,
);

shred_bufs.append(&mut pending_shreds);
@@ -777,7 +858,7 @@ impl Shredder {

let session = Session::new(num_data, num_coding)?;

-let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
+let valid_data_len = expected_payload_size - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let coding_block_offset = SIZE_OF_CODING_SHRED_HEADER + SIZE_OF_COMMON_SHRED_HEADER;
let mut blocks: Vec<(&mut [u8], bool)> = shred_bufs
.iter_mut()
@@ -822,8 +903,11 @@ impl Shredder {
/// Combines all shreds to recreate the original buffer
pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
let num_data = shreds.len();
-let data_shred_bufs = {
+let expected_payload_size =
+Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?;
+let (data_shred_bufs, slot) = {
let first_index = shreds.first().unwrap().index() as usize;
+let slot = shreds.first().unwrap().slot();
let last_shred = shreds.last().unwrap();
let last_index = if last_shred.data_complete() || last_shred.last_in_slot() {
last_shred.index() as usize
@@ -835,10 +919,32 @@ impl Shredder {
return Err(reed_solomon_erasure::Error::TooFewDataShards);
}

-shreds.iter().map(|shred| &shred.payload).collect()
+(shreds.iter().map(|shred| &shred.payload).collect(), slot)
};

-Ok(Self::reassemble_payload(num_data, data_shred_bufs))
+let expected_data_header_size = Self::get_expected_data_header_size_from_slot(slot);
+Ok(Self::reassemble_payload(
+num_data,
+data_shred_bufs,
+expected_payload_size,
+expected_data_header_size,
+))
}
+
+pub fn get_expected_data_shred_payload_size_from_slot(slot: Slot) -> usize {
+if Shred::is_nonce_unlocked(slot) {
+SIZE_OF_NONCE_DATA_SHRED_PAYLOAD
+} else {
+SIZE_OF_DATA_SHRED_PAYLOAD
+}
+}
+
+pub fn get_expected_data_header_size_from_slot(slot: Slot) -> usize {
+if Shred::is_nonce_unlocked(slot) {
+SIZE_OF_DATA_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD
+} else {
+SIZE_OF_DATA_SHRED_HEADER
+}
+}
+
fn get_shred_index(
@@ -854,26 +960,60 @@ impl Shredder {
}
}

-fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec<u8>>) -> Vec<u8> {
-let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
+fn reassemble_payload(
+num_data: usize,
+data_shred_bufs: Vec<&Vec<u8>>,
+expected_payload_size: usize,
+expected_data_header_size: usize,
+) -> Vec<u8> {
+let valid_data_len = expected_payload_size - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
data_shred_bufs[..num_data]
.iter()
.flat_map(|data| {
-let offset = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER;
+let offset = SIZE_OF_COMMON_SHRED_HEADER + expected_data_header_size;
data[offset..valid_data_len].iter()
})
.cloned()
.collect()
}
+
+fn verify_consistent_shred_payload_sizes(
+caller: &str,
+shreds: &[Shred],
+) -> std::result::Result<usize, reed_solomon_erasure::Error> {
+if shreds.is_empty() {
+return Err(reed_solomon_erasure::Error::TooFewShardsPresent);
+}
+let slot = shreds[0].slot();
+let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
+for shred in shreds {
+if shred.payload.len() != expected_payload_size {
+error!(
+"{} Shreds for slot: {} are inconsistent sizes. One shred: {} Another shred: {}",
+caller,
+slot,
+expected_payload_size,
+shred.payload.len()
+);
+return Err(reed_solomon_erasure::Error::IncorrectShardSize);
+}
+}
+
+Ok(expected_payload_size)
+}
}

-pub fn max_ticks_per_n_shreds(num_shreds: u64) -> u64 {
+pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option<usize>) -> u64 {
let ticks = create_ticks(1, 0, Hash::default());
-max_entries_per_n_shred(&ticks[0], num_shreds)
+max_entries_per_n_shred(&ticks[0], num_shreds, shred_data_size)
}

-pub fn max_entries_per_n_shred(entry: &Entry, num_shreds: u64) -> u64 {
-let shred_data_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
+pub fn max_entries_per_n_shred(
+entry: &Entry,
+num_shreds: u64,
+shred_data_size: Option<usize>,
+) -> u64 {
+let shred_data_size = shred_data_size.unwrap_or(SIZE_OF_NONCE_DATA_SHRED_PAYLOAD) as u64;
let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
let entry_size = bincode::serialized_size(entry).unwrap();
let count_size = vec_size - entry_size;
@@ -891,7 +1031,8 @@ pub fn verify_test_data_shred(
is_last_in_slot: bool,
is_last_in_fec_set: bool,
) {
-assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
+let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
+assert_eq!(shred.payload.len(), expected_payload_size);
assert!(shred.is_data());
assert_eq!(shred.index(), index);
assert_eq!(shred.slot(), slot);
@@ -932,6 +1073,14 @@ pub mod tests {
SIZE_OF_DATA_SHRED_HEADER,
serialized_size(&DataShredHeader::default()).unwrap() as usize
);
+let data_shred_header_with_size = DataShredHeader {
+size: Some(1000),
+..DataShredHeader::default()
+};
+assert_eq!(
+SIZE_OF_DATA_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER_SIZE_FIELD,
+serialized_size(&data_shred_header_with_size).unwrap() as usize
+);
assert_eq!(
SIZE_OF_SIGNATURE,
bincode::serialized_size(&Signature::default()).unwrap() as usize
@@ -951,17 +1100,16 @@ pub mod tests {
}

fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) {
-assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
+let expected_payload_size = Shred::get_expected_payload_size_from_slot(slot);
+assert_eq!(shred.payload.len(), expected_payload_size);
assert!(!shred.is_data());
assert_eq!(shred.index(), index);
assert_eq!(shred.slot(), slot);
assert_eq!(verify, shred.verify(pk));
}

-#[test]
-fn test_data_shredder() {
+fn run_test_data_shredder(slot: Slot) {
let keypair = Arc::new(Keypair::new());
-let slot = 0x123456789abcdef0;

// Test that parent cannot be > current slot
assert_matches!(
@@ -996,7 +1144,7 @@ pub mod tests {
.collect();

let size = serialized_size(&entries).unwrap();
-let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
+let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot) as u64;
let num_expected_data_shreds = (size + no_header_size - 1) / no_header_size;
let num_expected_coding_shreds =
Shredder::calculate_num_coding_shreds(num_expected_data_shreds as f32, fec_rate);
@@ -1051,6 +1199,12 @@ pub mod tests {
assert_eq!(entries, deshred_entries);
}

+#[test]
+fn test_data_shredder() {
+run_test_data_shredder(UNLOCK_NONCE_SLOT);
+run_test_data_shredder(UNLOCK_NONCE_SLOT + 1);
+}
+
#[test]
fn test_deserialize_shred_payload() {
let keypair = Arc::new(Keypair::new());
@@ -1077,12 +1231,10 @@ pub mod tests {
assert_eq!(deserialized_shred, *data_shreds.last().unwrap());
}

-#[test]
-fn test_shred_reference_tick() {
+fn run_test_shred_reference_tick(slot: Slot) {
let keypair = Arc::new(Keypair::new());
-let slot = 1;

-let parent_slot = 0;
+let parent_slot = slot - 1;
let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone(), 5, 0)
.expect("Failed in creating shredder");

@@ -1107,6 +1259,12 @@ pub mod tests {
assert_eq!(deserialized_shred.reference_tick(), 5);
}

+#[test]
+fn test_shred_reference_tick() {
+run_test_shred_reference_tick(UNLOCK_NONCE_SLOT);
+run_test_shred_reference_tick(UNLOCK_NONCE_SLOT + 1);
+}
+
#[test]
fn test_shred_reference_tick_overflow() {
let keypair = Arc::new(Keypair::new());
@@ -1143,22 +1301,21 @@ pub mod tests {
);
}

-#[test]
-fn test_data_and_code_shredder() {
+fn run_test_data_and_code_shredder(slot: Slot) {
let keypair = Arc::new(Keypair::new());

-let slot = 0x123456789abcdef0;
// Test that FEC rate cannot be > 1.0
assert_matches!(
Shredder::new(slot, slot - 5, 1.001, keypair.clone(), 0, 0),
Err(ShredError::InvalidFecRate(_))
);

-let shredder = Shredder::new(0x123456789abcdef0, slot - 5, 1.0, keypair.clone(), 0, 0)
+let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");

// Create enough entries to make > 1 shred
-let num_entries = max_ticks_per_n_shreds(1) + 1;
+let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot);
+let num_entries = max_ticks_per_n_shreds(1, Some(no_header_size)) + 1;
let entries: Vec<_> = (0..num_entries)
.map(|_| {
let keypair0 = Keypair::new();
@@ -1190,9 +1347,13 @@ pub mod tests {
}

#[test]
-fn test_recovery_and_reassembly() {
+fn test_data_and_code_shredder() {
+run_test_data_and_code_shredder(UNLOCK_NONCE_SLOT);
+run_test_data_and_code_shredder(UNLOCK_NONCE_SLOT + 1);
+}
+
+fn run_test_recovery_and_reassembly(slot: Slot) {
let keypair = Arc::new(Keypair::new());
-let slot = 0x123456789abcdef0;
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");

@@ -1202,7 +1363,9 @@ pub mod tests {
let entry = Entry::new(&Hash::default(), 1, vec![tx0]);

let num_data_shreds: usize = 5;
-let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
+let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot);
+let num_entries =
+max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(no_header_size));
let entries: Vec<_> = (0..num_entries)
.map(|_| {
let keypair0 = Keypair::new();
@@ -1441,6 +1604,12 @@ pub mod tests {
);
}

+#[test]
+fn test_recovery_and_reassembly() {
+run_test_recovery_and_reassembly(UNLOCK_NONCE_SLOT);
+run_test_recovery_and_reassembly(UNLOCK_NONCE_SLOT + 1);
+}
+
#[test]
fn test_shred_version() {
let keypair = Arc::new(Keypair::new());

@@ -1,5 +1,5 @@
#![allow(clippy::implicit_hasher)]
-use crate::shred::ShredType;
+use crate::shred::{Shred, ShredType, SIZE_OF_NONCE};
use rayon::{
iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator,
@@ -16,9 +16,12 @@ use solana_perf::{
sigverify::{self, batch_size, TxOffset},
};
use solana_rayon_threadlimit::get_thread_count;
-use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::Signature;
-use solana_sdk::signature::{Keypair, Signer};
+use solana_sdk::{
+clock::Slot,
+pubkey::Pubkey,
+signature::Signature,
+signature::{Keypair, Signer},
+};
use std::sync::Arc;
use std::{collections::HashMap, mem::size_of};

@@ -40,13 +43,12 @@ lazy_static! {
/// ...
/// }
/// Signature is the first thing in the packet, and slot is the first thing in the signed message.
-fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> Option<u8> {
+pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> Option<u8> {
let sig_start = 0;
let sig_end = size_of::<Signature>();
let slot_start = sig_end + size_of::<ShredType>();
let slot_end = slot_start + size_of::<u64>();
let msg_start = sig_end;
-let msg_end = packet.meta.size;
if packet.meta.discard {
return Some(0);
}
@@ -55,6 +57,11 @@ fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> O
return Some(0);
}
let slot: u64 = limited_deserialize(&packet.data[slot_start..slot_end]).ok()?;
+let msg_end = if packet.meta.repair && Shred::is_nonce_unlocked(slot) {
+packet.meta.size.saturating_sub(SIZE_OF_NONCE)
+} else {
+packet.meta.size
+};
trace!("slot {}", slot);
let pubkey = slot_leaders.get(&slot)?;
if packet.meta.size < sig_end {
@@ -94,10 +101,10 @@ fn slot_key_data_for_gpu<
batches: &[Packets],
slot_keys: &HashMap<u64, T>,
recycler_cache: &RecyclerCache,
-) -> (PinnedVec<u8>, TxOffset, usize) {
+) -> (PinnedVec<u8>, TxOffset, usize, Vec<Vec<Slot>>) {
//TODO: mark Pubkey::default shreds as failed after the GPU returns
assert_eq!(slot_keys.get(&std::u64::MAX), Some(&T::default()));
-let slots: Vec<Vec<u64>> = SIGVERIFY_THREAD_POOL.install(|| {
+let slots: Vec<Vec<Slot>> = SIGVERIFY_THREAD_POOL.install(|| {
batches
.into_par_iter()
.map(|p| {
@@ -157,7 +164,7 @@ fn slot_key_data_for_gpu<
trace!("keyvec.len: {}", keyvec.len());
trace!("keyvec: {:?}", keyvec);
trace!("offsets: {:?}", offsets);
-(keyvec, offsets, num_in_packets)
+(keyvec, offsets, num_in_packets, slots)
}

fn vec_size_in_packets(keyvec: &PinnedVec<u8>) -> usize {
@@ -177,6 +184,7 @@ fn shred_gpu_offsets(
mut pubkeys_end: usize,
batches: &[Packets],
recycler_cache: &RecyclerCache,
+slots: Option<Vec<Vec<Slot>>>,
) -> (TxOffset, TxOffset, TxOffset, Vec<Vec<u32>>) {
let mut signature_offsets = recycler_cache.offsets().allocate("shred_signatures");
signature_offsets.set_pinnable();
@@ -185,13 +193,30 @@ fn shred_gpu_offsets(
let mut msg_sizes = recycler_cache.offsets().allocate("shred_msg_sizes");
msg_sizes.set_pinnable();
let mut v_sig_lens = vec![];
-for batch in batches {
+let mut slots_iter;
+let mut slots_iter_ref: &mut dyn Iterator<Item = Vec<Slot>> = &mut std::iter::repeat(vec![]);
+if let Some(slots) = slots {
+slots_iter = slots.into_iter();
+slots_iter_ref = &mut slots_iter;
+}
+for (batch, slots) in batches.iter().zip(slots_iter_ref) {
let mut sig_lens = Vec::new();
-for packet in &batch.packets {
+let mut inner_slot_iter;
+let mut inner_slot_iter_ref: &mut dyn Iterator<Item = Slot> = &mut std::iter::repeat(0);
+if !slots.is_empty() {
+inner_slot_iter = slots.into_iter();
+inner_slot_iter_ref = &mut inner_slot_iter;
+};
+
+for (packet, slot) in batch.packets.iter().zip(inner_slot_iter_ref) {
let sig_start = pubkeys_end;
let sig_end = sig_start + size_of::<Signature>();
let msg_start = sig_end;
-let msg_end = sig_start + packet.meta.size;
+let msg_end = if packet.meta.repair && Shred::is_nonce_unlocked(slot) {
+sig_start + packet.meta.size.saturating_sub(SIZE_OF_NONCE)
+} else {
+sig_start + packet.meta.size
+};
signature_offsets.push(sig_start as u32);
msg_start_offsets.push(msg_start as u32);
let msg_size = if msg_end < msg_start {
@@ -222,7 +247,7 @@ pub fn verify_shreds_gpu(
let mut elems = Vec::new();
let mut rvs = Vec::new();
let count = batch_size(batches);
-let (pubkeys, pubkey_offsets, mut num_packets) =
+let (pubkeys, pubkey_offsets, mut num_packets, slots) =
slot_key_data_for_gpu(0, batches, slot_leaders, recycler_cache);
//HACK: Pubkeys vector is passed along as a `Packets` buffer to the GPU
//TODO: GPU needs a more opaque interface, which can handle variable sized structures for data
@@ -230,7 +255,7 @@ pub fn verify_shreds_gpu(
trace!("num_packets: {}", num_packets);
trace!("pubkeys_len: {}", pubkeys_len);
let (signature_offsets, msg_start_offsets, msg_sizes, v_sig_lens) =
-shred_gpu_offsets(pubkeys_len, batches, recycler_cache);
+shred_gpu_offsets(pubkeys_len, batches, recycler_cache, Some(slots));
let mut out = recycler_cache.buffer().allocate("out_buffer");
out.set_pinnable();
elems.push(
@@ -367,7 +392,7 @@ pub fn sign_shreds_gpu(

trace!("offset: {}", offset);
let (signature_offsets, msg_start_offsets, msg_sizes, _v_sig_lens) =
-shred_gpu_offsets(offset, batches, recycler_cache);
+shred_gpu_offsets(offset, batches, recycler_cache, None);
let total_sigs = signature_offsets.len();
let mut signatures_out = recycler_cache.buffer().allocate("ed25519 signatures");
signatures_out.set_pinnable();
@@ -445,14 +470,12 @@ pub fn sign_shreds_gpu(
#[cfg(test)]
pub mod tests {
use super::*;
-use crate::shred::SIZE_OF_DATA_SHRED_PAYLOAD;
-use crate::shred::{Shred, Shredder};
+use crate::shred::{Shred, Shredder, SIZE_OF_DATA_SHRED_PAYLOAD, UNLOCK_NONCE_SLOT};
use solana_sdk::signature::{Keypair, Signer};
-#[test]
-fn test_sigverify_shred_cpu() {
+
+fn run_test_sigverify_shred_cpu(slot: Slot) {
solana_logger::setup();
let mut packet = Packet::default();
-let slot = 0xdeadc0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@@ -492,10 +515,14 @@ pub mod tests {
}

#[test]
-fn test_sigverify_shreds_cpu() {
+fn test_sigverify_shred_cpu() {
+run_test_sigverify_shred_cpu(UNLOCK_NONCE_SLOT);
+run_test_sigverify_shred_cpu(UNLOCK_NONCE_SLOT + 1);
+}
+
+fn run_test_sigverify_shreds_cpu(slot: Slot) {
solana_logger::setup();
let mut batch = [Packets::default()];
-let slot = 0xdeadc0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@@ -542,12 +569,16 @@ pub mod tests {
}

#[test]
-fn test_sigverify_shreds_gpu() {
+fn test_sigverify_shreds_cpu() {
+run_test_sigverify_shreds_cpu(UNLOCK_NONCE_SLOT);
+run_test_sigverify_shreds_cpu(UNLOCK_NONCE_SLOT + 1);
+}
+
+fn run_test_sigverify_shreds_gpu(slot: Slot) {
solana_logger::setup();
let recycler_cache = RecyclerCache::default();

let mut batch = [Packets::default()];
-let slot = 0xdeadc0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@@ -603,14 +634,18 @@ pub mod tests {
}

#[test]
-fn test_sigverify_shreds_sign_gpu() {
+fn test_sigverify_shreds_gpu() {
+run_test_sigverify_shreds_gpu(UNLOCK_NONCE_SLOT);
+run_test_sigverify_shreds_gpu(UNLOCK_NONCE_SLOT + 1);
+}
+
+fn run_test_sigverify_shreds_sign_gpu(slot: Slot) {
solana_logger::setup();
let recycler_cache = RecyclerCache::default();

let mut packets = Packets::default();
let num_packets = 32;
let num_batches = 100;
-let slot = 0xdeadc0de;
packets.packets.resize(num_packets, Packet::default());
for (i, p) in packets.packets.iter_mut().enumerate() {
let shred = Shred::new_from_data(
@@ -650,11 +685,15 @@ pub mod tests {
}

#[test]
-fn test_sigverify_shreds_sign_cpu() {
+fn test_sigverify_shreds_sign_gpu() {
+run_test_sigverify_shreds_sign_gpu(UNLOCK_NONCE_SLOT);
+run_test_sigverify_shreds_sign_gpu(UNLOCK_NONCE_SLOT + 1);
+}
+
+fn run_test_sigverify_shreds_sign_cpu(slot: Slot) {
solana_logger::setup();

let mut batch = [Packets::default()];
-let slot = 0xdeadc0de;
let keypair = Keypair::new();
let shred = Shred::new_from_data(
slot,
@@ -685,4 +724,10 @@ pub mod tests {
let rv = verify_shreds_cpu(&batch, &pubkeys);
assert_eq!(rv, vec![vec![1]]);
}
+
+#[test]
+fn test_sigverify_shreds_sign_cpu() {
+run_test_sigverify_shreds_sign_cpu(UNLOCK_NONCE_SLOT);
+run_test_sigverify_shreds_sign_cpu(UNLOCK_NONCE_SLOT + 1);
+}
}

@@ -1,16 +1,15 @@
use solana_ledger::entry::Entry;
use solana_ledger::shred::{
-max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
+max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder,
+MAX_DATA_SHREDS_PER_FEC_BLOCK, UNLOCK_NONCE_SLOT,
};
use solana_sdk::signature::{Keypair, Signer};
-use solana_sdk::{hash::Hash, system_transaction};
+use solana_sdk::{clock::Slot, hash::Hash, system_transaction};
use std::convert::TryInto;
use std::sync::Arc;

-#[test]
-fn test_multi_fec_block_coding() {
+fn run_test_multi_fec_block_coding(slot: Slot) {
let keypair = Arc::new(Keypair::new());
-let slot = 0x123456789abcdef0;
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");

@@ -20,7 +19,8 @@ fn test_multi_fec_block_coding() {
let keypair1 = Keypair::new();
let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
-let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
+let no_header_size = Shredder::get_expected_data_shred_payload_size_from_slot(slot);
+let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(no_header_size));

let entries: Vec<_> = (0..num_entries)
.map(|_| {
@@ -94,3 +94,9 @@ fn test_multi_fec_block_coding() {
let result = Shredder::deshred(&all_shreds[..]).unwrap();
assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
}
+
+#[test]
+fn test_multi_fec_block_coding() {
+run_test_multi_fec_block_coding(UNLOCK_NONCE_SLOT);
+run_test_multi_fec_block_coding(UNLOCK_NONCE_SLOT + 1);
+}