master: Add nonce to shreds/repairs, add shred data size to header (#10109)

* Add nonce to shreds/repairs

* Add data shred size to header

Co-authored-by: Carl <carl@solana.com>
Author: carllin (committed by GitHub), 2020-05-19 12:38:18 -07:00
parent 427c78d891, commit 97f2bcff69
15 changed files with 598 additions and 256 deletions


@ -2126,10 +2126,11 @@ impl Blockstore {
let data_shreds = data_shreds?;
assert!(data_shreds.last().unwrap().data_complete());
let deshred_payload = Shredder::deshred(&data_shreds).map_err(|_| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
"Could not reconstruct data block from constituent shreds".to_string(),
)))
let deshred_payload = Shredder::deshred(&data_shreds).map_err(|e| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(format!(
"Could not reconstruct data block from constituent shreds, error: {:?}",
e
))))
})?;
debug!("{:?} shreds in last FEC set", data_shreds.len(),);
@ -3187,7 +3188,7 @@ pub mod tests {
#[test]
fn test_insert_get_bytes() {
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1) + 1;
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);
let (mut shreds, _) = make_slot_entries(0, 0, num_entries);
@ -3447,7 +3448,7 @@ pub mod tests {
#[test]
fn test_insert_data_shreds_basic() {
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1) + 1;
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
assert!(num_entries > 1);
let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
@ -3494,7 +3495,7 @@ pub mod tests {
#[test]
fn test_insert_data_shreds_reverse() {
let num_shreds = 10;
let num_entries = max_ticks_per_n_shreds(num_shreds);
let num_entries = max_ticks_per_n_shreds(num_shreds, None);
let (mut shreds, entries) = make_slot_entries(0, 0, num_entries);
let num_shreds = shreds.len() as u64;
@ -3671,7 +3672,7 @@ pub mod tests {
{
let blockstore = Blockstore::open(&blockstore_path).unwrap();
// Create enough entries to ensure there are at least two shreds created
let min_entries = max_ticks_per_n_shreds(1) + 1;
let min_entries = max_ticks_per_n_shreds(1, None) + 1;
for i in 0..4 {
let slot = i;
let parent_slot = if i == 0 { 0 } else { i - 1 };
@ -4096,7 +4097,7 @@ pub mod tests {
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let num_slots = 15;
// Create enough entries to ensure there are at least two shreds created
let entries_per_slot = max_ticks_per_n_shreds(1) + 1;
let entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
assert!(entries_per_slot > 1);
let (mut shreds, _) = make_many_slot_entries(0, num_slots, entries_per_slot);
@ -4463,7 +4464,7 @@ pub mod tests {
let gap: u64 = 10;
assert!(gap > 3);
// Create enough entries to ensure there are at least two shreds created
let num_entries = max_ticks_per_n_shreds(1) + 1;
let num_entries = max_ticks_per_n_shreds(1, None) + 1;
let entries = create_ticks(num_entries, 0, Hash::default());
let mut shreds = entries_to_test_shreds(entries, slot, 0, true, 0);
let num_shreds = shreds.len();
@ -4902,7 +4903,7 @@ pub mod tests {
// Trying to insert value into slot <= than last root should fail
{
let mut coding_shred =
Shred::new_empty_from_header(shred, DataShredHeader::default(), coding);
Shred::new_empty_from_header(shred.clone(), DataShredHeader::default(), coding);
let index = index_cf.get(coding_shred.slot()).unwrap().unwrap();
coding_shred.set_slot(*last_root.read().unwrap());
assert!(!Blockstore::should_insert_coding_shred(


@ -24,26 +24,31 @@ use std::mem::size_of;
use std::{sync::Arc, time::Instant};
use thiserror::Error;
pub type Nonce = u32;
/// The following constants are computed by hand, and hardcoded.
/// `test_shred_constants` ensures that the values are correct.
/// Constants are used over lazy_static for performance reasons.
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
pub const SIZE_OF_DATA_SHRED_HEADER: usize = 3;
pub const SIZE_OF_DATA_SHRED_HEADER: usize = 5;
pub const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
pub const SIZE_OF_SIGNATURE: usize = 64;
pub const SIZE_OF_SHRED_TYPE: usize = 1;
pub const SIZE_OF_SHRED_SLOT: usize = 8;
pub const SIZE_OF_SHRED_INDEX: usize = 4;
pub const SIZE_OF_NONCE: usize = 4;
pub const SIZE_OF_DATA_SHRED_IGNORED_TAIL: usize =
SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER;
pub const SIZE_OF_DATA_SHRED_PAYLOAD: usize = PACKET_DATA_SIZE
- SIZE_OF_COMMON_SHRED_HEADER
- SIZE_OF_DATA_SHRED_HEADER
- SIZE_OF_DATA_SHRED_IGNORED_TAIL;
- SIZE_OF_DATA_SHRED_IGNORED_TAIL
- SIZE_OF_NONCE;
pub const OFFSET_OF_SHRED_TYPE: usize = SIZE_OF_SIGNATURE;
pub const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_TYPE;
pub const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT;
pub const SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE;
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
@ -108,6 +113,7 @@ pub struct ShredCommonHeader {
pub struct DataShredHeader {
pub parent_offset: u16,
pub flags: u8,
pub size: u16,
}
/// The coding shred header has FEC information
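
Taken together, the constant changes above and the new `size` field mean every shred buffer now reserves a trailing 4-byte nonce, and the serialized data-shred header grows from 3 to 5 bytes. A minimal sketch of that arithmetic follows; PACKET_DATA_SIZE = 1232 is an assumption pulled from solana_sdk and is not part of this diff.

// Hedged sketch of the new size bookkeeping; PACKET_DATA_SIZE = 1232 is an assumption.
const PACKET_DATA_SIZE: usize = 1232;
const SIZE_OF_COMMON_SHRED_HEADER: usize = 83;
const SIZE_OF_DATA_SHRED_HEADER: usize = 5; // was 3; +2 bytes for the new `size: u16` field
const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
const SIZE_OF_NONCE: usize = 4;
const SIZE_OF_DATA_SHRED_IGNORED_TAIL: usize =
    SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_CODING_SHRED_HEADER;

// The shred buffer shrinks by the nonce that now trails it in a packet...
const SHRED_PAYLOAD_SIZE: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE; // 1228
// ...and the region left for entry data shrinks accordingly.
const SIZE_OF_DATA_SHRED_PAYLOAD: usize = PACKET_DATA_SIZE
    - SIZE_OF_COMMON_SHRED_HEADER
    - SIZE_OF_DATA_SHRED_HEADER
    - SIZE_OF_DATA_SHRED_IGNORED_TAIL
    - SIZE_OF_NONCE;

fn main() {
    assert_eq!(SHRED_PAYLOAD_SIZE, 1228);
    assert_eq!(SIZE_OF_DATA_SHRED_PAYLOAD, 1051);
}
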
@ -169,7 +175,8 @@ impl Shred {
version: u16,
fec_set_index: u32,
) -> Self {
let mut payload = vec![0; PACKET_DATA_SIZE];
let payload_size = SHRED_PAYLOAD_SIZE;
let mut payload = vec![0; payload_size];
let common_header = ShredCommonHeader {
slot,
index,
@ -178,9 +185,13 @@ impl Shred {
..ShredCommonHeader::default()
};
let size = (data.map(|d| d.len()).unwrap_or(0)
+ SIZE_OF_DATA_SHRED_HEADER
+ SIZE_OF_COMMON_SHRED_HEADER) as u16;
let mut data_header = DataShredHeader {
parent_offset,
flags: reference_tick.min(SHRED_TICK_REFERENCE_MASK),
size,
};
if is_last_data {
@ -199,9 +210,10 @@ impl Shred {
&common_header,
)
.expect("Failed to write header into shred buffer");
let size_of_data_shred_header = SIZE_OF_DATA_SHRED_HEADER;
Self::serialize_obj_into(
&mut start,
SIZE_OF_DATA_SHRED_HEADER,
size_of_data_shred_header,
&mut payload,
&data_header,
)
@ -219,11 +231,21 @@ impl Shred {
}
}
pub fn new_from_serialized_shred(payload: Vec<u8>) -> Result<Self> {
pub fn new_from_serialized_shred(mut payload: Vec<u8>) -> Result<Self> {
let mut start = 0;
let common_header: ShredCommonHeader =
Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?;
let slot = common_header.slot;
let expected_data_size = SHRED_PAYLOAD_SIZE;
// Safe because any payload from the network must have passed through
// window service, which implies the payload will be of size
// PACKET_DATA_SIZE, and `expected_data_size` <= PACKET_DATA_SIZE.
//
// On the other hand, if this function is called locally, the payload size should match
// the `expected_data_size`.
assert!(payload.len() >= expected_data_size);
payload.truncate(expected_data_size);
let shred = if common_header.shred_type == ShredType(CODING_SHRED) {
let coding_header: CodingShredHeader =
Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?;
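
`new_from_serialized_shred` now truncates incoming buffers to SHRED_PAYLOAD_SIZE before deserializing, because a repair response carries the shred bytes followed by a 4-byte nonce at the end of the packet. Below is a hedged sketch of stripping that trailing nonce on receive; the helper name is hypothetical and the little-endian encoding is an assumption, since the nonce's wire format is not shown in this diff.

use std::convert::TryInto;

type Nonce = u32;
const SIZE_OF_NONCE: usize = std::mem::size_of::<Nonce>(); // 4

// Hypothetical helper: split a repair-response packet into the shred bytes and the
// nonce that repair responses carry after them.
fn split_repair_response(packet_data: &[u8]) -> Option<(&[u8], Nonce)> {
    if packet_data.len() < SIZE_OF_NONCE {
        return None;
    }
    let (shred_bytes, nonce_bytes) = packet_data.split_at(packet_data.len() - SIZE_OF_NONCE);
    let nonce = Nonce::from_le_bytes(nonce_bytes.try_into().ok()?);
    Some((shred_bytes, nonce))
}
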
@ -234,11 +256,12 @@ impl Shred {
payload,
}
} else if common_header.shred_type == ShredType(DATA_SHRED) {
let size_of_data_shred_header = SIZE_OF_DATA_SHRED_HEADER;
let data_header: DataShredHeader =
Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?;
Self::deserialize_obj(&mut start, size_of_data_shred_header, &payload)?;
if u64::from(data_header.parent_offset) > common_header.slot {
return Err(ShredError::InvalidParentOffset {
slot: common_header.slot,
slot,
parent_offset: data_header.parent_offset,
});
}
@ -260,7 +283,7 @@ impl Shred {
data_header: DataShredHeader,
coding_header: CodingShredHeader,
) -> Self {
let mut payload = vec![0; PACKET_DATA_SIZE];
let mut payload = vec![0; SHRED_PAYLOAD_SIZE];
let mut start = 0;
Self::serialize_obj_into(
&mut start,
@ -396,7 +419,9 @@ impl Shred {
}
pub fn reference_tick_from_data(data: &[u8]) -> u8 {
let flags = data[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER - size_of::<u8>()];
let flags = data[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER
- size_of::<u8>()
- size_of::<u16>()];
flags & SHRED_TICK_REFERENCE_MASK
}
@ -629,7 +654,7 @@ impl Shredder {
let start_index = data_shred_batch[0].common_header.index;
// All information after coding shred field in a data shred is encoded
let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let data_ptrs: Vec<_> = data_shred_batch
.iter()
.map(|data| &data.payload[..valid_data_len])
@ -718,7 +743,7 @@ impl Shredder {
if missing < first_index_in_fec_set + num_data {
Shred::new_empty_data_shred().payload
} else {
vec![0; PACKET_DATA_SIZE]
vec![0; SHRED_PAYLOAD_SIZE]
}
})
.collect();
@ -733,6 +758,7 @@ impl Shredder {
first_code_index: usize,
slot: Slot,
) -> std::result::Result<Vec<Shred>, reed_solomon_erasure::Error> {
Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?;
let mut recovered_data = vec![];
let fec_set_size = num_data + num_coding;
@ -778,7 +804,7 @@ impl Shredder {
let session = Session::new(num_data, num_coding)?;
let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let coding_block_offset = SIZE_OF_CODING_SHRED_HEADER + SIZE_OF_COMMON_SHRED_HEADER;
let mut blocks: Vec<(&mut [u8], bool)> = shred_bufs
.iter_mut()
@ -823,6 +849,7 @@ impl Shredder {
/// Combines all shreds to recreate the original buffer
pub fn deshred(shreds: &[Shred]) -> std::result::Result<Vec<u8>, reed_solomon_erasure::Error> {
let num_data = shreds.len();
Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?;
let data_shred_bufs = {
let first_index = shreds.first().unwrap().index() as usize;
let last_shred = shreds.last().unwrap();
@ -856,7 +883,7 @@ impl Shredder {
}
fn reassemble_payload(num_data: usize, data_shred_bufs: Vec<&Vec<u8>>) -> Vec<u8> {
let valid_data_len = PACKET_DATA_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
let valid_data_len = SHRED_PAYLOAD_SIZE - SIZE_OF_DATA_SHRED_IGNORED_TAIL;
data_shred_bufs[..num_data]
.iter()
.flat_map(|data| {
@ -866,15 +893,43 @@ impl Shredder {
.cloned()
.collect()
}
fn verify_consistent_shred_payload_sizes(
caller: &str,
shreds: &[Shred],
) -> std::result::Result<(), reed_solomon_erasure::Error> {
if shreds.is_empty() {
return Err(reed_solomon_erasure::Error::TooFewShardsPresent);
}
let slot = shreds[0].slot();
for shred in shreds {
if shred.payload.len() != SHRED_PAYLOAD_SIZE {
error!(
"{} Shreds for slot: {} are inconsistent sizes. Expected: {} actual: {}",
caller,
slot,
SHRED_PAYLOAD_SIZE,
shred.payload.len()
);
return Err(reed_solomon_erasure::Error::IncorrectShardSize);
}
}
Ok(())
}
}
pub fn max_ticks_per_n_shreds(num_shreds: u64) -> u64 {
pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option<usize>) -> u64 {
let ticks = create_ticks(1, 0, Hash::default());
max_entries_per_n_shred(&ticks[0], num_shreds)
max_entries_per_n_shred(&ticks[0], num_shreds, shred_data_size)
}
pub fn max_entries_per_n_shred(entry: &Entry, num_shreds: u64) -> u64 {
let shred_data_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
pub fn max_entries_per_n_shred(
entry: &Entry,
num_shreds: u64,
shred_data_size: Option<usize>,
) -> u64 {
let shred_data_size = shred_data_size.unwrap_or(SIZE_OF_DATA_SHRED_PAYLOAD) as u64;
let vec_size = bincode::serialized_size(&vec![entry]).unwrap();
let entry_size = bincode::serialized_size(entry).unwrap();
let count_size = vec_size - entry_size;
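
The test helpers max_ticks_per_n_shreds and max_entries_per_n_shred now take an optional per-shred data size: None keeps the default SIZE_OF_DATA_SHRED_PAYLOAD, while Some(..) sizes entries against an explicit capacity. A hedged usage sketch, assuming the solana_ledger exports already used by the tests elsewhere in this diff:

use solana_ledger::entry::create_ticks;
use solana_ledger::shred::{
    max_entries_per_n_shred, max_ticks_per_n_shreds, SIZE_OF_DATA_SHRED_PAYLOAD,
};
use solana_sdk::hash::Hash;

fn main() {
    // None keeps the old behavior (default payload size); +1 guarantees at least two shreds.
    let num_entries = max_ticks_per_n_shreds(1, None) + 1;
    assert!(num_entries > 1);

    // Some(..) sizes entries against an explicit per-shred data capacity.
    let tick = &create_ticks(1, 0, Hash::default())[0];
    let per_five_shreds = max_entries_per_n_shred(tick, 5, Some(SIZE_OF_DATA_SHRED_PAYLOAD));
    assert!(per_five_shreds > 0);
}
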
@ -892,7 +947,7 @@ pub fn verify_test_data_shred(
is_last_in_slot: bool,
is_last_in_fec_set: bool,
) {
assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
assert_eq!(shred.payload.len(), SHRED_PAYLOAD_SIZE);
assert!(shred.is_data());
assert_eq!(shred.index(), index);
assert_eq!(shred.slot(), slot);
@ -933,6 +988,14 @@ pub mod tests {
SIZE_OF_DATA_SHRED_HEADER,
serialized_size(&DataShredHeader::default()).unwrap() as usize
);
let data_shred_header_with_size = DataShredHeader {
size: 1000,
..DataShredHeader::default()
};
assert_eq!(
SIZE_OF_DATA_SHRED_HEADER,
serialized_size(&data_shred_header_with_size).unwrap() as usize
);
assert_eq!(
SIZE_OF_SIGNATURE,
bincode::serialized_size(&Signature::default()).unwrap() as usize
@ -952,17 +1015,15 @@ pub mod tests {
}
fn verify_test_code_shred(shred: &Shred, index: u32, slot: Slot, pk: &Pubkey, verify: bool) {
assert_eq!(shred.payload.len(), PACKET_DATA_SIZE);
assert_eq!(shred.payload.len(), SHRED_PAYLOAD_SIZE);
assert!(!shred.is_data());
assert_eq!(shred.index(), index);
assert_eq!(shred.slot(), slot);
assert_eq!(verify, shred.verify(pk));
}
#[test]
fn test_data_shredder() {
fn run_test_data_shredder(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x1234_5678_9abc_def0;
// Test that parent cannot be > current slot
assert_matches!(
@ -1052,6 +1113,11 @@ pub mod tests {
assert_eq!(entries, deshred_entries);
}
#[test]
fn test_data_shredder() {
run_test_data_shredder(0x1234_5678_9abc_def0);
}
#[test]
fn test_deserialize_shred_payload() {
let keypair = Arc::new(Keypair::new());
@ -1144,22 +1210,21 @@ pub mod tests {
);
}
#[test]
fn test_data_and_code_shredder() {
fn run_test_data_and_code_shredder(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x1234_5678_9abc_def0;
// Test that FEC rate cannot be > 1.0
assert_matches!(
Shredder::new(slot, slot - 5, 1.001, keypair.clone(), 0, 0),
Err(ShredError::InvalidFecRate(_))
);
let shredder = Shredder::new(0x1234_5678_9abc_def0, slot - 5, 1.0, keypair.clone(), 0, 0)
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");
// Create enough entries to make > 1 shred
let num_entries = max_ticks_per_n_shreds(1) + 1;
let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let num_entries = max_ticks_per_n_shreds(1, Some(no_header_size)) + 1;
let entries: Vec<_> = (0..num_entries)
.map(|_| {
let keypair0 = Keypair::new();
@ -1191,9 +1256,12 @@ pub mod tests {
}
#[test]
fn test_recovery_and_reassembly() {
fn test_data_and_code_shredder() {
run_test_data_and_code_shredder(0x1234_5678_9abc_def0);
}
fn run_test_recovery_and_reassembly(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x1234_5678_9abc_def0;
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");
@ -1203,7 +1271,9 @@ pub mod tests {
let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
let num_data_shreds: usize = 5;
let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let num_entries =
max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(no_header_size));
let entries: Vec<_> = (0..num_entries)
.map(|_| {
let keypair0 = Keypair::new();
@ -1442,6 +1512,11 @@ pub mod tests {
);
}
#[test]
fn test_recovery_and_reassembly() {
run_test_recovery_and_reassembly(0x1234_5678_9abc_def0);
}
#[test]
fn test_shred_version() {
let keypair = Arc::new(Keypair::new());


@ -1,5 +1,5 @@
#![allow(clippy::implicit_hasher)]
use crate::shred::ShredType;
use crate::shred::{ShredType, SIZE_OF_NONCE};
use rayon::{
iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator,
@ -16,9 +16,12 @@ use solana_perf::{
sigverify::{self, batch_size, TxOffset},
};
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signature;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::{
clock::Slot,
pubkey::Pubkey,
signature::Signature,
signature::{Keypair, Signer},
};
use std::sync::Arc;
use std::{collections::HashMap, mem::size_of};
@ -40,13 +43,12 @@ lazy_static! {
/// ...
/// }
/// Signature is the first thing in the packet, and slot is the first thing in the signed message.
fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> Option<u8> {
pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> Option<u8> {
let sig_start = 0;
let sig_end = size_of::<Signature>();
let slot_start = sig_end + size_of::<ShredType>();
let slot_end = slot_start + size_of::<u64>();
let msg_start = sig_end;
let msg_end = packet.meta.size;
if packet.meta.discard {
return Some(0);
}
@ -55,6 +57,11 @@ fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap<u64, [u8; 32]>) -> O
return Some(0);
}
let slot: u64 = limited_deserialize(&packet.data[slot_start..slot_end]).ok()?;
let msg_end = if packet.meta.repair {
packet.meta.size.saturating_sub(SIZE_OF_NONCE)
} else {
packet.meta.size
};
trace!("slot {}", slot);
let pubkey = slot_leaders.get(&slot)?;
if packet.meta.size < sig_end {
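
verify_shred_cpu, and shred_gpu_offsets below it, now shorten the verified message for repair packets, because the trailing nonce is appended after the shred was signed and must not be covered by the signature check. A self-contained sketch of that range computation; the constants are copied from shred.rs and the boolean stands in for packet.meta.repair.

use std::ops::Range;

const SIZE_OF_SIGNATURE: usize = 64;
const SIZE_OF_NONCE: usize = 4;

// The signature covers everything after itself; a repair response additionally carries
// an unsigned 4-byte nonce at the very end of the packet, which is excluded here.
fn signed_msg_range(packet_size: usize, is_repair: bool) -> Range<usize> {
    let msg_start = SIZE_OF_SIGNATURE;
    let msg_end = if is_repair {
        packet_size.saturating_sub(SIZE_OF_NONCE)
    } else {
        packet_size
    };
    msg_start..msg_end
}

fn main() {
    assert_eq!(signed_msg_range(1232, false), 64..1232);
    assert_eq!(signed_msg_range(1232, true), 64..1228);
}
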
@ -97,7 +104,7 @@ fn slot_key_data_for_gpu<
) -> (PinnedVec<u8>, TxOffset, usize) {
//TODO: mark Pubkey::default shreds as failed after the GPU returns
assert_eq!(slot_keys.get(&std::u64::MAX), Some(&T::default()));
let slots: Vec<Vec<u64>> = SIGVERIFY_THREAD_POOL.install(|| {
let slots: Vec<Vec<Slot>> = SIGVERIFY_THREAD_POOL.install(|| {
batches
.into_par_iter()
.map(|p| {
@ -185,13 +192,17 @@ fn shred_gpu_offsets(
let mut msg_sizes = recycler_cache.offsets().allocate("shred_msg_sizes");
msg_sizes.set_pinnable();
let mut v_sig_lens = vec![];
for batch in batches {
for batch in batches.iter() {
let mut sig_lens = Vec::new();
for packet in &batch.packets {
for packet in batch.packets.iter() {
let sig_start = pubkeys_end;
let sig_end = sig_start + size_of::<Signature>();
let msg_start = sig_end;
let msg_end = sig_start + packet.meta.size;
let msg_end = if packet.meta.repair {
sig_start + packet.meta.size.saturating_sub(SIZE_OF_NONCE)
} else {
sig_start + packet.meta.size
};
signature_offsets.push(sig_start as u32);
msg_start_offsets.push(msg_start as u32);
let msg_size = if msg_end < msg_start {
@ -445,14 +456,12 @@ pub fn sign_shreds_gpu(
#[cfg(test)]
pub mod tests {
use super::*;
use crate::shred::SIZE_OF_DATA_SHRED_PAYLOAD;
use crate::shred::{Shred, Shredder};
use crate::shred::{Shred, Shredder, SIZE_OF_DATA_SHRED_PAYLOAD};
use solana_sdk::signature::{Keypair, Signer};
#[test]
fn test_sigverify_shred_cpu() {
fn run_test_sigverify_shred_cpu(slot: Slot) {
solana_logger::setup();
let mut packet = Packet::default();
let slot = 0xdead_c0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@ -492,10 +501,13 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_cpu() {
fn test_sigverify_shred_cpu() {
run_test_sigverify_shred_cpu(0xdead_c0de);
}
fn run_test_sigverify_shreds_cpu(slot: Slot) {
solana_logger::setup();
let mut batch = [Packets::default()];
let slot = 0xdead_c0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@ -542,12 +554,15 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_gpu() {
fn test_sigverify_shreds_cpu() {
run_test_sigverify_shreds_cpu(0xdead_c0de);
}
fn run_test_sigverify_shreds_gpu(slot: Slot) {
solana_logger::setup();
let recycler_cache = RecyclerCache::default();
let mut batch = [Packets::default()];
let slot = 0xdead_c0de;
let mut shred = Shred::new_from_data(
slot,
0xc0de,
@ -603,14 +618,17 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_sign_gpu() {
fn test_sigverify_shreds_gpu() {
run_test_sigverify_shreds_gpu(0xdead_c0de);
}
fn run_test_sigverify_shreds_sign_gpu(slot: Slot) {
solana_logger::setup();
let recycler_cache = RecyclerCache::default();
let mut packets = Packets::default();
let num_packets = 32;
let num_batches = 100;
let slot = 0xdead_c0de;
packets.packets.resize(num_packets, Packet::default());
for (i, p) in packets.packets.iter_mut().enumerate() {
let shred = Shred::new_from_data(
@ -650,11 +668,14 @@ pub mod tests {
}
#[test]
fn test_sigverify_shreds_sign_cpu() {
fn test_sigverify_shreds_sign_gpu() {
run_test_sigverify_shreds_sign_gpu(0xdead_c0de);
}
fn run_test_sigverify_shreds_sign_cpu(slot: Slot) {
solana_logger::setup();
let mut batch = [Packets::default()];
let slot = 0xdead_c0de;
let keypair = Keypair::new();
let shred = Shred::new_from_data(
slot,
@ -685,4 +706,9 @@ pub mod tests {
let rv = verify_shreds_cpu(&batch, &pubkeys);
assert_eq!(rv, vec![vec![1]]);
}
#[test]
fn test_sigverify_shreds_sign_cpu() {
run_test_sigverify_shreds_sign_cpu(0xdead_c0de);
}
}


@ -1,16 +1,15 @@
use solana_ledger::entry::Entry;
use solana_ledger::shred::{
max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
max_entries_per_n_shred, verify_test_data_shred, Shred, Shredder,
MAX_DATA_SHREDS_PER_FEC_BLOCK, SIZE_OF_DATA_SHRED_PAYLOAD,
};
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::{hash::Hash, system_transaction};
use solana_sdk::{clock::Slot, hash::Hash, system_transaction};
use std::convert::TryInto;
use std::sync::Arc;
#[test]
fn test_multi_fec_block_coding() {
fn run_test_multi_fec_block_coding(slot: Slot) {
let keypair = Arc::new(Keypair::new());
let slot = 0x1234_5678_9abc_def0;
let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
.expect("Failed in creating shredder");
@ -20,7 +19,8 @@ fn test_multi_fec_block_coding() {
let keypair1 = Keypair::new();
let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
let entry = Entry::new(&Hash::default(), 1, vec![tx0]);
let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64);
let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD;
let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(no_header_size));
let entries: Vec<_> = (0..num_entries)
.map(|_| {
@ -94,3 +94,8 @@ fn test_multi_fec_block_coding() {
let result = Shredder::deshred(&all_shreds[..]).unwrap();
assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
}
#[test]
fn test_multi_fec_block_coding() {
run_test_multi_fec_block_coding(0x1234_5678_9abc_def0);
}