Merge pull request from GHSA-8v47-8c53-wwrc

* Track transaction check time separately from account loads

* banking packet process metrics

* Remove signature clone in status cache lookup

* Reduce allocations when converting packets to transactions

* Add blake3 hash of transaction messages in status cache

* Bug fixes

* fix tests and run fmt

* Address feedback

* fix simd tx entry verification

* Fix rebase

* Feedback

* clean up

* Add tests

* Remove feature switch and fall back to signature check

* Bump programs/bpf Cargo.lock

* clippy

* nudge benches

* Bump `BankSlotDelta` frozen ABI hash

* Add blake3 to sdk/programs/Cargo.lock

* nudge bpf tests

* short circuit status cache checks

Co-authored-by: Trent Nelson <trent@solana.com>
Justin Starry authored 2021-04-13 14:28:08 +08:00, committed by GitHub
parent 70f3f7e679
commit 85eb37fab0
30 changed files with 938 additions and 617 deletions
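
The bullet list above centers on one change: the status cache now records and looks up transactions by a blake3 hash of the serialized message instead of by a clone of the first signature, and the corresponding duplicate error is renamed from DuplicateSignature to AlreadyProcessed. A minimal sketch of the keying idea, assuming the blake3 crate; SeenMessages is illustrative only, not the actual solana-runtime StatusCache API:

use std::collections::HashSet;

// Illustrative sketch, not the real StatusCache: keying processed
// transactions by the blake3 hash of their serialized message means a
// duplicate check needs neither a signature clone nor a signature compare.
struct SeenMessages {
    seen: HashSet<blake3::Hash>,
}

impl SeenMessages {
    // Returns false if the message was already processed -- the case the
    // runtime now reports as TransactionError::AlreadyProcessed.
    fn check_and_insert(&mut self, serialized_message: &[u8]) -> bool {
        self.seen.insert(blake3::hash(serialized_message))
    }
}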

ledger/src/blockstore_processor.rs

@@ -3,7 +3,7 @@ use crate::{
blockstore::Blockstore,
blockstore_db::BlockstoreError,
blockstore_meta::SlotMeta,
- entry::{create_ticks, Entry, EntrySlice, EntryVerificationStatus, VerifyRecyclers},
+ entry::{create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers},
leader_schedule_cache::LeaderScheduleCache,
};
use chrono_humanize::{Accuracy, HumanTime, Tense};
@@ -34,6 +34,7 @@ use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signature},
+ timing,
transaction::{Result, Transaction, TransactionError},
};
use solana_transaction_status::token_balances::{
@@ -75,7 +76,7 @@ fn get_first_error(
fee_collection_results: Vec<Result<()>>,
) -> Option<(Result<()>, Signature)> {
let mut first_err = None;
- for (result, transaction) in fee_collection_results.iter().zip(batch.transactions()) {
+ for (result, transaction) in fee_collection_results.iter().zip(batch.transactions_iter()) {
if let Err(ref err) = result {
if first_err.is_none() {
first_err = Some((result.clone(), transaction.signatures[0]));
@@ -124,7 +125,7 @@ fn execute_batch(
timings,
);
- bank_utils::find_and_send_votes(batch.transactions(), &tx_results, replay_vote_sender);
+ bank_utils::find_and_send_votes(batch.hashed_transactions(), &tx_results, replay_vote_sender);
let TransactionResults {
fee_collection_results,
@@ -133,6 +134,7 @@ fn execute_batch(
} = tx_results;
if let Some(transaction_status_sender) = transaction_status_sender {
+ let txs = batch.transactions_iter().cloned().collect();
let post_token_balances = if record_token_balances {
collect_token_balances(&bank, &batch, &mut mint_decimals)
} else {
@@ -144,7 +146,7 @@ fn execute_batch(
transaction_status_sender.send_transaction_status_batch(
bank.clone(),
- batch.transactions(),
+ txs,
execution_results,
balances,
token_balances,
@@ -209,9 +211,10 @@ pub fn process_entries(
replay_vote_sender: Option<&ReplayVoteSender>,
) -> Result<()> {
let mut timings = ExecuteTimings::default();
+ let mut entry_types: Vec<_> = entries.iter().map(EntryType::from).collect();
let result = process_entries_with_callback(
bank,
- entries,
+ &mut entry_types,
randomize,
None,
transaction_status_sender,
@@ -226,7 +229,7 @@ pub fn process_entries(
// Note: If randomize is true this will shuffle entries' transactions in-place.
fn process_entries_with_callback(
bank: &Arc<Bank>,
- entries: &mut [Entry],
+ entries: &mut [EntryType],
randomize: bool,
entry_callback: Option<&ProcessCallback>,
transaction_status_sender: Option<TransactionStatusSender>,
@@ -236,76 +239,78 @@ fn process_entries_with_callback(
// accumulator for entries that can be processed in parallel
let mut batches = vec![];
let mut tick_hashes = vec![];
- if randomize {
- let mut rng = thread_rng();
- for entry in entries.iter_mut() {
- entry.transactions.shuffle(&mut rng);
- }
- }
- for entry in entries {
- if entry.is_tick() {
- // If it's a tick, save it for later
- tick_hashes.push(entry.hash);
- if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
- // If it's a tick that will cause a new blockhash to be created,
- // execute the group and register the tick
- execute_batches(
- bank,
- &batches,
- entry_callback,
- transaction_status_sender.clone(),
- replay_vote_sender,
- timings,
- )?;
- batches.clear();
- for hash in &tick_hashes {
- bank.register_tick(hash);
- }
- tick_hashes.clear();
- }
- continue;
- }
- // else loop on processing the entry
- loop {
- // try to lock the accounts
- let batch = bank.prepare_batch(&entry.transactions);
- let first_lock_err = first_err(batch.lock_results());
+ let mut rng = thread_rng();
- // if locking worked
- if first_lock_err.is_ok() {
- batches.push(batch);
- // done with this entry
- break;
+ for entry in entries {
+ match entry {
+ EntryType::Tick(hash) => {
+ // If it's a tick, save it for later
+ tick_hashes.push(hash);
+ if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
+ // If it's a tick that will cause a new blockhash to be created,
+ // execute the group and register the tick
+ execute_batches(
+ bank,
+ &batches,
+ entry_callback,
+ transaction_status_sender.clone(),
+ replay_vote_sender,
+ timings,
+ )?;
+ batches.clear();
+ for hash in &tick_hashes {
+ bank.register_tick(hash);
+ }
+ tick_hashes.clear();
+ }
+ }
- // else we failed to lock, 2 possible reasons
- if batches.is_empty() {
- // An entry has account lock conflicts with *itself*, which should not happen
- // if generated by a properly functioning leader
- datapoint_error!(
- "validator_process_entry_error",
- (
- "error",
- format!(
- "Lock accounts error, entry conflicts with itself, txs: {:?}",
- entry.transactions
- ),
- String
- )
- );
- // bail
- first_lock_err?;
- } else {
- // else we have an entry that conflicts with a prior entry
- // execute the current queue and try to process this entry again
- execute_batches(
- bank,
- &batches,
- entry_callback,
- transaction_status_sender.clone(),
- replay_vote_sender,
- timings,
- )?;
- batches.clear();
+ EntryType::Transactions(transactions) => {
+ if randomize {
+ transactions.shuffle(&mut rng);
+ }
+ loop {
+ // try to lock the accounts
+ let batch = bank.prepare_hashed_batch(transactions);
+ let first_lock_err = first_err(batch.lock_results());
+ // if locking worked
+ if first_lock_err.is_ok() {
+ batches.push(batch);
+ // done with this entry
+ break;
+ }
+ // else we failed to lock, 2 possible reasons
+ if batches.is_empty() {
+ // An entry has account lock conflicts with *itself*, which should not happen
+ // if generated by a properly functioning leader
+ datapoint_error!(
+ "validator_process_entry_error",
+ (
+ "error",
+ format!(
+ "Lock accounts error, entry conflicts with itself, txs: {:?}",
+ transactions
+ ),
+ String
+ )
+ );
+ // bail
+ first_lock_err?;
+ } else {
+ // else we have an entry that conflicts with a prior entry
+ // execute the current queue and try to process this entry again
+ execute_batches(
+ bank,
+ &batches,
+ entry_callback,
+ transaction_status_sender.clone(),
+ replay_vote_sender,
+ timings,
+ )?;
+ batches.clear();
+ }
+ }
+ }
}
}
@@ -668,7 +673,7 @@ pub fn confirm_slot(
) -> result::Result<(), BlockstoreProcessorError> {
let slot = bank.slot();
- let (mut entries, num_shreds, slot_full) = {
+ let (entries, num_shreds, slot_full) = {
let mut load_elapsed = Measure::start("load_elapsed");
let load_result = blockstore
.get_slot_entries_with_shred_info(slot, progress.num_shreds, allow_dead_slots)
@@ -711,13 +716,10 @@ pub fn confirm_slot(
})?;
}
+ let last_entry_hash = entries.last().map(|e| e.hash);
let verifier = if !skip_verification {
datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64));
- let entry_state = entries.start_verify(
- &progress.last_entry,
- recyclers.clone(),
- bank.secp256k1_program_enabled(),
- );
+ let entry_state = entries.start_verify(&progress.last_entry, recyclers.clone());
if entry_state.status() == EntryVerificationStatus::Failure {
warn!("Ledger proof of history failed at slot: {}", slot);
return Err(BlockError::InvalidEntryHash.into());
@@ -727,6 +729,16 @@ pub fn confirm_slot(
None
};
+ let check_start = Instant::now();
+ let check_result =
+ entries.verify_and_hash_transactions(skip_verification, bank.secp256k1_program_enabled());
+ if check_result.is_none() {
+ warn!("Ledger proof of history failed at slot: {}", slot);
+ return Err(BlockError::InvalidEntryHash.into());
+ }
+ let transaction_duration_us = timing::duration_as_us(&check_start.elapsed());
+ let mut entries = check_result.unwrap();
let mut replay_elapsed = Measure::start("replay_elapsed");
let mut execute_timings = ExecuteTimings::default();
// Note: This will shuffle entries' transactions in-place.
@@ -746,9 +758,9 @@ pub fn confirm_slot(
timing.execute_timings.accumulate(&execute_timings);
if let Some(mut verifier) = verifier {
- let verified = verifier.finish_verify(&entries);
+ let verified = verifier.finish_verify();
timing.poh_verify_elapsed += verifier.poh_duration_us();
- timing.transaction_verify_elapsed += verifier.transaction_duration_us();
+ timing.transaction_verify_elapsed += transaction_duration_us;
if !verified {
warn!("Ledger proof of history failed at slot: {}", bank.slot());
return Err(BlockError::InvalidEntryHash.into());
@@ -760,8 +772,8 @@ pub fn confirm_slot(
progress.num_shreds += num_shreds;
progress.num_entries += num_entries;
progress.num_txs += num_txs;
- if let Some(last_entry) = entries.last() {
- progress.last_entry = last_entry.hash;
+ if let Some(last_entry_hash) = last_entry_hash {
+ progress.last_entry = last_entry_hash;
}
Ok(())
@@ -1070,7 +1082,7 @@ fn process_single_slot(
timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
// Mark corrupt slots as dead so validators don't replay this slot and
- // see DuplicateSignature errors later in ReplayStage
+ // see AlreadyProcessed errors later in ReplayStage
confirm_full_slot(blockstore, bank, opts, recyclers, progress, transaction_status_sender, replay_vote_sender, timing).map_err(|err| {
let slot = bank.slot();
warn!("slot {} failed to verify: {}", slot, err);
@@ -1114,7 +1126,7 @@ impl TransactionStatusSender {
pub fn send_transaction_status_batch(
&self,
bank: Arc<Bank>,
- transactions: &[Transaction],
+ transactions: Vec<Transaction>,
statuses: Vec<TransactionExecutionResult>,
balances: TransactionBalancesSet,
token_balances: TransactionTokenBalancesSet,
@@ -1131,7 +1143,7 @@ impl TransactionStatusSender {
.sender
.send(TransactionStatusMessage::Batch(TransactionStatusBatch {
bank,
- transactions: transactions.to_vec(),
+ transactions,
statuses,
balances,
token_balances,
@@ -1838,22 +1850,22 @@ pub mod tests {
fn test_first_err() {
assert_eq!(first_err(&[Ok(())]), Ok(()));
assert_eq!(
- first_err(&[Ok(()), Err(TransactionError::DuplicateSignature)]),
- Err(TransactionError::DuplicateSignature)
+ first_err(&[Ok(()), Err(TransactionError::AlreadyProcessed)]),
+ Err(TransactionError::AlreadyProcessed)
);
assert_eq!(
first_err(&[
Ok(()),
- Err(TransactionError::DuplicateSignature),
+ Err(TransactionError::AlreadyProcessed),
Err(TransactionError::AccountInUse)
]),
- Err(TransactionError::DuplicateSignature)
+ Err(TransactionError::AlreadyProcessed)
);
assert_eq!(
first_err(&[
Ok(()),
Err(TransactionError::AccountInUse),
- Err(TransactionError::DuplicateSignature)
+ Err(TransactionError::AlreadyProcessed)
]),
Err(TransactionError::AccountInUse)
);
@@ -1861,7 +1873,7 @@ pub mod tests {
first_err(&[
Err(TransactionError::AccountInUse),
Ok(()),
- Err(TransactionError::DuplicateSignature)
+ Err(TransactionError::AlreadyProcessed)
]),
Err(TransactionError::AccountInUse)
);
@@ -2279,13 +2291,13 @@ pub mod tests {
// Check all accounts are unlocked
let txs1 = &entry_1_to_mint.transactions[..];
let txs2 = &entry_2_to_3_mint_to_1.transactions[..];
- let batch1 = bank.prepare_batch(txs1);
+ let batch1 = bank.prepare_batch(txs1.iter());
for result in batch1.lock_results() {
assert!(result.is_ok());
}
// txs1 and txs2 have accounts that conflict, so we must drop txs1 first
drop(batch1);
- let batch2 = bank.prepare_batch(txs2);
+ let batch2 = bank.prepare_batch(txs2.iter());
for result in batch2.lock_results() {
assert!(result.is_ok());
}
@@ -2656,7 +2668,7 @@ pub mod tests {
);
assert_eq!(
bank.transfer(10_001, &mint_keypair, &pubkey),
- Err(TransactionError::DuplicateSignature)
+ Err(TransactionError::AlreadyProcessed)
);
// Make sure other errors don't update the signature cache
@@ -2964,16 +2976,7 @@ pub mod tests {
let entry = next_entry(&new_blockhash, 1, vec![tx]);
entries.push(entry);
- process_entries_with_callback(
- &bank0,
- &mut entries,
- true,
- None,
- None,
- None,
- &mut ExecuteTimings::default(),
- )
- .unwrap();
+ process_entries(&bank0, &mut entries, true, None, None).unwrap();
assert_eq!(bank0.get_balance(&keypair.pubkey()), 1)
}
@@ -3047,7 +3050,7 @@ pub mod tests {
);
account_loaded_twice.message.account_keys[1] = mint_keypair.pubkey();
let transactions = [account_not_found_tx, account_loaded_twice];
- let batch = bank.prepare_batch(&transactions);
+ let batch = bank.prepare_batch(transactions.iter());
let (
TransactionResults {
fee_collection_results,

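The hunks above thread HashedTransaction values through batch preparation and vote tracking; the wrapper itself lives in runtime/src/hashed_transaction.rs, which this page does not show. A rough sketch of its shape, inferred from the HashedTransaction::new(Cow::Borrowed(tx), message_hash) and HashedTransaction::from call sites in entry.rs below; the field names are assumptions:

use solana_sdk::{hash::Hash, transaction::Transaction};
use std::borrow::Cow;

// Sketch only (field names assumed): pairs a possibly-borrowed transaction
// with its precomputed message hash, so the hash is calculated once during
// entry verification and reused by the status cache.
pub struct HashedTransaction<'a> {
    transaction: Cow<'a, Transaction>,
    pub message_hash: Hash,
}

impl<'a> HashedTransaction<'a> {
    pub fn new(transaction: Cow<'a, Transaction>, message_hash: Hash) -> Self {
        Self { transaction, message_hash }
    }
}

impl<'a> From<&'a Transaction> for HashedTransaction<'a> {
    fn from(transaction: &'a Transaction) -> Self {
        // Message::hash() (used in entry.rs below) is the blake3 hash of
        // the serialized message bytes.
        Self::new(Cow::Borrowed(transaction), transaction.message().hash())
    }
}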
ledger/src/entry.rs

@@ -17,10 +17,12 @@ use solana_perf::cuda_runtime::PinnedVec;
use solana_perf::perf_libs;
use solana_perf::recycler::Recycler;
use solana_rayon_threadlimit::get_thread_count;
+ use solana_runtime::hashed_transaction::HashedTransaction;
use solana_sdk::hash::Hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::timing;
use solana_sdk::transaction::Transaction;
+ use std::borrow::Cow;
use std::cell::RefCell;
use std::ffi::OsStr;
use std::sync::mpsc::{Receiver, Sender};
@@ -118,6 +120,28 @@ pub struct Entry {
pub transactions: Vec<Transaction>,
}
+ /// Typed entry to distinguish between transaction and tick entries
+ pub enum EntryType<'a> {
+ Transactions(Vec<HashedTransaction<'a>>),
+ Tick(Hash),
+ }
+ impl<'a> From<&'a Entry> for EntryType<'a> {
+ fn from(entry: &'a Entry) -> Self {
+ if entry.transactions.is_empty() {
+ EntryType::Tick(entry.hash)
+ } else {
+ EntryType::Transactions(
+ entry
+ .transactions
+ .iter()
+ .map(HashedTransaction::from)
+ .collect(),
+ )
+ }
+ }
+ }
impl Entry {
/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn new(prev_hash: &Hash, mut num_hashes: u64, transactions: Vec<Transaction>) -> Self {
@@ -207,10 +231,20 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction
}
}
+ /// Last action required to verify an entry
+ enum VerifyAction {
+ /// Mixin a hash before computing the last hash for a transaction entry
+ Mixin(Hash),
+ /// Compute one last hash for a tick entry
+ Tick,
+ /// No action needed (tick entry with no hashes)
+ None,
+ }
pub struct GpuVerificationData {
thread_h: Option<JoinHandle<u64>>,
hashes: Option<Arc<Mutex<PinnedVec<Hash>>>>,
- tx_hashes: Vec<Option<Hash>>,
+ verifications: Option<Vec<(VerifyAction, Hash)>>,
}
pub enum DeviceVerificationData {
@@ -221,7 +255,6 @@ pub enum DeviceVerificationData {
pub struct EntryVerificationState {
verification_status: EntryVerificationStatus,
poh_duration_us: u64,
- transaction_duration_us: u64,
device_verification_data: DeviceVerificationData,
}
@@ -256,15 +289,7 @@ impl EntryVerificationState {
self.poh_duration_us
}
- pub fn set_transaction_duration_us(&mut self, new: u64) {
- self.transaction_duration_us = new;
- }
- pub fn transaction_duration_us(&self) -> u64 {
- self.transaction_duration_us
- }
- pub fn finish_verify(&mut self, entries: &[Entry]) -> bool {
+ pub fn finish_verify(&mut self) -> bool {
match &mut self.device_verification_data {
DeviceVerificationData::Gpu(verification_state) => {
let gpu_time_us = verification_state.thread_h.take().unwrap().join().unwrap();
@@ -279,19 +304,17 @@ impl EntryVerificationState {
thread_pool.borrow().install(|| {
hashes
.into_par_iter()
- .zip(&verification_state.tx_hashes)
- .zip(entries)
- .all(|((hash, tx_hash), answer)| {
- if answer.num_hashes == 0 {
- *hash == answer.hash
- } else {
- let mut poh = Poh::new(*hash, None);
- if let Some(mixin) = tx_hash {
- poh.record(*mixin).unwrap().hash == answer.hash
- } else {
- poh.tick().unwrap().hash == answer.hash
+ .cloned()
+ .zip(verification_state.verifications.take().unwrap())
+ .all(|(hash, (action, expected))| {
+ let actual = match action {
+ VerifyAction::Mixin(mixin) => {
+ Poh::new(hash, None).record(mixin).unwrap().hash
}
- }
+ VerifyAction::Tick => Poh::new(hash, None).tick().unwrap().hash,
+ VerifyAction::None => hash,
+ };
+ actual == expected
})
})
});
@@ -314,17 +337,17 @@ impl EntryVerificationState {
}
fn compare_hashes(computed_hash: Hash, ref_entry: &Entry) -> bool {
- if ref_entry.num_hashes == 0 {
- computed_hash == ref_entry.hash
- } else {
+ let actual = if !ref_entry.transactions.is_empty() {
+ let tx_hash = hash_transactions(&ref_entry.transactions);
let mut poh = Poh::new(computed_hash, None);
- if ref_entry.transactions.is_empty() {
- poh.tick().unwrap().hash == ref_entry.hash
- } else {
- let tx_hash = hash_transactions(&ref_entry.transactions);
- poh.record(tx_hash).unwrap().hash == ref_entry.hash
- }
- }
+ poh.record(tx_hash).unwrap().hash
+ } else if ref_entry.num_hashes > 0 {
+ let mut poh = Poh::new(computed_hash, None);
+ poh.tick().unwrap().hash
+ } else {
+ computed_hash
+ };
+ actual == ref_entry.hash
}
// an EntrySlice is a slice of Entries
@@ -333,12 +356,8 @@ pub trait EntrySlice {
fn verify_cpu(&self, start_hash: &Hash) -> EntryVerificationState;
fn verify_cpu_generic(&self, start_hash: &Hash) -> EntryVerificationState;
fn verify_cpu_x86_simd(&self, start_hash: &Hash, simd_len: usize) -> EntryVerificationState;
- fn start_verify(
- &self,
- start_hash: &Hash,
- recyclers: VerifyRecyclers,
- secp256k1_program_enabled: bool,
- ) -> EntryVerificationState;
+ fn start_verify(&self, start_hash: &Hash, recyclers: VerifyRecyclers)
+ -> EntryVerificationState;
fn verify(&self, start_hash: &Hash) -> bool;
/// Checks that each entry tick has the correct number of hashes. Entry slices do not
/// necessarily end in a tick, so `tick_hash_count` is used to carry over the hash count
@@ -346,13 +365,17 @@ pub trait EntrySlice {
fn verify_tick_hash_count(&self, tick_hash_count: &mut u64, hashes_per_tick: u64) -> bool;
/// Counts tick entries
fn tick_count(&self) -> u64;
- fn verify_transaction_signatures(&self, secp256k1_program_enabled: bool) -> bool;
+ fn verify_and_hash_transactions(
+ &self,
+ skip_verification: bool,
+ secp256k1_program_enabled: bool,
+ ) -> Option<Vec<EntryType<'_>>>;
}
impl EntrySlice for [Entry] {
fn verify(&self, start_hash: &Hash) -> bool {
- self.start_verify(start_hash, VerifyRecyclers::default(), true)
- .finish_verify(self)
+ self.start_verify(start_hash, VerifyRecyclers::default())
+ .finish_verify()
}
fn verify_cpu_generic(&self, start_hash: &Hash) -> EntryVerificationState {
@@ -388,7 +411,6 @@ impl EntrySlice for [Entry] {
EntryVerificationStatus::Failure
},
poh_duration_us,
- transaction_duration_us: 0,
device_verification_data: DeviceVerificationData::Cpu(),
}
}
@@ -472,7 +494,6 @@ impl EntrySlice for [Entry] {
EntryVerificationStatus::Failure
},
poh_duration_us,
- transaction_duration_us: 0,
device_verification_data: DeviceVerificationData::Cpu(),
}
}
@@ -499,25 +520,46 @@ impl EntrySlice for [Entry] {
}
}
- fn verify_transaction_signatures(&self, secp256k1_program_enabled: bool) -> bool {
- let verify = |tx: &Transaction| {
- tx.verify().is_ok()
- && {
- match bincode::serialized_size(tx) {
- Ok(size) => size <= PACKET_DATA_SIZE as u64,
- Err(_) => false,
- }
+ fn verify_and_hash_transactions<'a>(
+ &'a self,
+ skip_verification: bool,
+ secp256k1_program_enabled: bool,
+ ) -> Option<Vec<EntryType<'a>>> {
+ let verify_and_hash = |tx: &'a Transaction| -> Option<HashedTransaction<'a>> {
+ let message_hash = if !skip_verification {
+ let size = bincode::serialized_size(tx).ok()?;
+ if size > PACKET_DATA_SIZE as u64 {
+ return None;
}
- && (
+ if secp256k1_program_enabled {
// Verify tx precompiles if secp256k1 program is enabled.
- !secp256k1_program_enabled || tx.verify_precompiles().is_ok()
- )
+ tx.verify_precompiles().ok()?;
+ }
+ tx.verify_and_hash_message().ok()?
+ } else {
+ tx.message().hash()
+ };
+ Some(HashedTransaction::new(Cow::Borrowed(tx), message_hash))
};
PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
self.par_iter()
- .flat_map(|entry| &entry.transactions)
- .all(verify)
+ .map(|entry| {
+ if entry.transactions.is_empty() {
+ Some(EntryType::Tick(entry.hash))
+ } else {
+ Some(EntryType::Transactions(
+ entry
+ .transactions
+ .par_iter()
+ .map(verify_and_hash)
+ .collect::<Option<Vec<HashedTransaction>>>()?,
+ ))
+ }
+ })
+ .collect()
})
})
}
@@ -526,26 +568,11 @@ impl EntrySlice for [Entry] {
&self,
start_hash: &Hash,
recyclers: VerifyRecyclers,
- secp256k1_program_enabled: bool,
) -> EntryVerificationState {
- let start = Instant::now();
- let res = self.verify_transaction_signatures(secp256k1_program_enabled);
- let transaction_duration_us = timing::duration_as_us(&start.elapsed());
- if !res {
- return EntryVerificationState {
- verification_status: EntryVerificationStatus::Failure,
- transaction_duration_us,
- poh_duration_us: 0,
- device_verification_data: DeviceVerificationData::Cpu(),
- };
- }
let start = Instant::now();
let api = perf_libs::api();
if api.is_none() {
- let mut res: EntryVerificationState = self.verify_cpu(start_hash);
- res.set_transaction_duration_us(transaction_duration_us);
- return res;
+ return self.verify_cpu(start_hash);
}
let api = api.unwrap();
inc_new_counter_info!("entry_verify-num_entries", self.len() as usize);
@@ -600,15 +627,21 @@ impl EntrySlice for [Entry] {
timing::duration_as_us(&gpu_wait.elapsed())
});
- let tx_hashes = PAR_THREAD_POOL.with(|thread_pool| {
+ let verifications = PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
self.into_par_iter()
.map(|entry| {
- if entry.transactions.is_empty() {
- None
+ let answer = entry.hash;
+ let action = if entry.transactions.is_empty() {
+ if entry.num_hashes == 0 {
+ VerifyAction::None
+ } else {
+ VerifyAction::Tick
+ }
} else {
- Some(hash_transactions(&entry.transactions))
- }
+ VerifyAction::Mixin(hash_transactions(&entry.transactions))
+ };
+ (action, answer)
})
.collect()
})
@@ -616,13 +649,12 @@ impl EntrySlice for [Entry] {
let device_verification_data = DeviceVerificationData::Gpu(GpuVerificationData {
thread_h: Some(gpu_verify_thread),
- tx_hashes,
+ verifications: Some(verifications),
hashes: Some(hashes),
});
EntryVerificationState {
verification_status: EntryVerificationStatus::Pending,
poh_duration_us: timing::duration_as_us(&start.elapsed()),
- transaction_duration_us,
device_verification_data,
}
}
@@ -704,6 +736,7 @@ mod tests {
use solana_sdk::{
hash::{hash, new_rand as hash_new_rand, Hash},
message::Message,
+ packet::PACKET_DATA_SIZE,
signature::{Keypair, Signer},
system_transaction,
transaction::Transaction,
@@ -909,7 +942,7 @@ mod tests {
}
#[test]
- fn test_verify_transaction_signatures_packet_data_size() {
+ fn test_verify_and_hash_transactions_packet_data_size() {
let mut rng = rand::thread_rng();
let recent_blockhash = hash_new_rand(&mut rng);
let keypair = Keypair::new();
@@ -931,14 +964,18 @@ mod tests {
let tx = make_transaction(5);
let entries = vec![next_entry(&recent_blockhash, 1, vec![tx.clone()])];
assert!(bincode::serialized_size(&tx).unwrap() <= PACKET_DATA_SIZE as u64);
- assert!(entries[..].verify_transaction_signatures(false));
+ assert!(entries[..]
+ .verify_and_hash_transactions(false, false)
+ .is_some());
}
// Big transaction.
{
let tx = make_transaction(15);
let entries = vec![next_entry(&recent_blockhash, 1, vec![tx.clone()])];
assert!(bincode::serialized_size(&tx).unwrap() > PACKET_DATA_SIZE as u64);
- assert!(!entries[..].verify_transaction_signatures(false));
+ assert!(entries[..]
+ .verify_and_hash_transactions(false, false)
+ .is_none());
}
// Assert that verify fails as soon as serialized
// size exceeds packet data size.
@@ -947,7 +984,9 @@ mod tests {
let entries = vec![next_entry(&recent_blockhash, 1, vec![tx.clone()])];
assert_eq!(
bincode::serialized_size(&tx).unwrap() <= PACKET_DATA_SIZE as u64,
- entries[..].verify_transaction_signatures(false),
+ entries[..]
+ .verify_and_hash_transactions(false, false)
+ .is_some(),
);
}
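
Taken together, the entry.rs changes let confirm_slot (in blockstore_processor.rs above) verify signatures, size limits, and precompiles and compute message hashes in one parallel pass, timed separately from PoH verification. A simplified mirror of that flow, assuming Entry, EntryType, EntrySlice, and BlockError are imported from the crates in this diff:

use solana_sdk::timing;
use std::time::Instant;

// Simplified from the confirm_slot changes above. A None result means a
// transaction failed the signature/size/precompile checks, which
// confirm_slot reports as BlockError::InvalidEntryHash.
fn check_transactions<'a>(
    entries: &'a [Entry],
    skip_verification: bool,
    secp256k1_enabled: bool,
) -> Result<(Vec<EntryType<'a>>, u64), BlockError> {
    let check_start = Instant::now();
    let entry_types = entries
        .verify_and_hash_transactions(skip_verification, secp256k1_enabled)
        .ok_or(BlockError::InvalidEntryHash)?;
    // Transaction check time is now tracked separately from PoH verify time.
    let transaction_duration_us = timing::duration_as_us(&check_start.elapsed());
    Ok((entry_types, transaction_duration_us))
}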
}