move core tests to core (#3355)
* move core tests to core
* remove window
* fix up flaky tests
* test_entryfication needs a singly-threaded banking_stage
* move core benches to core
* remove unnecessary dependencies
* remove core as a member for now, test it like runtime
* stop running tests twice
* remove duplicate runs of tests in perf
core/benches/append_vec.rs (new file, 248 lines)
@@ -0,0 +1,248 @@
#![feature(test)]

extern crate rand;
extern crate test;

use bincode::{deserialize, serialize_into, serialized_size};
use rand::{thread_rng, Rng};
use solana_runtime::append_vec::{
    deserialize_account, get_serialized_size, serialize_account, AppendVec,
};
use solana_sdk::account::Account;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::env;
use std::io::Cursor;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::spawn;
use test::Bencher;

const START_SIZE: u64 = 4 * 1024 * 1024;
const INC_SIZE: u64 = 1 * 1024 * 1024;

macro_rules! align_up {
    ($addr: expr, $align: expr) => {
        ($addr + ($align - 1)) & !($align - 1)
    };
}

fn get_append_vec_bench_path(path: &str) -> PathBuf {
    let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
    let mut buf = PathBuf::new();
    buf.push(&format!("{}/{}", out_dir, path));
    buf
}

#[bench]
fn append_vec_atomic_append(bencher: &mut Bencher) {
    let path = get_append_vec_bench_path("bench_append");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    bencher.iter(|| {
        if vec.append(AtomicUsize::new(0)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(0)).is_some());
        }
    });
    std::fs::remove_file(path).unwrap();
}

#[bench]
fn append_vec_atomic_random_access(bencher: &mut Bencher) {
    let path = get_append_vec_bench_path("bench_ra");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    let size = 1_000_000;
    for _ in 0..size {
        if vec.append(AtomicUsize::new(0)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(0)).is_some());
        }
    }
    bencher.iter(|| {
        let index = thread_rng().gen_range(0, size as u64);
        vec.get(index * std::mem::size_of::<AtomicUsize>() as u64);
    });
    std::fs::remove_file(path).unwrap();
}

#[bench]
fn append_vec_atomic_random_change(bencher: &mut Bencher) {
    let path = get_append_vec_bench_path("bench_rax");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    let size = 1_000_000;
    for k in 0..size {
        if vec.append(AtomicUsize::new(k)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(k)).is_some());
        }
    }
    bencher.iter(|| {
        let index = thread_rng().gen_range(0, size as u64);
        let atomic1 = vec.get(index * std::mem::size_of::<AtomicUsize>() as u64);
        let current1 = atomic1.load(Ordering::Relaxed);
        assert_eq!(current1, index as usize);
        let next = current1 + 1;
        let mut index = vec.append(AtomicUsize::new(next));
        if index.is_none() {
            assert!(vec.grow_file().is_ok());
            index = vec.append(AtomicUsize::new(next));
        }
        let atomic2 = vec.get(index.unwrap());
        let current2 = atomic2.load(Ordering::Relaxed);
        assert_eq!(current2, next);
    });
    std::fs::remove_file(path).unwrap();
}

#[bench]
fn append_vec_atomic_random_read(bencher: &mut Bencher) {
    let path = get_append_vec_bench_path("bench_read");
    let mut vec = AppendVec::<AtomicUsize>::new(&path, true, START_SIZE, INC_SIZE);
    let size = 1_000_000;
    for _ in 0..size {
        if vec.append(AtomicUsize::new(0)).is_none() {
            assert!(vec.grow_file().is_ok());
            assert!(vec.append(AtomicUsize::new(0)).is_some());
        }
    }
    bencher.iter(|| {
        let index = thread_rng().gen_range(0, size);
        let atomic1 = vec.get((index * std::mem::size_of::<AtomicUsize>()) as u64);
        let current1 = atomic1.load(Ordering::Relaxed);
        assert_eq!(current1, 0);
    });
    std::fs::remove_file(path).unwrap();
}

#[bench]
fn append_vec_concurrent_lock_append(bencher: &mut Bencher) {
    let path = get_append_vec_bench_path("bench_lock_append");
    let vec = Arc::new(RwLock::new(AppendVec::<AtomicUsize>::new(
        &path, true, START_SIZE, INC_SIZE,
    )));
    let vec1 = vec.clone();
    let size = 1_000_000;
    let count = Arc::new(AtomicUsize::new(0));
    let count1 = count.clone();
    spawn(move || loop {
        let mut len = count.load(Ordering::Relaxed);
        {
            let rlock = vec1.read().unwrap();
            loop {
                if rlock.append(AtomicUsize::new(0)).is_none() {
                    break;
                }
                len = count.fetch_add(1, Ordering::Relaxed);
            }
            if len >= size {
                break;
            }
        }
        {
            let mut wlock = vec1.write().unwrap();
            if len >= size {
                break;
            }
            assert!(wlock.grow_file().is_ok());
        }
    });
    bencher.iter(|| {
        let _rlock = vec.read().unwrap();
        let len = count1.load(Ordering::Relaxed);
        assert!(len < size * 2);
    });
    std::fs::remove_file(path).unwrap();
}

#[bench]
fn append_vec_concurrent_get_append(bencher: &mut Bencher) {
    let path = get_append_vec_bench_path("bench_get_append");
    let vec = Arc::new(RwLock::new(AppendVec::<AtomicUsize>::new(
        &path, true, START_SIZE, INC_SIZE,
    )));
    let vec1 = vec.clone();
    let size = 1_000_000;
    let count = Arc::new(AtomicUsize::new(0));
    let count1 = count.clone();
    spawn(move || loop {
        let mut len = count.load(Ordering::Relaxed);
        {
            let rlock = vec1.read().unwrap();
            loop {
                if rlock.append(AtomicUsize::new(0)).is_none() {
                    break;
                }
                len = count.fetch_add(1, Ordering::Relaxed);
            }
            if len >= size {
                break;
            }
        }
        {
            let mut wlock = vec1.write().unwrap();
            if len >= size {
                break;
            }
            assert!(wlock.grow_file().is_ok());
        }
    });
    bencher.iter(|| {
        let rlock = vec.read().unwrap();
        let len = count1.load(Ordering::Relaxed);
        if len > 0 {
            let index = thread_rng().gen_range(0, len);
            rlock.get((index * std::mem::size_of::<AtomicUsize>()) as u64);
        }
    });
    std::fs::remove_file(path).unwrap();
}

#[bench]
fn bench_account_serialize(bencher: &mut Bencher) {
    let num: usize = 1000;
    let account = Account::new(2, 100, &Keypair::new().pubkey());
    let len = get_serialized_size(&account);
    let ser_len = align_up!(len + std::mem::size_of::<u64>(), std::mem::size_of::<u64>());
    let mut memory = vec![0; num * ser_len];
    bencher.iter(|| {
        for i in 0..num {
            let start = i * ser_len;
            serialize_account(&mut memory[start..start + ser_len], &account, len);
        }
    });

    // make sure compiler doesn't delete the code.
    let index = thread_rng().gen_range(0, num);
    if memory[index] != 0 {
        println!("memory: {}", memory[index]);
    }

    let start = index * ser_len;
    let new_account = deserialize_account(&memory[start..start + ser_len], 0, num * len).unwrap();
    assert_eq!(new_account, account);
}

#[bench]
fn bench_account_serialize_bincode(bencher: &mut Bencher) {
    let num: usize = 1000;
    let account = Account::new(2, 100, &Keypair::new().pubkey());
    let len = serialized_size(&account).unwrap() as usize;
    let mut memory = vec![0u8; num * len];
    bencher.iter(|| {
        for i in 0..num {
            let start = i * len;
            let cursor = Cursor::new(&mut memory[start..start + len]);
            serialize_into(cursor, &account).unwrap();
        }
    });

    // make sure compiler doesn't delete the code.
    let index = thread_rng().gen_range(0, len);
    if memory[index] != 0 {
        println!("memory: {}", memory[index]);
    }

    let start = index * len;
    let new_account: Account = deserialize(&memory[start..start + len]).unwrap();
    assert_eq!(new_account, account);
}
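The align_up! macro above rounds a length up to the next multiple of a power-of-two alignment, so each serialized account in bench_account_serialize starts on a u64 boundary; the manual read of memory[index] afterwards plays the role of test::black_box, keeping the benchmark loop from being optimized away. A minimal, self-contained sketch of the alignment arithmetic (standalone Rust, not part of this commit):

    macro_rules! align_up {
        ($addr: expr, $align: expr) => {
            ($addr + ($align - 1)) & !($align - 1)
        };
    }

    fn main() {
        // With an 8-byte alignment, lengths round up to the next multiple of 8.
        assert_eq!(align_up!(13usize, 8usize), 16);
        assert_eq!(align_up!(16usize, 8usize), 16); // already aligned: unchanged
        assert_eq!(align_up!(1usize, 8usize), 8);
    }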
core/benches/banking_stage.rs (new file, 241 lines)
@@ -0,0 +1,241 @@
#![feature(test)]

extern crate test;

use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::banking_stage::{create_test_recorder, BankingStage};
use solana::cluster_info::ClusterInfo;
use solana::cluster_info::Node;
use solana::packet::to_packets_chunked;
use solana::poh_recorder::WorkingBankEntries;
use solana::service::Service;
use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{KeypairUtil, Signature};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::{DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES};
use std::iter;
use std::sync::atomic::Ordering;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, RwLock};
use std::time::Duration;
use test::Bencher;

fn check_txs(receiver: &Receiver<WorkingBankEntries>, ref_tx_count: usize) {
    let mut total = 0;
    loop {
        let entries = receiver.recv_timeout(Duration::new(1, 0));
        if let Ok((_, entries)) = entries {
            for (entry, _) in &entries {
                total += entry.transactions.len();
            }
        } else {
            break;
        }
        if total >= ref_tx_count {
            break;
        }
    }
    assert_eq!(total, ref_tx_count);
}

#[bench]
#[ignore]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let num_threads = BankingStage::num_threads() as usize;
    // a multiple of packet chunk 2X duplicates to avoid races
    let txes = 192 * 50 * num_threads * 2;
    let mint_total = 1_000_000_000_000;
    let (genesis_block, mint_keypair) = GenesisBlock::new(mint_total);

    let (verified_sender, verified_receiver) = channel();
    let bank = Arc::new(Bank::new(&genesis_block));
    let dummy = SystemTransaction::new_move(
        &mint_keypair,
        &mint_keypair.pubkey(),
        1,
        genesis_block.hash(),
        0,
    );
    let transactions: Vec<_> = (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            new.signatures = vec![Signature::new(&sig[0..64])];
            new
        })
        .collect();
    // fund all the accounts
    transactions.iter().for_each(|tx| {
        let fund = SystemTransaction::new_move(
            &mint_keypair,
            &tx.account_keys[0],
            mint_total / txes as u64,
            genesis_block.hash(),
            0,
        );
        let x = bank.process_transaction(&fund);
        x.unwrap();
    });
    // sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    // sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 192)
        .into_iter()
        .map(|x| {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        })
        .collect();
    let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
    let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
    let cluster_info = Arc::new(RwLock::new(cluster_info));
    let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
    poh_recorder.lock().unwrap().set_bank(&bank);

    let mut id = genesis_block.hash();
    for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
        id = hash(&id.as_ref());
        bank.register_tick(&id);
    }

    let half_len = verified.len() / 2;
    let mut start = 0;
    bencher.iter(move || {
        // make sure the transactions are still valid
        bank.register_tick(&genesis_block.hash());
        for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes / 2);
        bank.clear_signatures();
        start += half_len;
        start %= verified.len();
    });
    exit.store(true, Ordering::Relaxed);
    poh_service.join().unwrap();
}

#[bench]
#[ignore]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
    let progs = 4;
    let num_threads = BankingStage::num_threads() as usize;
    // a multiple of packet chunk 2X duplicates to avoid races
    let txes = 96 * 100 * num_threads * 2;
    let mint_total = 1_000_000_000_000;
    let (genesis_block, mint_keypair) = GenesisBlock::new(mint_total);

    let (verified_sender, verified_receiver) = channel();
    let bank = Arc::new(Bank::new(&genesis_block));
    let dummy = SystemTransaction::new_move(
        &mint_keypair,
        &mint_keypair.pubkey(),
        1,
        genesis_block.hash(),
        0,
    );
    let transactions: Vec<_> = (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            let prog = new.instructions[0].clone();
            for i in 1..progs {
                // generate programs that spend to random keys
                let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
                let to_key = Pubkey::new(&to[0..32]);
                new.account_keys.push(to_key);
                assert_eq!(new.account_keys.len(), i + 2);
                new.instructions.push(prog.clone());
                assert_eq!(new.instructions.len(), i + 1);
                new.instructions[i].accounts[1] = 1 + i as u8;
                assert_eq!(new.key(i, 1), Some(&to_key));
                assert_eq!(
                    new.account_keys[new.instructions[i].accounts[1] as usize],
                    to_key
                );
            }
            assert_eq!(new.instructions.len(), progs);
            new.signatures = vec![Signature::new(&sig[0..64])];
            new
        })
        .collect();
    transactions.iter().for_each(|tx| {
        let fund = SystemTransaction::new_move(
            &mint_keypair,
            &tx.account_keys[0],
            mint_total / txes as u64,
            genesis_block.hash(),
            0,
        );
        bank.process_transaction(&fund).unwrap();
    });
    // sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    // sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
        .into_iter()
        .map(|x| {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        })
        .collect();
    let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
    let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
    let cluster_info = Arc::new(RwLock::new(cluster_info));
    let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
    poh_recorder.lock().unwrap().set_bank(&bank);

    let mut id = genesis_block.hash();
    for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
        id = hash(&id.as_ref());
        bank.register_tick(&id);
    }

    let half_len = verified.len() / 2;
    let mut start = 0;
    bencher.iter(move || {
        // make sure the transactions are still valid
        bank.register_tick(&genesis_block.hash());
        for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes / 2);
        bank.clear_signatures();
        start += half_len;
        start %= verified.len();
    });
    exit.store(true, Ordering::Relaxed);
    poh_service.join().unwrap();
}
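check_txs above illustrates a reusable pattern: drain a channel with a timeout, accumulating a count until the expected total arrives or the channel goes quiet. A stripped-down, self-contained sketch of the same pattern (names here are illustrative, not from the solana crate):

    use std::sync::mpsc::{channel, Receiver};
    use std::thread;
    use std::time::Duration;

    fn drain_until(receiver: &Receiver<usize>, expected: usize) -> usize {
        let mut total = 0;
        loop {
            // Stop when the sender goes quiet for a second...
            match receiver.recv_timeout(Duration::new(1, 0)) {
                Ok(count) => total += count,
                Err(_) => break,
            }
            // ...or as soon as the expected total is reached.
            if total >= expected {
                break;
            }
        }
        total
    }

    fn main() {
        let (sender, receiver) = channel();
        thread::spawn(move || {
            for _ in 0..10 {
                sender.send(5).unwrap(); // e.g. five transactions per entry batch
            }
        });
        assert_eq!(drain_until(&receiver, 50), 50);
    }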
core/benches/blocktree.rs (new file, 194 lines)
@@ -0,0 +1,194 @@
#![feature(test)]
use rand;

extern crate test;

#[macro_use]
extern crate solana;

use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use solana::blocktree::{get_tmp_ledger_path, Blocktree};
use solana::entry::{make_large_test_entries, make_tiny_test_entries, EntrySlice};
use solana::packet::{Blob, BLOB_HEADER_SIZE};
use test::Bencher;

// Given some blobs and a ledger at ledger_path, benchmark writing the blobs to the ledger
fn bench_write_blobs(bench: &mut Bencher, blobs: &mut Vec<Blob>, ledger_path: &str) {
    let blocktree =
        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");

    let num_blobs = blobs.len();

    bench.iter(move || {
        for blob in blobs.iter_mut() {
            let index = blob.index();

            blocktree
                .put_data_blob_bytes(
                    blob.slot(),
                    index,
                    &blob.data[..BLOB_HEADER_SIZE + blob.size()],
                )
                .unwrap();

            blob.set_index(index + num_blobs as u64);
        }
    });

    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}

// Insert some blobs into the ledger in preparation for read benchmarks
fn setup_read_bench(
    blocktree: &mut Blocktree,
    num_small_blobs: u64,
    num_large_blobs: u64,
    slot: u64,
) {
    // Make some big and small entries
    let mut entries = make_large_test_entries(num_large_blobs as usize);
    entries.extend(make_tiny_test_entries(num_small_blobs as usize));

    // Convert the entries to blobs, write the blobs to the ledger
    let mut blobs = entries.to_blobs();
    for (index, b) in blobs.iter_mut().enumerate() {
        b.set_index(index as u64);
        b.set_slot(slot);
    }
    blocktree
        .write_blobs(&blobs)
        .expect("Expected successful insertion of blobs into ledger");
}

// Write small blobs to the ledger
#[bench]
#[ignore]
fn bench_write_small(bench: &mut Bencher) {
    let ledger_path = get_tmp_ledger_path!();
    let num_entries = 32 * 1024;
    let entries = make_tiny_test_entries(num_entries);
    let mut blobs = entries.to_blobs();
    for (index, b) in blobs.iter_mut().enumerate() {
        b.set_index(index as u64);
    }
    bench_write_blobs(bench, &mut blobs, &ledger_path);
}

// Write big blobs to the ledger
#[bench]
#[ignore]
fn bench_write_big(bench: &mut Bencher) {
    let ledger_path = get_tmp_ledger_path!();
    let num_entries = 32 * 1024;
    let entries = make_large_test_entries(num_entries);
    let mut blobs = entries.to_blobs();
    for (index, b) in blobs.iter_mut().enumerate() {
        b.set_index(index as u64);
    }

    bench_write_blobs(bench, &mut blobs, &ledger_path);
}

#[bench]
#[ignore]
fn bench_read_sequential(bench: &mut Bencher) {
    let ledger_path = get_tmp_ledger_path!();
    let mut blocktree =
        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");

    // Insert some big and small blobs into the ledger
    let num_small_blobs = 32 * 1024;
    let num_large_blobs = 32 * 1024;
    let total_blobs = num_small_blobs + num_large_blobs;
    let slot = 0;
    setup_read_bench(&mut blocktree, num_small_blobs, num_large_blobs, slot);

    let num_reads = total_blobs / 15;
    let mut rng = rand::thread_rng();
    bench.iter(move || {
        // Generate random starting point in the range [0, total_blobs - 1], read num_reads blobs sequentially
        let start_index = rng.gen_range(0, num_small_blobs + num_large_blobs);
        for i in start_index..start_index + num_reads {
            let _ = blocktree.get_data_blob(slot, i as u64 % total_blobs);
        }
    });

    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}

#[bench]
#[ignore]
fn bench_read_random(bench: &mut Bencher) {
    let ledger_path = get_tmp_ledger_path!();
    let mut blocktree =
        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");

    // Insert some big and small blobs into the ledger
    let num_small_blobs = 32 * 1024;
    let num_large_blobs = 32 * 1024;
    let total_blobs = num_small_blobs + num_large_blobs;
    let slot = 0;
    setup_read_bench(&mut blocktree, num_small_blobs, num_large_blobs, slot);

    let num_reads = total_blobs / 15;

    // Generate a num_reads sized random sample of indexes in range [0, total_blobs - 1],
    // simulating random reads
    let mut rng = rand::thread_rng();
    let indexes: Vec<usize> = (0..num_reads)
        .map(|_| rng.gen_range(0, total_blobs) as usize)
        .collect();
    bench.iter(move || {
        for i in indexes.iter() {
            let _ = blocktree.get_data_blob(slot, *i as u64);
        }
    });

    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}

#[bench]
#[ignore]
fn bench_insert_data_blob_small(bench: &mut Bencher) {
    let ledger_path = get_tmp_ledger_path!();
    let blocktree =
        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
    let num_entries = 32 * 1024;
    let entries = make_tiny_test_entries(num_entries);
    let mut blobs = entries.to_blobs();

    blobs.shuffle(&mut thread_rng());

    bench.iter(move || {
        for blob in blobs.iter_mut() {
            let index = blob.index();
            blob.set_index(index + num_entries as u64);
        }
        blocktree.write_blobs(&blobs).unwrap();
    });

    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}

#[bench]
#[ignore]
fn bench_insert_data_blob_big(bench: &mut Bencher) {
    let ledger_path = get_tmp_ledger_path!();
    let blocktree =
        Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
    let num_entries = 32 * 1024;
    let entries = make_large_test_entries(num_entries);
    let mut shared_blobs = entries.to_shared_blobs();
    shared_blobs.shuffle(&mut thread_rng());

    bench.iter(move || {
        for blob in shared_blobs.iter_mut() {
            let index = blob.read().unwrap().index();
            blocktree.write_shared_blobs(vec![blob.clone()]).unwrap();
            blob.write().unwrap().set_index(index + num_entries as u64);
        }
    });

    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
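The two insert benches shuffle their blobs before writing, so insertion order does not match index order, which is closer to how blobs arrive over the network. A minimal demonstration of the rand 0.6-era shuffle API the file imports (standalone sketch, not part of this commit):

    use rand::seq::SliceRandom;
    use rand::thread_rng;

    fn main() {
        let mut indexes: Vec<u64> = (0..8).collect();
        indexes.shuffle(&mut thread_rng());
        // Insertion order is now a random permutation of index order.
        println!("insertion order: {:?}", indexes);
    }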
core/benches/chacha.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
//#![feature(test)]
//
//extern crate solana;
//extern crate test;
//
//use solana::chacha::chacha_cbc_encrypt_files;
//use std::fs::remove_file;
//use std::fs::File;
//use std::io::Write;
//use std::path::Path;
//use test::Bencher;
//
//#[bench]
//fn bench_chacha_encrypt(bench: &mut Bencher) {
//    let in_path = Path::new("bench_chacha_encrypt_file_input.txt");
//    let out_path = Path::new("bench_chacha_encrypt_file_output.txt.enc");
//    {
//        let mut in_file = File::create(in_path).unwrap();
//        for _ in 0..1024 {
//            in_file.write("123456foobar".as_bytes()).unwrap();
//        }
//    }
//    bench.iter(move || {
//        chacha_cbc_encrypt_files(in_path, out_path, "thetestkey".to_string()).unwrap();
//    });
//
//    remove_file(in_path).unwrap();
//    remove_file(out_path).unwrap();
//}
core/benches/gen_keys.rs (new file, 12 lines)
@@ -0,0 +1,12 @@
#![feature(test)]

extern crate test;

use solana::gen_keys::GenKeys;
use test::Bencher;

#[bench]
fn bench_gen_keys(b: &mut Bencher) {
    let mut rnd = GenKeys::new([0u8; 32]);
    b.iter(|| rnd.gen_n_keypairs(1000));
}
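GenKeys is a seeded generator, so a fixed seed yields the same keypairs on every run, which keeps benchmarks and tests reproducible. A hedged sketch of that property, assuming the solana and solana-sdk crates at this commit:

    use solana::gen_keys::GenKeys;
    use solana_sdk::signature::KeypairUtil;

    fn main() {
        let mut a = GenKeys::new([0u8; 32]);
        let mut b = GenKeys::new([0u8; 32]);
        // Same seed, same sequence of keypairs.
        for (x, y) in a.gen_n_keypairs(4).iter().zip(b.gen_n_keypairs(4).iter()) {
            assert_eq!(x.pubkey(), y.pubkey());
        }
    }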
core/benches/ledger.rs (new file, 24 lines)
@@ -0,0 +1,24 @@
#![feature(test)]

extern crate test;

use solana::entry::{next_entries, reconstruct_entries_from_blobs, EntrySlice};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use test::Bencher;

#[bench]
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
    let zero = Hash::default();
    let one = hash(&zero.as_ref());
    let keypair = Keypair::new();
    let tx0 = SystemTransaction::new_move(&keypair, &keypair.pubkey(), 1, one, 0);
    let transactions = vec![tx0; 10];
    let entries = next_entries(&zero, 1, transactions);

    bencher.iter(|| {
        let blobs = entries.to_blobs();
        assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap().0, entries);
    });
}
core/benches/sigverify.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
#![feature(test)]

extern crate test;

use solana::packet::to_packets;
use solana::sigverify;
use solana::test_tx::test_tx;
use test::Bencher;

#[bench]
fn bench_sigverify(bencher: &mut Bencher) {
    let tx = test_tx();

    // generate packet vector
    let batches = to_packets(&vec![tx; 128]);

    // verify packets
    bencher.iter(|| {
        let _ans = sigverify::ed25519_verify(&batches);
    })
}
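The benchmark discards the verification results. For context, a hedged sketch of how they might be consumed; it assumes this era's API, in which ed25519_verify returns one Vec<u8> per batch with one status byte per packet (1 marking a valid signature):

    use solana::packet::to_packets;
    use solana::sigverify;
    use solana::test_tx::test_tx;

    fn main() {
        let tx = test_tx();
        let batches = to_packets(&vec![tx; 128]);
        // Assumption: one status byte per packet, 1 == valid.
        let results = sigverify::ed25519_verify(&batches);
        assert!(results.iter().flatten().all(|&r| r == 1));
    }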