Fix resetting PohRecorder to wrong bank (#3553) (#3574)

* Check whether future slot already has transmission
Author: carllin
Date: 2019-03-30 02:06:49 -07:00
Committed by: GitHub
Parent: 9568a1da03
Commit: bf11d7ef43
9 changed files with 1173 additions and 842 deletions
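In outline, the fix: before a node schedules itself to lead an upcoming slot, it now consults the blocktree and skips any slot that already has a transmission recorded, so the PoH recorder is never reset onto a slot whose blobs were already broadcast. Below is a minimal, self-contained sketch of that check; the Ledger type, has_transmission, and this next_leader_slot signature are illustrative stand-ins rather than the actual Solana API (in the diff that follows, the real change threads a Blocktree handle into leader_schedule_utils::next_leader_slot and PohRecorder::new).

// Minimal sketch with hypothetical types, not the Solana API: pick the next
// leader slot while skipping slots the ledger already holds data for.
use std::collections::HashMap;

struct Ledger {
    // slot -> number of blobs already received/stored for that slot
    received: HashMap<u64, u64>,
}

impl Ledger {
    fn has_transmission(&self, slot: u64) -> bool {
        self.received.get(&slot).copied().unwrap_or(0) > 0
    }
}

// Return the first slot after `current_slot` in this node's leader schedule
// that has no transmission recorded in the ledger.
fn next_leader_slot(my_slots: &[u64], current_slot: u64, ledger: &Ledger) -> Option<u64> {
    my_slots
        .iter()
        .copied()
        .filter(|&slot| slot > current_slot)
        .find(|&slot| !ledger.has_transmission(slot))
}

fn main() {
    // Slots 1 and 2 already hold blobs, so they must be skipped.
    let ledger = Ledger {
        received: [(1u64, 3u64), (2, 1)].iter().copied().collect(),
    };
    assert_eq!(next_leader_slot(&[1, 2, 3], 0, &ledger), Some(3));
}

In the commit itself this check lives inside next_leader_slot's scan over the leader schedule, guarded by an Option<&Blocktree> so callers without a ledger handle keep the previous behavior, and ReplayStage drops its own duplicate slot-skipping logic in favor of it.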


@@ -1,10 +1,13 @@
#![feature(test)]
extern crate test;
#[macro_use]
extern crate solana;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::banking_stage::{create_test_recorder, BankingStage};
use solana::blocktree::{get_tmp_ledger_path, Blocktree};
use solana::cluster_info::ClusterInfo;
use solana::cluster_info::Node;
use solana::packet::to_packets_chunked;
@@ -104,33 +107,41 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
(x, iter::repeat(1).take(len).collect())
})
.collect();
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[bench]
@@ -211,31 +222,40 @@ fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
(x, iter::repeat(1).take(len).collect())
})
.collect();
let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
poh_recorder.lock().unwrap().set_bank(&bank);
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let half_len = verified.len() / 2;
let mut start = 0;
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}


@@ -1,7 +1,7 @@
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to contruct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::entry::Entry;
use crate::leader_schedule_utils;
@@ -437,6 +437,7 @@ impl Service for BankingStage {
pub fn create_test_recorder(
bank: &Arc<Bank>,
blocktree: &Arc<Blocktree>,
) -> (
Arc<AtomicBool>,
Arc<Mutex<PohRecorder>>,
@@ -451,6 +452,7 @@ pub fn create_test_recorder(
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
blocktree,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_service = PohService::new(poh_recorder.clone(), &PohServiceConfig::default(), &exit);
@@ -460,10 +462,12 @@ pub fn create_test_recorder(
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::cluster_info::Node;
use crate::entry::EntrySlice;
use crate::packet::to_packets;
use crate::poh_recorder::WorkingBank;
use crate::{get_tmp_ledger_path, tmp_ledger_name};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::native_program::ProgramError;
use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -476,14 +480,22 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block));
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, _entry_receiever) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();
poh_service.join().unwrap();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -494,28 +506,36 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_block));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
trace!("sending bank");
sleep(Duration::from_millis(600));
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
trace!("sending bank");
sleep(Duration::from_millis(600));
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_block.ticks_per_slot as usize - 1);
assert!(entries.verify(&start_hash));
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_block.ticks_per_slot as usize - 1);
assert!(entries.verify(&start_hash));
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -524,54 +544,63 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_block));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
// good tx
let keypair = mint_keypair;
let tx = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// good tx
let keypair = mint_keypair;
let tx = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// good tx, but no verify
let tx_no_ver =
SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// good tx, but no verify
let tx_no_ver =
SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let tx_anf = SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let tx_anf =
SystemTransaction::new_account(&keypair, &keypair.pubkey(), 1, start_hash, 0);
// send 'em over
let packets = to_packets(&[tx, tx_no_ver, tx_anf]);
// send 'em over
let packets = to_packets(&[tx, tx_no_ver, tx_anf]);
// glad they all fit
assert_eq!(packets.len(), 1);
verified_sender // tx, no_ver, anf
.send(vec![(packets[0].clone(), vec![1u8, 0u8, 1u8])])
.unwrap();
// glad they all fit
assert_eq!(packets.len(), 1);
verified_sender // tx, no_ver, anf
.send(vec![(packets[0].clone(), vec![1u8, 0u8, 1u8])])
.unwrap();
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
//receive entries + ticks
let entries: Vec<Vec<Entry>> = entry_receiver
.iter()
.map(|x| x.1.into_iter().map(|e| e.0).collect())
.collect();
//receive entries + ticks
let entries: Vec<Vec<Entry>> = entry_receiver
.iter()
.map(|x| x.1.into_iter().map(|e| e.0).collect())
.collect();
assert!(entries.len() >= 1);
assert!(entries.len() >= 1);
let mut blockhash = start_hash;
entries.iter().for_each(|entries| {
assert_eq!(entries.len(), 1);
assert!(entries.verify(&blockhash));
blockhash = entries.last().unwrap().hash;
});
drop(entry_receiver);
banking_stage.join().unwrap();
let mut blockhash = start_hash;
entries.iter().for_each(|entries| {
assert_eq!(entries.len(), 1);
assert!(entries.verify(&blockhash));
blockhash = entries.last().unwrap().hash;
});
drop(entry_receiver);
banking_stage.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -582,71 +611,74 @@ mod tests {
let (genesis_block, mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block));
let (verified_sender, verified_receiver) = channel();
let (exit, poh_recorder, poh_service, entry_receiver) = create_test_recorder(&bank);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
poh_recorder.lock().unwrap().set_bank(&bank);
let _banking_stage = BankingStage::new(&cluster_info, &poh_recorder, verified_receiver);
// Process a batch that includes a transaction that receives two lamports.
let alice = Keypair::new();
let tx = SystemTransaction::new_account(
&mint_keypair,
&alice.pubkey(),
2,
genesis_block.hash(),
0,
);
// Process a batch that includes a transaction that receives two lamports.
let alice = Keypair::new();
let tx = SystemTransaction::new_account(
&mint_keypair,
&alice.pubkey(),
2,
genesis_block.hash(),
0,
);
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
// Process a second batch that spends one of those lamports.
let tx = SystemTransaction::new_account(
&alice,
&mint_keypair.pubkey(),
1,
genesis_block.hash(),
0,
);
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
// Process a second batch that spends one of those lamports.
let tx = SystemTransaction::new_account(
&alice,
&mint_keypair.pubkey(),
1,
genesis_block.hash(),
0,
);
let packets = to_packets(&[tx]);
verified_sender
.send(vec![(packets[0].clone(), vec![1u8])])
.unwrap();
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
drop(verified_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
// Poll the entry_receiver, feeding it into a new bank
// until the balance is what we expect.
let bank = Bank::new(&genesis_block);
for _ in 0..10 {
let entries: Vec<_> = entry_receiver
.iter()
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
for entry in &entries {
bank.process_transactions(&entry.transactions)
.for_each(|x| assert_eq!(*x, Ok(())));
}
if bank.get_balance(&alice.pubkey()) == 1 {
break;
}
sleep(Duration::from_millis(100));
// Poll the entry_receiver, feeding it into a new bank
// until the balance is what we expect.
let bank = Bank::new(&genesis_block);
for _ in 0..10 {
let entries: Vec<_> = entry_receiver
.iter()
.flat_map(|x| x.1.into_iter().map(|e| e.0))
.collect();
for entry in &entries {
bank.process_transactions(&entry.transactions)
.iter()
.for_each(|x| assert_eq!(*x, Ok(())));
}
if bank.get_balance(&alice.pubkey()) == 1 {
break;
}
sleep(Duration::from_millis(100));
}
}
// Assert the user holds one lamport, not two. If the stage only outputs one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
assert_eq!(bank.get_balance(&alice.pubkey()), 1);
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -659,43 +691,50 @@ mod tests {
max_tick_height: std::u64::MAX,
};
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
None,
bank.ticks_per_slot(),
&Pubkey::default(),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = Keypair::new().pubkey();
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let pubkey = Keypair::new().pubkey();
let transactions = vec![
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
];
let transactions = vec![
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
SystemTransaction::new_move(&mint_keypair, &pubkey, 1, genesis_block.hash(), 0),
];
let mut results = vec![Ok(()), Ok(())];
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
let mut results = vec![Ok(()), Ok(())];
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
// ProgramErrors should still be recorded
results[0] = Err(BankError::ProgramError(
1,
ProgramError::ResultWithNegativeLamports,
));
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
// ProgramErrors should still be recorded
results[0] = Err(BankError::ProgramError(
1,
ProgramError::ResultWithNegativeLamports,
));
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len());
// Other BankErrors should not be recorded
results[0] = Err(BankError::AccountNotFound);
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len() - 1);
// Other BankErrors should not be recorded
results[0] = Err(BankError::AccountNotFound);
BankingStage::record_transactions(&transactions, &results, &poh_recorder).unwrap();
let (_, entries) = entry_receiver.recv().unwrap();
assert_eq!(entries[0].0.transactions.len(), transactions.len() - 1);
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
@@ -718,53 +757,61 @@ mod tests {
min_tick_height: bank.tick_height(),
max_tick_height: bank.tick_height() + 1,
};
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&pubkey,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blocktree),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder).unwrap();
poh_recorder.lock().unwrap().tick();
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder)
.unwrap();
poh_recorder.lock().unwrap().tick();
let mut done = false;
// read entries until I find mine, might be ticks...
while let Ok((_, entries)) = entry_receiver.recv() {
for (entry, _) in entries {
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
assert_eq!(bank.get_balance(&pubkey), 1);
done = true;
if done {
break;
}
let mut done = false;
// read entries until I find mine, might be ticks...
while let Ok((_, entries)) = entry_receiver.recv() {
for (entry, _) in entries {
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
assert_eq!(bank.get_balance(&pubkey), 1);
done = true;
}
}
if done {
break;
}
}
trace!("done ticking");
assert_eq!(done, true);
let transactions = vec![SystemTransaction::new_move(
&mint_keypair,
&pubkey,
2,
genesis_block.hash(),
0,
)];
assert_matches!(
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder),
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
);
assert_eq!(bank.get_balance(&pubkey), 1);
}
trace!("done ticking");
assert_eq!(done, true);
let transactions = vec![SystemTransaction::new_move(
&mint_keypair,
&pubkey,
2,
genesis_block.hash(),
0,
)];
assert_matches!(
BankingStage::process_and_record_transactions(&bank, &transactions, &poh_recorder),
Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached))
);
assert_eq!(bank.get_balance(&pubkey), 1);
Blocktree::destroy(&ledger_path).unwrap();
}
}


@@ -108,13 +108,15 @@ impl Fullnode {
bank.tick_height(),
bank.last_blockhash(),
);
let blocktree = Arc::new(blocktree);
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.slot(),
leader_schedule_utils::next_leader_slot(&id, bank.slot(), &bank),
leader_schedule_utils::next_leader_slot(&id, bank.slot(), &bank, Some(&blocktree)),
bank.ticks_per_slot(),
&id,
&blocktree,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_service = PohService::new(poh_recorder.clone(), &config.tick_config, &exit);
@@ -133,7 +135,6 @@ impl Fullnode {
node.sockets.gossip.local_addr().unwrap()
);
let blocktree = Arc::new(blocktree);
let bank_forks = Arc::new(RwLock::new(bank_forks));
node.info.wallclock = timestamp();


@@ -1,3 +1,4 @@
use crate::blocktree::Blocktree;
use crate::leader_schedule::LeaderSchedule;
use crate::staking_utils;
use solana_runtime::bank::Bank;
@@ -44,7 +45,12 @@ pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
}
/// Return the next slot after the given current_slot that the given node will be leader
pub fn next_leader_slot(pubkey: &Pubkey, mut current_slot: u64, bank: &Bank) -> Option<u64> {
pub fn next_leader_slot(
pubkey: &Pubkey,
mut current_slot: u64,
bank: &Bank,
blocktree: Option<&Blocktree>,
) -> Option<u64> {
let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
while let Some(leader_schedule) = leader_schedule(epoch, bank) {
// clippy thinks I should do this:
@@ -59,6 +65,15 @@ pub fn next_leader_slot(pubkey: &Pubkey, mut current_slot: u64, bank: &Bank) ->
for i in start_index..bank.get_slots_in_epoch(epoch) {
current_slot += 1;
if *pubkey == leader_schedule[i] {
if let Some(blocktree) = blocktree {
if let Some(meta) = blocktree.meta(current_slot).unwrap() {
// We have already sent a blob for this slot, so skip it
if meta.received > 0 {
continue;
}
}
}
return Some(current_slot);
}
}
@@ -82,9 +97,13 @@ pub fn tick_height_to_slot(ticks_per_slot: u64, tick_height: u64) -> u64 {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::tests::make_slot_entries;
use crate::staking_utils;
use crate::voting_keypair::tests::new_vote_account_with_delegate;
use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::Arc;
#[test]
fn test_next_leader_slot() {
@@ -99,13 +118,14 @@ mod tests {
let bank = Bank::new(&genesis_block);
assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
assert_eq!(next_leader_slot(&pubkey, 0, &bank), Some(1));
assert_eq!(next_leader_slot(&pubkey, 1, &bank), Some(2));
assert_eq!(next_leader_slot(&pubkey, 0, &bank, None), Some(1));
assert_eq!(next_leader_slot(&pubkey, 1, &bank, None), Some(2));
assert_eq!(
next_leader_slot(
&pubkey,
2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank
&bank,
None
),
None
);
@@ -114,12 +134,133 @@ mod tests {
next_leader_slot(
&Keypair::new().pubkey(), // not in leader_schedule
0,
&bank
&bank,
None
),
None
);
}
#[test]
fn test_next_leader_slot_blocktree() {
let pubkey = Keypair::new().pubkey();
let mut genesis_block = GenesisBlock::new_with_leader(
BOOTSTRAP_LEADER_LAMPORTS,
&pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
)
.0;
genesis_block.epoch_warmup = false;
let bank = Bank::new(&genesis_block);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
// Check that the next leader slot after 0 is slot 1
assert_eq!(
next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
Some(1)
);
// Write a blob into slot 2 that chains to slot 1,
// but slot 1 is empty so should not be skipped
let (blobs, _) = make_slot_entries(2, 1, 1);
blocktree.write_blobs(&blobs[..]).unwrap();
assert_eq!(
next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
Some(1)
);
// Write a blob into slot 1
let (blobs, _) = make_slot_entries(1, 0, 1);
// Check that slot 1 and 2 are skipped
blocktree.write_blobs(&blobs[..]).unwrap();
assert_eq!(
next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
Some(3)
);
// Integrity checks
assert_eq!(
next_leader_slot(
&pubkey,
2 * genesis_block.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank,
Some(&blocktree)
),
None
);
assert_eq!(
next_leader_slot(
&Keypair::new().pubkey(), // not in leader_schedule
0,
&bank,
Some(&blocktree)
),
None
);
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
fn test_next_leader_slot_next_epoch() {
let pubkey = Keypair::new().pubkey();
let (mut genesis_block, mint_keypair) = GenesisBlock::new_with_leader(
2 * BOOTSTRAP_LEADER_LAMPORTS,
&pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
);
genesis_block.epoch_warmup = false;
let bank = Bank::new(&genesis_block);
let delegate_id = Keypair::new().pubkey();
// Create new vote account
let new_voting_keypair = Keypair::new();
new_vote_account_with_delegate(
&mint_keypair,
&new_voting_keypair,
&delegate_id,
&bank,
BOOTSTRAP_LEADER_LAMPORTS,
);
// Have to wait until the epoch at after the epoch stakes generated at genesis
// for the new votes to take effect.
let mut target_slot = 1;
let epoch = bank.get_stakers_epoch(0);
while bank.get_stakers_epoch(target_slot) == epoch {
target_slot += 1;
}
let bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), target_slot);
let mut expected_slot = 0;
let epoch = bank.get_stakers_epoch(target_slot);
for i in 0..epoch {
expected_slot += bank.get_slots_in_epoch(i);
}
let schedule = leader_schedule(epoch, &bank).unwrap();
let mut index = 0;
while schedule[index] != delegate_id {
index += 1
}
expected_slot += index;
assert_eq!(
next_leader_slot(&delegate_id, 0, &bank, None),
Some(expected_slot),
);
}
#[test]
fn test_leader_schedule_via_bank() {
let pubkey = Keypair::new().pubkey();

File diff suppressed because it is too large.


@@ -1,6 +1,5 @@
//! The `poh_service` module implements a service that records the passing of
//! "ticks", a measure of time in the PoH stream
use crate::poh_recorder::PohRecorder;
use crate::service::Service;
use solana_sdk::timing::NUM_TICKS_PER_SECOND;
@@ -98,6 +97,7 @@ impl Service for PohService {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::poh_recorder::WorkingBank;
use crate::result::Result;
use crate::test_tx::test_tx;
@@ -111,83 +111,90 @@ mod tests {
let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
let bank = Arc::new(Bank::new(&genesis_block));
let prev_hash = bank.last_blockhash();
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
prev_hash,
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let exit = Arc::new(AtomicBool::new(false));
let working_bank = WorkingBank {
bank: bank.clone(),
min_tick_height: bank.tick_height(),
max_tick_height: std::u64::MAX,
};
let ledger_path = get_tmp_ledger_path!();
{
let blocktree =
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver) = PohRecorder::new(
bank.tick_height(),
prev_hash,
bank.slot(),
Some(4),
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blocktree),
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let exit = Arc::new(AtomicBool::new(false));
let working_bank = WorkingBank {
bank: bank.clone(),
min_tick_height: bank.tick_height(),
max_tick_height: std::u64::MAX,
};
let entry_producer: JoinHandle<Result<()>> = {
let poh_recorder = poh_recorder.clone();
let exit = exit.clone();
let entry_producer: JoinHandle<Result<()>> = {
let poh_recorder = poh_recorder.clone();
let exit = exit.clone();
Builder::new()
.name("solana-poh-service-entry_producer".to_string())
.spawn(move || {
loop {
// send some data
let h1 = hash(b"hello world!");
let tx = test_tx();
poh_recorder.lock().unwrap().record(h1, vec![tx]).unwrap();
Builder::new()
.name("solana-poh-service-entry_producer".to_string())
.spawn(move || {
loop {
// send some data
let h1 = hash(b"hello world!");
let tx = test_tx();
poh_recorder.lock().unwrap().record(h1, vec![tx]).unwrap();
if exit.load(Ordering::Relaxed) {
break Ok(());
if exit.load(Ordering::Relaxed) {
break Ok(());
}
}
}
})
.unwrap()
};
})
.unwrap()
};
const HASHES_PER_TICK: u64 = 2;
let poh_service = PohService::new(
poh_recorder.clone(),
&PohServiceConfig::Tick(HASHES_PER_TICK as usize),
&exit,
);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
const HASHES_PER_TICK: u64 = 2;
let poh_service = PohService::new(
poh_recorder.clone(),
&PohServiceConfig::Tick(HASHES_PER_TICK as usize),
&exit,
);
poh_recorder.lock().unwrap().set_working_bank(working_bank);
// get some events
let mut hashes = 0;
let mut need_tick = true;
let mut need_entry = true;
let mut need_partial = true;
// get some events
let mut hashes = 0;
let mut need_tick = true;
let mut need_entry = true;
let mut need_partial = true;
while need_tick || need_entry || need_partial {
for entry in entry_receiver.recv().unwrap().1 {
let entry = &entry.0;
if entry.is_tick() {
assert!(entry.num_hashes <= HASHES_PER_TICK);
while need_tick || need_entry || need_partial {
for entry in entry_receiver.recv().unwrap().1 {
let entry = &entry.0;
if entry.is_tick() {
assert!(entry.num_hashes <= HASHES_PER_TICK);
if entry.num_hashes == HASHES_PER_TICK {
need_tick = false;
if entry.num_hashes == HASHES_PER_TICK {
need_tick = false;
} else {
need_partial = false;
}
hashes += entry.num_hashes;
assert_eq!(hashes, HASHES_PER_TICK);
hashes = 0;
} else {
need_partial = false;
assert!(entry.num_hashes >= 1);
need_entry = false;
hashes += entry.num_hashes - 1;
}
hashes += entry.num_hashes;
assert_eq!(hashes, HASHES_PER_TICK);
hashes = 0;
} else {
assert!(entry.num_hashes >= 1);
need_entry = false;
hashes += entry.num_hashes - 1;
}
}
exit.store(true, Ordering::Relaxed);
let _ = poh_service.join().unwrap();
let _ = entry_producer.join().unwrap();
}
exit.store(true, Ordering::Relaxed);
let _ = poh_service.join().unwrap();
let _ = entry_producer.join().unwrap();
Blocktree::destroy(&ledger_path).unwrap();
}
}


@@ -158,8 +158,12 @@ impl ReplayStage {
locktower.update_epoch(&bank);
cluster_info.write().unwrap().push_vote(vote);
}
let next_leader_slot =
leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
let next_leader_slot = leader_schedule_utils::next_leader_slot(
&my_id,
bank.slot(),
&bank,
Some(&blocktree),
);
poh_recorder.lock().unwrap().reset(
bank.tick_height(),
bank.last_blockhash(),
@@ -195,7 +199,6 @@ impl ReplayStage {
&bank_forks,
&poh_recorder,
&cluster_info,
&blocktree,
poh_slot,
reached_leader_tick,
grace_ticks,
@@ -228,35 +231,11 @@ impl ReplayStage {
bank_forks: &Arc<RwLock<BankForks>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
blocktree: &Blocktree,
poh_slot: u64,
reached_leader_tick: bool,
grace_ticks: u64,
) {
trace!("{} checking poh slot {}", my_id, poh_slot);
if blocktree.meta(poh_slot).unwrap().is_some() {
// We've already broadcasted entries for this slot, skip it
// Since we are skipping our leader slot, let's tell poh recorder when we should be
// leader again
if reached_leader_tick {
let _ = bank_forks.read().unwrap().get(poh_slot).map(|bank| {
let next_leader_slot =
leader_schedule_utils::next_leader_slot(&my_id, bank.slot(), &bank);
let mut poh = poh_recorder.lock().unwrap();
let start_slot = poh.start_slot();
poh.reset(
bank.tick_height(),
bank.last_blockhash(),
start_slot,
next_leader_slot,
bank.ticks_per_slot(),
);
});
}
return;
}
if bank_forks.read().unwrap().get(poh_slot).is_none() {
let frozen = bank_forks.read().unwrap().frozen_banks();
let parent_slot = poh_recorder.lock().unwrap().start_slot();
@@ -579,7 +558,8 @@ mod test {
let bank = bank_forks.working_bank();
let blocktree = Arc::new(blocktree);
let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(&bank);
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blocktree);
let (replay_stage, _slot_full_receiver, ledger_writer_recv) = ReplayStage::new(
&my_keypair.pubkey(),
&voting_keypair.pubkey(),


@@ -202,8 +202,10 @@ pub mod tests {
let blocktree_path = get_tmp_ledger_path!();
let (blocktree, l_receiver) = Blocktree::open_with_signal(&blocktree_path)
.expect("Expected to successfully open ledger");
let blocktree = Arc::new(blocktree);
let bank = bank_forks.working_bank();
let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(&bank);
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blocktree);
let voting_keypair = Keypair::new();
let tvu = Tvu::new(
&voting_keypair.pubkey(),
@@ -218,7 +220,7 @@ pub mod tests {
fetch: target1.sockets.tvu,
}
},
Arc::new(blocktree),
blocktree,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,


@@ -34,7 +34,6 @@ fn new_gossip(
GossipService::new(&cluster_info, None, None, gossip, exit)
}
/// Test that message sent from leader to target1 and replayed to target2
#[test]
fn test_replay() {
solana_logger::setup();
@@ -99,8 +98,9 @@ fn test_replay() {
let dr_1 = new_gossip(cref1.clone(), target1.sockets.gossip, &exit);
let voting_keypair = Keypair::new();
let blocktree = Arc::new(blocktree);
let (poh_service_exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank);
create_test_recorder(&bank, &blocktree);
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(Arc::new(voting_keypair)),
@@ -114,7 +114,7 @@ fn test_replay() {
fetch: target1.sockets.tvu,
}
},
Arc::new(blocktree),
blocktree,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
@@ -185,6 +185,7 @@ fn test_replay() {
dr_1.join().unwrap();
t_receiver.join().unwrap();
t_responder.join().unwrap();
drop(poh_recorder);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
let _ignored = remove_dir_all(&blocktree_path);
}