Compare commits


9 Commits

Author SHA1 Message Date
Anatoly Yakovenko
8f181b4350 keep track of locktower slots and stakes 2019-03-25 16:36:19 -07:00
Rob Walker
48844924e5 Setup staking (#3480) 2019-03-25 14:19:14 -07:00
Pankaj Garg
f84593ad5f Revert "Disable accounts squash call from bank"
This reverts commit 7685ba2805.
2019-03-25 12:21:32 -07:00
Sathish
0469dc52ac Ensure accounts are unlocked (#3458) 2019-03-25 12:21:32 -07:00
Carl
4cf418f33f Fix wrong keypair 2019-03-23 16:33:50 -07:00
carllin
6c46fcfa4e Restart node test (#3459)
* Add test to local_cluster for restarting a node

* fix so that we don't hit end of epoch - leader not found before trying to transfer
2019-03-23 15:00:23 -07:00
Carl
12ec5304f2 Revert "fix so that we don't hit end of epoch - leader not found before trying to transfer"
Revert "Add test to local_cluster for restarting a node"
2019-03-22 21:46:08 -07:00
Carl
e32f798d5f fix so that we don't hit end of epoch - leader not found before trying to transfer 2019-03-22 20:47:32 -07:00
Carl
68a8b955bc Add test to local_cluster for restarting a node 2019-03-22 19:30:14 -07:00
12 changed files with 248 additions and 57 deletions

View File

@@ -223,24 +223,22 @@ impl BankingStage {
         Ok(())
     }

-    pub fn process_and_record_transactions(
+    fn process_and_record_transactions_locked(
         bank: &Bank,
         txs: &[Transaction],
         poh: &Arc<Mutex<PohRecorder>>,
+        lock_results: &[bank::Result<()>],
     ) -> Result<()> {
-        let now = Instant::now();
-        // Once accounts are locked, other threads cannot encode transactions that will modify the
-        // same account state
-        let lock_results = bank.lock_accounts(txs);
-        let lock_time = now.elapsed();
-
         let now = Instant::now();
         // Use a shorter maximum age when adding transactions into the pipeline. This will reduce
         // the likelihood of any single thread getting starved and processing old ids.
         // TODO: Banking stage threads should be prioritized to complete faster then this queue
         // expires.
-        let (loaded_accounts, results) =
-            bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2);
+        let (loaded_accounts, results) = bank.load_and_execute_transactions(
+            txs,
+            lock_results.to_vec(),
+            MAX_RECENT_BLOCKHASHES / 2,
+        );
         let load_execute_time = now.elapsed();

         let record_time = {
@@ -255,21 +253,45 @@ impl BankingStage {
             now.elapsed()
         };

-        let now = Instant::now();
-        // Once the accounts are new transactions can enter the pipeline to process them
-        bank.unlock_accounts(&txs, &results);
-        let unlock_time = now.elapsed();
-
         debug!(
-            "bank: {} lock: {}us load_execute: {}us record: {}us commit: {}us unlock: {}us txs_len: {}",
+            "bank: {} load_execute: {}us record: {}us commit: {}us txs_len: {}",
             bank.slot(),
-            duration_as_us(&lock_time),
             duration_as_us(&load_execute_time),
             duration_as_us(&record_time),
             duration_as_us(&commit_time),
+            txs.len(),
+        );
+
+        Ok(())
+    }
+
+    pub fn process_and_record_transactions(
+        bank: &Bank,
+        txs: &[Transaction],
+        poh: &Arc<Mutex<PohRecorder>>,
+    ) -> Result<()> {
+        let now = Instant::now();
+        // Once accounts are locked, other threads cannot encode transactions that will modify the
+        // same account state
+        let lock_results = bank.lock_accounts(txs);
+        let lock_time = now.elapsed();
+
+        let results = Self::process_and_record_transactions_locked(bank, txs, poh, &lock_results);
+
+        let now = Instant::now();
+        // Once the accounts are new transactions can enter the pipeline to process them
+        bank.unlock_accounts(&txs, &lock_results);
+        let unlock_time = now.elapsed();
+
+        debug!(
+            "bank: {} lock: {}us unlock: {}us txs_len: {}",
+            bank.slot(),
+            duration_as_us(&lock_time),
             duration_as_us(&unlock_time),
             txs.len(),
         );

-        Ok(())
+        results
     }

     /// Sends transactions to the bank.
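
The hunk above splits the old process_and_record_transactions into a locked inner step and a public wrapper that owns the whole lock/unlock lifecycle, so the accounts are released no matter how processing turns out, and the wrapper now surfaces the inner result (results) instead of an unconditional Ok(()). A minimal, self-contained sketch of that pattern; Bank, Tx, and every signature here are simplified stand-ins, not the real banking_stage API:

type Tx = u64;
type LockResult = Result<(), ()>;

struct Bank;

impl Bank {
    // Pretend every account lock succeeds.
    fn lock_accounts(&self, txs: &[Tx]) -> Vec<LockResult> {
        txs.iter().map(|_| Ok(())).collect()
    }
    fn unlock_accounts(&self, _txs: &[Tx], _lock_results: &[LockResult]) {}
}

// Inner step: runs while the locks are already held (load, execute, record).
fn process_and_record_transactions_locked(
    _bank: &Bank,
    _txs: &[Tx],
    _lock_results: &[LockResult],
) -> Result<(), ()> {
    Ok(())
}

// Outer step: takes the locks, delegates, and always releases them
// before returning whatever the inner step produced.
fn process_and_record_transactions(bank: &Bank, txs: &[Tx]) -> Result<(), ()> {
    let lock_results = bank.lock_accounts(txs);
    let results = process_and_record_transactions_locked(bank, txs, &lock_results);
    bank.unlock_accounts(txs, &lock_results);
    results
}

fn main() {
    let bank = Bank;
    assert!(process_and_record_transactions(&bank, &[1, 2, 3]).is_ok());
}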

core/src/cluster.rs (new file, 6 lines)
View File

@@ -0,0 +1,6 @@
+use solana_sdk::pubkey::Pubkey;
+
+pub trait Cluster {
+    fn get_node_ids(&self) -> Vec<Pubkey>;
+    fn restart_node(&mut self, pubkey: Pubkey);
+}

View File

@@ -7,11 +7,14 @@ use crate::cluster_info::FULLNODE_PORT_RANGE;
 use crate::contact_info::ContactInfo;
 use crate::entry::{Entry, EntrySlice};
 use crate::gossip_service::discover;
+use crate::poh_service::PohServiceConfig;
 use solana_client::client::create_client;
 use solana_sdk::hash::Hash;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::system_transaction::SystemTransaction;
-use solana_sdk::timing::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND};
+use solana_sdk::timing::{
+    duration_as_ms, DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, NUM_TICKS_PER_SECOND,
+};
 use std::thread::sleep;
 use std::time::Duration;
@@ -117,6 +120,25 @@ pub fn verify_ledger_ticks(ledger_path: &str, ticks_per_slot: usize) {
     }
 }

+pub fn sleep_n_epochs(
+    num_epochs: f64,
+    config: &PohServiceConfig,
+    ticks_per_slot: u64,
+    slots_per_epoch: u64,
+) {
+    let num_ticks_per_second = {
+        match config {
+            PohServiceConfig::Sleep(d) => (1000 / duration_as_ms(d)) as f64,
+            _ => panic!("Unsuppported tick config for testing"),
+        }
+    };
+    let num_ticks_to_sleep = num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64;
+    sleep(Duration::from_secs(
+        ((num_ticks_to_sleep + num_ticks_per_second - 1.0) / num_ticks_per_second) as u64,
+    ));
+}
+
 pub fn kill_entry_and_spend_and_verify_rest(
     entry_point_info: &ContactInfo,
     funding_keypair: &Keypair,
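
sleep_n_epochs, added above, turns an epoch count into a wall-clock sleep: the total tick count is num_epochs * ticks_per_slot * slots_per_epoch, the tick rate is derived from the PohServiceConfig::Sleep duration, and the (x + r - 1) / r form rounds the resulting second count up. A standalone check of that arithmetic, with an assumed 10 ms tick duration and no actual sleeping:

use std::time::Duration;

// Re-derives the sleep_n_epochs math above; the 10ms tick is an example
// value, not a project default.
fn seconds_to_sleep(num_epochs: f64, ticks_per_slot: u64, slots_per_epoch: u64, tick: Duration) -> u64 {
    // PohServiceConfig::Sleep(d) -> ticks per second, as in the diff.
    let num_ticks_per_second = (1000 / tick.as_millis() as u64) as f64;
    let num_ticks_to_sleep = num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64;
    // (x + r - 1) / r, truncated: a ceiling division on the second count.
    ((num_ticks_to_sleep + num_ticks_per_second - 1.0) / num_ticks_per_second) as u64
}

fn main() {
    // 1 epoch x 8 ticks/slot x 16 slots/epoch = 128 ticks at 100 ticks/s -> 2s.
    assert_eq!(seconds_to_sleep(1.0, 8, 16, Duration::from_millis(10)), 2);
}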

View File

@@ -37,6 +37,7 @@ use std::thread::JoinHandle;
 use std::thread::{spawn, Result};
 use std::time::Duration;

+#[derive(Clone)]
 pub struct FullnodeConfig {
     pub sigverify_disabled: bool,
     pub voting_disabled: bool,

@@ -265,15 +266,6 @@ impl Fullnode {
     // Used for notifying many nodes in parallel to exit
     pub fn exit(&self) {
         self.exit.store(true, Ordering::Relaxed);
-
-        // Need to force the poh_recorder to drop the WorkingBank,
-        // which contains the channel to BroadcastStage. This should be
-        // sufficient as long as no other rotations are happening that
-        // can cause the Tpu to restart a BankingStage and reset a
-        // WorkingBank in poh_recorder. It follows no other rotations can be
-        // in motion because exit()/close() are only called by the run() loop
-        // which is the sole initiator of rotations.
-        self.poh_recorder.lock().unwrap().clear_bank();
     }

     pub fn close(self) -> Result<()> {

View File

@@ -27,6 +27,7 @@ pub mod blocktree;
 pub mod blockstream;
 pub mod blockstream_service;
 pub mod blocktree_processor;
+pub mod cluster;
 pub mod cluster_info;
 pub mod cluster_tests;
 pub mod db_window;

View File

@@ -1,4 +1,5 @@
 use crate::blocktree::{create_new_tmp_ledger, tmp_copy_blocktree};
+use crate::cluster::Cluster;
 use crate::cluster_info::{Node, FULLNODE_PORT_RANGE};
 use crate::contact_info::ContactInfo;
 use crate::fullnode::{Fullnode, FullnodeConfig};
@@ -14,17 +15,33 @@ use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
 use solana_sdk::timing::DEFAULT_TICKS_PER_SLOT;
 use solana_vote_api::vote_state::VoteState;
 use solana_vote_api::vote_transaction::VoteTransaction;
+use std::collections::HashMap;
 use std::fs::remove_dir_all;
 use std::io::{Error, ErrorKind, Result};
 use std::sync::Arc;

+pub struct FullnodeInfo {
+    pub keypair: Arc<Keypair>,
+    pub ledger_path: String,
+}
+
+impl FullnodeInfo {
+    fn new(keypair: Arc<Keypair>, ledger_path: String) -> Self {
+        Self {
+            keypair,
+            ledger_path,
+        }
+    }
+}
+
 pub struct LocalCluster {
     /// Keypair with funding to particpiate in the network
     pub funding_keypair: Keypair,
+    pub fullnode_config: FullnodeConfig,
     /// Entry point from which the rest of the network can be discovered
     pub entry_point_info: ContactInfo,
-    pub ledger_paths: Vec<String>,
-    fullnodes: Vec<Fullnode>,
+    pub fullnodes: HashMap<Pubkey, Fullnode>,
+    pub fullnode_infos: HashMap<Pubkey, FullnodeInfo>,
 }

 impl LocalCluster {
@@ -63,9 +80,6 @@ impl LocalCluster {
         genesis_block.slots_per_epoch = slots_per_epoch;
         let (genesis_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);
         let leader_ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
-        let mut ledger_paths = vec![];
-        ledger_paths.push(genesis_ledger_path.clone());
-        ledger_paths.push(leader_ledger_path.clone());
         let voting_keypair = Keypair::new();
         let leader_contact_info = leader_node.info.clone();
         let leader_server = Fullnode::new(
@@ -77,7 +91,14 @@ impl LocalCluster {
             None,
             fullnode_config,
         );
-        let mut fullnodes = vec![leader_server];
+        let mut fullnodes = HashMap::new();
+        let mut fullnode_infos = HashMap::new();
+        fullnodes.insert(leader_pubkey, leader_server);
+        fullnode_infos.insert(
+            leader_pubkey,
+            FullnodeInfo::new(leader_keypair.clone(), leader_ledger_path),
+        );
+
         let mut client = create_client(
             leader_contact_info.client_facing_addr(),
             FULLNODE_PORT_RANGE,
@@ -90,7 +111,6 @@ impl LocalCluster {
             let validator_pubkey = validator_keypair.pubkey();
             let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
             let ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
-            ledger_paths.push(ledger_path.clone());

             // Send each validator some lamports to vote
             let validator_balance =
@@ -116,34 +136,40 @@ impl LocalCluster {
                 Some(&leader_contact_info),
                 fullnode_config,
             );
-            fullnodes.push(validator_server);
+            fullnodes.insert(validator_keypair.pubkey(), validator_server);
+            fullnode_infos.insert(
+                validator_keypair.pubkey(),
+                FullnodeInfo::new(validator_keypair.clone(), ledger_path),
+            );
         }

         discover(&leader_contact_info.gossip, node_stakes.len()).unwrap();

         Self {
             funding_keypair: mint_keypair,
             entry_point_info: leader_contact_info,
             fullnodes,
-            ledger_paths,
+            fullnode_config: fullnode_config.clone(),
+            fullnode_infos,
         }
     }

     pub fn exit(&self) {
-        for node in &self.fullnodes {
+        for node in self.fullnodes.values() {
             node.exit();
         }
     }

     pub fn close_preserve_ledgers(&mut self) {
         self.exit();
-        while let Some(node) = self.fullnodes.pop() {
+        for (_, node) in self.fullnodes.drain() {
             node.join().unwrap();
         }
     }

     fn close(&mut self) {
         self.close_preserve_ledgers();
-        for path in &self.ledger_paths {
-            remove_dir_all(path).unwrap_or_else(|_| panic!("Unable to remove {}", path));
+        for info in self.fullnode_infos.values() {
+            remove_dir_all(&info.ledger_path)
+                .unwrap_or_else(|_| panic!("Unable to remove {}", info.ledger_path));
         }
     }
@@ -223,6 +249,38 @@ impl LocalCluster {
     }
 }

+impl Cluster for LocalCluster {
+    fn restart_node(&mut self, pubkey: Pubkey) {
+        // Shut down the fullnode
+        let node = self.fullnodes.remove(&pubkey).unwrap();
+        node.exit();
+        node.join().unwrap();
+
+        // Restart the node
+        let fullnode_info = &self.fullnode_infos[&pubkey];
+        let node = Node::new_localhost_with_pubkey(&fullnode_info.keypair.pubkey());
+        if pubkey == self.entry_point_info.id {
+            self.entry_point_info = node.info.clone();
+        }
+        let new_voting_keypair = Keypair::new();
+        let restarted_node = Fullnode::new(
+            node,
+            &fullnode_info.keypair,
+            &fullnode_info.ledger_path,
+            &new_voting_keypair.pubkey(),
+            new_voting_keypair,
+            None,
+            &self.fullnode_config,
+        );
+
+        self.fullnodes.insert(pubkey, restarted_node);
+    }
+
+    fn get_node_ids(&self) -> Vec<Pubkey> {
+        self.fullnodes.keys().cloned().collect()
+    }
+}
+
 impl Drop for LocalCluster {
     fn drop(&mut self) {
         self.close();
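
restart_node above leans on the new HashMap layout: the Fullnode is removed from the map so it can be consumed by exit() and join(), then a replacement is built from the retained FullnodeInfo (same identity keypair and ledger path, fresh voting keypair) and reinserted under the same pubkey. A stripped-down sketch of that ownership pattern, using stand-in types rather than the real Fullnode API:

use std::collections::HashMap;

struct Node {
    ledger_path: String,
}

fn shutdown(node: Node) {
    // Consuming `node` by value plays the role of exit() + join(self).
    drop(node);
}

fn restart(nodes: &mut HashMap<u64, Node>, infos: &HashMap<u64, String>, id: u64) {
    // remove() hands back owned access, which join(self)-style APIs require.
    let node = nodes.remove(&id).unwrap();
    shutdown(node);

    // Rebuild from the retained info and reinsert under the same key.
    let restarted = Node {
        ledger_path: infos[&id].clone(),
    };
    nodes.insert(id, restarted);
}

fn main() {
    let mut nodes = HashMap::new();
    nodes.insert(7, Node { ledger_path: "/tmp/ledger-7".into() });
    let mut infos = HashMap::new();
    infos.insert(7, "/tmp/ledger-7".to_string());
    restart(&mut nodes, &infos, 7);
    assert_eq!(nodes[&7].ledger_path, "/tmp/ledger-7");
}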

View File

@@ -1,6 +1,7 @@
 use crate::bank_forks::BankForks;
 use crate::staking_utils;
 use hashbrown::{HashMap, HashSet};
+use solana_metrics::influxdb;
 use solana_runtime::bank::Bank;
 use solana_sdk::account::Account;
 use solana_sdk::pubkey::Pubkey;
@@ -14,6 +15,7 @@ pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;
 pub struct EpochStakes {
     slot: u64,
     stakes: HashMap<Pubkey, u64>,
+    self_staked: u64,
     total_staked: u64,
 }
@@ -32,26 +34,32 @@ pub struct Locktower {
 }

 impl EpochStakes {
-    pub fn new(slot: u64, stakes: HashMap<Pubkey, u64>) -> Self {
+    pub fn new(slot: u64, stakes: HashMap<Pubkey, u64>, self_id: &Pubkey) -> Self {
         let total_staked = stakes.values().sum();
+        let self_staked = *stakes.get(&self_id).unwrap_or(&0);
         Self {
             slot,
             stakes,
             total_staked,
+            self_staked,
         }
     }
     pub fn new_for_tests(lamports: u64) -> Self {
-        Self::new(0, vec![(Pubkey::default(), lamports)].into_iter().collect())
+        Self::new(
+            0,
+            vec![(Pubkey::default(), lamports)].into_iter().collect(),
+            &Pubkey::default(),
+        )
     }
     pub fn new_from_stake_accounts(slot: u64, accounts: &[(Pubkey, Account)]) -> Self {
         let stakes = accounts.iter().map(|(k, v)| (*k, v.lamports)).collect();
-        Self::new(slot, stakes)
+        Self::new(slot, stakes, &accounts[0].0)
     }
     pub fn new_from_bank(bank: &Bank) -> Self {
         let bank_epoch = bank.get_epoch_and_slot_index(bank.slot()).0;
         let stakes = staking_utils::vote_account_balances_at_epoch(bank, bank_epoch)
             .expect("voting require a bank with stakes");
-        Self::new(bank_epoch, stakes)
+        Self::new(bank_epoch, stakes, &bank.collector_id())
     }
 }
@@ -150,12 +158,37 @@ impl Locktower {
                 "epoch_stakes cannot move backwards"
             );
             self.epoch_stakes = EpochStakes::new_from_bank(bank);
+            solana_metrics::submit(
+                influxdb::Point::new("counter-locktower-epoch")
+                    .add_field(
+                        "slot",
+                        influxdb::Value::Integer(self.epoch_stakes.slot as i64),
+                    )
+                    .add_field(
+                        "self_staked",
+                        influxdb::Value::Integer(self.epoch_stakes.self_staked as i64),
+                    )
+                    .add_field(
+                        "total_staked",
+                        influxdb::Value::Integer(self.epoch_stakes.total_staked as i64),
+                    )
+                    .to_owned(),
+            );
         }
     }

     pub fn record_vote(&mut self, slot: u64) -> Option<u64> {
         let root_slot = self.lockouts.root_slot;
         self.lockouts.process_vote(Vote { slot });
+        solana_metrics::submit(
+            influxdb::Point::new("counter-locktower-vote")
+                .add_field("latest", influxdb::Value::Integer(slot as i64))
+                .add_field(
+                    "root",
+                    influxdb::Value::Integer(self.lockouts.root_slot.unwrap_or(0) as i64),
+                )
+                .to_owned(),
+        );
         if root_slot != self.lockouts.root_slot {
             Some(self.lockouts.root_slot.unwrap())
         } else {
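
The EpochStakes changes thread a self_id through every constructor so the struct can track the node's own stake next to the total, which the new counter-locktower-epoch metric then reports. The bookkeeping itself is two reductions over the stake map; a standalone sketch, with std's HashMap and byte-array keys standing in for hashbrown and Pubkey:

use std::collections::HashMap;

// Mirrors the self_staked / total_staked computation added to
// EpochStakes::new above.
fn stake_summary(stakes: &HashMap<[u8; 32], u64>, self_id: &[u8; 32]) -> (u64, u64) {
    let total_staked: u64 = stakes.values().sum();
    // An id missing from the map reports zero self-stake, matching the
    // unwrap_or(&0) in the diff.
    let self_staked = *stakes.get(self_id).unwrap_or(&0);
    (self_staked, total_staked)
}

fn main() {
    let me = [1u8; 32];
    let other = [2u8; 32];
    let stakes: HashMap<_, _> = vec![(me, 42u64), (other, 58u64)].into_iter().collect();
    assert_eq!(stake_summary(&stakes, &me), (42, 100));
}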

run.sh (29 changed lines)
View File

@@ -40,8 +40,20 @@ dataDir=$PWD/target/"$(basename "$0" .sh)"
 set -x
 solana-keygen -o "$dataDir"/config/leader-keypair.json
+solana-keygen -o "$dataDir"/config/leader-staking-account-keypair.json
 solana-keygen -o "$dataDir"/config/drone-keypair.json

+leaderPubkey=$(\
+  solana-wallet \
+    --keypair "$dataDir"/config/leader-keypair.json \
+    address \
+)
+leaderStakingAccountPubkey=$(\
+  solana-wallet \
+    --keypair "$dataDir"/config/leader-staking-account-keypair.json \
+    address \
+)
+
 solana-genesis \
   --lamports 1000000000 \
   --mint "$dataDir"/config/drone-keypair.json \
@@ -53,6 +65,8 @@ drone=$!
 args=(
   --identity "$dataDir"/config/leader-keypair.json
+  --voting-keypair "$dataDir"/config/leader-staking-account-keypair.json
+  --staking-account "$leaderStakingAccountPubkey"
   --ledger "$dataDir"/ledger/
   --rpc-port 8899
   --rpc-drone-address 127.0.0.1:9900
@@ -64,9 +78,20 @@ solana-fullnode "${args[@]}" &
 fullnode=$!

 abort() {
+  set +e
   kill "$drone" "$fullnode"
 }
-trap abort INT TERM EXIT
+
+solana-wallet --keypair "$dataDir"/config/leader-keypair.json airdrop 42
+solana-wallet \
+  --keypair "$dataDir"/config/leader-keypair.json \
+  create-staking-account "$leaderStakingAccountPubkey" 42
+solana-wallet \
+  --keypair "$dataDir"/config/leader-staking-account-keypair.json \
+  configure-staking-account \
+    --delegate-account "$leaderPubkey" \
+    --authorize-voter "$leaderStakingAccountPubkey"
+solana-wallet --keypair "$dataDir"/config/leader-keypair.json balance
+trap abort SIGINT SIGTERM

 wait "$fullnode"
+kill "$drone" "$fullnode"

Binary file not shown.

View File

@@ -727,7 +727,6 @@ impl AccountsDB {
             .map_or(0, |fork_info| fork_info.transaction_count)
     }

-    #[allow(dead_code)]
     fn remove_parents(&self, fork: Fork) -> Vec<Fork> {
         let mut info = self.fork_infos.write().unwrap();
         let fork_info = info.get_mut(&fork).unwrap();

@@ -744,7 +743,6 @@ impl AccountsDB {
             .is_empty()
     }

-    #[allow(dead_code)]
     fn get_merged_account_map(
         &self,
         fork: Fork,

@@ -765,7 +763,6 @@
     }

     /// make fork a root, i.e. forget its heritage
-    #[allow(dead_code)]
     fn squash(&self, fork: Fork) {
         let parents = self.remove_parents(fork);

@@ -994,7 +991,6 @@ impl Accounts {
     /// accounts starts with an empty data structure for every child/fork
     /// this function squashes all the parents into this instance
-    #[allow(dead_code)]
     pub fn squash(&self, fork: Fork) {
         assert!(!self.account_locks.lock().unwrap().contains_key(&fork));
         self.accounts_db.squash(fork);

View File

@@ -300,7 +300,7 @@ impl Bank {
         let parents = self.parents();
         *self.parent.write().unwrap() = None;

-        // self.accounts().squash(self.accounts_id);
+        self.accounts().squash(self.accounts_id);

         let parent_caches: Vec<_> = parents
             .iter()
View File

@@ -1,11 +1,12 @@
 extern crate solana;

+use solana::cluster::Cluster;
 use solana::cluster_tests;
 use solana::fullnode::FullnodeConfig;
 use solana::gossip_service::discover;
 use solana::local_cluster::LocalCluster;
 use solana::poh_service::PohServiceConfig;
-use std::thread::sleep;
+use solana_sdk::timing;
 use std::time::Duration;

 #[test]
@@ -109,14 +110,16 @@ fn test_two_unbalanced_stakes() {
         num_ticks_per_slot,
         num_slots_per_epoch,
     );
-    let num_epochs_to_sleep = 10;
-    let num_ticks_to_sleep = num_epochs_to_sleep * num_ticks_per_slot * num_slots_per_epoch;
-    sleep(Duration::from_millis(
-        num_ticks_to_sleep / num_ticks_per_second as u64 * 100,
-    ));
+    cluster_tests::sleep_n_epochs(
+        10.0,
+        &fullnode_config.tick_config,
+        num_ticks_per_slot,
+        num_slots_per_epoch,
+    );

     cluster.close_preserve_ledgers();
-    let leader_ledger = cluster.ledger_paths[1].clone();
+    let leader_id = cluster.entry_point_info.id;
+    let leader_ledger = cluster.fullnode_infos[&leader_id].ledger_path.clone();
     cluster_tests::verify_ledger_ticks(&leader_ledger, num_ticks_per_slot as usize);
 }
@@ -137,3 +140,36 @@ fn test_forwarding() {
     // Confirm that transactions were forwarded to and processed by the leader.
     cluster_tests::send_many_transactions(&validator_info, &cluster.funding_keypair, 20);
 }
+
+#[test]
+fn test_restart_node() {
+    let fullnode_config = FullnodeConfig::default();
+    let slots_per_epoch = 8;
+    let ticks_per_slot = 16;
+    let mut cluster = LocalCluster::new_with_tick_config(
+        &[3],
+        100,
+        &fullnode_config,
+        ticks_per_slot,
+        slots_per_epoch,
+    );
+    let nodes = cluster.get_node_ids();
+    cluster_tests::sleep_n_epochs(
+        1.0,
+        &fullnode_config.tick_config,
+        timing::DEFAULT_TICKS_PER_SLOT,
+        slots_per_epoch,
+    );
+    cluster.restart_node(nodes[0]);
+    cluster_tests::sleep_n_epochs(
+        0.5,
+        &fullnode_config.tick_config,
+        timing::DEFAULT_TICKS_PER_SLOT,
+        slots_per_epoch,
+    );
+    cluster_tests::spend_and_verify_all_nodes(
+        &cluster.entry_point_info,
+        &cluster.funding_keypair,
+        1,
+    );
+}