From ee1300a671514a7adb10741c192bcc42c0990460 Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Thu, 16 Jan 2020 19:30:00 -0800
Subject: [PATCH] Improve bench-tps keypair generation (#7723) (#7853)

automerge
---
 bench-tps/src/bench.rs       | 519 ++++++++++++++++++-----------------
 bench-tps/src/main.rs        |  20 +-
 bench-tps/tests/bench_tps.rs |  12 +-
 3 files changed, 275 insertions(+), 276 deletions(-)

diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs
index 4e44397abc..c6e0ccee8a 100644
--- a/bench-tps/src/bench.rs
+++ b/bench-tps/src/bench.rs
@@ -21,8 +21,7 @@ use solana_sdk::{
     transaction::Transaction,
 };
 use std::{
-    cmp,
-    collections::VecDeque,
+    collections::{HashSet, VecDeque},
     net::SocketAddr,
     process::exit,
     sync::{
@@ -66,10 +65,9 @@ fn get_recent_blockhash(client: &T) -> (Hash, FeeCalculator) {
 }
 
 pub fn do_bench_tps<T>(
-    clients: Vec<T>,
+    client: Arc<T>,
     config: Config,
     gen_keypairs: Vec<Keypair>,
-    keypair0_balance: u64,
     libra_args: Option<LibraKeys>,
 ) -> u64
 where
@@ -82,13 +80,9 @@ where
         duration,
         tx_count,
         sustained,
-        num_lamports_per_account,
         ..
     } = config;
 
-    let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
-    let client = &clients[0];
-
     let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
     let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
     assert!(gen_keypairs.len() >= 2 * tx_count);
@@ -115,20 +109,17 @@ where
     let maxes = Arc::new(RwLock::new(Vec::new()));
     let sample_period = 1; // in seconds
     info!("Sampling TPS every {} second...", sample_period);
-    let v_threads: Vec<_> = clients
-        .iter()
-        .map(|client| {
-            let exit_signal = exit_signal.clone();
-            let maxes = maxes.clone();
-            let client = client.clone();
-            Builder::new()
-                .name("solana-client-sample".to_string())
-                .spawn(move || {
-                    sample_txs(&exit_signal, &maxes, sample_period, &client);
-                })
-                .unwrap()
-        })
-        .collect();
+    let sample_thread = {
+        let exit_signal = exit_signal.clone();
+        let maxes = maxes.clone();
+        let client = client.clone();
+        Builder::new()
+            .name("solana-client-sample".to_string())
+            .spawn(move || {
+                sample_txs(&exit_signal, &maxes, sample_period, &client);
+            })
+            .unwrap()
+    };
 
     let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
 
@@ -174,11 +165,10 @@ where
 
     // generate and send transactions for the specified duration
     let start = Instant::now();
-    let keypair_chunks = source_keypair_chunks.len() as u64;
+    let keypair_chunks = source_keypair_chunks.len();
     let mut reclaim_lamports_back_to_source_account = false;
-    let mut i = keypair0_balance;
+    let mut chunk_index = 0;
     while start.elapsed() < duration {
-        let chunk_index = (i % keypair_chunks) as usize;
         generate_txs(
             &shared_txs,
             &recent_blockhash,
@@ -206,8 +196,11 @@ where
         // transaction signatures even when blockhash is reused.
         dest_keypair_chunks[chunk_index].rotate_left(1);
 
-        i += 1;
-        if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
+        // Move on to next chunk
+        chunk_index = (chunk_index + 1) % keypair_chunks;
+
+        // Switch directions after transfering for each "chunk"
+        if chunk_index == 0 {
             reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
         }
     }
@@ -215,11 +208,9 @@ where
 
     // Stop the sampling threads so it will collect the stats
     exit_signal.store(true, Ordering::Relaxed);
 
-    info!("Waiting for validator threads...");
-    for t in v_threads {
-        if let Err(err) = t.join() {
-            info!(" join() failed with: {:?}", err);
-        }
+    info!("Waiting for sampler threads...");
+    if let Err(err) = sample_thread.join() {
+        info!(" join() failed with: {:?}", err);
     }
 
     // join the tx send threads
@@ -500,177 +491,218 @@ fn do_tx_transfers(
     }
 }
 
-fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
+fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
     for a in &tx.message().account_keys[1..] {
-        if client
-            .get_balance_with_commitment(a, CommitmentConfig::recent())
-            .unwrap_or(0)
-            >= amount
-        {
-            return true;
+        match client.get_balance_with_commitment(a, CommitmentConfig::recent()) {
+            Ok(balance) => return balance >= amount,
+            Err(err) => error!("failed to get balance {:?}", err),
         }
     }
     false
 }
 
+trait FundingTransactions<'a> {
+    fn fund<T: 'static + Client + Send + Sync>(
+        &mut self,
+        client: &Arc<T>,
+        to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
+        to_lamports: u64,
+    );
+    fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]);
+    fn sign(&mut self, blockhash: Hash);
+    fn send<T: Client>(&self, client: &Arc<T>);
+    fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64);
+}
+
+impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
+    fn fund<T: 'static + Client + Send + Sync>(
+        &mut self,
+        client: &Arc<T>,
+        to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
+        to_lamports: u64,
+    ) {
+        self.make(to_fund);
+
+        let mut tries = 0;
+        while !self.is_empty() {
+            info!(
+                "{} {} each to {} accounts in {} txs",
+                if tries == 0 {
+                    "transferring"
+                } else {
+                    " retrying"
+                },
+                to_lamports,
+                self.len() * MAX_SPENDS_PER_TX as usize,
+                self.len(),
+            );
+
+            let (blockhash, _fee_calculator) = get_recent_blockhash(client.as_ref());
+
+            // re-sign retained to_fund_txes with updated blockhash
+            self.sign(blockhash);
+            self.send(&client);
+
+            // Sleep a few slots to allow transactions to process
+            sleep(Duration::from_secs(1));
+
+            self.verify(&client, to_lamports);
+
+            // retry anything that seems to have dropped through cracks
+            // again since these txs are all or nothing, they're fine to
+            // retry
+            tries += 1;
+        }
+        info!("transferred");
+    }
+
+    fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]) {
+        let mut make_txs = Measure::start("make_txs");
+        let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
+            .par_iter()
+            .map(|(k, t)| {
+                let tx = Transaction::new_unsigned_instructions(system_instruction::transfer_many(
+                    &k.pubkey(),
+                    &t,
+                ));
+                (*k, tx)
+            })
+            .collect();
+        make_txs.stop();
+        debug!(
+            "make {} unsigned txs: {}us",
+            to_fund_txs.len(),
+            make_txs.as_us()
+        );
+        self.extend(to_fund_txs);
+    }
+
+    fn sign(&mut self, blockhash: Hash) {
+        let mut sign_txs = Measure::start("sign_txs");
+        self.par_iter_mut().for_each(|(k, tx)| {
+            tx.sign(&[*k], blockhash);
+        });
+        sign_txs.stop();
+        debug!("sign {} txs: {}us", self.len(), sign_txs.as_us());
+    }
+
+    fn send<T: Client>(&self, client: &Arc<T>) {
+        let mut send_txs = Measure::start("send_txs");
+        self.iter().for_each(|(_, tx)| {
+            client.async_send_transaction(tx.clone()).expect("transfer");
+        });
+        send_txs.stop();
+        debug!("send {} txs: {}us", self.len(), send_txs.as_us());
+    }
+
+    fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64) {
+        let starting_txs = self.len();
+        let verified_txs = Arc::new(AtomicUsize::new(0));
+        let too_many_failures = Arc::new(AtomicBool::new(false));
+        let loops = if starting_txs < 1000 { 3 } else { 1 };
+        // Only loop multiple times for small (quick) transaction batches
+        for _ in 0..loops {
+            let failed_verify = Arc::new(AtomicUsize::new(0));
+            let client = client.clone();
+            let verified_txs = &verified_txs;
+            let failed_verify = &failed_verify;
+            let too_many_failures = &too_many_failures;
+            let verified_set: HashSet<Pubkey> = self
+                .par_iter()
+                .filter_map(move |(k, tx)| {
+                    if too_many_failures.load(Ordering::Relaxed) {
+                        return None;
+                    }
+
+                    let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
+                        verified_txs.fetch_add(1, Ordering::Relaxed);
+                        Some(k.pubkey())
+                    } else {
+                        failed_verify.fetch_add(1, Ordering::Relaxed);
+                        None
+                    };
+
+                    let verified_txs = verified_txs.load(Ordering::Relaxed);
+                    let failed_verify = failed_verify.load(Ordering::Relaxed);
+                    let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
+                    if failed_verify > 100 && failed_verify > verified_txs {
+                        too_many_failures.store(true, Ordering::Relaxed);
+                        warn!(
+                            "Too many failed transfers... {} remaining, {} verified, {} failures",
+                            remaining_count, verified_txs, failed_verify
+                        );
+                    }
+                    if remaining_count % 100 == 0 {
+                        info!(
+                            "Verifying transfers... {} remaining, {} verified, {} failures",
+                            remaining_count, verified_txs, failed_verify
+                        );
+                    }
+
+                    verified
+                })
+                .collect();
+
+            self.retain(|(k, _)| !verified_set.contains(&k.pubkey()));
+            if self.is_empty() {
+                break;
+            }
+            info!("Looping verifications");
+
+            let verified_txs = verified_txs.load(Ordering::Relaxed);
+            let failed_verify = failed_verify.load(Ordering::Relaxed);
+            let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
+            info!(
+                "Verifying transfers... {} remaining, {} verified, {} failures",
+                remaining_count, verified_txs, failed_verify
+            );
+            sleep(Duration::from_millis(100));
+        }
+    }
+}
+
 /// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
 /// on every iteration. This allows us to replay the transfers because the source is either empty,
 /// or full
-pub fn fund_keys<T: Client>(
-    client: &T,
+pub fn fund_keys<T: 'static + Client + Send + Sync>(
+    client: Arc<T>,
     source: &Keypair,
     dests: &[Keypair],
     total: u64,
     max_fee: u64,
-    mut extra: u64,
+    lamports_per_account: u64,
 ) {
-    let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
-    let mut notfunded: Vec<&Keypair> = dests.iter().collect();
-    let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
-
-    info!(
-        "funding keys {} with lamports: {:?} total: {}",
-        dests.len(),
-        client.get_balance(&source.pubkey()),
-        total
-    );
-    while !notfunded.is_empty() {
-        let mut new_funded: Vec<(&Keypair, u64)> = vec![];
-        let mut to_fund = vec![];
-        info!("creating from... {}", funded.len());
-        let mut build_to_fund = Measure::start("build_to_fund");
-        for f in &mut funded {
-            let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
-            if max_units == 0 {
-                break;
-            }
-            let start = notfunded.len() - max_units as usize;
-            let fees = if extra > 0 { max_fee } else { 0 };
-            let per_unit = (f.1 - lamports_per_account - fees) / max_units;
-            let moves: Vec<_> = notfunded[start..]
-                .iter()
-                .map(|k| (k.pubkey(), per_unit))
-                .collect();
-            notfunded[start..]
-                .iter()
-                .for_each(|k| new_funded.push((k, per_unit)));
-            notfunded.truncate(start);
-            if !moves.is_empty() {
-                to_fund.push((f.0, moves));
-            }
-            extra -= 1;
+    let mut funded: Vec<&Keypair> = vec![source];
+    let mut funded_funds = total;
+    let mut not_funded: Vec<&Keypair> = dests.iter().collect();
+    while !not_funded.is_empty() {
+        // Build to fund list and prepare funding sources for next iteration
+        let mut new_funded: Vec<&Keypair> = vec![];
+        let mut to_fund: Vec<(&Keypair, Vec<(Pubkey, u64)>)> = vec![];
+        let to_lamports = (funded_funds - lamports_per_account - max_fee) / MAX_SPENDS_PER_TX;
+        for f in funded {
+            let start = not_funded.len() - MAX_SPENDS_PER_TX as usize;
+            let dests: Vec<_> = not_funded.drain(start..).collect();
+            let spends: Vec<_> = dests.iter().map(|k| (k.pubkey(), to_lamports)).collect();
+            to_fund.push((f, spends));
+            new_funded.extend(dests.into_iter());
         }
-        build_to_fund.stop();
-        debug!("build to_fund vec: {}us", build_to_fund.as_us());
 
         // try to transfer a "few" at a time with recent blockhash
         // assume 4MB network buffers, and 512 byte packets
         const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;
 
         to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
-            let mut tries = 0;
-
-            let mut make_txs = Measure::start("make_txs");
-            // this set of transactions just initializes us for bookkeeping
-            #[allow(clippy::clone_double_ref)] // sigh
-            let mut to_fund_txs: Vec<_> = chunk
-                .par_iter()
-                .map(|(k, m)| {
-                    let tx = Transaction::new_unsigned_instructions(
-                        system_instruction::transfer_many(&k.pubkey(), &m),
-                    );
-                    (k.clone(), tx)
-                })
-                .collect();
-            make_txs.stop();
-            debug!(
-                "make {} unsigned txs: {}us",
-                to_fund_txs.len(),
-                make_txs.as_us()
+            Vec::<(&Keypair, Transaction)>::with_capacity(chunk.len()).fund(
+                &client,
+                chunk,
+                to_lamports,
             );
-
-            let amount = chunk[0].1[0].1;
-
-            while !to_fund_txs.is_empty() {
-                let receivers = to_fund_txs
-                    .iter()
-                    .fold(0, |len, (_, tx)| len + tx.message().instructions.len());
-
-                info!(
-                    "{} {} to {} in {} txs",
-                    if tries == 0 {
-                        "transferring"
-                    } else {
-                        " retrying"
-                    },
-                    amount,
-                    receivers,
-                    to_fund_txs.len(),
-                );
-
-                let (blockhash, _fee_calculator) = get_recent_blockhash(client);
-
-                // re-sign retained to_fund_txes with updated blockhash
-                let mut sign_txs = Measure::start("sign_txs");
-                to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
-                    tx.sign(&[*k], blockhash);
-                });
-                sign_txs.stop();
-                debug!("sign {} txs: {}us", to_fund_txs.len(), sign_txs.as_us());
-
-                let mut send_txs = Measure::start("send_txs");
-                to_fund_txs.iter().for_each(|(_, tx)| {
-                    client.async_send_transaction(tx.clone()).expect("transfer");
-                });
-                send_txs.stop();
-                debug!("send {} txs: {}us", to_fund_txs.len(), send_txs.as_us());
-
-                let mut verify_txs = Measure::start("verify_txs");
-                let mut starting_txs = to_fund_txs.len();
-                let mut verified_txs = 0;
-                let mut failed_verify = 0;
-                // Only loop multiple times for small (quick) transaction batches
-                for _ in 0..(if starting_txs < 1000 { 3 } else { 1 }) {
-                    let mut timer = Instant::now();
-                    to_fund_txs.retain(|(_, tx)| {
-                        if timer.elapsed() >= Duration::from_secs(5) {
-                            if failed_verify > 0 {
-                                debug!("total txs failed verify: {}", failed_verify);
-                            }
-                            info!(
-                                "Verifying transfers... {} remaining",
-                                starting_txs - verified_txs
-                            );
-                            timer = Instant::now();
-                        }
-                        let verified = verify_funding_transfer(client, &tx, amount);
-                        if verified {
-                            verified_txs += 1;
-                        } else {
-                            failed_verify += 1;
-                        }
-                        !verified
-                    });
-                    if to_fund_txs.is_empty() {
-                        break;
-                    }
-                    debug!("Looping verifications");
-                    info!("Verifying transfers... {} remaining", to_fund_txs.len());
-                    sleep(Duration::from_millis(100));
-                }
-                starting_txs -= to_fund_txs.len();
-                verify_txs.stop();
-                debug!("verified {} txs: {}us", starting_txs, verify_txs.as_us());
-
-                // retry anything that seems to have dropped through cracks
-                // again since these txs are all or nothing, they're fine to
-                // retry
-                tries += 1;
-            }
-            info!("transferred");
         });
-        info!("funded: {} left: {}", new_funded.len(), notfunded.len());
+
+        info!("funded: {} left: {}", new_funded.len(), not_funded.len());
         funded = new_funded;
+        funded_funds = to_lamports;
     }
 }
 
@@ -678,14 +710,14 @@ pub fn airdrop_lamports(
     client: &T,
     faucet_addr: &SocketAddr,
     id: &Keypair,
-    tx_count: u64,
+    desired_balance: u64,
 ) -> Result<()> {
     let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
     metrics_submit_lamport_balance(starting_balance);
     info!("starting balance {}", starting_balance);
 
-    if starting_balance < tx_count {
-        let airdrop_amount = tx_count - starting_balance;
+    if starting_balance < desired_balance {
+        let airdrop_amount = desired_balance - starting_balance;
         info!(
             "Airdropping {:?} lamports from {} for {}",
             airdrop_amount,
@@ -810,17 +842,6 @@ fn compute_and_report_stats(
     );
 }
 
-// First transfer 2/3 of the lamports to the dest accounts
-// then ping-pong 1/3 of the lamports back to the other account
-// this leaves 1/3 lamport buffer in each account
-fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
-    if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
-        return false;
-    }
-
-    i % (keypair_chunks * num_lamports_per_account / 3) == 0
-}
-
 pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
     let mut seed = [0u8; 32];
     seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
@@ -1004,23 +1025,25 @@ fn fund_move_keys(
     info!("done funding keys, took {} ms", funding_time.as_ms());
 }
 
-pub fn generate_and_fund_keypairs<T: Client>(
-    client: &T,
+pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
+    client: Arc<T>,
     faucet_addr: Option<SocketAddr>,
     funding_key: &Keypair,
     keypair_count: usize,
     lamports_per_account: u64,
     use_move: bool,
-) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
+) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
     info!("Creating {} keypairs...", keypair_count);
     let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
     info!("Get lamports...");
 
-    // Sample the first keypair, see if it has lamports, if so then resume.
-    // This logic is to prevent lamport loss on repeated solana-bench-tps executions
-    let last_keypair_balance = client
-        .get_balance(&keypairs[keypair_count - 1].pubkey())
-        .unwrap_or(0);
+    // Sample the first keypair, to prevent lamport loss on repeated solana-bench-tps executions
+    let first_key = keypairs[0].pubkey();
+    let first_keypair_balance = client.get_balance(&first_key).unwrap_or(0);
+
+    // Sample the last keypair, to check if funding was already completed
+    let last_key = keypairs[keypair_count - 1].pubkey();
+    let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);
 
     #[cfg(feature = "move")]
     let mut move_keypairs_ret = None;
@@ -1028,31 +1051,38 @@ pub fn generate_and_fund_keypairs(
     #[cfg(not(feature = "move"))]
     let move_keypairs_ret = None;
 
-    if lamports_per_account > last_keypair_balance {
-        let (_blockhash, fee_calculator) = get_recent_blockhash(client);
-        let account_desired_balance =
-            lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
-        let extra_fees = extra * fee_calculator.max_lamports_per_signature;
-        let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
+    // Repeated runs will eat up keypair balances from transaction fees. In order to quickly
+    // start another bench-tps run without re-funding all of the keypairs, check if the
+    // keypairs still have at least 80% of the expected funds. That should be enough to
+    // pay for the transaction fees in a new run.
+    let enough_lamports = 8 * lamports_per_account / 10;
+    if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
+        let (_blockhash, fee_calculator) = get_recent_blockhash(client.as_ref());
+        let max_fee = fee_calculator.max_lamports_per_signature;
+        let extra_fees = extra * max_fee;
+        let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
+        let mut total = lamports_per_account * total_keypairs + extra_fees;
         if use_move {
             total *= 3;
         }
 
-        info!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
-              last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
-              account_desired_balance, total
-        );
+        let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
+        info!(
+            "Funding keypair balance: {} max_fee: {} lamports_per_account: {} extra: {} total: {}",
+            funding_key_balance, max_fee, lamports_per_account, extra, total
+        );
 
         if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
-            airdrop_lamports(client, &faucet_addr.unwrap(), funding_key, total)?;
+            airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
         }
 
         #[cfg(feature = "move")]
         {
            if use_move {
-                let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
-                let libra_mint_program_id = upload_mint_script(&funding_key, client);
-                let libra_pay_program_id = upload_payment_script(&funding_key, client);
+                let libra_genesis_keypair =
+                    create_genesis(&funding_key, client.as_ref(), 10_000_000);
+                let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
+                let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());
 
                 // Generate another set of keypairs for move accounts.
                 // Still fund the solana ones which will be used for fees.
@@ -1060,7 +1090,7 @@ pub fn generate_and_fund_keypairs(
                 let mut rnd = GenKeys::new(seed);
                 let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
                 fund_move_keys(
-                    client,
+                    client.as_ref(),
                     funding_key,
                     &move_keypairs,
                     total / 3,
@@ -1085,15 +1115,15 @@ pub fn generate_and_fund_keypairs(
             funding_key,
             &keypairs,
             total,
-            fee_calculator.max_lamports_per_signature,
-            extra,
+            max_fee,
+            lamports_per_account,
         );
     }
 
     // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
     keypairs.truncate(keypair_count);
 
-    Ok((keypairs, move_keypairs_ret, last_keypair_balance))
+    Ok((keypairs, move_keypairs_ret))
 }
 
 #[cfg(test)]
@@ -1105,30 +1135,11 @@ mod tests {
     use solana_sdk::fee_calculator::FeeCalculator;
     use solana_sdk::genesis_config::create_genesis_config;
 
-    #[test]
-    fn test_switch_directions() {
-        assert_eq!(should_switch_directions(30, 1, 0), false);
-        assert_eq!(should_switch_directions(30, 1, 1), false);
-        assert_eq!(should_switch_directions(30, 1, 20), true);
-        assert_eq!(should_switch_directions(30, 1, 21), false);
-        assert_eq!(should_switch_directions(30, 1, 30), true);
-        assert_eq!(should_switch_directions(30, 1, 90), true);
-        assert_eq!(should_switch_directions(30, 1, 91), false);
-
-        assert_eq!(should_switch_directions(30, 2, 0), false);
-        assert_eq!(should_switch_directions(30, 2, 1), false);
-        assert_eq!(should_switch_directions(30, 2, 20), false);
-        assert_eq!(should_switch_directions(30, 2, 40), true);
-        assert_eq!(should_switch_directions(30, 2, 90), false);
-        assert_eq!(should_switch_directions(30, 2, 100), true);
-        assert_eq!(should_switch_directions(30, 2, 101), false);
-    }
-
     #[test]
     fn test_bench_tps_bank_client() {
         let (genesis_config, id) = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);
-        let clients = vec![BankClient::new(bank)];
+        let client = Arc::new(BankClient::new(bank));
 
         let mut config = Config::default();
         config.id = id;
@@ -1136,23 +1147,24 @@ mod tests {
         config.duration = Duration::from_secs(5);
 
         let keypair_count = config.tx_count * config.keypair_multiplier;
-        let (keypairs, _move_keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&clients[0], None, &config.id, keypair_count, 20, false)
+        let (keypairs, _move_keypairs) =
+            generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
                 .unwrap();
 
-        do_bench_tps(clients, config, keypairs, 0, None);
+        do_bench_tps(client, config, keypairs, None);
     }
 
     #[test]
     fn test_bench_tps_fund_keys() {
         let (genesis_config, id) = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);
-        let client = BankClient::new(bank);
+        let client = Arc::new(BankClient::new(bank));
         let keypair_count = 20;
         let lamports = 20;
 
-        let (keypairs, _move_keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
+        let (keypairs, _move_keypairs) =
+            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
+                .unwrap();
 
         for kp in &keypairs {
             assert_eq!(
@@ -1170,23 +1182,16 @@ mod tests {
         let fee_calculator = FeeCalculator::new(11, 0);
         genesis_config.fee_calculator = fee_calculator;
         let bank = Bank::new(&genesis_config);
-        let client = BankClient::new(bank);
+        let client = Arc::new(BankClient::new(bank));
         let keypair_count = 20;
         let lamports = 20;
 
-        let (keypairs, _move_keypairs, _keypair_balance) =
-            generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
+        let (keypairs, _move_keypairs) =
+            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
+                .unwrap();
 
-        let max_fee = client
-            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
-            .unwrap()
-            .1
-            .max_lamports_per_signature;
         for kp in &keypairs {
-            assert_eq!(
-                client.get_balance(&kp.pubkey()).unwrap(),
-                lamports + max_fee
-            );
+            assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
         }
     }
 }
diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs
index c66f061a84..a7b3822d22 100644
--- a/bench-tps/src/main.rs
+++ b/bench-tps/src/main.rs
@@ -6,7 +6,7 @@ use solana_genesis::Base64Account;
 use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::system_program;
-use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit};
+use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
 
 /// Number of signatures for all transactions in ~1 week at ~100K TPS
 pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
@@ -82,12 +82,12 @@ fn main() {
             );
             exit(1);
         }
-        client
+        Arc::new(client)
     } else {
-        get_client(&nodes)
+        Arc::new(get_client(&nodes))
    };
 
-    let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
+    let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
         let path = Path::new(&client_ids_and_stake_file);
         let file = File::open(path).unwrap();
 
@@ -117,10 +117,10 @@ fn main() {
         // This prevents the amount of storage needed for bench-tps accounts from creeping up
         // across multiple runs.
         keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
-        (keypairs, None, last_balance)
+        (keypairs, None)
     } else {
         generate_and_fund_keypairs(
-            &client,
+            client.clone(),
             Some(*faucet_addr),
             &id,
             keypair_count,
@@ -133,11 +133,5 @@ fn main() {
         })
     };
 
-    do_bench_tps(
-        vec![client],
-        cli_config,
-        keypairs,
-        keypair_balance,
-        move_keypairs,
-    );
+    do_bench_tps(client, cli_config, keypairs, move_keypairs);
 }
diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs
index 7655709403..7d826cc05b 100644
--- a/bench-tps/tests/bench_tps.rs
+++ b/bench-tps/tests/bench_tps.rs
@@ -9,7 +9,7 @@ use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
 #[cfg(feature = "move")]
 use solana_sdk::move_loader::solana_move_loader_program;
 use solana_sdk::signature::{Keypair, KeypairUtil};
-use std::sync::mpsc::channel;
+use std::sync::{mpsc::channel, Arc};
 use std::time::Duration;
 
 fn test_bench_tps_local_cluster(config: Config) {
@@ -36,10 +36,10 @@ fn test_bench_tps_local_cluster(config: Config) {
         100_000_000,
     );
 
-    let client = create_client(
+    let client = Arc::new(create_client(
         (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
         VALIDATOR_PORT_RANGE,
-    );
+    ));
 
     let (addr_sender, addr_receiver) = channel();
     run_local_faucet(faucet_keypair, addr_sender, None);
@@ -48,8 +48,8 @@ fn test_bench_tps_local_cluster(config: Config) {
     let lamports_per_account = 100;
 
     let keypair_count = config.tx_count * config.keypair_multiplier;
-    let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
-        &client,
+    let (keypairs, move_keypairs) = generate_and_fund_keypairs(
+        client.clone(),
         Some(faucet_addr),
         &config.id,
         keypair_count,
@@ -58,7 +58,7 @@ fn test_bench_tps_local_cluster(config: Config) {
     )
     .unwrap();
 
-    let _total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
+    let _total = do_bench_tps(client, config, keypairs, move_keypairs);
 
     #[cfg(not(debug_assertions))]
     assert!(_total > 100);