Clippy cleanup for all targets and nightly rust (also support 1.44.0) (#10445)
* address warnings from 'rustup run beta cargo clippy --workspace'
  minor refactoring in:
  - cli/src/cli.rs
  - cli/src/offline/blockhash_query.rs
  - logger/src/lib.rs
  - runtime/src/accounts_db.rs
  expect some performance improvement in AccountsDB::clean_accounts()
* address warnings from 'rustup run beta cargo clippy --workspace --tests'
* address warnings from 'rustup run nightly cargo clippy --workspace --all-targets'
* rustfmt
* fix warning stragglers
* properly fix clippy warnings in test_vote_subscribe();
  replace ref-to-arc with ref parameters where arc not cloned
* Remove lock around JsonRpcRequestProcessor (#10417) automerge
* make ancestors parameter optional to avoid forcing construction of empty hash maps

Co-authored-by: Greg Fitzgerald <greg@solana.com>
This commit is contained in:
parent
fa3a6c5584
commit
e23340d89e
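The recurring refactor in this commit is passing `&T` instead of `&Arc<T>` wherever the callee never clones the Arc. A minimal standalone sketch (illustrative only, not code from this commit; the `Subscriptions` type is a hypothetical stand-in for RpcSubscriptions/VoteTracker) of why call sites keep working unchanged:

use std::sync::Arc;

// Hypothetical stand-in for a shared service object.
struct Subscriptions {
    count: usize,
}

// Before: forces every caller to hold an Arc even though it is never cloned here.
fn notify_arc(subs: &Arc<Subscriptions>) -> usize {
    subs.count
}

// After: borrows the inner value; an &Arc<Subscriptions> deref-coerces to &Subscriptions.
fn notify(subs: &Subscriptions) -> usize {
    subs.count
}

fn main() {
    let subs = Arc::new(Subscriptions { count: 3 });
    assert_eq!(notify_arc(&subs), 3);
    assert_eq!(notify(&subs), 3); // same call syntax, thanks to Deref coercion
}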
@@ -209,7 +209,7 @@ fn main() {
  bank.clear_signatures();
  }

- let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk);
+ let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
  let ledger_path = get_tmp_ledger_path!();
  {
  let blockstore = Arc::new(

@@ -754,25 +754,18 @@ pub fn parse_command(
  ("airdrop", Some(matches)) => {
  let faucet_port = matches
  .value_of("faucet_port")
- .unwrap()
+ .ok_or_else(|| CliError::BadParameter("Missing faucet port".to_string()))?
  .parse()
- .or_else(|err| {
-     Err(CliError::BadParameter(format!(
-         "Invalid faucet port: {}",
-         err
-     )))
- })?;
+ .map_err(|err| CliError::BadParameter(format!("Invalid faucet port: {}", err)))?;

- let faucet_host = if let Some(faucet_host) = matches.value_of("faucet_host") {
-     Some(solana_net_utils::parse_host(faucet_host).or_else(|err| {
-         Err(CliError::BadParameter(format!(
-             "Invalid faucet host: {}",
-             err
-         )))
-     })?)
- } else {
-     None
- };
+ let faucet_host = matches
+     .value_of("faucet_host")
+     .map(|faucet_host| {
+         solana_net_utils::parse_host(faucet_host).map_err(|err| {
+             CliError::BadParameter(format!("Invalid faucet host: {}", err))
+         })
+     })
+     .transpose()?;
  let pubkey = pubkey_of_signer(matches, "to", wallet_manager)?;
  let signers = if pubkey.is_some() {
  vec![]
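The hunk above follows the clippy guidance of replacing `.or_else(|err| Err(...))?` with `.map_err(...)?`: only the error variant needs converting, so no closure returning a full Result is required. A small self-contained sketch of the same transformation (illustrative; not taken from the repository):

fn parse_port(s: &str) -> Result<u16, String> {
    // Before (lints): s.parse().or_else(|e| Err(format!("invalid port: {}", e)))
    // After: map_err converts the error side only.
    s.parse::<u16>().map_err(|e| format!("invalid port: {}", e))
}

fn main() {
    assert_eq!(parse_port("8899"), Ok(8899));
    assert!(parse_port("not-a-port").is_err());
}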
@@ -659,7 +659,7 @@ pub fn process_get_epoch_info(
  commitment_config: CommitmentConfig,
  ) -> ProcessResult {
  let epoch_info: CliEpochInfo = rpc_client
- .get_epoch_info_with_commitment(commitment_config.clone())?
+ .get_epoch_info_with_commitment(commitment_config)?
  .into();
  Ok(config.output_format.formatted_string(&epoch_info))
  }

@@ -673,7 +673,7 @@ pub fn process_get_slot(
  rpc_client: &RpcClient,
  commitment_config: CommitmentConfig,
  ) -> ProcessResult {
- let slot = rpc_client.get_slot_with_commitment(commitment_config.clone())?;
+ let slot = rpc_client.get_slot_with_commitment(commitment_config)?;
  Ok(slot.to_string())
  }

@@ -681,7 +681,7 @@ pub fn process_get_epoch(
  rpc_client: &RpcClient,
  commitment_config: CommitmentConfig,
  ) -> ProcessResult {
- let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
+ let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config)?;
  Ok(epoch_info.epoch.to_string())
  }

@@ -868,7 +868,7 @@ pub fn process_supply(
  commitment_config: CommitmentConfig,
  print_accounts: bool,
  ) -> ProcessResult {
- let supply_response = rpc_client.supply_with_commitment(commitment_config.clone())?;
+ let supply_response = rpc_client.supply_with_commitment(commitment_config)?;
  let mut supply: CliSupply = supply_response.value.into();
  supply.print_accounts = print_accounts;
  Ok(config.output_format.formatted_string(&supply))

@@ -878,7 +878,7 @@ pub fn process_total_supply(
  rpc_client: &RpcClient,
  commitment_config: CommitmentConfig,
  ) -> ProcessResult {
- let total_supply = rpc_client.total_supply_with_commitment(commitment_config.clone())?;
+ let total_supply = rpc_client.total_supply_with_commitment(commitment_config)?;
  Ok(format!("{} SOL", lamports_to_sol(total_supply)))
  }

@@ -886,8 +886,7 @@ pub fn process_get_transaction_count(
  rpc_client: &RpcClient,
  commitment_config: CommitmentConfig,
  ) -> ProcessResult {
- let transaction_count =
-     rpc_client.get_transaction_count_with_commitment(commitment_config.clone())?;
+ let transaction_count = rpc_client.get_transaction_count_with_commitment(commitment_config)?;
  Ok(transaction_count.to_string())
  }
@@ -952,10 +951,8 @@ pub fn process_ping(
  Ok(signature) => {
  let transaction_sent = Instant::now();
  loop {
- let signature_status = rpc_client.get_signature_status_with_commitment(
-     &signature,
-     commitment_config.clone(),
- )?;
+ let signature_status = rpc_client
+     .get_signature_status_with_commitment(&signature, commitment_config)?;
  let elapsed_time = Instant::now().duration_since(transaction_sent);
  if let Some(transaction_status) = signature_status {
  match transaction_status {

@@ -35,16 +35,11 @@ impl Source {
  Ok(res)
  }
  Self::NonceAccount(ref pubkey) => {
- let res = nonce::get_account(rpc_client, pubkey)
-     .and_then(|ref a| nonce::data_from_account(a))
-     .and_then(|d| {
-         if d.blockhash == *blockhash {
-             Ok(Some(d.fee_calculator))
-         } else {
-             Ok(None)
-         }
-     })?;
- Ok(res)
+ let res = nonce::get_account(rpc_client, pubkey)?;
+ let res = nonce::data_from_account(&res)?;
+ Ok(Some(res)
+     .filter(|d| d.blockhash == *blockhash)
+     .map(|d| d.fee_calculator))
  }
  }
  }
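The nonce-account branch above trades a hand-written `if d.blockhash == *blockhash { Some(...) } else { None }` for `Option::filter` followed by `map`. A standalone sketch of that shape (hypothetical types, for illustration only):

#[derive(Clone, PartialEq, Debug)]
struct FeeCalculator(u64);

struct NonceData {
    blockhash: u64,
    fee_calculator: FeeCalculator,
}

fn fee_if_match(data: NonceData, blockhash: u64) -> Option<FeeCalculator> {
    // filter keeps the value only when the predicate holds; map then projects the field.
    Some(data)
        .filter(|d| d.blockhash == blockhash)
        .map(|d| d.fee_calculator)
}

fn main() {
    let d = NonceData { blockhash: 7, fee_calculator: FeeCalculator(42) };
    assert_eq!(fee_if_match(d, 7), Some(FeeCalculator(42)));
}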
@@ -664,7 +664,7 @@ impl RpcClient {
  ) -> ClientResult<u64> {
  let now = Instant::now();
  loop {
- match self.get_balance_with_commitment(&pubkey, commitment_config.clone()) {
+ match self.get_balance_with_commitment(&pubkey, commitment_config) {
  Ok(bal) => {
  return Ok(bal.value);
  }

@@ -699,8 +699,7 @@ impl RpcClient {
  ) -> Option<u64> {
  const LAST: usize = 30;
  for run in 0..LAST {
- let balance_result =
-     self.poll_get_balance_with_commitment(pubkey, commitment_config.clone());
+ let balance_result = self.poll_get_balance_with_commitment(pubkey, commitment_config);
  if expected_balance.is_none() {
  return balance_result.ok();
  }

@@ -734,7 +733,7 @@ impl RpcClient {
  let now = Instant::now();
  loop {
  if let Ok(Some(_)) =
- self.get_signature_status_with_commitment(&signature, commitment_config.clone())
+ self.get_signature_status_with_commitment(&signature, commitment_config)
  {
  break;
  }

@@ -136,12 +136,12 @@ mod tests {
  fn test_build_request_json() {
  let test_request = RpcRequest::GetAccountInfo;
  let addr = json!("deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
- let request = test_request.build_request_json(1, json!([addr.clone()]));
+ let request = test_request.build_request_json(1, json!([addr]));
  assert_eq!(request["method"], "getAccountInfo");
  assert_eq!(request["params"], json!([addr]));

  let test_request = RpcRequest::GetBalance;
- let request = test_request.build_request_json(1, json!([addr.clone()]));
+ let request = test_request.build_request_json(1, json!([addr]));
  assert_eq!(request["method"], "getBalance");

  let test_request = RpcRequest::GetEpochInfo;

@@ -186,13 +186,12 @@ mod tests {

  // Test request with CommitmentConfig and no params
  let test_request = RpcRequest::GetRecentBlockhash;
- let request = test_request.build_request_json(1, json!([commitment_config.clone()]));
+ let request = test_request.build_request_json(1, json!([commitment_config]));
  assert_eq!(request["params"], json!([commitment_config.clone()]));

  // Test request with CommitmentConfig and params
  let test_request = RpcRequest::GetBalance;
- let request =
-     test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
+ let request = test_request.build_request_json(1, json!([addr, commitment_config]));
  assert_eq!(request["params"], json!([addr, commitment_config]));
  }
  }
@@ -109,7 +109,6 @@ fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Tra
  fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
  let progs = 4;
  (0..txes)
- .into_iter()
  .map(|_| {
  let mut instructions = vec![];
  let from_key = Keypair::new();

@@ -181,7 +180,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
  assert!(r.is_ok(), "sanity parallel execution");
  }
  bank.clear_signatures();
- let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
+ let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH);
  let ledger_path = get_tmp_ledger_path!();
  {
  let blockstore = Arc::new(

@@ -207,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
  // If it is dropped before poh_service, then poh_service will error when
  // calling send() on the channel.
  let signal_receiver = Arc::new(signal_receiver);
- let signal_receiver2 = signal_receiver.clone();
+ let signal_receiver2 = signal_receiver;
  bencher.iter(move || {
  let now = Instant::now();
  let mut sent = 0;

@@ -262,7 +261,7 @@ fn simulate_process_entries(
  mint_keypair: &Keypair,
  mut tx_vector: Vec<Transaction>,
  genesis_config: &GenesisConfig,
- keypairs: &Vec<Keypair>,
+ keypairs: &[Keypair],
  initial_lamports: u64,
  num_accounts: usize,
  ) {

@@ -288,7 +287,7 @@ fn simulate_process_entries(
  hash: next_hash(&bank.last_blockhash(), 1, &tx_vector),
  transactions: tx_vector,
  };
- process_entries(&bank, &vec![entry], randomize_txs, None).unwrap();
+ process_entries(&bank, &[entry], randomize_txs, None).unwrap();
  }

  fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) {
@@ -1,6 +1,4 @@
  #![feature(test)]
- use rand;
-
  extern crate solana_ledger;
  extern crate test;

@@ -22,7 +22,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
  solana_logger::setup();
  let leader_pubkey = Pubkey::new_rand();
  let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
- let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
+ let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
  let socket = UdpSocket::bind("0.0.0.0:0").unwrap();

  const NUM_SHREDS: usize = 32;

@@ -37,7 +37,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
  }
  let stakes = Arc::new(stakes);
  let cluster_info = Arc::new(cluster_info);
- let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone()));
+ let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes));
  let shreds = Arc::new(shreds);
  let last_datapoint = Arc::new(AtomicU64::new(0));
  bencher.iter(move || {
@@ -14,7 +14,6 @@ const NUM_ENTRIES: usize = 800;
  fn bench_poh_verify_ticks(bencher: &mut Bencher) {
  let zero = Hash::default();
  let mut cur_hash = hash(&zero.as_ref());
- let start = *&cur_hash;

  let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
  for _ in 0..NUM_ENTRIES {

@@ -22,7 +21,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
  }

  bencher.iter(|| {
- ticks.verify(&start);
+ ticks.verify(&cur_hash);
  })
  }

@@ -30,7 +29,6 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
  fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
  let zero = Hash::default();
  let mut cur_hash = hash(&zero.as_ref());
- let start = *&cur_hash;

  let keypair1 = Keypair::new();
  let pubkey1 = keypair1.pubkey();

@@ -42,6 +40,6 @@ fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
  }

  bencher.iter(|| {
- ticks.verify(&start);
+ ticks.verify(&cur_hash);
  })
  }
@@ -65,7 +65,10 @@ fn bench_retransmitter(bencher: &mut Bencher) {
  let tx = test_tx();
  const NUM_PACKETS: usize = 50;
  let chunk_size = NUM_PACKETS / (4 * NUM_THREADS);
- let batches = to_packets_chunked(&vec![tx; NUM_PACKETS], chunk_size);
+ let batches = to_packets_chunked(
+     &std::iter::repeat(tx).take(NUM_PACKETS).collect::<Vec<_>>(),
+     chunk_size,
+ );
  info!("batches: {}", batches.len());

  let retransmitter_handles = retransmitter(

@@ -80,7 +83,6 @@ fn bench_retransmitter(bencher: &mut Bencher) {
  bencher.iter(move || {
  let peer_sockets1 = peer_sockets.clone();
  let handles: Vec<_> = (0..NUM_PEERS)
- .into_iter()
  .map(|p| {
  let peer_sockets2 = peer_sockets1.clone();
  let total2 = total.clone();

@@ -37,16 +37,14 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
  let from_keypair = Keypair::new();
  let to_keypair = Keypair::new();
  let txs: Vec<_> = (0..len)
- .into_iter()
  .map(|_| {
  let amount = thread_rng().gen();
- let tx = system_transaction::transfer(
+ system_transaction::transfer(
  &from_keypair,
  &to_keypair.pubkey(),
  amount,
  Hash::default(),
- );
- tx
+ )
  })
  .collect();
  to_packets_chunked(&txs, chunk_size)
@@ -438,7 +438,7 @@ impl ClusterInfo {

  pub fn update_contact_info<F>(&self, modify: F)
  where
- F: FnOnce(&mut ContactInfo) -> (),
+ F: FnOnce(&mut ContactInfo),
  {
  let my_id = self.id();
  modify(&mut self.my_contact_info.write().unwrap());

@@ -1917,19 +1917,18 @@ impl ClusterInfo {
  .into_iter()
  .filter_map(|(from, prune_set)| {
  inc_new_counter_debug!("cluster_info-push_message-prunes", prune_set.len());
- me.lookup_contact_info(&from, |ci| ci.clone())
-     .and_then(|ci| {
-         let mut prune_msg = PruneData {
-             pubkey: self_id,
-             prunes: prune_set.into_iter().collect(),
-             signature: Signature::default(),
-             destination: from,
-             wallclock: timestamp(),
-         };
-         prune_msg.sign(&me.keypair);
-         let rsp = Protocol::PruneMessage(self_id, prune_msg);
-         Some((ci.gossip, rsp))
-     })
+ me.lookup_contact_info(&from, |ci| ci.clone()).map(|ci| {
+     let mut prune_msg = PruneData {
+         pubkey: self_id,
+         prunes: prune_set.into_iter().collect(),
+         signature: Signature::default(),
+         destination: from,
+         wallclock: timestamp(),
+     };
+     prune_msg.sign(&me.keypair);
+     let rsp = Protocol::PruneMessage(self_id, prune_msg);
+     (ci.gossip, rsp)
+ })
  })
  .collect();
  if rsp.is_empty() {

@@ -2932,7 +2931,7 @@ mod tests {
  assert_eq!(slots.len(), 1);
  assert!(since.is_some());

- let (slots, since2) = cluster_info.get_epoch_slots_since(since.clone());
+ let (slots, since2) = cluster_info.get_epoch_slots_since(since);
  assert!(slots.is_empty());
  assert_eq!(since2, since);
  }
@@ -385,7 +385,7 @@ impl ClusterInfoVoteListener {
  &vote_txs_receiver,
  &vote_tracker,
  root_bank.slot(),
- subscriptions.clone(),
+ &subscriptions,
  epoch_stakes,
  ) {
  match e {

@@ -404,9 +404,9 @@ impl ClusterInfoVoteListener {
  #[cfg(test)]
  pub fn get_and_process_votes_for_tests(
  vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
- vote_tracker: &Arc<VoteTracker>,
+ vote_tracker: &VoteTracker,
  last_root: Slot,
- subscriptions: Arc<RpcSubscriptions>,
+ subscriptions: &RpcSubscriptions,
  ) -> Result<()> {
  Self::get_and_process_votes(
  vote_txs_receiver,

@@ -419,9 +419,9 @@ impl ClusterInfoVoteListener {

  fn get_and_process_votes(
  vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
- vote_tracker: &Arc<VoteTracker>,
+ vote_tracker: &VoteTracker,
  last_root: Slot,
- subscriptions: Arc<RpcSubscriptions>,
+ subscriptions: &RpcSubscriptions,
  epoch_stakes: Option<&EpochStakes>,
  ) -> Result<()> {
  let timer = Duration::from_millis(200);

@@ -443,7 +443,7 @@ impl ClusterInfoVoteListener {
  vote_tracker: &VoteTracker,
  vote_txs: Vec<Transaction>,
  root: Slot,
- subscriptions: Arc<RpcSubscriptions>,
+ subscriptions: &RpcSubscriptions,
  epoch_stakes: Option<&EpochStakes>,
  ) {
  let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();

@@ -574,7 +574,7 @@ impl ClusterInfoVoteListener {
  fn notify_for_stake_change(
  current_stake: u64,
  previous_stake: u64,
- subscriptions: &Arc<RpcSubscriptions>,
+ subscriptions: &RpcSubscriptions,
  epoch_stakes: Option<&EpochStakes>,
  slot: Slot,
  ) {

@@ -804,7 +804,7 @@ mod tests {
  &votes_receiver,
  &vote_tracker,
  0,
- subscriptions,
+ &subscriptions,
  None,
  )
  .unwrap();

@@ -854,7 +854,7 @@ mod tests {
  &votes_receiver,
  &vote_tracker,
  0,
- subscriptions,
+ &subscriptions,
  None,
  )
  .unwrap();

@@ -974,13 +974,7 @@ mod tests {
  &validator0_keypairs.vote_keypair,
  )];

- ClusterInfoVoteListener::process_votes(
-     &vote_tracker,
-     vote_tx,
-     0,
-     subscriptions.clone(),
-     None,
- );
+ ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0, &subscriptions, None);
  let ref_count = Arc::strong_count(
  &vote_tracker
  .keys

@@ -1031,7 +1025,7 @@ mod tests {
  })
  .collect();

- ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
+ ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, &subscriptions, None);

  let ref_count = Arc::strong_count(
  &vote_tracker
@@ -165,7 +165,7 @@ impl Tower {
  let key = all_pubkeys.get_or_insert(&key);
  lockout_intervals
  .entry(vote.expiration_slot())
- .or_insert_with(|| vec![])
+ .or_insert_with(Vec::new)
  .push((vote.slot, key));
  }

@@ -152,7 +152,7 @@ impl CrdsGossipPush {
  let new_value = crds.new_versioned(now, value);
  let value_hash = new_value.value_hash;
  if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
- received_set.insert(from.clone());
+ received_set.insert(*from);
  return Err(CrdsGossipError::PushMessageAlreadyReceived);
  }
  let old = crds.insert_versioned(new_value);

@@ -160,7 +160,7 @@ impl CrdsGossipPush {
  return Err(CrdsGossipError::PushMessageOldVersion);
  }
  let mut received_set = HashSet::new();
- received_set.insert(from.clone());
+ received_set.insert(*from);
  self.push_messages.insert(label, value_hash);
  self.received_cache.insert(value_hash, (now, received_set));
  Ok(old.ok().and_then(|opt| opt))
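Several hunks in this commit also swap `or_insert_with(|| vec![])` for `or_insert_with(Vec::new)`: clippy flags the closure as redundant because the function itself can be passed directly. A small sketch (illustrative only, not repository code):

use std::collections::HashMap;

fn main() {
    let mut lockouts: HashMap<u64, Vec<u64>> = HashMap::new();
    // Passing Vec::new directly avoids the redundant `|| vec![]` closure.
    lockouts.entry(5).or_insert_with(Vec::new).push(100);
    lockouts.entry(5).or_insert_with(Vec::new).push(200);
    assert_eq!(lockouts[&5], vec![100, 200]);
}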
@@ -459,7 +459,7 @@ mod test {
  fn test_keys_and_values() {
  let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
  assert_eq!(v.wallclock(), 0);
- let key = v.clone().contact_info().unwrap().id;
+ let key = v.contact_info().unwrap().id;
  assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));

  let v = CrdsValue::new_unsigned(CrdsData::Vote(

@@ -467,7 +467,7 @@ mod test {
  Vote::new(&Pubkey::default(), test_tx(), 0),
  ));
  assert_eq!(v.wallclock(), 0);
- let key = v.clone().vote().unwrap().from;
+ let key = v.vote().unwrap().from;
  assert_eq!(v.label(), CrdsValueLabel::Vote(0, key));

  let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(

@@ -475,7 +475,7 @@ mod test {
  LowestSlot::new(Pubkey::default(), 0, 0),
  ));
  assert_eq!(v.wallclock(), 0);
- let key = v.clone().lowest_slot().unwrap().from;
+ let key = v.lowest_slot().unwrap().from;
  assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key));
  }

@@ -262,7 +262,7 @@ fn make_gossip_node(
  cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
  }
  let cluster_info = Arc::new(cluster_info);
- let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
+ let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &exit);
  (gossip_service, ip_echo, cluster_info)
  }
@@ -721,7 +721,7 @@ mod tests {
  assert_eq!(poh_recorder.tick_height, 5);
  assert!(poh_recorder.working_bank.is_none());
  let mut num_entries = 0;
- while let Ok(_) = entry_receiver.try_recv() {
+ while entry_receiver.try_recv().is_ok() {
  num_entries += 1;
  }
  assert_eq!(num_entries, 3);

@@ -1409,7 +1409,7 @@ mod tests {
  for _ in 0..(bank.ticks_per_slot() * 2) {
  poh_recorder.tick();
  }
- poh_recorder.set_bank(&bank.clone());
+ poh_recorder.set_bank(&bank);
  assert_eq!(Some(false), bank.check_hash_age(&genesis_hash, 1));
  }
  }
@@ -1289,11 +1289,11 @@ impl ReplayStage {
  let newly_voted_pubkeys = slot_vote_tracker
  .as_ref()
  .and_then(|slot_vote_tracker| slot_vote_tracker.write().unwrap().get_updates())
- .unwrap_or_else(|| vec![]);
+ .unwrap_or_else(Vec::new);

  let cluster_slot_pubkeys = cluster_slot_pubkeys
  .map(|v| v.read().unwrap().keys().cloned().collect())
- .unwrap_or_else(|| vec![]);
+ .unwrap_or_else(Vec::new);

  Self::update_fork_propagated_threshold_from_votes(
  progress,
@@ -431,7 +431,7 @@ impl RetransmitStage {
  epoch_schedule,
  duplicate_slots_reset_sender,
  };
- let leader_schedule_cache = leader_schedule_cache.clone();
+ let leader_schedule_cache_clone = leader_schedule_cache.clone();
  let window_service = WindowService::new(
  blockstore,
  cluster_info.clone(),

@@ -440,7 +440,7 @@ impl RetransmitStage {
  repair_socket,
  exit,
  repair_info,
- &leader_schedule_cache.clone(),
+ leader_schedule_cache,
  move |id, shred, working_bank, last_root| {
  let is_connected = cfg
  .as_ref()

@@ -449,7 +449,7 @@ impl RetransmitStage {
  let rv = should_retransmit_and_persist(
  shred,
  working_bank,
- &leader_schedule_cache,
+ &leader_schedule_cache_clone,
  id,
  last_root,
  shred_version,
@@ -1171,7 +1171,7 @@ impl RpcSol for RpcSolImpl {
  leader_schedule.get_slot_leaders().iter().enumerate()
  {
  let pubkey = pubkey.to_string();
- map.entry(pubkey).or_insert_with(|| vec![]).push(slot_index);
+ map.entry(pubkey).or_insert_with(Vec::new).push(slot_index);
  }
  map
  },

@@ -1314,7 +1314,7 @@ impl RpcSol for RpcSolImpl {
  let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?;
  let pubkey = verify_pubkey(pubkey_str)?;

- let blockhash = meta.bank(commitment.clone())?.confirmed_last_blockhash().0;
+ let blockhash = meta.bank(commitment)?.confirmed_last_blockhash().0;
  let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash)
  .map_err(|err| {
  info!("request_airdrop_transaction failed: {:?}", err);
@@ -387,7 +387,7 @@ mod tests {
  };

  fn process_transaction_and_notify(
- bank_forks: &Arc<RwLock<BankForks>>,
+ bank_forks: &RwLock<BankForks>,
  tx: &Transaction,
  subscriptions: &RpcSubscriptions,
  current_slot: Slot,

@@ -921,13 +921,11 @@ mod tests {
  });

  // Process votes and check they were notified.
- // FIX-ME-BETTER-LATER - clone below is required for testcase to pass
- #[allow(clippy::redundant_clone)]
  ClusterInfoVoteListener::get_and_process_votes_for_tests(
  &votes_receiver,
  &vote_tracker,
  0,
- rpc.subscriptions.clone(),
+ &rpc.subscriptions,
  )
  .unwrap();
@@ -115,7 +115,7 @@ pub struct ValidatorExit {
  }

  impl ValidatorExit {
- pub fn register_exit(&mut self, exit: Box<dyn FnOnce() -> () + Send + Sync>) {
+ pub fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
  self.exits.push(exit);
  }

@@ -125,7 +125,7 @@ fn run_insert<F>(
  metrics: &mut BlockstoreInsertionMetrics,
  ) -> Result<()>
  where
- F: Fn(Shred) -> (),
+ F: Fn(Shred),
  {
  let timer = Duration::from_millis(200);
  let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;

@@ -503,8 +503,8 @@ impl WindowService {

  fn should_exit_on_error<F, H>(e: Error, handle_timeout: &mut F, handle_error: &H) -> bool
  where
- F: FnMut() -> (),
+ F: FnMut(),
- H: Fn() -> (),
+ H: Fn(),
  {
  match e {
  Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => true,
@@ -33,7 +33,7 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSoc
  /// tests that actually use this function are below
  fn run_gossip_topo<F>(num: usize, topo: F)
  where
- F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>) -> (),
+ F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
  {
  let exit = Arc::new(AtomicBool::new(false));
  let listen: Vec<_> = (0..num).map(|_| test_node(&exit)).collect();
@@ -142,22 +142,22 @@ impl Faucet {
  }
  }
  pub fn process_faucet_request(&mut self, bytes: &BytesMut) -> Result<Bytes, io::Error> {
- let req: FaucetRequest = deserialize(bytes).or_else(|err| {
-     Err(io::Error::new(
+ let req: FaucetRequest = deserialize(bytes).map_err(|err| {
+     io::Error::new(
  io::ErrorKind::Other,
  format!("deserialize packet in faucet: {:?}", err),
-     ))
+     )
  })?;

  info!("Airdrop transaction requested...{:?}", req);
  let res = self.build_airdrop_transaction(req);
  match res {
  Ok(tx) => {
- let response_vec = bincode::serialize(&tx).or_else(|err| {
-     Err(io::Error::new(
+ let response_vec = bincode::serialize(&tx).map_err(|err| {
+     io::Error::new(
  io::ErrorKind::Other,
  format!("deserialize packet in faucet: {:?}", err),
-     ))
+     )
  })?;

  let mut response_vec_with_length = vec![0; 2];

@@ -205,12 +205,12 @@ pub fn request_airdrop_transaction(

  // Read length of transaction
  let mut buffer = [0; 2];
- stream.read_exact(&mut buffer).or_else(|err| {
+ stream.read_exact(&mut buffer).map_err(|err| {
  info!(
  "request_airdrop_transaction: buffer length read_exact error: {:?}",
  err
  );
- Err(Error::new(ErrorKind::Other, "Airdrop failed"))
+ Error::new(ErrorKind::Other, "Airdrop failed")
  })?;
  let transaction_length = LittleEndian::read_u16(&buffer) as usize;
  if transaction_length >= PACKET_DATA_SIZE {

@@ -226,19 +226,19 @@ pub fn request_airdrop_transaction(
  // Read the transaction
  let mut buffer = Vec::new();
  buffer.resize(transaction_length, 0);
- stream.read_exact(&mut buffer).or_else(|err| {
+ stream.read_exact(&mut buffer).map_err(|err| {
  info!(
  "request_airdrop_transaction: buffer read_exact error: {:?}",
  err
  );
- Err(Error::new(ErrorKind::Other, "Airdrop failed"))
+ Error::new(ErrorKind::Other, "Airdrop failed")
  })?;

- let transaction: Transaction = deserialize(&buffer).or_else(|err| {
-     Err(Error::new(
+ let transaction: Transaction = deserialize(&buffer).map_err(|err| {
+     Error::new(
  ErrorKind::Other,
  format!("request_airdrop_transaction deserialize failure: {:?}", err),
-     ))
+     )
  })?;
  Ok(transaction)
  }
@@ -452,7 +452,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
  );

  let native_instruction_processors =
- solana_genesis_programs::get_programs(operating_mode, 0).unwrap_or_else(|| vec![]);
+ solana_genesis_programs::get_programs(operating_mode, 0).unwrap_or_else(Vec::new);
  let inflation = solana_genesis_programs::get_inflation(operating_mode, 0).unwrap();

  let mut genesis_config = GenesisConfig {

@@ -255,7 +255,7 @@ pub fn main() -> Result<(), String> {
  let program_arguments = matches
  .values_of("program_arguments")
  .map(Iterator::collect)
- .unwrap_or_else(|| vec![]);
+ .unwrap_or_else(Vec::new);

  command::run(config_file, program_name, program_arguments)
  }
@@ -20,7 +20,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {

  let mut packets = Packets::default();
  packets.packets.set_pinnable();
- let slot = 0xdeadc0de;
+ let slot = 0xdead_c0de;
  // need to pin explicitly since the resize will not cause re-allocation
  packets.packets.reserve_and_pin(NUM_PACKETS);
  packets.packets.resize(NUM_PACKETS, Packet::default());

@@ -54,7 +54,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {
  #[bench]
  fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) {
  let mut packets = Packets::default();
- let slot = 0xdeadc0de;
+ let slot = 0xdead_c0de;
  packets.packets.resize(NUM_PACKETS, Packet::default());
  for p in packets.packets.iter_mut() {
  let shred = Shred::new_from_data(
@@ -621,7 +621,7 @@ impl Blockstore {
  metrics: &mut BlockstoreInsertionMetrics,
  ) -> Result<()>
  where
- F: Fn(Shred) -> (),
+ F: Fn(Shred),
  {
  let mut total_start = Measure::start("Total elapsed");
  let mut start = Measure::start("Blockstore lock");

@@ -918,7 +918,7 @@ impl Blockstore {
  is_recovered: bool,
  ) -> bool
  where
- F: Fn(Shred) -> (),
+ F: Fn(Shred),
  {
  let slot = shred.slot();
  let shred_index = u64::from(shred.index());

@@ -1533,7 +1533,7 @@ impl Blockstore {
  let blockhash = get_last_hash(slot_entries.iter())
  .unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot));

- let rewards = self.rewards_cf.get(slot)?.unwrap_or_else(|| vec![]);
+ let rewards = self.rewards_cf.get(slot)?.unwrap_or_else(Vec::new);

  let block = ConfirmedBlock {
  previous_blockhash: previous_blockhash.to_string(),

@@ -1743,7 +1743,7 @@ impl Blockstore {
  "blockstore-rpc-api",
  ("method", "get_confirmed_transaction".to_string(), String)
  );
- if let Some((slot, status)) = self.get_transaction_status(signature.clone())? {
+ if let Some((slot, status)) = self.get_transaction_status(signature)? {
  let transaction = self.find_transaction_in_slot(slot, signature)?
  .expect("Transaction to exist in slot entries if it exists in statuses and hasn't been cleaned up");
  let encoding = encoding.unwrap_or(TransactionEncoding::Json);

@@ -4948,7 +4948,7 @@ pub mod tests {

  // Insert will fail, slot < root
  blockstore
- .insert_shreds(shreds1.clone()[..].to_vec(), None, false)
+ .insert_shreds(shreds1[..].to_vec(), None, false)
  .unwrap();
  assert!(blockstore.get_data_shred(1, 0).unwrap().is_none());

@@ -5229,7 +5229,7 @@ pub mod tests {
  stakes.insert(keypair.pubkey(), (1 + i as u64, Account::default()));
  }
  let slot_duration = Duration::from_millis(400);
- let block_time_slot_3 = blockstore.get_block_time(3, slot_duration.clone(), &stakes);
+ let block_time_slot_3 = blockstore.get_block_time(3, slot_duration, &stakes);

  let mut total_stake = 0;
  let mut expected_time: u64 = (0..6)

@@ -5246,7 +5246,7 @@ pub mod tests {
  assert_eq!(block_time_slot_3.unwrap().unwrap() as u64, expected_time);
  assert_eq!(
  blockstore
- .get_block_time(8, slot_duration.clone(), &stakes)
+ .get_block_time(8, slot_duration, &stakes)
  .unwrap()
  .unwrap() as u64,
  expected_time + 2 // At 400ms block duration, 5 slots == 2sec
@@ -257,7 +257,7 @@ pub enum BlockstoreProcessorError {
  }

  /// Callback for accessing bank state while processing the blockstore
- pub type ProcessCallback = Arc<dyn Fn(&Bank) -> () + Sync + Send>;
+ pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>;

  #[derive(Default, Clone)]
  pub struct ProcessOptions {

@@ -133,7 +133,7 @@ fn slot_key_data_for_gpu<
  let key = slot_keys.get(slot).unwrap();
  keys_to_slots
  .entry(*key)
- .or_insert_with(|| vec![])
+ .or_insert_with(Vec::new)
  .push(*slot);
  }
  }
@@ -177,7 +177,7 @@ fn sort_data_coding_into_fec_sets(
  data_slot_and_index.insert(key);
  let fec_entry = fec_data
  .entry(shred.common_header.fec_set_index)
- .or_insert_with(|| vec![]);
+ .or_insert_with(Vec::new);
  fec_entry.push(shred);
  }
  for shred in coding_shreds {

@@ -188,7 +188,7 @@ fn sort_data_coding_into_fec_sets(
  coding_slot_and_index.insert(key);
  let fec_entry = fec_coding
  .entry(shred.common_header.fec_set_index)
- .or_insert_with(|| vec![]);
+ .or_insert_with(Vec::new);
  fec_entry.push(shred);
  }
  }
@@ -213,8 +213,8 @@ fn run_cluster_partition<E, F>(
  on_partition_start: E,
  on_partition_resolved: F,
  ) where
- E: Fn(&mut LocalCluster) -> (),
+ E: Fn(&mut LocalCluster),
- F: Fn(&mut LocalCluster) -> (),
+ F: Fn(&mut LocalCluster),
  {
  solana_logger::setup();
  info!("PARTITION_TEST!");
@@ -23,10 +23,8 @@ impl log::Log for LoggerShim {
  }

  fn replace_logger(logger: env_logger::Logger) {
- let max_level = logger.filter();
- log::set_max_level(max_level);
- let mut rw = LOGGER.write().unwrap();
- std::mem::replace(&mut *rw, logger);
+ log::set_max_level(logger.filter());
+ *LOGGER.write().unwrap() = logger;
  let _ = log::set_boxed_logger(Box::new(LoggerShim {}));
  }

@@ -332,9 +332,8 @@ lazy_static! {
  }

  pub fn set_host_id(host_id: String) {
- let mut rw = HOST_ID.write().unwrap();
  info!("host id: {}", host_id);
- std::mem::replace(&mut *rw, host_id);
+ *HOST_ID.write().unwrap() = host_id;
  }

  /// Submits a new point from any thread. Note that points are internally queued
@@ -85,11 +85,11 @@ pub fn ip_echo_server(tcp: std::net::TcpListener) -> IpEchoServer {

  bincode::deserialize::<IpEchoServerMessage>(&data[4..])
  .map(Some)
- .or_else(|err| {
-     Err(io::Error::new(
+ .map_err(|err| {
+     io::Error::new(
  io::ErrorKind::Other,
  format!("Failed to deserialize IpEchoServerMessage: {:?}", err),
-     ))
+     )
  })
  })
  .and_then(move |maybe_msg| {

@@ -372,7 +372,7 @@ pub fn bind_to(ip_addr: IpAddr, port: u16, reuseaddr: bool) -> io::Result<UdpSoc
  let addr = SocketAddr::new(ip_addr, port);

  sock.bind(&SockAddr::from(addr))
- .and_then(|_| Result::Ok(sock.into_udp_socket()))
+ .map(|_| sock.into_udp_socket())
  }

  // binds both a UdpSocket and a TcpListener

@@ -385,9 +385,8 @@ pub fn bind_common(

  let addr = SocketAddr::new(ip_addr, port);
  let sock_addr = SockAddr::from(addr);
- sock.bind(&sock_addr).and_then(|_| {
-     TcpListener::bind(&addr).and_then(|listener| Result::Ok((sock.into_udp_socket(), listener)))
- })
+ sock.bind(&sock_addr)
+     .and_then(|_| TcpListener::bind(&addr).map(|listener| (sock.into_udp_socket(), listener)))
  }

  pub fn find_available_port_in_range(ip_addr: IpAddr, range: PortRange) -> io::Result<u16> {
@@ -13,7 +13,7 @@ fn bench_sigverify(bencher: &mut Bencher) {
  let tx = test_tx();

  // generate packet vector
- let batches = to_packets(&vec![tx; 128]);
+ let batches = to_packets(&std::iter::repeat(tx).take(128).collect::<Vec<_>>());

  let recycler = Recycler::default();
  let recycler_out = Recycler::default();

@@ -28,7 +28,7 @@ fn bench_get_offsets(bencher: &mut Bencher) {
  let tx = test_tx();

  // generate packet vector
- let batches = to_packets(&vec![tx; 1024]);
+ let batches = to_packets(&std::iter::repeat(tx).take(1024).collect::<Vec<_>>());

  let recycler = Recycler::default();
  // verify packets
@@ -7,7 +7,6 @@ use chrono::prelude::*;
  use serde_derive::{Deserialize, Serialize};
  use solana_sdk::hash::Hash;
  use solana_sdk::pubkey::Pubkey;
- use std::mem;

  /// The types of events a payment plan can process.
  #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]

@@ -256,7 +255,7 @@ impl BudgetExpr {
  _ => None,
  };
  if let Some(expr) = new_expr {
- mem::replace(self, *expr);
+ *self = *expr;
  }
  }
  }
@@ -14,7 +14,7 @@ fn deposit_many(bank: &Bank, pubkeys: &mut Vec<Pubkey>, num: usize) {
  for t in 0..num {
  let pubkey = Pubkey::new_rand();
  let account = Account::new((t + 1) as u64, 0, &Account::default().owner);
- pubkeys.push(pubkey.clone());
+ pubkeys.push(pubkey);
  assert!(bank.get_account(&pubkey).is_none());
  bank.deposit(&pubkey, (t + 1) as u64);
  assert_eq!(bank.get_account(&pubkey).unwrap(), account);

@@ -48,7 +48,7 @@ fn test_accounts_squash(bencher: &mut Bencher) {
  &[],
  ));
  let mut pubkeys: Vec<Pubkey> = vec![];
- deposit_many(&bank1, &mut pubkeys, 250000);
+ deposit_many(&bank1, &mut pubkeys, 250_000);
  bank1.freeze();

  // Measures the performance of the squash operation.
@@ -10,18 +10,15 @@ use test::Bencher;
 #[bench]
 fn bench_accounts_index(bencher: &mut Bencher) {
     const NUM_PUBKEYS: usize = 10_000;
-    let pubkeys: Vec<_> = (0..NUM_PUBKEYS)
-        .into_iter()
-        .map(|_| Pubkey::new_rand())
-        .collect();
+    let pubkeys: Vec<_> = (0..NUM_PUBKEYS).map(|_| Pubkey::new_rand()).collect();

     const NUM_FORKS: u64 = 16;

     let mut reclaims = vec![];
     let mut index = AccountsIndex::<AccountInfo>::default();
     for f in 0..NUM_FORKS {
-        for _p in 0..NUM_PUBKEYS {
-            index.insert(f, &pubkeys[_p], AccountInfo::default(), &mut reclaims);
+        for pubkey in pubkeys.iter().take(NUM_PUBKEYS) {
+            index.insert(f, pubkey, AccountInfo::default(), &mut reclaims);
         }
     }

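The loop rewrite above is clippy's needless_range_loop advice: iterate the items instead of indexing with `0..len`. A small standalone sketch, unrelated to the accounts index itself:

fn sum_both_ways(values: &[u64]) -> (u64, u64) {
    // Flagged form: a range loop that only uses the index to index the slice.
    let mut by_index = 0;
    for i in 0..values.len() {
        by_index += values[i];
    }
    // Preferred form: iterate the items directly.
    let mut by_iter = 0;
    for value in values.iter() {
        by_iter += value;
    }
    (by_index, by_iter)
}

fn main() {
    let (a, b) = sum_both_ways(&[1, 2, 3, 4]);
    assert_eq!(a, b);
}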
@@ -32,7 +32,6 @@ fn append_vec_append(bencher: &mut Bencher) {

 fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> {
     (0..size)
-        .into_iter()
         .filter_map(|sample| {
             let (meta, account) = create_test_account(sample);
             vec.append_account(meta, &account, Hash::default())
@@ -92,7 +91,7 @@ fn append_vec_concurrent_append_read(bencher: &mut Bencher) {
     bencher.iter(|| {
         let len = indexes.lock().unwrap().len();
         let random_index: usize = thread_rng().gen_range(0, len);
-        let (sample, pos) = indexes.lock().unwrap().get(random_index).unwrap().clone();
+        let (sample, pos) = *indexes.lock().unwrap().get(random_index).unwrap();
         let (account, _next) = vec.get_account(pos).unwrap();
         let (_meta, test) = create_test_account(sample);
         assert_eq!(account.data, test.data.as_slice());
@@ -112,12 +111,7 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) {
                 continue;
             }
             let random_index: usize = thread_rng().gen_range(0, len + 1);
-            let (sample, pos) = indexes1
-                .lock()
-                .unwrap()
-                .get(random_index % len)
-                .unwrap()
-                .clone();
+            let (sample, pos) = *indexes1.lock().unwrap().get(random_index % len).unwrap();
             let (account, _next) = vec1.get_account(pos).unwrap();
             let (_meta, test) = create_test_account(sample);
             assert_eq!(account.data, test.data.as_slice());
@@ -19,13 +19,13 @@ use std::{sync::Arc, thread::sleep, time::Duration};
 use test::Bencher;

 const BUILTIN_PROGRAM_ID: [u8; 32] = [
-    098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 ];

 const NOOP_PROGRAM_ID: [u8; 32] = [
-    098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+    98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
 ];

 fn process_instruction(
@@ -43,13 +43,12 @@ pub fn create_builtin_transactions(
     let program_id = Pubkey::new(&BUILTIN_PROGRAM_ID);

     (0..4096)
-        .into_iter()
         .map(|_| {
             // Seed the signer account
             let rando0 = Keypair::new();
             bank_client
                 .transfer(10_000, &mint_keypair, &rando0.pubkey())
-                .expect(&format!("{}:{}", line!(), file!()));
+                .unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));

             let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
             let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
@@ -65,13 +64,12 @@ pub fn create_native_loader_transactions(
     let program_id = Pubkey::new(&NOOP_PROGRAM_ID);

     (0..4096)
-        .into_iter()
         .map(|_| {
             // Seed the signer account
             let rando0 = Keypair::new();
             bank_client
                 .transfer(10_000, &mint_keypair, &rando0.pubkey())
-                .expect(&format!("{}:{}", line!(), file!()));
+                .unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));

             let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
             let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
@@ -80,13 +78,13 @@ pub fn create_native_loader_transactions(
         .collect()
 }

-fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &Vec<Transaction>) {
+fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &[Transaction]) {
     let results = bank.process_transactions(&transactions);
     assert!(results.iter().all(Result::is_ok));
 }

-fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &Vec<Transaction>) {
-    for transaction in transactions.clone() {
+fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Transaction]) {
+    for transaction in transactions.to_owned() {
         bank_client.async_send_transaction(transaction).unwrap();
     }
     for _ in 0..1_000_000_000_u64 {
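Two lints recur in this bench file: `expect(&format!(..))` formats the panic message even on success (expect_fun_call), and `&Vec<T>` parameters are looser as `&[T]` (ptr_arg). A standalone sketch combining both, with a made-up `first_even` helper:

fn first_even(values: &[u64]) -> u64 {
    // `&[u64]` instead of `&Vec<u64>` accepts slices, arrays, and vectors alike.
    *values
        .iter()
        .find(|&&v| v % 2 == 0)
        // Lazy form: the panic message is only formatted if the lookup fails.
        .unwrap_or_else(|| panic!("no even value in {:?}", values))
}

fn main() {
    let values = vec![3, 5, 8, 13];
    assert_eq!(first_even(&values), 8);
}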
@@ -98,23 +96,23 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &Vec<
         }
         sleep(Duration::from_nanos(1));
     }
-    if !bank
+    if bank
         .get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
         .unwrap()
-        .is_ok()
+        .is_err()
     {
         error!(
             "transaction failed: {:?}",
             bank.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
                 .unwrap()
         );
-        assert!(false);
+        panic!();
     }
 }

 fn do_bench_transactions(
     bencher: &mut Bencher,
-    bench_work: &dyn Fn(&Arc<Bank>, &BankClient, &Vec<Transaction>),
+    bench_work: &dyn Fn(&Arc<Bank>, &BankClient, &[Transaction]),
     create_transactions: &dyn Fn(&BankClient, &Keypair) -> Vec<Transaction>,
 ) {
     solana_logger::setup();
@@ -47,10 +47,7 @@ fn bench_sigs_bloom(bencher: &mut Bencher) {
     // https://hur.st/bloomfilter/?n=1000000&p=1.0E-8&m=&k=
     let blockhash = hash(Hash::default().as_ref());
     // info!("blockhash = {:?}", blockhash);
-    let keys = (0..27)
-        .into_iter()
-        .map(|i| blockhash.hash_at_index(i))
-        .collect();
+    let keys = (0..27).map(|i| blockhash.hash_at_index(i)).collect();
     let mut sigs: Bloom<Signature> = Bloom::new(38_340_234, keys);

     let mut id = blockhash;
@@ -30,6 +30,6 @@ fn test_statuscache_serialize(bencher: &mut Bencher) {
         }
     }
     bencher.iter(|| {
-        let _ = serialize(&status_cache.slot_deltas(&vec![0])).unwrap();
+        let _ = serialize(&status_cache.slot_deltas(&[0])).unwrap();
     });
 }
@@ -153,12 +153,12 @@ impl Accounts {
                     }
                     let (account, rent) =
                         AccountsDB::load(storage, ancestors, accounts_index, key)
-                            .and_then(|(mut account, _)| {
+                            .map(|(mut account, _)| {
                                 if message.is_writable(i) && !account.executable {
                                     let rent_due = rent_collector.update(&key, &mut account);
-                                    Some((account, rent_due))
+                                    (account, rent_due)
                                 } else {
-                                    Some((account, 0))
+                                    (account, 0)
                                 }
                             })
                             .unwrap_or_default();
@@ -617,7 +617,6 @@ impl AccountsDB {
     pub fn clean_accounts(&self) {
         self.report_store_stats();

-        let no_ancestors = HashMap::new();
         let mut accounts_scan = Measure::start("accounts_scan");
         let accounts_index = self.accounts_index.read().unwrap();
         let pubkeys: Vec<Pubkey> = accounts_index.account_maps.keys().cloned().collect();
@@ -628,7 +627,7 @@ impl AccountsDB {
                 let mut purges_in_root = Vec::new();
                 let mut purges = HashMap::new();
                 for pubkey in pubkeys {
-                    if let Some((list, index)) = accounts_index.get(pubkey, &no_ancestors) {
+                    if let Some((list, index)) = accounts_index.get(pubkey, None) {
                         let (slot, account_info) = &list[index];
                         if account_info.lamports == 0 {
                             purges.insert(*pubkey, accounts_index.would_purge(pubkey));
@@ -641,16 +640,11 @@ impl AccountsDB {
             })
             .reduce(
                 || (HashMap::new(), Vec::new()),
-                |m1, m2| {
+                |mut m1, m2| {
                     // Collapse down the hashmaps/vecs into one.
-                    let x = m2.0.iter().fold(m1.0, |mut acc, (k, vs)| {
-                        acc.insert(k.clone(), vs.clone());
-                        acc
-                    });
-                    let mut y = vec![];
-                    y.extend(m1.1);
-                    y.extend(m2.1);
-                    (x, y)
+                    m1.0.extend(m2.0);
+                    m1.1.extend(m2.1);
+                    m1
                 },
             );

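The rewritten reduce closure above folds the right-hand map and vec into the left-hand accumulator with `extend` instead of rebuilding both containers. A minimal standalone equivalent of that merge step (types simplified to plain u64 keys; not the real AccountsDB types):

use std::collections::HashMap;

// Move everything from `m2` into `m1` rather than cloning both containers.
fn merge(
    mut m1: (HashMap<u64, u64>, Vec<u64>),
    m2: (HashMap<u64, u64>, Vec<u64>),
) -> (HashMap<u64, u64>, Vec<u64>) {
    m1.0.extend(m2.0);
    m1.1.extend(m2.1);
    m1
}

fn main() {
    let a: (HashMap<u64, u64>, Vec<u64>) = (vec![(1, 10)].into_iter().collect(), vec![1]);
    let b: (HashMap<u64, u64>, Vec<u64>) = (vec![(2, 20)].into_iter().collect(), vec![2]);
    let (map, list) = merge(a, b);
    assert_eq!(map.len(), 2);
    assert_eq!(list, vec![1, 2]);
}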
@@ -806,7 +800,6 @@ impl AccountsDB {
         }

         let alive_accounts: Vec<_> = {
-            let no_ancestors = HashMap::new();
             let accounts_index = self.accounts_index.read().unwrap();
             stored_accounts
                 .iter()
@@ -819,7 +812,7 @@ impl AccountsDB {
                         (store_id, offset),
                         _write_version,
                     )| {
                        if let Some((list, _)) = accounts_index.get(pubkey, &no_ancestors) {
Wait
@@ -927,7 +920,7 @@ impl AccountsDB {

     pub fn scan_accounts<F, A>(&self, ancestors: &Ancestors, scan_func: F) -> A
     where
-        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (),
+        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
         A: Default,
     {
         let mut collector = A::default();
@@ -946,7 +939,7 @@ impl AccountsDB {

     pub fn range_scan_accounts<F, A, R>(&self, ancestors: &Ancestors, range: R, scan_func: F) -> A
     where
-        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (),
+        F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
         A: Default,
         R: RangeBounds<Pubkey>,
     {
@@ -968,7 +961,7 @@ impl AccountsDB {
     // PERF: Sequentially read each storage entry in parallel
     pub fn scan_account_storage<F, B>(&self, slot: Slot, scan_func: F) -> Vec<B>
     where
-        F: Fn(&StoredAccount, AppendVecId, &mut B) -> () + Send + Sync,
+        F: Fn(&StoredAccount, AppendVecId, &mut B) + Send + Sync,
         B: Send + Default,
     {
         let storage_maps: Vec<Arc<AccountStorageEntry>> = self
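The `-> ()` trimmed from these `Fn` bounds is clippy's unused_unit lint; the unit return type is implied. A tiny standalone sketch showing the two spellings describe the same callbacks:

// Explicit `-> ()` in the bound is redundant; both aliases accept the same closures.
type VerboseCallback = Box<dyn Fn(&mut u64) -> () + Send + Sync>;
type Callback = Box<dyn Fn(&mut u64) + Send + Sync>;

fn main() {
    let verbose: VerboseCallback = Box::new(|n| *n += 1);
    let concise: Callback = Box::new(|n| *n += 1);
    let mut x = 0;
    verbose(&mut x);
    concise(&mut x);
    assert_eq!(x, 2);
}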
@@ -1020,7 +1013,7 @@ impl AccountsDB {
         accounts_index: &AccountsIndex<AccountInfo>,
         pubkey: &Pubkey,
     ) -> Option<(Account, Slot)> {
-        let (lock, index) = accounts_index.get(pubkey, ancestors)?;
+        let (lock, index) = accounts_index.get(pubkey, Some(ancestors))?;
         let slot = lock[index].0;
         //TODO: thread this as a ref
         if let Some(slot_storage) = storage.0.get(&slot) {
@@ -1037,7 +1030,7 @@ impl AccountsDB {
     #[cfg(test)]
     fn load_account_hash(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Hash {
         let accounts_index = self.accounts_index.read().unwrap();
-        let (lock, index) = accounts_index.get(pubkey, ancestors).unwrap();
+        let (lock, index) = accounts_index.get(pubkey, Some(ancestors)).unwrap();
         let slot = lock[index].0;
         let storage = self.storage.read().unwrap();
         let slot_storage = storage.0.get(&slot).unwrap();
@@ -1449,7 +1442,7 @@ impl AccountsDB {
         let hashes: Vec<_> = keys
             .par_iter()
             .filter_map(|pubkey| {
-                if let Some((list, index)) = accounts_index.get(pubkey, ancestors) {
+                if let Some((list, index)) = accounts_index.get(pubkey, Some(ancestors)) {
                     let (slot, account_info) = &list[index];
                     if account_info.lamports != 0 {
                         storage
@@ -1839,7 +1832,7 @@ impl AccountsDB {
                 };
                 let entry = accum
                     .entry(stored_account.meta.pubkey)
-                    .or_insert_with(|| vec![]);
+                    .or_insert_with(Vec::new);
                 entry.push((stored_account.meta.write_version, account_info));
             },
         );
@@ -1847,7 +1840,7 @@ impl AccountsDB {
         let mut accounts_map: HashMap<Pubkey, Vec<(u64, AccountInfo)>> = HashMap::new();
         for accumulator_entry in accumulator.iter() {
             for (pubkey, storage_entry) in accumulator_entry {
-                let entry = accounts_map.entry(*pubkey).or_insert_with(|| vec![]);
+                let entry = accounts_map.entry(*pubkey).or_insert_with(Vec::new);
                 entry.extend(storage_entry.iter().cloned());
             }
         }
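`or_insert_with(|| vec![])` is the redundant-closure pattern; passing the constructor `Vec::new` directly is equivalent. A small standalone sketch:

use std::collections::HashMap;

fn group_by_parity(values: &[u64]) -> HashMap<bool, Vec<u64>> {
    let mut groups: HashMap<bool, Vec<u64>> = HashMap::new();
    for &value in values {
        // `Vec::new` is passed directly instead of wrapping it in `|| vec![]`.
        groups.entry(value % 2 == 0).or_insert_with(Vec::new).push(value);
    }
    groups
}

fn main() {
    let groups = group_by_parity(&[1, 2, 3, 4, 5]);
    assert_eq!(groups[&true], vec![2, 4]);
    assert_eq!(groups[&false], vec![1, 3, 5]);
}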
@@ -2118,7 +2111,7 @@ pub mod tests {
             .accounts_index
             .read()
             .unwrap()
-            .get(&key, &ancestors)
+            .get(&key, Some(&ancestors))
             .is_some());
         assert_load_account(&db, unrooted_slot, key, 1);

@@ -2139,7 +2132,7 @@ pub mod tests {
             .accounts_index
             .read()
             .unwrap()
-            .get(&key, &ancestors)
+            .get(&key, Some(&ancestors))
             .is_none());

         // Test we can store for the same slot again and get the right information
@@ -2188,14 +2181,14 @@ pub mod tests {
         for t in 0..num {
             let pubkey = Pubkey::new_rand();
             let account = Account::new((t + 1) as u64, space, &Account::default().owner);
-            pubkeys.push(pubkey.clone());
+            pubkeys.push(pubkey);
             assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
             accounts.store(slot, &[(&pubkey, &account)]);
         }
         for t in 0..num_vote {
             let pubkey = Pubkey::new_rand();
             let account = Account::new((num + t + 1) as u64, space, &solana_vote_program::id());
-            pubkeys.push(pubkey.clone());
+            pubkeys.push(pubkey);
             let ancestors = vec![(slot, 0)].into_iter().collect();
             assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
             accounts.store(slot, &[(&pubkey, &account)]);
@@ -2435,7 +2428,7 @@ pub mod tests {
         let ancestors = vec![(0, 0)].into_iter().collect();
         let id = {
             let index = accounts.accounts_index.read().unwrap();
-            let (list, idx) = index.get(&pubkey, &ancestors).unwrap();
+            let (list, idx) = index.get(&pubkey, Some(&ancestors)).unwrap();
             list[idx].1.store_id
         };
         accounts.add_root(1);
@@ -24,29 +24,29 @@ pub struct AccountsIndex<T> {
 impl<'a, T: 'a + Clone> AccountsIndex<T> {
     fn do_scan_accounts<F, I>(&self, ancestors: &Ancestors, mut func: F, iter: I)
     where
-        F: FnMut(&Pubkey, (&T, Slot)) -> (),
+        F: FnMut(&Pubkey, (&T, Slot)),
         I: Iterator<Item = (&'a Pubkey, &'a AccountMapEntry<T>)>,
     {
         for (pubkey, list) in iter {
             let list_r = &list.1.read().unwrap();
-            if let Some(index) = self.latest_slot(ancestors, &list_r) {
+            if let Some(index) = self.latest_slot(Some(ancestors), &list_r) {
                 func(pubkey, (&list_r[index].1, list_r[index].0));
             }
         }
     }

     /// call func with every pubkey and index visible from a given set of ancestors
-    pub fn scan_accounts<F>(&self, ancestors: &Ancestors, func: F)
+    pub(crate) fn scan_accounts<F>(&self, ancestors: &Ancestors, func: F)
     where
-        F: FnMut(&Pubkey, (&T, Slot)) -> (),
+        F: FnMut(&Pubkey, (&T, Slot)),
     {
         self.do_scan_accounts(ancestors, func, self.account_maps.iter());
     }

     /// call func with every pubkey and index visible from a given set of ancestors with range
-    pub fn range_scan_accounts<F, R>(&self, ancestors: &Ancestors, range: R, func: F)
+    pub(crate) fn range_scan_accounts<F, R>(&self, ancestors: &Ancestors, range: R, func: F)
     where
-        F: FnMut(&Pubkey, (&T, Slot)) -> (),
+        F: FnMut(&Pubkey, (&T, Slot)),
         R: RangeBounds<Pubkey>,
     {
         self.do_scan_accounts(ancestors, func, self.account_maps.range(range));
@@ -76,11 +76,14 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {

     // find the latest slot and T in a slice for a given ancestor
     // returns index into 'slice' if found, None if not.
-    fn latest_slot(&self, ancestors: &Ancestors, slice: SlotSlice<T>) -> Option<usize> {
+    fn latest_slot(&self, ancestors: Option<&Ancestors>, slice: SlotSlice<T>) -> Option<usize> {
         let mut max = 0;
         let mut rv = None;
         for (i, (slot, _t)) in slice.iter().rev().enumerate() {
-            if *slot >= max && (ancestors.contains_key(slot) || self.is_root(*slot)) {
+            if *slot >= max
+                && (ancestors.map_or(false, |ancestors| ancestors.contains_key(slot))
+                    || self.is_root(*slot))
+            {
                 rv = Some((slice.len() - 1) - i);
                 max = *slot;
             }
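With the ancestors argument now optional, the visibility check treats `None` like an empty map without allocating one, via `map_or(false, ..)`. A standalone sketch of that lookup, assuming `Ancestors` is a `HashMap<u64, usize>` as in the surrounding code:

use std::collections::HashMap;

type Ancestors = HashMap<u64, usize>;

// True if the slot is either in the (optional) ancestor set or in `roots`.
fn is_visible(slot: u64, ancestors: Option<&Ancestors>, roots: &[u64]) -> bool {
    ancestors.map_or(false, |ancestors| ancestors.contains_key(&slot)) || roots.contains(&slot)
}

fn main() {
    let ancestors: Ancestors = vec![(3, 0)].into_iter().collect();
    assert!(is_visible(3, Some(&ancestors), &[]));
    assert!(is_visible(5, None, &[5]));
    assert!(!is_visible(7, None, &[]));
}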
@@ -90,10 +93,10 @@ impl<'a, T: 'a + Clone> AccountsIndex<T> {

     /// Get an account
     /// The latest account that appears in `ancestors` or `roots` is returned.
-    pub fn get(
+    pub(crate) fn get(
         &self,
         pubkey: &Pubkey,
-        ancestors: &Ancestors,
+        ancestors: Option<&Ancestors>,
     ) -> Option<(RwLockReadGuard<SlotList<T>>, usize)> {
         self.account_maps.get(pubkey).and_then(|list| {
             let list_r = list.1.read().unwrap();
@@ -245,7 +248,8 @@ mod tests {
         let key = Keypair::new();
         let index = AccountsIndex::<bool>::default();
         let ancestors = HashMap::new();
-        assert!(index.get(&key.pubkey(), &ancestors).is_none());
+        assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none());
+        assert!(index.get(&key.pubkey(), None).is_none());

         let mut num = 0;
         index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
@@ -261,7 +265,8 @@ mod tests {
         assert!(gc.is_empty());

         let ancestors = HashMap::new();
-        assert!(index.get(&key.pubkey(), &ancestors).is_none());
+        assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none());
+        assert!(index.get(&key.pubkey(), None).is_none());

         let mut num = 0;
         index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
@@ -277,7 +282,7 @@ mod tests {
         assert!(gc.is_empty());

         let ancestors = vec![(1, 1)].into_iter().collect();
-        assert!(index.get(&key.pubkey(), &ancestors).is_none());
+        assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none());

         let mut num = 0;
         index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
@@ -293,7 +298,7 @@ mod tests {
         assert!(gc.is_empty());

         let ancestors = vec![(0, 0)].into_iter().collect();
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, true));

         let mut num = 0;
@@ -324,9 +329,8 @@ mod tests {
         index.insert(0, &key.pubkey(), true, &mut gc);
         assert!(gc.is_empty());

-        let ancestors = vec![].into_iter().collect();
         index.add_root(0);
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), None).unwrap();
         assert_eq!(list[idx], (0, true));
     }

@@ -369,14 +373,14 @@ mod tests {
         let mut gc = Vec::new();
         index.insert(0, &key.pubkey(), true, &mut gc);
         assert!(gc.is_empty());
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, true));
         drop(list);

         let mut gc = Vec::new();
         index.insert(0, &key.pubkey(), false, &mut gc);
         assert_eq!(gc, vec![(0, true)]);
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, false));
     }

@@ -391,10 +395,10 @@ mod tests {
         assert!(gc.is_empty());
         index.insert(1, &key.pubkey(), false, &mut gc);
         assert!(gc.is_empty());
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (0, true));
         let ancestors = vec![(1, 0)].into_iter().collect();
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap();
         assert_eq!(list[idx], (1, false));
     }

@@ -413,13 +417,12 @@ mod tests {
         index.add_root(3);
         index.insert(4, &key.pubkey(), true, &mut gc);
         assert_eq!(gc, vec![(0, true), (1, false), (2, true)]);
-        let ancestors = vec![].into_iter().collect();
-        let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
+        let (list, idx) = index.get(&key.pubkey(), None).unwrap();
         assert_eq!(list[idx], (3, true));

         let mut num = 0;
         let mut found_key = false;
-        index.scan_accounts(&ancestors, |pubkey, _index| {
+        index.scan_accounts(&Ancestors::new(), |pubkey, _index| {
             if pubkey == &key.pubkey() {
                 found_key = true;
                 assert_eq!(_index, (&true, 3));
@@ -150,7 +150,7 @@ impl StatusCacheRc {
     }
 }

-pub type EnteredEpochCallback = Box<dyn Fn(&mut Bank) -> () + Sync + Send>;
+pub type EnteredEpochCallback = Box<dyn Fn(&mut Bank) + Sync + Send>;

 pub type TransactionProcessResult = (Result<()>, Option<HashAgeKind>);
 pub struct TransactionResults {
@@ -3854,7 +3854,7 @@ mod tests {
     impl Bank {
         fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &Ancestors) -> Vec<Slot> {
             let accounts_index = self.rc.accounts.accounts_db.accounts_index.read().unwrap();
-            let (accounts, _) = accounts_index.get(&pubkey, &ancestors).unwrap();
+            let (accounts, _) = accounts_index.get(&pubkey, Some(&ancestors)).unwrap();
             accounts
                 .iter()
                 .map(|(slot, _)| *slot)
@@ -4988,7 +4988,7 @@ mod tests {
         let (genesis_config, mint_keypair) = create_genesis_config(2_000);
         let bank0 = Arc::new(Bank::new(&genesis_config));
         let initial_state = bank0.hash_internal_state();
-        let bank1 = Bank::new_from_parent(&bank0.clone(), &Pubkey::default(), 1);
+        let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
         assert_ne!(bank1.hash_internal_state(), initial_state);

         info!("transfer bank1");
@@ -42,15 +42,20 @@ impl<T: BloomHashIndex> Bloom<T> {
         let keys: Vec<u64> = (0..num_keys).map(|_| rand::thread_rng().gen()).collect();
         Self::new(num_bits, keys)
     }
-    pub fn num_bits(num_items: f64, false_rate: f64) -> f64 {
+    fn num_bits(num_items: f64, false_rate: f64) -> f64 {
         let n = num_items;
         let p = false_rate;
         ((n * p.ln()) / (1f64 / 2f64.powf(2f64.ln())).ln()).ceil()
     }
-    pub fn num_keys(num_bits: f64, num_items: f64) -> f64 {
+    fn num_keys(num_bits: f64, num_items: f64) -> f64 {
         let n = num_items;
         let m = num_bits;
-        1f64.max(((m / n) * 2f64.ln()).round())
+        // infinity as usize is zero in rust 1.43 but 2^64-1 in rust 1.45; ensure it's zero here
+        if n == 0.0 {
+            0.0
+        } else {
+            1f64.max(((m / n) * 2f64.ln()).round())
+        }
     }
     fn pos(&self, key: &T, k: u64) -> u64 {
         key.hash_at_index(k) % self.bits.len()
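The comment added to `num_keys` is about float-to-int casts: per the diff, `infinity as usize` came out as 0 on Rust 1.43 but saturates to 2^64-1 from 1.45, so an unguarded `m / n` with zero items would suddenly ask for an enormous key count. Guarding the division keeps the result at zero on both toolchains; a minimal standalone version of the guarded math:

// Number of hash keys for a bloom filter, guarded against num_items == 0.
fn num_keys(num_bits: f64, num_items: f64) -> f64 {
    if num_items == 0.0 {
        // Without this branch, m / 0.0 is infinity, and `infinity as usize`
        // is 0 on pre-1.45 toolchains but saturates to usize::MAX on 1.45+.
        0.0
    } else {
        1f64.max(((num_bits / num_items) * 2f64.ln()).round())
    }
}

fn main() {
    assert_eq!(num_keys(38_340_234.0, 0.0), 0.0);
    assert_eq!(num_keys(8.0, 8.0), 1.0);
}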
@@ -930,7 +930,7 @@ mod tests {

     fn with_create_zero_lamport<F>(callback: F)
     where
-        F: Fn(&Bank) -> (),
+        F: Fn(&Bank),
     {
         solana_logger::setup();

@@ -206,7 +206,7 @@ mod tests {
     fn verify_nonce_ok() {
         with_test_keyed_account(42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(nonce_account.signer_key().unwrap());
             let state: State = nonce_account.state().unwrap();
             // New is in Uninitialzed state
             assert_eq!(state, State::Uninitialized);
@@ -236,7 +236,7 @@ mod tests {
     fn verify_nonce_bad_query_hash_fail() {
         with_test_keyed_account(42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(nonce_account.signer_key().unwrap());
             let state: State = nonce_account.state().unwrap();
             // New is in Uninitialzed state
             assert_eq!(state, State::Uninitialized);
@@ -48,11 +48,16 @@ impl RentCollector {
             .map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1))
             .sum();

-        let (rent_due, exempt) = self.rent.due(
-            account.lamports,
-            account.data.len(),
-            slots_elapsed as f64 / self.slots_per_year,
-        );
+        // avoid infinite rent in rust 1.45
+        let years_elapsed = if self.slots_per_year != 0.0 {
+            slots_elapsed as f64 / self.slots_per_year
+        } else {
+            0.0
+        };
+
+        let (rent_due, exempt) =
+            self.rent
+                .due(account.lamports, account.data.len(), years_elapsed);

         if exempt || rent_due != 0 {
             if account.lamports > rent_due {
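Same Rust 1.45 concern as the bloom-filter change: if `slots_per_year` is zero, the elapsed-years division is infinite and the downstream lamport math saturates instead of collapsing to zero. A standalone sketch of the guard (names follow the diff; the rent math itself is elided):

// Years elapsed for rent purposes, guarded against a zero slots_per_year
// (e.g. in tests), so the rent-due computation never sees infinity.
fn years_elapsed(slots_elapsed: u64, slots_per_year: f64) -> f64 {
    if slots_per_year != 0.0 {
        slots_elapsed as f64 / slots_per_year
    } else {
        0.0
    }
}

fn main() {
    assert_eq!(years_elapsed(100, 0.0), 0.0);
    assert!((years_elapsed(100, 400.0) - 0.25).abs() < f64::EPSILON);
}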
@@ -271,7 +271,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
             .or_insert((slot, sig_index, HashMap::new()));
         sig_map.0 = std::cmp::max(slot, sig_map.0);

-        let sig_forks = sig_map.2.entry(sig_slice).or_insert_with(|| vec![]);
+        let sig_forks = sig_map.2.entry(sig_slice).or_insert_with(Vec::new);
         sig_forks.push((slot, res.clone()));
         let slot_deltas = self.slot_deltas.entry(slot).or_default();
         let mut fork_entry = slot_deltas.lock().unwrap();
@@ -933,7 +933,7 @@ mod tests {

     fn with_create_zero_lamport<F>(callback: F)
     where
-        F: Fn(&Bank) -> (),
+        F: Fn(&Bank),
     {
         solana_logger::setup();

@@ -8,14 +8,14 @@ use test::Bencher;
 // Return a ShortVec with 127 bytes
 fn create_encoded_short_vec() -> Vec<u8> {
     let mut bytes = vec![127];
-    bytes.extend_from_slice(&vec![0u8; 127]);
+    bytes.extend_from_slice(&[0u8; 127]);
     bytes
 }

 // Return a Vec with 127 bytes
 fn create_encoded_vec() -> Vec<u8> {
     let mut bytes = vec![127, 0, 0, 0, 0, 0, 0, 0];
-    bytes.extend_from_slice(&vec![0u8; 127]);
+    bytes.extend_from_slice(&[0u8; 127]);
     bytes
 }

@@ -22,7 +22,7 @@ fn bench_slot_history_add_new(b: &mut Bencher) {
     b.iter(|| {
         for _ in 0..5 {
             slot_history.add(slot);
-            slot += 100000;
+            slot += 100_000;
         }
     });
 }
@@ -298,7 +298,7 @@ impl<T: AbiExample> AbiExample for Box<T> {
     }
 }

-impl<T> AbiExample for Box<dyn Fn(&mut T) -> () + Sync + Send> {
+impl<T> AbiExample for Box<dyn Fn(&mut T) + Sync + Send> {
     fn example() -> Self {
         info!("AbiExample for (Box<T>): {}", type_name::<Self>());
         Box::new(move |_t: &mut T| {})
@@ -211,7 +211,7 @@ mod test {
             ..nonce::state::Data::default()
         };
         let mut signers = HashSet::new();
-        signers.insert(keyed_account.signer_key().unwrap().clone());
+        signers.insert(*keyed_account.signer_key().unwrap());
         let state = AccountUtilsState::<Versions>::state(keyed_account)
             .unwrap()
             .convert_to_current();
@@ -326,7 +326,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(0);
             let authorized = *keyed_account.unsigned_key();
             keyed_account
@@ -347,7 +347,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(63);
             let authorized = *keyed_account.unsigned_key();
             keyed_account
@@ -367,7 +367,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(63);
             let result = keyed_account.advance_nonce_account(&recent_blockhashes, &signers);
             assert_eq!(result, Err(NonceError::BadAccountState.into()));
@@ -384,14 +384,14 @@ mod test {
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             with_test_keyed_account(42, true, |nonce_authority| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_account.signer_key().unwrap().clone());
+                signers.insert(*nonce_account.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let authorized = *nonce_authority.unsigned_key();
                 nonce_account
                     .initialize_nonce_account(&authorized, &recent_blockhashes, &rent)
                     .unwrap();
                 let mut signers = HashSet::new();
-                signers.insert(nonce_authority.signer_key().unwrap().clone());
+                signers.insert(*nonce_authority.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(31);
                 let result = nonce_account.advance_nonce_account(&recent_blockhashes, &signers);
                 assert_eq!(result, Ok(()));
@@ -409,7 +409,7 @@ mod test {
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             with_test_keyed_account(42, false, |nonce_authority| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_account.signer_key().unwrap().clone());
+                signers.insert(*nonce_account.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let authorized = *nonce_authority.unsigned_key();
                 nonce_account
@@ -435,7 +435,7 @@ mod test {
             assert_eq!(state, State::Uninitialized);
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(0);
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports;
                 let expect_nonce_lamports =
@@ -506,7 +506,7 @@ mod test {
             assert_eq!(state, State::Uninitialized);
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(0);
                 let lamports = nonce_keyed.account.borrow().lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
@@ -531,7 +531,7 @@ mod test {
         with_test_keyed_account(min_lamports + 42, true, |nonce_keyed| {
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(0);
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports / 2;
                 let nonce_expect_lamports =
@@ -584,7 +584,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_keyed| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_keyed.signer_key().unwrap().clone());
+            signers.insert(*nonce_keyed.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authority = *nonce_keyed.unsigned_key();
             nonce_keyed
@@ -659,7 +659,7 @@ mod test {
                 .unwrap();
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -689,7 +689,7 @@ mod test {
             with_test_keyed_account(42, false, |to_keyed| {
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -719,7 +719,7 @@ mod test {
             with_test_keyed_account(42, false, |to_keyed| {
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports - min_lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -746,7 +746,7 @@ mod test {
                 .convert_to_current();
             assert_eq!(state, State::Uninitialized);
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(0);
             let authority = *keyed_account.unsigned_key();
             let result =
@@ -773,7 +773,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = RecentBlockhashes::from_iter(vec![].into_iter());
             let authorized = *keyed_account.unsigned_key();
             let result =
@@ -827,7 +827,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authorized = *nonce_account.unsigned_key();
             nonce_account
@@ -857,7 +857,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let result = nonce_account.authorize_nonce_account(&Pubkey::default(), &signers);
             assert_eq!(result, Err(NonceError::BadAccountState.into()));
         })
@@ -872,7 +872,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authorized = &Pubkey::default().clone();
             nonce_account
@@ -152,12 +152,7 @@ fn start_gossip_node(
     let cluster_info = Arc::new(cluster_info);

     let gossip_exit_flag = Arc::new(AtomicBool::new(false));
-    let gossip_service = GossipService::new(
-        &cluster_info.clone(),
-        None,
-        gossip_socket,
-        &gossip_exit_flag,
-    );
+    let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &gossip_exit_flag);
     (cluster_info, gossip_exit_flag, gossip_service)
 }

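`&cluster_info.clone()` cloned the `Arc` only to borrow the temporary (clippy's redundant_clone territory); passing `&cluster_info` hands over the same reference without touching the refcount. A small sketch with a hypothetical `spawn_service` consumer:

use std::sync::Arc;

// Hypothetical consumer that only needs a shared reference; it can clone the
// Arc itself if it wants to keep one.
fn spawn_service(info: &Arc<String>) -> usize {
    info.len()
}

fn main() {
    let cluster_info = Arc::new("node".to_string());
    // Flagged form: clones the Arc, then borrows the temporary.
    let a = spawn_service(&cluster_info.clone());
    // Preferred form: just borrow the existing Arc.
    let b = spawn_service(&cluster_info);
    assert_eq!(a, b);
}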
@@ -955,7 +950,7 @@ pub fn main() {
         .collect();

     let snapshot_interval_slots = value_t_or_exit!(matches, "snapshot_interval_slots", u64);
-    let snapshot_path = ledger_path.clone().join("snapshot");
+    let snapshot_path = ledger_path.join("snapshot");
     fs::create_dir_all(&snapshot_path).unwrap_or_else(|err| {
         eprintln!(
             "Failed to create snapshots directory {:?}: {}",
@@ -1235,7 +1230,7 @@ pub fn main() {
                 Ok(())
             }
         })
-        .and_then(|_| {
+        .map(|_| {
             if !validator_config.voting_disabled && !no_check_vote_account {
                 check_vote_account(
                     &rpc_client,
@@ -1254,7 +1249,6 @@ pub fn main() {
                     exit(1);
                 });
             }
-            Ok(())
         });

     if result.is_ok() {
@@ -125,7 +125,7 @@ fn get_config() -> Config {
     let json_rpc_url =
         value_t!(matches, "json_rpc_url", String).unwrap_or_else(|_| config.json_rpc_url);
     let validator_identity_pubkeys: Vec<_> = pubkeys_of(&matches, "validator_identities")
-        .unwrap_or_else(|| vec![])
+        .unwrap_or_else(Vec::new)
         .into_iter()
         .map(|i| i.to_string())
         .collect();