adds validator flag to allow private ip addresses (backport #18850) (#18975)

* adds validator flag to allow private ip addresses (#18850)

(cherry picked from commit d2d5f36a3c)

# Conflicts:
#	accounts-cluster-bench/Cargo.toml
#	bench-tps/Cargo.toml
#	cli/Cargo.toml
#	core/benches/cluster_info.rs
#	core/src/banking_stage.rs
#	core/src/broadcast_stage.rs
#	core/src/broadcast_stage/broadcast_duplicates_run.rs
#	core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
#	core/src/broadcast_stage/standard_broadcast_run.rs
#	core/src/cluster_slots_service.rs
#	core/src/repair_service.rs
#	core/src/tvu.rs
#	core/src/validator.rs
#	dos/Cargo.toml
#	gossip/src/cluster_info.rs
#	gossip/src/crds_gossip_pull.rs
#	gossip/src/crds_gossip_push.rs
#	gossip/src/gossip_service.rs
#	local-cluster/Cargo.toml
#	local-cluster/src/cluster_tests.rs
#	local-cluster/tests/local_cluster.rs
#	rpc/Cargo.toml
#	rpc/src/rpc.rs
#	tokens/Cargo.toml
#	validator/Cargo.toml
#	validator/src/main.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
mergify[bot] authored 2021-07-29 21:43:24 +00:00, committed by GitHub
parent 3c200ae45a, commit eacc69efba
72 changed files with 1281 additions and 400 deletions
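For context, this change replaces the old free-standing `is_global(&addr)` filter with an explicit `SocketAddrSpace` value that each component carries and consults before contacting a peer. Below is a minimal sketch of that type, reconstructed only from the usages visible in this diff (`SocketAddrSpace::new(matches.is_present("allow_private_addr"))`, `socket_addr_space.check(&peer.tvu)`, and the `SocketAddrSpace::Unspecified` variant used throughout the tests). The name of the restrictive variant and the exact routability test are assumptions for illustration, not the actual solana-streamer implementation.

```rust
use std::net::{IpAddr, SocketAddr};

/// Sketch of the address-space gate added by this commit.
/// `Unspecified` accepts every address (used by tests and local clusters);
/// the restrictive variant only accepts publicly routable addresses.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SocketAddrSpace {
    Unspecified,
    Global, // assumed name for the restrictive variant
}

impl SocketAddrSpace {
    /// Maps the new `--allow-private-addr` CLI flag to an address space,
    /// mirroring `SocketAddrSpace::new(matches.is_present("allow_private_addr"))`
    /// in the diff below.
    pub fn new(allow_private_addr: bool) -> Self {
        if allow_private_addr {
            Self::Unspecified
        } else {
            Self::Global
        }
    }

    /// Returns true if `addr` may be contacted in this address space.
    /// The private/loopback test here is a simplification; the real
    /// implementation may use a stricter routability check.
    pub fn check(&self, addr: &SocketAddr) -> bool {
        if *self == Self::Unspecified {
            return true;
        }
        match addr.ip() {
            IpAddr::V4(ip) => !(ip.is_private() || ip.is_loopback() || ip.is_unspecified()),
            IpAddr::V6(ip) => !(ip.is_loopback() || ip.is_unspecified()),
        }
    }
}

fn main() {
    let space = SocketAddrSpace::new(false); // flag absent: private addresses rejected
    let peer: SocketAddr = "10.0.0.1:8001".parse().unwrap();
    assert!(!space.check(&peer));
    assert!(SocketAddrSpace::Unspecified.check(&peer));
}
```

This is why the diff threads a `SocketAddrSpace` (or a `&SocketAddrSpace`) through `ClusterInfo::new`, `broadcast_shreds`, `discover_cluster`, `TestValidator`, and the streamer responder: every send/contact site now asks `check` instead of the old global `is_global` helper, and tests pass `SocketAddrSpace::Unspecified` so local (private) addresses keep working.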

Cargo.lock (generated)
View File

@@ -4084,6 +4084,7 @@ dependencies = [
"solana-net-utils",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"spl-token",
@@ -4186,6 +4187,7 @@ dependencies = [
"solana-net-utils",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-version",
]
@@ -4225,6 +4227,7 @@ dependencies = [
"solana-net-utils",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-version",
]
@@ -4322,6 +4325,7 @@ dependencies = [
"solana-net-utils",
"solana-remote-wallet",
"solana-sdk",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"solana-vote-program",
@@ -4533,6 +4537,7 @@ dependencies = [
"solana-net-utils",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-version",
]
@@ -4896,6 +4901,7 @@ dependencies = [
"solana-runtime",
"solana-sdk",
"solana-stake-program",
"solana-streamer",
"solana-vote-program",
"tempfile",
]
@@ -5259,6 +5265,7 @@ dependencies = [
"solana-sdk",
"solana-stake-program",
"solana-storage-bigtable",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"solana-vote-program",
@@ -5558,6 +5565,7 @@ dependencies = [
"solana-remote-wallet",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-transaction-status",
"solana-version",
"spl-associated-token-account",
@@ -5636,6 +5644,7 @@ dependencies = [
"solana-rpc",
"solana-runtime",
"solana-sdk",
"solana-streamer",
"solana-version",
"solana-vote-program",
"symlink",

View File

@@ -24,6 +24,7 @@ solana-measure = { path = "../measure", version = "=1.7.9" }
solana-net-utils = { path = "../net-utils", version = "=1.7.9" }
solana-runtime = { path = "../runtime", version = "=1.7.9" }
solana-sdk = { path = "../sdk", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
solana-transaction-status = { path = "../transaction-status", version = "=1.7.9" }
solana-version = { path = "../version", version = "=1.7.9" }
spl-token-v2-0 = { package = "spl-token", version = "=3.2.0", features = ["no-entrypoint"] }

View File

@@ -20,6 +20,7 @@ use solana_sdk::{
timing::timestamp,
transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
use std::{
net::SocketAddr,
@@ -670,6 +671,7 @@ fn main() {
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
SocketAddrSpace::Unspecified,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
@@ -721,7 +723,7 @@ pub mod test {
};
let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let iterations = 10;
let maybe_space = None;
let batch_size = 100;

View File

@@ -25,6 +25,7 @@ use solana_sdk::{
timing::{duration_as_us, timestamp},
transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
thread::sleep,
@@ -215,7 +216,11 @@ fn main() {
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,

View File

@@ -30,6 +30,7 @@ solana-metrics = { path = "../metrics", version = "=1.7.9" }
solana-net-utils = { path = "../net-utils", version = "=1.7.9" }
solana-runtime = { path = "../runtime", version = "=1.7.9" }
solana-sdk = { path = "../sdk", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
solana-version = { path = "../version", version = "=1.7.9" }
[dev-dependencies]

View File

@@ -7,6 +7,7 @@ use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_excha
use log::*;
use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::signature::Signer;
use solana_streamer::socket::SocketAddrSpace;
fn main() {
solana_logger::setup();
@@ -55,11 +56,12 @@ fn main() {
);
} else {
info!("Connecting to the cluster");
let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});
let nodes = discover_cluster(&entrypoint_addr, num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|_| {
panic!("Failed to discover nodes");
});
let (client, num_clients) = get_multi_client(&nodes);
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
info!("{} nodes found", num_clients);
if num_clients < num_nodes {

View File

@@ -15,6 +15,7 @@ use solana_sdk::{
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use std::{process::exit, sync::mpsc::channel, time::Duration};
#[test]
@@ -43,13 +44,19 @@ fn test_exchange_local_cluster() {
} = config;
let accounts_in_groups = batch_size * account_groups;
let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
});
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![100_000; NUM_NODES],
cluster_lamports: 100_000_000_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default(),
NUM_NODES,
),
native_instruction_processors: [solana_exchange_program!()].to_vec(),
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let faucet_keypair = Keypair::new();
cluster.transfer(
@@ -66,13 +73,17 @@ fn test_exchange_local_cluster() {
.expect("faucet_addr");
info!("Connecting to the cluster");
let nodes =
discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});
let nodes = discover_cluster(
&cluster.entry_point_info.gossip,
NUM_NODES,
SocketAddrSpace::Unspecified,
)
.unwrap_or_else(|err| {
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
exit(1);
});
let (client, num_clients) = get_multi_client(&nodes);
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
info!("clients: {}", num_clients);
assert!(num_clients >= NUM_NODES);

View File

@@ -27,6 +27,7 @@ solana-measure = { path = "../measure", version = "=1.7.9" }
solana-net-utils = { path = "../net-utils", version = "=1.7.9" }
solana-runtime = { path = "../runtime", version = "=1.7.9" }
solana-sdk = { path = "../sdk", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
solana-version = { path = "../version", version = "=1.7.9" }
[dev-dependencies]

View File

@@ -7,6 +7,7 @@ use solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_clie
use solana_sdk::fee_calculator::FeeRateGovernor;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_program;
use solana_streamer::socket::SocketAddrSpace;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
/// Number of signatures for all transactions in ~1 week at ~100K TPS
@@ -68,13 +69,14 @@ fn main() {
}
info!("Connecting to the cluster");
let nodes = discover_cluster(entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
exit(1);
});
let client = if *multi_client {
let (client, num_clients) = get_multi_client(&nodes);
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
if nodes.len() < num_clients {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
@@ -88,7 +90,7 @@ fn main() {
let mut target_client = None;
for node in nodes {
if node.id == *target_node {
target_client = Some(Arc::new(get_client(&[node])));
target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
break;
}
}
@@ -97,7 +99,7 @@ fn main() {
exit(1);
})
} else {
Arc::new(get_client(&nodes))
Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
};
let keypairs = if *read_from_client_file {

View File

@@ -13,6 +13,7 @@ use solana_local_cluster::{
validator_configs::make_identical_validator_configs,
};
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::socket::SocketAddrSpace;
use std::{
sync::{mpsc::channel, Arc},
time::Duration,
@@ -23,13 +24,19 @@ fn test_bench_tps_local_cluster(config: Config) {
solana_logger::setup();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
native_instruction_processors,
..ClusterConfig::default()
});
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default(),
NUM_NODES,
),
native_instruction_processors,
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let faucet_keypair = Keypair::new();
cluster.transfer(

View File

@@ -52,6 +52,7 @@ url = "2.1.1"
[dev-dependencies]
solana-core = { path = "../core", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -18,13 +18,15 @@ use solana_sdk::{
signature::{keypair_from_seed, Keypair, Signer},
system_program,
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_nonce() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
full_battery_tests(test_validator, None, false);
}
@@ -34,7 +36,8 @@ fn test_nonce_with_seed() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
full_battery_tests(test_validator, Some(String::from("seed")), false);
}
@@ -44,7 +47,8 @@ fn test_nonce_with_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
full_battery_tests(test_validator, None, true);
}
@@ -216,7 +220,12 @@ fn test_create_account_with_seed() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let offline_nonce_authority_signer = keypair_from_seed(&[1u8; 32]).unwrap();
let online_nonce_creator_signer = keypair_from_seed(&[2u8; 32]).unwrap();

View File

@@ -15,6 +15,7 @@ use solana_sdk::{
pubkey::Pubkey,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use std::{env, fs::File, io::Read, path::PathBuf, str::FromStr};
#[test]
@@ -30,7 +31,8 @@ fn test_cli_program_deploy_non_upgradeable() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -146,7 +148,8 @@ fn test_cli_program_deploy_no_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -229,7 +232,8 @@ fn test_cli_program_deploy_with_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -557,7 +561,8 @@ fn test_cli_program_write_buffer() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -839,7 +844,8 @@ fn test_cli_program_set_buffer_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -952,7 +958,8 @@ fn test_cli_program_mismatch_buffer_authority() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -1041,7 +1048,8 @@ fn test_cli_program_show() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -1221,7 +1229,8 @@ fn test_cli_program_dump() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@@ -6,13 +6,15 @@ use solana_sdk::{
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_cli_request_airdrop() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let mut bob_config = CliConfig::recent_for_tests();
bob_config.json_rpc_url = test_validator.rpc_url();

View File

@@ -24,13 +24,15 @@ use solana_sdk::{
state::{Lockup, StakeAuthorize, StakeState},
},
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_stake_delegation_force() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -120,7 +122,8 @@ fn test_seed_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -206,7 +209,8 @@ fn test_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -288,7 +292,8 @@ fn test_offline_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -431,7 +436,8 @@ fn test_nonced_stake_delegation_and_deactivation() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -548,7 +554,8 @@ fn test_stake_authorize() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -862,7 +869,12 @@ fn test_stake_authorize_with_fee_payer() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, SIG_FEE, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
SIG_FEE,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -1010,7 +1022,12 @@ fn test_stake_split() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -1154,7 +1171,12 @@ fn test_stake_set_lockup() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -1420,7 +1442,8 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -1644,7 +1667,8 @@ fn test_stake_checked_instructions() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@@ -18,6 +18,7 @@ use solana_sdk::{
signature::{keypair_from_seed, Keypair, NullSigner, Signer},
stake,
};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_transfer() {
@@ -25,7 +26,12 @@ fn test_transfer() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -277,7 +283,12 @@ fn test_transfer_multisession_signing() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let to_pubkey = Pubkey::new(&[1u8; 32]);
let offline_from_signer = keypair_from_seed(&[2u8; 32]).unwrap();
@@ -404,7 +415,12 @@ fn test_transfer_all() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -452,7 +468,12 @@ fn test_transfer_unfunded_recipient() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
@@ -500,7 +521,12 @@ fn test_transfer_with_seed() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_custom_fees(mint_pubkey, 1, Some(faucet_addr));
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
Some(faucet_addr),
SocketAddrSpace::Unspecified,
);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@@ -14,6 +14,7 @@ use solana_sdk::{
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::vote_state::{VoteAuthorize, VoteState, VoteStateVersions};
#[test]
@@ -21,7 +22,8 @@ fn test_vote_authorize_and_withdraw() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let faucet_addr = run_local_faucet(mint_keypair, None);
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
let rpc_client =
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());

View File

@@ -29,6 +29,7 @@ use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use solana_streamer::socket::SocketAddrSpace;
use std::collections::VecDeque;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
@@ -201,7 +202,11 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let cluster_info = Arc::new(cluster_info);
let (s, _r) = unbounded();
let _banking_stage = BankingStage::new(

View File

@@ -12,8 +12,8 @@ use solana_gossip::{
contact_info::ContactInfo,
};
use solana_ledger::shred::Shred;
use solana_sdk::pubkey;
use solana_sdk::timing::timestamp;
use solana_sdk::{pubkey, signature::Keypair, timing::timestamp};
use solana_streamer::socket::SocketAddrSpace;
use std::{
collections::HashMap,
net::UdpSocket,
@@ -26,7 +26,11 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
solana_logger::setup();
let leader_pubkey = pubkey::new_rand();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
let cluster_info = ClusterInfo::new(
leader_info.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
const NUM_SHREDS: usize = 32;
@@ -51,6 +55,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
&cluster_nodes,
&last_datapoint,
&mut TransmitShredsStats::default(),
&SocketAddrSpace::Unspecified,
)
.unwrap();
});

View File

@@ -21,6 +21,7 @@ use solana_sdk::pubkey;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::system_transaction;
use solana_sdk::timing::timestamp;
use solana_streamer::socket::SocketAddrSpace;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
@@ -35,7 +36,11 @@ use test::Bencher;
#[allow(clippy::same_item_push)]
fn bench_retransmitter(bencher: &mut Bencher) {
solana_logger::setup();
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = ClusterInfo::new(
Node::new_localhost().info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
const NUM_PEERS: usize = 4;
let mut peer_sockets = Vec::new();
for _ in 0..NUM_PEERS {

View File

@@ -223,13 +223,22 @@ mod tests {
hash::hash,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
fn test_should_halt() {
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = new_test_cluster_info(contact_info);
let cluster_info = Arc::new(cluster_info);
let mut trusted_validators = HashSet::new();
@@ -265,7 +274,7 @@ mod tests {
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = new_test_cluster_info(contact_info);
let cluster_info = Arc::new(cluster_info);
let trusted_validators = HashSet::new();

View File

@@ -1388,7 +1388,7 @@ mod tests {
use super::*;
use crossbeam_channel::unbounded;
use itertools::Itertools;
use solana_gossip::cluster_info::Node;
use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_ledger::{
blockstore::{entries_to_test_shreds, Blockstore},
entry::{next_entry, Entry, EntrySlice},
@@ -1411,6 +1411,7 @@ mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionWithStatusMeta;
use std::{
net::SocketAddr,
@@ -1422,6 +1423,14 @@ mod tests {
thread::sleep,
};
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
fn test_banking_stage_shutdown1() {
let genesis_config = create_genesis_config(2).genesis_config;
@@ -1437,7 +1446,7 @@ mod tests {
);
let (exit, poh_recorder, poh_service, _entry_receiever) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
@@ -1480,7 +1489,7 @@ mod tests {
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
@@ -1549,7 +1558,7 @@ mod tests {
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
@@ -1697,8 +1706,7 @@ mod tests {
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info =
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let _banking_stage = BankingStage::new_num_threads(
&cluster_info,

View File

@@ -22,7 +22,7 @@ use solana_poh::poh_recorder::WorkingBankEntry;
use solana_runtime::bank::Bank;
use solana_sdk::timing::timestamp;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use solana_streamer::{sendmmsg::send_mmsg, socket::is_global};
use solana_streamer::{sendmmsg::send_mmsg, socket::SocketAddrSpace};
use std::sync::atomic::AtomicU64;
use std::{
collections::HashMap,
@@ -386,6 +386,7 @@ pub fn broadcast_shreds(
cluster_nodes: &ClusterNodes<BroadcastStage>,
last_datapoint_submit: &Arc<AtomicU64>,
transmit_stats: &mut TransmitShredsStats,
socket_addr_space: &SocketAddrSpace,
) -> Result<()> {
let broadcast_len = cluster_nodes.num_peers();
if broadcast_len == 0 {
@@ -397,7 +398,7 @@ pub fn broadcast_shreds(
.iter()
.filter_map(|shred| {
let node = cluster_nodes.get_broadcast_peer(shred.seed())?;
if is_global(&node.tvu) {
if socket_addr_space.check(&node.tvu) {
Some((&shred.payload, &node.tvu))
} else {
None
@@ -585,7 +586,11 @@ pub mod test {
let broadcast_buddy = Node::new_localhost_with_pubkey(&buddy_keypair.pubkey());
// Fill the cluster_info with the buddy's info
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
let cluster_info = ClusterInfo::new(
leader_info.info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster_info.insert_info(broadcast_buddy.info);
let cluster_info = Arc::new(cluster_info);

View File

@@ -301,11 +301,14 @@ impl BroadcastRun for BroadcastDuplicatesRun {
let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
let stakes = stakes.unwrap();
let socket_addr_space = cluster_info.socket_addr_space();
for peer in cluster_info.tvu_peers() {
// Forward shreds to circumvent gossip
if stakes.get(&peer.id).is_some() {
shreds.iter().for_each(|shred| {
sock.send_to(&shred.payload, &peer.tvu_forwards).unwrap();
if socket_addr_space.check(&peer.tvu_forwards) {
sock.send_to(&shred.payload, &peer.tvu_forwards).unwrap();
}
});
}
@@ -313,7 +316,9 @@ impl BroadcastRun for BroadcastDuplicatesRun {
if let Some(shreds) = delayed_shreds.as_ref() {
if Some(peer.id) == delayed_recipient {
shreds.iter().for_each(|shred| {
sock.send_to(&shred.payload, &peer.tvu).unwrap();
if socket_addr_space.check(&peer.tvu) {
sock.send_to(&shred.payload, &peer.tvu).unwrap();
}
});
}
}

View File

@@ -139,14 +139,16 @@ impl BroadcastRun for BroadcastFakeShredsRun {
mod tests {
use super::*;
use solana_gossip::contact_info::ContactInfo;
use solana_streamer::socket::SocketAddrSpace;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
#[test]
fn test_tvu_peers_ordering() {
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
0,
));
let cluster = ClusterInfo::new(
ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
8080,

View File

@@ -145,6 +145,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
&cluster_nodes,
&Arc::new(AtomicU64::new(0)),
&mut TransmitShredsStats::default(),
cluster_info.socket_addr_space(),
)?;
Ok(())

View File

@@ -365,6 +365,7 @@ impl StandardBroadcastRun {
&cluster_nodes,
&self.last_datapoint_submit,
&mut transmit_stats,
cluster_info.socket_addr_space(),
)?;
drop(cluster_nodes);
transmit_time.stop();
@@ -498,6 +499,7 @@ mod test {
genesis_config::GenesisConfig,
signature::{Keypair, Signer},
};
use solana_streamer::socket::SocketAddrSpace;
use std::sync::Arc;
use std::time::Duration;
@@ -519,7 +521,11 @@ mod test {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(leader_info.info));
let cluster_info = Arc::new(ClusterInfo::new(
leader_info.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut genesis_config = create_genesis_config(10_000).genesis_config;
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;

View File

@@ -246,8 +246,9 @@ mod tests {
sorted_stakes_with_index,
},
},
solana_sdk::timing::timestamp,
std::iter::repeat_with,
solana_sdk::{signature::Keypair, timing::timestamp},
solana_streamer::socket::SocketAddrSpace,
std::{iter::repeat_with, sync::Arc},
};
// Legacy methods copied for testing backward compatibility.
@@ -293,7 +294,11 @@ mod tests {
.collect();
// Add some staked nodes with no contact-info.
stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
let cluster_info = ClusterInfo::new_with_invalid_keypair(this_node);
let cluster_info = ClusterInfo::new(
this_node,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
{
let now = timestamp();
let mut gossip = cluster_info.gossip.write().unwrap();

View File

@@ -180,14 +180,19 @@ mod test {
use {
super::*,
solana_gossip::{cluster_info::Node, crds_value::CrdsValueLabel},
solana_sdk::pubkey::Pubkey,
solana_sdk::{pubkey::Pubkey, signature::Keypair},
solana_streamer::socket::SocketAddrSpace,
};
#[test]
pub fn test_update_lowest_slot() {
let pubkey = Pubkey::new_unique();
let node_info = Node::new_localhost_with_pubkey(&pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
let cluster_info = ClusterInfo::new(
node_info.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
ClusterSlotsService::update_lowest_slot(5, &cluster_info);
cluster_info.flush_push_queue();
let lowest = {

View File

@@ -676,17 +676,27 @@ impl RepairService {
mod test {
use super::*;
use crossbeam_channel::unbounded;
use solana_gossip::cluster_info::Node;
use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_ledger::blockstore::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_runtime::genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs};
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signer;
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::vote_transaction;
use std::collections::HashSet;
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
pub fn test_repair_orphan() {
let blockstore_path = get_tmp_ledger_path!();
@@ -978,7 +988,8 @@ mod test {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let cluster_slots = ClusterSlots::default();
let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info);
let serve_repair =
ServeRepair::new(Arc::new(new_test_cluster_info(Node::new_localhost().info)));
let mut duplicate_slot_repair_statuses = HashMap::new();
let dead_slot = 9;
let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap();
@@ -1060,9 +1071,7 @@ mod test {
Pubkey::default(),
UdpSocket::bind("0.0.0.0:0").unwrap().local_addr().unwrap(),
));
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
Node::new_localhost().info,
));
let cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info));
let serve_repair = ServeRepair::new(cluster_info.clone());
let valid_repair_peer = Node::new_localhost().info;
@@ -1122,7 +1131,8 @@ mod test {
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let cluster_slots = ClusterSlots::default();
let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info);
let serve_repair =
ServeRepair::new(Arc::new(new_test_cluster_info(Node::new_localhost().info)));
let mut duplicate_slot_repair_statuses = HashMap::new();
let duplicate_slot = 9;

View File

@@ -2544,6 +2544,7 @@ mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionWithStatusMeta;
use solana_vote_program::{
vote_state::{VoteState, VoteStateVersions},
@@ -2617,6 +2618,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
Node::new_localhost_with_pubkey(&my_pubkey).info,
Arc::new(Keypair::from_bytes(&my_keypairs.node_keypair.to_bytes()).unwrap()),
SocketAddrSpace::Unspecified,
);
assert_eq!(my_pubkey, cluster_info.id());

View File

@@ -336,6 +336,7 @@ fn retransmit(
epoch_cache_update.stop();
let my_id = cluster_info.id();
let socket_addr_space = cluster_info.socket_addr_space();
let mut discard_total = 0;
let mut repair_total = 0;
let mut retransmit_total = 0;
@@ -405,6 +406,7 @@ fn retransmit(
packet,
sock,
/*forward socket=*/ true,
socket_addr_space,
);
}
ClusterInfo::retransmit_to(
@@ -412,6 +414,7 @@ fn retransmit(
packet,
sock,
!anchor_node, // send to forward socket!
socket_addr_space,
);
retransmit_time.stop();
retransmit_total += retransmit_time.as_us();
@@ -643,6 +646,8 @@ mod tests {
use solana_ledger::shred::Shred;
use solana_net_utils::find_available_port_in_range;
use solana_perf::packet::{Packet, Packets};
use solana_sdk::signature::Keypair;
use solana_streamer::socket::SocketAddrSpace;
use std::net::{IpAddr, Ipv4Addr};
#[test]
@@ -679,7 +684,11 @@ mod tests {
.find(|pk| me.id < *pk)
.unwrap();
let other = ContactInfo::new_localhost(&other, 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
let cluster_info = ClusterInfo::new(
other,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster_info.insert_info(me);
let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]);

View File

@@ -149,13 +149,6 @@ impl RepairPeers {
}
impl ServeRepair {
/// Without a valid keypair gossip will not function. Only useful for tests.
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
Self::new(Arc::new(ClusterInfo::new_with_invalid_keypair(
contact_info,
)))
}
pub fn new(cluster_info: Arc<ClusterInfo>) -> Self {
let (keypair, my_info) = { (cluster_info.keypair.clone(), cluster_info.my_contact_info()) };
Self {
@@ -652,7 +645,8 @@ mod tests {
shred::{max_ticks_per_n_shreds, Shred},
};
use solana_perf::packet::Packet;
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Keypair, timing::timestamp};
use solana_streamer::socket::SocketAddrSpace;
#[test]
fn test_run_highest_window_request() {
@@ -797,11 +791,19 @@ mod tests {
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
}
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
fn window_index_request() {
let cluster_slots = ClusterSlots::default();
let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me));
let cluster_info = Arc::new(new_test_cluster_info(me));
let serve_repair = ServeRepair::new(cluster_info.clone());
let mut outstanding_requests = OutstandingRepairs::default();
let rv = serve_repair.repair_request(
@@ -1028,7 +1030,7 @@ mod tests {
fn test_repair_with_repair_validators() {
let cluster_slots = ClusterSlots::default();
let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me.clone()));
let cluster_info = Arc::new(new_test_cluster_info(me.clone()));
// Insert two peers on the network
let contact_info2 =

View File

@@ -1,7 +1,7 @@
use crate::serve_repair::ServeRepair;
use solana_ledger::blockstore::Blockstore;
use solana_perf::recycler::Recycler;
use solana_streamer::streamer;
use solana_streamer::{socket::SocketAddrSpace, streamer};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
@@ -17,6 +17,7 @@ impl ServeRepairService {
serve_repair: &Arc<RwLock<ServeRepair>>,
blockstore: Option<Arc<Blockstore>>,
serve_repair_socket: UdpSocket,
socket_addr_space: SocketAddrSpace,
exit: &Arc<AtomicBool>,
) -> Self {
let (request_sender, request_receiver) = channel();
@@ -36,8 +37,12 @@ impl ServeRepairService {
false,
);
let (response_sender, response_receiver) = channel();
let t_responder =
streamer::responder("serve-repairs", serve_repair_socket, response_receiver);
let t_responder = streamer::responder(
"serve-repairs",
serve_repair_socket,
response_receiver,
socket_addr_space,
);
let t_listen = ServeRepair::listen(
serve_repair.clone(),
blockstore,

View File

@@ -24,6 +24,7 @@ use {
rent::Rent,
signature::{read_keypair_file, write_keypair_file, Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::HashMap,
fs::remove_dir_all,
@@ -263,8 +264,9 @@ impl TestValidatorGenesis {
pub fn start_with_mint_address(
&self,
mint_address: Pubkey,
socket_addr_space: SocketAddrSpace,
) -> Result<TestValidator, Box<dyn std::error::Error>> {
TestValidator::start(mint_address, self)
TestValidator::start(mint_address, self, socket_addr_space)
}
/// Start a test validator
@@ -273,9 +275,9 @@ impl TestValidatorGenesis {
/// created at genesis.
///
/// This function panics on initialization failure.
pub fn start(&self) -> (TestValidator, Keypair) {
pub fn start(&self, socket_addr_space: SocketAddrSpace) -> (TestValidator, Keypair) {
let mint_keypair = Keypair::new();
TestValidator::start(mint_keypair.pubkey(), self)
TestValidator::start(mint_keypair.pubkey(), self, socket_addr_space)
.map(|test_validator| (test_validator, mint_keypair))
.expect("Test validator failed to start")
}
@@ -297,7 +299,11 @@ impl TestValidator {
/// Faucet optional.
///
/// This function panics on initialization failure.
pub fn with_no_fees(mint_address: Pubkey, faucet_addr: Option<SocketAddr>) -> Self {
pub fn with_no_fees(
mint_address: Pubkey,
faucet_addr: Option<SocketAddr>,
socket_addr_space: SocketAddrSpace,
) -> Self {
TestValidatorGenesis::default()
.fee_rate_governor(FeeRateGovernor::new(0, 0))
.rent(Rent {
@@ -306,7 +312,7 @@ impl TestValidator {
..Rent::default()
})
.faucet_addr(faucet_addr)
.start_with_mint_address(mint_address)
.start_with_mint_address(mint_address, socket_addr_space)
.expect("validator start failed")
}
@@ -318,6 +324,7 @@ impl TestValidator {
mint_address: Pubkey,
target_lamports_per_signature: u64,
faucet_addr: Option<SocketAddr>,
socket_addr_space: SocketAddrSpace,
) -> Self {
TestValidatorGenesis::default()
.fee_rate_governor(FeeRateGovernor::new(target_lamports_per_signature, 0))
@@ -327,7 +334,7 @@ impl TestValidator {
..Rent::default()
})
.faucet_addr(faucet_addr)
.start_with_mint_address(mint_address)
.start_with_mint_address(mint_address, socket_addr_space)
.expect("validator start failed")
}
@@ -430,6 +437,7 @@ impl TestValidator {
fn start(
mint_address: Pubkey,
config: &TestValidatorGenesis,
socket_addr_space: SocketAddrSpace,
) -> Result<Self, Box<dyn std::error::Error>> {
let preserve_ledger = config.ledger_path.is_some();
let ledger_path = TestValidator::initialize_ledger(mint_address, config)?;
@@ -513,11 +521,12 @@ impl TestValidator {
&validator_config,
true, // should_check_duplicate_instance
config.start_progress.clone(),
socket_addr_space,
));
// Needed to avoid panics in `solana-responder-gossip` in tests that create a number of
// test validators concurrently...
discover_cluster(&gossip, 1)
discover_cluster(&gossip, 1, socket_addr_space)
.map_err(|err| format!("TestValidator startup failed: {:?}", err))?;
// This is a hack to delay until the fees are non-zero for test consistency

View File

@@ -362,6 +362,8 @@ pub mod tests {
use solana_poh::poh_recorder::create_test_recorder;
use solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank;
use solana_runtime::bank::Bank;
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::socket::SocketAddrSpace;
use std::sync::atomic::Ordering;
#[ignore]
@@ -379,7 +381,11 @@ pub mod tests {
let bank_forks = BankForks::new(Bank::new(&genesis_config));
//start cluster_info1
let cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
let cluster_info1 = ClusterInfo::new(
target1.info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
cluster_info1.insert_info(leader.info);
let cref1 = Arc::new(cluster_info1);

View File

@@ -70,6 +70,7 @@ use solana_sdk::{
signature::{Keypair, Signer},
timing::timestamp,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::vote_state::VoteState;
use std::{
collections::HashSet,
@@ -273,6 +274,7 @@ pub(crate) fn abort() -> ! {
}
impl Validator {
#[allow(clippy::too_many_arguments)]
pub fn new(
mut node: Node,
identity_keypair: &Arc<Keypair>,
@@ -283,6 +285,7 @@ impl Validator {
config: &ValidatorConfig,
should_check_duplicate_instance: bool,
start_progress: Arc<RwLock<ValidatorStartProgress>>,
socket_addr_space: SocketAddrSpace,
) -> Self {
let id = identity_keypair.pubkey();
assert_eq!(id, node.info.id);
@@ -433,7 +436,11 @@ impl Validator {
}
}
let mut cluster_info = ClusterInfo::new(node.info.clone(), identity_keypair.clone());
let mut cluster_info = ClusterInfo::new(
node.info.clone(),
identity_keypair.clone(),
socket_addr_space,
);
cluster_info.set_contact_debug_interval(config.contact_debug_interval);
cluster_info.set_entrypoints(cluster_entrypoints);
cluster_info.restore_contact_info(ledger_path, config.contact_save_interval);
@@ -506,10 +513,16 @@ impl Validator {
optimistically_confirmed_bank_tracker,
bank_notification_sender,
) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs {
if ContactInfo::is_valid_address(&node.info.rpc) {
assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
if ContactInfo::is_valid_address(&node.info.rpc, &socket_addr_space) {
assert!(ContactInfo::is_valid_address(
&node.info.rpc_pubsub,
&socket_addr_space
));
} else {
assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
assert!(!ContactInfo::is_valid_address(
&node.info.rpc_pubsub,
&socket_addr_space
));
}
let (bank_notification_sender, bank_notification_receiver) = unbounded();
(
@@ -590,6 +603,7 @@ impl Validator {
&serve_repair,
Some(blockstore.clone()),
node.sockets.serve_repair,
socket_addr_space,
&exit,
);
@@ -1591,6 +1605,7 @@ mod tests {
&config,
true, // should_check_duplicate_instance
start_progress.clone(),
SocketAddrSpace::Unspecified,
);
assert_eq!(
*start_progress.read().unwrap(),
@@ -1669,6 +1684,7 @@ mod tests {
&config,
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
SocketAddrSpace::Unspecified,
)
})
.collect();
@@ -1694,6 +1710,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let (genesis_config, _mint_keypair) = create_genesis_config(1);

View File

@@ -626,6 +626,7 @@ mod test {
signature::{Keypair, Signer},
timing::timestamp,
};
use solana_streamer::socket::SocketAddrSpace;
use std::sync::Arc;
fn local_entries_to_shred(
@@ -776,7 +777,11 @@ mod test {
assert!(!blockstore.has_duplicate_shreds_in_slot(duplicate_shred_slot));
let keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(keypair),
SocketAddrSpace::Unspecified,
);
run_check_duplicate(
&cluster_info,
&blockstore,

View File

@@ -18,6 +18,7 @@ use solana_sdk::{
signature::{Keypair, Signer},
system_transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
net::{IpAddr, SocketAddr},
sync::{
@@ -34,7 +35,8 @@ fn test_rpc_client() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let bob_pubkey = solana_sdk::pubkey::new_rand();

View File

@@ -21,6 +21,7 @@ use solana_sdk::{
system_transaction,
transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionStatus;
use std::{
collections::HashSet,
@@ -58,7 +59,8 @@ fn test_rpc_send_tx() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let rpc_url = test_validator.rpc_url();
let bob_pubkey = solana_sdk::pubkey::new_rand();
@@ -122,7 +124,8 @@ fn test_rpc_invalid_requests() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let rpc_url = test_validator.rpc_url();
let bob_pubkey = solana_sdk::pubkey::new_rand();
@@ -153,7 +156,8 @@ fn test_rpc_invalid_requests() {
fn test_rpc_slot_updates() {
solana_logger::setup();
let test_validator = TestValidator::with_no_fees(Pubkey::new_unique(), None);
let test_validator =
TestValidator::with_no_fees(Pubkey::new_unique(), None, SocketAddrSpace::Unspecified);
// Create the pub sub runtime
let rt = Runtime::new().unwrap();
@@ -218,7 +222,8 @@ fn test_rpc_subscriptions() {
solana_logger::setup();
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
transactions_socket.connect(test_validator.tpu()).unwrap();
@@ -385,7 +390,8 @@ fn test_rpc_subscriptions() {
fn test_tpu_send_transaction() {
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
let test_validator = TestValidator::with_no_fees(mint_pubkey, None);
let test_validator =
TestValidator::with_no_fees(mint_pubkey, None, SocketAddrSpace::Unspecified);
let rpc_client = Arc::new(RpcClient::new_with_commitment(
test_validator.rpc_url(),
CommitmentConfig::processed(),

View File

@@ -60,6 +60,7 @@ mod tests {
signature::{Keypair, Signer},
system_transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
collections::HashSet,
fs,
@@ -442,7 +443,11 @@ mod tests {
// channel hold hard links to these deleted snapshots. We verify this is the case below.
let exit = Arc::new(AtomicBool::new(false));
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let pending_snapshot_package = PendingSnapshotPackage::default();
let snapshot_packager_service = SnapshotPackagerService::new(

View File

@@ -22,6 +22,7 @@ solana-logger = { path = "../logger", version = "=1.7.9" }
solana-net-utils = { path = "../net-utils", version = "=1.7.9" }
solana-runtime = { path = "../runtime", version = "=1.7.9" }
solana-sdk = { path = "../sdk", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
solana-version = { path = "../version", version = "=1.7.9" }
solana-client = { path = "../client", version = "=1.7.9" }

View File

@@ -6,6 +6,7 @@ use solana_client::rpc_client::RpcClient;
use solana_core::serve_repair::RepairProtocol;
use solana_gossip::{contact_info::ContactInfo, gossip_service::discover};
use solana_sdk::pubkey::Pubkey;
use solana_streamer::socket::SocketAddrSpace;
use std::net::{SocketAddr, UdpSocket};
use std::process::exit;
use std::str::FromStr;
@@ -197,6 +198,13 @@ fn main() {
.long("skip-gossip")
.help("Just use entrypoint address directly"),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.get_matches();
let mut entrypoint_addr = SocketAddr::from(([127, 0, 0, 1], 8001));
@@ -216,6 +224,7 @@ fn main() {
let mut nodes = vec![];
if !skip_gossip {
info!("Finding cluster entry: {:?}", entrypoint_addr);
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let (gossip_nodes, _validators) = discover(
None, // keypair
Some(&entrypoint_addr),
@@ -225,6 +234,7 @@ fn main() {
Some(&entrypoint_addr), // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
socket_addr_space,
)
.unwrap_or_else(|err| {
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);

View File

@@ -64,7 +64,7 @@ use {
solana_streamer::{
packet,
sendmmsg::multicast,
socket::is_global,
socket::SocketAddrSpace,
streamer::{PacketReceiver, PacketSender},
},
solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY,
@@ -158,12 +158,7 @@ pub struct ClusterInfo {
contact_save_interval: u64, // milliseconds, 0 = disabled
instance: NodeInstance,
contact_info_path: PathBuf,
}
impl Default for ClusterInfo {
fn default() -> Self {
Self::new_with_invalid_keypair(ContactInfo::default())
}
socket_addr_space: SocketAddrSpace,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
@@ -392,12 +387,11 @@ fn retain_staked(values: &mut Vec<CrdsValue>, stakes: &HashMap<Pubkey, u64>) {
}
impl ClusterInfo {
/// Without a valid keypair gossip will not function. Only useful for tests.
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
Self::new(contact_info, Arc::new(Keypair::new()))
}
pub fn new(contact_info: ContactInfo, keypair: Arc<Keypair>) -> Self {
pub fn new(
contact_info: ContactInfo,
keypair: Arc<Keypair>,
socket_addr_space: SocketAddrSpace,
) -> Self {
let id = contact_info.id;
let me = Self {
gossip: RwLock::new(CrdsGossip::default()),
@@ -417,6 +411,7 @@ impl ClusterInfo {
instance: NodeInstance::new(&mut thread_rng(), id, timestamp()),
contact_info_path: PathBuf::default(),
contact_save_interval: 0, // disabled
socket_addr_space,
};
me.insert_self();
me.push_self(&HashMap::new(), None);
@@ -448,6 +443,7 @@ impl ClusterInfo {
instance: NodeInstance::new(&mut thread_rng(), *new_id, timestamp()),
contact_info_path: PathBuf::default(),
contact_save_interval: 0, // disabled
..*self
}
}
@@ -455,6 +451,10 @@ impl ClusterInfo {
self.contact_debug_interval = new;
}
pub fn socket_addr_space(&self) -> &SocketAddrSpace {
&self.socket_addr_space
}
fn push_self(
&self,
stakes: &HashMap<Pubkey, u64>,
@@ -483,6 +483,7 @@ impl ClusterInfo {
shred_version,
stakes,
gossip_validators,
&self.socket_addr_space,
);
}
@@ -665,7 +666,7 @@ impl ClusterInfo {
.all_peers()
.into_iter()
.filter_map(|(node, last_updated)| {
if !ContactInfo::is_valid_address(&node.rpc) {
if !ContactInfo::is_valid_address(&node.rpc, &self.socket_addr_space) {
return None;
}
@@ -676,8 +677,8 @@ impl ClusterInfo {
return None;
}
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
if ContactInfo::is_valid_address(addr) {
let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String {
if ContactInfo::is_valid_address(addr, &self.socket_addr_space) {
if &addr.ip() == default_ip {
addr.port().to_string()
} else {
@@ -686,7 +687,7 @@ impl ClusterInfo {
} else {
"none".to_string()
}
}
};
let rpc_addr = node.rpc.ip();
Some(format!(
@@ -730,7 +731,7 @@ impl ClusterInfo {
.all_peers()
.into_iter()
.filter_map(|(node, last_updated)| {
let is_spy_node = Self::is_spy_node(&node);
let is_spy_node = Self::is_spy_node(&node, &self.socket_addr_space);
if is_spy_node {
total_spy_nodes = total_spy_nodes.saturating_add(1);
}
@@ -743,8 +744,8 @@ impl ClusterInfo {
if is_spy_node {
shred_spy_nodes = shred_spy_nodes.saturating_add(1);
}
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
if ContactInfo::is_valid_address(addr) {
let addr_to_string = |default_ip: &IpAddr, addr: &SocketAddr| -> String {
if ContactInfo::is_valid_address(addr, &self.socket_addr_space) {
if &addr.ip() == default_ip {
addr.port().to_string()
} else {
@@ -753,11 +754,11 @@ impl ClusterInfo {
} else {
"none".to_string()
}
}
};
let ip_addr = node.gossip.ip();
Some(format!(
"{:15} {:2}| {:5} | {:44} |{:^9}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
if ContactInfo::is_valid_address(&node.gossip, &self.socket_addr_space) {
ip_addr.to_string()
} else {
"none".to_string()
@@ -1138,12 +1139,16 @@ impl ClusterInfo {
/// all validators that have a valid rpc port regardless of `shred_version`.
pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
let self_pubkey = self.id();
self.gossip
.read()
.unwrap()
.crds
.get_nodes_contact_info()
.filter(|x| x.id != self.id() && ContactInfo::is_valid_address(&x.rpc))
.filter(|x| {
x.id != self_pubkey
&& ContactInfo::is_valid_address(&x.rpc, &self.socket_addr_space)
})
.cloned()
.collect()
}
@@ -1167,17 +1172,23 @@ impl ClusterInfo {
.crds
.get_nodes_contact_info()
// shred_version not considered for gossip peers (ie, spy nodes do not set shred_version)
.filter(|x| x.id != me && ContactInfo::is_valid_address(&x.gossip))
.filter(|x| {
x.id != me && ContactInfo::is_valid_address(&x.gossip, &self.socket_addr_space)
})
.cloned()
.collect()
}
/// all validators that have a valid tvu port regardless of `shred_version`.
pub fn all_tvu_peers(&self) -> Vec<ContactInfo> {
let self_pubkey = self.id();
self.time_gossip_read_lock("all_tvu_peers", &self.stats.all_tvu_peers)
.crds
.get_nodes_contact_info()
.filter(|x| ContactInfo::is_valid_address(&x.tvu) && x.id != self.id())
.filter(|x| {
ContactInfo::is_valid_address(&x.tvu, &self.socket_addr_space)
&& x.id != self_pubkey
})
.cloned()
.collect()
}
@@ -1210,7 +1221,7 @@ impl ClusterInfo {
nodes
.into_iter()
.filter(|node| {
ContactInfo::is_valid_address(&node.serve_repair)
ContactInfo::is_valid_address(&node.serve_repair, &self.socket_addr_space)
&& match gossip.crds.get_lowest_slot(node.id) {
None => true, // fallback to legacy behavior
Some(lowest_slot) => lowest_slot.lowest <= slot,
@@ -1222,20 +1233,24 @@ impl ClusterInfo {
nodes
}
fn is_spy_node(contact_info: &ContactInfo) -> bool {
!ContactInfo::is_valid_address(&contact_info.tpu)
|| !ContactInfo::is_valid_address(&contact_info.gossip)
|| !ContactInfo::is_valid_address(&contact_info.tvu)
fn is_spy_node(contact_info: &ContactInfo, socket_addr_space: &SocketAddrSpace) -> bool {
!ContactInfo::is_valid_address(&contact_info.tpu, socket_addr_space)
|| !ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space)
|| !ContactInfo::is_valid_address(&contact_info.tvu, socket_addr_space)
}
/// compute broadcast table
pub fn tpu_peers(&self) -> Vec<ContactInfo> {
let self_pubkey = self.id();
self.gossip
.read()
.unwrap()
.crds
.get_nodes_contact_info()
.filter(|x| x.id != self.id() && ContactInfo::is_valid_address(&x.tpu))
.filter(|x| {
x.id != self_pubkey
&& ContactInfo::is_valid_address(&x.tpu, &self.socket_addr_space)
})
.cloned()
.collect()
}
@@ -1243,20 +1258,25 @@ impl ClusterInfo {
/// retransmit messages to a list of nodes
/// # Remarks
/// We need to avoid having obj locked while doing io, such as the `send_to`
pub fn retransmit_to(peers: &[&ContactInfo], packet: &Packet, s: &UdpSocket, forwarded: bool) {
pub fn retransmit_to(
peers: &[&ContactInfo],
packet: &Packet,
s: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.map(|peer| &peer.tvu_forwards)
.filter(|addr| ContactInfo::is_valid_address(addr))
.filter(|addr| is_global(addr))
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect()
} else {
peers
.iter()
.map(|peer| &peer.tvu)
.filter(|addr| is_global(addr))
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
let mut dests = &dests[..];
@@ -1406,6 +1426,7 @@ impl ClusterInfo {
MAX_BLOOM_SIZE,
&self.ping_cache,
&mut pings,
&self.socket_addr_space,
) {
Err(_) => Vec::default(),
Ok((peer, filters)) => vec![(peer, filters)],
@@ -1879,7 +1900,7 @@ impl ClusterInfo {
// incoming pull-requests, pings are also sent to request.from_addr (as
// opposed to caller.gossip address).
move |request| {
ContactInfo::is_valid_address(&request.from_addr) && {
ContactInfo::is_valid_address(&request.from_addr, &self.socket_addr_space) && {
let node = (request.caller.pubkey(), request.from_addr);
*cache.entry(node).or_insert_with(|| hard_check(node))
}
@@ -2246,7 +2267,7 @@ impl ClusterInfo {
let new_push_requests = self.new_push_requests(stakes, require_stake_for_gossip);
inc_new_counter_debug!("cluster_info-push_message-pushes", new_push_requests.len());
for (address, request) in new_push_requests {
if ContactInfo::is_valid_address(&address) {
if ContactInfo::is_valid_address(&address, &self.socket_addr_space) {
match Packet::from_data(Some(&address), &request) {
Ok(packet) => packets.packets.push(packet),
Err(err) => error!("failed to write push-request packet: {:?}", err),
@@ -2896,13 +2917,14 @@ pub fn push_messages_to_peer(
messages: Vec<CrdsValue>,
self_id: Pubkey,
peer_gossip: SocketAddr,
socket_addr_space: &SocketAddrSpace,
) -> Result<(), GossipError> {
let reqs: Vec<_> = ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, messages)
.map(move |payload| (peer_gossip, Protocol::PushMessage(self_id, payload)))
.collect();
let packets = to_packets_with_destination(PacketsRecycler::default(), &reqs);
let sock = UdpSocket::bind("0.0.0.0:0").unwrap();
packet::send_to(&packets, &sock)?;
packet::send_to(&packets, &sock, socket_addr_space)?;
Ok(())
}
@@ -2996,20 +3018,30 @@ mod tests {
fn test_gossip_node() {
//check that gossip nodes always show up as spies
let (node, _, _) = ClusterInfo::spy_node(&solana_sdk::pubkey::new_rand(), 0);
assert!(ClusterInfo::is_spy_node(&node));
assert!(ClusterInfo::is_spy_node(
&node,
&SocketAddrSpace::Unspecified
));
let (node, _, _) = ClusterInfo::gossip_node(
&solana_sdk::pubkey::new_rand(),
&"1.1.1.1:1111".parse().unwrap(),
0,
);
assert!(ClusterInfo::is_spy_node(&node));
assert!(ClusterInfo::is_spy_node(
&node,
&SocketAddrSpace::Unspecified
));
}
#[test]
fn test_handle_pull() {
solana_logger::setup();
let node = Node::new_localhost();
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let cluster_info = Arc::new(ClusterInfo::new(
node.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let data = test_crds_values(entrypoint_pubkey);
@@ -3066,6 +3098,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&this_node.pubkey(), timestamp()),
this_node.clone(),
SocketAddrSpace::Unspecified,
);
let remote_nodes: Vec<(Keypair, SocketAddr)> =
repeat_with(|| new_rand_remote_node(&mut rng))
@@ -3120,6 +3153,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&this_node.pubkey(), timestamp()),
this_node.clone(),
SocketAddrSpace::Unspecified,
);
let remote_nodes: Vec<(Keypair, SocketAddr)> =
repeat_with(|| new_rand_remote_node(&mut rng))
@@ -3280,7 +3314,11 @@ mod tests {
//check that gossip doesn't try to push to invalid addresses
let node = Node::new_localhost();
let (spy, _, _) = ClusterInfo::spy_node(&solana_sdk::pubkey::new_rand(), 0);
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let cluster_info = Arc::new(ClusterInfo::new(
node.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
cluster_info.insert_info(spy);
{
let mut gossip = cluster_info.gossip.write().unwrap();
@@ -3289,6 +3327,7 @@ mod tests {
cluster_info.my_shred_version(),
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
}
let reqs = cluster_info.generate_new_gossip_requests(
@@ -3300,7 +3339,7 @@ mod tests {
);
//assert none of the addrs are invalid.
reqs.iter().all(|(addr, _)| {
let res = ContactInfo::is_valid_address(addr);
let res = ContactInfo::is_valid_address(addr, &SocketAddrSpace::Unspecified);
assert!(res);
res
});
@@ -3309,14 +3348,19 @@ mod tests {
#[test]
fn test_cluster_info_new() {
let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
let cluster_info = ClusterInfo::new(
d.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
assert_eq!(d.id, cluster_info.id());
}
#[test]
fn insert_info_test() {
let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let cluster_info = ClusterInfo::new_with_invalid_keypair(d);
let cluster_info =
ClusterInfo::new(d, Arc::new(Keypair::new()), SocketAddrSpace::Unspecified);
let d = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
let label = CrdsValueLabel::ContactInfo(d.id);
cluster_info.insert_info(d);
@@ -3395,7 +3439,11 @@ mod tests {
let peer_keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer = ContactInfo::new_localhost(&peer_keypair.pubkey(), 0);
let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(keypair),
SocketAddrSpace::Unspecified,
);
cluster_info
.ping_cache
.lock()
@@ -3409,6 +3457,7 @@ mod tests {
cluster_info.my_shred_version(),
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
}
//check that all types of gossip messages are signed correctly
@@ -3438,6 +3487,7 @@ mod tests {
MAX_BLOOM_SIZE,
&cluster_info.ping_cache,
&mut pings,
&cluster_info.socket_addr_space,
)
.ok()
.unwrap();
@@ -3447,7 +3497,11 @@ mod tests {
fn test_refresh_vote() {
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
// Construct and push a vote for some other slot
let unrefresh_slot = 5;
@@ -3534,7 +3588,11 @@ mod tests {
let mut rng = rand::thread_rng();
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
// make sure empty crds is handled correctly
let mut cursor = Cursor::default();
@@ -3605,7 +3663,11 @@ mod tests {
let mut rng = rand::thread_rng();
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let mut tower = Vec::new();
for k in 0..MAX_LOCKOUT_HISTORY {
let slot = k as Slot;
@@ -3651,7 +3713,11 @@ mod tests {
fn test_push_epoch_slots() {
let keys = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keys.pubkey(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let slots = cluster_info.get_epoch_slots(&mut Cursor::default());
assert!(slots.is_empty());
cluster_info.push_epoch_slots(&[0]);
@@ -3708,6 +3774,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
@@ -3861,7 +3928,11 @@ mod tests {
#[test]
fn test_tvu_peers_and_stakes() {
let d = ContactInfo::new_localhost(&Pubkey::new(&[0; 32]), timestamp());
let cluster_info = ClusterInfo::new_with_invalid_keypair(d.clone());
let cluster_info = ClusterInfo::new(
d.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let mut stakes = HashMap::new();
// no stake
@@ -3902,6 +3973,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let mut entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
@@ -3957,6 +4029,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
for i in 0..10 {
// make these invalid for the upcoming repair request
@@ -4032,6 +4105,7 @@ mod tests {
let cluster_info = ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
);
let mut range: Vec<Slot> = vec![];
//random should be hard to compress
@@ -4079,6 +4153,7 @@ mod tests {
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
));
assert_eq!(cluster_info.my_shred_version(), 0);
@@ -4162,6 +4237,7 @@ mod tests {
contact_info
},
node_keypair,
SocketAddrSpace::Unspecified,
));
assert_eq!(cluster_info.my_shred_version(), 2);
@@ -4333,7 +4409,11 @@ mod tests {
#[ignore] // TODO: debug why this is flaky on buildkite!
fn test_pull_request_time_pruning() {
let node = Node::new_localhost();
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(node.info));
let cluster_info = Arc::new(ClusterInfo::new(
node.info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let entrypoint_pubkey = solana_sdk::pubkey::new_rand();
let entrypoint = ContactInfo::new_localhost(&entrypoint_pubkey, timestamp());
cluster_info.set_entrypoint(entrypoint);
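Putting the constructor change together, a hedged sketch of building a ClusterInfo under the new signature and reading the policy back (as the gossip service does later in this diff):

use solana_gossip::cluster_info::{ClusterInfo, Node};
use solana_sdk::signature::Keypair;
use solana_streamer::socket::SocketAddrSpace;
use std::sync::Arc;

// The keypair and the address-space policy are now explicit constructor
// arguments; call sites that used `new_with_invalid_keypair` pass a fresh
// keypair instead.
fn demo_cluster_info() -> ClusterInfo {
    let node = Node::new_localhost();
    let cluster_info = ClusterInfo::new(
        node.info,
        Arc::new(Keypair::new()),
        SocketAddrSpace::Unspecified,
    );
    // Deref-copy of the policy, as GossipService::new does in this change.
    let _policy = *cluster_info.socket_addr_space();
    cluster_info
}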

View File

@@ -7,6 +7,7 @@ use {
signature::{Keypair, Signer},
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::net::{IpAddr, SocketAddr},
};
@@ -200,16 +201,22 @@ impl ContactInfo {
(addr.port() != 0) && Self::is_valid_ip(addr.ip())
}
pub fn is_valid_address(addr: &SocketAddr) -> bool {
Self::is_valid_tvu_address(addr) && solana_streamer::socket::is_global(addr)
// TODO: Replace this entirely with streamer SocketAddrSpace.
pub fn is_valid_address(addr: &SocketAddr, socket_addr_space: &SocketAddrSpace) -> bool {
Self::is_valid_tvu_address(addr) && socket_addr_space.check(addr)
}
pub fn client_facing_addr(&self) -> (SocketAddr, SocketAddr) {
(self.rpc, self.tpu)
}
pub fn valid_client_facing_addr(&self) -> Option<(SocketAddr, SocketAddr)> {
if ContactInfo::is_valid_address(&self.rpc) && ContactInfo::is_valid_address(&self.tpu) {
pub fn valid_client_facing_addr(
&self,
socket_addr_space: &SocketAddrSpace,
) -> Option<(SocketAddr, SocketAddr)> {
if ContactInfo::is_valid_address(&self.rpc, socket_addr_space)
&& ContactInfo::is_valid_address(&self.tpu, socket_addr_space)
{
Some((self.rpc, self.tpu))
} else {
None
@@ -224,13 +231,25 @@ mod tests {
#[test]
fn test_is_valid_address() {
let bad_address_port = socketaddr!("127.0.0.1:0");
assert!(!ContactInfo::is_valid_address(&bad_address_port));
assert!(!ContactInfo::is_valid_address(
&bad_address_port,
&SocketAddrSpace::Unspecified
));
let bad_address_unspecified = socketaddr!(0, 1234);
assert!(!ContactInfo::is_valid_address(&bad_address_unspecified));
assert!(!ContactInfo::is_valid_address(
&bad_address_unspecified,
&SocketAddrSpace::Unspecified
));
let bad_address_multicast = socketaddr!([224, 254, 0, 0], 1234);
assert!(!ContactInfo::is_valid_address(&bad_address_multicast));
assert!(!ContactInfo::is_valid_address(
&bad_address_multicast,
&SocketAddrSpace::Unspecified
));
let loopback = socketaddr!("127.0.0.1:1234");
assert!(ContactInfo::is_valid_address(&loopback));
assert!(ContactInfo::is_valid_address(
&loopback,
&SocketAddrSpace::Unspecified
));
// assert!(!ContactInfo::is_valid_ip_internal(loopback.ip(), false));
}
@@ -313,11 +332,19 @@ mod tests {
#[test]
fn test_valid_client_facing() {
let mut ci = ContactInfo::default();
assert_eq!(ci.valid_client_facing_addr(), None);
assert_eq!(
ci.valid_client_facing_addr(&SocketAddrSpace::Unspecified),
None
);
ci.tpu = socketaddr!("127.0.0.1:123");
assert_eq!(ci.valid_client_facing_addr(), None);
assert_eq!(
ci.valid_client_facing_addr(&SocketAddrSpace::Unspecified),
None
);
ci.rpc = socketaddr!("127.0.0.1:234");
assert!(ci.valid_client_facing_addr().is_some());
assert!(ci
.valid_client_facing_addr(&SocketAddrSpace::Unspecified)
.is_some());
}
#[test]
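A short usage sketch of the two-argument helper, under the semantics the tests above imply for SocketAddrSpace::Unspecified:

use solana_gossip::contact_info::ContactInfo;
use solana_streamer::socket::SocketAddrSpace;
use std::net::SocketAddr;

fn demo_is_valid_address() {
    // `Unspecified` places no restriction on the address space, so a
    // loopback (or private) peer address passes.
    let loopback: SocketAddr = "127.0.0.1:1234".parse().unwrap();
    assert!(ContactInfo::is_valid_address(
        &loopback,
        &SocketAddrSpace::Unspecified
    ));

    // A zero port still fails the basic validity check regardless of policy.
    let no_port: SocketAddr = "10.0.0.1:0".parse().unwrap();
    assert!(!ContactInfo::is_valid_address(
        &no_port,
        &SocketAddrSpace::Unspecified
    ));
}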

View File

@@ -24,6 +24,7 @@ use {
signature::{Keypair, Signer},
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet},
net::SocketAddr,
@@ -186,6 +187,7 @@ impl CrdsGossip {
self_shred_version: u16,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
socket_addr_space: &SocketAddrSpace,
) {
self.push.refresh_push_active_set(
&self.crds,
@@ -195,6 +197,7 @@ impl CrdsGossip {
self_shred_version,
self.crds.num_nodes(),
CRDS_GOSSIP_NUM_ACTIVE,
socket_addr_space,
)
}
@@ -211,6 +214,7 @@ impl CrdsGossip {
bloom_size: usize,
ping_cache: &Mutex<PingCache>,
pings: &mut Vec<(SocketAddr, Ping)>,
socket_addr_space: &SocketAddrSpace,
) -> Result<(ContactInfo, Vec<CrdsFilter>), CrdsGossipError> {
self.pull.new_pull_request(
thread_pool,
@@ -223,6 +227,7 @@ impl CrdsGossip {
bloom_size,
ping_cache,
pings,
socket_addr_space,
)
}
@@ -379,6 +384,7 @@ mod test {
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
let now = timestamp();
//incorrect dest

View File

@@ -29,6 +29,7 @@ use {
pubkey::Pubkey,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet, VecDeque},
convert::TryInto,
@@ -223,6 +224,7 @@ impl CrdsGossipPull {
bloom_size: usize,
ping_cache: &Mutex<PingCache>,
pings: &mut Vec<(SocketAddr, Ping)>,
socket_addr_space: &SocketAddrSpace,
) -> Result<(ContactInfo, Vec<CrdsFilter>), CrdsGossipError> {
let (weights, peers): (Vec<_>, Vec<_>) = self
.pull_options(
@@ -232,6 +234,7 @@ impl CrdsGossipPull {
now,
gossip_validators,
stakes,
socket_addr_space,
)
.into_iter()
.unzip();
@@ -273,6 +276,7 @@ impl CrdsGossipPull {
now: u64,
gossip_validators: Option<&HashSet<Pubkey>>,
stakes: &HashMap<Pubkey, u64>,
socket_addr_space: &SocketAddrSpace,
) -> Vec<(u64, &'a ContactInfo)> {
let mut rng = rand::thread_rng();
let active_cutoff = now.saturating_sub(PULL_ACTIVE_TIMEOUT_MS);
@@ -292,7 +296,7 @@ impl CrdsGossipPull {
})
.filter(|v| {
v.id != *self_id
&& ContactInfo::is_valid_address(&v.gossip)
&& ContactInfo::is_valid_address(&v.gossip, socket_addr_space)
&& (self_shred_version == 0 || self_shred_version == v.shred_version)
&& gossip_validators
.map_or(true, |gossip_validators| gossip_validators.contains(&v.id))
@@ -698,7 +702,15 @@ pub(crate) mod tests {
stakes.insert(id, i * 100);
}
let now = 1024;
let mut options = node.pull_options(&crds, &me.label().pubkey(), 0, now, None, &stakes);
let mut options = node.pull_options(
&crds,
&me.label().pubkey(),
0,
now,
None,
&stakes,
&SocketAddrSpace::Unspecified,
);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@@ -748,7 +760,15 @@ pub(crate) mod tests {
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.pull_options(&crds, &me.label().pubkey(), 123, 0, None, &stakes)
.pull_options(
&crds,
&me.label().pubkey(),
123,
0,
None,
&stakes,
&SocketAddrSpace::Unspecified,
)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
@@ -758,7 +778,15 @@ pub(crate) mod tests {
// spy nodes will see all
let options = node
.pull_options(&crds, &spy.label().pubkey(), 0, 0, None, &stakes)
.pull_options(
&crds,
&spy.label().pubkey(),
0,
0,
None,
&stakes,
&SocketAddrSpace::Unspecified,
)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
@@ -798,6 +826,7 @@ pub(crate) mod tests {
0,
Some(&gossip_validators),
&stakes,
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@@ -810,6 +839,7 @@ pub(crate) mod tests {
0,
Some(&gossip_validators),
&stakes,
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@@ -822,6 +852,7 @@ pub(crate) mod tests {
0,
Some(&gossip_validators),
&stakes,
&SocketAddrSpace::Unspecified,
);
assert_eq!(options.len(), 1);
assert_eq!(options[0].1.id, node_123.pubkey());
@@ -940,6 +971,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
),
Err(CrdsGossipError::NoPeers)
);
@@ -957,6 +989,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
),
Err(CrdsGossipError::NoPeers)
);
@@ -979,6 +1012,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
);
let (peer, _) = req.unwrap();
assert_eq!(peer, *new.contact_info().unwrap());
@@ -998,6 +1032,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
);
// Even though the offline node should have higher weight, we shouldn't request from it
// until we receive a ping.
@@ -1052,6 +1087,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE, // bloom_size
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
)
.unwrap();
peer
@@ -1129,6 +1165,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&Mutex::new(ping_cache),
&mut pings,
&SocketAddrSpace::Unspecified,
);
let mut dest_crds = Crds::default();
@@ -1219,6 +1256,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&Mutex::new(ping_cache),
&mut pings,
&SocketAddrSpace::Unspecified,
);
let mut dest_crds = Crds::default();
@@ -1293,6 +1331,7 @@ pub(crate) mod tests {
PACKET_DATA_SIZE,
&ping_cache,
&mut pings,
&SocketAddrSpace::Unspecified,
);
let (_, filters) = req.unwrap();
let filters: Vec<_> = filters.into_iter().map(|f| (caller.clone(), f)).collect();
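To make the peer filtering concrete, a simplified restatement of the pull-side predicate shown above (the gossip-validators check is omitted for brevity):

use solana_gossip::contact_info::ContactInfo;
use solana_sdk::pubkey::Pubkey;
use solana_streamer::socket::SocketAddrSpace;

// A candidate is only eligible for a pull request when its gossip address
// clears the configured address-space policy.
fn eligible_pull_peer(
    candidate: &ContactInfo,
    self_id: &Pubkey,
    self_shred_version: u16,
    socket_addr_space: &SocketAddrSpace,
) -> bool {
    candidate.id != *self_id
        && ContactInfo::is_valid_address(&candidate.gossip, socket_addr_space)
        && (self_shred_version == 0 || self_shred_version == candidate.shred_version)
}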

View File

@@ -24,6 +24,7 @@ use {
rand::{seq::SliceRandom, Rng},
solana_runtime::bloom::{AtomicBloom, Bloom},
solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::timestamp},
solana_streamer::socket::SocketAddrSpace,
std::{
cmp,
collections::{HashMap, HashSet},
@@ -263,6 +264,7 @@ impl CrdsGossipPush {
self_shred_version: u16,
network_size: usize,
ratio: usize,
socket_addr_space: &SocketAddrSpace,
) {
const BLOOM_FALSE_RATE: f64 = 0.1;
const BLOOM_MAX_BITS: usize = 1024 * 8 * 4;
@@ -274,7 +276,14 @@ impl CrdsGossipPush {
let need = Self::compute_need(self.num_active, self.active_set.len(), ratio);
let mut new_items = HashMap::new();
let (weights, peers): (Vec<_>, Vec<_>) = self
.push_options(crds, self_id, self_shred_version, stakes, gossip_validators)
.push_options(
crds,
self_id,
self_shred_version,
stakes,
gossip_validators,
socket_addr_space,
)
.into_iter()
.unzip();
if peers.is_empty() {
@@ -315,6 +324,7 @@ impl CrdsGossipPush {
self_shred_version: u16,
stakes: &HashMap<Pubkey, u64>,
gossip_validators: Option<&HashSet<Pubkey>>,
socket_addr_space: &SocketAddrSpace,
) -> Vec<(u64, &'a ContactInfo)> {
let now = timestamp();
let mut rng = rand::thread_rng();
@@ -336,7 +346,7 @@ impl CrdsGossipPush {
})
.filter(|info| {
info.id != *self_id
&& ContactInfo::is_valid_address(&info.gossip)
&& ContactInfo::is_valid_address(&info.gossip, socket_addr_space)
&& self_shred_version == info.shred_version
&& gossip_validators.map_or(true, |gossip_validators| {
gossip_validators.contains(&info.id)
@@ -547,7 +557,16 @@ mod test {
)));
assert_eq!(crds.insert(value1.clone(), now), Ok(()));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@@ -557,7 +576,16 @@ mod test {
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
assert_eq!(crds.insert(value2.clone(), now), Ok(()));
for _ in 0..30 {
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
if push.active_set.get(&value2.label().pubkey()).is_some() {
break;
}
@@ -570,7 +598,16 @@ mod test {
));
assert_eq!(crds.insert(value2.clone(), now), Ok(()));
}
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
assert_eq!(push.active_set.len(), push.num_active);
}
#[test]
@@ -590,7 +627,14 @@ mod test {
stakes.insert(id, i * 100);
push.last_pushed_to.put(id, time);
}
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes, None);
let mut options = push.push_options(
&crds,
&Pubkey::default(),
0,
&stakes,
None,
&SocketAddrSpace::Unspecified,
);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@@ -641,7 +685,14 @@ mod test {
// shred version 123 should ignore nodes with versions 0 and 456
let options = node
.push_options(&crds, &me.label().pubkey(), 123, &stakes, None)
.push_options(
&crds,
&me.label().pubkey(),
123,
&stakes,
None,
&SocketAddrSpace::Unspecified,
)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
@@ -651,7 +702,14 @@ mod test {
// spy nodes should not push to people on different shred versions
let options = node
.push_options(&crds, &spy.label().pubkey(), 0, &stakes, None)
.push_options(
&crds,
&spy.label().pubkey(),
0,
&stakes,
None,
&SocketAddrSpace::Unspecified,
)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
@@ -688,6 +746,7 @@ mod test {
0,
&stakes,
Some(&gossip_validators),
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@@ -700,6 +759,7 @@ mod test {
0,
&stakes,
Some(&gossip_validators),
&SocketAddrSpace::Unspecified,
);
assert!(options.is_empty());
@@ -711,6 +771,7 @@ mod test {
0,
&stakes,
Some(&gossip_validators),
&SocketAddrSpace::Unspecified,
);
assert_eq!(options.len(), 1);
@@ -727,7 +788,16 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), now), Ok(()));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
@@ -762,7 +832,16 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), peers[2].clone(), now),
Ok(())
);
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
// push 3's contact info to 1 and 2 and 3
let expected: HashMap<_, _> = vec![
@@ -784,7 +863,16 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(()));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&solana_sdk::pubkey::new_rand(),
@@ -811,7 +899,16 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer, 0), Ok(()));
push.refresh_push_active_set(&crds, &HashMap::new(), None, &Pubkey::default(), 0, 1, 1);
push.refresh_push_active_set(
&crds,
&HashMap::new(),
None,
&Pubkey::default(),
0,
1,
1,
&SocketAddrSpace::Unspecified,
);
let mut ci = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
ci.wallclock = 1;

View File

@@ -13,6 +13,7 @@ use {
pubkey::Pubkey,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_streamer::streamer,
std::{
collections::HashSet,
@@ -47,6 +48,7 @@ impl GossipService {
&cluster_info.id(),
gossip_socket.local_addr().unwrap()
);
let socket_addr_space = *cluster_info.socket_addr_space();
let t_receiver = streamer::receiver(
gossip_socket.clone(),
exit,
@@ -82,7 +84,12 @@ impl GossipService {
// https://github.com/rust-lang/rust/issues/54267
// responder thread should start after response_sender.clone(). see:
// https://github.com/rust-lang/rust/issues/39364#issuecomment-381446873
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);
let t_responder = streamer::responder(
"gossip",
gossip_socket,
response_receiver,
socket_addr_space,
);
let thread_hdls = vec![
t_receiver,
t_responder,
@@ -105,6 +112,7 @@ impl GossipService {
pub fn discover_cluster(
entrypoint: &SocketAddr,
num_nodes: usize,
socket_addr_space: SocketAddrSpace,
) -> std::io::Result<Vec<ContactInfo>> {
const DISCOVER_CLUSTER_TIMEOUT: Duration = Duration::from_secs(120);
let (_all_peers, validators) = discover(
@@ -116,6 +124,7 @@ pub fn discover_cluster(
None, // find_node_by_gossip_addr
None, // my_gossip_addr
0, // my_shred_version
socket_addr_space,
)?;
Ok(validators)
}
@@ -129,6 +138,7 @@ pub fn discover(
find_node_by_gossip_addr: Option<&SocketAddr>,
my_gossip_addr: Option<&SocketAddr>,
my_shred_version: u16,
socket_addr_space: SocketAddrSpace,
) -> std::io::Result<(
Vec<ContactInfo>, // all gossip peers
Vec<ContactInfo>, // tvu peers (validators)
@@ -143,6 +153,7 @@ pub fn discover(
my_gossip_addr,
my_shred_version,
true, // should_check_duplicate_instance,
socket_addr_space,
);
let id = spy_ref.id();
@@ -189,28 +200,31 @@ pub fn discover(
}
/// Creates a ThinClient per valid node
pub fn get_clients(nodes: &[ContactInfo]) -> Vec<ThinClient> {
pub fn get_clients(nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace) -> Vec<ThinClient> {
nodes
.iter()
.filter_map(ContactInfo::valid_client_facing_addr)
.filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
.map(|addrs| create_client(addrs, VALIDATOR_PORT_RANGE))
.collect()
}
/// Creates a ThinClient by selecting a valid node at random
pub fn get_client(nodes: &[ContactInfo]) -> ThinClient {
pub fn get_client(nodes: &[ContactInfo], socket_addr_space: &SocketAddrSpace) -> ThinClient {
let nodes: Vec<_> = nodes
.iter()
.filter_map(ContactInfo::valid_client_facing_addr)
.filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
.collect();
let select = thread_rng().gen_range(0, nodes.len());
create_client(nodes[select], VALIDATOR_PORT_RANGE)
}
pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) {
pub fn get_multi_client(
nodes: &[ContactInfo],
socket_addr_space: &SocketAddrSpace,
) -> (ThinClient, usize) {
let addrs: Vec<_> = nodes
.iter()
.filter_map(ContactInfo::valid_client_facing_addr)
.filter_map(|node| ContactInfo::valid_client_facing_addr(node, socket_addr_space))
.collect();
let rpc_addrs: Vec<_> = addrs.iter().map(|addr| addr.0).collect();
let tpu_addrs: Vec<_> = addrs.iter().map(|addr| addr.1).collect();
@@ -301,13 +315,14 @@ fn make_gossip_node(
gossip_addr: Option<&SocketAddr>,
shred_version: u16,
should_check_duplicate_instance: bool,
socket_addr_space: SocketAddrSpace,
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
} else {
ClusterInfo::spy_node(&keypair.pubkey(), shred_version)
};
let cluster_info = ClusterInfo::new(node, keypair);
let cluster_info = ClusterInfo::new(node, keypair, socket_addr_space);
if let Some(entrypoint) = entrypoint {
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
}
@@ -337,7 +352,11 @@ mod tests {
fn test_exit() {
let exit = Arc::new(AtomicBool::new(false));
let tn = Node::new_localhost();
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
let cluster_info = ClusterInfo::new(
tn.info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
let c = Arc::new(cluster_info);
let d = GossipService::new(
&c,
@@ -360,7 +379,11 @@ mod tests {
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let peer0_info = ContactInfo::new_localhost(&peer0, 0);
let peer1_info = ContactInfo::new_localhost(&peer1, 0);
let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
let cluster_info = ClusterInfo::new(
contact_info,
Arc::new(keypair),
SocketAddrSpace::Unspecified,
);
cluster_info.insert_info(peer0_info.clone());
cluster_info.insert_info(peer1_info);
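A hedged end-to-end sketch of the discovery path with the new parameter, combining discover_cluster and get_multi_client as changed above (the entrypoint is whatever address the caller already has):

use solana_gossip::gossip_service::{discover_cluster, get_multi_client};
use solana_streamer::socket::SocketAddrSpace;
use std::net::SocketAddr;

// The same policy is threaded through discovery and client creation, so a
// private-only cluster is reachable only when the caller opted in.
fn connect(entrypoint: &SocketAddr, allow_private_addr: bool) -> std::io::Result<()> {
    let socket_addr_space = SocketAddrSpace::new(allow_private_addr);
    let validators = discover_cluster(entrypoint, 1, socket_addr_space)?;
    let (_client, _num_nodes) = get_multi_client(&validators, &socket_addr_space);
    Ok(())
}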

View File

@@ -11,6 +11,7 @@ use {
},
solana_gossip::{contact_info::ContactInfo, gossip_service::discover},
solana_sdk::pubkey::Pubkey,
solana_streamer::socket::SocketAddrSpace,
std::{
error,
net::{IpAddr, Ipv4Addr, SocketAddr},
@@ -32,6 +33,13 @@ fn parse_matches() -> ArgMatches<'static> {
.about(crate_description!())
.version(solana_version::version!())
.setting(AppSettings::SubcommandRequiredElseHelp)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.subcommand(
SubCommand::with_name("rpc-url")
.about("Get an RPC URL for the cluster")
@@ -224,6 +232,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> {
let pubkey = matches
.value_of("node_pubkey")
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let identity_keypair = keypair_of(matches, "identity").map(Arc::new);
@@ -251,6 +260,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> {
None, // find_node_by_gossip_addr
Some(&gossip_addr), // my_gossip_addr
shred_version,
socket_addr_space,
)?;
process_spy_results(timeout, validators, num_nodes, num_nodes_exactly, pubkey);
@@ -273,6 +283,7 @@ fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> {
let entrypoint_addr = parse_entrypoint(matches);
let timeout = value_t_or_exit!(matches, "timeout", u64);
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let (_all_peers, validators) = discover(
None, // keypair
entrypoint_addr.as_ref(),
@@ -282,13 +293,14 @@ fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> {
entrypoint_addr.as_ref(), // find_node_by_gossip_addr
None, // my_gossip_addr
shred_version,
socket_addr_space,
)?;
let rpc_addrs: Vec<_> = validators
.iter()
.filter_map(|contact_info| {
if (any || all || Some(contact_info.gossip) == entrypoint_addr)
&& ContactInfo::is_valid_address(&contact_info.rpc)
&& ContactInfo::is_valid_address(&contact_info.rpc, &socket_addr_space)
{
return Some(contact_info.rpc);
}

View File

@@ -7,7 +7,8 @@ use {
contact_info::ContactInfo,
deprecated::{shuffle_peers_and_index, sorted_retransmit_peers_and_stakes},
},
solana_sdk::pubkey::Pubkey,
solana_sdk::{pubkey::Pubkey, signer::keypair::Keypair},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet},
sync::{
@@ -79,7 +80,11 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
// describe the leader
let leader_info = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.clone());
let cluster_info = ClusterInfo::new(
leader_info.clone(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
);
// setup staked nodes
let mut staked_nodes = HashMap::new();

View File

@@ -21,6 +21,7 @@ use {
signature::{Keypair, Signer},
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::{
collections::{HashMap, HashSet},
ops::Deref,
@@ -252,6 +253,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
});
let mut total_bytes = bytes_tx;
@@ -415,6 +417,7 @@ fn network_run_push(
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
});
}
@@ -489,6 +492,7 @@ fn network_run_pull(
cluster_info::MAX_BLOOM_SIZE,
from.ping_cache.deref(),
&mut pings,
&SocketAddrSpace::Unspecified,
)
.ok()?;
let from_pubkey = from.keypair.pubkey();
@@ -709,6 +713,7 @@ fn test_prune_errors() {
0, // shred version
&HashMap::new(), // stakes
None, // gossip validators
&SocketAddrSpace::Unspecified,
);
let now = timestamp();
//incorrect dest

View File

@@ -18,6 +18,7 @@ use {
timing::timestamp,
transaction::Transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
@@ -33,7 +34,11 @@ use {
fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), keypair));
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
@@ -56,7 +61,11 @@ fn test_node_with_bank(
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), node_keypair));
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
@@ -209,7 +218,13 @@ pub fn cluster_info_retransmit() {
p.meta.size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
ClusterInfo::retransmit_to(&retransmit_peers, &p, &tn1, false);
ClusterInfo::retransmit_to(
&retransmit_peers,
&p,
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {

View File

@@ -31,6 +31,7 @@ solana-rpc = { path = "../rpc", version = "=1.7.9" }
solana-runtime = { path = "../runtime", version = "=1.7.9" }
solana-sdk = { path = "../sdk", version = "=1.7.9" }
solana-stake-program = { path = "../programs/stake", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
solana-vote-program = { path = "../programs/vote", version = "=1.7.9" }
tempfile = "3.1.0"

View File

@@ -4,6 +4,7 @@ use solana_core::validator::ValidatorConfig;
use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_streamer::socket::SocketAddrSpace;
use std::path::PathBuf;
use std::sync::Arc;
@@ -39,7 +40,12 @@ pub trait Cluster {
fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient>;
fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo>;
fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo;
fn restart_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
fn restart_node(
&mut self,
pubkey: &Pubkey,
cluster_validator_info: ClusterValidatorInfo,
socket_addr_space: SocketAddrSpace,
);
fn create_restart_context(
&mut self,
pubkey: &Pubkey,
@@ -48,7 +54,13 @@ pub trait Cluster {
fn restart_node_with_context(
cluster_validator_info: ClusterValidatorInfo,
restart_context: (Node, Option<ContactInfo>),
socket_addr_space: SocketAddrSpace,
) -> ClusterValidatorInfo;
fn add_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
fn exit_restart_node(&mut self, pubkey: &Pubkey, config: ValidatorConfig);
fn exit_restart_node(
&mut self,
pubkey: &Pubkey,
config: ValidatorConfig,
socket_addr_space: SocketAddrSpace,
);
}
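For orientation, a hedged sketch of driving the widened trait from a test (module paths for the local-cluster crate are assumed):

use solana_local_cluster::cluster::Cluster;
use solana_sdk::pubkey::Pubkey;
use solana_streamer::socket::SocketAddrSpace;

// Restarting a node now states the address-space policy the restarted
// validator should run with.
fn bounce_node<C: Cluster>(cluster: &mut C, pubkey: &Pubkey) {
    let info = cluster.exit_node(pubkey);
    cluster.restart_node(pubkey, info, SocketAddrSpace::Unspecified);
}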

View File

@@ -28,6 +28,7 @@ use solana_sdk::{
timing::duration_as_ms,
transport::TransportError,
};
use solana_streamer::socket::SocketAddrSpace;
use std::{
collections::{HashMap, HashSet},
path::Path,
@@ -42,8 +43,10 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher + Sync + Send>(
funding_keypair: &Keypair,
nodes: usize,
ignore_nodes: HashSet<Pubkey, S>,
socket_addr_space: SocketAddrSpace,
) {
let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
let cluster_nodes =
discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
assert!(cluster_nodes.len() >= nodes);
let ignore_nodes = Arc::new(ignore_nodes);
cluster_nodes.par_iter().for_each(|ingress_node| {
@@ -182,9 +185,11 @@ pub fn kill_entry_and_spend_and_verify_rest(
funding_keypair: &Keypair,
nodes: usize,
slot_millis: u64,
socket_addr_space: SocketAddrSpace,
) {
info!("kill_entry_and_spend_and_verify_rest...");
let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
let cluster_nodes =
discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap();
assert!(cluster_nodes.len() >= nodes);
let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE);
// sleep long enough to make sure we are in epoch 3

View File

@@ -37,6 +37,7 @@ use solana_sdk::{
transaction::Transaction,
};
use solana_stake_program::{config::create_account as create_stake_config_account, stake_state};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::{
vote_instruction,
vote_state::{VoteInit, VoteState},
@@ -108,6 +109,7 @@ impl LocalCluster {
num_nodes: usize,
cluster_lamports: u64,
lamports_per_node: u64,
socket_addr_space: SocketAddrSpace,
) -> Self {
let stakes: Vec<_> = (0..num_nodes).map(|_| lamports_per_node).collect();
let mut config = ClusterConfig {
@@ -119,10 +121,10 @@ impl LocalCluster {
),
..ClusterConfig::default()
};
Self::new(&mut config)
Self::new(&mut config, socket_addr_space)
}
pub fn new(config: &mut ClusterConfig) -> Self {
pub fn new(config: &mut ClusterConfig, socket_addr_space: SocketAddrSpace) -> Self {
assert_eq!(config.validator_configs.len(), config.node_stakes.len());
let mut validator_keys = {
if let Some(ref keys) = config.validator_keys {
@@ -219,6 +221,7 @@ impl LocalCluster {
&leader_config,
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
socket_addr_space,
);
let mut validators = HashMap::new();
@@ -262,22 +265,35 @@ impl LocalCluster {
*stake,
key.clone(),
node_pubkey_to_vote_key.get(&key.pubkey()).cloned(),
socket_addr_space,
);
}
let mut listener_config = safe_clone_config(&config.validator_configs[0]);
listener_config.voting_disabled = true;
(0..config.num_listeners).for_each(|_| {
cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()), None);
cluster.add_validator(
&listener_config,
0,
Arc::new(Keypair::new()),
None,
socket_addr_space,
);
});
discover_cluster(
&cluster.entry_point_info.gossip,
config.node_stakes.len() + config.num_listeners as usize,
socket_addr_space,
)
.unwrap();
discover_cluster(&cluster.entry_point_info.gossip, config.node_stakes.len()).unwrap();
discover_cluster(
&cluster.entry_point_info.gossip,
config.node_stakes.len(),
socket_addr_space,
)
.unwrap();
cluster
}
@@ -305,6 +321,7 @@ impl LocalCluster {
stake: u64,
validator_keypair: Arc<Keypair>,
mut voting_keypair: Option<Arc<Keypair>>,
socket_addr_space: SocketAddrSpace,
) -> Pubkey {
let client = create_client(
self.entry_point_info.client_facing_addr(),
@@ -361,6 +378,7 @@ impl LocalCluster {
&config,
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
socket_addr_space,
);
let validator_pubkey = validator_keypair.pubkey();
@@ -400,7 +418,12 @@ impl LocalCluster {
Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports)
}
pub fn check_for_new_roots(&self, num_new_roots: usize, test_name: &str) {
pub fn check_for_new_roots(
&self,
num_new_roots: usize,
test_name: &str,
socket_addr_space: SocketAddrSpace,
) {
let alive_node_contact_infos: Vec<_> = self
.validators
.values()
@@ -411,6 +434,7 @@ impl LocalCluster {
let cluster_nodes = discover_cluster(
&alive_node_contact_infos[0].gossip,
alive_node_contact_infos.len(),
socket_addr_space,
)
.unwrap();
info!("{} discovered {} nodes", test_name, cluster_nodes.len());
@@ -419,7 +443,12 @@ impl LocalCluster {
info!("{} done waiting for roots", test_name);
}
pub fn check_no_new_roots(&self, num_slots_to_wait: usize, test_name: &str) {
pub fn check_no_new_roots(
&self,
num_slots_to_wait: usize,
test_name: &str,
socket_addr_space: SocketAddrSpace,
) {
let alive_node_contact_infos: Vec<_> = self
.validators
.values()
@@ -430,6 +459,7 @@ impl LocalCluster {
let cluster_nodes = discover_cluster(
&alive_node_contact_infos[0].gossip,
alive_node_contact_infos.len(),
socket_addr_space,
)
.unwrap();
info!("{} discovered {} nodes", test_name, cluster_nodes.len());
@@ -646,10 +676,18 @@ impl Cluster for LocalCluster {
(node, entry_point_info)
}
fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) {
fn restart_node(
&mut self,
pubkey: &Pubkey,
mut cluster_validator_info: ClusterValidatorInfo,
socket_addr_space: SocketAddrSpace,
) {
let restart_context = self.create_restart_context(pubkey, &mut cluster_validator_info);
let cluster_validator_info =
Self::restart_node_with_context(cluster_validator_info, restart_context);
let cluster_validator_info = Self::restart_node_with_context(
cluster_validator_info,
restart_context,
socket_addr_space,
);
self.add_node(pubkey, cluster_validator_info);
}
@@ -660,6 +698,7 @@ impl Cluster for LocalCluster {
fn restart_node_with_context(
mut cluster_validator_info: ClusterValidatorInfo,
(node, entry_point_info): (Node, Option<ContactInfo>),
socket_addr_space: SocketAddrSpace,
) -> ClusterValidatorInfo {
// Restart the node
let validator_info = &cluster_validator_info.info;
@@ -677,15 +716,21 @@ impl Cluster for LocalCluster {
&safe_clone_config(&cluster_validator_info.config),
true, // should_check_duplicate_instance
Arc::new(RwLock::new(ValidatorStartProgress::default())),
socket_addr_space,
);
cluster_validator_info.validator = Some(restarted_node);
cluster_validator_info
}
fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) {
fn exit_restart_node(
&mut self,
pubkey: &Pubkey,
validator_config: ValidatorConfig,
socket_addr_space: SocketAddrSpace,
) {
let mut cluster_validator_info = self.exit_node(pubkey);
cluster_validator_info.config = validator_config;
self.restart_node(pubkey, cluster_validator_info);
self.restart_node(pubkey, cluster_validator_info, socket_addr_space);
}
fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo> {
@@ -708,7 +753,8 @@ mod test {
fn test_local_cluster_start_and_exit() {
solana_logger::setup();
let num_nodes = 1;
let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3);
let cluster =
LocalCluster::new_with_equal_stakes(num_nodes, 100, 3, SocketAddrSpace::Unspecified);
assert_eq!(cluster.validators.len(), num_nodes);
}
@@ -728,7 +774,7 @@ mod test {
stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH as u64,
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
assert_eq!(cluster.validators.len(), NUM_NODES);
}
}
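And for downstream test authors, a hedged sketch of spinning up a LocalCluster with the new argument (crate path assumed; stake and lamport values are illustrative):

use solana_local_cluster::local_cluster::LocalCluster;
use solana_streamer::socket::SocketAddrSpace;

// Local tests stay permissive so that loopback validators can gossip with
// each other.
fn start_small_cluster() -> LocalCluster {
    LocalCluster::new_with_equal_stakes(
        2,      // num_nodes
        10_000, // cluster_lamports
        100,    // lamports_per_node
        SocketAddrSpace::Unspecified,
    )
}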

View File

@@ -55,6 +55,7 @@ use solana_sdk::{
timing::timestamp,
transaction::Transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::{
vote_instruction,
vote_state::{Vote, MAX_LOCKOUT_HISTORY},
@@ -92,7 +93,7 @@ fn test_ledger_cleanup_service() {
validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// 200ms each * 100 = 20 seconds, so sleep a little longer than that.
sleep(Duration::from_secs(60));
@@ -101,6 +102,7 @@ fn test_ledger_cleanup_service() {
&cluster.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
cluster.close_preserve_ledgers();
//check everyone's ledgers and make sure only ~100 slots are stored
@@ -122,12 +124,14 @@ fn test_spend_and_verify_all_nodes_1() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
error!("test_spend_and_verify_all_nodes_1");
let num_nodes = 1;
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@@ -137,12 +141,14 @@ fn test_spend_and_verify_all_nodes_2() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
error!("test_spend_and_verify_all_nodes_2");
let num_nodes = 2;
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@@ -152,12 +158,14 @@ fn test_spend_and_verify_all_nodes_3() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
error!("test_spend_and_verify_all_nodes_3");
let num_nodes = 3;
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@@ -166,7 +174,8 @@ fn test_spend_and_verify_all_nodes_3() {
fn test_local_cluster_signature_subscribe() {
solana_logger::setup_with_default(RUST_LOG_FILTER);
let num_nodes = 2;
let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let cluster =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
let nodes = cluster.get_node_pubkeys();
// Get non leader
@@ -243,12 +252,14 @@ fn test_spend_and_verify_all_nodes_env_num_nodes() {
.expect("please set environment variable NUM_NODES")
.parse()
.expect("could not parse NUM_NODES as a number");
let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
let local =
LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100, SocketAddrSpace::Unspecified);
cluster_tests::spend_and_verify_all_nodes(
&local.entry_point_info,
&local.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@@ -266,7 +277,7 @@ fn test_leader_failure_4() {
validator_configs: make_identical_validator_configs(&validator_config, num_nodes),
..ClusterConfig::default()
};
let local = LocalCluster::new(&mut config);
let local = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
cluster_tests::kill_entry_and_spend_and_verify_rest(
&local.entry_point_info,
@@ -279,6 +290,7 @@ fn test_leader_failure_4() {
&local.funding_keypair,
num_nodes,
config.ticks_per_slot * config.poh_config.target_tick_duration.as_millis() as u64,
SocketAddrSpace::Unspecified,
);
}
@@ -368,7 +380,7 @@ fn run_cluster_partition<C>(
"PARTITION_TEST starting cluster with {:?} partitions slots_per_epoch: {}",
partitions, config.slots_per_epoch,
);
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
info!("PARTITION_TEST spend_and_verify_all_nodes(), ensure all nodes are caught up");
cluster_tests::spend_and_verify_all_nodes(
@@ -376,9 +388,15 @@ fn run_cluster_partition<C>(
&cluster.funding_keypair,
num_nodes,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
num_nodes,
SocketAddrSpace::Unspecified,
)
.unwrap();
// Check epochs have correct number of slots
info!("PARTITION_TEST sleeping until partition starting condition",);
@@ -425,7 +443,7 @@ fn run_cluster_partition<C>(
fn test_cluster_partition_1_2() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&[vec![1], vec![1, 1]],
@@ -445,7 +463,7 @@ fn test_cluster_partition_1_2() {
fn test_cluster_partition_1_1() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&[vec![1], vec![1]],
@@ -465,7 +483,7 @@ fn test_cluster_partition_1_1() {
fn test_cluster_partition_1_1_1() {
let empty = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&[vec![1], vec![1], vec![1]],
@@ -525,7 +543,7 @@ fn test_kill_heaviest_partition() {
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
info!("Killing validator with id: {}", validator_to_kill);
cluster.exit_node(&validator_to_kill);
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_cluster_partition(
&partitions,
@@ -880,6 +898,7 @@ fn test_switch_threshold_uses_gossip_votes() {
.keypair
.pubkey(),
heavier_node_gossip,
&SocketAddrSpace::Unspecified,
)
.unwrap();
@@ -962,7 +981,7 @@ fn test_kill_partition_switch_threshold_no_progress() {
|_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_no_new_roots(400, "PARTITION_TEST");
cluster.check_no_new_roots(400, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
// This kills `max_failures_stake`, so no progress should be made
@@ -1015,7 +1034,7 @@ fn test_kill_partition_switch_threshold_progress() {
|_: &mut LocalCluster, _: &[Pubkey], _: Vec<ClusterValidatorInfo>, _: &mut ()| {};
let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_kill_partition_switch_threshold(
&[&[(failures_stake as usize, 16)]],
@@ -1203,6 +1222,7 @@ fn test_fork_choice_refresh_old_votes() {
cluster.restart_node(
&context.smallest_validator_key,
context.alive_stake3_info.take().unwrap(),
SocketAddrSpace::Unspecified,
);
loop {
@@ -1246,7 +1266,7 @@ fn test_fork_choice_refresh_old_votes() {
// for lockouts built during partition to resolve and gives validators an opportunity
// to try and switch forks)
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut PartitionContext| {
cluster.check_for_new_roots(16, "PARTITION_TEST");
cluster.check_for_new_roots(16, "PARTITION_TEST", SocketAddrSpace::Unspecified);
};
run_kill_partition_switch_threshold(
@@ -1273,16 +1293,19 @@ fn test_two_unbalanced_stakes() {
let num_ticks_per_slot = 10;
let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
let mut cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![999_990, 3],
cluster_lamports: 1_000_000,
validator_configs: make_identical_validator_configs(&validator_config, 2),
ticks_per_slot: num_ticks_per_slot,
slots_per_epoch: num_slots_per_epoch,
stakers_slot_offset: num_slots_per_epoch,
poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
..ClusterConfig::default()
});
let mut cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![999_990, 3],
cluster_lamports: 1_000_000,
validator_configs: make_identical_validator_configs(&validator_config, 2),
ticks_per_slot: num_ticks_per_slot,
slots_per_epoch: num_slots_per_epoch,
stakers_slot_offset: num_slots_per_epoch,
poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
cluster_tests::sleep_n_epochs(
10.0,
@@ -1307,9 +1330,14 @@ fn test_forwarding() {
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 2),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
2,
SocketAddrSpace::Unspecified,
)
.unwrap();
assert!(cluster_nodes.len() >= 2);
let leader_pubkey = cluster.entry_point_info.id;
@@ -1331,15 +1359,18 @@ fn test_restart_node() {
let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH * 2;
let ticks_per_slot = 16;
let validator_config = ValidatorConfig::default();
let mut cluster = LocalCluster::new(&mut ClusterConfig {
node_stakes: vec![100; 1],
cluster_lamports: 100,
validator_configs: vec![safe_clone_config(&validator_config)],
ticks_per_slot,
slots_per_epoch,
stakers_slot_offset: slots_per_epoch,
..ClusterConfig::default()
});
let mut cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![100; 1],
cluster_lamports: 100,
validator_configs: vec![safe_clone_config(&validator_config)],
ticks_per_slot,
slots_per_epoch,
stakers_slot_offset: slots_per_epoch,
..ClusterConfig::default()
},
SocketAddrSpace::Unspecified,
);
let nodes = cluster.get_node_pubkeys();
cluster_tests::sleep_n_epochs(
1.0,
@@ -1347,7 +1378,7 @@ fn test_restart_node() {
clock::DEFAULT_TICKS_PER_SLOT,
slots_per_epoch,
);
cluster.exit_restart_node(&nodes[0], validator_config);
cluster.exit_restart_node(&nodes[0], validator_config, SocketAddrSpace::Unspecified);
cluster_tests::sleep_n_epochs(
0.5,
&cluster.genesis_config.poh_config,
@@ -1372,8 +1403,13 @@ fn test_listener_startup() {
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
4,
SocketAddrSpace::Unspecified,
)
.unwrap();
assert_eq!(cluster_nodes.len(), 4);
}
@@ -1389,8 +1425,13 @@ fn test_mainnet_beta_cluster_type() {
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), 1),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
1,
SocketAddrSpace::Unspecified,
)
.unwrap();
assert_eq!(cluster_nodes.len(), 1);
let client = create_client(
@@ -1497,7 +1538,10 @@ fn test_frozen_account_from_genesis() {
}],
..ClusterConfig::default()
};
generate_frozen_account_panic(LocalCluster::new(&mut config), validator_identity);
generate_frozen_account_panic(
LocalCluster::new(&mut config, SocketAddrSpace::Unspecified),
validator_identity,
);
}
#[test]
@@ -1521,7 +1565,7 @@ fn test_frozen_account_from_snapshot() {
),
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let snapshot_package_output_path = &snapshot_test_config
.validator_config
@@ -1538,7 +1582,11 @@ fn test_frozen_account_from_snapshot() {
// Restart the validator from a snapshot
let validator_info = cluster.exit_node(&validator_identity.pubkey());
cluster.restart_node(&validator_identity.pubkey(), validator_info);
cluster.restart_node(
&validator_identity.pubkey(),
validator_info,
SocketAddrSpace::Unspecified,
);
generate_frozen_account_panic(cluster, validator_identity);
}
@@ -1565,10 +1613,15 @@ fn test_consistency_halt() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
sleep(Duration::from_millis(5000));
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
1,
SocketAddrSpace::Unspecified,
)
.unwrap();
info!("num_nodes: {}", cluster_nodes.len());
// Add a validator with the leader as trusted, it should halt when it detects
@@ -1592,19 +1645,28 @@ fn test_consistency_halt() {
validator_stake as u64,
Arc::new(Keypair::new()),
None,
SocketAddrSpace::Unspecified,
);
let num_nodes = 2;
assert_eq!(
discover_cluster(&cluster.entry_point_info.gossip, num_nodes)
.unwrap()
.len(),
discover_cluster(
&cluster.entry_point_info.gossip,
num_nodes,
SocketAddrSpace::Unspecified
)
.unwrap()
.len(),
num_nodes
);
// Check for only 1 node on the network.
let mut encountered_error = false;
loop {
let discover = discover_cluster(&cluster.entry_point_info.gossip, 2);
let discover = discover_cluster(
&cluster.entry_point_info.gossip,
2,
SocketAddrSpace::Unspecified,
);
match discover {
Err(_) => {
encountered_error = true;
@@ -1656,7 +1718,7 @@ fn test_snapshot_download() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// Get slot after which this was generated
let snapshot_package_output_path = &leader_snapshot_test_config
@@ -1696,6 +1758,7 @@ fn test_snapshot_download() {
stake,
Arc::new(Keypair::new()),
None,
SocketAddrSpace::Unspecified,
);
}
@@ -1723,7 +1786,7 @@ fn test_snapshot_restart_tower() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// Let the nodes run for a while, then stop one of the validators
sleep(Duration::from_millis(5000));
@@ -1758,7 +1821,7 @@ fn test_snapshot_restart_tower() {
// Restart validator from snapshot, the validator's tower state in this snapshot
// will contain slots < the root bank of the snapshot. Validator should not panic.
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
// Test cluster can still make progress and get confirmations in tower
// Use the restarted node as the discovery point so that we get updated
@@ -1769,6 +1832,7 @@ fn test_snapshot_restart_tower() {
&cluster.funding_keypair,
1,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
@@ -1802,7 +1866,7 @@ fn test_snapshots_blockstore_floor() {
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
trace!("Waiting for snapshot tar to be generated with slot",);
@@ -1831,7 +1895,12 @@ fn test_snapshots_blockstore_floor() {
// Start up a new node from a snapshot
let validator_stake = 5;
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster_nodes = discover_cluster(
&cluster.entry_point_info.gossip,
1,
SocketAddrSpace::Unspecified,
)
.unwrap();
let mut trusted_validators = HashSet::new();
trusted_validators.insert(cluster_nodes[0].id);
validator_snapshot_test_config
@@ -1843,6 +1912,7 @@ fn test_snapshots_blockstore_floor() {
validator_stake,
Arc::new(Keypair::new()),
None,
SocketAddrSpace::Unspecified,
);
let all_pubkeys = cluster.get_node_pubkeys();
let validator_id = all_pubkeys
@@ -1911,7 +1981,7 @@ fn test_snapshots_restart_validity() {
// Create and reboot the node from snapshot `num_runs` times
let num_runs = 3;
let mut expected_balances = HashMap::new();
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
for i in 1..num_runs {
info!("run {}", i);
// Push transactions to one of the nodes and confirm that transactions were
@@ -1941,6 +2011,7 @@ fn test_snapshots_restart_validity() {
cluster.exit_restart_node(
&nodes[0],
safe_clone_config(&snapshot_test_config.validator_config),
SocketAddrSpace::Unspecified,
);
// Verify account balances on validator
@@ -1954,6 +2025,7 @@ fn test_snapshots_restart_validity() {
&cluster.funding_keypair,
1,
HashSet::new(),
SocketAddrSpace::Unspecified,
);
}
}
@@ -2016,10 +2088,10 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) {
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut cluster_config);
let cluster = LocalCluster::new(&mut cluster_config, SocketAddrSpace::Unspecified);
// Check for new roots
cluster.check_for_new_roots(16, "test_faulty_node");
cluster.check_for_new_roots(16, "test_faulty_node", SocketAddrSpace::Unspecified);
}
#[test]
@@ -2032,7 +2104,7 @@ fn test_wait_for_max_stake() {
validator_configs: make_identical_validator_configs(&validator_config, 4),
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let client = RpcClient::new_socket(cluster.entry_point_info.rpc);
assert!(client
@@ -2056,7 +2128,7 @@ fn test_no_voting() {
validator_configs: vec![validator_config],
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let client = cluster
.get_validator_client(&cluster.entry_point_info.id)
.unwrap();
@@ -2110,7 +2182,7 @@ fn test_optimistic_confirmation_violation_detection() {
skip_warmup_slots: true,
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let entry_point_id = cluster.entry_point_info.id;
// Let the nodes run for a while. Wait for validators to vote on slot `S`
// so that the vote on `S-1` is definitely in gossip and optimistic confirmation is
@@ -2161,7 +2233,11 @@ fn test_optimistic_confirmation_violation_detection() {
let buf = std::env::var("OPTIMISTIC_CONF_TEST_DUMP_LOG")
.err()
.map(|_| BufferRedirect::stderr().unwrap());
cluster.restart_node(&entry_point_id, exited_validator_info);
cluster.restart_node(
&entry_point_id,
exited_validator_info,
SocketAddrSpace::Unspecified,
);
// Wait for a root > prev_voted_slot to be set. Because the root is on a
// different fork than `prev_voted_slot`, then optimistic confirmation is
@@ -2229,7 +2305,7 @@ fn test_validator_saves_tower() {
validator_keys: Some(vec![(validator_identity_keypair.clone(), true)]),
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
@@ -2262,7 +2338,7 @@ fn test_validator_saves_tower() {
assert_eq!(tower1.root(), 0);
// Restart the validator and wait for a new root
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for the first root
@@ -2294,7 +2370,7 @@ fn test_validator_saves_tower() {
// without having to wait for that snapshot to be generated in this test
tower1.save(&validator_identity_keypair).unwrap();
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for a new root, demonstrating the validator was able to make progress from the older `tower1`
@@ -2326,7 +2402,7 @@ fn test_validator_saves_tower() {
remove_tower(&ledger_path, &validator_id);
validator_info.config.require_tower = false;
cluster.restart_node(&validator_id, validator_info);
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for a couple more slots to pass so another vote occurs
@@ -2469,7 +2545,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
skip_warmup_slots: true,
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let base_slot = 26; // S2
let next_slot_on_a = 27; // S3
@@ -2550,7 +2626,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
// Run validator C only to make it produce and vote on its own fork.
info!("Restart validator C again!!!");
let val_c_ledger_path = validator_c_info.info.ledger_path.clone();
cluster.restart_node(&validator_c_pubkey, validator_c_info);
cluster.restart_node(
&validator_c_pubkey,
validator_c_info,
SocketAddrSpace::Unspecified,
);
let mut votes_on_c_fork = std::collections::BTreeSet::new(); // S4 and S5
for _ in 0..100 {
@@ -2572,7 +2652,11 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
// Step 4:
// verify whether there was violation or not
info!("Restart validator A again!!!");
cluster.restart_node(&validator_a_pubkey, validator_a_info);
cluster.restart_node(
&validator_a_pubkey,
validator_a_info,
SocketAddrSpace::Unspecified,
);
// monitor for actual votes from validator A
let mut bad_vote_detected = false;
@@ -2659,7 +2743,7 @@ fn do_test_future_tower(cluster_mode: ClusterMode) {
skip_warmup_slots: true,
..ClusterConfig::default()
};
let mut cluster = LocalCluster::new(&mut config);
let mut cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
let val_a_ledger_path = cluster.ledger_path(&validator_a_pubkey);
@@ -2684,7 +2768,11 @@ fn do_test_future_tower(cluster_mode: ClusterMode) {
purge_slots(&blockstore, purged_slot_before_restart, 100);
}
cluster.restart_node(&validator_a_pubkey, validator_a_info);
cluster.restart_node(
&validator_a_pubkey,
validator_a_info,
SocketAddrSpace::Unspecified,
);
let mut newly_rooted = false;
let some_root_after_restart = purged_slot_before_restart + 25; // 25 is arbitrary; just wait a bit
@@ -2766,7 +2854,10 @@ fn test_hard_fork_invalidates_tower() {
skip_warmup_slots: true,
..ClusterConfig::default()
};
let cluster = std::sync::Arc::new(std::sync::Mutex::new(LocalCluster::new(&mut config)));
let cluster = std::sync::Arc::new(std::sync::Mutex::new(LocalCluster::new(
&mut config,
SocketAddrSpace::Unspecified,
)));
let val_a_ledger_path = cluster.lock().unwrap().ledger_path(&validator_a_pubkey);
@@ -2811,8 +2902,11 @@ fn test_hard_fork_invalidates_tower() {
.lock()
.unwrap()
.create_restart_context(&validator_a_pubkey, &mut validator_a_info);
let restarted_validator_info =
LocalCluster::restart_node_with_context(validator_a_info, restart_context);
let restarted_validator_info = LocalCluster::restart_node_with_context(
validator_a_info,
restart_context,
SocketAddrSpace::Unspecified,
);
cluster_for_a
.lock()
.unwrap()
@@ -2834,16 +2928,20 @@ fn test_hard_fork_invalidates_tower() {
}
// restart validator B normally
cluster
.lock()
.unwrap()
.restart_node(&validator_b_pubkey, validator_b_info);
cluster.lock().unwrap().restart_node(
&validator_b_pubkey,
validator_b_info,
SocketAddrSpace::Unspecified,
);
// validator A should now start so join its thread here
thread.join().unwrap();
// new slots should be rooted after hard-fork cluster relaunch
cluster.lock().unwrap().check_for_new_roots(16, "hard fork");
cluster
.lock()
.unwrap()
.check_for_new_roots(16, "hard fork", SocketAddrSpace::Unspecified);
}
#[test]
@@ -2902,7 +3000,11 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) {
let on_partition_before_resolved = |_: &mut LocalCluster, _: &mut ()| {};
let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| {
cluster.check_for_new_roots(20, "run_test_load_program_accounts_partition");
cluster.check_for_new_roots(
20,
"run_test_load_program_accounts_partition",
SocketAddrSpace::Unspecified,
);
exit.store(true, Ordering::Relaxed);
t_update.join().unwrap();
t_scan.join().unwrap();
@@ -3077,7 +3179,7 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
additional_accounts: starting_accounts,
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&mut config);
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
// Give the threads a client to use for querying the cluster
let all_pubkeys = cluster.get_node_pubkeys();
@@ -3093,7 +3195,11 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) {
scan_client_sender.send(scan_client).unwrap();
// Wait for some roots to pass
cluster.check_for_new_roots(40, "run_test_load_program_accounts");
cluster.check_for_new_roots(
40,
"run_test_load_program_accounts",
SocketAddrSpace::Unspecified,
);
// Exit and ensure no violations of consistency were found
exit.store(true, Ordering::Relaxed);

View File

@@ -38,6 +38,7 @@ solana-perf = { path = "../perf", version = "=1.7.9" }
solana-poh = { path = "../poh", version = "=1.7.9" }
solana-runtime = { path = "../runtime", version = "=1.7.9" }
solana-sdk = { path = "../sdk", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.7.9" }
solana-transaction-status = { path = "../transaction-status", version = "=1.7.9" }
solana-version = { path = "../version", version = "=1.7.9" }

View File

@@ -68,6 +68,7 @@ use {
sysvar::stake_history,
transaction::{self, Transaction, TransactionError},
},
solana_streamer::socket::SocketAddrSpace,
solana_transaction_status::{
EncodedConfirmedTransaction, Reward, RewardType, TransactionConfirmationStatus,
TransactionStatus, UiConfirmedBlock, UiTransactionEncoding,
@@ -276,7 +277,7 @@ impl JsonRpcRequestProcessor {
}
// Useful for unit testing
pub fn new_from_bank(bank: &Arc<Bank>) -> Self {
pub fn new_from_bank(bank: &Arc<Bank>, socket_addr_space: SocketAddrSpace) -> Self {
let genesis_hash = bank.hash();
let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(
&[bank.clone()],
@@ -284,7 +285,11 @@ impl JsonRpcRequestProcessor {
)));
let blockstore = Arc::new(Blockstore::open(&get_tmp_ledger_path!()).unwrap());
let exit = Arc::new(AtomicBool::new(false));
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
socket_addr_space,
));
let tpu_address = cluster_info.my_contact_info().tpu;
let (sender, receiver) = channel();
SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1);
@@ -2794,20 +2799,21 @@ pub mod rpc_full {
fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> {
debug!("get_cluster_nodes rpc request received");
let cluster_info = &meta.cluster_info;
fn valid_address_or_none(addr: &SocketAddr) -> Option<SocketAddr> {
if ContactInfo::is_valid_address(addr) {
let socket_addr_space = cluster_info.socket_addr_space();
let valid_address_or_none = |addr: &SocketAddr| -> Option<SocketAddr> {
if ContactInfo::is_valid_address(addr, socket_addr_space) {
Some(*addr)
} else {
None
}
}
};
let my_shred_version = cluster_info.my_shred_version();
Ok(cluster_info
.all_peers()
.iter()
.filter_map(|(contact_info, _)| {
if my_shred_version == contact_info.shred_version
&& ContactInfo::is_valid_address(&contact_info.gossip)
&& ContactInfo::is_valid_address(&contact_info.gossip, socket_addr_space)
{
let (version, feature_set) = if let Some(version) =
cluster_info.get_node_version(&contact_info.id)
@@ -4105,7 +4111,11 @@ pub mod tests {
let tx = system_transaction::transfer(&alice, pubkey, std::u64::MAX, blockhash);
let _ = bank.process_transaction(&tx);
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let tpu_address = cluster_info.my_contact_info().tpu;
cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr(
@@ -4180,7 +4190,8 @@ pub mod tests {
let bank = Arc::new(Bank::new(&genesis.genesis_config));
bank.transfer(20, &genesis.mint_keypair, &bob_pubkey)
.unwrap();
let request_processor = JsonRpcRequestProcessor::new_from_bank(&bank);
let request_processor =
JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
assert_eq!(request_processor.get_transaction_count(None), 1);
}
@@ -4189,7 +4200,7 @@ pub mod tests {
let genesis = create_genesis_config(20);
let mint_pubkey = genesis.mint_keypair.pubkey();
let bank = Arc::new(Bank::new(&genesis.genesis_config));
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_minimal::MinimalImpl.to_delegate());
@@ -4217,7 +4228,7 @@ pub mod tests {
let genesis = create_genesis_config(20);
let mint_pubkey = genesis.mint_keypair.pubkey();
let bank = Arc::new(Bank::new(&genesis.genesis_config));
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_minimal::MinimalImpl.to_delegate());
@@ -4358,7 +4369,7 @@ pub mod tests {
bank.transfer(4, &genesis.mint_keypair, &bob_pubkey)
.unwrap();
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_minimal::MinimalImpl.to_delegate());
@@ -5641,7 +5652,7 @@ pub mod tests {
fn test_rpc_send_bad_tx() {
let genesis = create_genesis_config(100);
let bank = Arc::new(Bank::new(&genesis.genesis_config));
let meta = JsonRpcRequestProcessor::new_from_bank(&bank);
let meta = JsonRpcRequestProcessor::new_from_bank(&bank, SocketAddrSpace::Unspecified);
let mut io = MetaIoHandler::default();
io.extend_with(rpc_full::FullImpl.to_delegate());
@@ -5668,8 +5679,10 @@ pub mod tests {
let mut io = MetaIoHandler::default();
io.extend_with(rpc_full::FullImpl.to_delegate());
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let tpu_address = cluster_info.my_contact_info().tpu;
let (meta, receiver) = JsonRpcRequestProcessor::new(
@@ -5947,7 +5960,11 @@ pub mod tests {
CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()),
)));
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let tpu_address = cluster_info.my_contact_info().tpu;
let (request_processor, receiver) = JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
@@ -7345,8 +7362,11 @@ pub mod tests {
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100);
let bank = Bank::new(&genesis_config);

View File

@@ -117,8 +117,16 @@ impl RpcHealth {
#[cfg(test)]
pub(crate) fn stub() -> Arc<Self> {
use {
solana_gossip::contact_info::ContactInfo, solana_sdk::signer::keypair::Keypair,
solana_streamer::socket::SocketAddrSpace,
};
Arc::new(Self::new(
Arc::new(ClusterInfo::default()),
Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)),
None,
42,
Arc::new(AtomicBool::new(false)),

View File

@@ -488,7 +488,10 @@ mod tests {
use {
super::*,
crate::rpc::create_validator_exit,
solana_gossip::crds_value::{CrdsData, CrdsValue, SnapshotHash},
solana_gossip::{
contact_info::ContactInfo,
crds_value::{CrdsData, CrdsValue, SnapshotHash},
},
solana_ledger::{
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
@@ -500,7 +503,9 @@ mod tests {
solana_sdk::{
genesis_config::{ClusterType, DEFAULT_GENESIS_ARCHIVE},
signature::Signer,
signer::keypair::Keypair,
},
solana_streamer::socket::SocketAddrSpace,
std::{
io::Write,
net::{IpAddr, Ipv4Addr},
@@ -517,7 +522,11 @@ mod tests {
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let bank = Bank::new(&genesis_config);
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let rpc_addr = SocketAddr::new(
ip_addr,
@@ -711,7 +720,11 @@ mod tests {
#[test]
fn test_health_check_with_trusted_validators() {
let cluster_info = Arc::new(ClusterInfo::default());
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::default(),
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
));
let health_check_slot_distance = 123;
let override_health_check = Arc::new(AtomicBool::new(false));
let trusted_validators = vec![

View File

@@ -333,6 +333,7 @@ mod test {
system_program, system_transaction,
timing::timestamp,
},
solana_streamer::socket::SocketAddrSpace,
std::sync::{atomic::AtomicBool, mpsc::channel},
};
@@ -821,6 +822,7 @@ mod test {
let cluster_info = Arc::new(ClusterInfo::new(
ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
node_keypair,
SocketAddrSpace::Unspecified,
));
let validator0_socket = SocketAddr::from(([127, 0, 0, 1], 1111));

View File

@@ -1,7 +1,7 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use crate::{
recvmmsg::{recv_mmsg, NUM_RCVMMSGS},
socket::is_global,
socket::SocketAddrSpace,
};
pub use solana_perf::packet::{
limited_deserialize, to_packets_chunked, Packets, PacketsRecycler, NUM_PACKETS,
@@ -57,10 +57,14 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Res
Ok(i)
}
pub fn send_to(obj: &Packets, socket: &UdpSocket) -> Result<()> {
pub fn send_to(
obj: &Packets,
socket: &UdpSocket,
socket_addr_space: &SocketAddrSpace,
) -> Result<()> {
for p in &obj.packets {
let addr = p.meta.addr();
if is_global(&addr) {
if socket_addr_space.check(&addr) {
socket.send_to(&p.data[..p.meta.size], &addr)?;
}
}
@@ -99,7 +103,7 @@ mod tests {
m.meta.set_addr(&addr);
m.meta.size = PACKET_DATA_SIZE;
}
send_to(&p, &send_socket).unwrap();
send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap();
let recvd = recv_from(&mut p, &recv_socket, 1).unwrap();
@@ -152,7 +156,7 @@ mod tests {
m.meta.set_addr(&addr);
m.meta.size = 1;
}
send_to(&p, &send_socket).unwrap();
send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap();
}
let recvd = recv_from(&mut p, &recv_socket, 100).unwrap();

View File

@@ -1,28 +1,39 @@
use std::net::SocketAddr;
use std::net::{IpAddr, SocketAddr};
// TODO: remove these once IpAddr::is_global is stable.
#[cfg(test)]
pub fn is_global(_: &SocketAddr) -> bool {
true
#[derive(Clone, Copy, PartialEq)]
pub enum SocketAddrSpace {
Unspecified,
Global,
}
#[cfg(not(test))]
pub fn is_global(addr: &SocketAddr) -> bool {
use std::net::IpAddr;
match addr.ip() {
IpAddr::V4(addr) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_link_local()
// || addr.is_broadcast() || addr.is_documentation()
// || addr.is_unspecified()
!addr.is_private()
impl SocketAddrSpace {
pub fn new(allow_private_addr: bool) -> Self {
if allow_private_addr {
SocketAddrSpace::Unspecified
} else {
SocketAddrSpace::Global
}
IpAddr::V6(_) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_unspecified(),
true
}
/// Returns true if the IP address is valid.
pub fn check(&self, addr: &SocketAddr) -> bool {
if self == &SocketAddrSpace::Unspecified {
return true;
}
// TODO: remove these once IpAddr::is_global is stable.
match addr.ip() {
IpAddr::V4(addr) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_link_local()
// || addr.is_broadcast() || addr.is_documentation()
// || addr.is_unspecified()
!addr.is_private()
}
IpAddr::V6(_) => {
// TODO: Consider excluding:
// addr.is_loopback() || addr.is_unspecified(),
true
}
}
}
}
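The hunk above replaces the old is_global helpers (including the #[cfg(test)] variant that unconditionally returned true) with an explicit SocketAddrSpace that callers, including the local-cluster tests earlier in this commit, now pass around themselves. A minimal hedged sketch, assuming only the enum and check method shown above, of the resulting behavior:

// Hedged sketch, not part of the commit: exercises SocketAddrSpace::check.
use std::net::SocketAddr;

fn socket_addr_space_demo() {
    let private: SocketAddr = "10.0.0.1:8001".parse().unwrap();
    let public: SocketAddr = "8.8.8.8:8001".parse().unwrap();

    // Default (no --allow-private-addr): Global filters private IPv4 addresses.
    let global = SocketAddrSpace::new(false);
    assert!(!global.check(&private));
    assert!(global.check(&public));

    // With --allow-private-addr: Unspecified accepts every address.
    let unspecified = SocketAddrSpace::new(true);
    assert!(unspecified.check(&private));
}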

View File

@@ -1,8 +1,11 @@
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crate::packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH};
use crate::recvmmsg::NUM_RCVMMSGS;
use crate::{
packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH},
recvmmsg::NUM_RCVMMSGS,
socket::SocketAddrSpace,
};
use solana_sdk::timing::{duration_as_ms, timestamp};
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -112,10 +115,14 @@ pub fn receiver(
.unwrap()
}
fn recv_send(sock: &UdpSocket, r: &PacketReceiver) -> Result<()> {
fn recv_send(
sock: &UdpSocket,
r: &PacketReceiver,
socket_addr_space: &SocketAddrSpace,
) -> Result<()> {
let timer = Duration::new(1, 0);
let msgs = r.recv_timeout(timer)?;
send_to(&msgs, sock)?;
send_to(&msgs, sock, socket_addr_space)?;
Ok(())
}
@@ -138,7 +145,12 @@ pub fn recv_batch(recvr: &PacketReceiver, max_batch: usize) -> Result<(Vec<Packe
Ok((batch, len, duration_as_ms(&recv_start.elapsed())))
}
pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: PacketReceiver) -> JoinHandle<()> {
pub fn responder(
name: &'static str,
sock: Arc<UdpSocket>,
r: PacketReceiver,
socket_addr_space: SocketAddrSpace,
) -> JoinHandle<()> {
Builder::new()
.name(format!("solana-responder-{}", name))
.spawn(move || {
@@ -146,7 +158,7 @@ pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: PacketReceiver) ->
let mut last_error = None;
let mut last_print = 0;
loop {
if let Err(e) = recv_send(&sock, &r) {
if let Err(e) = recv_send(&sock, &r, &socket_addr_space) {
match e {
StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break,
StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (),
@@ -222,7 +234,12 @@ mod test {
);
let t_responder = {
let (s_responder, r_responder) = channel();
let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
let t_responder = responder(
"streamer_send_test",
Arc::new(send),
r_responder,
SocketAddrSpace::Unspecified,
);
let mut msgs = Packets::default();
for i in 0..5 {
let mut b = Packet::default();

View File

@@ -39,3 +39,4 @@ bincode = "1.3.1"
solana-core = { path = "../core", version = "=1.7.9" }
solana-logger = { path = "../logger", version = "=1.7.9" }
solana-program-test = { path = "../program-test", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }

View File

@@ -1208,12 +1208,14 @@ mod tests {
signature::{read_keypair_file, write_keypair_file, Signer},
stake::instruction::StakeInstruction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_transaction_status::TransactionConfirmationStatus;
#[test]
fn test_process_token_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -1223,7 +1225,8 @@ mod tests {
#[test]
fn test_process_transfer_amount_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -1233,7 +1236,8 @@ mod tests {
#[test]
fn test_create_stake_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -1243,7 +1247,8 @@ mod tests {
#[test]
fn test_process_stake_allocations() {
let alice = Keypair::new();
let test_validator = TestValidator::with_no_fees(alice.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -1563,7 +1568,12 @@ mod tests {
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -1646,7 +1656,12 @@ mod tests {
let fees = 10_000;
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -1761,7 +1776,12 @@ mod tests {
let fees = 10_000;
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -1870,7 +1890,12 @@ mod tests {
let fees = 10_000;
let fees_in_sol = lamports_to_sol(fees);
let alice = Keypair::new();
let test_validator = TestValidator::with_custom_fees(alice.pubkey(), fees, None);
let test_validator = TestValidator::with_custom_fees(
alice.pubkey(),
fees,
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());
@@ -2185,7 +2210,11 @@ mod tests {
#[test]
fn test_distribute_allocations_dump_db() {
let sender_keypair = Keypair::new();
let test_validator = TestValidator::with_no_fees(sender_keypair.pubkey(), None);
let test_validator = TestValidator::with_no_fees(
sender_keypair.pubkey(),
None,
SocketAddrSpace::Unspecified,
);
let url = test_validator.rpc_url();
let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed());

View File

@@ -1,6 +1,7 @@
use solana_client::rpc_client::RpcClient;
use solana_core::test_validator::TestValidator;
use solana_sdk::signature::{Keypair, Signer};
use solana_streamer::socket::SocketAddrSpace;
use solana_tokens::commands::test_process_distribute_tokens_with_client;
#[test]
@@ -8,7 +9,8 @@ fn test_process_distribute_with_rpc_client() {
solana_logger::setup();
let mint_keypair = Keypair::new();
let test_validator = TestValidator::with_no_fees(mint_keypair.pubkey(), None);
let test_validator =
TestValidator::with_no_fees(mint_keypair.pubkey(), None, SocketAddrSpace::Unspecified);
let client = RpcClient::new(test_validator.rpc_url());
test_process_distribute_tokens_with_client(&client, mint_keypair, None);

View File

@@ -45,6 +45,7 @@ solana-poh = { path = "../poh", version = "=1.7.9" }
solana-rpc = { path = "../rpc", version = "=1.7.9" }
solana-runtime = { path = "../runtime", version = "=1.7.9" }
solana-sdk = { path = "../sdk", version = "=1.7.9" }
solana-streamer = { path = "../streamer", version = "=1.7.9" }
solana-version = { path = "../version", version = "=1.7.9" }
solana-vote-program = { path = "../programs/vote", version = "=1.7.9" }
symlink = "0.1.0"

View File

@@ -22,6 +22,7 @@ use {
signature::{read_keypair_file, write_keypair_file, Keypair, Signer},
system_program,
},
solana_streamer::socket::SocketAddrSpace,
solana_validator::{
admin_rpc_service, dashboard::Dashboard, println_name_value, redirect_stderr_to_file,
test_validator::*,
@@ -279,8 +280,16 @@ fn main() {
If the ledger already exists then this parameter is silently ignored",
),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.get_matches();
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let cli_config = if let Some(config_file) = matches.value_of("config_file") {
solana_cli_config::Config::load(config_file).unwrap_or_default()
} else {
@@ -582,7 +591,7 @@ fn main() {
genesis.bind_ip_addr(bind_address);
}
match genesis.start_with_mint_address(mint_address) {
match genesis.start_with_mint_address(mint_address, socket_addr_space) {
Ok(test_validator) => {
if let Some(dashboard) = dashboard {
dashboard.run(Duration::from_millis(250));
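For context, a brief hedged sketch (reusing names from the clap wiring above, not additional commit content) of what the hidden flag selects; the resulting SocketAddrSpace is the value handed to start_with_mint_address here and, in the full validator below, to gossip and streamer setup:

// Sketch only, mirroring the wiring shown above.
// --allow-private-addr present => SocketAddrSpace::Unspecified (private ip addresses allowed)
// flag absent (the default)    => SocketAddrSpace::Global (private IPv4 addresses filtered)
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));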

View File

@@ -57,6 +57,7 @@ use {
pubkey::Pubkey,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_validator::{
admin_rpc_service, dashboard::Dashboard, new_spinner_progress_bar, println_name_value,
redirect_stderr_to_file,
@@ -357,6 +358,7 @@ fn start_gossip_node(
expected_shred_version: Option<u16>,
gossip_validators: Option<HashSet<Pubkey>>,
should_check_duplicate_instance: bool,
socket_addr_space: SocketAddrSpace,
) -> (Arc<ClusterInfo>, Arc<AtomicBool>, GossipService) {
let mut cluster_info = ClusterInfo::new(
ClusterInfo::gossip_contact_info(
@@ -365,6 +367,7 @@ fn start_gossip_node(
expected_shred_version.unwrap_or(0),
),
identity_keypair.clone(),
socket_addr_space,
);
cluster_info.set_entrypoints(cluster_entrypoints.to_vec());
cluster_info.restore_contact_info(ledger_path, 0);
@@ -659,24 +662,25 @@ fn verify_reachable_ports(
node: &Node,
cluster_entrypoint: &ContactInfo,
validator_config: &ValidatorConfig,
socket_addr_space: &SocketAddrSpace,
) -> bool {
let mut udp_sockets = vec![&node.sockets.gossip, &node.sockets.repair];
if ContactInfo::is_valid_address(&node.info.serve_repair) {
if ContactInfo::is_valid_address(&node.info.serve_repair, socket_addr_space) {
udp_sockets.push(&node.sockets.serve_repair);
}
if ContactInfo::is_valid_address(&node.info.tpu) {
if ContactInfo::is_valid_address(&node.info.tpu, socket_addr_space) {
udp_sockets.extend(node.sockets.tpu.iter());
}
if ContactInfo::is_valid_address(&node.info.tpu_forwards) {
if ContactInfo::is_valid_address(&node.info.tpu_forwards, socket_addr_space) {
udp_sockets.extend(node.sockets.tpu_forwards.iter());
}
if ContactInfo::is_valid_address(&node.info.tvu) {
if ContactInfo::is_valid_address(&node.info.tvu, socket_addr_space) {
udp_sockets.extend(node.sockets.tvu.iter());
udp_sockets.extend(node.sockets.broadcast.iter());
udp_sockets.extend(node.sockets.retransmit_sockets.iter());
}
if ContactInfo::is_valid_address(&node.info.tvu_forwards) {
if ContactInfo::is_valid_address(&node.info.tvu_forwards, socket_addr_space) {
udp_sockets.extend(node.sockets.tvu_forwards.iter());
}
@@ -686,7 +690,7 @@ fn verify_reachable_ports(
("RPC", rpc_addr, &node.info.rpc),
("RPC pubsub", rpc_pubsub_addr, &node.info.rpc_pubsub),
] {
if ContactInfo::is_valid_address(public_addr) {
if ContactInfo::is_valid_address(public_addr, socket_addr_space) {
tcp_listeners.push((
bind_addr.port(),
TcpListener::bind(bind_addr).unwrap_or_else(|err| {
@@ -751,14 +755,19 @@ fn rpc_bootstrap(
start_progress: &Arc<RwLock<ValidatorStartProgress>>,
minimal_snapshot_download_speed: f32,
maximum_snapshot_download_abort: u64,
socket_addr_space: SocketAddrSpace,
) {
if !no_port_check {
let mut order: Vec<_> = (0..cluster_entrypoints.len()).collect();
order.shuffle(&mut thread_rng());
if order
.into_iter()
.all(|i| !verify_reachable_ports(node, &cluster_entrypoints[i], validator_config))
{
if order.into_iter().all(|i| {
!verify_reachable_ports(
node,
&cluster_entrypoints[i],
validator_config,
&socket_addr_space,
)
}) {
exit(1);
}
}
@@ -783,6 +792,7 @@ fn rpc_bootstrap(
validator_config.expected_shred_version,
validator_config.gossip_validators.clone(),
should_check_duplicate_instance,
socket_addr_space,
));
}
@@ -1848,6 +1858,13 @@ pub fn main() {
.help("Disables duplicate instance check")
.hidden(true),
)
.arg(
Arg::with_name("allow_private_addr")
.long("allow-private-addr")
.takes_value(false)
.help("Allow contacting private ip addresses")
.hidden(true),
)
.after_help("The default subcommand is run")
.subcommand(
SubCommand::with_name("exit")
@@ -1942,6 +1959,7 @@ pub fn main() {
)
.get_matches();
let socket_addr_space = SocketAddrSpace::new(matches.is_present("allow_private_addr"));
let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap());
let operation = match matches.subcommand() {
@@ -2602,6 +2620,7 @@ pub fn main() {
&start_progress,
minimal_snapshot_download_speed,
maximum_snapshot_download_abort,
socket_addr_space,
);
*start_progress.write().unwrap() = ValidatorStartProgress::Initializing;
}
@@ -2621,6 +2640,7 @@ pub fn main() {
&validator_config,
should_check_duplicate_instance,
start_progress,
socket_addr_space,
);
if let Some(filename) = init_complete_file {