adds validator flag to allow private ip addresses (#18850)
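
The diff below threads a SocketAddrSpace policy through ClusterInfo and its callers so a validator can choose whether private (non-global) IP addresses are acceptable peers; tests drop the old ClusterInfo::new_with_invalid_keypair constructor and build a ClusterInfo explicitly. The helper the tests adopt is reproduced here for orientation; it is copied from the hunks below and assumes the solana_gossip and solana_streamer types they import:

    // Test-only helper used throughout the hunks below (copied from the diff for orientation).
    fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
        ClusterInfo::new(
            contact_info,
            Arc::new(Keypair::new()),     // a throwaway identity keypair
            SocketAddrSpace::Unspecified, // tests accept any address space, including private IPs
        )
    }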
@@ -223,13 +223,22 @@ mod tests {
         hash::hash,
         signature::{Keypair, Signer},
     };
+    use solana_streamer::socket::SocketAddrSpace;

+    fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
+        ClusterInfo::new(
+            contact_info,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        )
+    }
+
     #[test]
     fn test_should_halt() {
         let keypair = Keypair::new();

         let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
+        let cluster_info = new_test_cluster_info(contact_info);
         let cluster_info = Arc::new(cluster_info);

         let mut trusted_validators = HashSet::new();
@@ -265,7 +274,7 @@ mod tests {
         let keypair = Keypair::new();

         let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
+        let cluster_info = new_test_cluster_info(contact_info);
         let cluster_info = Arc::new(cluster_info);

         let trusted_validators = HashSet::new();
@@ -1557,7 +1557,7 @@ mod tests {
     use crossbeam_channel::unbounded;
     use itertools::Itertools;
     use solana_entry::entry::{next_entry, Entry, EntrySlice};
-    use solana_gossip::cluster_info::Node;
+    use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
     use solana_ledger::{
         blockstore::{entries_to_test_shreds, Blockstore},
         genesis_utils::{create_genesis_config, GenesisConfigInfo},
@@ -1579,6 +1579,7 @@ mod tests {
         system_transaction,
         transaction::TransactionError,
     };
+    use solana_streamer::socket::SocketAddrSpace;
     use solana_transaction_status::TransactionWithStatusMeta;
     use std::{
         convert::TryInto,
@@ -1591,6 +1592,14 @@ mod tests {
         thread::sleep,
     };

+    fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
+        ClusterInfo::new(
+            contact_info,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        )
+    }
+
     #[test]
     fn test_banking_stage_shutdown1() {
         let genesis_config = create_genesis_config(2).genesis_config;
@@ -1606,7 +1615,7 @@ mod tests {
         );
         let (exit, poh_recorder, poh_service, _entry_receiever) =
             create_test_recorder(&bank, &blockstore, None);
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
+        let cluster_info = new_test_cluster_info(Node::new_localhost().info);
         let cluster_info = Arc::new(cluster_info);
         let banking_stage = BankingStage::new(
             &cluster_info,
@@ -1652,7 +1661,7 @@ mod tests {
         };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
+        let cluster_info = new_test_cluster_info(Node::new_localhost().info);
         let cluster_info = Arc::new(cluster_info);
         let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();

@@ -1724,7 +1733,7 @@ mod tests {
         };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
+        let cluster_info = new_test_cluster_info(Node::new_localhost().info);
         let cluster_info = Arc::new(cluster_info);
         let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();

@@ -1875,8 +1884,7 @@ mod tests {
         };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
-        let cluster_info =
-            ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
+        let cluster_info = new_test_cluster_info(Node::new_localhost().info);
         let cluster_info = Arc::new(cluster_info);
         let _banking_stage = BankingStage::new_num_threads(
             &cluster_info,
@@ -25,7 +25,7 @@ use solana_sdk::timing::timestamp;
 use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair};
 use solana_streamer::{
     sendmmsg::{batch_send, SendPktsError},
-    socket::is_global,
+    socket::SocketAddrSpace,
 };
 use std::sync::atomic::AtomicU64;
 use std::{
@@ -404,6 +404,7 @@ pub fn broadcast_shreds(
     transmit_stats: &mut TransmitShredsStats,
     self_pubkey: Pubkey,
     bank_forks: &Arc<RwLock<BankForks>>,
+    socket_addr_space: &SocketAddrSpace,
 ) -> Result<()> {
     let mut result = Ok(());
     let broadcast_len = cluster_nodes.num_peers();
@@ -418,7 +419,7 @@ pub fn broadcast_shreds(
         .filter_map(|shred| {
             let seed = shred.seed(Some(self_pubkey), &root_bank);
             let node = cluster_nodes.get_broadcast_peer(seed)?;
-            if is_global(&node.tvu) {
+            if socket_addr_space.check(&node.tvu) {
                 Some((&shred.payload[..], &node.tvu))
             } else {
                 None
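
The broadcast_shreds hunk above swaps the hard-coded is_global(&node.tvu) filter for a caller-supplied socket_addr_space.check(&node.tvu). A minimal, self-contained sketch of such a policy check follows; the enum and method below are illustrative only (the real SocketAddrSpace lives in solana_streamer and is not shown in this diff), with the Unspecified variant named after the value the tests use:

    use std::net::SocketAddr;

    // Hypothetical stand-in for an address-space policy; not the solana_streamer implementation.
    #[derive(Clone, Copy)]
    enum AddrSpacePolicy {
        Unspecified, // accept any peer address, as the tests in this diff do
        Global,      // accept only globally routable addresses
    }

    impl AddrSpacePolicy {
        fn check(&self, addr: &SocketAddr) -> bool {
            match self {
                AddrSpacePolicy::Unspecified => true,
                AddrSpacePolicy::Global => match addr.ip() {
                    // IpAddr::is_global() is unstable, so spell out the common non-global ranges.
                    std::net::IpAddr::V4(ip) => {
                        !(ip.is_private() || ip.is_loopback() || ip.is_link_local() || ip.is_unspecified())
                    }
                    std::net::IpAddr::V6(ip) => !(ip.is_loopback() || ip.is_unspecified()),
                },
            }
        }
    }

    fn main() {
        let private: SocketAddr = "10.0.0.1:8001".parse().unwrap();
        assert!(AddrSpacePolicy::Unspecified.check(&private));
        assert!(!AddrSpacePolicy::Global.check(&private));
    }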
@@ -602,7 +603,11 @@ pub mod test {
         let broadcast_buddy = Node::new_localhost_with_pubkey(&buddy_keypair.pubkey());

         // Fill the cluster_info with the buddy's info
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
+        let cluster_info = ClusterInfo::new(
+            leader_info.info.clone(),
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
         cluster_info.insert_info(broadcast_buddy.info);
         let cluster_info = Arc::new(cluster_info);

@@ -295,6 +295,7 @@ impl BroadcastRun for BroadcastDuplicatesRun {
             &mut TransmitShredsStats::default(),
             cluster_info.id(),
             bank_forks,
+            cluster_info.socket_addr_space(),
         )?;

         Ok(())
@@ -140,14 +140,16 @@ impl BroadcastRun for BroadcastFakeShredsRun {
 mod tests {
     use super::*;
     use solana_gossip::contact_info::ContactInfo;
+    use solana_streamer::socket::SocketAddrSpace;
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};

     #[test]
     fn test_tvu_peers_ordering() {
-        let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
-            &solana_sdk::pubkey::new_rand(),
-            0,
-        ));
+        let cluster = ClusterInfo::new(
+            ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0),
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
         cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
             IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
             8080,
@@ -147,6 +147,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
             &mut TransmitShredsStats::default(),
             cluster_info.id(),
             bank_forks,
+            cluster_info.socket_addr_space(),
         )?;

         Ok(())
@@ -367,6 +367,7 @@ impl StandardBroadcastRun {
             &mut transmit_stats,
             cluster_info.id(),
             bank_forks,
+            cluster_info.socket_addr_space(),
         )?;
         drop(cluster_nodes);
         transmit_time.stop();
@@ -510,6 +511,7 @@ mod test {
         genesis_config::GenesisConfig,
         signature::{Keypair, Signer},
     };
+    use solana_streamer::socket::SocketAddrSpace;
     use std::ops::Deref;
     use std::sync::Arc;
     use std::time::Duration;
@@ -534,7 +536,11 @@ mod test {
         let leader_keypair = Arc::new(Keypair::new());
         let leader_pubkey = leader_keypair.pubkey();
         let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
-        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(leader_info.info));
+        let cluster_info = Arc::new(ClusterInfo::new(
+            leader_info.info,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        ));
         let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let mut genesis_config = create_genesis_config(10_000).genesis_config;
         genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;
@@ -246,8 +246,9 @@ mod tests {
                 sorted_stakes_with_index,
             },
         },
-        solana_sdk::timing::timestamp,
-        std::iter::repeat_with,
+        solana_sdk::{signature::Keypair, timing::timestamp},
+        solana_streamer::socket::SocketAddrSpace,
+        std::{iter::repeat_with, sync::Arc},
     };

     // Legacy methods copied for testing backward compatibility.
@@ -293,7 +294,11 @@ mod tests {
             .collect();
         // Add some staked nodes with no contact-info.
         stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(this_node);
+        let cluster_info = ClusterInfo::new(
+            this_node,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
         {
             let now = timestamp();
             let mut gossip_crds = cluster_info.gossip.crds.write().unwrap();
@@ -180,14 +180,19 @@ mod test {
     use {
         super::*,
         solana_gossip::{cluster_info::Node, crds_value::LowestSlot},
-        solana_sdk::pubkey::Pubkey,
+        solana_sdk::{pubkey::Pubkey, signature::Keypair},
+        solana_streamer::socket::SocketAddrSpace,
     };

     #[test]
     pub fn test_update_lowest_slot() {
         let pubkey = Pubkey::new_unique();
         let node_info = Node::new_localhost_with_pubkey(&pubkey);
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info);
+        let cluster_info = ClusterInfo::new(
+            node_info.info,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
         ClusterSlotsService::update_lowest_slot(5, &cluster_info);
         cluster_info.flush_push_queue();
         let lowest = {
@@ -561,14 +561,24 @@ impl RepairService {
 #[cfg(test)]
 mod test {
     use super::*;
-    use solana_gossip::cluster_info::Node;
+    use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
     use solana_ledger::blockstore::{
         make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
     };
     use solana_ledger::shred::max_ticks_per_n_shreds;
     use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
+    use solana_sdk::signature::Keypair;
+    use solana_streamer::socket::SocketAddrSpace;
     use std::collections::HashSet;

+    fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
+        ClusterInfo::new(
+            contact_info,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        )
+    }
+
     #[test]
     pub fn test_repair_orphan() {
         let blockstore_path = get_tmp_ledger_path!();
@@ -863,7 +873,8 @@ mod test {
         let blockstore_path = get_tmp_ledger_path!();
         let blockstore = Blockstore::open(&blockstore_path).unwrap();
         let cluster_slots = ClusterSlots::default();
-        let serve_repair = ServeRepair::new_with_invalid_keypair(Node::new_localhost().info);
+        let serve_repair =
+            ServeRepair::new(Arc::new(new_test_cluster_info(Node::new_localhost().info)));
         let mut ancestor_hashes_request_statuses = HashMap::new();
         let dead_slot = 9;
         let receive_socket = &UdpSocket::bind("0.0.0.0:0").unwrap();
@@ -946,9 +957,7 @@ mod test {
             Pubkey::default(),
             UdpSocket::bind("0.0.0.0:0").unwrap().local_addr().unwrap(),
         ));
-        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
-            Node::new_localhost().info,
-        ));
+        let cluster_info = Arc::new(new_test_cluster_info(Node::new_localhost().info));
         let serve_repair = ServeRepair::new(cluster_info.clone());
         let valid_repair_peer = Node::new_localhost().info;

@@ -2753,6 +2753,7 @@ pub mod tests {
         system_transaction,
         transaction::TransactionError,
     };
+    use solana_streamer::socket::SocketAddrSpace;
     use solana_transaction_status::TransactionWithStatusMeta;
     use solana_vote_program::{
         vote_state::{VoteState, VoteStateVersions},
@@ -2829,6 +2830,7 @@ pub mod tests {
         let cluster_info = ClusterInfo::new(
             Node::new_localhost_with_pubkey(&my_pubkey).info,
             Arc::new(Keypair::from_bytes(&my_keypairs.node_keypair.to_bytes()).unwrap()),
+            SocketAddrSpace::Unspecified,
         );
         assert_eq!(my_pubkey, cluster_info.id());

@@ -330,6 +330,7 @@ fn retransmit(
     epoch_cache_update.stop();

     let my_id = cluster_info.id();
+    let socket_addr_space = cluster_info.socket_addr_space();
     let mut discard_total = 0;
     let mut repair_total = 0;
     let mut retransmit_total = 0;
@@ -399,6 +400,7 @@ fn retransmit(
                 packet,
                 sock,
                 /*forward socket=*/ true,
+                socket_addr_space,
             );
         }
         ClusterInfo::retransmit_to(
@@ -406,6 +408,7 @@ fn retransmit(
             packet,
             sock,
             !anchor_node, // send to forward socket!
+            socket_addr_space,
         );
         retransmit_time.stop();
         retransmit_total += retransmit_time.as_us();
@@ -629,6 +632,8 @@ mod tests {
     use solana_ledger::shred::Shred;
     use solana_net_utils::find_available_port_in_range;
     use solana_perf::packet::{Packet, Packets};
+    use solana_sdk::signature::Keypair;
+    use solana_streamer::socket::SocketAddrSpace;
     use std::net::{IpAddr, Ipv4Addr};

     #[test]
@@ -665,7 +670,11 @@ mod tests {
             .find(|pk| me.id < *pk)
             .unwrap();
         let other = ContactInfo::new_localhost(&other, 0);
-        let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
+        let cluster_info = ClusterInfo::new(
+            other,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
         cluster_info.insert_info(me);

         let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]);
@@ -194,13 +194,6 @@ impl RepairPeers {
 }

 impl ServeRepair {
-    /// Without a valid keypair gossip will not function. Only useful for tests.
-    pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
-        Self::new(Arc::new(ClusterInfo::new_with_invalid_keypair(
-            contact_info,
-        )))
-    }
-
     pub fn new(cluster_info: Arc<ClusterInfo>) -> Self {
         Self { cluster_info }
     }
@@ -754,7 +747,8 @@ mod tests {
         shred::{max_ticks_per_n_shreds, Shred},
     };
     use solana_perf::packet::Packet;
-    use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
+    use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::Keypair, timing::timestamp};
+    use solana_streamer::socket::SocketAddrSpace;

     #[test]
     fn test_run_highest_window_request() {
@@ -899,11 +893,19 @@ mod tests {
         Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
     }

+    fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
+        ClusterInfo::new(
+            contact_info,
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        )
+    }
+
     #[test]
     fn window_index_request() {
         let cluster_slots = ClusterSlots::default();
         let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
-        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me));
+        let cluster_info = Arc::new(new_test_cluster_info(me));
         let serve_repair = ServeRepair::new(cluster_info.clone());
         let mut outstanding_requests = OutstandingShredRepairs::default();
         let rv = serve_repair.repair_request(
@@ -1213,7 +1215,7 @@ mod tests {
     fn test_repair_with_repair_validators() {
         let cluster_slots = ClusterSlots::default();
         let me = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), timestamp());
-        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me.clone()));
+        let cluster_info = Arc::new(new_test_cluster_info(me.clone()));

         // Insert two peers on the network
         let contact_info2 =
@@ -1,7 +1,7 @@
 use crate::serve_repair::ServeRepair;
 use solana_ledger::blockstore::Blockstore;
 use solana_perf::recycler::Recycler;
-use solana_streamer::streamer;
+use solana_streamer::{socket::SocketAddrSpace, streamer};
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::mpsc::channel;
@@ -17,6 +17,7 @@ impl ServeRepairService {
         serve_repair: &Arc<RwLock<ServeRepair>>,
         blockstore: Option<Arc<Blockstore>>,
         serve_repair_socket: UdpSocket,
+        socket_addr_space: SocketAddrSpace,
         exit: &Arc<AtomicBool>,
     ) -> Self {
         let (request_sender, request_receiver) = channel();
@@ -36,8 +37,12 @@ impl ServeRepairService {
             false,
         );
         let (response_sender, response_receiver) = channel();
-        let t_responder =
-            streamer::responder("serve-repairs", serve_repair_socket, response_receiver);
+        let t_responder = streamer::responder(
+            "serve-repairs",
+            serve_repair_socket,
+            response_receiver,
+            socket_addr_space,
+        );
         let t_listen = ServeRepair::listen(
             serve_repair.clone(),
             blockstore,
@@ -30,6 +30,7 @@ use {
         rent::Rent,
         signature::{read_keypair_file, write_keypair_file, Keypair, Signer},
     },
+    solana_streamer::socket::SocketAddrSpace,
     std::{
         collections::HashMap,
         fs::remove_dir_all,
@@ -269,8 +270,9 @@ impl TestValidatorGenesis {
     pub fn start_with_mint_address(
         &self,
         mint_address: Pubkey,
+        socket_addr_space: SocketAddrSpace,
     ) -> Result<TestValidator, Box<dyn std::error::Error>> {
-        TestValidator::start(mint_address, self)
+        TestValidator::start(mint_address, self, socket_addr_space)
     }

     /// Start a test validator
@@ -279,9 +281,9 @@ impl TestValidatorGenesis {
     /// created at genesis.
     ///
     /// This function panics on initialization failure.
-    pub fn start(&self) -> (TestValidator, Keypair) {
+    pub fn start(&self, socket_addr_space: SocketAddrSpace) -> (TestValidator, Keypair) {
         let mint_keypair = Keypair::new();
-        TestValidator::start(mint_keypair.pubkey(), self)
+        TestValidator::start(mint_keypair.pubkey(), self, socket_addr_space)
             .map(|test_validator| (test_validator, mint_keypair))
             .expect("Test validator failed to start")
     }
@@ -303,7 +305,11 @@ impl TestValidator {
     /// Faucet optional.
     ///
     /// This function panics on initialization failure.
-    pub fn with_no_fees(mint_address: Pubkey, faucet_addr: Option<SocketAddr>) -> Self {
+    pub fn with_no_fees(
+        mint_address: Pubkey,
+        faucet_addr: Option<SocketAddr>,
+        socket_addr_space: SocketAddrSpace,
+    ) -> Self {
         TestValidatorGenesis::default()
             .fee_rate_governor(FeeRateGovernor::new(0, 0))
             .rent(Rent {
@@ -312,7 +318,7 @@ impl TestValidator {
                 ..Rent::default()
             })
             .faucet_addr(faucet_addr)
-            .start_with_mint_address(mint_address)
+            .start_with_mint_address(mint_address, socket_addr_space)
             .expect("validator start failed")
     }

@@ -324,6 +330,7 @@ impl TestValidator {
         mint_address: Pubkey,
         target_lamports_per_signature: u64,
         faucet_addr: Option<SocketAddr>,
+        socket_addr_space: SocketAddrSpace,
     ) -> Self {
         TestValidatorGenesis::default()
             .fee_rate_governor(FeeRateGovernor::new(target_lamports_per_signature, 0))
@@ -333,7 +340,7 @@ impl TestValidator {
                 ..Rent::default()
             })
             .faucet_addr(faucet_addr)
-            .start_with_mint_address(mint_address)
+            .start_with_mint_address(mint_address, socket_addr_space)
             .expect("validator start failed")
     }

@@ -436,6 +443,7 @@ impl TestValidator {
     fn start(
         mint_address: Pubkey,
         config: &TestValidatorGenesis,
+        socket_addr_space: SocketAddrSpace,
     ) -> Result<Self, Box<dyn std::error::Error>> {
         let preserve_ledger = config.ledger_path.is_some();
         let ledger_path = TestValidator::initialize_ledger(mint_address, config)?;
@@ -516,11 +524,12 @@ impl TestValidator {
             &validator_config,
             true, // should_check_duplicate_instance
             config.start_progress.clone(),
+            socket_addr_space,
         ));

         // Needed to avoid panics in `solana-responder-gossip` in tests that create a number of
         // test validators concurrently...
-        discover_cluster(&gossip, 1)
+        discover_cluster(&gossip, 1, socket_addr_space)
             .map_err(|err| format!("TestValidator startup failed: {:?}", err))?;

         // This is a hack to delay until the fees are non-zero for test consistency
@@ -373,7 +373,8 @@ pub mod tests {
     use solana_poh::poh_recorder::create_test_recorder;
     use solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank;
     use solana_runtime::bank::Bank;
-    use solana_sdk::signature::Signer;
+    use solana_sdk::signature::{Keypair, Signer};
+    use solana_streamer::socket::SocketAddrSpace;
     use std::sync::atomic::Ordering;

     #[ignore]
@@ -391,7 +392,11 @@ pub mod tests {
         let bank_forks = BankForks::new(Bank::new(&genesis_config));

         //start cluster_info1
-        let cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
+        let cluster_info1 = ClusterInfo::new(
+            target1.info.clone(),
+            Arc::new(Keypair::new()),
+            SocketAddrSpace::Unspecified,
+        );
         cluster_info1.insert_info(leader.info);
         let cref1 = Arc::new(cluster_info1);

@@ -73,6 +73,7 @@ use solana_sdk::{
     signature::{Keypair, Signer},
     timing::timestamp,
 };
+use solana_streamer::socket::SocketAddrSpace;
 use solana_vote_program::vote_state::VoteState;
 use std::{
     collections::HashSet,
@@ -278,6 +279,7 @@ pub(crate) fn abort() -> ! {
 }

 impl Validator {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         mut node: Node,
         identity_keypair: Arc<Keypair>,
@@ -288,6 +290,7 @@ impl Validator {
         config: &ValidatorConfig,
         should_check_duplicate_instance: bool,
         start_progress: Arc<RwLock<ValidatorStartProgress>>,
+        socket_addr_space: SocketAddrSpace,
     ) -> Self {
         let id = identity_keypair.pubkey();
         assert_eq!(id, node.info.id);
@@ -438,7 +441,8 @@ impl Validator {
             }
         }

-        let mut cluster_info = ClusterInfo::new(node.info.clone(), identity_keypair);
+        let mut cluster_info =
+            ClusterInfo::new(node.info.clone(), identity_keypair, socket_addr_space);
         cluster_info.set_contact_debug_interval(config.contact_debug_interval);
         cluster_info.set_entrypoints(cluster_entrypoints);
         cluster_info.restore_contact_info(ledger_path, config.contact_save_interval);
@@ -511,10 +515,16 @@ impl Validator {
             optimistically_confirmed_bank_tracker,
             bank_notification_sender,
         ) = if let Some((rpc_addr, rpc_pubsub_addr)) = config.rpc_addrs {
-            if ContactInfo::is_valid_address(&node.info.rpc) {
-                assert!(ContactInfo::is_valid_address(&node.info.rpc_pubsub));
+            if ContactInfo::is_valid_address(&node.info.rpc, &socket_addr_space) {
+                assert!(ContactInfo::is_valid_address(
+                    &node.info.rpc_pubsub,
+                    &socket_addr_space
+                ));
             } else {
-                assert!(!ContactInfo::is_valid_address(&node.info.rpc_pubsub));
+                assert!(!ContactInfo::is_valid_address(
+                    &node.info.rpc_pubsub,
+                    &socket_addr_space
+                ));
             }
             let (bank_notification_sender, bank_notification_receiver) = unbounded();
             (
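
In the hunk above, ContactInfo::is_valid_address gains a &socket_addr_space argument, so whether an RPC address passes the sanity check now depends on the configured policy. A small runnable sketch of that "valid under a policy" idea follows; the helper below is illustrative only and not the actual ContactInfo logic, which also rejects default/placeholder addresses:

    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    // Illustrative only: combine basic address sanity with an allow-private toggle.
    fn is_valid_under_policy(addr: &SocketAddr, allow_private: bool) -> bool {
        let ip_ok = match addr.ip() {
            IpAddr::V4(ip) => allow_private || !(ip.is_private() || ip.is_loopback() || ip.is_link_local()),
            IpAddr::V6(ip) => allow_private || !ip.is_loopback(),
        };
        // Treat an unset IP or port 0 as invalid, in the spirit of the original check.
        ip_ok && addr.port() != 0 && !addr.ip().is_unspecified()
    }

    fn main() {
        let rpc = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8899);
        assert!(is_valid_under_policy(&rpc, true));   // accepted when private addresses are allowed
        assert!(!is_valid_under_policy(&rpc, false)); // rejected under a global-only policy
    }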
@@ -595,6 +605,7 @@ impl Validator {
             &serve_repair,
             Some(blockstore.clone()),
             node.sockets.serve_repair,
+            socket_addr_space,
             &exit,
         );

@@ -1613,6 +1624,7 @@ mod tests {
             &config,
             true, // should_check_duplicate_instance
             start_progress.clone(),
+            SocketAddrSpace::Unspecified,
         );
         assert_eq!(
             *start_progress.read().unwrap(),
@@ -1691,6 +1703,7 @@ mod tests {
                     &config,
                     true, // should_check_duplicate_instance
                     Arc::new(RwLock::new(ValidatorStartProgress::default())),
+                    SocketAddrSpace::Unspecified,
                 )
             })
             .collect();
@@ -1716,6 +1729,7 @@ mod tests {
         let cluster_info = ClusterInfo::new(
             ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
             node_keypair,
+            SocketAddrSpace::Unspecified,
         );

         let (genesis_config, _mint_keypair) = create_genesis_config(1);
@@ -615,6 +615,7 @@ mod test {
         signature::{Keypair, Signer},
         timing::timestamp,
     };
+    use solana_streamer::socket::SocketAddrSpace;
     use std::sync::Arc;

     fn local_entries_to_shred(
@@ -765,7 +766,11 @@ mod test {
         assert!(!blockstore.has_duplicate_shreds_in_slot(duplicate_shred_slot));
         let keypair = Keypair::new();
         let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
-        let cluster_info = ClusterInfo::new(contact_info, Arc::new(keypair));
+        let cluster_info = ClusterInfo::new(
+            contact_info,
+            Arc::new(keypair),
+            SocketAddrSpace::Unspecified,
+        );
         run_check_duplicate(
             &cluster_info,
             &blockstore,