* Add --restricted-repair-only-mode flag

(cherry picked from commit 63a67f415e)

# Conflicts:
#	validator/src/main.rs

* Add --gossip-validator argument

(cherry picked from commit daae638781)

# Conflicts:
#	core/src/crds_gossip_pull.rs
#	core/src/crds_gossip_push.rs
#	core/src/gossip_service.rs
#	validator/src/main.rs

* Document how to reduce validator port exposure

(cherry picked from commit c8f03c7f6d)

* Resolve conflicts

Co-authored-by: Michael Vines <mvines@gmail.com>
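For orientation before the diff: the --gossip-validator and --repair-validator flags below both carry their allowlist as an Option<HashSet<Pubkey>>, where None means unrestricted and Some(set) limits traffic to the listed validators. A minimal, self-contained sketch of that semantics follows; the is_allowed helper and the u64 stand-in for Pubkey are illustrative assumptions, not the validator's actual API:

use std::collections::HashSet;

// Hypothetical helper (not in the diff): Option<HashSet<_>> as an allowlist.
// None      => flag unset, every peer is accepted;
// Some(set) => only peers in the set are accepted.
fn is_allowed(allowlist: &Option<HashSet<u64>>, peer: u64) -> bool {
    allowlist
        .as_ref()
        .map(|set| set.contains(&peer))
        .unwrap_or(true)
}

fn main() {
    let restricted: Option<HashSet<u64>> = Some([42].iter().copied().collect());
    assert!(is_allowed(&restricted, 42)); // listed peer passes
    assert!(!is_allowed(&restricted, 7)); // unlisted peer is filtered out
    assert!(is_allowed(&None, 7)); // no flag, no restriction
}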
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -142,6 +142,7 @@ fn start_gossip_node(
     gossip_addr: &SocketAddr,
     gossip_socket: UdpSocket,
     expected_shred_version: Option<u16>,
+    gossip_validators: Option<HashSet<Pubkey>>,
 ) -> (Arc<ClusterInfo>, Arc<AtomicBool>, GossipService) {
     let cluster_info = ClusterInfo::new(
         ClusterInfo::gossip_contact_info(
@@ -159,6 +160,7 @@ fn start_gossip_node(
         &cluster_info.clone(),
         None,
         gossip_socket,
+        gossip_validators,
         &gossip_exit_flag,
     );
     (cluster_info, gossip_exit_flag, gossip_service)
@@ -601,6 +603,17 @@ pub fn main() {
                 .requires("entrypoint")
                 .help("Skip the RPC vote account sanity check")
         )
+        .arg(
+            Arg::with_name("restricted_repair_only_mode")
+                .long("restricted-repair-only-mode")
+                .takes_value(false)
+                .help("Do not publish the Gossip, TPU, TVU or Repair Service ports, causing \
+                       the validator to operate in a limited capacity that reduces its \
+                       exposure to the rest of the cluster. \
+                       \
+                       The --no-voting flag is implicit when this flag is enabled \
+                      "),
+        )
         .arg(
             Arg::with_name("dev_halt_at_slot")
                 .long("dev-halt-at-slot")
@@ -809,7 +822,8 @@ pub fn main() {
                 .requires("expected_bank_hash")
                 .value_name("SLOT")
                 .validator(is_slot)
-                .help("After processing the ledger and the next slot is SLOT, wait until a supermajority of stake is visible on gossip before starting PoH"),
+                .help("After processing the ledger and the next slot is SLOT, wait until a \
+                       supermajority of stake is visible on gossip before starting PoH"),
         )
         .arg(
             Arg::with_name("hard_forks")
@@ -844,7 +858,18 @@ pub fn main() {
                 .multiple(true)
                 .takes_value(true)
                 .help("A list of validators to request repairs from. If specified, repair will not \
-                       request from validators outside this set [default: request repairs from all validators]")
+                       request from validators outside this set [default: all validators]")
         )
+        .arg(
+            Arg::with_name("gossip_validators")
+                .long("gossip-validator")
+                .validator(is_pubkey)
+                .value_name("PUBKEY")
+                .multiple(true)
+                .takes_value(true)
+                .help("A list of validators to gossip with. If specified, gossip \
+                       will not push/pull from validators outside this set. \
+                       [default: all validators]")
+        )
         .arg(
             Arg::with_name("no_rocksdb_compaction")
@@ -960,6 +985,23 @@ pub fn main() {
         "repair_validators",
         "--repair-validator",
     );
+    let gossip_validators = validators_set(
+        &identity_keypair.pubkey(),
+        &matches,
+        "gossip_validators",
+        "--gossip-validator",
+    );
+
+    let bind_address = solana_net_utils::parse_host(matches.value_of("bind_address").unwrap())
+        .expect("invalid bind_address");
+    let rpc_bind_address = if matches.is_present("rpc_bind_address") {
+        solana_net_utils::parse_host(matches.value_of("rpc_bind_address").unwrap())
+            .expect("invalid rpc_bind_address")
+    } else {
+        bind_address
+    };
+
+    let restricted_repair_only_mode = matches.is_present("restricted_repair_only_mode");

     let mut validator_config = ValidatorConfig {
         dev_halt_at_slot: value_t!(matches, "dev_halt_at_slot", Slot).ok(),
@@ -991,10 +1033,11 @@ pub fn main() {
         rpc_ports: value_t!(matches, "rpc_port", u16)
             .ok()
             .map(|rpc_port| (rpc_port, rpc_port + 1)),
-        voting_disabled: matches.is_present("no_voting"),
+        voting_disabled: matches.is_present("no_voting") || restricted_repair_only_mode,
         wait_for_supermajority: value_t!(matches, "wait_for_supermajority", Slot).ok(),
         trusted_validators,
         repair_validators,
+        gossip_validators,
         frozen_accounts: values_t!(matches, "frozen_accounts", Pubkey).unwrap_or_default(),
         no_rocksdb_compaction,
         wal_recovery_mode,
@@ -1002,8 +1045,10 @@ pub fn main() {
     };

     let vote_account = pubkey_of(&matches, "vote_account").unwrap_or_else(|| {
-        warn!("--vote-account not specified, validator will not vote");
-        validator_config.voting_disabled = true;
+        if !validator_config.voting_disabled {
+            warn!("--vote-account not specified, validator will not vote");
+            validator_config.voting_disabled = true;
+        }
         Keypair::new().pubkey()
     });

@@ -1011,15 +1056,6 @@ pub fn main() {
         solana_net_utils::parse_port_range(matches.value_of("dynamic_port_range").unwrap())
             .expect("invalid dynamic_port_range");

-    let bind_address = solana_net_utils::parse_host(matches.value_of("bind_address").unwrap())
-        .expect("invalid bind_address");
-    let rpc_bind_address = if matches.is_present("rpc_bind_address") {
-        solana_net_utils::parse_host(matches.value_of("rpc_bind_address").unwrap())
-            .expect("invalid rpc_bind_address")
-    } else {
-        bind_address
-    };
-
     let account_paths = if let Some(account_paths) = matches.value_of("account_paths") {
         account_paths.split(',').map(PathBuf::from).collect()
     } else {
@@ -1211,6 +1247,18 @@ pub fn main() {
         bind_address,
     );

+    if restricted_repair_only_mode {
+        let any = SocketAddr::new(std::net::IpAddr::V4(std::net::Ipv4Addr::new(0, 0, 0, 0)), 0);
+        // When --restricted-repair-only-mode is enabled, only the gossip and repair ports
+        // need to be reachable by the entrypoint to respond to gossip pull requests and
+        // repair requests initiated by the node. All other ports are unused.
+        node.info.tpu = any;
+        node.info.tpu_forwards = any;
+        node.info.tvu = any;
+        node.info.tvu_forwards = any;
+        node.info.serve_repair = any;
+    }
+
     if !private_rpc {
         if let Some((rpc_port, rpc_pubsub_port)) = validator_config.rpc_ports {
             node.info.rpc = SocketAddr::new(node.info.gossip.ip(), rpc_port);
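The hunk above blanks the advertised TPU/TVU/serve-repair addresses with 0.0.0.0:0, and the next hunk only probes sockets whose advertised address passes ContactInfo::is_valid_address. A self-contained sketch of that pattern follows; the is_valid_address function here is an illustrative stand-in, assumed to mirror the real check (routable IP, non-zero port), not the actual ContactInfo implementation:

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Illustrative stand-in for ContactInfo::is_valid_address: an advertised
// address is only worth contacting if it is routable and has a real port.
fn is_valid_address(addr: &SocketAddr) -> bool {
    !addr.ip().is_unspecified() && addr.port() != 0
}

fn main() {
    // 0.0.0.0:0 marks a service port as unpublished, as in the hunk above.
    let any = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0);
    let tpu = any;
    let repair: SocketAddr = "10.0.0.1:8001".parse().unwrap();

    // Only the repair port survives the filter, so reachability checks and
    // peers leave the blanked services alone.
    assert!(!is_valid_address(&tpu));
    assert!(is_valid_address(&repair));
}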
@@ -1219,17 +1267,25 @@ pub fn main() {
     }

     if let Some(ref cluster_entrypoint) = cluster_entrypoint {
-        let mut udp_sockets = vec![
-            &node.sockets.gossip,
-            &node.sockets.repair,
-            &node.sockets.serve_repair,
-        ];
-        udp_sockets.extend(node.sockets.tpu.iter());
-        udp_sockets.extend(node.sockets.tpu_forwards.iter());
-        udp_sockets.extend(node.sockets.tvu.iter());
-        udp_sockets.extend(node.sockets.tvu_forwards.iter());
-        udp_sockets.extend(node.sockets.broadcast.iter());
-        udp_sockets.extend(node.sockets.retransmit_sockets.iter());
+        let mut udp_sockets = vec![&node.sockets.gossip, &node.sockets.repair];
+
+        if ContactInfo::is_valid_address(&node.info.serve_repair) {
+            udp_sockets.push(&node.sockets.serve_repair);
+        }
+        if ContactInfo::is_valid_address(&node.info.tpu) {
+            udp_sockets.extend(node.sockets.tpu.iter());
+        }
+        if ContactInfo::is_valid_address(&node.info.tpu_forwards) {
+            udp_sockets.extend(node.sockets.tpu_forwards.iter());
+        }
+        if ContactInfo::is_valid_address(&node.info.tvu) {
+            udp_sockets.extend(node.sockets.tvu.iter());
+            udp_sockets.extend(node.sockets.broadcast.iter());
+            udp_sockets.extend(node.sockets.retransmit_sockets.iter());
+        }
+        if ContactInfo::is_valid_address(&node.info.tvu_forwards) {
+            udp_sockets.extend(node.sockets.tvu_forwards.iter());
+        }

         let mut tcp_listeners = vec![];
         if !private_rpc {
@@ -1247,9 +1303,11 @@ pub fn main() {
             }
         }

-        if let Some(ip_echo) = &node.sockets.ip_echo {
-            let ip_echo = ip_echo.try_clone().expect("unable to clone tcp_listener");
-            tcp_listeners.push((node.info.gossip.port(), ip_echo));
+        if !restricted_repair_only_mode {
+            if let Some(ip_echo) = &node.sockets.ip_echo {
+                let ip_echo = ip_echo.try_clone().expect("unable to clone tcp_listener");
+                tcp_listeners.push((ip_echo.local_addr().unwrap().port(), ip_echo));
+            }
         }

         if !solana_net_utils::verify_reachable_ports(
@@ -1266,6 +1324,7 @@ pub fn main() {
         &node.info.gossip,
         node.sockets.gossip.try_clone().unwrap(),
         validator_config.expected_shred_version,
+        validator_config.gossip_validators.clone(),
     );

     let mut blacklisted_rpc_nodes = HashSet::new();