Add FullnodeConfig struct to Fullnode::new* functions

This avoids having to touch *every* Fullnode::new* call site when
a new fullnode option is added
Michael Vines 2019-01-29 08:51:01 -08:00
parent 6da7a784f2
commit ae7f169027
5 changed files with 83 additions and 171 deletions
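
Editor's note: the change applies the plain options-struct idiom: the constructor takes a single config value with a `Default` impl, so a new option becomes one new field plus a default value instead of a signature change at every call site. A minimal, runnable sketch of the idiom (generic illustrative names, not the actual Solana types):

    pub struct Config {
        pub verbose: bool,
        pub port: Option<u16>,
    }

    impl Default for Config {
        fn default() -> Self {
            Self {
                verbose: false,
                port: None,
            }
        }
    }

    pub struct Server;

    impl Server {
        // One trailing `config` parameter instead of a growing list of
        // positional options.
        pub fn new(name: &str, config: Config) -> Self {
            println!("{}: verbose={} port={:?}", name, config.verbose, config.port);
            Server
        }
    }

    fn main() {
        // Call sites that want defaults never change when a field is added.
        let _a = Server::new("default", Default::default());

        // Call sites that care override only the fields they need.
        let mut config = Config::default();
        config.port = Some(8000);
        let _b = Server::new("custom", config);
    }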

View File

@@ -2,7 +2,7 @@ use clap::{crate_version, App, Arg, ArgMatches};
 use log::*;
 use solana::client::mk_client;
 use solana::cluster_info::{Node, NodeInfo, FULLNODE_PORT_RANGE};
-use solana::fullnode::Fullnode;
+use solana::fullnode::{Fullnode, FullnodeConfig};
 use solana::leader_scheduler::LeaderScheduler;
 use solana::local_vote_signer_service::LocalVoteSignerService;
 use solana::socketaddr;
@@ -204,7 +204,8 @@ fn main() {
 )
 .get_matches();
-let no_sigverify = matches.is_present("no_sigverify");
+let mut fullnode_config = FullnodeConfig::default();
+fullnode_config.sigverify_disabled = matches.is_present("no_sigverify");
 let no_signer = matches.is_present("no_signer");
 let use_only_bootstrap_leader = matches.is_present("no_leader_rotation");
 let (keypair, gossip) = parse_identity(&matches);
@@ -233,12 +234,14 @@ fn main() {
 solana_netutil::find_available_port_in_range(FULLNODE_PORT_RANGE)
 .expect("unable to allocate rpc port")
 };
+fullnode_config.rpc_port = Some(rpc_port);
 let init_complete_file = matches.value_of("init_complete_file");
-let entry_stream = matches.value_of("entry_stream").map(|s| s.to_string());
+fullnode_config.entry_stream = matches.value_of("entry_stream").map(|s| s.to_string());
 let keypair = Arc::new(keypair);
 let node = Node::new_with_external_ip(keypair.pubkey(), &gossip);
 let mut node_info = node.info.clone();
 node_info.rpc.set_port(rpc_port);
 node_info.rpc_pubsub.set_port(rpc_port + 1);
@@ -248,7 +251,7 @@ fn main() {
 info!("Node ID: {}", node.info.id);
 let vote_account;
-let signer_option = if !no_signer {
+let vote_signer_option = if !no_signer {
 let vote_signer =
 VoteSignerProxy::new(&keypair, Box::new(RemoteVoteSigner::new(signer_addr)));
 vote_account = vote_signer.vote_account;
@@ -265,13 +268,11 @@ fn main() {
 keypair.clone(),
 ledger_path,
 Arc::new(RwLock::new(leader_scheduler)),
-signer_option,
+vote_signer_option,
 cluster_entrypoint
 .map(|i| NodeInfo::new_entry_point(&i))
 .as_ref(),
-no_sigverify,
-Some(rpc_port),
-entry_stream,
+fullnode_config,
 );
 if !no_signer {
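
The net effect of the hunks above: CLI flags fold directly into `fullnode_config` instead of standalone locals (`no_signer` stays a local because it also gates the vote signer setup). A trimmed, hypothetical sketch of that wiring, using the same clap 2.x calls this file already relies on (`Arg::with_name`, `is_present`, `value_of`); flag and struct contents are abbreviated:

    use clap::{App, Arg};

    #[derive(Default)]
    struct FullnodeConfig {
        sigverify_disabled: bool,
        entry_stream: Option<String>,
    }

    fn main() {
        let matches = App::new("fullnode")
            .arg(Arg::with_name("no_sigverify").long("no-sigverify"))
            .arg(Arg::with_name("entry_stream").long("entry-stream").takes_value(true))
            .get_matches();

        // Each flag lands in the config struct rather than its own local.
        let mut fullnode_config = FullnodeConfig::default();
        fullnode_config.sigverify_disabled = matches.is_present("no_sigverify");
        fullnode_config.entry_stream = matches.value_of("entry_stream").map(|s| s.to_string());
        println!("sigverify disabled: {}", fullnode_config.sigverify_disabled);
    }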

View File

@@ -65,6 +65,27 @@ pub enum FullnodeReturnType {
 ValidatorToLeaderRotation,
 }
+pub struct FullnodeConfig {
+pub sigverify_disabled: bool,
+pub rpc_port: Option<u16>,
+pub entry_stream: Option<String>,
+pub storage_rotate_count: u64,
+}
+impl Default for FullnodeConfig {
+fn default() -> Self {
+// TODO: remove this, temporary parameter to configure
+// storage amount differently for test configurations
+// so tests don't take forever to run.
+const NUM_HASHES_FOR_STORAGE_ROTATE: u64 = 1024;
+Self {
+sigverify_disabled: false,
+rpc_port: None,
+entry_stream: None,
+storage_rotate_count: NUM_HASHES_FOR_STORAGE_ROTATE,
+}
+}
+}
 pub struct Fullnode {
 keypair: Arc<Keypair>,
 exit: Arc<AtomicBool>,
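
With the struct and its `Default` impl in place, a future option is one new field plus a default; every call site that passes `FullnodeConfig::default()` (or `Default::default()`) keeps compiling unchanged. A small usage sketch, assuming the `FullnodeConfig` defined above is in scope (the port number is an arbitrary example):

    fn custom_config() -> FullnodeConfig {
        let mut config = FullnodeConfig::default();
        config.rpc_port = Some(8010);
        config.sigverify_disabled = true;
        // entry_stream and storage_rotate_count keep their defaults, as
        // would any field added to the struct later.
        config
    }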
@@ -80,11 +101,6 @@ pub struct Fullnode {
 pub role_notifiers: (TvuRotationReceiver, TpuRotationReceiver),
 }
-// TODO: remove this, temporary parameter to configure
-// storage amount differently for test configurations
-// so tests don't take forever to run.
-const NUM_HASHES_FOR_STORAGE_ROTATE: u64 = 1024;
 impl Fullnode {
 pub fn new(
 node: Node,
@@ -93,36 +109,7 @@ impl Fullnode {
 leader_scheduler: Arc<RwLock<LeaderScheduler>>,
 vote_signer: Option<Arc<VoteSignerProxy>>,
 entrypoint_info_option: Option<&NodeInfo>,
-sigverify_disabled: bool,
-rpc_port: Option<u16>,
-entry_stream: Option<String>,
-) -> Self {
-Self::new_with_storage_rotate(
-node,
-keypair,
-ledger_path,
-vote_signer,
-entrypoint_info_option,
-sigverify_disabled,
-leader_scheduler,
-rpc_port,
-NUM_HASHES_FOR_STORAGE_ROTATE,
-entry_stream,
-)
-}
-#[allow(clippy::too_many_arguments)]
-pub fn new_with_storage_rotate(
-node: Node,
-keypair: Arc<Keypair>,
-ledger_path: &str,
-vote_signer: Option<Arc<VoteSignerProxy>>,
-entrypoint_info_option: Option<&NodeInfo>,
-sigverify_disabled: bool,
-leader_scheduler: Arc<RwLock<LeaderScheduler>>,
-rpc_port: Option<u16>,
-storage_rotate_count: u64,
-entry_stream: Option<String>,
+config: FullnodeConfig,
 ) -> Self {
 let (genesis_block, db_ledger) = Self::make_db_ledger(ledger_path);
 let (bank, entry_height, last_entry_id) =
@@ -136,10 +123,7 @@ impl Fullnode {
 &last_entry_id,
 vote_signer,
 entrypoint_info_option,
-sigverify_disabled,
-rpc_port,
-storage_rotate_count,
-entry_stream,
+config,
 )
 }
@@ -153,9 +137,7 @@ impl Fullnode {
 last_entry_id: &Hash,
 vote_signer: Option<Arc<VoteSignerProxy>>,
 entrypoint_info_option: Option<&NodeInfo>,
-sigverify_disabled: bool,
-rpc_port: Option<u16>,
-entry_stream: Option<String>,
+config: FullnodeConfig,
 ) -> Self {
 let (_genesis_block, db_ledger) = Self::make_db_ledger(ledger_path);
 Self::new_with_bank_and_db_ledger(
@@ -167,10 +149,7 @@ impl Fullnode {
 &last_entry_id,
 vote_signer,
 entrypoint_info_option,
-sigverify_disabled,
-rpc_port,
-NUM_HASHES_FOR_STORAGE_ROTATE,
-entry_stream,
+config,
 )
 }
@@ -184,15 +163,12 @@ impl Fullnode {
 last_entry_id: &Hash,
 vote_signer: Option<Arc<VoteSignerProxy>>,
 entrypoint_info_option: Option<&NodeInfo>,
-sigverify_disabled: bool,
-rpc_port: Option<u16>,
-storage_rotate_count: u64,
-entry_stream: Option<String>,
+config: FullnodeConfig,
 ) -> Self {
 let mut rpc_addr = node.info.rpc;
 let mut rpc_pubsub_addr = node.info.rpc_pubsub;
 // If rpc_port == `None`, node will listen on the ports set in NodeInfo
-if let Some(port) = rpc_port {
+if let Some(port) = config.rpc_port {
 rpc_addr.set_port(port);
 node.info.rpc = rpc_addr;
 rpc_pubsub_addr.set_port(port + 1);
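
The hunk above preserves the earlier behavior behind the new field: `config.rpc_port == None` leaves the ports advertised in NodeInfo alone, while `Some(port)` claims `port` for RPC and `port + 1` for pubsub. A standalone sketch of that override logic, using plain `SocketAddr` values in place of the real NodeInfo type (port numbers are arbitrary):

    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    fn apply_rpc_port(rpc: &mut SocketAddr, rpc_pubsub: &mut SocketAddr, rpc_port: Option<u16>) {
        // None: keep the preconfigured ports; Some: override both.
        if let Some(port) = rpc_port {
            rpc.set_port(port);
            rpc_pubsub.set_port(port + 1);
        }
    }

    fn main() {
        let ip = IpAddr::V4(Ipv4Addr::LOCALHOST);
        let mut rpc = SocketAddr::new(ip, 10099);
        let mut rpc_pubsub = SocketAddr::new(ip, 10100);
        apply_rpc_port(&mut rpc, &mut rpc_pubsub, Some(8899));
        assert_eq!((rpc.port(), rpc_pubsub.port()), (8899, 8900));
    }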
@@ -304,10 +280,10 @@ impl Fullnode {
 &cluster_info,
 sockets,
 db_ledger.clone(),
-storage_rotate_count,
+config.storage_rotate_count,
 to_leader_sender,
 &storage_state,
-entry_stream,
+config.entry_stream,
 );
 let max_tick_height = {
 let ls_lock = bank.leader_scheduler.read().unwrap();
@@ -328,7 +304,7 @@ impl Fullnode {
 .expect("Failed to clone broadcast socket"),
 cluster_info.clone(),
 entry_height,
-sigverify_disabled,
+config.sigverify_disabled,
 max_tick_height,
 last_entry_id,
 keypair.pubkey(),
@@ -342,7 +318,7 @@ impl Fullnode {
 keypair,
 cluster_info,
 bank,
-sigverify_disabled,
+sigverify_disabled: config.sigverify_disabled,
 gossip_service,
 rpc_service: Some(rpc_service),
 rpc_pubsub_service: Some(rpc_pubsub_service),
@@ -571,9 +547,7 @@ mod tests {
 &last_id,
 Some(Arc::new(signer)),
 Some(&entry),
-false,
-None,
-None,
+Default::default(),
 );
 v.close().unwrap();
 remove_dir_all(validator_ledger_path).unwrap();
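
The bare `Default::default()` in these tests resolves by type inference: the final parameter is declared as `FullnodeConfig`, so the compiler selects `<FullnodeConfig as Default>::default()`. A self-contained illustration of the same mechanism:

    struct Options {
        retries: u32,
    }

    impl Default for Options {
        fn default() -> Self {
            Self { retries: 3 }
        }
    }

    fn run(opts: Options) {
        println!("retries = {}", opts.retries);
    }

    fn main() {
        run(Default::default()); // inferred as run(Options::default())
    }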
@@ -614,9 +588,7 @@ mod tests {
 &last_id,
 Some(Arc::new(signer)),
 Some(&entry),
-false,
-None,
-None,
+Default::default(),
 )
 })
 .collect();
@@ -686,9 +658,7 @@ mod tests {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 // Wait for the leader to transition, ticks should cause the leader to
@@ -791,9 +761,7 @@ mod tests {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(vote_signer)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 assert!(!bootstrap_leader.node_services.tpu.is_leader());
@@ -806,9 +774,7 @@ mod tests {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(validator_vote_account_id)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 assert!(validator.node_services.tpu.is_leader());
@@ -902,9 +868,7 @@ mod tests {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(vote_signer)),
 Some(&leader_node.info),
-false,
-None,
-None,
+Default::default(),
 );
 // Send blobs to the validator from our mock leader

View File

@@ -44,7 +44,6 @@ pub struct ThinClient {
 balances: HashMap<Pubkey, Account>,
 signature_status: bool,
 confirmation: Option<usize>,
-rpc_client: RpcClient,
 }
@@ -472,9 +471,7 @@ pub fn new_fullnode(
 &last_id,
 Some(Arc::new(vote_signer)),
 None,
-false,
-None,
-None,
+Default::default(),
 );
 (server, leader_data, genesis_block, alice, ledger_path)

View File

@@ -163,9 +163,7 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 None,
-false,
-None,
-None,
+Default::default(),
 );
 // start up another validator from zero, converge and then check
@@ -184,9 +182,7 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_data),
-false,
-None,
-None,
+Default::default(),
 );
 // Send validator some tokens to vote
@@ -269,9 +265,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 None,
-false,
-None,
-None,
+Default::default(),
 );
 let mut nodes = vec![server];
@@ -304,9 +298,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_data),
-false,
-None,
-None,
+Default::default(),
 );
 nodes.push(val);
 }
@@ -368,9 +360,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_data),
-false,
-None,
-None,
+Default::default(),
 );
 nodes.push(val);
 let servers = converge(&leader_data, N + 2); // contains the leader and new node
@@ -459,9 +449,7 @@ fn test_multi_node_basic() {
 ))),
 Some(Arc::new(signer_proxy)),
 None,
-false,
-None,
-None,
+Default::default(),
 );
 let mut nodes = vec![server];
@@ -490,9 +478,7 @@ fn test_multi_node_basic() {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_data),
-false,
-None,
-None,
+Default::default(),
 );
 nodes.push(val);
 }
@@ -571,9 +557,7 @@ fn test_boot_validator_from_file() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 None,
-false,
-None,
-None,
+Default::default(),
 );
 let leader_balance =
 send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, 500, Some(500)).unwrap();
@@ -597,9 +581,7 @@ fn test_boot_validator_from_file() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_data),
-false,
-None,
-None,
+Default::default(),
 );
 let mut client = mk_client(&validator_data);
 let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(leader_balance));
@@ -631,9 +613,7 @@ fn create_leader(
 ))),
 Some(signer),
 None,
-false,
-None,
-None,
+Default::default(),
 );
 (leader_data, leader_fullnode)
 }
@@ -709,9 +689,7 @@ fn test_leader_restart_validator_start_from_old_ledger() -> result::Result<()> {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_data),
-false,
-None,
-None,
+Default::default(),
 );
 // trigger broadcast, validator should catch up from leader, whose window contains
@@ -779,9 +757,7 @@ fn test_multi_node_dynamic_network() {
 ))),
 Some(Arc::new(signer_proxy)),
 None,
-true,
-None,
-None,
+Default::default(),
 );
 info!(
 "found leader: {:?}",
@@ -855,9 +831,7 @@ fn test_multi_node_dynamic_network() {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_data),
-true,
-None,
-None,
+Default::default(),
 );
 (rd, val)
 })
@@ -1037,9 +1011,7 @@ fn test_leader_to_validator_transition() {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 // Make an extra node for our leader to broadcast to,
@@ -1193,9 +1165,7 @@ fn test_leader_validator_basic() {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 // Start the leader fullnode
@@ -1207,9 +1177,7 @@ fn test_leader_validator_basic() {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 // Wait for convergence
@@ -1403,9 +1371,7 @@ fn test_dropped_handoff_recovery() {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer_proxy)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 let mut nodes = vec![bootstrap_leader];
@@ -1427,9 +1393,7 @@ fn test_dropped_handoff_recovery() {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer_proxy)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 nodes.push(validator);
@@ -1455,9 +1419,7 @@ fn test_dropped_handoff_recovery() {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer_proxy)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 info!("Wait for 'next leader' to assume leader role");
@@ -1606,9 +1568,7 @@ fn test_full_leader_validator_network() {
 leader_scheduler.clone(),
 Some(Arc::new(signer_proxy)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 schedules.push(leader_scheduler);
@@ -1625,9 +1585,7 @@ fn test_full_leader_validator_network() {
 leader_scheduler.clone(),
 Some(Arc::new(signer_proxy)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 schedules.push(leader_scheduler);
@@ -1804,9 +1762,7 @@ fn test_broadcast_last_tick() {
 Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
 Some(Arc::new(signer_proxy)),
 Some(&bootstrap_leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 // Wait for convergence

View File

@@ -11,7 +11,7 @@ use solana::cluster_info::{ClusterInfo, Node, NodeInfo};
 use solana::db_ledger::DbLedger;
 use solana::db_ledger::{create_tmp_genesis, get_tmp_ledger_path, tmp_copy_ledger};
 use solana::entry::Entry;
-use solana::fullnode::Fullnode;
+use solana::fullnode::{Fullnode, FullnodeConfig};
 use solana::leader_scheduler::LeaderScheduler;
 use solana::replicator::Replicator;
 use solana::storage_stage::STORAGE_ROTATE_TEST_COUNT;
@@ -49,19 +49,18 @@ fn test_replicator_startup() {
 {
 let signer_proxy = VoteSignerProxy::new_local(&leader_keypair);
-let leader = Fullnode::new_with_storage_rotate(
+let mut fullnode_config = FullnodeConfig::default();
+fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
+let leader = Fullnode::new(
 leader_node,
 leader_keypair,
 &leader_ledger_path,
-Some(Arc::new(signer_proxy)),
-None,
-false,
 Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
 leader_info.id.clone(),
 ))),
+Some(Arc::new(signer_proxy)),
 None,
-STORAGE_ROTATE_TEST_COUNT,
-None,
+fullnode_config,
 );
 let validator_keypair = Arc::new(Keypair::new());
@@ -80,19 +79,18 @@ fn test_replicator_startup() {
 #[cfg(feature = "chacha")]
 let validator_node_info = validator_node.info.clone();
-let validator = Fullnode::new_with_storage_rotate(
+let mut fullnode_config = FullnodeConfig::default();
+fullnode_config.storage_rotate_count = STORAGE_ROTATE_TEST_COUNT;
+let validator = Fullnode::new(
 validator_node,
 validator_keypair,
 &validator_ledger_path,
-Some(Arc::new(signer_proxy)),
-Some(&leader_info),
-false,
 Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
 leader_info.id,
 ))),
-None,
-STORAGE_ROTATE_TEST_COUNT,
-None,
+Some(Arc::new(signer_proxy)),
+Some(&leader_info),
+fullnode_config,
 );
 let bob = Keypair::new();
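
Both replicator fullnodes above follow the same recipe: start from `FullnodeConfig::default()`, overwrite `storage_rotate_count`, then hand the config to `Fullnode::new`. With the removal of `new_with_storage_rotate`, this mutate-after-default pattern is how any single option gets customized. A chainable-setter variant (a possible alternative, not part of this commit) would let such tests build the config in one expression; sketched here with a simplified stand-in struct:

    #[derive(Default)]
    struct FullnodeConfig {
        sigverify_disabled: bool,
        storage_rotate_count: u64,
    }

    impl FullnodeConfig {
        fn with_storage_rotate_count(mut self, count: u64) -> Self {
            self.storage_rotate_count = count;
            self
        }
    }

    fn main() {
        let config = FullnodeConfig::default().with_storage_rotate_count(64);
        assert_eq!(config.storage_rotate_count, 64);
        assert!(!config.sigverify_disabled);
    }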
@@ -287,9 +285,7 @@ fn test_replicator_startup_ledger_hang() {
 ))),
 Some(Arc::new(signer_proxy)),
 None,
-false,
-None,
-None,
+Default::default(),
 );
 let validator_keypair = Arc::new(Keypair::new());
@@ -305,9 +301,7 @@ fn test_replicator_startup_ledger_hang() {
 ))),
 Some(Arc::new(signer_proxy)),
 Some(&leader_info),
-false,
-None,
-None,
+Default::default(),
 );
 info!("starting replicator node");