Add option of replicators to local cluster test
committed by sakridge
parent a17843c8f6
commit c208f4dbb5
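In short: LocalCluster can now optionally spin up replicators next to its fullnodes, through a new new_with_config_replicators constructor plus private add_validator/add_replicator helpers. A minimal usage sketch, mirroring the test updated near the end of the local-cluster changes (the count asserts only compile inside the same module, since the fullnodes and replicators fields stay private):

    let mut fullnode_exit = FullnodeConfig::default();
    fullnode_exit.rpc_config.enable_fullnode_exit = true;
    // One fullnode with stake 3, 100 lamports of cluster funding, one replicator.
    let cluster = LocalCluster::new_with_config_replicators(&[3; 1], 100, &fullnode_exit, 1);
    assert_eq!(cluster.fullnodes.len(), 1);
    assert_eq!(cluster.replicators.len(), 1);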
@@ -3,6 +3,7 @@ use crate::cluster_info::{Node, FULLNODE_PORT_RANGE};
use crate::contact_info::ContactInfo;
use crate::fullnode::{Fullnode, FullnodeConfig};
use crate::gossip_service::discover;
use crate::replicator::Replicator;
use crate::service::Service;
use solana_client::client::create_client;
use solana_client::thin_client::{retry_get_balance, ThinClient};
@@ -23,6 +24,9 @@ pub struct LocalCluster {
pub entry_point_info: ContactInfo,
pub ledger_paths: Vec<String>,
fullnodes: Vec<Fullnode>,
replicators: Vec<Replicator>,
genesis_ledger_path: String,
genesis_block: GenesisBlock,
}

impl LocalCluster {
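The new fields keep enough genesis state around for nodes to be added after construction: later hunks copy self.genesis_ledger_path when a validator needs a ledger and derive a fresh ledger from self.genesis_block for each replicator. An assembled sketch of the struct, with annotations inferred from those later hunks (fields declared outside this hunk are elided):

    pub struct LocalCluster {
        // ...fields above unchanged...
        pub entry_point_info: ContactInfo,
        pub ledger_paths: Vec<String>,
        fullnodes: Vec<Fullnode>,
        replicators: Vec<Replicator>,   // new: handles to running replicators
        genesis_ledger_path: String,    // new: copied when a validator ledger is needed
        genesis_block: GenesisBlock,    // new: used to create each replicator's ledger
    }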
@@ -35,6 +39,15 @@ impl LocalCluster {
node_stakes: &[u64],
cluster_lamports: u64,
fullnode_config: &FullnodeConfig,
) -> Self {
Self::new_with_config_replicators(node_stakes, cluster_lamports, fullnode_config, 0)
}

pub fn new_with_config_replicators(
node_stakes: &[u64],
cluster_lamports: u64,
fullnode_config: &FullnodeConfig,
num_replicators: usize,
) -> Self {
let leader_keypair = Arc::new(Keypair::new());
let leader_pubkey = leader_keypair.pubkey();
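new_with_config now simply forwards to the replicator-aware constructor with a count of zero, so existing callers are unchanged. Equivalent calls, as a sketch based on the signatures in this hunk:

    let config = FullnodeConfig::default();
    let a = LocalCluster::new_with_config(&[3], 100, &config);
    let b = LocalCluster::new_with_config_replicators(&[3], 100, &config, 0);
    // Both start one fullnode and no replicators.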
@@ -48,6 +61,7 @@ impl LocalCluster {
ledger_paths.push(leader_ledger_path.clone());
let voting_keypair = Keypair::new();
let leader_contact_info = leader_node.info.clone();

let leader_server = Fullnode::new(
leader_node,
&leader_keypair,
@@ -57,54 +71,30 @@ impl LocalCluster {
None,
fullnode_config,
);
let mut fullnodes = vec![leader_server];
let mut client = create_client(
leader_contact_info.client_facing_addr(),
FULLNODE_PORT_RANGE,
);
for stake in &node_stakes[1..] {
// Must have enough tokens to fund vote account and set delegate
assert!(*stake > 2);
let validator_keypair = Arc::new(Keypair::new());
let voting_keypair = Keypair::new();
let validator_pubkey = validator_keypair.pubkey();
let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
let ledger_path = tmp_copy_blocktree!(&genesis_ledger_path);
ledger_paths.push(ledger_path.clone());

// Send each validator some lamports to vote
let validator_balance =
Self::transfer(&mut client, &mint_keypair, &validator_pubkey, *stake);
info!(
"validator {} balance {}",
validator_pubkey, validator_balance
);
let fullnodes = vec![leader_server];

Self::create_and_fund_vote_account(
&mut client,
&voting_keypair,
&validator_keypair,
stake - 1,
)
.unwrap();
let validator_server = Fullnode::new(
validator_node,
&validator_keypair,
&ledger_path,
&voting_keypair.pubkey(),
voting_keypair,
Some(&leader_contact_info),
fullnode_config,
);
fullnodes.push(validator_server);
}
discover(&leader_contact_info.gossip, node_stakes.len()).unwrap();
Self {
let mut cluster = Self {
funding_keypair: mint_keypair,
entry_point_info: leader_contact_info,
fullnodes,
replicators: vec![],
ledger_paths,
genesis_ledger_path,
genesis_block,
};

for stake in &node_stakes[1..] {
cluster.add_validator(&fullnode_config, *stake);
}

for _ in 0..num_replicators {
cluster.add_replicator();
}

discover(&cluster.entry_point_info.gossip, node_stakes.len()).unwrap();

cluster
}

pub fn exit(&self) {
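This hunk interleaves the removed per-validator setup with the new control flow, which makes it hard to read in place. Pieced together from the added lines only, the tail of new_with_config_replicators now looks roughly like this (a reading aid, not a verbatim excerpt):

    let mut cluster = Self {
        funding_keypair: mint_keypair,
        entry_point_info: leader_contact_info,
        fullnodes,                 // only the leader at this point
        replicators: vec![],
        ledger_paths,
        genesis_ledger_path,
        genesis_block,
    };

    for stake in &node_stakes[1..] {
        cluster.add_validator(&fullnode_config, *stake);
    }

    for _ in 0..num_replicators {
        cluster.add_replicator();
    }

    discover(&cluster.entry_point_info.gossip, node_stakes.len()).unwrap();

    cluster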
@@ -118,6 +108,84 @@ impl LocalCluster {
while let Some(node) = self.fullnodes.pop() {
node.join().unwrap();
}

while let Some(node) = self.replicators.pop() {
node.close();
}
}

fn add_validator(&mut self, fullnode_config: &FullnodeConfig, stake: u64) {
let mut client = create_client(
self.entry_point_info.client_facing_addr(),
FULLNODE_PORT_RANGE,
);

// Must have enough tokens to fund vote account and set delegate
assert!(stake > 2);
let validator_keypair = Arc::new(Keypair::new());
let voting_keypair = Keypair::new();
let validator_pubkey = validator_keypair.pubkey();
let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
let ledger_path = tmp_copy_blocktree!(&self.genesis_ledger_path);
self.ledger_paths.push(ledger_path.clone());

// Send each validator some lamports to vote
let validator_balance =
Self::transfer(&mut client, &self.funding_keypair, &validator_pubkey, stake);
info!(
"validator {} balance {}",
validator_pubkey, validator_balance
);

Self::create_and_fund_vote_account(
&mut client,
&voting_keypair,
&validator_keypair,
stake - 1,
)
.unwrap();

let validator_server = Fullnode::new(
validator_node,
&validator_keypair,
&ledger_path,
&voting_keypair.pubkey(),
voting_keypair,
Some(&self.entry_point_info),
fullnode_config,
);

self.fullnodes.push(validator_server);
}

fn add_replicator(&mut self) {
let replicator_keypair = Arc::new(Keypair::new());
let mut client = create_client(
self.entry_point_info.client_facing_addr(),
FULLNODE_PORT_RANGE,
);

Self::transfer(
&mut client,
&self.funding_keypair,
&replicator_keypair.pubkey(),
1,
);
let replicator_node = Node::new_localhost_with_pubkey(&replicator_keypair.pubkey());

let (replicator_ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_block);
let replicator = Replicator::new(
&replicator_ledger_path,
replicator_node,
self.entry_point_info.clone(),
replicator_keypair,
None,
)
.unwrap();

self.ledger_paths.push(replicator_ledger_path);

self.replicators.push(replicator);
}

fn close(&mut self) {
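The funding rules implied by these helpers: add_validator transfers the full stake to the validator and then puts stake - 1 into its vote account, hence the assert!(stake > 2), while add_replicator transfers a single lamport before starting the Replicator. A sketch of calling them from inside the impl (both are private, so external code has to go through the constructors; the call site here is hypothetical):

    // Inside LocalCluster's own module, e.g. from a test helper:
    cluster.add_validator(&FullnodeConfig::default(), 3); // stake of 3 satisfies stake > 2
    cluster.add_replicator();                             // funded with exactly 1 lamport
    // cluster.add_validator(&FullnodeConfig::default(), 2); // would trip the assert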
@@ -216,7 +284,10 @@ mod test {
#[test]
fn test_local_cluster_start_and_exit() {
solana_logger::setup();
let _cluster = LocalCluster::new(1, 100, 3);
let num_nodes = 1;
let cluster = LocalCluster::new(num_nodes, 100, 3);
assert_eq!(cluster.fullnodes.len(), num_nodes);
assert_eq!(cluster.replicators.len(), 0);
}

#[test]
@@ -224,6 +295,15 @@ mod test {
solana_logger::setup();
let mut fullnode_exit = FullnodeConfig::default();
fullnode_exit.rpc_config.enable_fullnode_exit = true;
let _cluster = LocalCluster::new_with_config(&[3], 100, &fullnode_exit);
const NUM_NODES: usize = 1;
let num_replicators = 1;
let cluster = LocalCluster::new_with_config_replicators(
&[3; NUM_NODES],
100,
&fullnode_exit,
num_replicators,
);
assert_eq!(cluster.fullnodes.len(), NUM_NODES);
assert_eq!(cluster.replicators.len(), num_replicators);
}
}
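The updated tests pin the expected node and replicator counts rather than only constructing a cluster. Scaling the same pattern up is a matter of widening the stake slice and the replicator count; a sketch with hypothetical values, using the same API as the test above:

    let cluster = LocalCluster::new_with_config_replicators(
        &[3, 3, 3],             // leader plus two validators, each stake 3 (> 2)
        100,
        &FullnodeConfig::default(),
        2,                      // two replicators
    );
    assert_eq!(cluster.fullnodes.len(), 3);
    assert_eq!(cluster.replicators.len(), 2);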
@@ -9,7 +9,6 @@ use crate::repair_service::RepairSlotRange;
use crate::result::Result;
use crate::service::Service;
use crate::storage_stage::{get_segment_from_entry, ENTRIES_PER_SEGMENT};
use crate::streamer::BlobReceiver;
use crate::window_service::WindowService;
use rand::thread_rng;
use rand::Rng;
@@ -35,13 +34,15 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::spawn;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};

pub struct Replicator {
gossip_service: GossipService,
fetch_stage: BlobFetchStage,
window_service: WindowService,
pub retransmit_receiver: BlobReceiver,
t_retransmit: JoinHandle<()>,
exit: Arc<AtomicBool>,
slot: u64,
ledger_path: String,
@@ -174,7 +175,6 @@ impl Replicator {
let (blob_fetch_sender, blob_fetch_receiver) = channel();
let fetch_stage = BlobFetchStage::new_multi_socket(blob_sockets, &blob_fetch_sender, &exit);

// todo: pull blobs off the retransmit_receiver and recycle them?
let (retransmit_sender, retransmit_receiver) = channel();

let window_service = WindowService::new(
@@ -187,11 +187,20 @@ impl Replicator {
repair_slot_range,
);

// receive blobs from retransmit and drop them.
let exit2 = exit.clone();
let t_retransmit = spawn(move || loop {
let _ = retransmit_receiver.recv_timeout(Duration::from_secs(1));
if exit2.load(Ordering::Relaxed) {
break;
}
});

Ok(Self {
gossip_service,
fetch_stage,
window_service,
retransmit_receiver,
t_retransmit,
exit,
slot,
ledger_path: ledger_path.to_string(),
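The replicator used to keep the retransmit receiver as a public field and drain it only at join time; now a small background thread drains it continuously until the exit flag is set, and the final hunk below swaps the drain-at-join loop for joining that thread. The pattern, as a standalone runnable sketch using only std (names here are illustrative, not from the codebase):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread::spawn;
    use std::time::Duration;

    fn main() {
        let exit = Arc::new(AtomicBool::new(false));
        let (sender, receiver) = channel::<Vec<u8>>();

        // Same shape as t_retransmit above: receive and drop until exit flips.
        let exit2 = exit.clone();
        let t_drain = spawn(move || loop {
            let _ = receiver.recv_timeout(Duration::from_secs(1));
            if exit2.load(Ordering::Relaxed) {
                break;
            }
        });

        sender.send(vec![1, 2, 3]).unwrap(); // consumed and discarded by the thread
        exit.store(true, Ordering::Relaxed); // signal shutdown
        t_drain.join().unwrap();             // join, as Replicator::join now does
    }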
@@ -329,14 +338,7 @@ impl Replicator {
self.gossip_service.join().unwrap();
self.fetch_stage.join().unwrap();
self.window_service.join().unwrap();

// Drain the queue here to prevent self.retransmit_receiver from being dropped
// before the window_service thread is joined
let mut retransmit_queue_count = 0;
while let Ok(_blob) = self.retransmit_receiver.recv_timeout(Duration::new(1, 0)) {
retransmit_queue_count += 1;
}
debug!("retransmit channel count: {}", retransmit_queue_count);
self.t_retransmit.join().unwrap();
}

pub fn entry_height(&self) -> u64 {