//! The `fullnode` module hosts all the fullnode microservices.
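//!
//! # Example
//!
//! A minimal sketch of starting a node, based on the tests at the bottom of this
//! file. The ledger path is an illustrative assumption; passing `None` as the
//! entrypoint makes this node act as the bootstrap leader.
//!
//! ```ignore
//! let keypair = Arc::new(Keypair::new());
//! let node = Node::new_localhost_with_pubkey(keypair.pubkey());
//! let voting_keypair = VotingKeypair::new_local(&keypair);
//! let fullnode = Fullnode::new(
//!     node,
//!     &keypair,
//!     "/path/to/ledger", // hypothetical ledger location
//!     voting_keypair,
//!     None,
//!     &FullnodeConfig::default(),
//! );
//!
//! // `run` spawns a thread to manage role transitions and returns a closure
//! // that shuts the node down when called.
//! let node_exit = fullnode.run(None);
//! node_exit();
//! ```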

use crate::bank::Bank;
use crate::blocktree::{Blocktree, BlocktreeConfig};
use crate::cluster_info::{ClusterInfo, Node, NodeInfo};
use crate::counter::Counter;
use crate::genesis_block::GenesisBlock;
use crate::gossip_service::GossipService;
use crate::leader_scheduler::LeaderSchedulerConfig;
use crate::poh_service::PohServiceConfig;
use crate::rpc::JsonRpcService;
use crate::rpc_pubsub::PubSubService;
use crate::service::Service;
use crate::storage_stage::StorageState;
use crate::streamer::BlobSender;
use crate::tpu::{Tpu, TpuRotationReceiver, TpuRotationSender};
use crate::tvu::{Sockets, Tvu};
use crate::voting_keypair::VotingKeypair;
use log::Level;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::{duration_as_ms, timestamp};
use std::net::UdpSocket;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender, SyncSender};
use std::sync::{Arc, RwLock};
use std::thread::{sleep, spawn, Result};
use std::time::{Duration, Instant};

struct NodeServices {
    tpu: Tpu,
    tvu: Tvu,
}

impl NodeServices {
    fn new(tpu: Tpu, tvu: Tvu) -> Self {
        NodeServices { tpu, tvu }
    }

    fn join(self) -> Result<()> {
        self.tpu.join()?;
        // tvu will never stop unless exit is signaled
        self.tvu.join()?;
        Ok(())
    }

    fn exit(&self) {
        self.tpu.exit();
        self.tvu.exit();
    }
}

#[derive(Debug, PartialEq, Eq)]
pub enum FullnodeReturnType {
    LeaderToValidatorRotation,
    ValidatorToLeaderRotation,
    LeaderToLeaderRotation,
}
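
/// Runtime configuration passed to `Fullnode::new`.
///
/// A minimal sketch, mirroring the tests in this file, of overriding the leader
/// schedule while keeping the remaining defaults; `ticks_per_slot`,
/// `slots_per_epoch`, and `active_window_length` are illustrative values chosen
/// by the caller:
///
/// ```ignore
/// let mut fullnode_config = FullnodeConfig::default();
/// fullnode_config.leader_scheduler_config =
///     LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
/// ```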
pub struct FullnodeConfig {
    pub sigverify_disabled: bool,
    pub voting_disabled: bool,
    pub entry_stream: Option<String>,
    pub storage_rotate_count: u64,
    pub leader_scheduler_config: LeaderSchedulerConfig,
    pub tick_config: PohServiceConfig,
}

impl Default for FullnodeConfig {
    fn default() -> Self {
        // TODO: remove this; it's a temporary parameter to configure the storage
        // amount differently for test configurations so tests don't take forever
        // to run.
        const NUM_HASHES_FOR_STORAGE_ROTATE: u64 = 1024;
        Self {
            sigverify_disabled: false,
            voting_disabled: false,
            entry_stream: None,
            storage_rotate_count: NUM_HASHES_FOR_STORAGE_ROTATE,
            leader_scheduler_config: LeaderSchedulerConfig::default(),
            tick_config: PohServiceConfig::default(),
        }
    }
}

impl FullnodeConfig {
    pub fn ledger_config(&self) -> BlocktreeConfig {
        // TODO: Refactor LeaderSchedulerConfig and BlocktreeConfig to avoid the duplicated
        // `ticks_per_slot` field that must be identical between the two
        BlocktreeConfig::new(self.leader_scheduler_config.ticks_per_slot)
    }
}

pub struct Fullnode {
    id: Pubkey,
    exit: Arc<AtomicBool>,
    rpc_service: Option<JsonRpcService>,
    rpc_pubsub_service: Option<PubSubService>,
    gossip_service: GossipService,
    bank: Arc<Bank>,
    cluster_info: Arc<RwLock<ClusterInfo>>,
    sigverify_disabled: bool,
    tpu_sockets: Vec<UdpSocket>,
    broadcast_socket: UdpSocket,
    node_services: NodeServices,
    rotation_sender: TpuRotationSender,
    rotation_receiver: TpuRotationReceiver,
    blob_sender: BlobSender,
}

impl Fullnode {
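    /// Creates and starts all of the node's services.
    ///
    /// A sketch of the validator case, based on the `validator_exit` test below;
    /// `leader_node.info` stands in for whatever entrypoint the node should join:
    ///
    /// ```ignore
    /// let validator = Fullnode::new(
    ///     validator_node,
    ///     &Arc::new(validator_keypair),
    ///     &validator_ledger_path,
    ///     VotingKeypair::new(),
    ///     Some(&leader_node.info),
    ///     &FullnodeConfig::default(),
    /// );
    /// validator.close().unwrap();
    /// ```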
    pub fn new(
        mut node: Node,
        keypair: &Arc<Keypair>,
        ledger_path: &str,
        voting_keypair: VotingKeypair,
        entrypoint_info_option: Option<&NodeInfo>,
        config: &FullnodeConfig,
    ) -> Self {
        info!("creating bank...");

        let id = keypair.pubkey();
        assert_eq!(id, node.info.id);

        let (
            bank,
            entry_height,
            last_entry_id,
            blocktree,
            ledger_signal_sender,
            ledger_signal_receiver,
        ) = new_bank_from_ledger(
            ledger_path,
            config.ledger_config(),
            &config.leader_scheduler_config,
        );
info!("node info: {:?}", node.info);
|
2019-01-28 20:10:38 -08:00
|
|
|
info!("node entrypoint_info: {:?}", entrypoint_info_option);
|
|
|
|
info!(
|
2019-01-29 09:11:49 -08:00
|
|
|
"node local gossip address: {}",
|
|
|
|
node.sockets.gossip.local_addr().unwrap()
|
2019-01-28 20:10:38 -08:00
|
|
|
);
|
|
|
|
|
2018-09-10 23:38:40 -07:00
|
|
|
let exit = Arc::new(AtomicBool::new(false));
|
2018-08-22 18:50:19 -06:00
|
|
|
let bank = Arc::new(bank);
|
2019-02-07 20:52:39 -08:00
|
|
|
let blocktree = Arc::new(blocktree);
|
2018-08-14 18:03:48 -06:00
|
|
|
|
2018-11-15 13:23:26 -08:00
|
|
|
node.info.wallclock = timestamp();
|
2018-12-01 12:00:30 -08:00
|
|
|
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_keypair(
|
2019-01-29 09:11:49 -08:00
|
|
|
node.info.clone(),
|
2018-12-01 12:00:30 -08:00
|
|
|
keypair.clone(),
|
|
|
|
)));
|
2018-08-22 18:50:19 -06:00
|
|
|
|
2019-01-21 11:14:27 -08:00
|
|
|
// TODO: The RPC service assumes that there is a drone running on the cluster
|
|
|
|
// entrypoint, which is a bad assumption.
|
|
|
|
// See https://github.com/solana-labs/solana/issues/1830 for the removal of drone
|
|
|
|
// from the RPC API
|
|
|
|
let drone_addr = {
|
|
|
|
let mut entrypoint_drone_addr = match entrypoint_info_option {
|
|
|
|
Some(entrypoint_info_info) => entrypoint_info_info.rpc,
|
2019-01-29 09:11:49 -08:00
|
|
|
None => node.info.rpc,
|
2019-01-21 11:14:27 -08:00
|
|
|
};
|
|
|
|
entrypoint_drone_addr.set_port(solana_drone::drone::DRONE_PORT);
|
|
|
|
entrypoint_drone_addr
|
2019-01-04 17:20:51 -08:00
|
|
|
};
|
|
|
|
|
2019-01-28 14:53:50 -08:00
|
|
|
let storage_state = StorageState::new();
|
|
|
|
|
2019-01-15 12:20:07 -08:00
|
|
|
let rpc_service = JsonRpcService::new(
|
|
|
|
&bank,
|
|
|
|
&cluster_info,
|
2019-01-29 09:11:49 -08:00
|
|
|
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), node.info.rpc.port()),
|
2019-01-15 12:20:07 -08:00
|
|
|
drone_addr,
|
2019-01-28 14:53:50 -08:00
|
|
|
storage_state.clone(),
|
2019-01-15 12:20:07 -08:00
|
|
|
);
|
|
|
|
|
|
|
|
let rpc_pubsub_service = PubSubService::new(
|
|
|
|
&bank,
|
|
|
|
SocketAddr::new(
|
|
|
|
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
|
2019-01-29 09:11:49 -08:00
|
|
|
node.info.rpc_pubsub.port(),
|
2019-01-15 12:20:07 -08:00
|
|
|
),
|
|
|
|
);
|
2018-10-12 11:04:14 -07:00
|
|
|
|
2018-12-06 13:52:47 -07:00
|
|
|
let gossip_service = GossipService::new(
|
2018-10-08 20:55:54 -06:00
|
|
|
&cluster_info,
|
2019-02-07 20:52:39 -08:00
|
|
|
Some(blocktree.clone()),
|
2018-08-22 19:00:56 -06:00
|
|
|
node.sockets.gossip,
|
|
|
|
exit.clone(),
|
2018-09-02 23:23:43 -10:00
|
|
|
);
|
2018-08-22 19:00:56 -06:00
|
|
|
|
2019-01-21 11:14:27 -08:00
|
|
|
// Insert the entrypoint info, should only be None if this node
|
2018-10-10 16:49:41 -07:00
|
|
|
// is the bootstrap leader
|
2019-01-21 11:14:27 -08:00
|
|
|
if let Some(entrypoint_info) = entrypoint_info_option {
|
2018-11-15 13:23:26 -08:00
|
|
|
cluster_info
|
|
|
|
.write()
|
|
|
|
.unwrap()
|
2019-01-21 11:14:27 -08:00
|
|
|
.insert_info(entrypoint_info.clone());
|
2018-08-22 18:51:53 -06:00
|
|
|
}
|
2018-08-22 18:50:19 -06:00
|
|
|
|

        // Get the scheduled leader
        let (scheduled_leader, slot_height, max_tpu_tick_height) = {
            let tick_height = bank.tick_height();

            let leader_scheduler = bank.leader_scheduler.read().unwrap();
            let slot = leader_scheduler.tick_height_to_slot(tick_height);
            (
                leader_scheduler
                    .get_leader_for_slot(slot)
                    .expect("Leader not known after processing bank"),
                slot,
                tick_height + leader_scheduler.num_ticks_left_in_slot(tick_height),
            )
        };

        trace!(
            "scheduled_leader: {} until tick_height {}",
            scheduled_leader,
            max_tpu_tick_height
        );
        cluster_info.write().unwrap().set_leader(scheduled_leader);

        // TODO: always start leader and validator, keep leader side switching between tpu
        // forwarder and regular tpu.
        let sockets = Sockets {
            repair: node
                .sockets
                .repair
                .try_clone()
                .expect("Failed to clone repair socket"),
            retransmit: node
                .sockets
                .retransmit
                .try_clone()
                .expect("Failed to clone retransmit socket"),
            fetch: node
                .sockets
                .tvu
                .iter()
                .map(|s| s.try_clone().expect("Failed to clone TVU Sockets"))
                .collect(),
        };

        let voting_keypair_option = if config.voting_disabled {
            None
        } else {
            Some(Arc::new(voting_keypair))
        };

        // Setup channel for rotation indications
        let (rotation_sender, rotation_receiver) = channel();

        let blob_index = Self::get_consumed_for_slot(&blocktree, slot_height);

        let (tvu, blob_sender) = Tvu::new(
            voting_keypair_option,
            &bank,
            blob_index,
            entry_height,
            last_entry_id,
            &cluster_info,
            sockets,
            blocktree.clone(),
            config.storage_rotate_count,
            &rotation_sender,
            &storage_state,
            config.entry_stream.as_ref(),
            ledger_signal_sender,
            ledger_signal_receiver,
        );
        let tpu = Tpu::new(
            &Arc::new(bank.copy_for_tpu()),
            PohServiceConfig::default(),
            node.sockets
                .tpu
                .iter()
                .map(|s| s.try_clone().expect("Failed to clone TPU sockets"))
                .collect(),
            node.sockets
                .broadcast
                .try_clone()
                .expect("Failed to clone broadcast socket"),
            cluster_info.clone(),
            config.sigverify_disabled,
            max_tpu_tick_height,
            blob_index,
            &last_entry_id,
            id,
            &rotation_sender,
            &blob_sender,
            scheduled_leader == id,
        );

        inc_new_counter_info!("fullnode-new", 1);

        Self {
            id,
            cluster_info,
            bank,
            sigverify_disabled: config.sigverify_disabled,
            gossip_service,
            rpc_service: Some(rpc_service),
            rpc_pubsub_service: Some(rpc_pubsub_service),
            node_services: NodeServices::new(tpu, tvu),
            exit,
            tpu_sockets: node.sockets.tpu,
            broadcast_socket: node.sockets.broadcast,
            rotation_sender,
            rotation_receiver,
            blob_sender,
        }
    }

    fn get_next_leader(&self, tick_height: u64) -> (Pubkey, u64) {
        loop {
            let bank_tick_height = self.bank.tick_height();
            if bank_tick_height >= tick_height {
                break;
            }
            trace!(
                "Waiting for bank tick_height to catch up from {} to {}",
                bank_tick_height,
                tick_height
            );
            sleep(Duration::from_millis(10));
        }

        let (scheduled_leader, max_tick_height) = {
            let mut leader_scheduler = self.bank.leader_scheduler.write().unwrap();

            // A transition is only permitted on the final tick of a slot
            assert_eq!(leader_scheduler.num_ticks_left_in_slot(tick_height), 0);
            let first_tick_of_next_slot = tick_height + 1;

            leader_scheduler.update_tick_height(first_tick_of_next_slot, &self.bank);
            let slot = leader_scheduler.tick_height_to_slot(first_tick_of_next_slot);
            (
                leader_scheduler.get_leader_for_slot(slot).unwrap(),
                first_tick_of_next_slot
                    + leader_scheduler.num_ticks_left_in_slot(first_tick_of_next_slot),
            )
        };

        debug!(
            "node {:?} scheduled as leader for ticks [{}, {})",
            scheduled_leader,
            tick_height + 1,
            max_tick_height
        );

        self.cluster_info
            .write()
            .unwrap()
            .set_leader(scheduled_leader);

        (scheduled_leader, max_tick_height)
    }

    fn rotate(&mut self, tick_height: u64) -> FullnodeReturnType {
        trace!("{:?}: rotate at tick_height={}", self.id, tick_height);
        let was_leader = self.node_services.tpu.is_leader();

        let (scheduled_leader, max_tick_height) = self.get_next_leader(tick_height);
        if scheduled_leader == self.id {
            let transition = if was_leader {
                debug!("{:?} remaining in leader role", self.id);
                FullnodeReturnType::LeaderToLeaderRotation
            } else {
                debug!("{:?} rotating to leader role", self.id);
                FullnodeReturnType::ValidatorToLeaderRotation
            };

            let last_entry_id = self.bank.last_id();

            self.node_services.tpu.switch_to_leader(
                &Arc::new(self.bank.copy_for_tpu()),
                PohServiceConfig::default(),
                self.tpu_sockets
                    .iter()
                    .map(|s| s.try_clone().expect("Failed to clone TPU sockets"))
                    .collect(),
                self.broadcast_socket
                    .try_clone()
                    .expect("Failed to clone broadcast socket"),
                self.cluster_info.clone(),
                self.sigverify_disabled,
                max_tick_height,
                0,
                &last_entry_id,
                self.id,
                &self.rotation_sender,
                &self.blob_sender,
            );

            transition
        } else {
            debug!("{:?} rotating to validator role", self.id);
            self.node_services.tpu.switch_to_forwarder(
                self.tpu_sockets
                    .iter()
                    .map(|s| s.try_clone().expect("Failed to clone TPU sockets"))
                    .collect(),
                self.cluster_info.clone(),
            );
            FullnodeReturnType::LeaderToValidatorRotation
        }
    }

    /// Runs a thread to manage node role transitions. The returned closure can
    /// be used to signal the node to exit.
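    ///
    /// A usage sketch based on the tests in this file; the channel names are
    /// illustrative:
    ///
    /// ```ignore
    /// let (rotation_sender, rotation_receiver) = channel();
    /// let node_exit = fullnode.run(Some(rotation_sender));
    /// let (transition, tick_height) = rotation_receiver.recv().unwrap();
    /// // ... eventually shut the node down:
    /// node_exit();
    /// ```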
    pub fn run(
        mut self,
        rotation_notifier: Option<Sender<(FullnodeReturnType, u64)>>,
    ) -> impl FnOnce() {
        let (sender, receiver) = channel();
        let exit = self.exit.clone();
        let timeout = Duration::from_secs(1);
        spawn(move || loop {
            if self.exit.load(Ordering::Relaxed) {
                debug!("node shutdown requested");
                self.close().expect("Unable to close node");
                sender.send(true).expect("Unable to signal exit");
                break;
            }

            match self.rotation_receiver.recv_timeout(timeout) {
                Ok(tick_height) => {
                    let transition = self.rotate(tick_height);
                    debug!("role transition complete: {:?}", transition);
                    if let Some(ref rotation_notifier) = rotation_notifier {
                        rotation_notifier
                            .send((transition, tick_height + 1))
                            .unwrap();
                    }
                }
                Err(RecvTimeoutError::Timeout) => continue,
                _ => (),
            }
        });
        move || {
            exit.store(true, Ordering::Relaxed);
            receiver.recv().unwrap();
            debug!("node shutdown complete");
        }
    }

    // Used for notifying many nodes in parallel to exit
    fn exit(&self) {
        self.exit.store(true, Ordering::Relaxed);
        if let Some(ref rpc_service) = self.rpc_service {
            rpc_service.exit();
        }
        if let Some(ref rpc_pubsub_service) = self.rpc_pubsub_service {
            rpc_pubsub_service.exit();
        }
        self.node_services.exit()
    }

    pub fn close(self) -> Result<()> {
        self.exit();
        self.join()
    }

    fn get_consumed_for_slot(blocktree: &Blocktree, slot_index: u64) -> u64 {
        let meta = blocktree.meta(slot_index).expect("Database error");
        if let Some(meta) = meta {
            meta.consumed
        } else {
            0
        }
    }
}
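
/// Opens the ledger at `ledger_path`, replays it into a new `Bank`, and returns
/// the bank together with the entry height, last entry id, `Blocktree` handle,
/// and the ledger signal channel endpoints.
///
/// A minimal usage sketch, mirroring the tests in this file:
///
/// ```ignore
/// let (bank, entry_height, last_entry_id, blocktree, signal_sender, signal_receiver) =
///     new_bank_from_ledger(
///         &ledger_path,
///         BlocktreeConfig::default(),
///         &LeaderSchedulerConfig::default(),
///     );
/// ```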
pub fn new_bank_from_ledger(
    ledger_path: &str,
    ledger_config: BlocktreeConfig,
    leader_scheduler_config: &LeaderSchedulerConfig,
) -> (Bank, u64, Hash, Blocktree, SyncSender<bool>, Receiver<bool>) {
    let (blocktree, ledger_signal_sender, ledger_signal_receiver) =
        Blocktree::open_with_config_signal(ledger_path, ledger_config)
            .expect("Expected to successfully open database ledger");
    let genesis_block =
        GenesisBlock::load(ledger_path).expect("Expected to successfully open genesis block");
    let mut bank = Bank::new_with_leader_scheduler_config(&genesis_block, leader_scheduler_config);

    let now = Instant::now();
    let entries = blocktree.read_ledger().expect("opening ledger");
    info!("processing ledger...");
    let (entry_height, last_entry_id) = bank.process_ledger(entries).expect("process_ledger");
    info!(
        "processed {} ledger entries in {}ms, tick_height={}...",
        entry_height,
        duration_as_ms(&now.elapsed()),
        bank.tick_height()
    );

    (
        bank,
        entry_height,
        last_entry_id,
        blocktree,
        ledger_signal_sender,
        ledger_signal_receiver,
    )
}

impl Service for Fullnode {
    type JoinReturnType = ();

    fn join(self) -> Result<()> {
        if let Some(rpc_service) = self.rpc_service {
            rpc_service.join()?;
        }
        if let Some(rpc_pubsub_service) = self.rpc_pubsub_service {
            rpc_pubsub_service.join()?;
        }

        self.gossip_service.join()?;
        self.node_services.join()?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::blob_fetch_stage::BlobFetchStage;
    use crate::blocktree::{create_tmp_sample_ledger, tmp_copy_ledger};
    use crate::entry::make_consecutive_blobs;
    use crate::entry::EntrySlice;
    use crate::gossip_service::{converge, make_listening_node};
    use crate::leader_scheduler::make_active_set_entries;
    use crate::streamer::responder;
    use std::cmp::min;
    use std::fs::remove_dir_all;
    use std::sync::atomic::Ordering;
    use std::thread::sleep;

    #[test]
    fn validator_exit() {
        let leader_keypair = Keypair::new();
        let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());

        let validator_keypair = Keypair::new();
        let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());
        let (_mint_keypair, validator_ledger_path, _last_entry_height, _last_id, _last_entry_id) =
            create_tmp_sample_ledger("validator_exit", 10_000, 0, leader_keypair.pubkey(), 1000);

        let validator = Fullnode::new(
            validator_node,
            &Arc::new(validator_keypair),
            &validator_ledger_path,
            VotingKeypair::new(),
            Some(&leader_node.info),
            &FullnodeConfig::default(),
        );
        validator.close().unwrap();
        remove_dir_all(validator_ledger_path).unwrap();
    }

    #[test]
    fn validator_parallel_exit() {
        let leader_keypair = Keypair::new();
        let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());

        let mut ledger_paths = vec![];
        let validators: Vec<Fullnode> = (0..2)
            .map(|i| {
                let validator_keypair = Keypair::new();
                let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());
                let (
                    _mint_keypair,
                    validator_ledger_path,
                    _last_entry_height,
                    _last_id,
                    _last_entry_id,
                ) = create_tmp_sample_ledger(
                    &format!("validator_parallel_exit_{}", i),
                    10_000,
                    0,
                    leader_keypair.pubkey(),
                    1000,
                );
                ledger_paths.push(validator_ledger_path.clone());
                Fullnode::new(
                    validator_node,
                    &Arc::new(validator_keypair),
                    &validator_ledger_path,
                    VotingKeypair::new(),
                    Some(&leader_node.info),
                    &FullnodeConfig::default(),
                )
            })
            .collect();

        // Each validator can exit in parallel to speed many sequential calls to `join`
        validators.iter().for_each(|v| v.exit());
        // While join is called sequentially, the above exit call notified all the
        // validators to exit from all their threads
        validators.into_iter().for_each(|validator| {
            validator.join().unwrap();
        });

        for path in ledger_paths {
            remove_dir_all(path).unwrap();
        }
    }

    #[test]
    fn test_leader_to_leader_transition() {
        solana_logger::setup();

        let bootstrap_leader_keypair = Keypair::new();
        let bootstrap_leader_node =
            Node::new_localhost_with_pubkey(bootstrap_leader_keypair.pubkey());

        let (
            _mint_keypair,
            bootstrap_leader_ledger_path,
            _genesis_entry_height,
            _last_id,
            _last_entry_id,
        ) = create_tmp_sample_ledger(
            "test_leader_to_leader_transition",
            10_000,
            1,
            bootstrap_leader_keypair.pubkey(),
            500,
        );

        // Once the bootstrap leader hits the second epoch, because there are no other choices in
        // the active set, this leader will remain the leader in the second epoch. In the second
        // epoch, check that the same leader knows to shut down and restart as a leader again.
        let ticks_per_slot = 5;
        let slots_per_epoch = 2;
        let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
        let active_window_length = 10 * ticks_per_epoch;
        let leader_scheduler_config =
            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);

        let bootstrap_leader_keypair = Arc::new(bootstrap_leader_keypair);
        let voting_keypair = VotingKeypair::new_local(&bootstrap_leader_keypair);
        // Start the bootstrap leader
        let mut fullnode_config = FullnodeConfig::default();
        fullnode_config.leader_scheduler_config = leader_scheduler_config;
        let bootstrap_leader = Fullnode::new(
            bootstrap_leader_node,
            &bootstrap_leader_keypair,
            &bootstrap_leader_ledger_path,
            voting_keypair,
            None,
            &fullnode_config,
        );

        let (rotation_sender, rotation_receiver) = channel();
        let bootstrap_leader_exit = bootstrap_leader.run(Some(rotation_sender));

        // Wait for the bootstrap leader to transition. Since there are no other nodes in the
        // cluster it will continue to be the leader
        assert_eq!(
            rotation_receiver.recv().unwrap(),
            (FullnodeReturnType::LeaderToLeaderRotation, ticks_per_slot)
        );
        bootstrap_leader_exit();
    }

    #[test]
    fn test_wrong_role_transition() {
        solana_logger::setup();

        let mut fullnode_config = FullnodeConfig::default();
        let ticks_per_slot = 16;
        let slots_per_epoch = 2;
        fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
            ticks_per_slot,
            slots_per_epoch,
            ticks_per_slot * slots_per_epoch,
        );

        // Create the leader and validator nodes
        let bootstrap_leader_keypair = Arc::new(Keypair::new());
        let validator_keypair = Arc::new(Keypair::new());
        let (bootstrap_leader_node, validator_node, bootstrap_leader_ledger_path, _, _) =
            setup_leader_validator(
                &bootstrap_leader_keypair,
                &validator_keypair,
                0,
                // Generate enough ticks for two epochs to flush the bootstrap_leader's vote at
                // tick_height = 0 from the leader scheduler's active window
                ticks_per_slot * 4,
                "test_wrong_role_transition",
                ticks_per_slot,
            );
        let bootstrap_leader_info = bootstrap_leader_node.info.clone();

        let validator_ledger_path =
            tmp_copy_ledger(&bootstrap_leader_ledger_path, "test_wrong_role_transition");

        let ledger_paths = vec![
            bootstrap_leader_ledger_path.clone(),
            validator_ledger_path.clone(),
        ];

        {
            // Test that a node knows to transition to a validator based on parsing the ledger
            let bootstrap_leader = Fullnode::new(
                bootstrap_leader_node,
                &bootstrap_leader_keypair,
                &bootstrap_leader_ledger_path,
                VotingKeypair::new(),
                Some(&bootstrap_leader_info),
                &fullnode_config,
            );

            assert!(!bootstrap_leader.node_services.tpu.is_leader());

            // Test that a node knows to transition to a leader based on parsing the ledger
            let validator = Fullnode::new(
                validator_node,
                &validator_keypair,
                &validator_ledger_path,
                VotingKeypair::new(),
                Some(&bootstrap_leader_info),
                &fullnode_config,
            );

            assert!(validator.node_services.tpu.is_leader());
            validator.close().expect("Expected leader node to close");
            bootstrap_leader
                .close()
                .expect("Expected validator node to close");
        }
        for path in ledger_paths {
            Blocktree::destroy(&path).expect("Expected successful database destruction");
            let _ignored = remove_dir_all(&path);
        }
    }

    // TODO: Rework this test or TVU (make_consecutive_blobs sends blobs that can't be handled by
    // the replay_stage)
    #[test]
    #[ignore]
    fn test_validator_to_leader_transition() {
        solana_logger::setup();
        // Make leader and validator node
        let ticks_per_slot = 10;
        let slots_per_epoch = 4;
        let leader_keypair = Arc::new(Keypair::new());
        let validator_keypair = Arc::new(Keypair::new());
        let (leader_node, validator_node, validator_ledger_path, ledger_initial_len, last_id) =
            setup_leader_validator(
                &leader_keypair,
                &validator_keypair,
                0,
                0,
                "test_validator_to_leader_transition",
                ticks_per_slot,
            );

        let leader_id = leader_keypair.pubkey();
        let validator_info = validator_node.info.clone();

        info!("leader: {:?}", leader_id);
        info!("validator: {:?}", validator_info.id);

        // Set the leader scheduler for the validator
        let mut fullnode_config = FullnodeConfig::default();
        fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
            ticks_per_slot,
            slots_per_epoch,
            ticks_per_slot * slots_per_epoch,
        );

        let voting_keypair = VotingKeypair::new_local(&validator_keypair);

        // Start the validator
        let validator = Fullnode::new(
            validator_node,
            &validator_keypair,
            &validator_ledger_path,
            voting_keypair,
            Some(&leader_node.info),
            &fullnode_config,
        );

        let blobs_to_send = slots_per_epoch * ticks_per_slot + ticks_per_slot;

        // Send blobs to the validator from our mock leader
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let blob_sockets: Vec<Arc<UdpSocket>> =
                leader_node.sockets.tvu.into_iter().map(Arc::new).collect();
            let t_responder = responder(
                "test_validator_to_leader_transition",
                blob_sockets[0].clone(),
                r_responder,
            );

            let tvu_address = &validator_info.tvu;

            let msgs = make_consecutive_blobs(
                &leader_id,
                blobs_to_send,
                ledger_initial_len,
                last_id,
                &tvu_address,
            )
            .into_iter()
            .rev()
            .collect();
            s_responder.send(msgs).expect("send");
            t_responder
        };

        info!("waiting for validator to rotate into the leader role");
        let (rotation_sender, rotation_receiver) = channel();
        let validator_exit = validator.run(Some(rotation_sender));
        let rotation = rotation_receiver.recv().unwrap();
        assert_eq!(
            rotation,
            (FullnodeReturnType::ValidatorToLeaderRotation, blobs_to_send)
        );

        // Close the validator so that rocksdb has locks available
        validator_exit();
        let (bank, entry_height, _, _, _, _) = new_bank_from_ledger(
            &validator_ledger_path,
            BlocktreeConfig::default(),
            &LeaderSchedulerConfig::default(),
        );

        assert!(bank.tick_height() >= bank.leader_scheduler.read().unwrap().ticks_per_epoch);

        assert!(entry_height >= ledger_initial_len);

        // Shut down
        t_responder.join().expect("responder thread join");
        Blocktree::destroy(&validator_ledger_path)
            .expect("Expected successful database destruction");
        let _ignored = remove_dir_all(&validator_ledger_path).unwrap();
    }

    #[test]
    fn test_tvu_behind() {
        solana_logger::setup();

        // Make leader node
        let ticks_per_slot = 5;
        let slots_per_epoch = 1;
        let leader_keypair = Arc::new(Keypair::new());
        let validator_keypair = Arc::new(Keypair::new());

        info!("leader: {:?}", leader_keypair.pubkey());
        info!("validator: {:?}", validator_keypair.pubkey());

        let (leader_node, _, leader_ledger_path, _, _) = setup_leader_validator(
            &leader_keypair,
            &validator_keypair,
            1,
            0,
            "test_tvu_behind",
            ticks_per_slot,
        );

        let leader_node_info = leader_node.info.clone();

        // Set the leader scheduler for the validator
        let mut fullnode_config = FullnodeConfig::default();
        fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
            ticks_per_slot,
            slots_per_epoch,
            ticks_per_slot * slots_per_epoch,
        );
        let config = PohServiceConfig::Sleep(Duration::from_millis(200));
        fullnode_config.tick_config = config;

        info!("Start up a listener");
        let blob_receiver_exit = Arc::new(AtomicBool::new(false));
        let (_, _, mut listening_node, _) = make_listening_node(&leader_node.info);
        let (blob_fetch_sender, blob_fetch_receiver) = channel();
        let blob_fetch_stage = BlobFetchStage::new(
            Arc::new(listening_node.sockets.tvu.pop().unwrap()),
            &blob_fetch_sender,
            blob_receiver_exit.clone(),
        );

        let voting_keypair = VotingKeypair::new_local(&leader_keypair);
        info!("Start the bootstrap leader");
        let leader = Fullnode::new(
            leader_node,
            &leader_keypair,
            &leader_ledger_path,
            voting_keypair,
            Some(&leader_node_info),
            &fullnode_config,
        );

        let (rotation_sender, rotation_receiver) = channel();

        info!("Pause the Tvu");
        let pause_tvu = leader.node_services.tvu.get_pause();
        pause_tvu.store(true, Ordering::Relaxed);

        // Wait for convergence
        converge(&leader_node_info, 2);

        info!("Wait for leader -> validator transition");
        let rotation_signal = leader
            .rotation_receiver
            .recv()
            .expect("signal for leader -> validator transition");
        debug!("received rotation signal: {:?}", rotation_signal);
        // Re-send the rotation signal; it'll be received again once the tvu is unpaused
        leader.rotation_sender.send(rotation_signal).expect("send");

        info!("Make sure the tvu bank has not reached the last tick for the slot (the last tick is ticks_per_slot - 1)");
        {
            let w_last_ids = leader.bank.last_ids().write().unwrap();
            assert!(w_last_ids.tick_height < ticks_per_slot - 1);
        }

        // Clear the blobs we've received so far. After this rotation, we should
        // no longer receive blobs from slot 0
        while let Ok(_) = blob_fetch_receiver.try_recv() {}

        let leader_exit = leader.run(Some(rotation_sender));

        // Wait for the Tpu bank to progress while the Tvu bank is stuck
        sleep(Duration::from_millis(1000));

        // The Tvu bank lock is released here, so the tvu should start making progress again and
        // should signal a rotation. After rotation it will still be the slot leader, as a new
        // leader schedule has not been computed yet (still in epoch 0). In the next epoch
        // (epoch 1), the node will transition to a validator.
        info!("Unpause the Tvu");
        pause_tvu.store(false, Ordering::Relaxed);
        let expected_rotations = vec![
            (FullnodeReturnType::LeaderToLeaderRotation, ticks_per_slot),
            (
                FullnodeReturnType::LeaderToValidatorRotation,
                2 * ticks_per_slot,
            ),
        ];

        for expected_rotation in expected_rotations {
            loop {
                let transition = rotation_receiver.recv().unwrap();
                info!("leader transition: {:?}", transition);
                assert_eq!(expected_rotation, transition);
                break;
            }
        }

        info!("Shut down");
        leader_exit();

        // Make sure that after rotation we don't receive any blobs from slot 0 (make sure
        // broadcast started again at the correct place)
        while let Ok(new_blobs) = blob_fetch_receiver.try_recv() {
            for blob in new_blobs {
                assert_ne!(blob.read().unwrap().slot(), 0);
            }
        }

        // Check the ledger to make sure the PoH chains
        {
            let blocktree = Blocktree::open(&leader_ledger_path).unwrap();
            let entries: Vec<_> = (0..3)
                .flat_map(|slot_height| blocktree.get_slot_entries(slot_height, 0, None).unwrap())
                .collect();

            assert!(entries[1..].verify(&entries[0].id))
        }

        blob_receiver_exit.store(true, Ordering::Relaxed);
        blob_fetch_stage.join().unwrap();

        Blocktree::destroy(&leader_ledger_path).expect("Expected successful database destruction");
        let _ignored = remove_dir_all(&leader_ledger_path).unwrap();
    }

    fn setup_leader_validator(
        leader_keypair: &Arc<Keypair>,
        validator_keypair: &Arc<Keypair>,
        num_genesis_ticks: u64,
        num_ending_ticks: u64,
        test_name: &str,
        ticks_per_block: u64,
    ) -> (Node, Node, String, u64, Hash) {
        // Make a leader identity
        let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());

        // Create validator identity
        assert!(num_genesis_ticks <= ticks_per_block);
        let (mint_keypair, ledger_path, genesis_entry_height, last_id, last_entry_id) =
            create_tmp_sample_ledger(
                test_name,
                10_000,
                num_genesis_ticks,
                leader_node.info.id,
                500,
            );

        let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());

        // Write two entries to the ledger so that the validator is in the active set
        // (this is what will cause leader rotation after the bootstrap height):
        //
        // 1) Give the validator a nonzero number of tokens
        // 2) A vote from the validator
        let (active_set_entries, _) = make_active_set_entries(
            validator_keypair,
            &mint_keypair,
            10,
            1,
            &last_entry_id,
            &last_id,
            num_ending_ticks,
        );

        let non_tick_active_entries_len = active_set_entries.len() - num_ending_ticks as usize;
        let remaining_ticks_in_zeroth_slot = ticks_per_block - num_genesis_ticks;
        let entries_for_zeroth_slot = min(
            active_set_entries.len(),
            non_tick_active_entries_len + remaining_ticks_in_zeroth_slot as usize,
        );
        let entry_chunks: Vec<_> = active_set_entries[entries_for_zeroth_slot..]
            .chunks(ticks_per_block as usize)
            .collect();

        let blocktree = Blocktree::open(&ledger_path).unwrap();

        // Iterate over the slots, writing entries for slots 0..=entry_chunks.len()
        for i in 0..entry_chunks.len() + 1 {
            let (start_height, entries) = {
                if i == 0 {
                    (
                        genesis_entry_height,
                        &active_set_entries[..entries_for_zeroth_slot],
                    )
                } else {
                    (0, entry_chunks[i - 1])
                }
            };

            blocktree
                .write_entries(i as u64, start_height, entries)
                .unwrap();
        }

        let entry_height = genesis_entry_height + active_set_entries.len() as u64;
        (
            leader_node,
            validator_node,
            ledger_path,
            entry_height,
            active_set_entries.last().unwrap().id,
        )
    }
}