//! The `fullnode` module hosts all the fullnode microservices.
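//!
//! A node starts in either the leader or validator role, as decided by the
//! `LeaderScheduler` once the ledger has been processed. A minimal sketch of
//! bringing a node up and joining an existing network (the ledger path and
//! entry-point address are hypothetical, so the example is not compiled):
//!
//! ```ignore
//! let keypair = Keypair::new();
//! let node = Node::new_localhost_with_pubkey(keypair.pubkey());
//! let entry_point = Some("127.0.0.1:8001".parse().unwrap());
//! let fullnode = Fullnode::new(
//!     node,
//!     "/path/to/ledger",
//!     keypair,
//!     entry_point,
//!     false, // sigverify enabled
//!     LeaderScheduler::from_bootstrap_leader(bootstrap_leader_id),
//! );
//! ```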

use bank::Bank;
use broadcast_stage::BroadcastStage;
use cluster_info::{ClusterInfo, Node, NodeInfo};
use drone::DRONE_PORT;
use entry::Entry;
use hash::Hash;
use leader_scheduler::LeaderScheduler;
use ledger::read_ledger;
use ncp::Ncp;
use rpc::{JsonRpcService, RPC_PORT};
use rpu::Rpu;
use service::Service;
use signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::Result;
use tpu::{Tpu, TpuReturnType};
use tvu::{Tvu, TvuReturnType};
use untrusted::Input;
use window;

pub enum NodeRole {
    Leader(LeaderServices),
    Validator(ValidatorServices),
}

pub struct LeaderServices {
    tpu: Tpu,
    broadcast_stage: BroadcastStage,
}

impl LeaderServices {
    fn new(tpu: Tpu, broadcast_stage: BroadcastStage) -> Self {
        LeaderServices {
            tpu,
            broadcast_stage,
        }
    }

    pub fn join(self) -> Result<Option<TpuReturnType>> {
        self.broadcast_stage.join()?;
        self.tpu.join()
    }

    pub fn is_exited(&self) -> bool {
        self.tpu.is_exited()
    }

    pub fn exit(&self) {
        self.tpu.exit();
    }
}

pub struct ValidatorServices {
    tvu: Tvu,
}

impl ValidatorServices {
    fn new(tvu: Tvu) -> Self {
        ValidatorServices { tvu }
    }

    pub fn join(self) -> Result<Option<TvuReturnType>> {
        self.tvu.join()
    }

    pub fn is_exited(&self) -> bool {
        self.tvu.is_exited()
    }

    pub fn exit(&self) {
        self.tvu.exit()
    }
}

pub enum FullnodeReturnType {
    LeaderToValidatorRotation,
    ValidatorToLeaderRotation,
}

pub struct Fullnode {
    pub node_role: Option<NodeRole>,
    pub leader_scheduler: Arc<RwLock<LeaderScheduler>>,
    keypair: Arc<Keypair>,
    exit: Arc<AtomicBool>,
    rpu: Option<Rpu>,
    rpc_service: JsonRpcService,
    ncp: Ncp,
    bank: Arc<Bank>,
    cluster_info: Arc<RwLock<ClusterInfo>>,
    ledger_path: String,
    sigverify_disabled: bool,
    shared_window: window::SharedWindow,
    replicate_socket: Vec<UdpSocket>,
    repair_socket: UdpSocket,
    retransmit_socket: UdpSocket,
    transaction_sockets: Vec<UdpSocket>,
    broadcast_socket: UdpSocket,
    requests_socket: UdpSocket,
    respond_socket: UdpSocket,
}

/// Fullnode configuration to be stored in file
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Config {
    pub node_info: NodeInfo,
    pkcs8: Vec<u8>,
}

/// Structure to be replicated by the network
impl Config {
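    /// Builds a `Config` that advertises `bind_addr` and embeds the pkcs8-encoded
    /// keypair. A usage sketch (the pkcs8 bytes are assumed to come from key
    /// generation elsewhere, so this is not compiled as a doctest):
    ///
    /// ```ignore
    /// let config = Config::new(&"127.0.0.1:8000".parse().unwrap(), pkcs8_bytes);
    /// let keypair = config.keypair(); // recovers the keypair stored in pkcs8 form
    /// ```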
    pub fn new(bind_addr: &SocketAddr, pkcs8: Vec<u8>) -> Self {
        let keypair =
            Keypair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in fullnode::Config new");
        let pubkey = keypair.pubkey();
        let node_info = NodeInfo::new_with_pubkey_socketaddr(pubkey, bind_addr);
        Config { node_info, pkcs8 }
    }

    pub fn keypair(&self) -> Keypair {
        Keypair::from_pkcs8(Input::from(&self.pkcs8))
            .expect("from_pkcs8 in fullnode::Config keypair")
    }
}

impl Fullnode {
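    /// Creates a leader or validator fullnode: replays the ledger at `ledger_path`
    /// into a new `Bank`, then hands off to `new_with_bank`. Pass the gossip (NCP)
    /// address of an existing node as `leader_addr` to join a network, or `None`
    /// when bootstrapping it.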
    pub fn new(
        node: Node,
        ledger_path: &str,
        keypair: Keypair,
        leader_addr: Option<SocketAddr>,
        sigverify_disabled: bool,
        mut leader_scheduler: LeaderScheduler,
    ) -> Self {
        info!("creating bank...");
        let (bank, entry_height, ledger_tail) =
            Self::new_bank_from_ledger(ledger_path, &mut leader_scheduler);

        info!("creating networking stack...");
        let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();

        info!(
            "starting... local gossip address: {} (advertising {})",
            local_gossip_addr, node.info.contact_info.ncp
        );

        let local_requests_addr = node.sockets.requests.local_addr().unwrap();
        let requests_addr = node.info.contact_info.rpu;
        let leader_info = leader_addr.map(|i| NodeInfo::new_entry_point(&i));
        let server = Self::new_with_bank(
            keypair,
            bank,
            entry_height,
            &ledger_tail,
            node,
            leader_info.as_ref(),
            ledger_path,
            sigverify_disabled,
            leader_scheduler,
            None,
        );

        match leader_addr {
            Some(leader_addr) => {
                info!(
                    "validator ready... local request address: {} (advertising {}) connected to: {}",
                    local_requests_addr, requests_addr, leader_addr
                );
            }
            None => {
                info!(
                    "leader ready... local request address: {} (advertising {})",
                    local_requests_addr, requests_addr
                );
            }
        }

        server
    }

    /// Create a fullnode instance acting as a leader or validator.
    ///
    /// ```text
    ///              .---------------------.
    ///              |  Leader             |
    ///              |                     |
    ///  .--------.  |  .-----.            |
    ///  |        |---->|     |            |
    ///  | Client |  |  | RPU |            |
    ///  |        |<----|     |            |
    ///  `----+---`  |  `-----`            |
    ///       |      |     ^               |
    ///       |      |     |               |
    ///       |      |  .--+---.           |
    ///       |      |  | Bank |           |
    ///       |      |  `------`           |
    ///       |      |     ^               |
    ///       |      |     |               |    .------------.
    ///       |      |  .--+--.   .-----.  |    |            |
    ///       `-------->| TPU +-->| NCP +------>| Validators |
    ///              |  `-----`   `-----`  |    |            |
    ///              |                     |    `------------`
    ///              `---------------------`
    ///
    ///              .-------------------------------.
    ///              | Validator                     |
    ///              |                               |
    ///  .--------.  |            .-----.            |
    ///  |        |-------------->|     |            |
    ///  | Client |  |            | RPU |            |
    ///  |        |<--------------|     |            |
    ///  `--------`  |            `-----`            |
    ///              |               ^               |
    ///              |               |               |
    ///              |            .--+---.           |
    ///              |            | Bank |           |
    ///              |            `------`           |
    ///              |               ^               |
    ///  .--------.  |               |               |    .------------.
    ///  |        |  |            .--+--.            |    |            |
    ///  | Leader |<------------->| TVU +<--------------->|            |
    ///  |        |  |            `-----`            |    | Validators |
    ///  |        |  |               ^               |    |            |
    ///  |        |  |               |               |    |            |
    ///  |        |  |            .--+--.            |    |            |
    ///  |        |<------------->| NCP +<--------------->|            |
    ///  |        |  |            `-----`            |    |            |
    ///  `--------`  |                               |    `------------`
    ///              `-------------------------------`
    /// ```
    #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
    pub fn new_with_bank(
        keypair: Keypair,
        bank: Bank,
        entry_height: u64,
        ledger_tail: &[Entry],
        node: Node,
        bootstrap_leader_info_option: Option<&NodeInfo>,
        ledger_path: &str,
        sigverify_disabled: bool,
        leader_scheduler: LeaderScheduler,
        rpc_port: Option<u16>,
    ) -> Self {
        let exit = Arc::new(AtomicBool::new(false));
        let bank = Arc::new(bank);

        let rpu = Some(Rpu::new(
            &bank,
            node.sockets
                .requests
                .try_clone()
                .expect("Failed to clone requests socket"),
            node.sockets
                .respond
                .try_clone()
                .expect("Failed to clone respond socket"),
        ));

        // TODO: this code assumes this node is the leader
        let mut drone_addr = node.info.contact_info.tpu;
        drone_addr.set_port(DRONE_PORT);

        // Use the custom RPC port, if provided (`Some(port)`).
        // The RPC port may be any open port on the node.
        // If rpc_port == `None`, the node listens on the default RPC_PORT from the rpc module.
        // If rpc_port == `Some(0)`, the node dynamically chooses any open port. Useful for tests.
        let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
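        // For example:
        //   rpc_port == None     -> 0.0.0.0:RPC_PORT
        //   rpc_port == Some(0)  -> 0.0.0.0:<ephemeral port chosen by the OS>
        //   rpc_port == Some(p)  -> 0.0.0.0:p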
        let rpc_service = JsonRpcService::new(
            &bank,
            node.info.contact_info.tpu,
            drone_addr,
            rpc_addr,
            exit.clone(),
        );

        let last_entry_id = &ledger_tail
            .last()
            .expect("Expected at least one entry in the ledger")
            .id;

        let window = window::new_window_from_entries(ledger_tail, entry_height, &node.info);
        let shared_window = Arc::new(RwLock::new(window));
        let cluster_info = Arc::new(RwLock::new(
            ClusterInfo::new(node.info).expect("ClusterInfo::new"),
        ));

        let ncp = Ncp::new(
            &cluster_info,
            shared_window.clone(),
            Some(ledger_path),
            node.sockets.gossip,
            exit.clone(),
        );

        let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
        let keypair = Arc::new(keypair);

        // Insert the bootstrap leader info, should only be None if this node
        // is the bootstrap leader
        if let Some(bootstrap_leader_info) = bootstrap_leader_info_option {
            cluster_info.write().unwrap().insert(bootstrap_leader_info);
        }

        // Get the scheduled leader
        let scheduled_leader = leader_scheduler
            .read()
            .unwrap()
            .get_scheduled_leader(entry_height)
            .expect("Leader not known after processing bank");

        cluster_info.write().unwrap().set_leader(scheduled_leader);
        let node_role = if scheduled_leader != keypair.pubkey() {
            // Start in validator mode.
            let tvu = Tvu::new(
                keypair.clone(),
                &bank,
                entry_height,
                cluster_info.clone(),
                shared_window.clone(),
                node.sockets
                    .replicate
                    .iter()
                    .map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
                    .collect(),
                node.sockets
                    .repair
                    .try_clone()
                    .expect("Failed to clone repair socket"),
                node.sockets
                    .retransmit
                    .try_clone()
                    .expect("Failed to clone retransmit socket"),
                Some(ledger_path),
                leader_scheduler.clone(),
            );
            let validator_state = ValidatorServices::new(tvu);
            Some(NodeRole::Validator(validator_state))
        } else {
            // Start in leader mode.
            let (tpu, entry_receiver, tpu_exit) = Tpu::new(
                keypair.clone(),
                &bank,
                &cluster_info,
                Default::default(),
                node.sockets
                    .transaction
                    .iter()
                    .map(|s| s.try_clone().expect("Failed to clone transaction sockets"))
                    .collect(),
                ledger_path,
                sigverify_disabled,
                entry_height,
                last_entry_id,
                leader_scheduler.clone(),
            );

            let broadcast_stage = BroadcastStage::new(
                node.sockets
                    .broadcast
                    .try_clone()
                    .expect("Failed to clone broadcast socket"),
                cluster_info.clone(),
                shared_window.clone(),
                entry_height,
                entry_receiver,
                leader_scheduler.clone(),
                tpu_exit,
            );
            let leader_state = LeaderServices::new(tpu, broadcast_stage);
            Some(NodeRole::Leader(leader_state))
        };

        Fullnode {
            keypair,
            cluster_info,
            shared_window,
            bank,
            sigverify_disabled,
            rpu,
            ncp,
            rpc_service,
            node_role,
            ledger_path: ledger_path.to_owned(),
            exit,
            replicate_socket: node.sockets.replicate,
            repair_socket: node.sockets.repair,
            retransmit_socket: node.sockets.retransmit,
            transaction_sockets: node.sockets.transaction,
            broadcast_socket: node.sockets.broadcast,
            requests_socket: node.sockets.requests,
            respond_socket: node.sockets.respond,
            leader_scheduler,
        }
    }

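    /// Tears down the leader role: clears the leader scheduler, rebuilds the bank
    /// from the ledger, restarts the RPU against the new bank, and spins up a TVU
    /// so the node continues as a validator.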
    fn leader_to_validator(&mut self) -> Result<()> {
        let (scheduled_leader, entry_height) = {
            let mut ls_lock = self.leader_scheduler.write().unwrap();
            // Clear the leader scheduler
            ls_lock.reset();

            // TODO: We can avoid building the bank again once RecordStage is
            // integrated with BankingStage
            let (bank, entry_height, _) =
                Self::new_bank_from_ledger(&self.ledger_path, &mut *ls_lock);

            self.bank = Arc::new(bank);

            (
                ls_lock
                    .get_scheduled_leader(entry_height)
                    .expect("Scheduled leader should exist after rebuilding bank"),
                entry_height,
            )
        };

        self.cluster_info
            .write()
            .unwrap()
            .set_leader(scheduled_leader);

        // Make a new RPU to serve requests out of the new bank we've created
        // instead of the old one
        if self.rpu.is_some() {
            let old_rpu = self.rpu.take().unwrap();
            old_rpu.close()?;
            self.rpu = Some(Rpu::new(
                &self.bank,
                self.requests_socket
                    .try_clone()
                    .expect("Failed to clone requests socket"),
                self.respond_socket
                    .try_clone()
                    .expect("Failed to clone respond socket"),
            ));
        }

        let tvu = Tvu::new(
            self.keypair.clone(),
            &self.bank,
            entry_height,
            self.cluster_info.clone(),
            self.shared_window.clone(),
            self.replicate_socket
                .iter()
                .map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
                .collect(),
            self.repair_socket
                .try_clone()
                .expect("Failed to clone repair socket"),
            self.retransmit_socket
                .try_clone()
                .expect("Failed to clone retransmit socket"),
            Some(&self.ledger_path),
            self.leader_scheduler.clone(),
        );
        let validator_state = ValidatorServices::new(tvu);
        self.node_role = Some(NodeRole::Validator(validator_state));
        Ok(())
    }

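    /// Promotes the node to the leader role: marks itself as leader in the cluster
    /// info, then starts a TPU and a `BroadcastStage` fed from the given ledger
    /// position and last entry id.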
    fn validator_to_leader(&mut self, entry_height: u64, last_entry_id: Hash) {
        self.cluster_info
            .write()
            .unwrap()
            .set_leader(self.keypair.pubkey());
        let (tpu, blob_receiver, tpu_exit) = Tpu::new(
            self.keypair.clone(),
            &self.bank,
            &self.cluster_info,
            Default::default(),
            self.transaction_sockets
                .iter()
                .map(|s| s.try_clone().expect("Failed to clone transaction sockets"))
                .collect(),
            &self.ledger_path,
            self.sigverify_disabled,
            entry_height,
            // We pass the last_entry_id from the replicate stage because we can't trust that
            // the window didn't overwrite the slot for the last entry that the replicate stage
            // processed. We also want to avoid re-reading the ledger just for the last id.
            &last_entry_id,
            self.leader_scheduler.clone(),
        );

        let broadcast_stage = BroadcastStage::new(
            self.broadcast_socket
                .try_clone()
                .expect("Failed to clone broadcast socket"),
            self.cluster_info.clone(),
            self.shared_window.clone(),
            entry_height,
            blob_receiver,
            self.leader_scheduler.clone(),
            tpu_exit,
        );
        let leader_state = LeaderServices::new(tpu, broadcast_stage);
        self.node_role = Some(NodeRole::Leader(leader_state));
    }

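    /// Returns true once the services of the current role (TPU or TVU) have
    /// exited, e.g. because a leader rotation is pending.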
    pub fn check_role_exited(&self) -> bool {
        match self.node_role {
            Some(NodeRole::Leader(ref leader_services)) => leader_services.is_exited(),
            Some(NodeRole::Validator(ref validator_services)) => validator_services.is_exited(),
            None => false,
        }
    }

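    /// Joins the services of the current role and, if they stopped because of a
    /// leader rotation, swaps the node over to the other role.
    ///
    /// A sketch of how a caller might drive rotations (assuming a simple polling
    /// loop around this fullnode; not a runnable doctest):
    ///
    /// ```ignore
    /// loop {
    ///     if fullnode.check_role_exited() {
    ///         match fullnode.handle_role_transition()? {
    ///             Some(FullnodeReturnType::LeaderToValidatorRotation) => info!("now validating"),
    ///             Some(FullnodeReturnType::ValidatorToLeaderRotation) => info!("now leading"),
    ///             None => break,
    ///         }
    ///     }
    ///     sleep(Duration::from_millis(100));
    /// }
    /// ```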
    pub fn handle_role_transition(&mut self) -> Result<Option<FullnodeReturnType>> {
        let node_role = self.node_role.take();
        match node_role {
            Some(NodeRole::Leader(leader_services)) => match leader_services.join()? {
                Some(TpuReturnType::LeaderRotation) => {
                    self.leader_to_validator()?;
                    Ok(Some(FullnodeReturnType::LeaderToValidatorRotation))
                }
                _ => Ok(None),
            },
            Some(NodeRole::Validator(validator_services)) => match validator_services.join()? {
                Some(TvuReturnType::LeaderRotation(entry_height, last_entry_id)) => {
                    self.validator_to_leader(entry_height, last_entry_id);
                    Ok(Some(FullnodeReturnType::ValidatorToLeaderRotation))
                }
                _ => Ok(None),
            },
            None => Ok(None),
        }
    }

    // Used for notifying many nodes in parallel to exit
    pub fn exit(&self) {
        self.exit.store(true, Ordering::Relaxed);
        if let Some(ref rpu) = self.rpu {
            rpu.exit();
        }
        match self.node_role {
            Some(NodeRole::Leader(ref leader_services)) => leader_services.exit(),
            Some(NodeRole::Validator(ref validator_services)) => validator_services.exit(),
            _ => (),
        }
    }

    pub fn close(self) -> Result<Option<FullnodeReturnType>> {
        self.exit();
        self.join()
    }

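    /// Replays the ledger at `ledger_path` into a fresh `Bank`, updating the
    /// leader scheduler along the way. Returns the bank, the entry height of the
    /// ledger, and its tail of entries.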
    pub fn new_bank_from_ledger(
        ledger_path: &str,
        leader_scheduler: &mut LeaderScheduler,
    ) -> (Bank, u64, Vec<Entry>) {
        let bank = Bank::new_default(false);
        let entries = read_ledger(ledger_path, true).expect("opening ledger");
        let entries = entries
            .map(|e| e.unwrap_or_else(|err| panic!("failed to parse entry. error: {}", err)));
        info!("processing ledger...");
        let (entry_height, ledger_tail) = bank
            .process_ledger(entries, leader_scheduler)
            .expect("process_ledger");
        // entry_height is the network-wide agreed height of the ledger.
        // initialize it from the input ledger
        info!("processed {} ledger...", entry_height);
        (bank, entry_height, ledger_tail)
    }
}

impl Service for Fullnode {
    type JoinReturnType = Option<FullnodeReturnType>;

    fn join(self) -> Result<Option<FullnodeReturnType>> {
        if let Some(rpu) = self.rpu {
            rpu.join()?;
        }
        self.ncp.join()?;
        self.rpc_service.join()?;

        match self.node_role {
            Some(NodeRole::Validator(validator_service)) => {
                if let Some(TvuReturnType::LeaderRotation(_, _)) = validator_service.join()? {
                    return Ok(Some(FullnodeReturnType::ValidatorToLeaderRotation));
                }
            }
            Some(NodeRole::Leader(leader_service)) => {
                if let Some(TpuReturnType::LeaderRotation) = leader_service.join()? {
                    return Ok(Some(FullnodeReturnType::LeaderToValidatorRotation));
                }
            }
            _ => (),
        }

        Ok(None)
    }
}

#[cfg(test)]
mod tests {
    use bank::Bank;
    use cluster_info::Node;
    use fullnode::{Fullnode, NodeRole, TvuReturnType};
    use leader_scheduler::{make_active_set_entries, LeaderScheduler, LeaderSchedulerConfig};
    use ledger::{create_sample_ledger, genesis, LedgerWriter};
    use packet::make_consecutive_blobs;
    use service::Service;
    use signature::{Keypair, KeypairUtil};
    use std::cmp;
    use std::fs::remove_dir_all;
    use std::net::UdpSocket;
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use streamer::responder;

    #[test]
    fn validator_exit() {
        let keypair = Keypair::new();
        let tn = Node::new_localhost_with_pubkey(keypair.pubkey());
        let (mint, validator_ledger_path) = genesis("validator_exit", 10_000);
        let bank = Bank::new(&mint);
        let entry = tn.info.clone();
        let genesis_entries = &mint.create_entries();
        let entry_height = genesis_entries.len() as u64;

        let v = Fullnode::new_with_bank(
            keypair,
            bank,
            entry_height,
            &genesis_entries,
            tn,
            Some(&entry),
            &validator_ledger_path,
            false,
            LeaderScheduler::from_bootstrap_leader(entry.id),
            Some(0),
        );
        v.close().unwrap();
        remove_dir_all(validator_ledger_path).unwrap();
    }

    #[test]
    fn validator_parallel_exit() {
        let mut ledger_paths = vec![];
        let vals: Vec<Fullnode> = (0..2)
            .map(|i| {
                let keypair = Keypair::new();
                let tn = Node::new_localhost_with_pubkey(keypair.pubkey());
                let (mint, validator_ledger_path) =
                    genesis(&format!("validator_parallel_exit_{}", i), 10_000);
                ledger_paths.push(validator_ledger_path.clone());
                let bank = Bank::new(&mint);
                let entry = tn.info.clone();

                let genesis_entries = &mint.create_entries();
                let entry_height = genesis_entries.len() as u64;
                Fullnode::new_with_bank(
                    keypair,
                    bank,
                    entry_height,
                    &genesis_entries,
                    tn,
                    Some(&entry),
                    &validator_ledger_path,
                    false,
                    LeaderScheduler::from_bootstrap_leader(entry.id),
                    Some(0),
                )
            }).collect();

        // Each validator can exit in parallel to speed many sequential calls to `join`
        vals.iter().for_each(|v| v.exit());

        // While join is called sequentially, the above exit call notified all the
        // validators to exit from all their threads
        vals.into_iter().for_each(|v| {
            v.join().unwrap();
        });

        for path in ledger_paths {
            remove_dir_all(path).unwrap();
        }
    }

    #[test]
    fn test_wrong_role_transition() {
        // Create the leader node information
        let bootstrap_leader_keypair = Keypair::new();
        let bootstrap_leader_node =
            Node::new_localhost_with_pubkey(bootstrap_leader_keypair.pubkey());
        let bootstrap_leader_info = bootstrap_leader_node.info.clone();

        // Create the validator node information
        let validator_keypair = Keypair::new();
        let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());

        // Make a common mint and a genesis entry for both leader + validator's ledgers
        let (mint, bootstrap_leader_ledger_path, genesis_entries) =
            create_sample_ledger("test_wrong_role_transition", 10_000);

        let last_id = genesis_entries
            .last()
            .expect("expected at least one genesis entry")
            .id;

        // Write the entries to the ledger that will cause leader rotation
        // after the bootstrap height
        let mut ledger_writer = LedgerWriter::open(&bootstrap_leader_ledger_path, false).unwrap();
        let first_entries =
            make_active_set_entries(&validator_keypair, &mint.keypair(), &last_id, &last_id);

        let ledger_initial_len = (genesis_entries.len() + first_entries.len()) as u64;
        ledger_writer.write_entries(first_entries).unwrap();

        // Create the common leader scheduling configuration
        let num_slots_per_epoch = 3;
        let leader_rotation_interval = 5;
        let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;

        // Set the bootstrap height to exactly the current ledger length, so that we can
        // test if the bootstrap leader knows to immediately transition to a validator
        // after parsing the ledger during startup
        let bootstrap_height = ledger_initial_len;
        let leader_scheduler_config = LeaderSchedulerConfig::new(
            bootstrap_leader_info.id,
            Some(bootstrap_height),
            Some(leader_rotation_interval),
            Some(seed_rotation_interval),
            Some(ledger_initial_len),
        );

        // Test that a node knows to transition to a validator based on parsing the ledger
        let bootstrap_leader = Fullnode::new(
            bootstrap_leader_node,
            &bootstrap_leader_ledger_path,
            bootstrap_leader_keypair,
            Some(bootstrap_leader_info.contact_info.ncp),
            false,
            LeaderScheduler::new(&leader_scheduler_config),
        );

        match bootstrap_leader.node_role {
            Some(NodeRole::Validator(_)) => (),
            _ => {
                panic!("Expected bootstrap leader to be a validator");
            }
        }

        // Test that a node knows to transition to a leader based on parsing the ledger
        let validator = Fullnode::new(
            validator_node,
            &bootstrap_leader_ledger_path,
            validator_keypair,
            Some(bootstrap_leader_info.contact_info.ncp),
            false,
            LeaderScheduler::new(&leader_scheduler_config),
        );

        match validator.node_role {
            Some(NodeRole::Leader(_)) => (),
            _ => {
                panic!("Expected node to be the leader");
            }
        }
    }

    #[test]
    fn test_validator_to_leader_transition() {
        // Make a leader identity
        let leader_keypair = Keypair::new();
        let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
        let leader_id = leader_node.info.id;
        let leader_ncp = leader_node.info.contact_info.ncp;

        // Create validator identity
        let (mint, validator_ledger_path, genesis_entries) =
            create_sample_ledger("test_validator_to_leader_transition", 10_000);
        let validator_keypair = Keypair::new();
        let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());
        let validator_info = validator_node.info.clone();

        let mut last_id = genesis_entries
            .last()
            .expect("expected at least one genesis entry")
            .id;

        // Write two entries to the ledger so that the validator is in the active set:
        //
        // 1) Give the validator a nonzero number of tokens (these are also the
        //    bootstrap entries that will cause leader rotation after the bootstrap height)
        //
        // 2) A vote from the validator
        let mut ledger_writer = LedgerWriter::open(&validator_ledger_path, false).unwrap();
        let bootstrap_entries =
            make_active_set_entries(&validator_keypair, &mint.keypair(), &last_id, &last_id);
        let bootstrap_entries_len = bootstrap_entries.len();
        last_id = bootstrap_entries.last().unwrap().id;
        ledger_writer.write_entries(bootstrap_entries).unwrap();
        let ledger_initial_len = (genesis_entries.len() + bootstrap_entries_len) as u64;

        // Set the leader scheduler for the validator
        let leader_rotation_interval = 10;
        let num_bootstrap_slots = 2;
        let bootstrap_height = num_bootstrap_slots * leader_rotation_interval;

        let leader_scheduler_config = LeaderSchedulerConfig::new(
            leader_id,
            Some(bootstrap_height),
            Some(leader_rotation_interval),
            Some(leader_rotation_interval * 2),
            Some(bootstrap_height),
        );

        // Start the validator
        let mut validator = Fullnode::new(
            validator_node,
            &validator_ledger_path,
            validator_keypair,
            Some(leader_ncp),
            false,
            LeaderScheduler::new(&leader_scheduler_config),
        );

        // Send blobs to the validator from our mock leader
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let blob_sockets: Vec<Arc<UdpSocket>> = leader_node
                .sockets
                .replicate
                .into_iter()
                .map(Arc::new)
                .collect();

            let t_responder = responder(
                "test_validator_to_leader_transition",
                blob_sockets[0].clone(),
                r_responder,
            );

            // Send the blobs out of order, in reverse. Also send an extra
            // "extra_blobs" number of blobs to make sure the window stops in the right place.
            let extra_blobs = cmp::max(leader_rotation_interval / 3, 1);
            let total_blobs_to_send = bootstrap_height + extra_blobs;
            let tvu_address = &validator_info.contact_info.tvu;
            let msgs = make_consecutive_blobs(
                leader_id,
                total_blobs_to_send,
                ledger_initial_len,
                last_id,
                &tvu_address,
            ).into_iter()
            .rev()
            .collect();
            s_responder.send(msgs).expect("send");
            t_responder
        };

        // Wait for the validator to shut down its TVU
        let node_role = validator.node_role.take();
        match node_role {
            Some(NodeRole::Validator(validator_services)) => {
                let join_result = validator_services
                    .join()
                    .expect("Expected successful validator join");
                if let Some(TvuReturnType::LeaderRotation(result_bh, _)) = join_result {
                    assert_eq!(result_bh, bootstrap_height);
                } else {
                    panic!("Expected validator to have exited due to leader rotation");
                }
            }
            _ => panic!("Role should not be leader"),
        }

        // Check the validator ledger to make sure it's the right height; we should have
        // transitioned after the bootstrap_height entry
        let (_, entry_height, _) = Fullnode::new_bank_from_ledger(
            &validator_ledger_path,
            &mut LeaderScheduler::new(&leader_scheduler_config),
        );

        assert_eq!(entry_height, bootstrap_height);

        // Shut down
        t_responder.join().expect("responder thread join");
        validator.close().unwrap();
        remove_dir_all(&validator_ledger_path).unwrap();
    }
}