Add scalable gossip library (#1546)

* Cluster Replicated Data Store

Separate the data storage and merge strategy from the network IO boundary.
Implement an eager push overlay for transporting recent messages.

Simulation shows fast convergence with 20k nodes.
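Below is a minimal, self-contained sketch of the shape described above: a replicated store whose merge rule is independent of any network I/O, plus an eager-push helper that only selects which peers to forward a fresh value to. All names here (CrdsSketch, Entry, push_targets) and the last-write-wins-by-wallclock merge rule are illustrative assumptions, not the actual API introduced by this commit.

// Illustrative sketch only; not the crate's real types.
use std::collections::HashMap;

/// One gossip value, keyed by the originating node's id.
#[derive(Clone, Debug)]
struct Entry {
    origin: u64,    // originating node id (stand-in for a pubkey)
    wallclock: u64, // sender-reported timestamp used by the merge rule
    data: String,   // opaque payload
}

/// Storage plus merge strategy, with no sockets or threads involved.
#[derive(Default)]
struct CrdsSketch {
    table: HashMap<u64, Entry>,
}

impl CrdsSketch {
    /// Merge rule (assumed here): keep the newer wallclock per origin.
    /// Returns true if the table changed, i.e. the value is recent and
    /// worth propagating further.
    fn insert(&mut self, new: Entry) -> bool {
        match self.table.get(&new.origin) {
            Some(old) if old.wallclock >= new.wallclock => false,
            _ => {
                self.table.insert(new.origin, new);
                true
            }
        }
    }
}

/// Eager push: pick up to `fanout` peers (other than the value's origin)
/// to forward a freshly merged value to. Pure selection, no I/O.
fn push_targets(peers: &[u64], value: &Entry, fanout: usize) -> Vec<u64> {
    peers
        .iter()
        .copied()
        .filter(|p| *p != value.origin)
        .take(fanout)
        .collect()
}

fn main() {
    let mut store = CrdsSketch::default();
    let peers = vec![1, 2, 3, 4];

    let fresh = Entry { origin: 1, wallclock: 10, data: "hello".into() };
    if store.insert(fresh.clone()) {
        // Only values that actually changed the store get pushed onward.
        println!("push {:?} to {:?}", fresh.data, push_targets(&peers, &fresh, 2));
    }

    // A stale copy (older wallclock) is rejected by the merge rule and
    // therefore never re-broadcast.
    let stale = Entry { origin: 1, wallclock: 5, data: "old".into() };
    assert!(!store.insert(stale));
}

Keeping the store and merge rule free of I/O is what makes the convergence behavior testable in simulation, as with the 20k-node run mentioned above.
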
anatoly yakovenko
2018-11-15 13:23:26 -08:00
committed by GitHub
parent 4a3230904e
commit a41254e18c
31 changed files with 2821 additions and 1698 deletions

@@ -16,6 +16,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
 use std::thread::Result;
+use timing::timestamp;
 use tpu::{Tpu, TpuReturnType};
 use tvu::{Tvu, TvuReturnType};
 use untrusted::Input;
@@ -148,9 +149,9 @@ impl Fullnode {
         info!(
             "starting... local gossip address: {} (advertising {})",
-            local_gossip_addr, node.info.contact_info.ncp
+            local_gossip_addr, node.info.ncp
         );
-        let mut rpc_addr = node.info.contact_info.rpc;
+        let mut rpc_addr = node.info.rpc;
         if let Some(port) = rpc_port {
             rpc_addr.set_port(port);
         }
@@ -198,16 +199,16 @@ impl Fullnode {
         sigverify_disabled: bool,
         rpc_port: Option<u16>,
     ) -> Self {
-        let mut rpc_addr = node.info.contact_info.rpc;
-        let mut rpc_pubsub_addr = node.info.contact_info.rpc_pubsub;
+        let mut rpc_addr = node.info.rpc;
+        let mut rpc_pubsub_addr = node.info.rpc_pubsub;
         // Use custom RPC port, if provided (`Some(port)`)
         // RPC port may be any valid open port on the node
         // If rpc_port == `None`, node will listen on the ports set in NodeInfo
         if let Some(port) = rpc_port {
             rpc_addr.set_port(port);
-            node.info.contact_info.rpc = rpc_addr;
+            node.info.rpc = rpc_addr;
             rpc_pubsub_addr.set_port(port + 1);
-            node.info.contact_info.rpc_pubsub = rpc_pubsub_addr;
+            node.info.rpc_pubsub = rpc_pubsub_addr;
         }
         let exit = Arc::new(AtomicBool::new(false));
@@ -215,6 +216,7 @@ impl Fullnode {
         let window = new_window(32 * 1024);
         let shared_window = Arc::new(RwLock::new(window));
+        node.info.wallclock = timestamp();
         let cluster_info = Arc::new(RwLock::new(
             ClusterInfo::new(node.info).expect("ClusterInfo::new"),
         ));
@@ -233,7 +235,10 @@ impl Fullnode {
         // Insert the bootstrap leader info, should only be None if this node
         // is the bootstrap leader
         if let Some(bootstrap_leader_info) = bootstrap_leader_info_option {
-            cluster_info.write().unwrap().insert(bootstrap_leader_info);
+            cluster_info
+                .write()
+                .unwrap()
+                .insert_info(bootstrap_leader_info.clone());
         }
         // Get the scheduled leader
@@ -738,7 +743,7 @@ mod tests {
             &bootstrap_leader_ledger_path,
             Arc::new(bootstrap_leader_keypair),
             Arc::new(Keypair::new()),
-            Some(bootstrap_leader_info.contact_info.ncp),
+            Some(bootstrap_leader_info.ncp),
             false,
             LeaderScheduler::new(&leader_scheduler_config),
             None,
@@ -829,7 +834,7 @@ mod tests {
             &bootstrap_leader_ledger_path,
             bootstrap_leader_keypair,
             leader_vote_account_keypair,
-            Some(bootstrap_leader_info.contact_info.ncp),
+            Some(bootstrap_leader_info.ncp),
             false,
             LeaderScheduler::new(&leader_scheduler_config),
             None,
@@ -848,7 +853,7 @@ mod tests {
             &bootstrap_leader_ledger_path,
             Arc::new(validator_keypair),
             Arc::new(validator_vote_account_keypair),
-            Some(bootstrap_leader_info.contact_info.ncp),
+            Some(bootstrap_leader_info.ncp),
             false,
             LeaderScheduler::new(&leader_scheduler_config),
             None,
@@ -869,7 +874,7 @@ mod tests {
         let leader_keypair = Keypair::new();
         let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
         let leader_id = leader_node.info.id;
-        let leader_ncp = leader_node.info.contact_info.ncp;
+        let leader_ncp = leader_node.info.ncp;
         // Create validator identity
         let num_ending_ticks = 1;
@@ -954,7 +959,7 @@ mod tests {
         // "extra_blobs" number of blobs to make sure the window stops in the right place.
         let extra_blobs = cmp::max(leader_rotation_interval / 3, 1);
         let total_blobs_to_send = bootstrap_height + extra_blobs;
-        let tvu_address = &validator_info.contact_info.tvu;
+        let tvu_address = &validator_info.tvu;
         let msgs = make_consecutive_blobs(
             leader_id,
             total_blobs_to_send,