@@ -364,7 +364,6 @@ impl ClusterInfo {
    pub fn contact_info_trace(&self) -> String {
        let now = timestamp();
        let mut spy_nodes = 0;
        let mut archivers = 0;
        let mut different_shred_nodes = 0;
        let my_pubkey = self.id();
        let my_shred_version = self.my_shred_version();
@@ -374,8 +373,6 @@ impl ClusterInfo {
            .filter_map(|(node, last_updated)| {
                if Self::is_spy_node(&node) {
                    spy_nodes += 1;
                } else if Self::is_archiver(&node) {
                    archivers += 1;
                }

                let node_version = self.get_node_version(&node.id);
@@ -431,14 +428,9 @@ impl ClusterInfo {
             ------------------+-------+----------------------------------------------+---------------+\
             ------+------+------+------+------+------+------+------+------+--------\n\
             {}\
             Nodes: {}{}{}{}",
             Nodes: {}{}{}",
            nodes.join(""),
            nodes.len() - spy_nodes - archivers,
            if archivers > 0 {
                format!("\nArchivers: {}", archivers)
            } else {
                "".to_string()
            },
            nodes.len() - spy_nodes,
            if spy_nodes > 0 {
                format!("\nSpies: {}", spy_nodes)
            } else {
@@ -773,11 +765,7 @@ impl ClusterInfo {
            .table
            .values()
            .filter_map(|x| x.value.contact_info())
            .filter(|x| {
                ContactInfo::is_valid_address(&x.tvu)
                    && !ClusterInfo::is_archiver(x)
                    && x.id != self.id()
            })
            .filter(|x| ContactInfo::is_valid_address(&x.tvu) && x.id != self.id())
            .cloned()
            .collect()
    }
@@ -793,39 +781,6 @@ impl ClusterInfo {
            .filter_map(|x| x.value.contact_info())
            .filter(|x| {
                ContactInfo::is_valid_address(&x.tvu)
                    && !ClusterInfo::is_archiver(x)
                    && x.id != self.id()
                    && x.shred_version == self.my_shred_version()
            })
            .cloned()
            .collect()
    }

    /// all peers that have a valid storage addr regardless of `shred_version`.
    pub fn all_storage_peers(&self) -> Vec<ContactInfo> {
        self.gossip
            .read()
            .unwrap()
            .crds
            .table
            .values()
            .filter_map(|x| x.value.contact_info())
            .filter(|x| ContactInfo::is_valid_address(&x.storage_addr) && x.id != self.id())
            .cloned()
            .collect()
    }

    /// all peers that have a valid storage addr and are on the same `shred_version`.
    pub fn storage_peers(&self) -> Vec<ContactInfo> {
        self.gossip
            .read()
            .unwrap()
            .crds
            .table
            .values()
            .filter_map(|x| x.value.contact_info())
            .filter(|x| {
                ContactInfo::is_valid_address(&x.storage_addr)
                    && x.id != self.id()
                    && x.shred_version == self.my_shred_version()
            })
@@ -871,15 +826,9 @@ impl ClusterInfo {
    }

    fn is_spy_node(contact_info: &ContactInfo) -> bool {
        (!ContactInfo::is_valid_address(&contact_info.tpu)
        !ContactInfo::is_valid_address(&contact_info.tpu)
            || !ContactInfo::is_valid_address(&contact_info.gossip)
            || !ContactInfo::is_valid_address(&contact_info.tvu))
            && !ContactInfo::is_valid_address(&contact_info.storage_addr)
    }

    pub fn is_archiver(contact_info: &ContactInfo) -> bool {
        ContactInfo::is_valid_address(&contact_info.storage_addr)
            && !ContactInfo::is_valid_address(&contact_info.tpu)
            || !ContactInfo::is_valid_address(&contact_info.tvu)
    }

fn sorted_stakes_with_index<S: std::hash::BuildHasher>(
@@ -1935,7 +1884,6 @@ pub struct Sockets {
    pub broadcast: Vec<UdpSocket>,
    pub repair: UdpSocket,
    pub retransmit_sockets: Vec<UdpSocket>,
    pub storage: Option<UdpSocket>,
    pub serve_repair: UdpSocket,
}

@@ -1950,50 +1898,6 @@ impl Node {
        let pubkey = Pubkey::new_rand();
        Self::new_localhost_with_pubkey(&pubkey)
    }
    pub fn new_localhost_archiver(pubkey: &Pubkey) -> Self {
        let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
        let tvu = UdpSocket::bind("127.0.0.1:0").unwrap();
        let tvu_forwards = UdpSocket::bind("127.0.0.1:0").unwrap();
        let storage = UdpSocket::bind("127.0.0.1:0").unwrap();
        let empty = "0.0.0.0:0".parse().unwrap();
        let repair = UdpSocket::bind("127.0.0.1:0").unwrap();
        let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
        let retransmit = UdpSocket::bind("0.0.0.0:0").unwrap();
        let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();

        let info = ContactInfo {
            id: *pubkey,
            gossip: gossip.local_addr().unwrap(),
            tvu: tvu.local_addr().unwrap(),
            tvu_forwards: tvu_forwards.local_addr().unwrap(),
            repair: repair.local_addr().unwrap(),
            tpu: empty,
            tpu_forwards: empty,
            storage_addr: storage.local_addr().unwrap(),
            rpc: empty,
            rpc_pubsub: empty,
            serve_repair: serve_repair.local_addr().unwrap(),
            wallclock: timestamp(),
            shred_version: 0,
        };

        Node {
            info,
            sockets: Sockets {
                gossip,
                tvu: vec![tvu],
                tvu_forwards: vec![],
                tpu: vec![],
                tpu_forwards: vec![],
                broadcast,
                repair,
                retransmit_sockets: vec![retransmit],
                serve_repair,
                storage: Some(storage),
                ip_echo: None,
            },
        }
    }
    pub fn new_localhost_with_pubkey(pubkey: &Pubkey) -> Self {
        let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
        let tpu = UdpSocket::bind("127.0.0.1:0").unwrap();
@@ -2012,7 +1916,7 @@ impl Node {

        let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()];
        let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let storage = UdpSocket::bind("0.0.0.0:0").unwrap();
        let unused = UdpSocket::bind("0.0.0.0:0").unwrap();
        let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap();
        let info = ContactInfo {
            id: *pubkey,
@@ -2022,7 +1926,7 @@ impl Node {
            repair: repair.local_addr().unwrap(),
            tpu: tpu.local_addr().unwrap(),
            tpu_forwards: tpu_forwards.local_addr().unwrap(),
            storage_addr: storage.local_addr().unwrap(),
            unused: unused.local_addr().unwrap(),
            rpc: rpc_addr,
            rpc_pubsub: rpc_pubsub_addr,
            serve_repair: serve_repair.local_addr().unwrap(),
@@ -2041,7 +1945,6 @@ impl Node {
                broadcast,
                repair,
                retransmit_sockets: vec![retransmit_socket],
                storage: None,
                serve_repair,
            },
        }
@@ -2104,7 +2007,7 @@ impl Node {
            repair: SocketAddr::new(gossip_addr.ip(), repair_port),
            tpu: SocketAddr::new(gossip_addr.ip(), tpu_port),
            tpu_forwards: SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
            storage_addr: socketaddr_any!(),
            unused: socketaddr_any!(),
            rpc: socketaddr_any!(),
            rpc_pubsub: socketaddr_any!(),
            serve_repair: SocketAddr::new(gossip_addr.ip(), serve_repair_port),
@@ -2124,32 +2027,11 @@ impl Node {
                broadcast,
                repair,
                retransmit_sockets,
                storage: None,
                serve_repair,
                ip_echo: Some(ip_echo),
            },
        }
    }
    pub fn new_archiver_with_external_ip(
        pubkey: &Pubkey,
        gossip_addr: &SocketAddr,
        port_range: PortRange,
        bind_ip_addr: IpAddr,
    ) -> Node {
        let mut new = Self::new_with_external_ip(pubkey, gossip_addr, port_range, bind_ip_addr);
        let (storage_port, storage_socket) = Self::bind(bind_ip_addr, port_range);

        new.info.storage_addr = SocketAddr::new(gossip_addr.ip(), storage_port);
        new.sockets.storage = Some(storage_socket);

        let empty = socketaddr_any!();
        new.info.tpu = empty;
        new.info.tpu_forwards = empty;
        new.sockets.tpu = vec![];
        new.sockets.tpu_forwards = vec![];

        new
    }
}

fn report_time_spent(label: &str, time: &Duration, extra: &str) {
@@ -2323,27 +2205,6 @@ mod tests {
        assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), port);
    }

    #[test]
    fn new_archiver_external_ip_test() {
        // Can't use VALIDATOR_PORT_RANGE because if this test runs in parallel with others, the
        // port returned by `bind_in_range()` might be snatched up before `Node::new_with_external_ip()` runs
        let port_range = (VALIDATOR_PORT_RANGE.1 + 20, VALIDATOR_PORT_RANGE.1 + 30);
        let ip = Ipv4Addr::from(0);
        let node = Node::new_archiver_with_external_ip(
            &Pubkey::new_rand(),
            &socketaddr!(ip, 0),
            port_range,
            IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
        );

        let ip = IpAddr::V4(ip);
        check_socket(&node.sockets.storage.unwrap(), ip, port_range);
        check_socket(&node.sockets.gossip, ip, port_range);
        check_socket(&node.sockets.repair, ip, port_range);

        check_sockets(&node.sockets.tvu, ip, port_range);
    }

    //test that all cluster_info objects only generate signed messages
    //when constructed with keypairs
    #[test]

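For readability, the interleaved hunk above reduces to a single predicate once the archiver checks are dropped. This is a reconstruction from the hunk for illustration, not a verbatim copy of the resulting file:

fn is_spy_node(contact_info: &ContactInfo) -> bool {
    // A node counts as a spy when any of its TPU, gossip, or TVU addresses
    // is invalid; the storage_addr qualification disappears with the archivers.
    !ContactInfo::is_valid_address(&contact_info.tpu)
        || !ContactInfo::is_valid_address(&contact_info.gossip)
        || !ContactInfo::is_valid_address(&contact_info.tvu)
}
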
@@ -25,8 +25,8 @@ pub struct ContactInfo {
    pub tpu: SocketAddr,
    /// address to forward unprocessed transactions to
    pub tpu_forwards: SocketAddr,
    /// storage data address
    pub storage_addr: SocketAddr,
    /// unused address
    pub unused: SocketAddr,
    /// address to which to send JSON-RPC requests
    pub rpc: SocketAddr,
    /// websocket for JSON-RPC push notifications
@@ -95,7 +95,7 @@ impl Default for ContactInfo {
            repair: socketaddr_any!(),
            tpu: socketaddr_any!(),
            tpu_forwards: socketaddr_any!(),
            storage_addr: socketaddr_any!(),
            unused: socketaddr_any!(),
            rpc: socketaddr_any!(),
            rpc_pubsub: socketaddr_any!(),
            serve_repair: socketaddr_any!(),
@@ -115,7 +115,7 @@ impl ContactInfo {
            repair: socketaddr!("127.0.0.1:1237"),
            tpu: socketaddr!("127.0.0.1:1238"),
            tpu_forwards: socketaddr!("127.0.0.1:1239"),
            storage_addr: socketaddr!("127.0.0.1:1240"),
            unused: socketaddr!("127.0.0.1:1240"),
            rpc: socketaddr!("127.0.0.1:1241"),
            rpc_pubsub: socketaddr!("127.0.0.1:1242"),
            serve_repair: socketaddr!("127.0.0.1:1243"),
@@ -137,7 +137,7 @@ impl ContactInfo {
            repair: addr,
            tpu: addr,
            tpu_forwards: addr,
            storage_addr: addr,
            unused: addr,
            rpc: addr,
            rpc_pubsub: addr,
            serve_repair: addr,
@@ -171,7 +171,7 @@ impl ContactInfo {
            repair,
            tpu,
            tpu_forwards,
            storage_addr: "0.0.0.0:0".parse().unwrap(),
            unused: "0.0.0.0:0".parse().unwrap(),
            rpc,
            rpc_pubsub,
            serve_repair,
@@ -249,7 +249,7 @@ mod tests {
        assert!(ci.rpc.ip().is_unspecified());
        assert!(ci.rpc_pubsub.ip().is_unspecified());
        assert!(ci.tpu.ip().is_unspecified());
        assert!(ci.storage_addr.ip().is_unspecified());
        assert!(ci.unused.ip().is_unspecified());
        assert!(ci.serve_repair.ip().is_unspecified());
    }
    #[test]
@@ -261,7 +261,7 @@ mod tests {
        assert!(ci.rpc.ip().is_multicast());
        assert!(ci.rpc_pubsub.ip().is_multicast());
        assert!(ci.tpu.ip().is_multicast());
        assert!(ci.storage_addr.ip().is_multicast());
        assert!(ci.unused.ip().is_multicast());
        assert!(ci.serve_repair.ip().is_multicast());
    }
    #[test]
@@ -274,7 +274,7 @@ mod tests {
        assert!(ci.rpc.ip().is_unspecified());
        assert!(ci.rpc_pubsub.ip().is_unspecified());
        assert!(ci.tpu.ip().is_unspecified());
        assert!(ci.storage_addr.ip().is_unspecified());
        assert!(ci.unused.ip().is_unspecified());
        assert!(ci.serve_repair.ip().is_unspecified());
    }
    #[test]
@@ -287,7 +287,7 @@ mod tests {
        assert_eq!(ci.tpu_forwards.port(), 13);
        assert_eq!(ci.rpc.port(), rpc_port::DEFAULT_RPC_PORT);
        assert_eq!(ci.rpc_pubsub.port(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
        assert!(ci.storage_addr.ip().is_unspecified());
        assert!(ci.unused.ip().is_unspecified());
        assert_eq!(ci.serve_repair.port(), 16);
    }

@@ -63,11 +63,11 @@ impl GossipService {
    }
}

/// Discover Validators and Archivers in a cluster
/// Discover Validators in a cluster
pub fn discover_cluster(
    entrypoint: &SocketAddr,
    num_nodes: usize,
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
) -> std::io::Result<Vec<ContactInfo>> {
    discover(
        Some(entrypoint),
        Some(num_nodes),
@@ -77,18 +77,18 @@ pub fn discover_cluster(
        None,
        0,
    )
    .map(|(_all_peers, validators, archivers)| (validators, archivers))
    .map(|(_all_peers, validators)| validators)
}

pub fn discover(
    entrypoint: Option<&SocketAddr>,
    num_nodes: Option<usize>, // num_nodes only counts validators and archivers, excludes spy nodes
    num_nodes: Option<usize>, // num_nodes only counts validators, excludes spy nodes
    timeout: Option<u64>,
    find_node_by_pubkey: Option<Pubkey>,
    find_node_by_gossip_addr: Option<&SocketAddr>,
    my_gossip_addr: Option<&SocketAddr>,
    my_shred_version: u16,
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>, Vec<ContactInfo>)> {
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
    let exit = Arc::new(AtomicBool::new(false));
    let (gossip_service, ip_echo, spy_ref) =
        make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
@@ -102,7 +102,7 @@ pub fn discover(

    let _ip_echo_server = ip_echo.map(solana_net_utils::ip_echo_server);

    let (met_criteria, secs, all_peers, tvu_peers, storage_peers) = spy(
    let (met_criteria, secs, all_peers, tvu_peers) = spy(
        spy_ref.clone(),
        num_nodes,
        timeout,
@@ -119,7 +119,7 @@ pub fn discover(
            secs,
            spy_ref.contact_info_trace()
        );
        return Ok((all_peers, tvu_peers, storage_peers));
        return Ok((all_peers, tvu_peers));
    }

    if !tvu_peers.is_empty() {
@@ -127,7 +127,7 @@ pub fn discover(
            "discover failed to match criteria by timeout...\n{}",
            spy_ref.contact_info_trace()
        );
        return Ok((all_peers, tvu_peers, storage_peers));
        return Ok((all_peers, tvu_peers));
    }

    info!("discover failed...\n{}", spy_ref.contact_info_trace());
@@ -182,18 +182,11 @@ fn spy(
    timeout: Option<u64>,
    find_node_by_pubkey: Option<Pubkey>,
    find_node_by_gossip_addr: Option<&SocketAddr>,
) -> (
    bool,
    u64,
    Vec<ContactInfo>,
    Vec<ContactInfo>,
    Vec<ContactInfo>,
) {
) -> (bool, u64, Vec<ContactInfo>, Vec<ContactInfo>) {
    let now = Instant::now();
    let mut met_criteria = false;
    let mut all_peers: Vec<ContactInfo> = Vec::new();
    let mut tvu_peers: Vec<ContactInfo> = Vec::new();
    let mut storage_peers: Vec<ContactInfo> = Vec::new();
    let mut i = 1;
    while !met_criteria {
        if let Some(secs) = timeout {
@@ -208,7 +201,6 @@ fn spy(
            .map(|x| x.0)
            .collect::<Vec<_>>();
        tvu_peers = spy_ref.all_tvu_peers().into_iter().collect::<Vec<_>>();
        storage_peers = spy_ref.all_storage_peers();

        let found_node_by_pubkey = if let Some(pubkey) = find_node_by_pubkey {
            all_peers.iter().any(|x| x.id == pubkey)
@@ -224,7 +216,7 @@ fn spy(

        if let Some(num) = num_nodes {
            // Only consider validators and archives for `num_nodes`
            let mut nodes: Vec<_> = tvu_peers.iter().chain(storage_peers.iter()).collect();
            let mut nodes: Vec<_> = tvu_peers.iter().collect();
            nodes.sort();
            nodes.dedup();

@@ -248,13 +240,7 @@ fn spy(
        ));
        i += 1;
    }
    (
        met_criteria,
        now.elapsed().as_secs(),
        all_peers,
        tvu_peers,
        storage_peers,
    )
    (met_criteria, now.elapsed().as_secs(), all_peers, tvu_peers)
}

/// Makes a spy or gossip node based on whether or not a gossip_addr was passed in
@@ -314,21 +300,21 @@ mod tests {

        let spy_ref = Arc::new(cluster_info);

        let (met_criteria, secs, _, tvu_peers, _) = spy(spy_ref.clone(), None, Some(1), None, None);
        let (met_criteria, secs, _, tvu_peers) = spy(spy_ref.clone(), None, Some(1), None, None);
        assert_eq!(met_criteria, false);
        assert_eq!(secs, 1);
        assert_eq!(tvu_peers, spy_ref.tvu_peers());

        // Find num_nodes
        let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None);
        let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None);
        assert_eq!(met_criteria, true);
        let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(2), None, None, None);
        let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(2), None, None, None);
        assert_eq!(met_criteria, true);

        // Find specific node by pubkey
        let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), None, None, Some(peer0), None);
        let (met_criteria, _, _, _) = spy(spy_ref.clone(), None, None, Some(peer0), None);
        assert_eq!(met_criteria, true);
        let (met_criteria, _, _, _, _) = spy(
        let (met_criteria, _, _, _) = spy(
            spy_ref.clone(),
            None,
            Some(0),
@@ -338,11 +324,11 @@ mod tests {
        assert_eq!(met_criteria, false);

        // Find num_nodes *and* specific node by pubkey
        let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(1), None, Some(peer0), None);
        let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, Some(peer0), None);
        assert_eq!(met_criteria, true);
        let (met_criteria, _, _, _, _) = spy(spy_ref.clone(), Some(3), Some(0), Some(peer0), None);
        let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(3), Some(0), Some(peer0), None);
        assert_eq!(met_criteria, false);
        let (met_criteria, _, _, _, _) = spy(
        let (met_criteria, _, _, _) = spy(
            spy_ref.clone(),
            Some(1),
            Some(0),
@@ -352,11 +338,11 @@ mod tests {
        assert_eq!(met_criteria, false);

        // Find specific node by gossip address
        let (met_criteria, _, _, _, _) =
        let (met_criteria, _, _, _) =
            spy(spy_ref.clone(), None, None, None, Some(&peer0_info.gossip));
        assert_eq!(met_criteria, true);

        let (met_criteria, _, _, _, _) = spy(
        let (met_criteria, _, _, _) = spy(
            spy_ref.clone(),
            None,
            Some(0),

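Callers of `discover_cluster` now receive a single vector of validators instead of a `(validators, archivers)` tuple. A minimal caller-side sketch (illustrative only; variable names are assumed, not taken from this diff):

// Before: let (validators, _archivers) = discover_cluster(&entrypoint_addr, num_nodes)?;
// After: archivers are gone, so only validators come back.
let validators = discover_cluster(&entrypoint_addr, num_nodes)?;
assert!(validators.len() >= num_nodes);
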
@@ -52,7 +52,6 @@ pub mod sigverify;
pub mod sigverify_shreds;
pub mod sigverify_stage;
pub mod snapshot_packager_service;
pub mod storage_stage;
pub mod tpu;
pub mod transaction_status_service;
pub mod tvu;

@@ -57,14 +57,11 @@ pub const MAX_DUPLICATE_WAIT_MS: usize = 10_000;
|
||||
pub const REPAIR_MS: u64 = 100;
|
||||
pub const MAX_ORPHANS: usize = 5;
|
||||
|
||||
pub enum RepairStrategy {
|
||||
RepairRange(RepairSlotRange),
|
||||
RepairAll {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
completed_slots_receiver: CompletedSlotsReceiver,
|
||||
epoch_schedule: EpochSchedule,
|
||||
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
|
||||
},
|
||||
pub struct RepairInfo {
|
||||
pub bank_forks: Arc<RwLock<BankForks>>,
|
||||
pub completed_slots_receiver: CompletedSlotsReceiver,
|
||||
pub epoch_schedule: EpochSchedule,
|
||||
pub duplicate_slots_reset_sender: DuplicateSlotsResetSender,
|
||||
}
|
||||
|
||||
pub struct RepairSlotRange {
|
||||
@@ -97,7 +94,7 @@ impl RepairService {
|
||||
exit: Arc<AtomicBool>,
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
repair_strategy: RepairStrategy,
|
||||
repair_info: RepairInfo,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
) -> Self {
|
||||
let t_repair = Builder::new()
|
||||
@@ -108,7 +105,7 @@ impl RepairService {
|
||||
&exit,
|
||||
&repair_socket,
|
||||
&cluster_info,
|
||||
repair_strategy,
|
||||
repair_info,
|
||||
&cluster_slots,
|
||||
)
|
||||
})
|
||||
@@ -122,84 +119,61 @@ impl RepairService {
|
||||
exit: &AtomicBool,
|
||||
repair_socket: &UdpSocket,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
repair_strategy: RepairStrategy,
|
||||
repair_info: RepairInfo,
|
||||
cluster_slots: &Arc<ClusterSlots>,
|
||||
) {
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let id = cluster_info.id();
|
||||
if let RepairStrategy::RepairAll { .. } = repair_strategy {
|
||||
Self::initialize_lowest_slot(id, blockstore, cluster_info);
|
||||
}
|
||||
Self::initialize_lowest_slot(id, blockstore, cluster_info);
|
||||
let mut repair_stats = RepairStats::default();
|
||||
let mut last_stats = Instant::now();
|
||||
let mut duplicate_slot_repair_statuses = HashMap::new();
|
||||
|
||||
if let RepairStrategy::RepairAll {
|
||||
ref completed_slots_receiver,
|
||||
..
|
||||
} = repair_strategy
|
||||
{
|
||||
Self::initialize_epoch_slots(blockstore, cluster_info, completed_slots_receiver);
|
||||
}
|
||||
Self::initialize_epoch_slots(
|
||||
blockstore,
|
||||
cluster_info,
|
||||
&repair_info.completed_slots_receiver,
|
||||
);
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
|
||||
let repairs = {
|
||||
match repair_strategy {
|
||||
RepairStrategy::RepairRange(ref repair_slot_range) => {
|
||||
// Strategy used by archivers
|
||||
Self::generate_repairs_in_range(
|
||||
blockstore,
|
||||
MAX_REPAIR_LENGTH,
|
||||
repair_slot_range,
|
||||
)
|
||||
}
|
||||
|
||||
RepairStrategy::RepairAll {
|
||||
ref completed_slots_receiver,
|
||||
ref bank_forks,
|
||||
ref duplicate_slots_reset_sender,
|
||||
..
|
||||
} => {
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
let new_root = root_bank.slot();
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
Self::update_completed_slots(completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, cluster_info, bank_forks);
|
||||
let new_duplicate_slots = Self::find_new_duplicate_slots(
|
||||
&duplicate_slot_repair_statuses,
|
||||
blockstore,
|
||||
cluster_slots,
|
||||
&root_bank,
|
||||
);
|
||||
Self::process_new_duplicate_slots(
|
||||
&new_duplicate_slots,
|
||||
&mut duplicate_slot_repair_statuses,
|
||||
cluster_slots,
|
||||
&root_bank,
|
||||
blockstore,
|
||||
&serve_repair,
|
||||
&duplicate_slots_reset_sender,
|
||||
);
|
||||
Self::generate_and_send_duplicate_repairs(
|
||||
&mut duplicate_slot_repair_statuses,
|
||||
cluster_slots,
|
||||
blockstore,
|
||||
&serve_repair,
|
||||
&mut repair_stats,
|
||||
&repair_socket,
|
||||
);
|
||||
Self::generate_repairs(
|
||||
blockstore,
|
||||
root_bank.slot(),
|
||||
MAX_REPAIR_LENGTH,
|
||||
&duplicate_slot_repair_statuses,
|
||||
)
|
||||
}
|
||||
}
|
||||
let root_bank = repair_info.bank_forks.read().unwrap().root_bank().clone();
|
||||
let new_root = root_bank.slot();
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
Self::update_completed_slots(&repair_info.completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, cluster_info, &repair_info.bank_forks);
|
||||
let new_duplicate_slots = Self::find_new_duplicate_slots(
|
||||
&duplicate_slot_repair_statuses,
|
||||
blockstore,
|
||||
cluster_slots,
|
||||
&root_bank,
|
||||
);
|
||||
Self::process_new_duplicate_slots(
|
||||
&new_duplicate_slots,
|
||||
&mut duplicate_slot_repair_statuses,
|
||||
cluster_slots,
|
||||
&root_bank,
|
||||
blockstore,
|
||||
&serve_repair,
|
||||
&repair_info.duplicate_slots_reset_sender,
|
||||
);
|
||||
Self::generate_and_send_duplicate_repairs(
|
||||
&mut duplicate_slot_repair_statuses,
|
||||
cluster_slots,
|
||||
blockstore,
|
||||
&serve_repair,
|
||||
&mut repair_stats,
|
||||
&repair_socket,
|
||||
);
|
||||
Self::generate_repairs(
|
||||
blockstore,
|
||||
root_bank.slot(),
|
||||
MAX_REPAIR_LENGTH,
|
||||
&duplicate_slot_repair_statuses,
|
||||
)
|
||||
};
|
||||
|
||||
if let Ok(repairs) = repairs {
|
||||
|
@@ -45,7 +45,7 @@ use std::{
|
||||
result,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::{channel, Receiver, RecvTimeoutError, Sender},
|
||||
mpsc::{Receiver, RecvTimeoutError, Sender},
|
||||
Arc, Mutex, RwLock,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
@@ -157,7 +157,7 @@ impl ReplayStage {
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
retransmit_slots_sender: RetransmitSlotsSender,
|
||||
duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
|
||||
) -> (Self, Receiver<Vec<Arc<Bank>>>) {
|
||||
) -> Self {
|
||||
let ReplayStageConfig {
|
||||
my_pubkey,
|
||||
vote_account,
|
||||
@@ -172,7 +172,6 @@ impl ReplayStage {
|
||||
rewards_recorder_sender,
|
||||
} = config;
|
||||
|
||||
let (root_bank_sender, root_bank_receiver) = channel();
|
||||
trace!("replay stage");
|
||||
let mut tower = Tower::new(&my_pubkey, &vote_account, &bank_forks.read().unwrap());
|
||||
|
||||
@@ -377,7 +376,6 @@ impl ReplayStage {
|
||||
&cluster_info,
|
||||
&blockstore,
|
||||
&leader_schedule_cache,
|
||||
&root_bank_sender,
|
||||
&lockouts_sender,
|
||||
&accounts_hash_sender,
|
||||
&latest_root_senders,
|
||||
@@ -497,13 +495,11 @@ impl ReplayStage {
|
||||
Ok(())
|
||||
})
|
||||
.unwrap();
|
||||
(
|
||||
Self {
|
||||
t_replay,
|
||||
commitment_service,
|
||||
},
|
||||
root_bank_receiver,
|
||||
)
|
||||
|
||||
Self {
|
||||
t_replay,
|
||||
commitment_service,
|
||||
}
|
||||
}
|
||||
|
||||
fn report_memory(
|
||||
@@ -852,7 +848,6 @@ impl ReplayStage {
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
|
||||
lockouts_sender: &Sender<CommitmentAggregationData>,
|
||||
accounts_hash_sender: &Option<AccountsPackageSender>,
|
||||
latest_root_senders: &[Sender<Slot>],
|
||||
@@ -905,10 +900,6 @@ impl ReplayStage {
|
||||
}
|
||||
});
|
||||
info!("new root {}", new_root);
|
||||
if let Err(e) = root_bank_sender.send(rooted_banks) {
|
||||
trace!("root_bank_sender failed: {:?}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
|
||||
Self::update_commitment_cache(
|
||||
|
@@ -4,7 +4,7 @@ use crate::{
|
||||
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
|
||||
cluster_slots::ClusterSlots,
|
||||
repair_service::DuplicateSlotsResetSender,
|
||||
repair_service::RepairStrategy,
|
||||
repair_service::RepairInfo,
|
||||
result::{Error, Result},
|
||||
window_service::{should_retransmit_and_persist, WindowService},
|
||||
};
|
||||
@@ -353,7 +353,7 @@ impl RetransmitStage {
|
||||
retransmit_receiver,
|
||||
);
|
||||
|
||||
let repair_strategy = RepairStrategy::RepairAll {
|
||||
let repair_info = RepairInfo {
|
||||
bank_forks,
|
||||
completed_slots_receiver,
|
||||
epoch_schedule,
|
||||
@@ -367,7 +367,7 @@ impl RetransmitStage {
|
||||
retransmit_sender,
|
||||
repair_socket,
|
||||
exit,
|
||||
repair_strategy,
|
||||
repair_info,
|
||||
&leader_schedule_cache.clone(),
|
||||
move |id, shred, working_bank, last_root| {
|
||||
let is_connected = cfg
|
||||
|
@@ -6,7 +6,6 @@ use crate::{
|
||||
contact_info::ContactInfo,
|
||||
non_circulating_supply::calculate_non_circulating_supply,
|
||||
rpc_error::RpcCustomError,
|
||||
storage_stage::StorageState,
|
||||
validator::ValidatorExit,
|
||||
};
|
||||
use bincode::serialize;
|
||||
@@ -73,7 +72,6 @@ pub struct JsonRpcRequestProcessor {
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
config: JsonRpcConfig,
|
||||
storage_state: StorageState,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
}
|
||||
|
||||
@@ -111,7 +109,6 @@ impl JsonRpcRequestProcessor {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
storage_state: StorageState,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
) -> Self {
|
||||
JsonRpcRequestProcessor {
|
||||
@@ -119,7 +116,6 @@ impl JsonRpcRequestProcessor {
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
storage_state,
|
||||
validator_exit,
|
||||
}
|
||||
}
|
||||
@@ -375,31 +371,6 @@ impl JsonRpcRequestProcessor {
|
||||
})
|
||||
}
|
||||
|
||||
fn get_storage_turn_rate(&self) -> Result<u64> {
|
||||
Ok(self.storage_state.get_storage_turn_rate())
|
||||
}
|
||||
|
||||
fn get_storage_turn(&self) -> Result<RpcStorageTurn> {
|
||||
Ok(RpcStorageTurn {
|
||||
blockhash: self.storage_state.get_storage_blockhash().to_string(),
|
||||
slot: self.storage_state.get_slot(),
|
||||
})
|
||||
}
|
||||
|
||||
fn get_slots_per_segment(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
|
||||
Ok(self.bank(commitment)?.slots_per_segment())
|
||||
}
|
||||
|
||||
fn get_storage_pubkeys_for_slot(&self, slot: Slot) -> Result<Vec<String>> {
|
||||
let pubkeys: Vec<String> = self
|
||||
.storage_state
|
||||
.get_pubkeys_for_slot(slot, &self.bank_forks)
|
||||
.iter()
|
||||
.map(|pubkey| pubkey.to_string())
|
||||
.collect();
|
||||
Ok(pubkeys)
|
||||
}
|
||||
|
||||
pub fn set_log_filter(&self, filter: String) -> Result<()> {
|
||||
if self.config.enable_set_log_filter {
|
||||
solana_logger::setup_with(&filter);
|
||||
@@ -879,22 +850,6 @@ pub trait RpcSol {
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Result<RpcVoteAccountStatus>;
|
||||
|
||||
#[rpc(meta, name = "getStorageTurnRate")]
|
||||
fn get_storage_turn_rate(&self, meta: Self::Metadata) -> Result<u64>;
|
||||
|
||||
#[rpc(meta, name = "getStorageTurn")]
|
||||
fn get_storage_turn(&self, meta: Self::Metadata) -> Result<RpcStorageTurn>;
|
||||
|
||||
#[rpc(meta, name = "getSlotsPerSegment")]
|
||||
fn get_slots_per_segment(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Result<u64>;
|
||||
|
||||
#[rpc(meta, name = "getStoragePubkeysForSlot")]
|
||||
fn get_storage_pubkeys_for_slot(&self, meta: Self::Metadata, slot: u64) -> Result<Vec<String>>;
|
||||
|
||||
#[rpc(meta, name = "validatorExit")]
|
||||
fn validator_exit(&self, meta: Self::Metadata) -> Result<bool>;
|
||||
|
||||
@@ -1419,39 +1374,6 @@ impl RpcSol for RpcSolImpl {
|
||||
.get_vote_accounts(commitment)
|
||||
}
|
||||
|
||||
fn get_storage_turn_rate(&self, meta: Self::Metadata) -> Result<u64> {
|
||||
meta.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_storage_turn_rate()
|
||||
}
|
||||
|
||||
fn get_storage_turn(&self, meta: Self::Metadata) -> Result<RpcStorageTurn> {
|
||||
meta.request_processor.read().unwrap().get_storage_turn()
|
||||
}
|
||||
|
||||
fn get_slots_per_segment(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
commitment: Option<CommitmentConfig>,
|
||||
) -> Result<u64> {
|
||||
meta.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_slots_per_segment(commitment)
|
||||
}
|
||||
|
||||
fn get_storage_pubkeys_for_slot(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
slot: Slot,
|
||||
) -> Result<Vec<String>> {
|
||||
meta.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_storage_pubkeys_for_slot(slot)
|
||||
}
|
||||
|
||||
fn validator_exit(&self, meta: Self::Metadata) -> Result<bool> {
|
||||
meta.request_processor.read().unwrap().validator_exit()
|
||||
}
|
||||
@@ -1736,7 +1658,6 @@ pub mod tests {
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache.clone(),
|
||||
blockstore,
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
)));
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
@@ -1785,7 +1706,6 @@ pub mod tests {
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
);
|
||||
thread::spawn(move || {
|
||||
@@ -2524,7 +2444,6 @@ pub mod tests {
|
||||
new_bank_forks().0,
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
);
|
||||
Arc::new(RwLock::new(request_processor))
|
||||
@@ -2621,7 +2540,6 @@ pub mod tests {
|
||||
new_bank_forks().0,
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
);
|
||||
assert_eq!(request_processor.validator_exit(), Ok(false));
|
||||
@@ -2644,7 +2562,6 @@ pub mod tests {
|
||||
new_bank_forks().0,
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
);
|
||||
assert_eq!(request_processor.validator_exit(), Ok(true));
|
||||
@@ -2726,7 +2643,6 @@ pub mod tests {
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
);
|
||||
assert_eq!(
|
||||
|
@@ -1,8 +1,7 @@
|
||||
//! The `rpc_service` module implements the Solana JSON RPC service.
|
||||
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*,
|
||||
storage_stage::StorageState, validator::ValidatorExit,
|
||||
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, rpc::*, validator::ValidatorExit,
|
||||
};
|
||||
use jsonrpc_core::MetaIoHandler;
|
||||
use jsonrpc_http_server::{
|
||||
@@ -252,7 +251,6 @@ impl JsonRpcService {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
genesis_hash: Hash,
|
||||
ledger_path: &Path,
|
||||
storage_state: StorageState,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
) -> Self {
|
||||
@@ -263,7 +261,6 @@ impl JsonRpcService {
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
storage_state,
|
||||
validator_exit.clone(),
|
||||
)));
|
||||
|
||||
@@ -394,7 +391,6 @@ mod tests {
|
||||
cluster_info,
|
||||
Hash::default(),
|
||||
&PathBuf::from("farf"),
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
None,
|
||||
);
|
||||
|
@@ -602,7 +602,7 @@ mod tests {
|
||||
repair: socketaddr!("127.0.0.1:1237"),
|
||||
tpu: socketaddr!("127.0.0.1:1238"),
|
||||
tpu_forwards: socketaddr!("127.0.0.1:1239"),
|
||||
storage_addr: socketaddr!("127.0.0.1:1240"),
|
||||
unused: socketaddr!("127.0.0.1:1240"),
|
||||
rpc: socketaddr!("127.0.0.1:1241"),
|
||||
rpc_pubsub: socketaddr!("127.0.0.1:1242"),
|
||||
serve_repair: socketaddr!("127.0.0.1:1243"),
|
||||
@@ -680,7 +680,7 @@ mod tests {
|
||||
repair: socketaddr!([127, 0, 0, 1], 1237),
|
||||
tpu: socketaddr!([127, 0, 0, 1], 1238),
|
||||
tpu_forwards: socketaddr!([127, 0, 0, 1], 1239),
|
||||
storage_addr: socketaddr!([127, 0, 0, 1], 1240),
|
||||
unused: socketaddr!([127, 0, 0, 1], 1240),
|
||||
rpc: socketaddr!([127, 0, 0, 1], 1241),
|
||||
rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242),
|
||||
serve_repair: serve_repair_addr,
|
||||
@@ -708,7 +708,7 @@ mod tests {
|
||||
repair: socketaddr!([127, 0, 0, 1], 1237),
|
||||
tpu: socketaddr!([127, 0, 0, 1], 1238),
|
||||
tpu_forwards: socketaddr!([127, 0, 0, 1], 1239),
|
||||
storage_addr: socketaddr!([127, 0, 0, 1], 1240),
|
||||
unused: socketaddr!([127, 0, 0, 1], 1240),
|
||||
rpc: socketaddr!([127, 0, 0, 1], 1241),
|
||||
rpc_pubsub: socketaddr!([127, 0, 0, 1], 1242),
|
||||
serve_repair: serve_repair_addr2,
|
||||
|
@@ -1,740 +0,0 @@
|
||||
// A stage that handles generating the keys used to encrypt the ledger and sample it
|
||||
// for storage mining. Archivers submit storage proofs, validator then bundles them
|
||||
// to submit its proof for mining to be rewarded.
|
||||
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
commitment::BlockCommitmentCache,
|
||||
contact_info::ContactInfo,
|
||||
result::{Error, Result},
|
||||
};
|
||||
use rand::{Rng, SeedableRng};
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_chacha_cuda::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::{bank::Bank, storage_utils::archiver_accounts};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
account_utils::StateMut,
|
||||
clock::{get_segment_from_slot, Slot},
|
||||
hash::Hash,
|
||||
instruction::Instruction,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signature, Signer},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_storage_program::{
|
||||
storage_contract::{Proof, ProofStatus, StorageContract},
|
||||
storage_instruction,
|
||||
storage_instruction::proof_validation,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
cmp,
|
||||
collections::HashMap,
|
||||
io,
|
||||
mem::size_of,
|
||||
net::UdpSocket,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
|
||||
sync::{Arc, RwLock},
|
||||
thread::{self, sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
// Block of hash answers to validate against
|
||||
// Vec of [ledger blocks] x [keys]
|
||||
type StorageResults = Vec<Hash>;
|
||||
type StorageKeys = Vec<u8>;
|
||||
type ArchiverMap = Vec<HashMap<Pubkey, Vec<Proof>>>;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct StorageStateInner {
|
||||
pub storage_results: StorageResults,
|
||||
pub storage_keys: StorageKeys,
|
||||
archiver_map: ArchiverMap,
|
||||
storage_blockhash: Hash,
|
||||
slot: Slot,
|
||||
slots_per_segment: u64,
|
||||
slots_per_turn: u64,
|
||||
}
|
||||
|
||||
// Used to track root slots in storage stage
|
||||
#[derive(Default)]
|
||||
struct StorageSlots {
|
||||
last_root: u64,
|
||||
slot_count: u64,
|
||||
pending_root_banks: Vec<Arc<Bank>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct StorageState {
|
||||
pub state: Arc<RwLock<StorageStateInner>>,
|
||||
}
|
||||
|
||||
pub struct StorageStage {
|
||||
t_storage_mining_verifier: JoinHandle<()>,
|
||||
t_storage_create_accounts: JoinHandle<()>,
|
||||
}
|
||||
|
||||
pub const SLOTS_PER_TURN_TEST: u64 = 2;
|
||||
// TODO: some way to dynamically size NUM_IDENTITIES
|
||||
const NUM_IDENTITIES: usize = 1024;
|
||||
pub const NUM_STORAGE_SAMPLES: usize = 4;
|
||||
const KEY_SIZE: usize = 64;
|
||||
|
||||
type InstructionSender = Sender<Instruction>;
|
||||
|
||||
impl StorageState {
|
||||
pub fn new(hash: &Hash, slots_per_turn: u64, slots_per_segment: u64) -> Self {
|
||||
let storage_keys = vec![0u8; KEY_SIZE * NUM_IDENTITIES];
|
||||
let storage_results = vec![Hash::default(); NUM_IDENTITIES];
|
||||
let archiver_map = vec![];
|
||||
|
||||
let state = StorageStateInner {
|
||||
storage_keys,
|
||||
storage_results,
|
||||
archiver_map,
|
||||
slots_per_turn,
|
||||
slot: 0,
|
||||
slots_per_segment,
|
||||
storage_blockhash: *hash,
|
||||
};
|
||||
|
||||
StorageState {
|
||||
state: Arc::new(RwLock::new(state)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_storage_blockhash(&self) -> Hash {
|
||||
self.state.read().unwrap().storage_blockhash
|
||||
}
|
||||
|
||||
pub fn get_storage_turn_rate(&self) -> u64 {
|
||||
self.state.read().unwrap().slots_per_turn
|
||||
}
|
||||
|
||||
pub fn get_slot(&self) -> u64 {
|
||||
self.state.read().unwrap().slot
|
||||
}
|
||||
|
||||
pub fn get_pubkeys_for_slot(
|
||||
&self,
|
||||
slot: Slot,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
) -> Vec<Pubkey> {
|
||||
// TODO: keep track of age?
|
||||
const MAX_PUBKEYS_TO_RETURN: usize = 5;
|
||||
let index =
|
||||
get_segment_from_slot(slot, self.state.read().unwrap().slots_per_segment) as usize;
|
||||
let archiver_map = &self.state.read().unwrap().archiver_map;
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let accounts = archiver_accounts(&working_bank);
|
||||
if index < archiver_map.len() {
|
||||
//perform an account owner lookup
|
||||
let mut slot_archivers = archiver_map[index]
|
||||
.keys()
|
||||
.filter_map(|account_id| {
|
||||
accounts.get(account_id).and_then(|account| {
|
||||
if let Ok(StorageContract::ArchiverStorage { owner, .. }) = account.state()
|
||||
{
|
||||
Some(owner)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
slot_archivers.truncate(MAX_PUBKEYS_TO_RETURN);
|
||||
slot_archivers
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageStage {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
storage_state: &StorageState,
|
||||
bank_receiver: Receiver<Vec<Arc<Bank>>>,
|
||||
blockstore: Option<Arc<Blockstore>>,
|
||||
keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Self {
|
||||
let (instruction_sender, instruction_receiver) = channel();
|
||||
|
||||
let t_storage_mining_verifier = {
|
||||
let slots_per_turn = storage_state.state.read().unwrap().slots_per_turn;
|
||||
let storage_state_inner = storage_state.state.clone();
|
||||
let exit = exit.clone();
|
||||
let storage_keypair = storage_keypair.clone();
|
||||
Builder::new()
|
||||
.name("solana-storage-mining-verify-stage".to_string())
|
||||
.spawn(move || {
|
||||
let mut current_key = 0;
|
||||
let mut storage_slots = StorageSlots::default();
|
||||
loop {
|
||||
if let Some(ref some_blockstore) = blockstore {
|
||||
if let Err(e) = Self::process_entries(
|
||||
&storage_keypair,
|
||||
&storage_state_inner,
|
||||
&bank_receiver,
|
||||
&some_blockstore,
|
||||
&mut storage_slots,
|
||||
&mut current_key,
|
||||
slots_per_turn,
|
||||
&instruction_sender,
|
||||
) {
|
||||
match e {
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
|
||||
break;
|
||||
}
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||
_ => info!("Error from process_entries: {:?}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let t_storage_create_accounts = {
|
||||
let cluster_info = cluster_info.clone();
|
||||
let exit = exit.clone();
|
||||
let keypair = keypair.clone();
|
||||
let storage_keypair = storage_keypair.clone();
|
||||
let bank_forks = bank_forks.clone();
|
||||
Builder::new()
|
||||
.name("solana-storage-create-accounts".to_string())
|
||||
.spawn(move || {
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
{
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let storage_account = working_bank.get_account(&storage_keypair.pubkey());
|
||||
if storage_account.is_none() {
|
||||
warn!("Storage account not found: {}", storage_keypair.pubkey());
|
||||
}
|
||||
}
|
||||
|
||||
loop {
|
||||
match instruction_receiver.recv_timeout(Duration::from_secs(1)) {
|
||||
Ok(instruction) => {
|
||||
Self::send_transaction(
|
||||
&bank_forks,
|
||||
&cluster_info,
|
||||
instruction,
|
||||
&keypair,
|
||||
&storage_keypair,
|
||||
&transactions_socket,
|
||||
&block_commitment_cache,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
info!("failed to send storage transaction: {:?}", err)
|
||||
});
|
||||
}
|
||||
Err(e) => match e {
|
||||
RecvTimeoutError::Disconnected => break,
|
||||
RecvTimeoutError::Timeout => (),
|
||||
},
|
||||
};
|
||||
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
StorageStage {
|
||||
t_storage_mining_verifier,
|
||||
t_storage_create_accounts,
|
||||
}
|
||||
}
|
||||
|
||||
fn send_transaction(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
instruction: Instruction,
|
||||
keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
transactions_socket: &UdpSocket,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> io::Result<()> {
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let blockhash = working_bank.confirmed_last_blockhash().0;
|
||||
let keypair_balance = working_bank.get_balance(&keypair.pubkey());
|
||||
|
||||
if keypair_balance == 0 {
|
||||
warn!("keypair account balance empty: {}", keypair.pubkey(),);
|
||||
} else {
|
||||
debug!(
|
||||
"keypair account balance: {}: {}",
|
||||
keypair.pubkey(),
|
||||
keypair_balance
|
||||
);
|
||||
}
|
||||
if working_bank
|
||||
.get_account(&storage_keypair.pubkey())
|
||||
.is_none()
|
||||
{
|
||||
warn!(
|
||||
"storage account does not exist: {}",
|
||||
storage_keypair.pubkey()
|
||||
);
|
||||
}
|
||||
|
||||
let signer_keys = vec![keypair.as_ref(), storage_keypair.as_ref()];
|
||||
let message = Message::new_with_payer(&[instruction], Some(&signer_keys[0].pubkey()));
|
||||
let transaction = Transaction::new(&signer_keys, message, blockhash);
|
||||
// try sending the transaction upto 5 times
|
||||
for _ in 0..5 {
|
||||
transactions_socket.send_to(
|
||||
&bincode::serialize(&transaction).unwrap(),
|
||||
cluster_info.my_contact_info().tpu,
|
||||
)?;
|
||||
sleep(Duration::from_millis(100));
|
||||
if Self::poll_for_signature_confirmation(
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
&transaction.signatures[0],
|
||||
0,
|
||||
)
|
||||
.is_ok()
|
||||
{
|
||||
break;
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn poll_for_signature_confirmation(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
signature: &Signature,
|
||||
min_confirmed_blocks: usize,
|
||||
) -> Result<()> {
|
||||
let mut now = Instant::now();
|
||||
let mut confirmed_blocks = 0;
|
||||
loop {
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let response = working_bank.get_signature_status_slot(signature);
|
||||
if let Some((slot, status)) = response {
|
||||
let confirmations = if working_bank.src.roots().contains(&slot) {
|
||||
MAX_LOCKOUT_HISTORY + 1
|
||||
} else {
|
||||
let r_block_commitment_cache = block_commitment_cache.read().unwrap();
|
||||
r_block_commitment_cache
|
||||
.get_confirmation_count(slot)
|
||||
.unwrap_or(0)
|
||||
};
|
||||
if status.is_ok() {
|
||||
if confirmed_blocks != confirmations {
|
||||
now = Instant::now();
|
||||
confirmed_blocks = confirmations;
|
||||
}
|
||||
if confirmations >= min_confirmed_blocks {
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
if now.elapsed().as_secs() > 5 {
|
||||
return Err(Error::from(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"signature not found",
|
||||
)));
|
||||
}
|
||||
sleep(Duration::from_millis(250));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_turn(
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
state: &Arc<RwLock<StorageStateInner>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
blockhash: Hash,
|
||||
slot: Slot,
|
||||
slots_per_segment: u64,
|
||||
instruction_sender: &InstructionSender,
|
||||
total_proofs: usize,
|
||||
) -> Result<()> {
|
||||
let mut seed = [0u8; 32];
|
||||
let signature = storage_keypair.sign_message(&blockhash.as_ref());
|
||||
|
||||
let ix = storage_instruction::advertise_recent_blockhash(
|
||||
&storage_keypair.pubkey(),
|
||||
blockhash,
|
||||
get_segment_from_slot(slot, slots_per_segment),
|
||||
);
|
||||
instruction_sender.send(ix)?;
|
||||
|
||||
seed.copy_from_slice(&signature.as_ref()[..32]);
|
||||
|
||||
let mut rng = ChaChaRng::from_seed(seed);
|
||||
|
||||
{
|
||||
let mut w_state = state.write().unwrap();
|
||||
w_state.slot = slot;
|
||||
w_state.storage_blockhash = blockhash;
|
||||
}
|
||||
|
||||
if total_proofs == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Regenerate the answers
|
||||
let num_segments = get_segment_from_slot(slot, slots_per_segment) as usize;
|
||||
if num_segments == 0 {
|
||||
info!("Ledger has 0 segments!");
|
||||
return Ok(());
|
||||
}
|
||||
// TODO: what if the validator does not have this segment
|
||||
let segment = signature.as_ref()[0] as usize % num_segments;
|
||||
|
||||
debug!(
|
||||
"storage verifying: segment: {} identities: {}",
|
||||
segment, NUM_IDENTITIES,
|
||||
);
|
||||
|
||||
let mut samples = vec![];
|
||||
for _ in 0..NUM_STORAGE_SAMPLES {
|
||||
samples.push(rng.gen_range(0, 10));
|
||||
}
|
||||
debug!("generated samples: {:?}", samples);
|
||||
|
||||
// TODO: cuda required to generate the reference values
|
||||
// but if it is missing, then we need to take care not to
|
||||
// process storage mining results.
|
||||
if solana_perf::perf_libs::api().is_some() {
|
||||
// Lock the keys, since this is the IV memory,
|
||||
// it will be updated in-place by the encryption.
|
||||
// Should be overwritten by the proof signatures which replace the
|
||||
// key values by the time it runs again.
|
||||
|
||||
let mut statew = state.write().unwrap();
|
||||
|
||||
match chacha_cbc_encrypt_file_many_keys(
|
||||
blockstore,
|
||||
segment as u64,
|
||||
statew.slots_per_segment,
|
||||
&mut statew.storage_keys,
|
||||
&samples,
|
||||
) {
|
||||
Ok(hashes) => {
|
||||
debug!("Success! encrypted ledger segment: {}", segment);
|
||||
statew.storage_results.copy_from_slice(&hashes);
|
||||
}
|
||||
Err(e) => {
|
||||
info!("error encrypting file: {:?}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect_proofs(
|
||||
slot: Slot,
|
||||
slots_per_segment: u64,
|
||||
account_id: Pubkey,
|
||||
account: Account,
|
||||
storage_state: &Arc<RwLock<StorageStateInner>>,
|
||||
current_key_idx: &mut usize,
|
||||
) -> usize {
|
||||
let mut proofs_collected = 0;
|
||||
if let Ok(StorageContract::ArchiverStorage { proofs, .. }) = account.state() {
|
||||
//convert slot to segment
|
||||
let segment = get_segment_from_slot(slot, slots_per_segment);
|
||||
if let Some(proofs) = proofs.get(&segment) {
|
||||
for proof in proofs.iter() {
|
||||
{
|
||||
// TODO do this only once per account and segment? and maybe do it somewhere else
|
||||
debug!(
|
||||
"generating storage_keys from storage txs current_key_idx: {}",
|
||||
*current_key_idx
|
||||
);
|
||||
let storage_keys = &mut storage_state.write().unwrap().storage_keys;
|
||||
storage_keys[*current_key_idx..*current_key_idx + size_of::<Signature>()]
|
||||
.copy_from_slice(proof.signature.as_ref());
|
||||
*current_key_idx += size_of::<Signature>();
|
||||
*current_key_idx %= storage_keys.len();
|
||||
}
|
||||
|
||||
let mut statew = storage_state.write().unwrap();
|
||||
if statew.archiver_map.len() < segment as usize {
|
||||
statew.archiver_map.resize(segment as usize, HashMap::new());
|
||||
}
|
||||
let proof_segment_index = proof.segment_index as usize;
|
||||
if proof_segment_index < statew.archiver_map.len() {
|
||||
// TODO randomly select and verify the proof first
|
||||
// Copy the submitted proof
|
||||
statew.archiver_map[proof_segment_index]
|
||||
.entry(account_id)
|
||||
.or_default()
|
||||
.push(proof.clone());
|
||||
proofs_collected += 1;
|
||||
}
|
||||
}
|
||||
debug!("storage proof: slot: {}", slot);
|
||||
}
|
||||
}
|
||||
proofs_collected
|
||||
}
|
||||
|
||||
fn process_entries(
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
storage_state: &Arc<RwLock<StorageStateInner>>,
|
||||
bank_receiver: &Receiver<Vec<Arc<Bank>>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
storage_slots: &mut StorageSlots,
|
||||
current_key_idx: &mut usize,
|
||||
slots_per_turn: u64,
|
||||
instruction_sender: &InstructionSender,
|
||||
) -> Result<()> {
|
||||
let timeout = Duration::new(1, 0);
|
||||
storage_slots
|
||||
.pending_root_banks
|
||||
.append(&mut bank_receiver.recv_timeout(timeout)?);
|
||||
storage_slots
|
||||
.pending_root_banks
|
||||
.sort_unstable_by(|a, b| b.slot().cmp(&a.slot()));
|
||||
// check if any rooted slots were missed leading up to this one and bump slot count and process proofs for each missed root
|
||||
while let Some(bank) = storage_slots.pending_root_banks.pop() {
|
||||
if bank.slot() > storage_slots.last_root {
|
||||
storage_slots.slot_count += 1;
|
||||
storage_slots.last_root = bank.slot();
|
||||
if storage_slots.slot_count % slots_per_turn == 0 {
|
||||
// load all the archiver accounts in the bank. collect all their proofs at the current slot
|
||||
let archiver_accounts = archiver_accounts(bank.as_ref());
|
||||
// find proofs, and use them to update
|
||||
// the storage_keys with their signatures
|
||||
let mut total_proofs = 0;
|
||||
for (account_id, account) in archiver_accounts.into_iter() {
|
||||
total_proofs += Self::collect_proofs(
|
||||
bank.slot(),
|
||||
bank.slots_per_segment(),
|
||||
account_id,
|
||||
account,
|
||||
storage_state,
|
||||
current_key_idx,
|
||||
);
|
||||
}
|
||||
|
||||
                    // TODO un-ignore this result and be sure to drain all pending proofs
                    let _ignored = Self::process_turn(
                        &storage_keypair,
                        &storage_state,
                        &blockstore,
                        bank.last_blockhash(),
                        bank.slot(),
                        bank.slots_per_segment(),
                        instruction_sender,
                        total_proofs,
                    );
                    Self::submit_verifications(
                        get_segment_from_slot(bank.slot(), bank.slots_per_segment()),
                        &storage_state,
                        &storage_keypair,
                        instruction_sender,
                    )?
                }
            }
        }
        Ok(())
    }

    fn submit_verifications(
        current_segment: u64,
        storage_state: &Arc<RwLock<StorageStateInner>>,
        storage_keypair: &Arc<Keypair>,
        ix_sender: &Sender<Instruction>,
    ) -> Result<()> {
        // bundle up mining submissions from archivers
        // and submit them in a tx to the leader to get rewarded.
        let mut w_state = storage_state.write().unwrap();
        let mut max_proof_mask = 0;
        let proof_mask_limit = storage_instruction::proof_mask_limit();
        let instructions: Vec<_> = w_state
            .archiver_map
            .iter_mut()
            .enumerate()
            .flat_map(|(_, proof_map)| {
                let checked_proofs = proof_map
                    .iter_mut()
                    .filter_map(|(id, proofs)| {
                        if !proofs.is_empty() {
                            if (proofs.len() as u64) >= proof_mask_limit {
                                proofs.clear();
                                None
                            } else {
                                max_proof_mask = cmp::max(max_proof_mask, proofs.len());
                                Some((
                                    *id,
                                    proofs
                                        .drain(..)
                                        .map(|_| ProofStatus::Valid)
                                        .collect::<Vec<_>>(),
                                ))
                            }
                        } else {
                            None
                        }
                    })
                    .collect::<Vec<(_, _)>>();

                if !checked_proofs.is_empty() {
                    let max_accounts_per_ix =
                        storage_instruction::validation_account_limit(max_proof_mask);
                    let ixs = checked_proofs
                        .chunks(max_accounts_per_ix as usize)
                        .map(|checked_proofs| {
                            proof_validation(
                                &storage_keypair.pubkey(),
                                current_segment,
                                checked_proofs.to_vec(),
                            )
                        })
                        .collect::<Vec<_>>();
                    Some(ixs)
                } else {
                    None
                }
            })
            .flatten()
            .collect();
        let res: std::result::Result<_, _> = instructions
            .into_iter()
            .map(|ix| {
                sleep(Duration::from_millis(100));
                ix_sender.send(ix)
            })
            .collect();
        res?;
        Ok(())
    }

    pub fn join(self) -> thread::Result<()> {
        self.t_storage_create_accounts.join().unwrap();
        self.t_storage_mining_verifier.join()
    }
}

pub fn test_cluster_info(id: &Pubkey) -> Arc<ClusterInfo> {
    let contact_info = ContactInfo::new_localhost(id, 0);
    let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
    Arc::new(cluster_info)
}

pub fn get_identity_index_from_signature(key: &Signature) -> usize {
    let rkey = key.as_ref();
    let mut res: usize = (rkey[0] as usize)
        | ((rkey[1] as usize) << 8)
        | ((rkey[2] as usize) << 16)
        | ((rkey[3] as usize) << 24);
    res &= NUM_IDENTITIES - 1;
    res
}

#[cfg(test)]
mod tests {
    use super::*;
    use rayon::prelude::*;
    use solana_ledger::{
        genesis_utils::{create_genesis_config, GenesisConfigInfo},
        get_tmp_ledger_path,
    };
    use solana_runtime::bank::Bank;
    use solana_sdk::{
        hash::Hasher,
        signature::{Keypair, Signer},
    };
    use std::{
        cmp::{max, min},
        sync::{
            atomic::{AtomicBool, AtomicUsize, Ordering},
            mpsc::channel,
            Arc, RwLock,
        },
    };

    #[test]
    fn test_storage_stage_none_ledger() {
        let keypair = Arc::new(Keypair::new());
        let storage_keypair = Arc::new(Keypair::new());
        let exit = Arc::new(AtomicBool::new(false));

        let cluster_info = test_cluster_info(&keypair.pubkey());
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
        let bank = Arc::new(Bank::new(&genesis_config));
        let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[bank.clone()], 0)));
        let ledger_path = get_tmp_ledger_path!();
        let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
        let block_commitment_cache = Arc::new(RwLock::new(
            BlockCommitmentCache::default_with_blockstore(blockstore),
        ));
        let (_slot_sender, slot_receiver) = channel();
        let storage_state = StorageState::new(
            &bank.last_blockhash(),
            SLOTS_PER_TURN_TEST,
            bank.slots_per_segment(),
        );
        let storage_stage = StorageStage::new(
            &storage_state,
            slot_receiver,
            None,
            &keypair,
            &storage_keypair,
            &exit.clone(),
            &bank_forks,
            &cluster_info,
            block_commitment_cache,
        );
        exit.store(true, Ordering::Relaxed);
        storage_stage.join().unwrap();
    }

    #[test]
    fn test_signature_distribution() {
        // See that signatures have an even-ish distribution..
        let mut hist = Arc::new(vec![]);
        for _ in 0..NUM_IDENTITIES {
            Arc::get_mut(&mut hist).unwrap().push(AtomicUsize::new(0));
        }
        let hasher = Hasher::default();
        {
            let hist = hist.clone();
            (0..(32 * NUM_IDENTITIES))
                .into_par_iter()
                .for_each(move |_| {
                    let keypair = Keypair::new();
                    let hash = hasher.clone().result();
                    let signature = keypair.sign_message(&hash.as_ref());
                    let ix = get_identity_index_from_signature(&signature);
                    hist[ix].fetch_add(1, Ordering::Relaxed);
                });
        }

        let mut hist_max = 0;
        let mut hist_min = NUM_IDENTITIES;
        for x in hist.iter() {
            let val = x.load(Ordering::Relaxed);
            hist_max = max(val, hist_max);
            hist_min = min(val, hist_min);
        }
        info!("min: {} max: {}", hist_min, hist_max);
        assert_ne!(hist_min, 0);
    }
}

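A note on get_identity_index_from_signature above: it reads the first four bytes of the signature as a little-endian u32 and masks the value with NUM_IDENTITIES - 1, which only behaves like a uniform modulo when NUM_IDENTITIES is a power of two (the even-ish spread that test_signature_distribution asserts depends on this). A minimal stand-alone sketch of the same mapping; the names identity_index and num_identities are illustrative, not taken from the codebase:

    // Illustrative sketch only: assumes num_identities is a power of two,
    // mirroring the `res &= NUM_IDENTITIES - 1` mask in the function above.
    fn identity_index(sig_bytes: &[u8], num_identities: usize) -> usize {
        let lo = u32::from_le_bytes([sig_bytes[0], sig_bytes[1], sig_bytes[2], sig_bytes[3]]);
        (lo as usize) & (num_identities - 1)
    }
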
@ -18,7 +18,6 @@ use crate::{
    shred_fetch_stage::ShredFetchStage,
    sigverify_shreds::ShredSigVerifier,
    sigverify_stage::SigVerifyStage,
    storage_stage::{StorageStage, StorageState},
};
use crossbeam_channel::unbounded;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
@ -50,7 +49,6 @@ pub struct Tvu {
    replay_stage: ReplayStage,
    ledger_cleanup_service: Option<LedgerCleanupService>,
    accounts_background_service: AccountsBackgroundService,
    storage_stage: StorageStage,
    accounts_hash_verifier: AccountsHashVerifier,
}

@ -81,12 +79,10 @@ impl Tvu {
    pub fn new(
        vote_account: &Pubkey,
        authorized_voter_keypairs: Vec<Arc<Keypair>>,
        storage_keypair: &Arc<Keypair>,
        bank_forks: &Arc<RwLock<BankForks>>,
        cluster_info: &Arc<ClusterInfo>,
        sockets: Sockets,
        blockstore: Arc<Blockstore>,
        storage_state: &StorageState,
        ledger_signal_receiver: Receiver<bool>,
        subscriptions: &Arc<RpcSubscriptions>,
        poh_recorder: &Arc<Mutex<PohRecorder>>,
@ -183,12 +179,12 @@ impl Tvu {
            leader_schedule_cache: leader_schedule_cache.clone(),
            latest_root_senders: vec![ledger_cleanup_slot_sender],
            accounts_hash_sender: Some(accounts_hash_sender),
            block_commitment_cache: block_commitment_cache.clone(),
            block_commitment_cache,
            transaction_status_sender,
            rewards_recorder_sender,
        };

        let (replay_stage, root_bank_receiver) = ReplayStage::new(
        let replay_stage = ReplayStage::new(
            replay_stage_config,
            blockstore.clone(),
            bank_forks.clone(),
@ -212,18 +208,6 @@ impl Tvu {

        let accounts_background_service = AccountsBackgroundService::new(bank_forks.clone(), &exit);

        let storage_stage = StorageStage::new(
            storage_state,
            root_bank_receiver,
            Some(blockstore),
            &keypair,
            storage_keypair,
            &exit,
            &bank_forks,
            &cluster_info,
            block_commitment_cache,
        );

        Tvu {
            fetch_stage,
            sigverify_stage,
@ -231,7 +215,6 @@ impl Tvu {
            replay_stage,
            ledger_cleanup_service,
            accounts_background_service,
            storage_stage,
            accounts_hash_verifier,
        }
    }
@ -240,7 +223,6 @@ impl Tvu {
        self.retransmit_stage.join()?;
        self.fetch_stage.join()?;
        self.sigverify_stage.join()?;
        self.storage_stage.join()?;
        if self.ledger_cleanup_service.is_some() {
            self.ledger_cleanup_service.unwrap().join()?;
        }
@ -289,7 +271,6 @@ pub mod tests {
        let (exit, poh_recorder, poh_service, _entry_receiver) =
            create_test_recorder(&bank, &blockstore, None);
        let vote_keypair = Keypair::new();
        let storage_keypair = Arc::new(Keypair::new());
        let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
        let block_commitment_cache = Arc::new(RwLock::new(
            BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
@ -299,7 +280,6 @@ pub mod tests {
        let tvu = Tvu::new(
            &vote_keypair.pubkey(),
            vec![Arc::new(vote_keypair)],
            &storage_keypair,
            &bank_forks,
            &cref1,
            {
@ -311,7 +291,6 @@ pub mod tests {
                }
            },
            blockstore,
            &StorageState::default(),
            l_receiver,
            &Arc::new(RpcSubscriptions::new(
                &exit,

@ -18,7 +18,6 @@ use crate::{
    serve_repair_service::ServeRepairService,
    sigverify,
    snapshot_packager_service::SnapshotPackagerService,
    storage_stage::StorageState,
    tpu::Tpu,
    transaction_status_service::TransactionStatusService,
    tvu::{Sockets, Tvu, TvuConfig},
@ -36,7 +35,7 @@ use solana_ledger::{
use solana_metrics::datapoint_info;
use solana_runtime::bank::Bank;
use solana_sdk::{
    clock::{Slot, DEFAULT_SLOTS_PER_TURN},
    clock::Slot,
    epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET,
    genesis_config::GenesisConfig,
    hash::Hash,
@ -63,7 +62,6 @@ pub struct ValidatorConfig {
    pub expected_genesis_hash: Option<Hash>,
    pub expected_shred_version: Option<u16>,
    pub voting_disabled: bool,
    pub storage_slots_per_turn: u64,
    pub account_paths: Vec<PathBuf>,
    pub rpc_config: JsonRpcConfig,
    pub rpc_ports: Option<(u16, u16)>, // (API, PubSub)
@ -90,7 +88,6 @@ impl Default for ValidatorConfig {
            expected_genesis_hash: None,
            expected_shred_version: None,
            voting_disabled: false,
            storage_slots_per_turn: DEFAULT_SLOTS_PER_TURN,
            max_ledger_shreds: None,
            account_paths: Vec::new(),
            rpc_config: JsonRpcConfig::default(),
@ -153,7 +150,6 @@ impl Validator {
        ledger_path: &Path,
        vote_account: &Pubkey,
        mut authorized_voter_keypairs: Vec<Arc<Keypair>>,
        storage_keypair: &Arc<Keypair>,
        entrypoint_info_option: Option<&ContactInfo>,
        poh_verify: bool,
        config: &ValidatorConfig,
@ -227,13 +223,6 @@ impl Validator {
        }

        let cluster_info = Arc::new(ClusterInfo::new(node.info.clone(), keypair.clone()));

        let storage_state = StorageState::new(
            &bank.last_blockhash(),
            config.storage_slots_per_turn,
            bank.slots_per_segment(),
        );

        let blockstore = Arc::new(blockstore);
        let block_commitment_cache = Arc::new(RwLock::new(
            BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
@ -264,7 +253,6 @@ impl Validator {
            cluster_info.clone(),
            genesis_config.hash(),
            ledger_path,
            storage_state.clone(),
            validator_exit.clone(),
            config.trusted_validators.clone(),
        ),
@ -394,7 +382,6 @@ impl Validator {
        let tvu = Tvu::new(
            vote_account,
            authorized_voter_keypairs,
            storage_keypair,
            &bank_forks,
            &cluster_info,
            Sockets {
@ -423,7 +410,6 @@ impl Validator {
                .collect(),
            },
            blockstore.clone(),
            &storage_state,
            ledger_signal_receiver,
            &subscriptions,
            &poh_recorder,
@ -715,7 +701,6 @@ impl TestValidator {
        let (ledger_path, blockhash) = create_new_tmp_ledger!(&genesis_config);

        let leader_voting_keypair = Arc::new(voting_keypair);
        let storage_keypair = Arc::new(Keypair::new());
        let config = ValidatorConfig {
            rpc_ports: Some((node.info.rpc.port(), node.info.rpc_pubsub.port())),
            ..ValidatorConfig::default()
@ -726,7 +711,6 @@ impl TestValidator {
            &ledger_path,
            &leader_voting_keypair.pubkey(),
            vec![leader_voting_keypair.clone()],
            &storage_keypair,
            None,
            true,
            &config,
@ -882,7 +866,6 @@ mod tests {
        let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);

        let voting_keypair = Arc::new(Keypair::new());
        let storage_keypair = Arc::new(Keypair::new());
        let config = ValidatorConfig {
            rpc_ports: Some((
                validator_node.info.rpc.port(),
@ -896,7 +879,6 @@ mod tests {
            &validator_ledger_path,
            &voting_keypair.pubkey(),
            vec![voting_keypair.clone()],
            &storage_keypair,
            Some(&leader_node.info),
            true,
            &config,
@ -921,7 +903,6 @@ mod tests {
        let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
        ledger_paths.push(validator_ledger_path.clone());
        let vote_account_keypair = Arc::new(Keypair::new());
        let storage_keypair = Arc::new(Keypair::new());
        let config = ValidatorConfig {
            rpc_ports: Some((
                validator_node.info.rpc.port(),
@ -935,7 +916,6 @@ mod tests {
            &validator_ledger_path,
            &vote_account_keypair.pubkey(),
            vec![vote_account_keypair.clone()],
            &storage_keypair,
            Some(&leader_node.info),
            true,
            &config,

@ -4,7 +4,7 @@
use crate::{
    cluster_info::ClusterInfo,
    cluster_slots::ClusterSlots,
    repair_service::{RepairService, RepairStrategy},
    repair_service::{RepairInfo, RepairService},
    result::{Error, Result},
};
use crossbeam_channel::{
@ -254,7 +254,7 @@ impl WindowService {
        retransmit: PacketSender,
        repair_socket: Arc<UdpSocket>,
        exit: &Arc<AtomicBool>,
        repair_strategy: RepairStrategy,
        repair_info: RepairInfo,
        leader_schedule_cache: &Arc<LeaderScheduleCache>,
        shred_filter: F,
        cluster_slots: Arc<ClusterSlots>,
@ -265,17 +265,14 @@ impl WindowService {
            + std::marker::Send
            + std::marker::Sync,
    {
        let bank_forks = match repair_strategy {
            RepairStrategy::RepairRange(_) => None,
            RepairStrategy::RepairAll { ref bank_forks, .. } => Some(bank_forks.clone()),
        };
        let bank_forks = Some(repair_info.bank_forks.clone());

        let repair_service = RepairService::new(
            blockstore.clone(),
            exit.clone(),
            repair_socket,
            cluster_info.clone(),
            repair_strategy,
            repair_info,
            cluster_slots,
        );

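On the bank_forks change above: with RepairStrategy removed, the window service no longer matches on a strategy enum and instead reads bank_forks directly off the RepairInfo it is handed. Only that one field is visible in this diff, so the stand-in types in the sketch below are assumptions for illustration, not the real solana-core definitions:

    // Illustration only: BankForksLike and RepairInfoLike are hypothetical stand-ins.
    use std::sync::{Arc, RwLock};

    struct BankForksLike;

    struct RepairInfoLike {
        bank_forks: Arc<RwLock<BankForksLike>>, // the only field this diff demonstrably uses
    }

    fn bank_forks_of(info: &RepairInfoLike) -> Option<Arc<RwLock<BankForksLike>>> {
        // was: a match over RepairRange / RepairAll; now a direct field read
        Some(info.bank_forks.clone())
    }
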
@ -491,10 +488,6 @@ impl WindowService {
#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        cluster_info::ClusterInfo, contact_info::ContactInfo, repair_service::RepairSlotRange,
    };
    use rand::thread_rng;
    use solana_ledger::shred::DataShredHeader;
    use solana_ledger::{
        blockstore::{make_many_slot_entries, Blockstore},
@ -503,21 +496,13 @@ mod test {
        get_tmp_ledger_path,
        shred::Shredder,
    };
    use solana_perf::packet::Packet;
    use solana_sdk::{
        clock::Slot,
        epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
        hash::Hash,
        signature::{Keypair, Signer},
    };
    use std::{
        net::UdpSocket,
        sync::atomic::{AtomicBool, Ordering},
        sync::mpsc::channel,
        sync::Arc,
        thread::sleep,
        time::Duration,
    };
    use std::sync::Arc;

    fn local_entries_to_shred(
        entries: &[Entry],
@ -620,71 +605,6 @@ mod test {
        );
    }

    fn make_test_window(
        verified_receiver: CrossbeamReceiver<Vec<Packets>>,
        exit: Arc<AtomicBool>,
    ) -> WindowService {
        let blockstore_path = get_tmp_ledger_path!();
        let (blockstore, _, _) = Blockstore::open_with_signal(&blockstore_path)
            .expect("Expected to be able to open database ledger");

        let blockstore = Arc::new(blockstore);
        let (retransmit_sender, _retransmit_receiver) = channel();
        let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
            ContactInfo::new_localhost(&Pubkey::default(), 0),
        ));
        let cluster_slots = Arc::new(ClusterSlots::default());
        let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
        let window = WindowService::new(
            blockstore,
            cluster_info,
            verified_receiver,
            retransmit_sender,
            repair_sock,
            &exit,
            RepairStrategy::RepairRange(RepairSlotRange { start: 0, end: 0 }),
            &Arc::new(LeaderScheduleCache::default()),
            |_, _, _, _| true,
            cluster_slots,
        );
        window
    }

    #[test]
    fn test_recv_window() {
        let (packet_sender, packet_receiver) = unbounded();
        let exit = Arc::new(AtomicBool::new(false));
        let window = make_test_window(packet_receiver, exit.clone());
        // send 5 slots worth of data to the window
        let (shreds, _) = make_many_slot_entries(0, 5, 10);
        let packets: Vec<_> = shreds
            .into_iter()
            .map(|mut s| {
                let mut p = Packet::default();
                p.data.copy_from_slice(&mut s.payload);
                p
            })
            .collect();
        let mut packets = Packets::new(packets);
        packet_sender.send(vec![packets.clone()]).unwrap();
        sleep(Duration::from_millis(500));

        // add some empty packets to the data set. These should fail to deserialize
        packets.packets.append(&mut vec![Packet::default(); 10]);
        packets.packets.shuffle(&mut thread_rng());
        packet_sender.send(vec![packets.clone()]).unwrap();
        sleep(Duration::from_millis(500));

        // send 1 empty packet that cannot deserialize into a shred
        packet_sender
            .send(vec![Packets::new(vec![Packet::default(); 1])])
            .unwrap();
        sleep(Duration::from_millis(500));

        exit.store(true, Ordering::Relaxed);
        window.join().unwrap();
    }

    #[test]
    fn test_run_check_duplicate() {
        let blockstore_path = get_tmp_ledger_path!();