encapsulates turbine peers computations of broadcast & retransmit stages (#18238)
The broadcast stage and the retransmit stage should arrange nodes on the turbine broadcast tree in exactly the same order. Additionally, any change to this ordering (e.g. updating how unstaked nodes are handled) requires feature gating to keep the cluster in sync. The current implementation is scattered across several public methods and exposes too many implementation details (e.g. usize indices into the peers vector), which makes code changes and checking for feature activations more difficult. This commit encapsulates the turbine peer computations into a new struct and exposes only two public methods, get_broadcast_peer and get_retransmit_peers, to call-sites.
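The new struct itself lands in solana_core::cluster_nodes (referenced by the TODO in the retransmit test hunk below) and is not part of the hunks shown here. As a rough illustration of the intended API boundary, here is a minimal sketch; the struct name, field layout, and the seed handling are illustrative assumptions, not the committed implementation:

// Minimal sketch of the encapsulation described above. Only get_broadcast_peer and
// get_retransmit_peers are public; the sorted peer list and its indices stay private.
use {
    solana_gossip::contact_info::ContactInfo,
    solana_sdk::pubkey::Pubkey,
    std::collections::HashMap,
};

pub struct TurbinePeersSketch {
    // Peers (including self) ordered by stake descending, ties broken by pubkey,
    // so broadcast and retransmit stages derive the exact same turbine tree.
    nodes: Vec<(u64, ContactInfo)>,
}

impl TurbinePeersSketch {
    pub fn new(peers: &[ContactInfo], stakes: &HashMap<Pubkey, u64>) -> Self {
        let mut nodes: Vec<(u64, ContactInfo)> = peers
            .iter()
            .map(|peer| {
                // Mirror sorted_stakes_with_index: a missing or zero stake counts as 1.
                let stake = stakes.get(&peer.id).copied().unwrap_or(1).max(1);
                (stake, peer.clone())
            })
            .collect();
        nodes.sort_by(|(l_stake, l_peer), (r_stake, r_peer)| {
            r_stake.cmp(l_stake).then(r_peer.id.cmp(&l_peer.id))
        });
        Self { nodes }
    }

    /// Broadcast stage: the single peer to send a shred to.
    pub fn get_broadcast_peer(&self, shred_seed: [u8; 32]) -> Option<&ContactInfo> {
        if self.nodes.is_empty() {
            return None;
        }
        // Stand-in for the stake-weighted shuffle used by the real code:
        // derive a deterministic index from the shred seed.
        let k = shred_seed[0] as usize % self.nodes.len();
        self.nodes.get(k).map(|(_, peer)| peer)
    }

    /// Retransmit stage: this node's fanout peers in the seeded ordering.
    pub fn get_retransmit_peers(&self, shred_seed: [u8; 32], fanout: usize) -> Vec<&ContactInfo> {
        if self.nodes.is_empty() {
            return Vec::new();
        }
        let start = shred_seed[0] as usize % self.nodes.len();
        self.nodes
            .iter()
            .cycle()
            .skip(start)
            .take(fanout.min(self.nodes.len()))
            .map(|(_, peer)| peer)
            .collect()
    }
}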
@@ -1325,80 +1325,6 @@ impl ClusterInfo {
             || !ContactInfo::is_valid_address(&contact_info.tvu)
     }
 
-    fn sorted_stakes_with_index(
-        peers: &[ContactInfo],
-        stakes: Option<&HashMap<Pubkey, u64>>,
-    ) -> Vec<(u64, usize)> {
-        let stakes_and_index: Vec<_> = peers
-            .iter()
-            .enumerate()
-            .map(|(i, c)| {
-                // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is
-                // assumed to be missing entry. So let's make sure stake weights are atleast 1
-                let stake = 1.max(
-                    stakes
-                        .as_ref()
-                        .map_or(1, |stakes| *stakes.get(&c.id).unwrap_or(&1)),
-                );
-                (stake, i)
-            })
-            .sorted_by(|(l_stake, l_info), (r_stake, r_info)| {
-                if r_stake == l_stake {
-                    peers[*r_info].id.cmp(&peers[*l_info].id)
-                } else {
-                    r_stake.cmp(l_stake)
-                }
-            })
-            .collect();
-
-        stakes_and_index
-    }
-
-    fn stake_weighted_shuffle(
-        stakes_and_index: &[(u64, usize)],
-        seed: [u8; 32],
-    ) -> Vec<(u64, usize)> {
-        let stake_weights: Vec<_> = stakes_and_index.iter().map(|(w, _)| *w).collect();
-
-        let shuffle = weighted_shuffle(&stake_weights, seed);
-
-        shuffle.iter().map(|x| stakes_and_index[*x]).collect()
-    }
-
-    // Return sorted_retransmit_peers(including self) and their stakes
-    pub fn sorted_retransmit_peers_and_stakes(
-        &self,
-        stakes: Option<&HashMap<Pubkey, u64>>,
-    ) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
-        let mut peers = self.tvu_peers();
-        // insert "self" into this list for the layer and neighborhood computation
-        peers.push(self.my_contact_info());
-        let stakes_and_index = ClusterInfo::sorted_stakes_with_index(&peers, stakes);
-        (peers, stakes_and_index)
-    }
-
-    /// Return sorted Retransmit peers and index of `Self.id()` as if it were in that list
-    pub fn shuffle_peers_and_index(
-        id: &Pubkey,
-        peers: &[ContactInfo],
-        stakes_and_index: &[(u64, usize)],
-        seed: [u8; 32],
-    ) -> (usize, Vec<(u64, usize)>) {
-        let shuffled_stakes_and_index = ClusterInfo::stake_weighted_shuffle(stakes_and_index, seed);
-        let self_index = shuffled_stakes_and_index
-            .iter()
-            .enumerate()
-            .find_map(|(i, (_stake, index))| {
-                if peers[*index].id == *id {
-                    Some(i)
-                } else {
-                    None
-                }
-            })
-            .unwrap();
-        (self_index, shuffled_stakes_and_index)
-    }
-
     /// compute broadcast table
     pub fn tpu_peers(&self) -> Vec<ContactInfo> {
         let self_pubkey = self.id();
@@ -3071,14 +2997,6 @@ pub fn push_messages_to_peer(
     Ok(())
 }
 
-pub fn stake_weight_peers(
-    peers: &mut Vec<ContactInfo>,
-    stakes: Option<&HashMap<Pubkey, u64>>,
-) -> Vec<(u64, usize)> {
-    peers.dedup();
-    ClusterInfo::sorted_stakes_with_index(peers, stakes)
-}
-
 // Filters out values from nodes with different shred-version.
 fn filter_on_shred_version(
     mut msg: Protocol,
@@ -4061,15 +3979,6 @@ mod tests {
         assert_ne!(contact_info.shred_version, d.shred_version);
         cluster_info.insert_info(contact_info);
         stakes.insert(id4, 10);
-
-        let mut peers = cluster_info.tvu_peers();
-        let peers_and_stakes = stake_weight_peers(&mut peers, Some(&stakes));
-        assert_eq!(peers.len(), 2);
-        assert_eq!(peers[0].id, id);
-        assert_eq!(peers[1].id, id2);
-        assert_eq!(peers_and_stakes.len(), 2);
-        assert_eq!(peers_and_stakes[0].0, 10);
-        assert_eq!(peers_and_stakes[1].0, 1);
     }
 
     #[test]
@@ -105,7 +105,7 @@ impl ContactInfo {
     }
 
     /// New random ContactInfo for tests and simulations.
-    pub(crate) fn new_rand<R: rand::Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
+    pub fn new_rand<R: rand::Rng>(rng: &mut R, pubkey: Option<Pubkey>) -> Self {
         let delay = 10 * 60 * 1000; // 10 minutes
         let now = timestamp() - delay + rng.gen_range(0, 2 * delay);
         let pubkey = pubkey.unwrap_or_else(solana_sdk::pubkey::new_rand);
@@ -1,4 +1,11 @@
-use solana_sdk::clock::Slot;
+use {
+    crate::{
+        cluster_info::ClusterInfo, contact_info::ContactInfo, weighted_shuffle::weighted_shuffle,
+    },
+    itertools::Itertools,
+    solana_sdk::{clock::Slot, pubkey::Pubkey},
+    std::collections::HashMap,
+};
 
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, AbiExample, AbiEnumVisitor)]
 enum CompressionType {
@@ -19,3 +26,74 @@ pub(crate) struct EpochIncompleteSlots {
     compression: CompressionType,
     compressed_list: Vec<u8>,
 }
+
+// Legacy methods copied for testing backward compatibility.
+
+pub fn sorted_retransmit_peers_and_stakes(
+    cluster_info: &ClusterInfo,
+    stakes: Option<&HashMap<Pubkey, u64>>,
+) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
+    let mut peers = cluster_info.tvu_peers();
+    // insert "self" into this list for the layer and neighborhood computation
+    peers.push(cluster_info.my_contact_info());
+    let stakes_and_index = sorted_stakes_with_index(&peers, stakes);
+    (peers, stakes_and_index)
+}
+
+pub fn sorted_stakes_with_index(
+    peers: &[ContactInfo],
+    stakes: Option<&HashMap<Pubkey, u64>>,
+) -> Vec<(u64, usize)> {
+    let stakes_and_index: Vec<_> = peers
+        .iter()
+        .enumerate()
+        .map(|(i, c)| {
+            // For stake weighted shuffle a valid weight is atleast 1. Weight 0 is
+            // assumed to be missing entry. So let's make sure stake weights are atleast 1
+            let stake = 1.max(
+                stakes
+                    .as_ref()
+                    .map_or(1, |stakes| *stakes.get(&c.id).unwrap_or(&1)),
+            );
+            (stake, i)
+        })
+        .sorted_by(|(l_stake, l_info), (r_stake, r_info)| {
+            if r_stake == l_stake {
+                peers[*r_info].id.cmp(&peers[*l_info].id)
+            } else {
+                r_stake.cmp(l_stake)
+            }
+        })
+        .collect();
+
+    stakes_and_index
+}
+
+pub fn shuffle_peers_and_index(
+    id: &Pubkey,
+    peers: &[ContactInfo],
+    stakes_and_index: &[(u64, usize)],
+    seed: [u8; 32],
+) -> (usize, Vec<(u64, usize)>) {
+    let shuffled_stakes_and_index = stake_weighted_shuffle(stakes_and_index, seed);
+    let self_index = shuffled_stakes_and_index
+        .iter()
+        .enumerate()
+        .find_map(|(i, (_stake, index))| {
+            if peers[*index].id == *id {
+                Some(i)
+            } else {
+                None
+            }
+        })
+        .unwrap();
+    (self_index, shuffled_stakes_and_index)
+}
+
+fn stake_weighted_shuffle(stakes_and_index: &[(u64, usize)], seed: [u8; 32]) -> Vec<(u64, usize)> {
+    let stake_weights: Vec<_> = stakes_and_index.iter().map(|(w, _)| *w).collect();
+
+    let shuffle = weighted_shuffle(&stake_weights, seed);
+
+    shuffle.iter().map(|x| stakes_and_index[*x]).collect()
+}
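For context, a hedged sketch of how these legacy helpers compose in a test (this snippet is not part of the commit; it assumes the post-commit visibility shown in the surrounding hunks, i.e. pub mod deprecated and a pub ContactInfo::new_rand):

use {
    rand::Rng,
    solana_gossip::{
        contact_info::ContactInfo,
        deprecated::{shuffle_peers_and_index, sorted_stakes_with_index},
    },
    solana_sdk::pubkey::Pubkey,
    std::collections::HashMap,
};

// Order peers by stake, shuffle them with a seed, and locate "self" in the shuffle,
// which is exactly the sequence the retransmit simulation below exercises.
fn legacy_retransmit_order<R: Rng>(rng: &mut R) {
    let peers: Vec<ContactInfo> = (0..8).map(|_| ContactInfo::new_rand(&mut *rng, None)).collect();
    let self_id: Pubkey = peers[0].id;
    let stakes: HashMap<Pubkey, u64> = peers
        .iter()
        .enumerate()
        .map(|(i, peer)| (peer.id, 100 * (i as u64 + 1)))
        .collect();
    // Stake-descending order with pubkey tie-breaks, then a seeded weighted shuffle.
    let stakes_and_index = sorted_stakes_with_index(&peers, Some(&stakes));
    let mut seed = [0u8; 32];
    rng.fill(&mut seed[..]);
    let (self_index, shuffled) = shuffle_peers_and_index(&self_id, &peers, &stakes_and_index, seed);
    // self_index points back at this node inside the shuffled list.
    assert_eq!(peers[shuffled[self_index].1].id, self_id);
}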
@@ -13,7 +13,7 @@ pub mod crds_gossip_push;
 pub mod crds_shards;
 pub mod crds_value;
 pub mod data_budget;
-mod deprecated;
+pub mod deprecated;
 pub mod duplicate_shred;
 pub mod epoch_slots;
 pub mod gossip_error;
@@ -5,6 +5,7 @@ use {
     solana_gossip::{
         cluster_info::{compute_retransmit_peers, ClusterInfo},
         contact_info::ContactInfo,
+        deprecated::{shuffle_peers_and_index, sorted_retransmit_peers_and_stakes},
     },
     solana_sdk::pubkey::Pubkey,
     std::{
@@ -118,14 +119,13 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
         .map(|i| {
             let mut seed = [0; 32];
             seed[0..4].copy_from_slice(&i.to_le_bytes());
+            // TODO: Ideally these should use the new methods in
+            // solana_core::cluster_nodes, however that would add build
+            // dependency on solana_core which is not desired.
             let (peers, stakes_and_index) =
-                cluster_info.sorted_retransmit_peers_and_stakes(Some(&staked_nodes));
-            let (_, shuffled_stakes_and_indexes) = ClusterInfo::shuffle_peers_and_index(
-                &cluster_info.id(),
-                &peers,
-                &stakes_and_index,
-                seed,
-            );
+                sorted_retransmit_peers_and_stakes(&cluster_info, Some(&staked_nodes));
+            let (_, shuffled_stakes_and_indexes) =
+                shuffle_peers_and_index(&cluster_info.id(), &peers, &stakes_and_index, seed);
             shuffled_stakes_and_indexes
                 .into_iter()
                 .map(|(_, i)| peers[i].clone())