Cross-cluster gossip contamination is causing the cluster-slots hash map to
contain a lot of bogus values and consume too much memory:
https://github.com/solana-labs/solana/issues/17789
If a node is using the same identity key across clusters, then these
erroneous values might not be filtered out by shred-versions check,
because one of the variants of the contact-info will have matching
shred-version:
https://github.com/solana-labs/solana/issues/17789#issuecomment-896304969
The cluster-slots hash map is bounded and trimmed at the lower end by
the current root. This commit also discards slots that are epochs ahead
of the root.
(cherry picked from commit 563aec0b4d)
Co-authored-by: behzad nouri <behzadnouri@gmail.com>
This commit is contained in:
@ -4,7 +4,10 @@ use {
|
|||||||
cluster_info::ClusterInfo, contact_info::ContactInfo, crds::Cursor, epoch_slots::EpochSlots,
|
cluster_info::ClusterInfo, contact_info::ContactInfo, crds::Cursor, epoch_slots::EpochSlots,
|
||||||
},
|
},
|
||||||
solana_runtime::{bank::Bank, epoch_stakes::NodeIdToVoteAccounts},
|
solana_runtime::{bank::Bank, epoch_stakes::NodeIdToVoteAccounts},
|
||||||
solana_sdk::{clock::Slot, pubkey::Pubkey},
|
solana_sdk::{
|
||||||
|
clock::{Slot, DEFAULT_SLOTS_PER_EPOCH},
|
||||||
|
pubkey::Pubkey,
|
||||||
|
},
|
||||||
std::{
|
std::{
|
||||||
collections::{BTreeMap, HashMap},
|
collections::{BTreeMap, HashMap},
|
||||||
sync::{Arc, Mutex, RwLock},
|
sync::{Arc, Mutex, RwLock},
|
||||||
@ -36,10 +39,11 @@ impl ClusterSlots {
|
|||||||
let mut cursor = self.cursor.lock().unwrap();
|
let mut cursor = self.cursor.lock().unwrap();
|
||||||
cluster_info.get_epoch_slots(&mut cursor)
|
cluster_info.get_epoch_slots(&mut cursor)
|
||||||
};
|
};
|
||||||
self.update_internal(root_bank.slot(), epoch_slots);
|
let num_epoch_slots = root_bank.get_slots_in_epoch(root_bank.epoch());
|
||||||
|
self.update_internal(root_bank.slot(), epoch_slots, num_epoch_slots);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn update_internal(&self, root: Slot, epoch_slots_list: Vec<EpochSlots>) {
|
fn update_internal(&self, root: Slot, epoch_slots_list: Vec<EpochSlots>, num_epoch_slots: u64) {
|
||||||
// Attach validator's total stake.
|
// Attach validator's total stake.
|
||||||
let epoch_slots_list: Vec<_> = {
|
let epoch_slots_list: Vec<_> = {
|
||||||
let validator_stakes = self.validator_stakes.read().unwrap();
|
let validator_stakes = self.validator_stakes.read().unwrap();
|
||||||
@ -54,13 +58,20 @@ impl ClusterSlots {
|
|||||||
})
|
})
|
||||||
.collect()
|
.collect()
|
||||||
};
|
};
|
||||||
|
// Discard slots at or before current root or epochs ahead.
|
||||||
|
let slot_range = (root + 1)
|
||||||
|
..root.saturating_add(
|
||||||
|
num_epoch_slots
|
||||||
|
.max(DEFAULT_SLOTS_PER_EPOCH)
|
||||||
|
.saturating_mul(2),
|
||||||
|
);
|
||||||
let slot_nodes_stakes = epoch_slots_list
|
let slot_nodes_stakes = epoch_slots_list
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.flat_map(|(epoch_slots, stake)| {
|
.flat_map(|(epoch_slots, stake)| {
|
||||||
epoch_slots
|
epoch_slots
|
||||||
.to_slots(root)
|
.to_slots(root)
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.filter(|slot| *slot > root)
|
.filter(|slot| slot_range.contains(slot))
|
||||||
.zip(std::iter::repeat((epoch_slots.from, stake)))
|
.zip(std::iter::repeat((epoch_slots.from, stake)))
|
||||||
})
|
})
|
||||||
.into_group_map();
|
.into_group_map();
|
||||||
@ -187,7 +198,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_update_noop() {
|
fn test_update_noop() {
|
||||||
let cs = ClusterSlots::default();
|
let cs = ClusterSlots::default();
|
||||||
cs.update_internal(0, vec![]);
|
cs.update_internal(0, vec![], DEFAULT_SLOTS_PER_EPOCH);
|
||||||
assert!(cs.cluster_slots.read().unwrap().is_empty());
|
assert!(cs.cluster_slots.read().unwrap().is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -195,7 +206,7 @@ mod tests {
|
|||||||
fn test_update_empty() {
|
fn test_update_empty() {
|
||||||
let cs = ClusterSlots::default();
|
let cs = ClusterSlots::default();
|
||||||
let epoch_slot = EpochSlots::default();
|
let epoch_slot = EpochSlots::default();
|
||||||
cs.update_internal(0, vec![epoch_slot]);
|
cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH);
|
||||||
assert!(cs.lookup(0).is_none());
|
assert!(cs.lookup(0).is_none());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -205,7 +216,7 @@ mod tests {
|
|||||||
let cs = ClusterSlots::default();
|
let cs = ClusterSlots::default();
|
||||||
let mut epoch_slot = EpochSlots::default();
|
let mut epoch_slot = EpochSlots::default();
|
||||||
epoch_slot.fill(&[0], 0);
|
epoch_slot.fill(&[0], 0);
|
||||||
cs.update_internal(0, vec![epoch_slot]);
|
cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH);
|
||||||
assert!(cs.lookup(0).is_none());
|
assert!(cs.lookup(0).is_none());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -214,7 +225,7 @@ mod tests {
|
|||||||
let cs = ClusterSlots::default();
|
let cs = ClusterSlots::default();
|
||||||
let mut epoch_slot = EpochSlots::default();
|
let mut epoch_slot = EpochSlots::default();
|
||||||
epoch_slot.fill(&[1], 0);
|
epoch_slot.fill(&[1], 0);
|
||||||
cs.update_internal(0, vec![epoch_slot]);
|
cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH);
|
||||||
assert!(cs.lookup(0).is_none());
|
assert!(cs.lookup(0).is_none());
|
||||||
assert!(cs.lookup(1).is_some());
|
assert!(cs.lookup(1).is_some());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@ -344,7 +355,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
|
|
||||||
*cs.validator_stakes.write().unwrap() = map;
|
*cs.validator_stakes.write().unwrap() = map;
|
||||||
cs.update_internal(0, vec![epoch_slot]);
|
cs.update_internal(0, vec![epoch_slot], DEFAULT_SLOTS_PER_EPOCH);
|
||||||
assert!(cs.lookup(1).is_some());
|
assert!(cs.lookup(1).is_some());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
cs.lookup(1)
|
cs.lookup(1)
|
||||||
|
Reference in New Issue
Block a user