shares the lock on gossip when processing prune messages (#13339)

Processing prune messages acquires an exclusive lock on gossip:
https://github.com/solana-labs/solana/blob/55b0428ff/core/src/cluster_info.rs#L1824-L1825
This can be reduced to a shared lock if active-sets are changed to use
atomic bloom filters:
https://github.com/solana-labs/solana/blob/55b0428ff/core/src/crds_gossip_push.rs#L50
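
The change works because an atomic bloom filter can be mutated through a shared reference: each insertion is an AtomicU64::fetch_or on one 64-bit word, so a caller holding only a read guard on the gossip lock can still record prune origins. Below is a minimal, self-contained sketch of that pattern; the AtomicBits type, its set/get methods, and the RwLock wrapper are illustrative stand-ins, not code from this commit.

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::RwLock;

struct AtomicBits {
    bits: Vec<AtomicU64>, // 64 bits per word
}

impl AtomicBits {
    fn new(num_bits: usize) -> Self {
        let words = (num_bits + 63) / 64;
        Self {
            bits: (0..words).map(|_| AtomicU64::new(0)).collect(),
        }
    }

    // Setting a bit goes through &self: no exclusive borrow is required.
    fn set(&self, pos: usize) {
        let index = pos >> 6; // which AtomicU64 word
        let mask = 1u64 << (pos & 63); // which bit within that word
        self.bits[index].fetch_or(mask, Ordering::Relaxed);
    }

    fn get(&self, pos: usize) -> bool {
        let index = pos >> 6;
        let mask = 1u64 << (pos & 63);
        (self.bits[index].load(Ordering::Relaxed) & mask) != 0
    }
}

fn main() {
    // Hypothetical stand-in for the gossip state guarded by an RwLock.
    let state = RwLock::new(AtomicBits::new(9731));
    // A shared (read) guard is enough to record an insertion, because the
    // mutation happens through atomics rather than through &mut.
    state.read().unwrap().set(42);
    assert!(state.read().unwrap().get(42));
}

Ordering::Relaxed mirrors the diff below; each fetch_or/load is still atomic on its own, and the filter does not rely on any ordering between insertions of different keys.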
Author: behzad nouri
Date: 2020-11-05 15:42:00 +00:00
Committed by: GitHub
Parent: bc62313c66
Commit: 8f0796436a
6 changed files with 86 additions and 22 deletions


@@ -135,7 +135,6 @@ fn bench_add_hash_atomic(bencher: &mut Bencher) {
         for hash_value in &hash_values {
             bloom.add(hash_value);
         }
-        let bloom: Bloom<_> = bloom.into();
         let index = rng.gen_range(0, hash_values.len());
         if !bloom.contains(&hash_values[index]) {
             fail += 1;


@@ -146,16 +146,42 @@ impl<T: BloomHashIndex> From<Bloom<T>> for AtomicBloom<T> {
 }
 
 impl<T: BloomHashIndex> AtomicBloom<T> {
+    fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) {
+        let pos = key.hash_at_index(hash_index) % self.num_bits;
+        // Divide by 64 to figure out which of the
+        // AtomicU64 bit chunks we need to modify.
+        let index = pos >> 6;
+        // (pos & 63) is equivalent to mod 64 so that we can find
+        // the index of the bit within the AtomicU64 to modify.
+        let mask = 1u64 << (pos & 63);
+        (index as usize, mask)
+    }
+
     pub fn add(&self, key: &T) {
         for k in &self.keys {
-            let pos = key.hash_at_index(*k) % self.num_bits;
-            // Divide by 64 to figure out which of the
-            // AtomicU64 bit chunks we need to modify.
-            let index = pos >> 6;
-            // (pos & 63) is equivalent to mod 64 so that we can find
-            // the index of the bit within the AtomicU64 to modify.
-            let bit = 1u64 << (pos & 63);
-            self.bits[index as usize].fetch_or(bit, Ordering::Relaxed);
+            let (index, mask) = self.pos(key, *k);
+            self.bits[index].fetch_or(mask, Ordering::Relaxed);
         }
     }
+
+    pub fn contains(&self, key: &T) -> bool {
+        self.keys.iter().all(|k| {
+            let (index, mask) = self.pos(key, *k);
+            let bit = self.bits[index].load(Ordering::Relaxed) & mask;
+            bit != 0u64
+        })
+    }
+
+    // Only for tests and simulations.
+    pub fn mock_clone(&self) -> Self {
+        Self {
+            keys: self.keys.clone(),
+            bits: self
+                .bits
+                .iter()
+                .map(|v| AtomicU64::new(v.load(Ordering::Relaxed)))
+                .collect(),
+            ..*self
+        }
+    }
 }
@@ -303,6 +329,9 @@ mod test {
         let bloom: AtomicBloom<_> = bloom.into();
         assert_eq!(bloom.num_bits, 9731);
         assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
+        for hash_value in &hash_values {
+            assert!(bloom.contains(hash_value));
+        }
         let bloom: Bloom<_> = bloom.into();
         assert_eq!(bloom.num_bits_set, num_bits_set);
         for hash_value in &hash_values {
@@ -311,6 +340,9 @@ mod test {
         // Round trip, re-inserting the same hash values.
         let bloom: AtomicBloom<_> = bloom.into();
         hash_values.par_iter().for_each(|v| bloom.add(v));
+        for hash_value in &hash_values {
+            assert!(bloom.contains(hash_value));
+        }
         let bloom: Bloom<_> = bloom.into();
         assert_eq!(bloom.num_bits_set, num_bits_set);
         assert_eq!(bloom.bits.len(), 9731);
@@ -326,6 +358,17 @@ mod test {
         assert_eq!(bloom.num_bits, 9731);
         assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
         more_hash_values.par_iter().for_each(|v| bloom.add(v));
+        for hash_value in &hash_values {
+            assert!(bloom.contains(hash_value));
+        }
+        for hash_value in &more_hash_values {
+            assert!(bloom.contains(hash_value));
+        }
+        let false_positive = std::iter::repeat_with(|| solana_sdk::hash::new_rand(&mut rng))
+            .take(10_000)
+            .filter(|hash_value| bloom.contains(hash_value))
+            .count();
+        assert!(false_positive < 2000, "false_positive: {}", false_positive);
         let bloom: Bloom<_> = bloom.into();
         assert_eq!(bloom.bits.len(), 9731);
         assert!(bloom.num_bits_set > num_bits_set);
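
For completeness, a hedged usage sketch of the resulting API: an AtomicBloom shared across threads is populated through &self and queried with the new contains(). The solana_runtime::bloom module path, the Bloom::random(num_items, false_rate, max_bits) constructor, and the concrete argument values are assumptions for illustration, not taken from this diff.

use std::sync::Arc;
use std::thread;

use solana_runtime::bloom::{AtomicBloom, Bloom};
use solana_sdk::hash::{hash, Hash};

fn main() {
    // Build a plain Bloom filter and convert it into the atomic variant,
    // mirroring the round trip exercised in the tests above. The arguments
    // to Bloom::random here are arbitrary example values.
    let bloom: Bloom<Hash> = Bloom::random(1287, 0.1, 7424);
    let bloom: Arc<AtomicBloom<Hash>> = Arc::new(bloom.into());

    // add() takes &self, so several threads can insert concurrently while
    // holding only shared references (or read guards on an enclosing lock).
    let handles: Vec<_> = (0..4u8)
        .map(|i| {
            let bloom = Arc::clone(&bloom);
            thread::spawn(move || bloom.add(&hash(&[i])))
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }

    // The new contains() also goes through &self and reads the same words.
    for i in 0..4u8 {
        assert!(bloom.contains(&hash(&[i])));
    }
}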