implements copy-on-write for staked-nodes (backport #19090) (#22507)

* implements copy-on-write for staked-nodes (#19090)

Bank::staked_nodes and Bank::epoch_staked_nodes redundantly clone the
staked-nodes HashMap even though an immutable reference would suffice:
https://github.com/solana-labs/solana/blob/a9014cece/runtime/src/vote_account.rs#L77

This commit implements copy-on-write semantics for staked-nodes by
wrapping the underlying HashMap in Arc<...>.

(cherry picked from commit f302774cf7)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/stakes.rs
#	runtime/src/vote_account.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
This commit is contained in:
mergify[bot]
2022-01-14 15:26:11 +00:00
committed by GitHub
parent debac00724
commit 40ef11ec86
9 changed files with 51 additions and 39 deletions

2
Cargo.lock generated
View File

@@ -5478,7 +5478,7 @@ dependencies = [
"dir-diff",
"flate2",
"fnv",
"itertools 0.9.0",
"itertools 0.10.1",
"lazy_static",
"libc",
"libloading 0.6.2",

View File

@@ -216,8 +216,9 @@ impl BroadcastRun for BroadcastDuplicatesRun {
let mut stakes: Vec<(Pubkey, u64)> = bank
.epoch_staked_nodes(bank_epoch)
.unwrap()
.into_iter()
.filter(|(pubkey, _)| *pubkey != self.keypair.pubkey())
.iter()
.filter(|(pubkey, _)| **pubkey != self.keypair.pubkey())
.map(|(pubkey, stake)| (*pubkey, *stake))
.collect();
stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
if r_stake == l_stake {

View File

@@ -1749,7 +1749,7 @@ impl ClusterInfo {
Some(root_bank.feature_set.clone()),
)
}
None => (HashMap::new(), None),
None => (Arc::default(), None),
};
let require_stake_for_gossip =
self.require_stake_for_gossip(feature_set.as_deref(), &stakes);
@@ -2542,7 +2542,7 @@ impl ClusterInfo {
// feature does not roll back (if the feature happens to get enabled in
// a minority fork).
let (feature_set, stakes) = match bank_forks {
None => (None, HashMap::default()),
None => (None, Arc::default()),
Some(bank_forks) => {
let bank = bank_forks.read().unwrap().root_bank();
let feature_set = bank.feature_set.clone();

View File

@@ -13,7 +13,10 @@ pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option<LeaderSchedule> {
bank.epoch_staked_nodes(epoch).map(|stakes| {
let mut seed = [0u8; 32];
seed[0..8].copy_from_slice(&epoch.to_le_bytes());
let mut stakes: Vec<_> = stakes.into_iter().collect();
let mut stakes: Vec<_> = stakes
.iter()
.map(|(pubkey, stake)| (*pubkey, *stake))
.collect();
sort_stakes(&mut stakes);
LeaderSchedule::new(
&stakes,
@@ -91,7 +94,11 @@ mod tests {
.genesis_config;
let bank = Bank::new(&genesis_config);
let pubkeys_and_stakes: Vec<_> = bank.staked_nodes().into_iter().collect();
let pubkeys_and_stakes: Vec<_> = bank
.staked_nodes()
.iter()
.map(|(pubkey, stake)| (*pubkey, *stake))
.collect();
let seed = [0u8; 32];
let leader_schedule = LeaderSchedule::new(
&pubkeys_and_stakes,

View File

@@ -1430,9 +1430,9 @@ dependencies = [
[[package]]
name = "itertools"
version = "0.10.0"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319"
checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
dependencies = [
"either",
]
@@ -2764,7 +2764,7 @@ dependencies = [
"bincode",
"byteorder 1.3.4",
"elf",
"itertools 0.10.0",
"itertools 0.10.3",
"log",
"miow 0.2.2",
"net2",
@@ -3501,7 +3501,7 @@ dependencies = [
"dir-diff",
"flate2",
"fnv",
"itertools 0.9.0",
"itertools 0.10.3",
"lazy_static",
"libc",
"libloading",

View File

@@ -21,7 +21,7 @@ crossbeam-channel = "0.4"
dir-diff = "0.3.2"
flate2 = "1.0.14"
fnv = "1.0.7"
itertools = "0.9.0"
itertools = "0.10.1"
lazy_static = "1.4.0"
libc = "0.2.81"
libloading = "0.6.2"

View File

@@ -5282,7 +5282,7 @@ impl Bank {
self.stakes_cache.stakes().stake_delegations().clone()
}
pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
self.stakes_cache.stakes().staked_nodes()
}
@@ -5312,7 +5312,7 @@
&self.epoch_stakes
}
pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<HashMap<Pubkey, u64>> {
pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<Arc<HashMap<Pubkey, u64>>> {
Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
}

View File

@@ -335,7 +335,7 @@ impl Stakes {
&self.stake_delegations
}
pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
self.vote_accounts.staked_nodes()
}

View File

@@ -1,4 +1,5 @@
use {
itertools::Itertools,
serde::{
de::{Deserialize, Deserializer},
ser::{Serialize, Serializer},
@@ -40,9 +41,11 @@ pub struct VoteAccounts {
// Inner Arc is meant to implement copy-on-write semantics as opposed to
// sharing mutations (hence RwLock<Arc<...>> instead of Arc<RwLock<...>>).
staked_nodes: RwLock<
HashMap<
Pubkey, // VoteAccount.vote_state.node_pubkey.
u64, // Total stake across all vote-accounts.
Arc<
HashMap<
Pubkey, // VoteAccount.vote_state.node_pubkey.
u64, // Total stake across all vote-accounts.
>,
>,
>,
staked_nodes_once: Once,
@@ -69,20 +72,19 @@ impl VoteAccount {
}
impl VoteAccounts {
pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
self.staked_nodes_once.call_once(|| {
let mut staked_nodes = HashMap::new();
for (stake, vote_account) in
self.vote_accounts.values().filter(|(stake, _)| *stake != 0)
{
if let Some(node_pubkey) = vote_account.node_pubkey() {
staked_nodes
.entry(node_pubkey)
.and_modify(|s| *s += *stake)
.or_insert(*stake);
}
}
*self.staked_nodes.write().unwrap() = staked_nodes
let staked_nodes = self
.vote_accounts
.values()
.filter(|(stake, _)| *stake != 0)
.filter_map(|(stake, vote_account)| {
let node_pubkey = vote_account.node_pubkey()?;
Some((node_pubkey, stake))
})
.into_grouping_map()
.aggregate(|acc, _node_pubkey, stake| Some(acc.unwrap_or_default() + stake));
*self.staked_nodes.write().unwrap() = Arc::new(staked_nodes)
});
self.staked_nodes.read().unwrap().clone()
}
@@ -135,9 +137,9 @@ impl VoteAccounts {
fn add_node_stake(&mut self, stake: u64, vote_account: &VoteAccount) {
if stake != 0 && self.staked_nodes_once.is_completed() {
if let Some(node_pubkey) = vote_account.node_pubkey() {
self.staked_nodes
.write()
.unwrap()
let mut staked_nodes = self.staked_nodes.write().unwrap();
let staked_nodes = Arc::make_mut(&mut staked_nodes);
staked_nodes
.entry(node_pubkey)
.and_modify(|s| *s += stake)
.or_insert(stake);
@@ -148,7 +150,9 @@ impl VoteAccounts {
fn sub_node_stake(&mut self, stake: u64, vote_account: &VoteAccount) {
if stake != 0 && self.staked_nodes_once.is_completed() {
if let Some(node_pubkey) = vote_account.node_pubkey() {
match self.staked_nodes.write().unwrap().entry(node_pubkey) {
let mut staked_nodes = self.staked_nodes.write().unwrap();
let staked_nodes = Arc::make_mut(&mut staked_nodes);
match staked_nodes.entry(node_pubkey) {
Entry::Vacant(_) => panic!("this should not happen!"),
Entry::Occupied(mut entry) => match entry.get().cmp(&stake) {
Ordering::Less => panic!("subtraction value exceeds node's stake"),
@@ -485,7 +489,7 @@ mod tests {
if (k + 1) % 128 == 0 {
assert_eq!(
staked_nodes(&accounts[..k + 1]),
vote_accounts.staked_nodes()
*vote_accounts.staked_nodes()
);
}
}
@@ -495,7 +499,7 @@
let (pubkey, (_, _)) = accounts.swap_remove(index);
vote_accounts.remove(&pubkey);
if (k + 1) % 32 == 0 {
assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
assert_eq!(staked_nodes(&accounts), *vote_accounts.staked_nodes());
}
}
// Modify the stakes for some of the accounts.
@@ -510,7 +514,7 @@
}
*stake = new_stake;
if (k + 1) % 128 == 0 {
assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
assert_eq!(staked_nodes(&accounts), *vote_accounts.staked_nodes());
}
}
// Remove everything.
@@ -519,7 +523,7 @@
let (pubkey, (_, _)) = accounts.swap_remove(index);
vote_accounts.remove(&pubkey);
if accounts.len() % 32 == 0 {
assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
assert_eq!(staked_nodes(&accounts), *vote_accounts.staked_nodes());
}
}
assert!(vote_accounts.staked_nodes.read().unwrap().is_empty());