Set the correct root in block commitment cache initialization (#22750) (#22757)

* Set the correct root in block commitment cache initialization

* clean up test

* bump

(cherry picked from commit d9c259a2313f620b4b64b7cec0d55dd4234fbcd9)

Co-authored-by: Justin Starry <justin@solana.com>
This commit is contained in:
mergify[bot] 2022-01-27 03:44:59 +00:00 committed by GitHub
parent d1174f677e
commit b0e0410003
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 26 additions and 33 deletions

View File

@@ -537,7 +537,7 @@ impl Validator {
cluster_info.restore_contact_info(ledger_path, config.contact_save_interval); cluster_info.restore_contact_info(ledger_path, config.contact_save_interval);
let cluster_info = Arc::new(cluster_info); let cluster_info = Arc::new(cluster_info);
let mut block_commitment_cache = BlockCommitmentCache::default(); let mut block_commitment_cache = BlockCommitmentCache::default();
block_commitment_cache.initialize_slots(bank.slot()); block_commitment_cache.initialize_slots(bank.slot(), bank_forks.read().unwrap().root());
let block_commitment_cache = Arc::new(RwLock::new(block_commitment_cache)); let block_commitment_cache = Arc::new(RwLock::new(block_commitment_cache));
let optimistically_confirmed_bank = let optimistically_confirmed_bank =

View File

@@ -1799,13 +1799,10 @@ fn test_validator_saves_tower() {
let file_tower_storage = FileTowerStorage::new(ledger_path.clone()); let file_tower_storage = FileTowerStorage::new(ledger_path.clone());
// Wait for some votes to be generated // Wait for some votes to be generated
let mut last_replayed_root;
loop { loop {
if let Ok(slot) = validator_client.get_slot_with_commitment(CommitmentConfig::processed()) { if let Ok(slot) = validator_client.get_slot_with_commitment(CommitmentConfig::processed()) {
trace!("current slot: {}", slot); trace!("current slot: {}", slot);
if slot > 2 { if slot > 2 {
// this will be the root next time a validator starts
last_replayed_root = slot;
break; break;
} }
} }
@@ -1817,35 +1814,31 @@ fn test_validator_saves_tower() {
let tower1 = Tower::restore(&file_tower_storage, &validator_id).unwrap(); let tower1 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower1: {:?}", tower1); trace!("tower1: {:?}", tower1);
assert_eq!(tower1.root(), 0); assert_eq!(tower1.root(), 0);
assert!(tower1.last_voted_slot().is_some());
// Restart the validator and wait for a new root // Restart the validator and wait for a new root
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified); cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap(); let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for the first root // Wait for the first new root
loop { let last_replayed_root = loop {
#[allow(deprecated)] #[allow(deprecated)]
// This test depends on knowing the immediate root, without any delay from the commitment // This test depends on knowing the immediate root, without any delay from the commitment
// service, so the deprecated CommitmentConfig::root() is retained // service, so the deprecated CommitmentConfig::root() is retained
if let Ok(root) = validator_client.get_slot_with_commitment(CommitmentConfig::root()) { if let Ok(root) = validator_client.get_slot_with_commitment(CommitmentConfig::root()) {
trace!("current root: {}", root); trace!("current root: {}", root);
if root > last_replayed_root + 1 { if root > 0 {
last_replayed_root = root; break root;
break;
} }
} }
sleep(Duration::from_millis(50)); sleep(Duration::from_millis(50));
} };
// Stop validator, and check saved tower // Stop validator, and check saved tower
let recent_slot = validator_client
.get_slot_with_commitment(CommitmentConfig::processed())
.unwrap();
let validator_info = cluster.exit_node(&validator_id); let validator_info = cluster.exit_node(&validator_id);
let tower2 = Tower::restore(&file_tower_storage, &validator_id).unwrap(); let tower2 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower2: {:?}", tower2); trace!("tower2: {:?}", tower2);
assert_eq!(tower2.root(), last_replayed_root); assert_eq!(tower2.root(), last_replayed_root);
last_replayed_root = recent_slot;
// Rollback saved tower to `tower1` to simulate a validator starting from a newer snapshot // Rollback saved tower to `tower1` to simulate a validator starting from a newer snapshot
// without having to wait for that snapshot to be generated in this test // without having to wait for that snapshot to be generated in this test
@@ -1857,7 +1850,7 @@ fn test_validator_saves_tower() {
let validator_client = cluster.get_validator_client(&validator_id).unwrap(); let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for a new root, demonstrating the validator was able to make progress from the older `tower1` // Wait for a new root, demonstrating the validator was able to make progress from the older `tower1`
loop { let new_root = loop {
#[allow(deprecated)] #[allow(deprecated)]
// This test depends on knowing the immediate root, without any delay from the commitment // This test depends on knowing the immediate root, without any delay from the commitment
// service, so the deprecated CommitmentConfig::root() is retained // service, so the deprecated CommitmentConfig::root() is retained
@@ -1868,17 +1861,18 @@ fn test_validator_saves_tower() {
last_replayed_root last_replayed_root
); );
if root > last_replayed_root { if root > last_replayed_root {
break; break root;
} }
} }
sleep(Duration::from_millis(50)); sleep(Duration::from_millis(50));
} };
// Check the new root is reflected in the saved tower state // Check the new root is reflected in the saved tower state
let mut validator_info = cluster.exit_node(&validator_id); let mut validator_info = cluster.exit_node(&validator_id);
let tower3 = Tower::restore(&file_tower_storage, &validator_id).unwrap(); let tower3 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower3: {:?}", tower3); trace!("tower3: {:?}", tower3);
assert!(tower3.root() > last_replayed_root); let tower3_root = tower3.root();
assert!(tower3_root >= new_root);
// Remove the tower file entirely and allow the validator to start without a tower. It will // Remove the tower file entirely and allow the validator to start without a tower. It will
// rebuild tower from its vote account contents // rebuild tower from its vote account contents
@@ -1888,26 +1882,25 @@ fn test_validator_saves_tower() {
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified); cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap(); let validator_client = cluster.get_validator_client(&validator_id).unwrap();
// Wait for a couple more slots to pass so another vote occurs // Wait for another new root
let current_slot = validator_client let new_root = loop {
.get_slot_with_commitment(CommitmentConfig::processed()) #[allow(deprecated)]
.unwrap(); // This test depends on knowing the immediate root, without any delay from the commitment
loop { // service, so the deprecated CommitmentConfig::root() is retained
if let Ok(slot) = validator_client.get_slot_with_commitment(CommitmentConfig::processed()) { if let Ok(root) = validator_client.get_slot_with_commitment(CommitmentConfig::root()) {
trace!("current_slot: {}, slot: {}", current_slot, slot); trace!("current root: {}, last tower root: {}", root, tower3_root);
if slot > current_slot + 1 { if root > tower3_root {
break; break root;
} }
} }
sleep(Duration::from_millis(50)); sleep(Duration::from_millis(50));
} };
cluster.close_preserve_ledgers(); cluster.close_preserve_ledgers();
let tower4 = Tower::restore(&file_tower_storage, &validator_id).unwrap(); let tower4 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower4: {:?}", tower4); trace!("tower4: {:?}", tower4);
// should tower4 advance 1 slot compared to tower3???? assert!(tower4.root() >= new_root);
assert_eq!(tower4.root(), tower3.root() + 1);
} }
fn save_tower(tower_path: &Path, tower: &Tower, node_keypair: &Keypair) { fn save_tower(tower_path: &Path, tower: &Tower, node_keypair: &Keypair) {

View File

@@ -146,7 +146,7 @@ fn initialize_from_snapshot(
OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
let mut block_commitment_cache = BlockCommitmentCache::default(); let mut block_commitment_cache = BlockCommitmentCache::default();
block_commitment_cache.initialize_slots(bank0_slot); block_commitment_cache.initialize_slots(bank0_slot, bank0_slot);
let block_commitment_cache = Arc::new(RwLock::new(block_commitment_cache)); let block_commitment_cache = Arc::new(RwLock::new(block_commitment_cache));
ReplicaBankInfo { ReplicaBankInfo {

View File

@@ -193,9 +193,9 @@ impl BlockCommitmentCache {
self.commitment_slots.highest_confirmed_root = root; self.commitment_slots.highest_confirmed_root = root;
} }
pub fn initialize_slots(&mut self, slot: Slot) { pub fn initialize_slots(&mut self, slot: Slot, root: Slot) {
self.commitment_slots.slot = slot; self.commitment_slots.slot = slot;
self.commitment_slots.root = slot; self.commitment_slots.root = root;
} }
pub fn set_all_slots(&mut self, slot: Slot, root: Slot) { pub fn set_all_slots(&mut self, slot: Slot, root: Slot) {