Data plane verification (#4639)
* Add signature to blob
* Change Signable trait to support returning references to signable data
* Add signing to broadcast
* Verify signatures in window_service
* Add testing for signatures to erasure
* Add RPC for getting current slot, consume RPC call in test_repairman_catchup for more deterministic results
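For a sense of what the Signable change is about, here is a minimal, self-contained sketch of a trait that hands back a reference to the bytes to be signed, so default sign/verify methods can work without copying. All names and the hash-based "signature" below are illustrative stand-ins, not the solana_sdk API (which signs with ed25519 keypairs):

use std::borrow::Cow;

// Toy stand-ins for the SDK's key and signature types; illustrative only.
#[derive(Clone, Default, PartialEq)]
struct Signature(u64);

struct Keypair(u64);

impl Keypair {
    // Toy "signing": mix the key into a rolling hash of the data.
    fn sign_bytes(&self, data: &[u8]) -> Signature {
        Signature(
            data.iter()
                .fold(self.0, |h, b| h.wrapping_mul(31).wrapping_add(*b as u64)),
        )
    }
}

// A Signable-style trait: implementors expose the bytes covered by the
// signature (borrowed where possible, hence Cow) and get sign/verify for free.
trait Signable {
    fn signable_data(&self) -> Cow<[u8]>;
    fn get_signature(&self) -> Signature;
    fn set_signature(&mut self, signature: Signature);

    fn sign(&mut self, keypair: &Keypair) {
        let signature = keypair.sign_bytes(&self.signable_data());
        self.set_signature(signature);
    }

    fn verify(&self, keypair: &Keypair) -> bool {
        // A real implementation would verify against the signer's pubkey.
        keypair.sign_bytes(&self.signable_data()) == self.get_signature()
    }
}

// A toy blob: only the payload is covered by the signature, and it can be
// handed back by reference rather than copied.
struct Blob {
    data: Vec<u8>,
    signature: Signature,
}

impl Signable for Blob {
    fn signable_data(&self) -> Cow<[u8]> {
        Cow::Borrowed(self.data.as_slice())
    }
    fn get_signature(&self) -> Signature {
        self.signature.clone()
    }
    fn set_signature(&mut self, signature: Signature) {
        self.signature = signature;
    }
}

fn main() {
    let keypair = Keypair(42);
    let mut blob = Blob {
        data: b"entry bytes".to_vec(),
        signature: Signature::default(),
    };
    blob.sign(&keypair);
    assert!(blob.verify(&keypair));
}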
@@ -1,6 +1,5 @@
extern crate solana;

use crate::solana::blocktree::Blocktree;
use hashbrown::HashSet;
use log::*;
use solana::cluster::Cluster;
@@ -9,8 +8,10 @@ use solana::gossip_service::discover_cluster;
use solana::local_cluster::{ClusterConfig, LocalCluster};
use solana::validator::ValidatorConfig;
use solana_runtime::epoch_schedule::{EpochSchedule, MINIMUM_SLOT_LENGTH};
use solana_sdk::client::SyncClient;
use solana_sdk::poh_config::PohConfig;
use solana_sdk::timing;
use std::thread::sleep;
use std::time::Duration;

#[test]
@@ -265,11 +266,11 @@ fn run_repairman_catchup(num_repairmen: u64) {

    let repairman_pubkeys: HashSet<_> = cluster.get_node_pubkeys().into_iter().collect();
    let epoch_schedule = EpochSchedule::new(num_slots_per_epoch, stakers_slot_offset, true);
    let num_warmup_epochs = (epoch_schedule.get_stakers_epoch(0) + 1) as f64;
    let num_warmup_epochs = epoch_schedule.get_stakers_epoch(0) + 1;

    // Sleep for longer than the first N warmup epochs, with a one epoch buffer for timing issues
    cluster_tests::sleep_n_epochs(
        num_warmup_epochs + 1.0,
        num_warmup_epochs as f64 + 1.0,
        &cluster.genesis_block.poh_config,
        num_ticks_per_slot,
        num_slots_per_epoch,
@@ -278,7 +279,6 @@ fn run_repairman_catchup(num_repairmen: u64) {
    // Start up a new node, wait for catchup. Backwards repair won't be sufficient because the
    // leader is sending blobs past this validator's first two confirmed epochs. Thus, the repairman
    // protocol will have to kick in for this validator to repair.

    cluster.add_validator(&validator_config, repairee_stake);

    let all_pubkeys = cluster.get_node_pubkeys();
@@ -288,28 +288,18 @@ fn run_repairman_catchup(num_repairmen: u64) {
        .unwrap();

    // Wait for repairman protocol to catch this validator up
    cluster_tests::sleep_n_epochs(
        num_warmup_epochs + 1.0,
        &cluster.genesis_block.poh_config,
        num_ticks_per_slot,
        num_slots_per_epoch,
    );
    let repairee_client = cluster.get_validator_client(&repairee_id).unwrap();
    let mut current_slot = 0;

    cluster.close_preserve_ledgers();
    let validator_ledger_path = cluster.fullnode_infos[&repairee_id].ledger_path.clone();

    // Expect at least the first two epochs to have been rooted after waiting 3 epochs.
    let num_expected_slots = num_slots_per_epoch * 2;
    let validator_ledger = Blocktree::open(&validator_ledger_path).unwrap();
    let validator_rooted_slots: Vec<_> =
        validator_ledger.rooted_slot_iterator(0).unwrap().collect();

    if validator_rooted_slots.len() as u64 <= num_expected_slots {
        error!(
            "Num expected slots: {}, number of rooted slots: {}",
            num_expected_slots,
            validator_rooted_slots.len()
        );
    // Make sure this validator can get repaired past the first few warmup epochs
    let target_slot = (num_warmup_epochs) * num_slots_per_epoch + 1;
    while current_slot <= target_slot {
        trace!("current_slot: {}", current_slot);
        if let Ok(slot) = repairee_client.get_slot() {
            current_slot = slot;
        } else {
            continue;
        }
        sleep(Duration::from_secs(1));
    }
    assert!(validator_rooted_slots.len() as u64 > num_expected_slots);
}
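The hunk above reworks the catch-up wait: instead of only sleeping a fixed number of epochs and then inspecting rooted slots in the ledger, the test polls the repairee over the new get_slot RPC until it has demonstrably advanced past a target slot. A self-contained sketch of that polling pattern; the wait_for_slot helper and the closure standing in for the RPC client are hypothetical, not part of the Solana API:

use std::thread::sleep;
use std::time::{Duration, Instant};

/// Poll `get_slot` until it reports a slot past `target_slot`, or give up
/// after `timeout`. `get_slot` stands in for an RPC call that may fail.
fn wait_for_slot<F>(mut get_slot: F, target_slot: u64, timeout: Duration) -> bool
where
    F: FnMut() -> Option<u64>,
{
    let start = Instant::now();
    while start.elapsed() < timeout {
        if let Some(slot) = get_slot() {
            if slot > target_slot {
                return true;
            }
        }
        sleep(Duration::from_millis(100));
    }
    false
}

fn main() {
    // Toy stand-in for a validator that advances one slot per poll.
    let mut fake_slot = 0u64;
    let caught_up = wait_for_slot(
        || {
            fake_slot += 1;
            Some(fake_slot)
        },
        10,
        Duration::from_secs(5),
    );
    assert!(caught_up);
}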
@@ -18,6 +18,7 @@ use solana::streamer;
use solana::tvu::{Sockets, Tvu};
use solana::validator;
use solana_runtime::epoch_schedule::MINIMUM_SLOT_LENGTH;
use solana_sdk::signature::Signable;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
use std::fs::remove_dir_all;
@@ -39,9 +40,12 @@ fn new_gossip(
#[test]
fn test_replay() {
    solana_logger::setup();
    let leader = Node::new_localhost();
    let leader_keypair = Keypair::new();
    let leader = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());

    let target1_keypair = Keypair::new();
    let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());

    let target2 = Node::new_localhost();
    let exit = Arc::new(AtomicBool::new(false));
@@ -168,9 +172,12 @@ fn test_replay() {
        let blobs = entries.to_shared_blobs();
        index_blobs(&blobs, &leader.info.id, blob_idx, 1, 0);
        blob_idx += blobs.len() as u64;
        blobs
            .iter()
            .for_each(|b| b.write().unwrap().meta.set_addr(&tvu_addr));
        blobs.iter().for_each(|b| {
            let mut b_w = b.write().unwrap();
            b_w.set_id(&leader_keypair.pubkey());
            b_w.meta.set_addr(&tvu_addr);
            b_w.sign(&leader_keypair);
        });
        msgs.extend(blobs.into_iter());
    }
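The closure above now stamps each outgoing blob with the leader's pubkey and signs it before it is sent to the TVU port; per the commit message, window_service verifies those signatures on receipt. A toy sketch of that kind of receive-side filtering, with a rolling hash standing in for real ed25519 verification and every name below hypothetical rather than the actual Solana API:

// A toy blob: the id is the claimed sender, and the signature covers the payload.
struct Blob {
    id: u64,
    payload: Vec<u8>,
    signature: u64,
}

// Toy "signature": a rolling hash keyed by the sender id. A real implementation
// verifies an ed25519 signature against the sender's public key.
fn toy_sign(key: u64, data: &[u8]) -> u64 {
    data.iter()
        .fold(key, |h, b| h.wrapping_mul(31).wrapping_add(*b as u64))
}

impl Blob {
    fn verify(&self) -> bool {
        toy_sign(self.id, &self.payload) == self.signature
    }
}

// Keep only blobs whose signature checks out; forged or corrupted blobs are dropped.
fn retain_verified(blobs: Vec<Blob>) -> Vec<Blob> {
    blobs.into_iter().filter(Blob::verify).collect()
}

fn main() {
    let good = Blob {
        id: 7,
        payload: b"entry".to_vec(),
        signature: toy_sign(7, b"entry"),
    };
    let forged = Blob {
        id: 7,
        payload: b"entry".to_vec(),
        signature: 0,
    };
    assert_eq!(retain_verified(vec![good, forged]).len(), 1);
}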