Only store the fullnode's pubkey
Only vote_signer is used for signing
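In short, a sketch distilled from the hunks below (nothing here beyond what the diff itself shows): Fullnode stops owning an Arc<Keypair> and instead caches the derived Pubkey once, replacing every keypair.pubkey() call site with id. The keypair still reaches ClusterInfo::new_with_keypair for gossip, and per the commit message vote signing goes through the VoteSignerProxy, so no signing path loses access to a key.

    // Before: the struct carried the full keypair just to re-derive its pubkey.
    pub struct Fullnode {
        keypair: Arc<Keypair>,
        // ...
    }

    // After: derive the pubkey once in Fullnode::new
    // (`let id = keypair.pubkey();`) and store only that.
    pub struct Fullnode {
        id: Pubkey,
        // ...
    }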
@@ -16,6 +16,7 @@ use crate::tvu::{Sockets, Tvu, TvuReturnType};
 use crate::vote_signer_proxy::VoteSignerProxy;
 use log::Level;
 use solana_sdk::hash::Hash;
+use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::timing::{duration_as_ms, timestamp};
 use std::net::UdpSocket;
@@ -87,7 +88,7 @@ impl Default for FullnodeConfig {
 }
 
 pub struct Fullnode {
-    keypair: Arc<Keypair>,
+    id: Pubkey,
     exit: Arc<AtomicBool>,
     rpc_service: Option<JsonRpcService>,
     rpc_pubsub_service: Option<PubSubService>,
@@ -111,6 +112,7 @@ impl Fullnode {
         entrypoint_info_option: Option<&NodeInfo>,
         config: FullnodeConfig,
     ) -> Self {
+        let id = keypair.pubkey();
         let (genesis_block, db_ledger) = Self::make_db_ledger(ledger_path);
         let (bank, entry_height, last_entry_id) =
             Self::new_bank_from_db_ledger(&genesis_block, &db_ledger, leader_scheduler);
@@ -126,7 +128,7 @@ impl Fullnode {
         let bank = Arc::new(bank);
 
         node.info.wallclock = timestamp();
-        assert_eq!(keypair.pubkey(), node.info.id);
+        assert_eq!(id, node.info.id);
         let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_keypair(
             node.info.clone(),
             keypair.clone(),
@@ -252,15 +254,15 @@ impl Fullnode {
             config.sigverify_disabled,
             max_tick_height,
             &last_entry_id,
-            keypair.pubkey(),
-            scheduled_leader == keypair.pubkey(),
+            id,
+            scheduled_leader == id,
             &to_validator_sender,
         );
 
         inc_new_counter_info!("fullnode-new", 1);
 
         Self {
-            keypair,
+            id,
             cluster_info,
             bank,
             sigverify_disabled: config.sigverify_disabled,
@@ -287,7 +289,7 @@ impl Fullnode {
         // when the new leader schedule was being generated, and there are no other validators
         // in the active set, then the leader scheduler will pick the same leader again, so
         // check for that
-        if scheduled_leader == self.keypair.pubkey() {
+        if scheduled_leader == self.id {
             let (last_entry_id, entry_height) = self.node_services.tvu.get_state();
             self.validator_to_leader(self.bank.tick_height(), entry_height, last_entry_id);
             Ok(())
@@ -305,10 +307,7 @@ impl Fullnode {
 
     pub fn validator_to_leader(&mut self, tick_height: u64, entry_height: u64, last_id: Hash) {
         trace!("validator_to_leader");
-        self.cluster_info
-            .write()
-            .unwrap()
-            .set_leader(self.keypair.pubkey());
+        self.cluster_info.write().unwrap().set_leader(self.id);
 
         let max_tick_height = {
             let ls_lock = self.bank.leader_scheduler.read().unwrap();
@@ -332,7 +331,7 @@ impl Fullnode {
             max_tick_height,
             entry_height,
             &last_id,
-            self.keypair.pubkey(),
+            self.id,
             &to_validator_sender,
         )
     }
@@ -794,10 +793,7 @@ mod tests {
             t_responder
         };
 
-        assert_ne!(
-            validator.bank.get_current_leader().unwrap().0,
-            validator.keypair.pubkey()
-        );
+        assert_ne!(validator.bank.get_current_leader().unwrap().0, validator.id);
         loop {
             let should_be_forwarder = validator.role_notifiers.1.try_recv();
             let should_be_leader = validator.role_notifiers.0.try_recv();