Hook up getBlockTime rpc to real data (#7476)

* Add blocktree timestamp helper functions and tests

* Flesh out blocktree::get_block_time

* Move stakes up into rpc to make testing easier; expand tests

* Review comments
Tyera Eulberg authored 2019-12-14 12:23:02 -07:00; committed via GitHub
parent d01ea20273
commit 6b88da2b82
3 changed files with 335 additions and 48 deletions
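
With this change, getBlockTime estimates when a rooted slot was produced from the timestamps that validators attach to their vote transactions, weighted by stake, rather than extrapolating slot * DEFAULT_MS_PER_SLOT from zero. The request/response shape is unchanged; the exchange below is illustrative, using the slot and timestamp values from the tests added in this commit:

Request:
{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[2]}

Response (estimated Unix timestamp, in seconds):
{"jsonrpc":"2.0","result":1576183541,"id":1}

Response when no stake-weighted timestamp can be derived:
{"jsonrpc":"2.0","result":null,"id":1}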

rpc.rs

@ -33,6 +33,7 @@ use solana_sdk::{
};
use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
use std::{
    collections::HashMap,
    net::{SocketAddr, UdpSocket},
    sync::{Arc, RwLock},
    thread::sleep,
@ -313,19 +314,19 @@ impl JsonRpcRequestProcessor {
        Ok(self.blocktree.get_confirmed_block(slot).ok())
    }

-   // The `get_block_time` method is not fully implemented. It currently returns `slot` *
-   // DEFAULT_MS_PER_SLOT offset from 0 for all requests, and null for any values that would
-   // overflow.
    pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
-       // This calculation currently assumes that bank.ticks_per_slot and bank.slots_per_year will
-       // remain unchanged after genesis. If these values will be variable in the future, those
-       // timing parameters will need to be stored persistently, and this calculation will likely
-       // need to be moved upstream into blocktree. Also, an explicit commitment level will need
-       // to be set.
        // This calculation currently assumes that bank.slots_per_year will remain unchanged after
        // genesis (i.e. that this bank's slots_per_year will be applicable to any rooted slot being
        // queried). If these values will be variable in the future, those timing parameters will
        // need to be stored persistently, and the slot_duration calculation will likely need to be
        // moved upstream into blocktree. Also, an explicit commitment level will need to be set.
        let bank = self.bank(None);
        let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
        let epoch = bank.epoch_schedule().get_epoch(slot);
        let stakes = HashMap::new();
        let stakes = bank.epoch_vote_accounts(epoch).unwrap_or(&stakes);
-       Ok(self.blocktree.get_block_time(slot, slot_duration))
        Ok(self.blocktree.get_block_time(slot, slot_duration, stakes))
    }
}
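
For reference, slot_duration_from_slots_per_year converts the bank's slots-per-year figure into a per-slot Duration before it is handed to the blocktree. A rough, self-contained sketch of that conversion (illustrative only, not the SDK implementation; the year length and slot count are assumptions):

// Illustrative sketch: spread one year of nanoseconds evenly across the
// year's slots. With roughly 78.9 million slots per year this comes out to
// ~400ms per slot, the slot_duration the handler passes to get_block_time.
fn approx_slot_duration(slots_per_year: f64) -> std::time::Duration {
    let ns_per_year = 365.25 * 24.0 * 60.0 * 60.0 * 1e9;
    std::time::Duration::from_nanos((ns_per_year / slots_per_year) as u64)
}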
@ -1008,7 +1009,9 @@ pub mod tests {
        replay_stage::tests::create_test_transactions_and_populate_blocktree,
    };
    use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
-   use solana_ledger::get_tmp_ledger_path;
    use solana_ledger::{
        blocktree::entries_to_test_shreds, entry::next_entry_mut, get_tmp_ledger_path,
    };
    use solana_sdk::{
        fee_calculator::DEFAULT_BURN_PERCENT,
        hash::{hash, Hash},
@ -1044,6 +1047,14 @@ pub mod tests {
    }

    fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
        start_rpc_handler_with_tx_and_blocktree(pubkey, vec![1], 0)
    }

    fn start_rpc_handler_with_tx_and_blocktree(
        pubkey: &Pubkey,
        blocktree_roots: Vec<Slot>,
        default_timestamp: i64,
    ) -> RpcHandler {
        let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
        let bank = bank_forks.read().unwrap().working_bank();
@ -1073,6 +1084,32 @@ pub mod tests {
        blocktree.clone(),
    );
// Add timestamp vote to blocktree
let vote = Vote {
slots: vec![1],
hash: Hash::default(),
timestamp: Some(default_timestamp),
};
let vote_ix = vote_instruction::vote(
&leader_vote_keypair.pubkey(),
&leader_vote_keypair.pubkey(),
vote,
);
let vote_tx = Transaction::new_signed_instructions(
&[&leader_vote_keypair],
vec![vote_ix],
Hash::default(),
);
let shreds = entries_to_test_shreds(
vec![next_entry_mut(&mut Hash::default(), 0, vec![vote_tx])],
1,
0,
true,
0,
);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&blocktree_roots).unwrap();
        let leader_pubkey = *bank.collector_id();
        let exit = Arc::new(AtomicBool::new(false));
        let validator_exit = create_validator_exit(&exit);
@ -1864,11 +1901,29 @@ pub mod tests {
    #[test]
    fn test_get_block_time() {
        let bob_pubkey = Pubkey::new_rand();
-       let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
        let base_timestamp = 1576183541;
        let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx_and_blocktree(
            &bob_pubkey,
            vec![1, 2, 3, 4, 5, 6, 7],
            base_timestamp,
        );
        let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());

-       let slot = 100;
        let slot = 2;
        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#,
            slot
        );
        let res = io.handle_request_sync(&req, meta.clone());
        let expected = format!(r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#, base_timestamp);
        let expected: Response =
            serde_json::from_str(&expected).expect("expected response deserialization");
        let result: Response = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        assert_eq!(expected, result);

        let slot = 7;
        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#,
            slot
@ -1876,7 +1931,7 @@ pub mod tests {
        let res = io.handle_request_sync(&req, meta.clone());
        let expected = format!(
            r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#,
-           (slot * slot_duration).as_secs()
            base_timestamp + (5 * slot_duration).as_secs() as i64
        );
        let expected: Response =
            serde_json::from_str(&expected).expect("expected response deserialization");
@ -1889,22 +1944,6 @@ pub mod tests {
r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#, r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#,
slot slot
); );
let res = io.handle_request_sync(&req, meta.clone());
let expected = format!(
r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#,
(slot * slot_duration).as_secs()
);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
let slot = 123450000000000000u64;
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#,
slot
);
let res = io.handle_request_sync(&req, meta); let res = io.handle_request_sync(&req, meta);
let expected = format!(r#"{{"jsonrpc":"2.0","result":null,"id":1}}"#); let expected = format!(r#"{{"jsonrpc":"2.0","result":null,"id":1}}"#);
let expected: Response = let expected: Response =

blocktree.rs

@ -11,10 +11,10 @@ use crate::{
    entry::{create_ticks, Entry},
    erasure::ErasureConfig,
    leader_schedule_cache::LeaderScheduleCache,
    rooted_slot_iterator::RootedSlotIterator,
    shred::{Shred, Shredder},
};
use bincode::deserialize;
-use chrono::{offset::TimeZone, Duration as ChronoDuration, Utc};
use log::*;
use rayon::{
    iter::{IntoParallelRefIterator, ParallelIterator},
@ -26,18 +26,21 @@ use solana_measure::measure::Measure;
use solana_metrics::{datapoint_debug, datapoint_error};
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::{
    account::Account,
    clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, MS_PER_TICK},
    genesis_config::GenesisConfig,
    hash::Hash,
    instruction_processor_utils::limited_deserialize,
    pubkey::Pubkey,
    signature::{Keypair, KeypairUtil, Signature},
-   timing::{duration_as_ms, timestamp},
    timing::timestamp,
    transaction::Transaction,
};
use solana_vote_program::{vote_instruction::VoteInstruction, vote_state::TIMESTAMP_SLOT_INTERVAL};
use std::{
    cell::RefCell,
    cmp,
    collections::HashMap,
-   convert::TryFrom,
    fs,
    path::{Path, PathBuf},
    rc::Rc,
@ -58,6 +61,7 @@ thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000;
pub const MAX_TURBINE_PROPAGATION_IN_MS: u64 = 100;
pub const MAX_TURBINE_DELAY_IN_TICKS: u64 = MAX_TURBINE_PROPAGATION_IN_MS / MS_PER_TICK;
const TIMESTAMP_SLOT_RANGE: usize = 5;

pub type CompletedSlotsReceiver = Receiver<Vec<u64>>;
@ -1182,25 +1186,74 @@ impl Blocktree {
        }
    }

-   // The `get_block_time` method is not fully implemented (depends on validator timestamp
-   // transactions). It currently returns Some(`slot` * DEFAULT_MS_PER_SLOT) offset from 0 for all
-   // transactions, and None for any values that would overflow any step.
-   pub fn get_block_time(&self, slot: Slot, slot_duration: Duration) -> Option<UnixTimestamp> {
-       let ms_per_slot = duration_as_ms(&slot_duration);
-       let (offset_millis, overflow) = slot.overflowing_mul(ms_per_slot);
-       if !overflow {
-           i64::try_from(offset_millis)
-               .ok()
-               .and_then(|millis| {
-                   let median_datetime = Utc.timestamp(0, 0);
-                   median_datetime.checked_add_signed(ChronoDuration::milliseconds(millis))
-               })
-               .map(|dt| dt.timestamp())
    pub fn get_block_time(
        &self,
        slot: Slot,
        slot_duration: Duration,
        stakes: &HashMap<Pubkey, (u64, Account)>,
    ) -> Option<UnixTimestamp> {
        let mut total_stake = 0;
        let stake_weighted_timestamps_sum: u64 = self
            .get_timestamp_slots(slot, TIMESTAMP_SLOT_INTERVAL)
            .iter()
            .flat_map(|timestamp_slot| {
                let offset = (slot - timestamp_slot) as u32 * slot_duration;
                if let Ok(timestamps) = self.get_block_timestamps(*timestamp_slot) {
                    timestamps
                        .iter()
                        .filter_map(|(vote_pubkey, timestamp)| {
                            stakes.get(vote_pubkey).map(|(stake, _account)| {
                                total_stake += stake;
                                (*timestamp as u64 + offset.as_secs()) * stake
                            })
                        })
                        .collect()
                } else {
                    vec![]
                }
            })
            .sum();
        if total_stake > 0 {
            let mean_timestamp: u64 = stake_weighted_timestamps_sum / total_stake;
            Some(mean_timestamp as i64)
        } else {
            None
        }
    }
fn get_timestamp_slots(&self, slot: Slot, timestamp_interval: u64) -> Vec<Slot> {
let rooted_slots = RootedSlotIterator::new(0, &self);
if !self.is_root(slot) || rooted_slots.is_err() {
return vec![];
}
let slots: Vec<Slot> = rooted_slots
.unwrap()
.map(|(iter_slot, _)| iter_slot)
.filter(|&iter_slot| iter_slot <= slot)
.collect();
if slots.len() < TIMESTAMP_SLOT_RANGE {
return slots;
}
let recent_timestamp_slot_position = slots
.iter()
.position(|&x| x >= slot - (slot % timestamp_interval))
.unwrap();
let filtered_iter = if slots.len() - TIMESTAMP_SLOT_RANGE >= recent_timestamp_slot_position
{
slots.iter().skip(recent_timestamp_slot_position)
} else {
let earlier_timestamp_slot_position = slots
.iter()
.position(|&x| x >= slot - (slot % timestamp_interval) - timestamp_interval)
.unwrap();
slots.iter().skip(earlier_timestamp_slot_position)
};
filtered_iter.take(TIMESTAMP_SLOT_RANGE).cloned().collect()
}
    pub fn get_confirmed_block(&self, slot: Slot) -> Result<RpcConfirmedBlock> {
        if self.is_root(slot) {
            let slot_meta_cf = self.db.column::<cf::SlotMeta>();
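
Taken together, get_block_time gathers the vote timestamps recorded in a window of up to TIMESTAMP_SLOT_RANGE rooted slots (chosen by get_timestamp_slots starting at the most recent timestamp-interval boundary at or below the queried slot, falling back to the previous boundary when too few rooted slots follow it), shifts each timestamp forward by the elapsed slots times slot_duration, and takes a stake-weighted mean. A simplified, self-contained sketch of that arithmetic, using a hypothetical helper with plain tuples in place of the blocktree and stake-map lookups:

use std::time::Duration;

// Illustrative helper mirroring the stake-weighted mean in get_block_time:
// each (timestamp, slots_ahead, stake) sample is shifted forward by
// slots_ahead * slot_duration, weighted by stake, and the weighted sum is
// divided by the total stake observed.
fn stake_weighted_block_time(
    samples: &[(i64, u64, u64)], // (vote timestamp, slots between vote and queried slot, stake)
    slot_duration: Duration,
) -> Option<i64> {
    let mut total_stake: u64 = 0;
    let mut weighted_sum: u64 = 0;
    for &(timestamp, slots_ahead, stake) in samples {
        let offset = slot_duration * slots_ahead as u32;
        weighted_sum += (timestamp as u64 + offset.as_secs()) * stake;
        total_stake += stake;
    }
    if total_stake > 0 {
        Some((weighted_sum / total_stake) as i64)
    } else {
        None
    }
}

// Example: two voters timestamped a slot five slots before the queried one at
// 1576183541, with stakes 3 and 1 and 400ms slots; the estimate is
// 1576183541 + 2 seconds however the stake is split.
// assert_eq!(
//     stake_weighted_block_time(&[(1576183541, 5, 3), (1576183541, 5, 1)], Duration::from_millis(400)),
//     Some(1576183543)
// );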
@ -1261,6 +1314,33 @@ impl Blocktree {
        self.transaction_status_cf.put(index, status)
    }
fn get_block_timestamps(&self, slot: Slot) -> Result<Vec<(Pubkey, UnixTimestamp)>> {
let slot_entries = self.get_slot_entries(slot, 0, None)?;
Ok(slot_entries
.iter()
.cloned()
.flat_map(|entry| entry.transactions)
.flat_map(|transaction| {
let mut timestamps: Vec<(Pubkey, UnixTimestamp)> = Vec::new();
for instruction in transaction.message.instructions {
let program_id = instruction.program_id(&transaction.message.account_keys);
if program_id == &solana_vote_program::id() {
if let Ok(VoteInstruction::Vote(vote)) =
limited_deserialize(&instruction.data)
{
if let Some(timestamp) = vote.timestamp {
let vote_pubkey = transaction.message.account_keys
[instruction.accounts[0] as usize];
timestamps.push((vote_pubkey, timestamp));
}
}
}
}
timestamps
})
.collect())
}
    /// Returns the entry vector for the slot starting with `shred_start_index`
    pub fn get_slot_entries(
        &self,
@ -2163,6 +2243,7 @@ fn adjust_ulimit_nofile() {
pub mod tests {
    use super::*;
    use crate::{
        blocktree_processor::fill_blocktree_slot_with_ticks,
        entry::{next_entry, next_entry_mut},
        genesis_utils::{create_genesis_config, GenesisConfigInfo},
        leader_schedule::{FixedSchedule, LeaderSchedule},
@ -2181,6 +2262,7 @@ pub mod tests {
        signature::Signature,
        transaction::TransactionError,
    };
    use solana_vote_program::{vote_instruction, vote_state::Vote};
    use std::{iter::FromIterator, time::Duration};

    // used for tests only
@ -4252,6 +4334,98 @@ pub mod tests {
        }
    }
#[test]
fn test_get_timestamp_slots() {
let ticks_per_slot = 5;
// Smaller interval than TIMESTAMP_SLOT_INTERVAL for convenience of building blocktree
let timestamp_interval = 7;
/*
Build a blocktree with < TIMESTAMP_SLOT_RANGE roots
*/
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree.set_roots(&[0]).unwrap();
let mut last_entry_hash = Hash::default();
for slot in 0..=3 {
let parent = {
if slot == 0 {
0
} else {
slot - 1
}
};
last_entry_hash = fill_blocktree_slot_with_ticks(
&blocktree,
ticks_per_slot,
slot,
parent,
last_entry_hash,
);
}
blocktree.set_roots(&[1, 2, 3]).unwrap();
assert_eq!(
blocktree.get_timestamp_slots(2, timestamp_interval),
vec![0, 1, 2]
);
assert_eq!(
blocktree.get_timestamp_slots(3, timestamp_interval),
vec![0, 1, 2, 3]
);
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
/*
Build a blocktree in the ledger with the following rooted slots:
        [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19]
*/
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree.set_roots(&[0]).unwrap();
let desired_roots = vec![1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19];
let mut last_entry_hash = Hash::default();
for (i, slot) in desired_roots.iter().enumerate() {
let parent = {
if i == 0 {
0
} else {
desired_roots[i - 1]
}
};
last_entry_hash = fill_blocktree_slot_with_ticks(
&blocktree,
ticks_per_slot,
*slot,
parent,
last_entry_hash,
);
}
blocktree.set_roots(&desired_roots).unwrap();
assert_eq!(
blocktree.get_timestamp_slots(2, timestamp_interval),
vec![0, 1, 2]
);
assert_eq!(
blocktree.get_timestamp_slots(8, timestamp_interval),
vec![0, 1, 2, 3, 4]
);
assert_eq!(
blocktree.get_timestamp_slots(13, timestamp_interval),
vec![8, 9, 10, 11, 12]
);
assert_eq!(
blocktree.get_timestamp_slots(18, timestamp_interval),
vec![8, 9, 10, 11, 12]
);
assert_eq!(
blocktree.get_timestamp_slots(19, timestamp_interval),
vec![14, 16, 17, 18, 19]
);
}
    #[test]
    fn test_get_confirmed_block() {
        let slot = 10;
@ -4341,7 +4515,81 @@ pub mod tests {
        }
    }

    #[test]
-   pub fn test_persist_transaction_status() {
    fn test_get_block_timestamps() {
let vote_keypairs: Vec<Keypair> = (0..6).map(|_| Keypair::new()).collect();
let base_timestamp = 1576183541;
let mut expected_timestamps: Vec<(Pubkey, UnixTimestamp)> = Vec::new();
// Populate slot 1 with vote transactions, some of which have timestamps
let mut vote_entries: Vec<Entry> = Vec::new();
for (i, keypair) in vote_keypairs.iter().enumerate() {
let timestamp = if i % 2 == 0 {
let unique_timestamp = base_timestamp + i as i64;
expected_timestamps.push((keypair.pubkey(), unique_timestamp));
Some(unique_timestamp)
} else {
None
};
let vote = Vote {
slots: vec![1],
hash: Hash::default(),
timestamp,
};
let vote_ix = vote_instruction::vote(&keypair.pubkey(), &keypair.pubkey(), vote);
let vote_tx =
Transaction::new_signed_instructions(&[keypair], vec![vote_ix], Hash::default());
vote_entries.push(next_entry_mut(&mut Hash::default(), 0, vec![vote_tx]));
let mut tick = create_ticks(1, 0, hash(&serialize(&i).unwrap()));
vote_entries.append(&mut tick);
}
let shreds = entries_to_test_shreds(vote_entries.clone(), 1, 0, true, 0);
let ledger_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&ledger_path).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
// Populate slot 2 with ticks only
fill_blocktree_slot_with_ticks(&blocktree, 6, 2, 1, Hash::default());
blocktree.set_roots(&[0, 1, 2]).unwrap();
assert_eq!(
blocktree.get_block_timestamps(1).unwrap(),
expected_timestamps
);
assert_eq!(blocktree.get_block_timestamps(2).unwrap(), vec![]);
// Build epoch vote_accounts HashMap to test stake-weighted block time
blocktree.set_roots(&[3, 8]).unwrap();
let mut stakes = HashMap::new();
for (i, keypair) in vote_keypairs.iter().enumerate() {
stakes.insert(keypair.pubkey(), (1 + i as u64, Account::default()));
}
let slot_duration = Duration::from_millis(400);
let block_time_slot_3 = blocktree.get_block_time(3, slot_duration.clone(), &stakes);
let mut total_stake = 0;
let mut expected_time: u64 = (0..6)
.map(|x| {
if x % 2 == 0 {
total_stake += 1 + x;
(base_timestamp as u64 + x) * (1 + x)
} else {
0
}
})
.sum();
expected_time /= total_stake;
assert_eq!(block_time_slot_3.unwrap() as u64, expected_time);
assert_eq!(
blocktree
.get_block_time(8, slot_duration.clone(), &stakes)
.unwrap() as u64,
expected_time + 2 // At 400ms block duration, 5 slots == 2sec
);
}
#[test]
fn test_persist_transaction_status() {
        let blocktree_path = get_tmp_ledger_path!();
        {
            let blocktree = Blocktree::open(&blocktree_path).unwrap();

vote_state.rs

@ -26,7 +26,7 @@ pub const INITIAL_LOCKOUT: usize = 2;
// smaller numbers makes
pub const MAX_EPOCH_CREDITS_HISTORY: usize = 64;

-// Frequency of timestamp Votes In v0.22.0, this is approximately 30min with cluster clock
// Frequency of timestamp Votes. In v0.22.0, this is approximately 30min with cluster clock
// defaults, intended to limit block time drift to < 1hr
pub const TIMESTAMP_SLOT_INTERVAL: u64 = 4500;
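
At the 400ms default slot time assumed elsewhere in this change, this interval works out to 4500 slots * 0.4 s/slot = 1800 s, which is where the "approximately 30min" figure in the comment comes from.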