Add solana-ledger crate (#6415)

automerge
This commit is contained in:
Greg Fitzgerald
2019-10-18 10:28:51 -06:00
committed by Grimes
parent 6f58bdfcb1
commit 5468be2ef9
74 changed files with 350 additions and 273 deletions

View File

@ -27,8 +27,6 @@ core_affinity = "0.5.9"
crc = { version = "1.8.1", optional = true }
crossbeam-channel = "0.3"
dir-diff = "0.3.2"
dlopen = "0.1.8"
dlopen_derive = "0.1.4"
fs_extra = "1.1.0"
indexmap = "1.1"
itertools = "0.8.0"
@ -55,6 +53,7 @@ solana-chacha-sys = { path = "../chacha-sys", version = "0.20.0" }
solana-client = { path = "../client", version = "0.20.0" }
solana-drone = { path = "../drone", version = "0.20.0" }
solana-ed25519-dalek = "0.2.0"
solana-ledger = { path = "../ledger", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.0" }
solana-metrics = { path = "../metrics", version = "0.20.0" }
@ -79,13 +78,6 @@ untrusted = "0.7.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
[dependencies.rocksdb]
# Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts
# when also using the bzip2 crate
version = "0.11.0"
default-features = false
features = ["lz4"]
[dev-dependencies]
hex-literal = "0.2.1"
matches = "0.1.6"

View File

@ -2,24 +2,23 @@
extern crate test;
#[macro_use]
extern crate solana_core;
extern crate solana_ledger;
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::banking_stage::{create_test_recorder, BankingStage};
use solana_core::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_core::blocktree_processor::process_entries;
use solana_core::cluster_info::ClusterInfo;
use solana_core::cluster_info::Node;
use solana_core::entry::next_hash;
use solana_core::entry::Entry;
use solana_core::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_core::service::Service;
use solana_core::test_tx::test_tx;
use solana_ledger::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_ledger::entry::{next_hash, Entry};
use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;

View File

@ -4,13 +4,11 @@ use rand;
extern crate test;
#[macro_use]
extern crate solana_core;
extern crate solana_ledger;
use rand::Rng;
use solana_core::{
blocktree::{entries_to_test_shreds, get_tmp_ledger_path, Blocktree},
entry::{create_ticks, Entry},
};
use solana_ledger::blocktree::{entries_to_test_shreds, get_tmp_ledger_path, Blocktree};
use solana_ledger::entry::{create_ticks, Entry};
use solana_sdk::hash::Hash;
use std::path::Path;
use test::Bencher;

View File

@ -3,8 +3,8 @@
#![feature(test)]
extern crate test;
use solana_core::poh::Poh;
use solana_core::poh_service::NUM_HASHES_PER_BATCH;
use solana_ledger::poh::Poh;
use solana_sdk::hash::Hash;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};

View File

@ -1,8 +1,7 @@
#![feature(test)]
extern crate test;
use solana_core::entry::EntrySlice;
use solana_core::entry::{next_entry_mut, Entry};
use solana_ledger::entry::{next_entry_mut, Entry, EntrySlice};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;

View File

@ -8,10 +8,10 @@ use solana_core::bank_forks::BankForks;
use solana_core::cluster_info::{ClusterInfo, Node};
use solana_core::contact_info::ContactInfo;
use solana_core::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use solana_core::leader_schedule_cache::LeaderScheduleCache;
use solana_core::packet::to_packets_chunked;
use solana_core::retransmit_stage::retransmitter;
use solana_core::test_tx::test_tx;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_measure::measure::Measure;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;

View File

@ -2,13 +2,12 @@
extern crate test;
use solana_core::entry::create_ticks;
use solana_core::entry::Entry;
use solana_core::shred::{
use solana_core::test_tx;
use solana_ledger::entry::{create_ticks, Entry};
use solana_ledger::shred::{
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE,
SIZE_OF_SHRED_HEADER,
};
use solana_core::test_tx;
use solana_sdk::hash::Hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::signature::{Keypair, KeypairUtil};

View File

@ -2,13 +2,9 @@
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use crate::{
blocktree::Blocktree,
cluster_info::ClusterInfo,
entry::hash_transactions,
leader_schedule_cache::LeaderScheduleCache,
packet::PACKETS_PER_BATCH,
packet::{Packet, Packets},
perf_libs,
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
poh_service::PohService,
result::{Error, Result},
@ -18,6 +14,10 @@ use crate::{
use bincode::deserialize;
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools;
use solana_ledger::{
blocktree::Blocktree, entry::hash_transactions, leader_schedule_cache::LeaderScheduleCache,
perf_libs,
};
use solana_measure::measure::Measure;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
use solana_runtime::{accounts_db::ErrorCounters, bank::Bank, transaction_batch::TransactionBatch};
@ -967,15 +967,14 @@ pub fn create_test_recorder(
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::cluster_info::Node;
use crate::entry::{Entry, EntrySlice};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::packet::to_packets;
use crate::poh_recorder::WorkingBank;
use crate::{get_tmp_ledger_path, tmp_ledger_name};
use crossbeam_channel::unbounded;
use itertools::Itertools;
use solana_ledger::blocktree::get_tmp_ledger_path;
use solana_ledger::entry::{Entry, EntrySlice};
use solana_sdk::instruction::InstructionError;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;

View File

@ -2,11 +2,11 @@
//! local unix socket, to provide client services such as a block explorer with
//! real-time access to entries.
use crate::entry::Entry;
use crate::result::Result;
use bincode::serialize;
use chrono::{SecondsFormat, Utc};
use serde_json::json;
use solana_ledger::entry::Entry;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::cell::RefCell;
@ -178,7 +178,6 @@ fn serialize_transactions(entry: &Entry) -> Vec<Vec<u8>> {
#[cfg(test)]
mod test {
use super::*;
use crate::entry::Entry;
use chrono::{DateTime, FixedOffset};
use serde_json::Value;
use solana_sdk::hash::Hash;

View File

@ -7,9 +7,9 @@ use crate::blockstream::BlockstreamEvents;
use crate::blockstream::MockBlockstream as Blockstream;
#[cfg(not(test))]
use crate::blockstream::SocketBlockstream as Blockstream;
use crate::blocktree::Blocktree;
use crate::result::{Error, Result};
use crate::service::Service;
use solana_ledger::blocktree::Blocktree;
use solana_sdk::pubkey::Pubkey;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
@ -101,12 +101,12 @@ impl Service for BlockstreamService {
#[cfg(test)]
mod test {
use super::*;
use crate::blocktree::create_new_tmp_ledger;
use crate::entry::{create_ticks, Entry};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use bincode::{deserialize, serialize};
use chrono::{DateTime, FixedOffset};
use serde_json::Value;
use solana_ledger::blocktree::create_new_tmp_ledger;
use solana_ledger::entry::{create_ticks, Entry};
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;

File diff suppressed because it is too large Load Diff

View File

@ -1,834 +0,0 @@
use crate::blocktree::{BlocktreeError, Result};
use bincode::{deserialize, serialize};
use byteorder::{BigEndian, ByteOrder};
use serde::de::DeserializeOwned;
use serde::Serialize;
use solana_sdk::clock::Slot;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::fs;
use std::marker::PhantomData;
use std::path::Path;
use std::sync::Arc;
use rocksdb::{
self, ColumnFamily, ColumnFamilyDescriptor, DBIterator, DBRawIterator, Direction,
IteratorMode as RocksIteratorMode, Options, WriteBatch as RWriteBatch, DB,
};
// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
const MIN_WRITE_BUFFER_SIZE: u64 = 64 * 1024; // 64KB

/// Where a column iterator is positioned when it is created.
pub enum IteratorMode<Index> {
    /// First entry in the column
    Start,
    /// Last entry in the column
    End,
    /// The entry at this key, walking in the given direction
    From(Index, IteratorDirection),
}

pub enum IteratorDirection {
    Forward,
    Reverse,
}

/// Marker types naming each RocksDB column family in the blocktree database.
pub mod columns {
    #[derive(Debug)]
    /// SlotMeta Column
    pub struct SlotMeta;

    #[derive(Debug)]
    /// Orphans Column
    pub struct Orphans;

    #[derive(Debug)]
    /// The dead slots column
    pub struct DeadSlots;

    #[derive(Debug)]
    /// The erasure meta column
    pub struct ErasureMeta;

    #[derive(Debug)]
    /// The root column
    pub struct Root;

    #[derive(Debug)]
    /// The index column
    pub struct Index;

    #[derive(Debug)]
    /// The shred data column
    pub struct ShredData;

    #[derive(Debug)]
    /// The shred erasure code column
    pub struct ShredCode;
}
#[derive(Debug)]
/// Thin wrapper around the raw `rocksdb::DB` handle.
pub struct Rocks(rocksdb::DB);

impl Rocks {
    /// Opens (creating if necessary) the database at `path` with one column
    /// family per marker type in `columns`.
    fn open(path: &Path) -> Result<Rocks> {
        use crate::blocktree::db::columns::{
            DeadSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData, SlotMeta,
        };

        fs::create_dir_all(&path)?;

        // Use default database options
        let db_options = get_db_options();

        // Column family names
        let meta_cf_descriptor =
            ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(SlotMeta::NAME));
        let dead_slots_cf_descriptor =
            ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(DeadSlots::NAME));
        let erasure_meta_cf_descriptor =
            ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(ErasureMeta::NAME));
        let orphans_cf_descriptor =
            ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(Orphans::NAME));
        let root_cf_descriptor =
            ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(Root::NAME));
        let index_cf_descriptor =
            ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(Index::NAME));
        let shred_data_cf_descriptor =
            ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options(ShredData::NAME));
        let shred_code_cf_descriptor =
            ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options(ShredCode::NAME));

        let cfs = vec![
            meta_cf_descriptor,
            dead_slots_cf_descriptor,
            erasure_meta_cf_descriptor,
            orphans_cf_descriptor,
            root_cf_descriptor,
            index_cf_descriptor,
            shred_data_cf_descriptor,
            shred_code_cf_descriptor,
        ];

        // Open the database
        let db = Rocks(DB::open_cf_descriptors(&db_options, path, cfs)?);

        Ok(db)
    }

    /// Names of every column family this database manages.
    fn columns(&self) -> Vec<&'static str> {
        use crate::blocktree::db::columns::{
            DeadSlots, ErasureMeta, Index, Orphans, Root, ShredCode, ShredData, SlotMeta,
        };

        vec![
            ErasureMeta::NAME,
            DeadSlots::NAME,
            Index::NAME,
            Orphans::NAME,
            Root::NAME,
            SlotMeta::NAME,
            ShredData::NAME,
            ShredCode::NAME,
        ]
    }

    /// Deletes the on-disk database at `path`.
    fn destroy(path: &Path) -> Result<()> {
        DB::destroy(&Options::default(), path)?;

        Ok(())
    }

    /// Handle for column family `cf`; panics if it was not created at open
    /// time (all families are declared in `open`, so this is an invariant).
    fn cf_handle(&self, cf: &str) -> ColumnFamily {
        self.0
            .cf_handle(cf)
            .expect("should never get an unknown column")
    }

    /// Reads the raw value stored under `key` in column family `cf`.
    fn get_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<Option<Vec<u8>>> {
        let opt = self.0.get_cf(cf, key)?.map(|db_vec| db_vec.to_vec());
        Ok(opt)
    }

    /// Writes `value` under `key` in column family `cf`.
    fn put_cf(&self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<()> {
        self.0.put_cf(cf, key, value)?;
        Ok(())
    }

    /// Deletes `key` from column family `cf`.
    fn delete_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<()> {
        self.0.delete_cf(cf, key)?;
        Ok(())
    }

    /// Creates an iterator over `cf`, positioned per `iterator_mode`.
    fn iterator_cf(
        &self,
        cf: ColumnFamily,
        iterator_mode: IteratorMode<&[u8]>,
    ) -> Result<DBIterator> {
        let iter = {
            match iterator_mode {
                IteratorMode::Start => self.0.iterator_cf(cf, RocksIteratorMode::Start)?,
                IteratorMode::End => self.0.iterator_cf(cf, RocksIteratorMode::End)?,
                IteratorMode::From(start_from, direction) => {
                    // Translate our direction enum into the rocksdb one.
                    let rocks_direction = match direction {
                        IteratorDirection::Forward => Direction::Forward,
                        IteratorDirection::Reverse => Direction::Reverse,
                    };
                    self.0
                        .iterator_cf(cf, RocksIteratorMode::From(start_from, rocks_direction))?
                }
            }
        };

        Ok(iter)
    }

    /// Creates an unpositioned raw iterator over `cf`.
    fn raw_iterator_cf(&self, cf: ColumnFamily) -> Result<DBRawIterator> {
        let raw_iter = self.0.raw_iterator_cf(cf)?;

        Ok(raw_iter)
    }

    /// Creates an empty write batch.
    fn batch(&self) -> Result<RWriteBatch> {
        Ok(RWriteBatch::default())
    }

    /// Atomically applies `batch` to the database.
    fn write(&self, batch: RWriteBatch) -> Result<()> {
        self.0.write(batch)?;
        Ok(())
    }
}
/// A typed column family: converts a structured `Index` to and from the raw
/// big-endian key bytes RocksDB stores, and exposes the slot component used
/// for slot-oriented sweeps.
pub trait Column {
    const NAME: &'static str;
    type Index;

    /// Serializes `index` into the raw RocksDB key.
    fn key(index: Self::Index) -> Vec<u8>;
    /// Parses a raw RocksDB key back into the structured index.
    fn index(key: &[u8]) -> Self::Index;
    /// The slot component of `index`.
    fn slot(index: Self::Index) -> Slot;
    /// The first index belonging to `slot` (used as an iteration start).
    fn as_index(slot: Slot) -> Self::Index;
}

/// Cursor-style iteration over raw keys/values.
pub trait DbCursor {
    fn valid(&self) -> bool;
    fn seek(&mut self, key: &[u8]);
    fn seek_to_first(&mut self);
    fn next(&mut self);
    fn key(&self) -> Option<Vec<u8>>;
    fn value(&self) -> Option<Vec<u8>>;
}

/// Minimal write-batch interface over raw column-family bytes.
pub trait IWriteBatch {
    fn put_cf(&mut self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<()>;
    fn delete_cf(&mut self, cf: ColumnFamily, key: &[u8]) -> Result<()>;
}

/// A `Column` whose values are (de)serialized with bincode.
pub trait TypedColumn: Column {
    type Type: Serialize + DeserializeOwned;
}
impl Column for columns::ShredCode {
    const NAME: &'static str = super::CODE_SHRED_CF;
    type Index = (u64, u64);

    // Coding shreds reuse the (slot, index) key layout of data shreds.
    fn key(index: (u64, u64)) -> Vec<u8> {
        columns::ShredData::key(index)
    }

    fn index(key: &[u8]) -> (u64, u64) {
        columns::ShredData::index(key)
    }

    fn slot(index: Self::Index) -> Slot {
        index.0
    }

    fn as_index(slot: Slot) -> Self::Index {
        (slot, 0)
    }
}

impl Column for columns::ShredData {
    const NAME: &'static str = super::DATA_SHRED_CF;
    type Index = (u64, u64);

    // Key layout: 8-byte big-endian slot, then 8-byte big-endian shred
    // index, so lexicographic key order matches (slot, index) order.
    fn key((slot, index): (u64, u64)) -> Vec<u8> {
        let mut key = vec![0; 16];
        BigEndian::write_u64(&mut key[..8], slot);
        BigEndian::write_u64(&mut key[8..16], index);
        key
    }

    fn index(key: &[u8]) -> (u64, u64) {
        let slot = BigEndian::read_u64(&key[..8]);
        let index = BigEndian::read_u64(&key[8..16]);
        (slot, index)
    }

    fn slot(index: Self::Index) -> Slot {
        index.0
    }

    fn as_index(slot: Slot) -> Self::Index {
        (slot, 0)
    }
}

impl Column for columns::Index {
    const NAME: &'static str = super::INDEX_CF;
    type Index = u64;

    // Key layout: a single 8-byte big-endian slot.
    fn key(slot: u64) -> Vec<u8> {
        let mut key = vec![0; 8];
        BigEndian::write_u64(&mut key[..], slot);
        key
    }

    fn index(key: &[u8]) -> u64 {
        BigEndian::read_u64(&key[..8])
    }

    fn slot(index: Self::Index) -> Slot {
        index
    }

    fn as_index(slot: Slot) -> Self::Index {
        slot
    }
}

impl TypedColumn for columns::Index {
    type Type = crate::blocktree::meta::Index;
}

impl Column for columns::DeadSlots {
    const NAME: &'static str = super::DEAD_SLOTS_CF;
    type Index = u64;

    fn key(slot: u64) -> Vec<u8> {
        let mut key = vec![0; 8];
        BigEndian::write_u64(&mut key[..], slot);
        key
    }

    fn index(key: &[u8]) -> u64 {
        BigEndian::read_u64(&key[..8])
    }

    fn slot(index: Self::Index) -> Slot {
        index
    }

    fn as_index(slot: Slot) -> Self::Index {
        slot
    }
}

impl TypedColumn for columns::DeadSlots {
    type Type = bool;
}

impl Column for columns::Orphans {
    const NAME: &'static str = super::ORPHANS_CF;
    type Index = u64;

    fn key(slot: u64) -> Vec<u8> {
        let mut key = vec![0; 8];
        BigEndian::write_u64(&mut key[..], slot);
        key
    }

    fn index(key: &[u8]) -> u64 {
        BigEndian::read_u64(&key[..8])
    }

    fn slot(index: Self::Index) -> Slot {
        index
    }

    fn as_index(slot: Slot) -> Self::Index {
        slot
    }
}

impl TypedColumn for columns::Orphans {
    type Type = bool;
}

impl Column for columns::Root {
    const NAME: &'static str = super::ROOT_CF;
    type Index = u64;

    fn key(slot: u64) -> Vec<u8> {
        let mut key = vec![0; 8];
        BigEndian::write_u64(&mut key[..], slot);
        key
    }

    fn index(key: &[u8]) -> u64 {
        BigEndian::read_u64(&key[..8])
    }

    fn slot(index: Self::Index) -> Slot {
        index
    }

    fn as_index(slot: Slot) -> Self::Index {
        slot
    }
}

impl TypedColumn for columns::Root {
    type Type = bool;
}

impl Column for columns::SlotMeta {
    const NAME: &'static str = super::META_CF;
    type Index = u64;

    fn key(slot: u64) -> Vec<u8> {
        let mut key = vec![0; 8];
        BigEndian::write_u64(&mut key[..], slot);
        key
    }

    fn index(key: &[u8]) -> u64 {
        BigEndian::read_u64(&key[..8])
    }

    fn slot(index: Self::Index) -> Slot {
        index
    }

    fn as_index(slot: Slot) -> Self::Index {
        slot
    }
}

impl TypedColumn for columns::SlotMeta {
    type Type = super::SlotMeta;
}

impl Column for columns::ErasureMeta {
    const NAME: &'static str = super::ERASURE_META_CF;
    type Index = (u64, u64);

    fn index(key: &[u8]) -> (u64, u64) {
        let slot = BigEndian::read_u64(&key[..8]);
        let set_index = BigEndian::read_u64(&key[8..]);

        (slot, set_index)
    }

    // Key layout: 8-byte big-endian slot, then 8-byte big-endian set index.
    fn key((slot, set_index): (u64, u64)) -> Vec<u8> {
        let mut key = vec![0; 16];
        BigEndian::write_u64(&mut key[..8], slot);
        BigEndian::write_u64(&mut key[8..], set_index);
        key
    }

    fn slot(index: Self::Index) -> Slot {
        index.0
    }

    fn as_index(slot: Slot) -> Self::Index {
        (slot, 0)
    }
}

impl TypedColumn for columns::ErasureMeta {
    type Type = super::ErasureMeta;
}
#[derive(Debug, Clone)]
/// Shared handle to the underlying RocksDB instance.
pub struct Database {
    backend: Arc<Rocks>,
}

#[derive(Debug, Clone)]
/// Handle used to build and commit multi-column write batches.
pub struct BatchProcessor {
    backend: Arc<Rocks>,
}

#[derive(Debug, Clone)]
/// Typed view of a single column family.
pub struct LedgerColumn<C>
where
    C: Column,
{
    backend: Arc<Rocks>,
    // Zero-sized marker tying this handle to column `C`.
    column: PhantomData<C>,
}

/// A pending batch of writes plus a snapshot of the column-family handles
/// needed to address columns by name.
pub struct WriteBatch {
    write_batch: RWriteBatch,
    backend: PhantomData<Rocks>,
    map: HashMap<&'static str, ColumnFamily>,
}
impl Database {
    /// Opens (creating if necessary) the database at `path`.
    pub fn open(path: &Path) -> Result<Self> {
        let backend = Arc::new(Rocks::open(path)?);

        Ok(Database { backend })
    }

    /// Deletes the on-disk database at `path`.
    pub fn destroy(path: &Path) -> Result<()> {
        Rocks::destroy(path)?;

        Ok(())
    }

    /// Reads the raw bytes stored under `key` in column `C`.
    pub fn get_bytes<C>(&self, key: C::Index) -> Result<Option<Vec<u8>>>
    where
        C: Column,
    {
        self.backend
            .get_cf(self.cf_handle::<C>(), C::key(key).borrow())
    }

    /// Stores `data` under `key` in column `C`.
    pub fn put_bytes<C>(&self, key: C::Index, data: &[u8]) -> Result<()>
    where
        C: Column,
    {
        self.backend
            .put_cf(self.cf_handle::<C>(), C::key(key).borrow(), data)
    }

    /// Deletes the entry under `key` in column `C`.
    pub fn delete<C>(&self, key: C::Index) -> Result<()>
    where
        C: Column,
    {
        self.backend
            .delete_cf(self.cf_handle::<C>(), C::key(key).borrow())
    }

    /// Reads and bincode-deserializes the value under `key` in column `C`.
    pub fn get<C>(&self, key: C::Index) -> Result<Option<C::Type>>
    where
        C: TypedColumn,
    {
        if let Some(serialized_value) = self
            .backend
            .get_cf(self.cf_handle::<C>(), C::key(key).borrow())?
        {
            let value = deserialize(&serialized_value)?;

            Ok(Some(value))
        } else {
            Ok(None)
        }
    }

    /// Bincode-serializes `value` and stores it under `key` in column `C`.
    pub fn put<C>(&self, key: C::Index, value: &C::Type) -> Result<()>
    where
        C: TypedColumn,
    {
        let serialized_value = serialize(value)?;

        self.backend.put_cf(
            self.cf_handle::<C>(),
            C::key(key).borrow(),
            &serialized_value,
        )
    }

    /// Iterates column `C`, yielding parsed indexes and raw values.
    pub fn iter<C>(
        &self,
        iterator_mode: IteratorMode<C::Index>,
    ) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)>>
    where
        C: Column,
    {
        let iter = {
            match iterator_mode {
                IteratorMode::From(start_from, direction) => {
                    let key = C::key(start_from);
                    self.backend.iterator_cf(
                        self.cf_handle::<C>(),
                        IteratorMode::From(key.borrow(), direction),
                    )?
                }
                IteratorMode::Start => self
                    .backend
                    .iterator_cf(self.cf_handle::<C>(), IteratorMode::Start)?,
                IteratorMode::End => self
                    .backend
                    .iterator_cf(self.cf_handle::<C>(), IteratorMode::End)?,
            }
        };

        Ok(iter.map(|(key, value)| (C::index(&key), value)))
    }

    /// RocksDB handle for column `C`'s column family.
    #[inline]
    pub fn cf_handle<C>(&self) -> ColumnFamily
    where
        C: Column,
    {
        self.backend.cf_handle(C::NAME)
    }

    /// Typed single-column view sharing this database handle.
    pub fn column<C>(&self) -> LedgerColumn<C>
    where
        C: Column,
    {
        LedgerColumn {
            backend: Arc::clone(&self.backend),
            column: PhantomData,
        }
    }

    #[inline]
    pub fn raw_iterator_cf(&self, cf: ColumnFamily) -> Result<DBRawIterator> {
        self.backend.raw_iterator_cf(cf)
    }

    // Note this returns an object that can be used to directly write to multiple column families.
    // This circumvents the synchronization around APIs that in Blocktree that use
    // blocktree.batch_processor, so this API should only be used if the caller is sure they
    // are writing to data in columns that will not be corrupted by any simultaneous blocktree
    // operations.
    pub unsafe fn batch_processor(&self) -> BatchProcessor {
        BatchProcessor {
            backend: Arc::clone(&self.backend),
        }
    }
}
impl BatchProcessor {
    /// Starts a new write batch, snapshotting every column-family handle so
    /// the batch can address columns by name.
    pub fn batch(&mut self) -> Result<WriteBatch> {
        let db_write_batch = self.backend.batch()?;
        let map = self
            .backend
            .columns()
            .into_iter()
            .map(|desc| (desc, self.backend.cf_handle(desc)))
            .collect();

        Ok(WriteBatch {
            write_batch: db_write_batch,
            backend: PhantomData,
            map,
        })
    }

    /// Atomically commits `batch` to the database.
    pub fn write(&mut self, batch: WriteBatch) -> Result<()> {
        self.backend.write(batch.write_batch)
    }
}
impl<C> LedgerColumn<C>
where
    C: Column,
{
    /// Reads the raw bytes stored under `key`.
    pub fn get_bytes(&self, key: C::Index) -> Result<Option<Vec<u8>>> {
        self.backend.get_cf(self.handle(), C::key(key).borrow())
    }

    /// Iterates this column, yielding parsed indexes and raw values.
    pub fn iter(
        &self,
        iterator_mode: IteratorMode<C::Index>,
    ) -> Result<impl Iterator<Item = (C::Index, Box<[u8]>)>> {
        let iter = {
            match iterator_mode {
                IteratorMode::From(start_from, direction) => {
                    let key = C::key(start_from);
                    self.backend
                        .iterator_cf(self.handle(), IteratorMode::From(key.borrow(), direction))?
                }
                IteratorMode::Start => self
                    .backend
                    .iterator_cf(self.handle(), IteratorMode::Start)?,
                IteratorMode::End => self.backend.iterator_cf(self.handle(), IteratorMode::End)?,
            }
        };

        Ok(iter.map(|(key, value)| (C::index(&key), value)))
    }

    /// Queues deletes into `batch` for every entry from slot `from` (or the
    /// column start) up to and including slot `to` (or the column end).
    /// Returns `true` if the end of the column was reached, i.e. no entry
    /// past `to` remains.
    pub fn delete_slot(
        &self,
        batch: &mut WriteBatch,
        from: Option<Slot>,
        to: Option<Slot>,
    ) -> Result<bool>
    where
        C::Index: PartialOrd + Copy,
    {
        let mut end = true;
        let iter_config = match from {
            Some(s) => IteratorMode::From(C::as_index(s), IteratorDirection::Forward),
            None => IteratorMode::Start,
        };
        let iter = self.iter(iter_config)?;
        for (index, _) in iter {
            if let Some(to) = to {
                if C::slot(index) > to {
                    end = false;
                    break;
                }
            };
            // A failed queue operation is logged but does not abort the sweep.
            if let Err(e) = batch.delete::<C>(index) {
                error!(
                    "Error: {:?} while adding delete from_slot {:?} to batch {:?}",
                    e,
                    from,
                    C::NAME
                )
            }
        }
        Ok(end)
    }

    /// RocksDB handle for this column's column family.
    #[inline]
    pub fn handle(&self) -> ColumnFamily {
        self.backend.cf_handle(C::NAME)
    }

    /// `true` when this column has no entries at all.
    pub fn is_empty(&self) -> Result<bool> {
        let mut iter = self.backend.raw_iterator_cf(self.handle())?;
        iter.seek_to_first();
        Ok(!iter.valid())
    }

    /// Stores `value` under `key`.
    pub fn put_bytes(&self, key: C::Index, value: &[u8]) -> Result<()> {
        self.backend
            .put_cf(self.handle(), C::key(key).borrow(), value)
    }

    /// Deletes the entry under `key`.
    pub fn delete(&self, key: C::Index) -> Result<()> {
        self.backend.delete_cf(self.handle(), C::key(key).borrow())
    }
}
impl<C> LedgerColumn<C>
where
    C: TypedColumn,
{
    /// Reads and bincode-deserializes the value under `key`.
    pub fn get(&self, key: C::Index) -> Result<Option<C::Type>> {
        if let Some(serialized_value) = self.backend.get_cf(self.handle(), C::key(key).borrow())? {
            let value = deserialize(&serialized_value)?;

            Ok(Some(value))
        } else {
            Ok(None)
        }
    }

    /// Bincode-serializes `value` and stores it under `key`.
    pub fn put(&self, key: C::Index, value: &C::Type) -> Result<()> {
        let serialized_value = serialize(value)?;

        self.backend
            .put_cf(self.handle(), C::key(key).borrow(), &serialized_value)
    }
}

impl WriteBatch {
    /// Queues a raw write of `bytes` under `key` in column `C`.
    pub fn put_bytes<C: Column>(&mut self, key: C::Index, bytes: &[u8]) -> Result<()> {
        self.write_batch
            .put_cf(self.get_cf::<C>(), C::key(key).borrow(), bytes)
            .map_err(|e| e.into())
    }

    /// Queues a delete of `key` in column `C`.
    pub fn delete<C: Column>(&mut self, key: C::Index) -> Result<()> {
        self.write_batch
            .delete_cf(self.get_cf::<C>(), C::key(key).borrow())
            .map_err(|e| e.into())
    }

    /// Bincode-serializes `value` and queues it under `key` in column `C`.
    pub fn put<C: TypedColumn>(&mut self, key: C::Index, value: &C::Type) -> Result<()> {
        let serialized_value = serialize(&value)?;
        self.write_batch
            .put_cf(self.get_cf::<C>(), C::key(key).borrow(), &serialized_value)
            .map_err(|e| e.into())
    }

    // Handle for column `C`, resolved from the snapshot taken when this
    // batch was created; panics if `C` is not a known column.
    #[inline]
    fn get_cf<C: Column>(&self) -> ColumnFamily {
        self.map[C::NAME]
    }
}

// Cursor adapter over RocksDB's raw iterator.
impl DbCursor for DBRawIterator {
    fn valid(&self) -> bool {
        DBRawIterator::valid(self)
    }

    fn seek(&mut self, key: &[u8]) {
        DBRawIterator::seek(self, key);
    }

    fn seek_to_first(&mut self) {
        DBRawIterator::seek_to_first(self);
    }

    fn next(&mut self) {
        DBRawIterator::next(self);
    }

    fn key(&self) -> Option<Vec<u8>> {
        DBRawIterator::key(self)
    }

    fn value(&self) -> Option<Vec<u8>> {
        DBRawIterator::value(self)
    }
}

impl IWriteBatch for RWriteBatch {
    fn put_cf(&mut self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<()> {
        RWriteBatch::put_cf(self, cf, key, value)?;
        Ok(())
    }

    fn delete_cf(&mut self, cf: ColumnFamily, key: &[u8]) -> Result<()> {
        RWriteBatch::delete_cf(self, cf, key)?;
        Ok(())
    }
}

// Lets `?` convert raw RocksDB errors into blocktree errors.
impl std::convert::From<rocksdb::Error> for BlocktreeError {
    fn from(e: rocksdb::Error) -> BlocktreeError {
        BlocktreeError::RocksDb(e)
    }
}
// Per-column-family tuning: the four high-volume shred/erasure columns get
// large write buffers; every other column is tuned to flush quickly.
fn get_cf_options(name: &'static str) -> Options {
    use crate::blocktree::db::columns::{ErasureMeta, Index, ShredCode, ShredData};
    let mut options = Options::default();
    match name {
        ShredCode::NAME | ShredData::NAME | Index::NAME | ErasureMeta::NAME => {
            // 256MB * 8 buffers = 2GB per column; the 4 columns above should
            // take no more than 8GB of RAM in total
            options.set_max_write_buffer_number(8);
            options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
            options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
            options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
        }
        _ => {
            // We want smaller CFs to flush faster. This results in more WAL files but lowers
            // overall WAL space utilization and increases flush frequency
            options.set_write_buffer_size(MIN_WRITE_BUFFER_SIZE as usize);
            options.set_target_file_size_base(MIN_WRITE_BUFFER_SIZE);
            options.set_max_bytes_for_level_base(MIN_WRITE_BUFFER_SIZE);
            options.set_level_zero_file_num_compaction_trigger(1);
        }
    }
    options
}

// Database-wide options shared by every column family.
fn get_db_options() -> Options {
    let mut options = Options::default();
    options.create_if_missing(true);
    options.create_missing_column_families(true);
    options.increase_parallelism(TOTAL_THREADS);
    options.set_max_background_flushes(4);
    options.set_max_background_compactions(4);
    options
}

View File

@ -1,395 +0,0 @@
use crate::erasure::ErasureConfig;
use solana_metrics::datapoint;
use std::cmp::Ordering;
use std::{collections::BTreeSet, ops::Range, ops::RangeBounds};
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
// The Meta column family
pub struct SlotMeta {
    // The slot this metadata describes. The first
    // slot has slot 0.
    pub slot: u64,
    // The total number of consecutive blobs starting from index 0
    // we have received for this slot.
    pub consumed: u64,
    // The index *plus one* of the highest blob received for this slot. Useful
    // for checking if the slot has received any blobs yet, and to calculate the
    // range where there is one or more holes: `(consumed..received)`.
    pub received: u64,
    // The index of the blob that is flagged as the last blob for this slot.
    // std::u64::MAX while unknown.
    pub last_index: u64,
    // The slot height of the block this one derives from.
    // std::u64::MAX while unknown (see `is_parent_set`).
    pub parent_slot: u64,
    // The list of slot heights, each of which contains a block that derives
    // from this one.
    pub next_slots: Vec<u64>,
    // True if this slot is full (consumed == last_index + 1) and if every
    // slot that is a parent of this slot is also connected.
    pub is_connected: bool,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// A set of non-overlapping half-open `u64` ranges.
pub struct ErasureSetRanges {
    // Kept sorted by range start; maintained by `insert`.
    r: Vec<Range<u64>>,
}
impl ErasureSetRanges {
pub fn insert(&mut self, start: u64, end: u64) -> Result<usize, Range<u64>> {
let range = if start < end {
(start..end)
} else {
(end..start)
};
match self.pos(range.start) {
Ok(pos) => Err(self.r[pos].clone()),
Err(pos) => {
self.r.insert(pos, range);
Ok(pos)
}
}
}
fn pos(&self, seek: u64) -> Result<usize, usize> {
self.r.binary_search_by(|probe| {
if probe.contains(&seek) {
Ordering::Equal
} else {
probe.start.cmp(&seek)
}
})
}
pub fn lookup(&self, seek: u64) -> Result<Range<u64>, usize> {
self.pos(seek)
.map(|pos| self.r[pos].clone())
.or_else(|epos| {
if epos < self.r.len() && self.r[epos].contains(&seek) {
Ok(self.r[epos].clone())
} else {
Err(epos)
}
})
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Index recording presence/absence of blobs
pub struct Index {
    pub slot: u64,
    data: DataIndex,
    coding: CodingIndex,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
pub struct DataIndex {
    // Set of data blob indexes recorded as present
    index: BTreeSet<u64>,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Erasure coding information
pub struct CodingIndex {
    // Set of coding blob indexes recorded as present
    index: BTreeSet<u64>,
}

#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// Erasure coding information
pub struct ErasureMeta {
    /// Which erasure set in the slot this is
    pub set_index: u64,
    /// Size of shards in this erasure set
    pub size: usize,
    /// Erasure configuration for this erasure set
    pub config: ErasureConfig,
}

#[derive(Debug, PartialEq)]
/// Result of `ErasureMeta::status`
pub enum ErasureMetaStatus {
    /// Some data is missing but enough shreds are present to recover it
    CanRecover,
    /// Every data shred in the set is present
    DataFull,
    /// Recovery impossible; this many more shreds are needed
    StillNeed(usize),
}
impl Index {
    /// New, empty presence index for `slot`.
    pub(in crate::blocktree) fn new(slot: u64) -> Self {
        Index {
            slot,
            data: DataIndex::default(),
            coding: CodingIndex::default(),
        }
    }

    /// Presence info for data shreds.
    pub fn data(&self) -> &DataIndex {
        &self.data
    }
    /// Presence info for coding shreds.
    pub fn coding(&self) -> &CodingIndex {
        &self.coding
    }

    /// Mutable presence info for data shreds.
    pub fn data_mut(&mut self) -> &mut DataIndex {
        &mut self.data
    }
    /// Mutable presence info for coding shreds.
    pub fn coding_mut(&mut self) -> &mut CodingIndex {
        &mut self.coding
    }
}
/// TODO: Mark: Change this when coding
impl CodingIndex {
    /// Number of coding shreds recorded as present within `bounds`.
    pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
        self.index.range(bounds).count()
    }

    /// Whether the coding shred at `index` has been recorded as present.
    pub fn is_present(&self, index: u64) -> bool {
        self.index.contains(&index)
    }

    /// Marks the coding shred at `index` present or absent.
    pub fn set_present(&mut self, index: u64, presence: bool) {
        if !presence {
            self.index.remove(&index);
        } else {
            self.index.insert(index);
        }
    }

    /// Applies a batch of `(index, present)` updates.
    pub fn set_many_present(&mut self, presence: impl IntoIterator<Item = (u64, bool)>) {
        for (shred_index, present) in presence {
            self.set_present(shred_index, present);
        }
    }
}
impl DataIndex {
    /// Number of data shreds recorded as present within `bounds`.
    pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
        self.index.range(bounds).count()
    }

    /// Whether the data shred at `index` has been recorded as present.
    pub fn is_present(&self, index: u64) -> bool {
        self.index.contains(&index)
    }

    /// Marks the data shred at `index` present or absent.
    pub fn set_present(&mut self, index: u64, presence: bool) {
        match presence {
            true => {
                self.index.insert(index);
            }
            false => {
                self.index.remove(&index);
            }
        }
    }

    /// Applies a batch of `(index, present)` updates.
    pub fn set_many_present(&mut self, presence: impl IntoIterator<Item = (u64, bool)>) {
        presence
            .into_iter()
            .for_each(|(shred_index, present)| self.set_present(shred_index, present));
    }
}
impl SlotMeta {
    /// `true` once every blob up to and including `last_index` has been
    /// received, i.e. `consumed == last_index + 1`.
    pub fn is_full(&self) -> bool {
        // last_index is std::u64::MAX when it has no information about how
        // many blobs will fill this slot.
        // Note: A full slot with zero blobs is not possible.
        if self.last_index == std::u64::MAX {
            return false;
        }

        // Should never happen
        if self.consumed > self.last_index + 1 {
            datapoint!(
                "blocktree_error",
                (
                    "error",
                    format!(
                        "Observed a slot meta with consumed: {} > meta.last_index + 1: {}",
                        self.consumed,
                        self.last_index + 1
                    ),
                    String
                )
            );
        }

        self.consumed == self.last_index + 1
    }

    /// `true` once the parent slot is known (it stays std::u64::MAX until
    /// then).
    pub fn is_parent_set(&self) -> bool {
        self.parent_slot != std::u64::MAX
    }

    /// New metadata for `slot`; only slot 0 starts out connected.
    pub(in crate::blocktree) fn new(slot: u64, parent_slot: u64) -> Self {
        SlotMeta {
            slot,
            consumed: 0,
            received: 0,
            parent_slot,
            next_slots: vec![],
            is_connected: slot == 0,
            last_index: std::u64::MAX,
        }
    }
}
impl ErasureMeta {
    /// Creates metadata for the erasure set starting at `set_index`, with a
    /// zero shard size.
    pub fn new(set_index: u64, config: &ErasureConfig) -> ErasureMeta {
        Self {
            set_index,
            size: 0,
            config: *config,
        }
    }

    /// Classifies this erasure set given the presence info in `index`:
    /// recoverable, complete, or still short some number of shreds.
    pub fn status(&self, index: &Index) -> ErasureMetaStatus {
        use ErasureMetaStatus::*;

        let first = self.start_index();
        let (data_end, coding_end) = self.end_indexes();

        let coding_present = index.coding().present_in_bounds(first..coding_end);
        let data_present = index.data().present_in_bounds(first..data_end);

        let data_missing = self.config.num_data() - data_present;
        let coding_missing = self.config.num_coding() - coding_present;
        let total_missing = data_missing + coding_missing;

        if data_missing > 0 && total_missing <= self.config.num_coding() {
            CanRecover
        } else if data_missing == 0 {
            DataFull
        } else {
            StillNeed(total_missing - self.config.num_coding())
        }
    }

    /// Records the shard size for this erasure set.
    pub fn set_size(&mut self, size: usize) {
        self.size = size;
    }

    /// Shard size recorded for this erasure set.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Maps a shred index to the index of the erasure set containing it.
    pub fn set_index_for(index: u64, num_data: usize) -> u64 {
        index / num_data as u64
    }

    /// First shred index covered by this erasure set.
    pub fn start_index(&self) -> u64 {
        self.set_index
    }

    /// returns a tuple of (data_end, coding_end)
    pub fn end_indexes(&self) -> (u64, u64) {
        let first = self.start_index();
        (
            first + self.config.num_data() as u64,
            first + self.config.num_coding() as u64,
        )
    }
}
#[cfg(test)]
mod test {
use super::*;
use rand::{seq::SliceRandom, thread_rng};
use std::iter::repeat;
#[test]
fn test_erasure_meta_status() {
    use ErasureMetaStatus::*;

    // One erasure set at set_index 0 with the default data/coding counts.
    let set_index = 0;
    let erasure_config = ErasureConfig::default();
    let mut e_meta = ErasureMeta::new(set_index, &erasure_config);
    let mut rng = thread_rng();
    let mut index = Index::new(0);
    e_meta.size = 1;
    let data_indexes = 0..erasure_config.num_data() as u64;
    let coding_indexes = 0..erasure_config.num_coding() as u64;

    // Nothing present yet: every data shred is still needed.
    assert_eq!(e_meta.status(&index), StillNeed(erasure_config.num_data()));

    index
        .data_mut()
        .set_many_present(data_indexes.clone().zip(repeat(true)));

    // All data present -> DataFull regardless of coding shreds.
    assert_eq!(e_meta.status(&index), DataFull);

    index
        .coding_mut()
        .set_many_present(coding_indexes.clone().zip(repeat(true)));

    // With full coding, dropping data shreds one at a time (in random
    // order) keeps the set recoverable.
    for &idx in data_indexes
        .clone()
        .collect::<Vec<_>>()
        .choose_multiple(&mut rng, erasure_config.num_data())
    {
        index.data_mut().set_present(idx, false);

        assert_eq!(e_meta.status(&index), CanRecover);
    }

    index
        .data_mut()
        .set_many_present(data_indexes.zip(repeat(true)));

    // With full data, missing coding shreds never change DataFull.
    for &idx in coding_indexes
        .collect::<Vec<_>>()
        .choose_multiple(&mut rng, erasure_config.num_coding())
    {
        index.coding_mut().set_present(idx, false);

        assert_eq!(e_meta.status(&index), DataFull);
    }
}
#[test]
fn test_erasure_set_ranges() {
let mut ranges = ErasureSetRanges::default();
// Test empty ranges
(0..100 as u64).for_each(|i| {
assert_eq!(ranges.lookup(i), Err(0));
});
// Test adding one range and all boundary condition lookups
assert_eq!(ranges.insert(5, 13), Ok(0));
assert_eq!(ranges.lookup(0), Err(0));
assert_eq!(ranges.lookup(4), Err(0));
assert_eq!(ranges.lookup(5), Ok(5..13));
assert_eq!(ranges.lookup(12), Ok(5..13));
assert_eq!(ranges.lookup(13), Err(1));
assert_eq!(ranges.lookup(100), Err(1));
// Test adding second range (with backwards values) and all boundary condition lookups
assert_eq!(ranges.insert(55, 33), Ok(1));
assert_eq!(ranges.lookup(0), Err(0));
assert_eq!(ranges.lookup(4), Err(0));
assert_eq!(ranges.lookup(5), Ok(5..13));
assert_eq!(ranges.lookup(12), Ok(5..13));
assert_eq!(ranges.lookup(13), Err(1));
assert_eq!(ranges.lookup(32), Err(1));
assert_eq!(ranges.lookup(33), Ok(33..55));
assert_eq!(ranges.lookup(54), Ok(33..55));
assert_eq!(ranges.lookup(55), Err(2));
// Add a third range between previous two ranges
assert_eq!(ranges.insert(23, 30), Ok(1));
assert_eq!(ranges.lookup(0), Err(0));
assert_eq!(ranges.lookup(4), Err(0));
assert_eq!(ranges.lookup(5), Ok(5..13));
assert_eq!(ranges.lookup(12), Ok(5..13));
assert_eq!(ranges.lookup(13), Err(1));
assert_eq!(ranges.lookup(23), Ok(23..30));
assert_eq!(ranges.lookup(29), Ok(23..30));
assert_eq!(ranges.lookup(30), Err(2));
assert_eq!(ranges.lookup(32), Err(2));
assert_eq!(ranges.lookup(33), Ok(33..55));
assert_eq!(ranges.lookup(54), Ok(33..55));
assert_eq!(ranges.lookup(55), Err(3));
}
}

View File

@ -1,11 +1,11 @@
use crate::bank_forks::BankForks;
use crate::blocktree::{Blocktree, SlotMeta};
use crate::entry::{Entry, EntrySlice};
use crate::leader_schedule_cache::LeaderScheduleCache;
use rand::seq::SliceRandom;
use rand::thread_rng;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_ledger::blocktree::{Blocktree, SlotMeta};
use solana_ledger::entry::{Entry, EntrySlice};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
use solana_runtime::bank::Bank;
use solana_runtime::transaction_batch::TransactionBatch;
@ -450,12 +450,12 @@ fn process_pending_slots(
#[cfg(test)]
pub mod tests {
use super::*;
use crate::{
blocktree::create_new_tmp_ledger,
entry::{create_ticks, next_entry, next_entry_mut, Entry},
genesis_utils::{create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo},
use crate::genesis_utils::{
create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo,
};
use rand::{thread_rng, Rng};
use solana_ledger::blocktree::create_new_tmp_ledger;
use solana_ledger::entry::{create_ticks, next_entry, next_entry_mut, Entry};
use solana_sdk::{
epoch_schedule::EpochSchedule,
hash::Hash,

View File

@ -2,12 +2,12 @@
use self::broadcast_fake_blobs_run::BroadcastFakeBlobsRun;
use self::fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun;
use self::standard_broadcast_run::StandardBroadcastRun;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, ClusterInfoError};
use crate::poh_recorder::WorkingBankEntry;
use crate::result::{Error, Result};
use crate::service::Service;
use crate::staking_utils;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::staking_utils;
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@ -191,11 +191,11 @@ impl Service for BroadcastStage {
#[cfg(test)]
mod test {
use super::*;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::create_ticks;
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::service::Service;
use solana_ledger::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_ledger::entry::create_ticks;
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;

View File

@ -1,6 +1,6 @@
use super::*;
use crate::entry::Entry;
use crate::shred::{Shredder, RECOMMENDED_FEC_RATE};
use solana_ledger::entry::Entry;
use solana_ledger::shred::{Shredder, RECOMMENDED_FEC_RATE};
use solana_sdk::hash::Hash;
pub(super) struct BroadcastFakeBlobsRun {

View File

@ -1,6 +1,6 @@
use crate::entry::Entry;
use crate::poh_recorder::WorkingBankEntry;
use crate::result::Result;
use solana_ledger::entry::Entry;
use solana_runtime::bank::Bank;
use std::sync::mpsc::Receiver;
use std::sync::Arc;

View File

@ -1,5 +1,5 @@
use super::*;
use crate::shred::{Shredder, RECOMMENDED_FEC_RATE};
use solana_ledger::shred::{Shredder, RECOMMENDED_FEC_RATE};
use solana_sdk::hash::Hash;
pub(super) struct FailEntryVerificationBroadcastRun {}

View File

@ -1,8 +1,8 @@
use super::broadcast_utils::{self, ReceiveResults};
use super::*;
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
use crate::entry::Entry;
use crate::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE};
use solana_ledger::entry::Entry;
use solana_ledger::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE};
use solana_sdk::signature::Keypair;
use solana_sdk::timing::duration_as_us;
use std::time::Duration;
@ -294,11 +294,11 @@ impl BroadcastRun for StandardBroadcastRun {
#[cfg(test)]
mod test {
use super::*;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::cluster_info::{ClusterInfo, Node};
use crate::entry::create_ticks;
use crate::genesis_utils::create_genesis_block;
use crate::shred::max_ticks_per_n_shreds;
use solana_ledger::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_ledger::entry::create_ticks;
use solana_ledger::shred::max_ticks_per_n_shreds;
use solana_runtime::bank::Bank;
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::signature::{Keypair, KeypairUtil};

View File

@ -1,4 +1,4 @@
use crate::blocktree::Blocktree;
use solana_ledger::blocktree::Blocktree;
use std::fs::File;
use std::io;
use std::io::{BufWriter, Write};
@ -72,11 +72,11 @@ pub fn chacha_cbc_encrypt_ledger(
#[cfg(test)]
mod tests {
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::Blocktree;
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::entry::Entry;
use crate::gen_keys::GenKeys;
use solana_ledger::blocktree::get_tmp_ledger_path;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::entry::Entry;
use solana_sdk::hash::{hash, Hash, Hasher};
use solana_sdk::signature::KeypairUtil;
use solana_sdk::system_transaction;

View File

@ -1,8 +1,8 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU
use crate::blocktree::Blocktree;
use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use crate::perf_libs;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::perf_libs;
use solana_sdk::hash::Hash;
use std::io;
use std::mem::size_of;
@ -113,10 +113,10 @@ pub fn chacha_cbc_encrypt_file_many_keys(
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::entry::create_ticks;
use crate::replicator::sample_file;
use solana_ledger::blocktree::get_tmp_ledger_path;
use solana_ledger::entry::create_ticks;
use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::fs::{remove_dir_all, remove_file};

View File

@ -13,7 +13,6 @@
//!
//! Bank needs to provide an interface for us to query the stake weight
use crate::bank_forks::BankForks;
use crate::blocktree::Blocktree;
use crate::contact_info::ContactInfo;
use crate::crds_gossip::CrdsGossip;
use crate::crds_gossip_error::CrdsGossipError;
@ -23,7 +22,6 @@ use crate::packet::{to_shared_blob, Blob, Packet, SharedBlob};
use crate::repair_service::RepairType;
use crate::result::{Error, Result};
use crate::sendmmsg::{multicast, send_mmsg};
use crate::staking_utils;
use crate::streamer::{BlobReceiver, BlobSender};
use crate::weighted_shuffle::{weighted_best, weighted_shuffle};
use bincode::{deserialize, serialize, serialized_size};
@ -32,6 +30,8 @@ use itertools::Itertools;
use rand::SeedableRng;
use rand::{thread_rng, Rng};
use rand_chacha::ChaChaRng;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::staking_utils;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_netutil::{
bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range,
@ -1779,17 +1779,16 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::tests::make_many_slot_entries;
use crate::blocktree::Blocktree;
use crate::blocktree_processor::tests::fill_blocktree_slot_with_ticks;
use crate::crds_value::CrdsValueLabel;
use crate::repair_service::RepairType;
use crate::result::Error;
use crate::shred::max_ticks_per_n_shreds;
use crate::shred::{Shred, ShredHeader};
use crate::test_tx::test_tx;
use rayon::prelude::*;
use solana_ledger::blocktree::get_tmp_ledger_path;
use solana_ledger::blocktree::make_many_slot_entries;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::shred::{max_ticks_per_n_shreds, Shred, ShredHeader};
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::HashSet;

View File

@ -1,12 +1,13 @@
use crate::blocktree::Blocktree;
use crate::cluster_info::ClusterInfo;
use crate::crds_value::EpochSlots;
use crate::result::Result;
use crate::rooted_slot_iterator::RootedSlotIterator;
use crate::service::Service;
use byteorder::{ByteOrder, LittleEndian};
use rand::seq::SliceRandom;
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_ledger::blocktree::Blocktree;
use solana_metrics::datapoint;
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
@ -264,7 +265,7 @@ impl ClusterInfoRepairListener {
num_slots_to_repair: usize,
epoch_schedule: &EpochSchedule,
) -> Result<()> {
let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root);
let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blocktree);
if slot_iter.is_err() {
info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
@ -479,11 +480,11 @@ impl Service for ClusterInfoRepairListener {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::tests::make_many_slot_entries;
use crate::cluster_info::Node;
use crate::packet::{Blob, SharedBlob};
use crate::streamer;
use solana_ledger::blocktree::get_tmp_ledger_path;
use solana_ledger::blocktree::make_many_slot_entries;
use std::collections::BTreeSet;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;

View File

@ -5,8 +5,8 @@
// copies from host memory to GPU memory unless the memory is page-pinned and
// cannot be paged to disk. The cuda driver provides these interfaces to pin and unpin memory.
use crate::perf_libs;
use crate::recycler::Reset;
use solana_ledger::perf_libs;
use std::ops::{Deref, DerefMut};
#[cfg(feature = "pin_gpu_memory")]

View File

@ -1,493 +0,0 @@
//! The `entry` module is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created.
use crate::perf_libs;
use crate::poh::Poh;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_merkle_tree::MerkleTree;
use solana_metrics::inc_new_counter_warn;
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::hash::Hash;
use solana_sdk::timing;
use solana_sdk::transaction::Transaction;
use std::cell::RefCell;
use std::sync::mpsc::{Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Instant;
pub const NUM_THREADS: u32 = 10;
// Per-thread rayon pool used for parallel entry verification, sized by the
// crate-wide thread limit so verification doesn't oversubscribe the machine.
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
    .num_threads(get_thread_count())
    .build()
    .unwrap()));
// Channel halves for passing batches of entries between pipeline stages.
pub type EntrySender = Sender<Vec<Entry>>;
pub type EntryReceiver = Receiver<Vec<Entry>>;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `hash` field is the result
/// of hashing `hash` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `hash` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Or said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
///
/// Derives `Serialize`/`Deserialize` so entries can be (de)serialized with serde.
#[derive(Serialize, Deserialize, Debug, Default, PartialEq, Eq, Clone)]
pub struct Entry {
    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,
    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    pub hash: Hash,
    /// An unordered list of transactions that were observed before the Entry ID was
    /// generated. They may have been observed before a previous Entry ID but were
    /// pushed back into this list to ensure deterministic interpretation of the ledger.
    pub transactions: Vec<Transaction>,
}
impl Entry {
    /// Creates the next Entry `num_hashes` after `start_hash`.
    pub fn new(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Self {
        match (num_hashes, transactions.is_empty()) {
            // No hashing requested and nothing to record: the hash chain
            // does not advance.
            (0, true) => Entry {
                num_hashes: 0,
                hash: *prev_hash,
                transactions,
            },
            // Transactions with num_hashes == 0: next_hash performs a single
            // hash so the transactions are committed to the chain, and
            // num_hashes is reported as 1 accordingly.
            (0, false) => Entry {
                num_hashes: 1,
                hash: next_hash(prev_hash, 1, &transactions),
                transactions,
            },
            // General case: advance the chain `num_hashes` steps, mixing in
            // the transactions (if any) on the final hash. For a tick, pass
            // num_hashes = 1 and an empty transaction list.
            _ => Entry {
                num_hashes,
                hash: next_hash(prev_hash, num_hashes, &transactions),
                transactions,
            },
        }
    }
    /// Like `new`, but updates `start_hash` to the new entry's hash and
    /// resets `num_hashes` to 0, so the caller can chain entries in a loop.
    pub fn new_mut(
        start_hash: &mut Hash,
        num_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, *num_hashes, transactions);
        *num_hashes = 0;
        *start_hash = entry.hash;
        entry
    }
    /// Test helper: build a tick entry directly from its fields without
    /// performing any hashing.
    #[cfg(test)]
    pub fn new_tick(num_hashes: u64, hash: &Hash) -> Self {
        Entry {
            num_hashes,
            hash: *hash,
            transactions: Vec::new(),
        }
    }
    /// Verifies self.hash is the result of hashing a `start_hash` `self.num_hashes` times.
    /// If the transaction is not a Tick, then hash that as well.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
        let valid = self.hash == ref_hash;
        if !valid {
            warn!(
                "next_hash is invalid expected: {:?} actual: {:?}",
                self.hash, ref_hash
            );
        }
        valid
    }
    /// An entry with no transactions is a tick.
    pub fn is_tick(&self) -> bool {
        self.transactions.is_empty()
    }
}
/// Hash a batch of transactions down to a single digest.
///
/// Only the signatures are committed to (a hash of a slice of transactions
/// only needs to hash the signatures): the Merkle root over all signatures
/// is returned, or `Hash::default()` for an empty batch.
pub fn hash_transactions(transactions: &[Transaction]) -> Hash {
    let signatures: Vec<_> = transactions
        .iter()
        .flat_map(|tx| tx.signatures.iter())
        .collect();
    MerkleTree::new(&signatures)
        .get_root()
        .copied()
        .unwrap_or_default()
}
/// Creates the hash `num_hashes` after `start_hash`. If the transaction batch
/// contains a signature, the final hash mixes in a hash of the batch. If
/// `num_hashes` is zero and there's no transaction data, `start_hash` is
/// returned unchanged.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    if num_hashes == 0 && transactions.is_empty() {
        return *start_hash;
    }
    let mut poh = Poh::new(*start_hash, None);
    // All but the last step are plain PoH hashes; the final step either
    // ticks or records the transaction mixin.
    poh.hash(num_hashes.saturating_sub(1));
    let last = if transactions.is_empty() {
        poh.tick()
    } else {
        poh.record(hash_transactions(transactions))
    };
    last.unwrap().hash
}
// an EntrySlice is a slice of Entries
pub trait EntrySlice {
    /// Verifies the hashes and counts of a slice of transactions are all consistent.
    fn verify_cpu(&self, start_hash: &Hash) -> bool;
    /// Like `verify_cpu`, but may offload the PoH hash chain to an
    /// accelerator when a perf library is available and the batch is large.
    fn verify(&self, start_hash: &Hash) -> bool;
}
impl EntrySlice for [Entry] {
    fn verify_cpu(&self, start_hash: &Hash) -> bool {
        let now = Instant::now();
        // Prepend a synthetic zero-hash entry carrying start_hash so each
        // entry can be verified against its predecessor's hash.
        let genesis = [Entry {
            num_hashes: 0,
            hash: *start_hash,
            transactions: vec![],
        }];
        // Pair every entry with its predecessor and verify the pairs on the
        // shared rayon pool.
        let entry_pairs = genesis.par_iter().chain(self).zip(self);
        let res = PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                entry_pairs.all(|(x0, x1)| {
                    let r = x1.verify(&x0.hash);
                    if !r {
                        warn!(
                            "entry invalid!: x0: {:?}, x1: {:?} num txs: {}",
                            x0.hash,
                            x1.hash,
                            x1.transactions.len()
                        );
                    }
                    r
                })
            })
        });
        inc_new_counter_warn!(
            "entry_verify-duration",
            timing::duration_as_ms(&now.elapsed()) as usize
        );
        res
    }
    fn verify(&self, start_hash: &Hash) -> bool {
        // Without a perf library loaded, fall back to the CPU path.
        let api = perf_libs::api();
        if api.is_none() {
            return self.verify_cpu(start_hash);
        }
        let api = api.unwrap();
        inc_new_counter_warn!("entry_verify-num_entries", self.len() as usize);
        // Use CPU verify if the batch length is < 1K
        if self.len() < 1024 {
            return self.verify_cpu(start_hash);
        }
        let start = Instant::now();
        let genesis = [Entry {
            num_hashes: 0,
            hash: *start_hash,
            transactions: vec![],
        }];
        // One starting hash per entry: start_hash for the first entry,
        // then each entry's own hash for its successor.
        let hashes: Vec<Hash> = genesis
            .iter()
            .chain(self)
            .map(|entry| entry.hash)
            .take(self.len())
            .collect();
        // The perf library advances each starting hash num_hashes - 1 steps;
        // the final step (tick or record) is replayed on the CPU below.
        let num_hashes_vec: Vec<u64> = self
            .iter()
            .map(|entry| entry.num_hashes.saturating_sub(1))
            .collect();
        let length = self.len();
        let hashes = Arc::new(Mutex::new(hashes));
        let hashes_clone = hashes.clone();
        let gpu_wait = Instant::now();
        // Run the bulk hashing on a dedicated thread while this thread
        // computes transaction mixin hashes in parallel.
        let gpu_verify_thread = thread::spawn(move || {
            let mut hashes = hashes_clone.lock().unwrap();
            let res;
            unsafe {
                // SAFETY(review): FFI into the loaded perf library; assumes
                // poh_verify_many hashes `length` contiguous Hash-sized
                // elements in place — contract defined by the perf library.
                res = (api.poh_verify_many)(
                    hashes.as_mut_ptr() as *mut u8,
                    num_hashes_vec.as_ptr(),
                    length,
                    1,
                );
            }
            if res != 0 {
                panic!("GPU PoH verify many failed");
            }
        });
        // Mixin hash per entry: None for ticks, Some(hash of txs) otherwise.
        let tx_hashes: Vec<Option<Hash>> = PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                self.into_par_iter()
                    .map(|entry| {
                        if entry.transactions.is_empty() {
                            None
                        } else {
                            Some(hash_transactions(&entry.transactions))
                        }
                    })
                    .collect()
            })
        });
        gpu_verify_thread.join().unwrap();
        inc_new_counter_warn!(
            "entry_verify-gpu_thread",
            timing::duration_as_ms(&gpu_wait.elapsed()) as usize
        );
        // Sole Arc owner after join, so try_unwrap/into_inner cannot fail.
        let hashes = Arc::try_unwrap(hashes).unwrap().into_inner().unwrap();
        // Replay the final PoH step (tick or record) per entry and compare
        // against the recorded entry hash.
        let res =
            PAR_THREAD_POOL.with(|thread_pool| {
                thread_pool.borrow().install(|| {
                    hashes.into_par_iter().zip(tx_hashes).zip(self).all(
                        |((hash, tx_hash), answer)| {
                            if answer.num_hashes == 0 {
                                hash == answer.hash
                            } else {
                                let mut poh = Poh::new(hash, None);
                                if let Some(mixin) = tx_hash {
                                    poh.record(mixin).unwrap().hash == answer.hash
                                } else {
                                    poh.tick().unwrap().hash == answer.hash
                                }
                            }
                        },
                    )
                })
            });
        inc_new_counter_warn!(
            "entry_verify-duration",
            timing::duration_as_ms(&start.elapsed()) as usize
        );
        res
    }
}
/// Like `Entry::new`, but advances `start` to the new entry's hash so the
/// caller can chain entries in a loop.
pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    let entry = Entry::new(start, num_hashes, transactions);
    *start = entry.hash;
    entry
}
/// Generate `num_ticks` consecutive tick entries chained from `hash`.
pub fn create_ticks(num_ticks: u64, mut hash: Hash) -> Vec<Entry> {
    (0..num_ticks)
        .map(|_| next_entry_mut(&mut hash, 1, vec![]))
        .collect()
}
#[cfg(test)]
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(prev_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    // A transaction entry must advance the hash chain by at least one step.
    assert!(num_hashes > 0 || transactions.is_empty());
    let hash = next_hash(prev_hash, num_hashes, &transactions);
    Entry {
        num_hashes,
        hash,
        transactions,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::entry::Entry;
    use chrono::prelude::Utc;
    use solana_budget_api::budget_instruction;
    use solana_sdk::{
        hash::hash,
        signature::{Keypair, KeypairUtil},
        system_transaction,
    };
    // Helper: a signed budget payment transaction (keypair pays itself).
    fn create_sample_payment(keypair: &Keypair, hash: Hash) -> Transaction {
        let pubkey = keypair.pubkey();
        let ixs = budget_instruction::payment(&pubkey, &pubkey, 1);
        Transaction::new_signed_instructions(&[keypair], ixs, hash)
    }
    // Helper: a signed budget apply_timestamp (witness) transaction.
    fn create_sample_timestamp(keypair: &Keypair, hash: Hash) -> Transaction {
        let pubkey = keypair.pubkey();
        let ix = budget_instruction::apply_timestamp(&pubkey, &pubkey, &pubkey, Utc::now());
        Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
    }
    // Helper: a signed budget apply_signature (witness) transaction.
    fn create_sample_apply_signature(keypair: &Keypair, hash: Hash) -> Transaction {
        let pubkey = keypair.pubkey();
        let ix = budget_instruction::apply_signature(&pubkey, &pubkey, &pubkey);
        Transaction::new_signed_instructions(&[keypair], vec![ix], hash)
    }
    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used
        assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
        assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
        assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
    }
    // Reordering transactions inside an entry must invalidate its hash,
    // since hash_transactions commits to the signature order.
    #[test]
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();
        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero);
        let tx1 = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));
        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }
    // Same reorder attack, but with witness (budget) transactions.
    #[test]
    fn test_witness_reorder_attack() {
        let zero = Hash::default();
        // First, verify entries
        let keypair = Keypair::new();
        let tx0 = create_sample_timestamp(&keypair, zero);
        let tx1 = create_sample_apply_signature(&keypair, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));
        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }
    #[test]
    fn test_next_entry() {
        let zero = Hash::default();
        // A tick with num_hashes = 1 advances the chain.
        let tick = next_entry(&zero, 1, vec![]);
        assert_eq!(tick.num_hashes, 1);
        assert_ne!(tick.hash, zero);
        // num_hashes = 0 with no transactions leaves the chain unchanged.
        let tick = next_entry(&zero, 0, vec![]);
        assert_eq!(tick.num_hashes, 0);
        assert_eq!(tick.hash, zero);
        let keypair = Keypair::new();
        let tx0 = create_sample_timestamp(&keypair, zero);
        let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
        assert_eq!(entry0.num_hashes, 1);
        assert_eq!(entry0.hash, next_hash(&zero, 1, &vec![tx0]));
    }
    // next_entry asserts num_hashes > 0 when transactions are present.
    #[test]
    #[should_panic]
    fn test_next_entry_panic() {
        let zero = Hash::default();
        let keypair = Keypair::new();
        let tx = system_transaction::create_user_account(&keypair, &keypair.pubkey(), 0, zero);
        next_entry(&zero, 0, vec![tx]);
    }
    #[test]
    fn test_verify_slice() {
        solana_logger::setup();
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        assert!(vec![][..].verify(&zero)); // base case
        assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
        assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
        assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
        let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
        bad_ticks[1].hash = one;
        assert!(!bad_ticks.verify(&zero)); // inductive step, bad
    }
    #[test]
    fn test_verify_slice_with_hashes() {
        solana_logger::setup();
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        let two = hash(&one.as_ref());
        assert!(vec![][..].verify(&one)); // base case
        assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1
        assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad
        let mut ticks = vec![next_entry(&one, 1, vec![])];
        ticks.push(next_entry(&ticks.last().unwrap().hash, 1, vec![]));
        assert!(ticks.verify(&one)); // inductive step
        let mut bad_ticks = vec![next_entry(&one, 1, vec![])];
        bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![]));
        bad_ticks[1].hash = one;
        assert!(!bad_ticks.verify(&one)); // inductive step, bad
    }
    #[test]
    fn test_verify_slice_with_hashes_and_transactions() {
        solana_logger::setup();
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        let two = hash(&one.as_ref());
        let alice_pubkey = Keypair::default();
        let tx0 = create_sample_payment(&alice_pubkey, one);
        let tx1 = create_sample_timestamp(&alice_pubkey, one);
        assert!(vec![][..].verify(&one)); // base case
        assert!(vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&one)); // singleton case 1
        assert!(!vec![next_entry(&one, 1, vec![tx0.clone()])][..].verify(&two)); // singleton case 2, bad
        let mut ticks = vec![next_entry(&one, 1, vec![tx0.clone()])];
        ticks.push(next_entry(
            &ticks.last().unwrap().hash,
            1,
            vec![tx1.clone()],
        ));
        assert!(ticks.verify(&one)); // inductive step
        let mut bad_ticks = vec![next_entry(&one, 1, vec![tx0])];
        bad_ticks.push(next_entry(&bad_ticks.last().unwrap().hash, 1, vec![tx1]));
        bad_ticks[1].hash = one;
        assert!(!bad_ticks.verify(&one)); // inductive step, bad
    }
}

View File

@ -1,203 +0,0 @@
//! # Erasure Coding and Recovery
//!
//! Blobs are logically grouped into erasure sets or blocks. Each set contains 16 sequential data
//! blobs and 4 sequential coding blobs.
//!
//! Coding blobs in each set starting from `start_idx`:
//! For each erasure set:
//! generate `NUM_CODING` coding_blobs.
//! index the coding blobs from `start_idx` to `start_idx + NUM_CODING - 1`.
//!
//! model of an erasure set, with top row being data blobs and second being coding
//! |<======================= NUM_DATA ==============================>|
//! |<==== NUM_CODING ===>|
//! +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+
//! | D | | D | | D | | D | | D | | D | | D | | D | | D | | D |
//! +---+ +---+ +---+ +---+ +---+ . . . +---+ +---+ +---+ +---+ +---+
//! | C | | C | | C | | C | | | | | | | | | | | | |
//! +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+ +---+
//!
//! blob structure for coding blobs
//!
//! + ------- meta is set and used by transport, meta.size is actual length
//! | of data in the byte array blob.data
//! |
//! | + -- data is stuff shipped over the wire, and has an included
//! | | header
//! V V
//! +----------+------------------------------------------------------------+
//! | meta | data |
//! |+---+-- |+---+---+---+---+------------------------------------------+|
//! || s | . || i | | f | s | ||
//! || i | . || n | i | l | i | ||
//! || z | . || d | d | a | z | blob.data(), or blob.data_mut() ||
//! || e | || e | | g | e | ||
//! |+---+-- || x | | s | | ||
//! | |+---+---+---+---+------------------------------------------+|
//! +----------+------------------------------------------------------------+
//! | |<=== coding blob part for "coding" =======>|
//! | |
//! |<============== data blob part for "coding" ==============>|
//!
//!
use reed_solomon_erasure::galois_8::Field;
use reed_solomon_erasure::ReedSolomon;
//TODO(sakridge) pick these values
/// Number of data blobs per erasure set.
pub const NUM_DATA: usize = 8;
/// Number of coding blobs per erasure set; also the maximum number of blobs
/// that can go missing while the set remains recoverable.
pub const NUM_CODING: usize = 8;
/// Shape of an erasure set: how many data and coding blobs it contains.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct ErasureConfig {
    /// Number of data blobs per set.
    num_data: usize,
    /// Number of coding (parity) blobs per set.
    num_coding: usize,
}
impl Default for ErasureConfig {
fn default() -> ErasureConfig {
ErasureConfig {
num_data: NUM_DATA,
num_coding: NUM_CODING,
}
}
}
impl ErasureConfig {
    /// Build a config with explicit data/coding blob counts.
    pub fn new(num_data: usize, num_coding: usize) -> ErasureConfig {
        ErasureConfig {
            num_coding,
            num_data,
        }
    }
    /// Number of data blobs per set.
    pub fn num_data(self) -> usize {
        self.num_data
    }
    /// Number of coding blobs per set.
    pub fn num_coding(self) -> usize {
        self.num_coding
    }
}
// Module-local Result specialized to reed-solomon errors.
type Result<T> = std::result::Result<T, reed_solomon_erasure::Error>;
/// Represents an erasure "session" with a particular configuration and number of data and coding
/// blobs
#[derive(Debug, Clone)]
pub struct Session(ReedSolomon<Field>);
impl Session {
    /// Build a session encoding `data_count` data blocks with
    /// `coding_count` parity blocks. Errors if the counts are invalid for
    /// Reed-Solomon (e.g. zero).
    pub fn new(data_count: usize, coding_count: usize) -> Result<Session> {
        let rs = ReedSolomon::new(data_count, coding_count)?;
        Ok(Session(rs))
    }
    /// Like `new`, but takes the counts from an `ErasureConfig`.
    pub fn new_from_config(config: &ErasureConfig) -> Result<Session> {
        let rs = ReedSolomon::new(config.num_data, config.num_coding)?;
        Ok(Session(rs))
    }
    /// Create coding blocks by overwriting `parity`
    pub fn encode(&self, data: &[&[u8]], parity: &mut [&mut [u8]]) -> Result<()> {
        self.0.encode_sep(data, parity)?;
        Ok(())
    }
    /// Recover missing data blocks in place.
    /// # Arguments
    /// * `blocks` - all data blocks followed by all coding blocks, each
    ///   paired with a flag that is `true` when that block is present;
    ///   absent data blocks are reconstructed by overwriting their buffers
    ///   (presumably per `reed_solomon_erasure::reconstruct_data` — the
    ///   previous doc described parameters that do not exist)
    pub fn decode_blocks(&self, blocks: &mut [(&mut [u8], bool)]) -> Result<()> {
        self.0.reconstruct_data(blocks)?;
        Ok(())
    }
}
impl Default for Session {
    /// Session sized for the stock `NUM_DATA`/`NUM_CODING` erasure set.
    fn default() -> Session {
        // Both counts are nonzero compile-time constants, so construction
        // is expected to succeed.
        let session = Session::new(NUM_DATA, NUM_CODING);
        session.unwrap()
    }
}
#[cfg(test)]
pub mod test {
    use super::*;
    /// Specifies the contents of a 16-data-blob and 4-coding-blob erasure set
    /// Exists to be passed to `generate_blocktree_with_coding`
    #[derive(Debug, Copy, Clone)]
    pub struct ErasureSpec {
        /// Which 16-blob erasure set this represents
        pub set_index: u64,
        pub num_data: usize,
        pub num_coding: usize,
    }
    /// Specifies the contents of a slot
    /// Exists to be passed to `generate_blocktree_with_coding`
    #[derive(Debug, Clone)]
    pub struct SlotSpec {
        pub slot: u64,
        pub set_specs: Vec<ErasureSpec>,
    }
    // Round-trip test: encode 4 data blocks into 2 coding blocks, erase one
    // data block, then reconstruct it and compare against the original.
    #[test]
    fn test_coding() {
        const N_DATA: usize = 4;
        const N_CODING: usize = 2;
        let session = Session::new(N_DATA, N_CODING).unwrap();
        let mut vs: Vec<Vec<u8>> = (0..N_DATA as u8).map(|i| (i..(16 + i)).collect()).collect();
        let v_orig: Vec<u8> = vs[0].clone();
        let mut coding_blocks: Vec<_> = (0..N_CODING).map(|_| vec![0u8; 16]).collect();
        let mut coding_blocks_slices: Vec<_> =
            coding_blocks.iter_mut().map(Vec::as_mut_slice).collect();
        let v_slices: Vec<_> = vs.iter().map(Vec::as_slice).collect();
        // Generate the parity blocks.
        session
            .encode(v_slices.as_slice(), coding_blocks_slices.as_mut_slice())
            .expect("encoding must succeed");
        trace!("test_coding: coding blocks:");
        for b in &coding_blocks {
            trace!("test_coding: {:?}", b);
        }
        // Mark one data block as missing and zero it out.
        let erasure: usize = 1;
        let mut present = vec![true; N_DATA + N_CODING];
        present[erasure] = false;
        let erased = vs[erasure].clone();
        // clear an entry
        vs[erasure as usize].copy_from_slice(&[0; 16]);
        // Data blocks followed by coding blocks, each with a present flag.
        let mut blocks: Vec<_> = vs
            .iter_mut()
            .chain(coding_blocks.iter_mut())
            .map(Vec::as_mut_slice)
            .zip(present)
            .collect();
        session
            .decode_blocks(blocks.as_mut_slice())
            .expect("decoding must succeed");
        trace!("test_coding: vs:");
        for v in &vs {
            trace!("test_coding: {:?}", v);
        }
        // Untouched block is intact and the erased block was reconstructed.
        assert_eq!(v_orig, vs[0]);
        assert_eq!(erased, vs[erasure]);
    }
}

View File

@ -1,14 +1 @@
pub use solana_runtime::genesis_utils::{
create_genesis_block_with_leader, GenesisBlockInfo, BOOTSTRAP_LEADER_LAMPORTS,
};
use solana_sdk::pubkey::Pubkey;
// same as genesis_block::create_genesis_block, but with bootstrap_leader staking logic
// for the core crate tests
/// Creates a genesis block funding `mint_lamports`, with a randomly generated
/// bootstrap leader staked at `BOOTSTRAP_LEADER_LAMPORTS`.
pub fn create_genesis_block(mint_lamports: u64) -> GenesisBlockInfo {
    create_genesis_block_with_leader(
        mint_lamports,
        &Pubkey::new_rand(),
        BOOTSTRAP_LEADER_LAMPORTS,
    )
}
pub use solana_ledger::genesis_utils::*;

View File

@ -1,13 +1,13 @@
//! The `gossip_service` module implements the network control plane.
use crate::bank_forks::BankForks;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, VALIDATOR_PORT_RANGE};
use crate::contact_info::ContactInfo;
use crate::service::Service;
use crate::streamer;
use rand::{thread_rng, Rng};
use solana_client::thin_client::{create_client, ThinClient};
use solana_ledger::blocktree::Blocktree;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::net::{IpAddr, SocketAddr, UdpSocket};

View File

@ -1,141 +0,0 @@
use rand::distributions::{Distribution, WeightedIndex};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_sdk::pubkey::Pubkey;
use std::ops::Index;
/// Stake-weighted leader schedule for one epoch.
#[derive(Debug, Default, PartialEq)]
pub struct LeaderSchedule {
    // One leader per slot; out-of-range indices wrap via the `Index` impl below
    slot_leaders: Vec<Pubkey>,
}
impl LeaderSchedule {
    /// Builds a deterministic, stake-weighted schedule of `len` slots from
    /// `seed`; each sampled node holds `repeat` consecutive slots.
    // Note: passing in zero stakers will cause a panic.
    pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64, repeat: u64) -> Self {
        let (ids, stakes): (Vec<_>, Vec<_>) = ids_and_stakes.iter().cloned().unzip();
        let rng = &mut ChaChaRng::from_seed(seed);
        let weighted_index = WeightedIndex::new(stakes).unwrap();
        let mut current_node = Pubkey::default();
        let slot_leaders = (0..len)
            .map(|slot| {
                // Only sample a new leader at the start of each repeat window
                if slot % repeat == 0 {
                    current_node = ids[weighted_index.sample(rng)];
                }
                current_node
            })
            .collect();
        Self { slot_leaders }
    }

    /// Borrows the per-slot leader list.
    pub(crate) fn get_slot_leaders(&self) -> &[Pubkey] {
        &self.slot_leaders
    }
}
impl Index<u64> for LeaderSchedule {
    type Output = Pubkey;
    /// Slot indices wrap around the schedule length.
    fn index(&self, index: u64) -> &Pubkey {
        let len = self.slot_leaders.len();
        &self.slot_leaders[index as usize % len]
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Indexing reads modulo the schedule length, so indices past the end wrap.
    #[test]
    fn test_leader_schedule_index() {
        let pubkey0 = Pubkey::new_rand();
        let pubkey1 = Pubkey::new_rand();
        let leader_schedule = LeaderSchedule {
            slot_leaders: vec![pubkey0, pubkey1],
        };
        assert_eq!(leader_schedule[0], pubkey0);
        assert_eq!(leader_schedule[1], pubkey1);
        assert_eq!(leader_schedule[2], pubkey0);
    }
    // Schedule generation is deterministic for a fixed seed and stake set.
    #[test]
    fn test_leader_schedule_basic() {
        let num_keys = 10;
        let stakes: Vec<_> = (0..num_keys).map(|i| (Pubkey::new_rand(), i)).collect();
        // Random 32-byte seed; determinism is checked by generating twice with it
        let seed = Pubkey::new_rand();
        let mut seed_bytes = [0u8; 32];
        seed_bytes.copy_from_slice(seed.as_ref());
        let len = num_keys * 10;
        let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
        let leader_schedule2 = LeaderSchedule::new(&stakes, seed_bytes, len, 1);
        assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
        // Check that the same schedule is reproducibly generated
        assert_eq!(leader_schedule, leader_schedule2);
    }
    // Each sampled leader must hold `repeat` consecutive slots.
    #[test]
    fn test_repeated_leader_schedule() {
        let num_keys = 10;
        let stakes: Vec<_> = (0..num_keys).map(|i| (Pubkey::new_rand(), i)).collect();
        let seed = Pubkey::new_rand();
        let mut seed_bytes = [0u8; 32];
        seed_bytes.copy_from_slice(seed.as_ref());
        let len = num_keys * 10;
        let repeat = 8;
        let leader_schedule = LeaderSchedule::new(&stakes, seed_bytes, len, repeat);
        assert_eq!(leader_schedule.slot_leaders.len() as u64, len);
        let mut leader_node = Pubkey::default();
        for (i, node) in leader_schedule.slot_leaders.iter().enumerate() {
            if i % repeat as usize == 0 {
                // Start of a repeat window: a fresh sample is allowed
                leader_node = *node;
            } else {
                // Inside a window: the leader must not change
                assert_eq!(leader_node, *node);
            }
        }
    }
    // Pins the exact schedules for a fixed two-staker set at repeat=1 vs repeat=2.
    #[test]
    fn test_repeated_leader_schedule_specific() {
        let alice_pubkey = Pubkey::new_rand();
        let bob_pubkey = Pubkey::new_rand();
        let stakes = vec![(alice_pubkey, 2), (bob_pubkey, 1)];
        let seed = Pubkey::default();
        let mut seed_bytes = [0u8; 32];
        seed_bytes.copy_from_slice(seed.as_ref());
        let len = 8;
        // What the schedule looks like without any repeats
        let leaders1 = LeaderSchedule::new(&stakes, seed_bytes, len, 1).slot_leaders;
        // What the schedule looks like with repeats
        let leaders2 = LeaderSchedule::new(&stakes, seed_bytes, len, 2).slot_leaders;
        assert_eq!(leaders1.len(), leaders2.len());
        let leaders1_expected = vec![
            alice_pubkey,
            alice_pubkey,
            alice_pubkey,
            bob_pubkey,
            alice_pubkey,
            alice_pubkey,
            alice_pubkey,
            alice_pubkey,
        ];
        let leaders2_expected = vec![
            alice_pubkey,
            alice_pubkey,
            alice_pubkey,
            alice_pubkey,
            alice_pubkey,
            alice_pubkey,
            bob_pubkey,
            bob_pubkey,
        ];
        assert_eq!(leaders1, leaders1_expected);
        assert_eq!(leaders2, leaders2_expected);
    }
}

View File

@ -1,585 +0,0 @@
use crate::{blocktree::Blocktree, leader_schedule::LeaderSchedule, leader_schedule_utils};
use solana_runtime::bank::Bank;
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
collections::{hash_map::Entry, HashMap, VecDeque},
sync::{Arc, RwLock},
};
// Epoch -> schedule map, plus insertion order for FIFO eviction
type CachedSchedules = (HashMap<u64, Arc<LeaderSchedule>>, VecDeque<u64>);
const MAX_SCHEDULES: usize = 10;
// Newtype so the capacity's Default is MAX_SCHEDULES rather than zero
struct CacheCapacity(usize);
impl Default for CacheCapacity {
    fn default() -> Self {
        CacheCapacity(MAX_SCHEDULES)
    }
}
#[derive(Default)]
pub struct LeaderScheduleCache {
    // Map from an epoch to a leader schedule for that epoch
    pub cached_schedules: RwLock<CachedSchedules>,
    epoch_schedule: EpochSchedule,
    // Highest epoch a schedule may be requested for; advanced by set_root()
    max_epoch: RwLock<u64>,
    // Capacity bound on cached_schedules (FIFO eviction)
    max_schedules: CacheCapacity,
}
impl LeaderScheduleCache {
    /// Builds a cache using the bank's own epoch schedule, rooted at `bank`.
    pub fn new_from_bank(bank: &Bank) -> Self {
        Self::new(*bank.epoch_schedule(), bank)
    }
    /// Builds a cache rooted at `root_bank`, pre-computing the schedule for
    /// every epoch from 0 through leader_schedule_epoch(root).
    pub fn new(epoch_schedule: EpochSchedule, root_bank: &Bank) -> Self {
        let cache = Self {
            cached_schedules: RwLock::new((HashMap::new(), VecDeque::new())),
            epoch_schedule,
            max_epoch: RwLock::new(0),
            max_schedules: CacheCapacity::default(),
        };
        // This sets the root and calculates the schedule at leader_schedule_epoch(root)
        cache.set_root(root_bank);
        // Calculate the schedule for all epochs between 0 and leader_schedule_epoch(root)
        let stakers_epoch = epoch_schedule.get_leader_schedule_epoch(root_bank.slot());
        for epoch in 0..stakers_epoch {
            let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
            cache.slot_leader_at(first_slot_in_epoch, Some(root_bank));
        }
        cache
    }
    /// Sets the cache capacity; a request for zero is ignored.
    pub fn set_max_schedules(&mut self, max_schedules: usize) {
        if max_schedules > 0 {
            self.max_schedules = CacheCapacity(max_schedules);
        }
    }
    /// Current cache capacity.
    pub fn max_schedules(&self) -> usize {
        self.max_schedules.0
    }
    /// Advances the confirmed-epoch bound to leader_schedule_epoch(root) and
    /// eagerly computes that epoch's schedule. Panics if the bound would move
    /// backwards.
    pub fn set_root(&self, root_bank: &Bank) {
        let new_max_epoch = self
            .epoch_schedule
            .get_leader_schedule_epoch(root_bank.slot());
        let old_max_epoch = {
            let mut max_epoch = self.max_epoch.write().unwrap();
            let old_max_epoch = *max_epoch;
            *max_epoch = new_max_epoch;
            assert!(new_max_epoch >= old_max_epoch);
            old_max_epoch
        };
        // Calculate the epoch as soon as it's rooted
        if new_max_epoch > old_max_epoch {
            self.compute_epoch_schedule(new_max_epoch, root_bank);
        }
    }
    /// Returns the leader for `slot`, computing the schedule through `bank`
    /// when one is provided; with no bank only cached schedules are consulted.
    pub fn slot_leader_at(&self, slot: u64, bank: Option<&Bank>) -> Option<Pubkey> {
        if let Some(bank) = bank {
            self.slot_leader_at_else_compute(slot, bank)
        } else if self.epoch_schedule.slots_per_epoch == 0 {
            None
        } else {
            self.slot_leader_at_no_compute(slot)
        }
    }
    /// Return the (next slot, last slot) after the given current_slot that the given node will be leader
    pub fn next_leader_slot(
        &self,
        pubkey: &Pubkey,
        mut current_slot: u64,
        bank: &Bank,
        blocktree: Option<&Blocktree>,
    ) -> Option<(u64, u64)> {
        let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
        let mut first_slot = None;
        let mut last_slot = current_slot;
        let max_epoch = *self.max_epoch.read().unwrap();
        if epoch > max_epoch {
            debug!(
                "Requested next leader in slot: {} of unconfirmed epoch: {}",
                current_slot + 1,
                epoch
            );
            return None;
        }
        while let Some(leader_schedule) = self.get_epoch_schedule_else_compute(epoch, bank) {
            // clippy thinks I should do this:
            // for (i, <item>) in leader_schedule
            //  .iter()
            //  .enumerate()
            //  .take(bank.get_slots_in_epoch(epoch))
            //  .skip(from_slot_index + 1) {
            //
            // but leader_schedule doesn't implement Iter...
            #[allow(clippy::needless_range_loop)]
            for i in start_index..bank.get_slots_in_epoch(epoch) {
                current_slot += 1;
                if *pubkey == leader_schedule[i] {
                    if let Some(blocktree) = blocktree {
                        if let Some(meta) = blocktree.meta(current_slot).unwrap() {
                            // We have already sent a blob for this slot, so skip it
                            if meta.received > 0 {
                                continue;
                            }
                        }
                    }
                    if first_slot.is_none() {
                        first_slot = Some(current_slot);
                    }
                    last_slot = current_slot;
                } else if let Some(first) = first_slot {
                    // The contiguous run of this node's leader slots has ended
                    return Some((first, last_slot));
                }
            }
            epoch += 1;
            if epoch > max_epoch {
                break;
            }
            start_index = 0;
        }
        first_slot.map(|slot| (slot, last_slot))
    }
    // Looks up the leader from already-cached schedules; never computes.
    fn slot_leader_at_no_compute(&self, slot: u64) -> Option<Pubkey> {
        let (epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(slot);
        self.cached_schedules
            .read()
            .unwrap()
            .0
            .get(&epoch)
            .map(|schedule| schedule[slot_index])
    }
    fn slot_leader_at_else_compute(&self, slot: u64, bank: &Bank) -> Option<Pubkey> {
        let cache_result = self.slot_leader_at_no_compute(slot);
        // Forbid asking for slots in an unconfirmed epoch
        let bank_epoch = self.epoch_schedule.get_epoch_and_slot_index(slot).0;
        if bank_epoch > *self.max_epoch.read().unwrap() {
            debug!(
                "Requested leader in slot: {} of unconfirmed epoch: {}",
                slot, bank_epoch
            );
            return None;
        }
        if cache_result.is_some() {
            cache_result
        } else {
            let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
            self.compute_epoch_schedule(epoch, bank)
                .map(|epoch_schedule| epoch_schedule[slot_index])
        }
    }
    fn get_epoch_schedule_else_compute(
        &self,
        epoch: u64,
        bank: &Bank,
    ) -> Option<Arc<LeaderSchedule>> {
        // Keep the lookup in its own statement so the read lock is released
        // before compute_epoch_schedule takes the write lock
        let epoch_schedule = self.cached_schedules.read().unwrap().0.get(&epoch).cloned();
        epoch_schedule.or_else(|| self.compute_epoch_schedule(epoch, bank))
    }
    // Computes, caches, and returns the schedule for `epoch`; None when the
    // bank has no staked nodes for that epoch.
    fn compute_epoch_schedule(&self, epoch: u64, bank: &Bank) -> Option<Arc<LeaderSchedule>> {
        let leader_schedule = leader_schedule_utils::leader_schedule(epoch, bank);
        leader_schedule.map(|leader_schedule| {
            let leader_schedule = Arc::new(leader_schedule);
            let (ref mut cached_schedules, ref mut order) = *self.cached_schedules.write().unwrap();
            // Check to see if schedule exists in case somebody already inserted in the time we were
            // waiting for the lock
            let entry = cached_schedules.entry(epoch);
            if let Entry::Vacant(v) = entry {
                v.insert(leader_schedule.clone());
                order.push_back(epoch);
                Self::retain_latest(cached_schedules, order, self.max_schedules());
            }
            leader_schedule
        })
    }
    // Evicts oldest-inserted schedules until at most `max_schedules` remain.
    fn retain_latest(
        schedules: &mut HashMap<u64, Arc<LeaderSchedule>>,
        order: &mut VecDeque<u64>,
        max_schedules: usize,
    ) {
        while schedules.len() > max_schedules {
            let first = order.pop_front().unwrap();
            schedules.remove(&first);
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
blocktree::{get_tmp_ledger_path, tests::make_slot_entries},
genesis_utils::{
create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo,
BOOTSTRAP_LEADER_LAMPORTS,
},
staking_utils::tests::setup_vote_and_stake_accounts,
};
use solana_runtime::bank::Bank;
use solana_sdk::epoch_schedule::{
EpochSchedule, DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, DEFAULT_SLOTS_PER_EPOCH,
MINIMUM_SLOTS_PER_EPOCH,
};
use std::{sync::mpsc::channel, sync::Arc, thread::Builder};
#[test]
fn test_new_cache() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2);
let bank = Bank::new(&genesis_block);
let cache = LeaderScheduleCache::new_from_bank(&bank);
assert_eq!(bank.slot(), 0);
assert_eq!(cache.max_schedules(), MAX_SCHEDULES);
// Epoch schedule for all epochs in the range:
// [0, stakers_epoch(bank.slot())] should
// be calculated by constructor
let epoch_schedule = bank.epoch_schedule();
let stakers_epoch = bank.get_leader_schedule_epoch(bank.slot());
for epoch in 0..=stakers_epoch {
let first_slot_in_stakers_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
let last_slot_in_stakers_epoch = epoch_schedule.get_last_slot_in_epoch(epoch);
assert!(cache
.slot_leader_at(first_slot_in_stakers_epoch, None)
.is_some());
assert!(cache
.slot_leader_at(last_slot_in_stakers_epoch, None)
.is_some());
if epoch == stakers_epoch {
assert!(cache
.slot_leader_at(last_slot_in_stakers_epoch + 1, None)
.is_none());
}
}
// Should be a schedule for every epoch just checked
assert_eq!(
cache.cached_schedules.read().unwrap().0.len() as u64,
stakers_epoch + 1
);
}
#[test]
fn test_retain_latest() {
let mut cached_schedules = HashMap::new();
let mut order = VecDeque::new();
for i in 0..=MAX_SCHEDULES {
cached_schedules.insert(i as u64, Arc::new(LeaderSchedule::default()));
order.push_back(i as u64);
}
LeaderScheduleCache::retain_latest(&mut cached_schedules, &mut order, MAX_SCHEDULES);
assert_eq!(cached_schedules.len(), MAX_SCHEDULES);
let mut keys: Vec<_> = cached_schedules.keys().cloned().collect();
keys.sort();
let expected: Vec<_> = (1..=MAX_SCHEDULES as u64).collect();
let expected_order: VecDeque<_> = (1..=MAX_SCHEDULES as u64).collect();
assert_eq!(expected, keys);
assert_eq!(expected_order, order);
}
#[test]
fn test_thread_race_leader_schedule_cache() {
let num_runs = 10;
for _ in 0..num_runs {
run_thread_race()
}
}
fn run_thread_race() {
let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, slots_per_epoch / 2, true);
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2);
let bank = Arc::new(Bank::new(&genesis_block));
let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule, &bank));
let num_threads = 10;
let (threads, senders): (Vec<_>, Vec<_>) = (0..num_threads)
.map(|_| {
let cache = cache.clone();
let bank = bank.clone();
let (sender, receiver) = channel();
(
Builder::new()
.name("test_thread_race_leader_schedule_cache".to_string())
.spawn(move || {
let _ = receiver.recv();
cache.slot_leader_at(bank.slot(), Some(&bank));
})
.unwrap(),
sender,
)
})
.unzip();
for sender in &senders {
sender.send(true).unwrap();
}
for t in threads.into_iter() {
t.join().unwrap();
}
let (ref cached_schedules, ref order) = *cache.cached_schedules.read().unwrap();
assert_eq!(cached_schedules.len(), 1);
assert_eq!(order.len(), 1);
}
#[test]
fn test_next_leader_slot() {
let pubkey = Pubkey::new_rand();
let mut genesis_block = create_genesis_block_with_leader(
BOOTSTRAP_LEADER_LAMPORTS,
&pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
)
.genesis_block;
genesis_block.epoch_schedule = EpochSchedule::custom(
DEFAULT_SLOTS_PER_EPOCH,
DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET,
false,
);
let bank = Bank::new(&genesis_block);
let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
assert_eq!(
cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(),
pubkey
);
assert_eq!(
cache.next_leader_slot(&pubkey, 0, &bank, None),
Some((1, 16383))
);
assert_eq!(
cache.next_leader_slot(&pubkey, 1, &bank, None),
Some((2, 16383))
);
assert_eq!(
cache.next_leader_slot(
&pubkey,
2 * genesis_block.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank,
None
),
None
);
assert_eq!(
cache.next_leader_slot(
&Pubkey::new_rand(), // not in leader_schedule
0,
&bank,
None
),
None
);
}
#[test]
fn test_next_leader_slot_blocktree() {
let pubkey = Pubkey::new_rand();
let mut genesis_block = create_genesis_block_with_leader(
BOOTSTRAP_LEADER_LAMPORTS,
&pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
)
.genesis_block;
genesis_block.epoch_schedule.warmup = false;
let bank = Bank::new(&genesis_block);
let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
assert_eq!(
cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(),
pubkey
);
// Check that the next leader slot after 0 is slot 1
assert_eq!(
cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
.unwrap()
.0,
1
);
// Write a blob into slot 2 that chains to slot 1,
// but slot 1 is empty so should not be skipped
let (shreds, _) = make_slot_entries(2, 1, 1);
blocktree.insert_shreds(shreds, None).unwrap();
assert_eq!(
cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
.unwrap()
.0,
1
);
// Write a blob into slot 1
let (shreds, _) = make_slot_entries(1, 0, 1);
// Check that slot 1 and 2 are skipped
blocktree.insert_shreds(shreds, None).unwrap();
assert_eq!(
cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
.unwrap()
.0,
3
);
// Integrity checks
assert_eq!(
cache.next_leader_slot(
&pubkey,
2 * genesis_block.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank,
Some(&blocktree)
),
None
);
assert_eq!(
cache.next_leader_slot(
&Pubkey::new_rand(), // not in leader_schedule
0,
&bank,
Some(&blocktree)
),
None
);
}
Blocktree::destroy(&ledger_path).unwrap();
}
#[test]
fn test_next_leader_slot_next_epoch() {
let GenesisBlockInfo {
mut genesis_block,
mint_keypair,
..
} = create_genesis_block(10_000);
genesis_block.epoch_schedule.warmup = false;
let bank = Bank::new(&genesis_block);
let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
// Create new vote account
let node_pubkey = Pubkey::new_rand();
let vote_pubkey = Pubkey::new_rand();
setup_vote_and_stake_accounts(
&bank,
&mint_keypair,
&vote_pubkey,
&node_pubkey,
BOOTSTRAP_LEADER_LAMPORTS,
);
// Have to wait until the epoch at after the epoch stakes generated at genesis
// for the new votes to take effect.
let mut target_slot = 1;
let epoch = bank.get_leader_schedule_epoch(0);
while bank.get_leader_schedule_epoch(target_slot) == epoch {
target_slot += 1;
}
let bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), target_slot);
let mut expected_slot = 0;
let epoch = bank.get_leader_schedule_epoch(target_slot);
for i in 0..epoch {
expected_slot += bank.get_slots_in_epoch(i);
}
let schedule = cache.compute_epoch_schedule(epoch, &bank).unwrap();
let mut index = 0;
while schedule[index] != node_pubkey {
index += 1;
assert_ne!(index, genesis_block.epoch_schedule.slots_per_epoch);
}
expected_slot += index;
// If the max root isn't set, we'll get None
assert!(cache
.next_leader_slot(&node_pubkey, 0, &bank, None)
.is_none());
cache.set_root(&bank);
assert_eq!(
cache
.next_leader_slot(&node_pubkey, 0, &bank, None)
.unwrap()
.0,
expected_slot
);
}
#[test]
fn test_schedule_for_unconfirmed_epoch() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2);
let bank = Arc::new(Bank::new(&genesis_block));
let cache = LeaderScheduleCache::new_from_bank(&bank);
assert_eq!(*cache.max_epoch.read().unwrap(), 1);
// Asking for the leader for the last slot in epoch 1 is ok b/c
// epoch 1 is confirmed
assert_eq!(bank.get_epoch_and_slot_index(95).0, 1);
assert!(cache.slot_leader_at(95, Some(&bank)).is_some());
// Asking for the lader for the first slot in epoch 2 is not ok
// b/c epoch 2 is unconfirmed
assert_eq!(bank.get_epoch_and_slot_index(96).0, 2);
assert!(cache.slot_leader_at(96, Some(&bank)).is_none());
let bank2 = Bank::new_from_parent(&bank, &Pubkey::new_rand(), 95);
assert!(bank2.epoch_vote_accounts(2).is_some());
// Set root for a slot in epoch 1, so that epoch 2 is now confirmed
cache.set_root(&bank2);
assert_eq!(*cache.max_epoch.read().unwrap(), 2);
assert!(cache.slot_leader_at(96, Some(&bank2)).is_some());
assert_eq!(bank2.get_epoch_and_slot_index(223).0, 2);
assert!(cache.slot_leader_at(223, Some(&bank2)).is_some());
assert_eq!(bank2.get_epoch_and_slot_index(224).0, 3);
assert!(cache.slot_leader_at(224, Some(&bank2)).is_none());
}
#[test]
fn test_set_max_schedules() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(2);
let bank = Arc::new(Bank::new(&genesis_block));
let mut cache = LeaderScheduleCache::new_from_bank(&bank);
// Max schedules must be greater than 0
cache.set_max_schedules(0);
assert_eq!(cache.max_schedules(), MAX_SCHEDULES);
cache.set_max_schedules(std::usize::MAX);
assert_eq!(cache.max_schedules(), std::usize::MAX);
}
}

View File

@ -1,120 +0,0 @@
use crate::leader_schedule::LeaderSchedule;
use crate::staking_utils;
use solana_runtime::bank::Bank;
use solana_sdk::clock::NUM_CONSECUTIVE_LEADER_SLOTS;
use solana_sdk::pubkey::Pubkey;
/// Return the leader schedule for the given epoch.
pub fn leader_schedule(epoch: u64, bank: &Bank) -> Option<LeaderSchedule> {
    staking_utils::staked_nodes_at_epoch(bank, epoch).map(|stakes| {
        // The epoch number seeds the RNG, so every validator derives an
        // identical schedule for the same stake set
        let mut seed = [0u8; 32];
        seed[..8].copy_from_slice(&epoch.to_le_bytes());
        let mut stakes: Vec<_> = stakes.into_iter().collect();
        sort_stakes(&mut stakes);
        LeaderSchedule::new(
            &stakes,
            seed,
            bank.get_slots_in_epoch(epoch),
            NUM_CONSECUTIVE_LEADER_SLOTS,
        )
    })
}
/// Return the leader for the given slot.
pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
    let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot);
    Some(leader_schedule(epoch, bank)?[slot_index])
}
// Returns the number of ticks remaining from the specified tick_height to the end of the
// slot implied by the tick_height
pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 {
    // `%` binds tighter than `-`: ticks_per_slot - (tick_height % ticks_per_slot)
    bank.ticks_per_slot() - tick_height % bank.ticks_per_slot()
}
/// Sorts stakes descending by stake, breaking ties by descending pubkey so the
/// result is deterministic, then removes exact duplicates.
fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) {
    // Unstable sort is fine because the dedup right after removes the equal elements.
    stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| {
        r_stake.cmp(l_stake).then_with(|| r_pubkey.cmp(l_pubkey))
    });
    // Sorted order puts duplicates adjacent, so this dedup is O(n).
    stakes.dedup();
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::staking_utils;
    use solana_runtime::genesis_utils::{
        create_genesis_block_with_leader, BOOTSTRAP_LEADER_LAMPORTS,
    };
    // With a single staked node, every slot in the schedule belongs to it.
    #[test]
    fn test_leader_schedule_via_bank() {
        let pubkey = Pubkey::new_rand();
        let genesis_block =
            create_genesis_block_with_leader(0, &pubkey, BOOTSTRAP_LEADER_LAMPORTS).genesis_block;
        let bank = Bank::new(&genesis_block);
        let pubkeys_and_stakes: Vec<_> = staking_utils::staked_nodes(&bank).into_iter().collect();
        let seed = [0u8; 32];
        let leader_schedule = LeaderSchedule::new(
            &pubkeys_and_stakes,
            seed,
            genesis_block.epoch_schedule.slots_per_epoch,
            NUM_CONSECUTIVE_LEADER_SLOTS,
        );
        assert_eq!(leader_schedule[0], pubkey);
        assert_eq!(leader_schedule[1], pubkey);
        assert_eq!(leader_schedule[2], pubkey);
    }
    // slot_leader_at resolves the same leader through the bank's epoch math.
    #[test]
    fn test_leader_scheduler1_basic() {
        let pubkey = Pubkey::new_rand();
        let genesis_block = create_genesis_block_with_leader(
            BOOTSTRAP_LEADER_LAMPORTS,
            &pubkey,
            BOOTSTRAP_LEADER_LAMPORTS,
        )
        .genesis_block;
        let bank = Bank::new(&genesis_block);
        assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey);
    }
    // Stakes sort descending by stake amount.
    #[test]
    fn test_sort_stakes_basic() {
        let pubkey0 = Pubkey::new_rand();
        let pubkey1 = Pubkey::new_rand();
        let mut stakes = vec![(pubkey0, 1), (pubkey1, 2)];
        sort_stakes(&mut stakes);
        assert_eq!(stakes, vec![(pubkey1, 2), (pubkey0, 1)]);
    }
    // Exact duplicate (pubkey, stake) pairs are removed after sorting.
    #[test]
    fn test_sort_stakes_with_dup() {
        let pubkey0 = Pubkey::new_rand();
        let pubkey1 = Pubkey::new_rand();
        let mut stakes = vec![(pubkey0, 1), (pubkey1, 2), (pubkey0, 1)];
        sort_stakes(&mut stakes);
        assert_eq!(stakes, vec![(pubkey1, 2), (pubkey0, 1)]);
    }
    // Equal stakes fall back to pubkey ordering, keeping the sort deterministic.
    #[test]
    fn test_sort_stakes_with_equal_stakes() {
        let pubkey0 = Pubkey::default();
        let pubkey1 = Pubkey::new_rand();
        let mut stakes = vec![(pubkey0, 1), (pubkey1, 1)];
        sort_stakes(&mut stakes);
        assert_eq!(stakes, vec![(pubkey1, 1), (pubkey0, 1)]);
    }
}

View File

@ -1,8 +1,8 @@
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use crate::blocktree::Blocktree;
use crate::result::{Error, Result};
use crate::service::Service;
use solana_ledger::blocktree::Blocktree;
use solana_sdk::clock::DEFAULT_SLOTS_PER_EPOCH;
use solana_sdk::pubkey::Pubkey;
use std::string::ToString;
@ -75,8 +75,8 @@ impl Service for LedgerCleanupService {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::get_tmp_ledger_path;
use crate::blocktree::tests::make_many_slot_entries;
use solana_ledger::blocktree::get_tmp_ledger_path;
use solana_ledger::blocktree::make_many_slot_entries;
use std::sync::mpsc::channel;
#[test]

View File

@ -7,14 +7,11 @@
pub mod bank_forks;
pub mod banking_stage;
#[macro_use]
pub mod blocktree;
pub mod broadcast_stage;
pub mod chacha;
pub mod chacha_cuda;
pub mod cluster_info_vote_listener;
pub mod confidence;
pub mod perf_libs;
pub mod recycler;
pub mod shred_fetch_stage;
#[macro_use]
@ -32,19 +29,13 @@ pub mod crds_gossip_pull;
pub mod crds_gossip_push;
pub mod crds_value;
pub mod cuda_runtime;
pub mod entry;
pub mod erasure;
pub mod fetch_stage;
pub mod gen_keys;
pub mod genesis_utils;
pub mod gossip_service;
pub mod leader_schedule;
pub mod leader_schedule_cache;
pub mod leader_schedule_utils;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod packet;
pub mod poh;
pub mod poh_recorder;
pub mod poh_service;
pub mod recvmmsg;
@ -53,6 +44,7 @@ pub mod replay_stage;
pub mod replicator;
pub mod result;
pub mod retransmit_stage;
pub mod rooted_slot_iterator;
pub mod rpc;
pub mod rpc_pubsub;
pub mod rpc_pubsub_service;
@ -60,12 +52,10 @@ pub mod rpc_service;
pub mod rpc_subscriptions;
pub mod sendmmsg;
pub mod service;
pub mod shred;
pub mod sigverify;
pub mod sigverify_stage;
pub mod snapshot_package;
pub mod snapshot_utils;
pub mod staking_utils;
pub mod storage_stage;
pub mod streamer;
pub mod test_tx;
@ -76,9 +66,6 @@ pub(crate) mod version;
pub mod weighted_shuffle;
pub mod window_service;
#[macro_use]
extern crate dlopen_derive;
#[macro_use]
extern crate solana_budget_program;
@ -105,6 +92,9 @@ extern crate solana_metrics;
#[macro_use]
extern crate matches;
#[macro_use]
extern crate solana_ledger;
extern crate bzip2;
extern crate crossbeam_channel;
extern crate dir_diff;

View File

@ -1,12 +1,12 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use crate::cuda_runtime::PinnedVec;
use crate::erasure::ErasureConfig;
use crate::recvmmsg::{recv_mmsg, NUM_RCVMMSGS};
use crate::recycler::{Recycler, Reset};
use crate::result::{Error, Result};
use bincode;
use byteorder::{ByteOrder, LittleEndian};
use serde::Serialize;
use solana_ledger::erasure::ErasureConfig;
use solana_metrics::inc_new_counter_debug;
pub use solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE};
use solana_sdk::pubkey::Pubkey;

View File

@ -1,171 +0,0 @@
use core::ffi::c_void;
use dlopen::symbor::{Container, SymBorApi, Symbol};
use solana_sdk::packet::Packet;
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::os::raw::{c_int, c_uint};
use std::path::{Path, PathBuf};
use std::sync::Once;
/// FFI descriptor for a batch of packets handed to the perf-libs shared object.
#[repr(C)]
pub struct Elems {
    // Pointer to `num` contiguous Packets
    pub elems: *const Packet,
    pub num: u32,
}
/// Function symbols resolved from the perf-libs shared object: GPU-accelerated
/// ed25519 signature verification, chacha encryption/sampling, PoH hash
/// verification, and CUDA pinned-memory registration helpers.
#[derive(SymBorApi)]
pub struct Api<'a> {
    pub ed25519_init: Symbol<'a, unsafe extern "C" fn() -> bool>,
    pub ed25519_set_verbose: Symbol<'a, unsafe extern "C" fn(val: bool)>,
    #[allow(clippy::type_complexity)]
    pub ed25519_verify_many: Symbol<
        'a,
        unsafe extern "C" fn(
            vecs: *const Elems,
            num: u32,          //number of vecs
            message_size: u32, //size of each element inside the elems field of the vec
            total_packets: u32,
            total_signatures: u32,
            message_lens: *const u32,
            pubkey_offsets: *const u32,
            signature_offsets: *const u32,
            signed_message_offsets: *const u32,
            out: *mut u8, //combined length of all the items in vecs
            use_non_default_stream: u8,
        ) -> u32,
    >,
    pub chacha_cbc_encrypt_many_sample: Symbol<
        'a,
        unsafe extern "C" fn(
            input: *const u8,
            sha_state: *mut u8,
            in_len: usize,
            keys: *const u8,
            ivec: *mut u8,
            num_keys: u32,
            samples: *const u64,
            num_samples: u32,
            starting_block: u64,
            time_us: *mut f32,
        ),
    >,
    pub chacha_init_sha_state: Symbol<'a, unsafe extern "C" fn(sha_state: *mut u8, num_keys: u32)>,
    pub chacha_end_sha_state:
        Symbol<'a, unsafe extern "C" fn(sha_state_in: *const u8, out: *mut u8, num_keys: u32)>,
    pub poh_verify_many: Symbol<
        'a,
        unsafe extern "C" fn(
            hashes: *mut u8,
            num_hashes_arr: *const u64,
            num_elems: usize,
            use_non_default_stream: u8,
        ) -> c_int,
    >,
    // cudaHostRegister/cudaHostUnregister wrappers for pinning host memory
    pub cuda_host_register:
        Symbol<'a, unsafe extern "C" fn(ptr: *mut c_void, size: usize, flags: c_uint) -> c_int>,
    pub cuda_host_unregister: Symbol<'a, unsafe extern "C" fn(ptr: *mut c_void) -> c_int>,
}
// Handle to the loaded shared object; written once inside `init` under a
// `Once` guard, read via `api()`.
static mut API: Option<Container<Api>> = None;
// Loads the shared object `name` and resolves the Api symbols. Exits the
// process if loading fails; subsequent calls are no-ops due to the Once guard.
fn init(name: &OsStr) {
    static INIT_HOOK: Once = Once::new();
    info!("Loading {:?}", name);
    unsafe {
        INIT_HOOK.call_once(|| {
            API = Some(Container::load(name).unwrap_or_else(|err| {
                error!("Unable to load {:?}: {}", name, err);
                std::process::exit(1);
            }));
        })
    }
}
/// Looks for a `perf-libs` directory next to the current executable.
fn locate_perf_libs() -> Option<PathBuf> {
    let exe = env::current_exe().expect("Unable to get executable path");
    let perf_libs = exe.parent().unwrap().join("perf-libs");
    if !perf_libs.is_dir() {
        warn!("{:?} does not exist", perf_libs);
        return None;
    }
    info!("perf-libs found at {:?}", perf_libs);
    Some(perf_libs)
}
/// Searches /usr/local for a `cuda-<version>` directory whose name matches a
/// subdirectory of `perf_libs_path`, returning the first such match.
///
/// Panics if `perf_libs_path` cannot be read (callers pass a directory that
/// `locate_perf_libs` already verified exists).
fn find_cuda_home(perf_libs_path: &Path) -> Option<PathBuf> {
    // Search /usr/local for a `cuda-` directory that matches a perf-libs subdirectory.
    // `.flatten()` skips unreadable entries instead of the old `if let Ok(..)`
    // pattern, which silently did the same thing with more nesting.
    for entry in fs::read_dir(perf_libs_path).unwrap().flatten() {
        let path = entry.path();
        if !path.is_dir() {
            continue;
        }
        let dir_name = path.file_name().unwrap().to_str().unwrap_or("");
        if !dir_name.starts_with("cuda-") {
            continue;
        }
        let cuda_home: PathBuf = ["/", "usr", "local", dir_name].iter().collect();
        if cuda_home.is_dir() {
            return Some(cuda_home);
        }
    }
    None
}
/// Locates perf-libs and a matching CUDA install, prefixes LD_LIBRARY_PATH
/// with the CUDA lib64 directory, and loads libcuda-crypt.so. Falls back to
/// loading "libcuda-crypt.so" from the default search path if discovery fails.
pub fn init_cuda() {
    if let Some(perf_libs_path) = locate_perf_libs() {
        if let Some(cuda_home) = find_cuda_home(&perf_libs_path) {
            info!("CUDA installation found at {:?}", cuda_home);
            let cuda_lib64_dir = cuda_home.join("lib64");
            if cuda_lib64_dir.is_dir() {
                let ld_library_path = cuda_lib64_dir.to_str().unwrap_or("").to_string()
                    + ":"
                    + &env::var("LD_LIBRARY_PATH").unwrap_or_else(|_| "".to_string());
                info!("LD_LIBRARY_PATH set to {:?}", ld_library_path);
                // Prefix LD_LIBRARY_PATH with $CUDA_HOME/lib64 directory
                // to ensure the correct CUDA version is used
                env::set_var("LD_LIBRARY_PATH", ld_library_path)
            } else {
                warn!("{:?} does not exist", cuda_lib64_dir);
            }
            let libcuda_crypt = perf_libs_path
                .join(cuda_home.file_name().unwrap())
                .join("libcuda-crypt.so");
            return init(libcuda_crypt.as_os_str());
        } else {
            warn!("CUDA installation not found");
        }
    }
    // Last resort! Blindly load the shared object and hope it all works out
    init(OsStr::new("libcuda-crypt.so"))
}
/// Returns the loaded perf-libs API, or None if it has not been initialized.
/// Under `cfg(test)`, initializes CUDA on first call when TEST_PERF_LIBS_CUDA
/// is set in the environment.
pub fn api() -> Option<&'static Container<Api<'static>>> {
    #[cfg(test)]
    {
        static INIT_HOOK: Once = Once::new();
        INIT_HOOK.call_once(|| {
            if std::env::var("TEST_PERF_LIBS_CUDA").is_ok() {
                init_cuda();
            }
        })
    }
    // NOTE(review): reads a `static mut`; assumes `init` is not racing with
    // readers — confirm callers initialize before concurrent use
    unsafe { API.as_ref() }
}

View File

@ -1,298 +0,0 @@
//! The `Poh` module provides an object for generating a Proof of History.
use solana_sdk::hash::{hash, hashv, Hash};
use std::thread::{Builder, JoinHandle};
use std::time::{Duration, Instant};
pub struct Poh {
    // The running Proof-of-History hash
    pub hash: Hash,
    // Hashes accumulated since the last tick/record entry
    num_hashes: u64,
    // Hashes per tick; std::u64::MAX means tick-on-demand (see `tick`)
    hashes_per_tick: u64,
    // Hashes remaining before the next tick is due
    remaining_hashes: u64,
}
/// One entry in the PoH stream: the hash value reached after `num_hashes`
/// additional hash iterations.
#[derive(Debug)]
pub struct PohEntry {
    pub num_hashes: u64,
    pub hash: Hash,
}
impl Poh {
    /// Starts a PoH stream at `hash`. `None` for `hashes_per_tick` selects
    /// tick-on-demand mode (internally std::u64::MAX).
    /// Panics if `hashes_per_tick` is 0 or 1.
    pub fn new(hash: Hash, hashes_per_tick: Option<u64>) -> Self {
        let hashes_per_tick = hashes_per_tick.unwrap_or(std::u64::MAX);
        assert!(hashes_per_tick > 1);
        Poh {
            hash,
            num_hashes: 0,
            hashes_per_tick,
            remaining_hashes: hashes_per_tick,
        }
    }
    /// Restarts the stream from `hash`, discarding all current progress.
    pub fn reset(&mut self, hash: Hash, hashes_per_tick: Option<u64>) {
        let mut poh = Poh::new(hash, hashes_per_tick);
        std::mem::swap(&mut poh, self);
    }
    /// Advances the stream by up to `max_num_hashes` hashes, always leaving at
    /// least one remaining hash for the next `tick()`.
    pub fn hash(&mut self, max_num_hashes: u64) -> bool {
        // Reserve the final hash of the tick window for tick() itself
        let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes);
        for _ in 0..num_hashes {
            self.hash = hash(&self.hash.as_ref());
        }
        self.num_hashes += num_hashes;
        self.remaining_hashes -= num_hashes;
        assert!(self.remaining_hashes > 0);
        self.remaining_hashes == 1 // Return `true` if caller needs to `tick()` next
    }
    /// Mixes `mixin` into the stream, producing an entry; returns None when
    /// the tick window is exhausted and a tick() must come first.
    pub fn record(&mut self, mixin: Hash) -> Option<PohEntry> {
        if self.remaining_hashes == 1 {
            return None; // Caller needs to `tick()` first
        }
        self.hash = hashv(&[&self.hash.as_ref(), &mixin.as_ref()]);
        let num_hashes = self.num_hashes + 1;
        self.num_hashes = 0;
        self.remaining_hashes -= 1;
        Some(PohEntry {
            num_hashes,
            hash: self.hash,
        })
    }
    /// Performs one hash and emits a tick entry when the tick boundary is
    /// reached (always, in tick-on-demand mode).
    pub fn tick(&mut self) -> Option<PohEntry> {
        self.hash = hash(&self.hash.as_ref());
        self.num_hashes += 1;
        self.remaining_hashes -= 1;
        // If the hashes_per_tick is variable (std::u64::MAX) then always generate a tick.
        // Otherwise only tick if there are no remaining hashes
        if self.hashes_per_tick < std::u64::MAX && self.remaining_hashes != 0 {
            return None;
        }
        let num_hashes = self.num_hashes;
        self.remaining_hashes = self.hashes_per_tick;
        self.num_hashes = 0;
        Some(PohEntry {
            num_hashes,
            hash: self.hash,
        })
    }
}
/// Estimates how many hashes fit into `duration` by benchmarking the hash
/// rate with every CPU hashing in parallel (system under maximum load).
///
/// `hashes_sample_size` is the number of hashes each thread runs for the
/// measurement. Returns `duration`-worth of hashes at the measured rate.
pub fn compute_hashes_per_tick(duration: Duration, hashes_sample_size: u64) -> u64 {
    let num_cpu = sys_info::cpu_num().unwrap();
    // calculate hash rate with the system under maximum load
    info!(
        "Running {} hashes in parallel on all threads...",
        hashes_sample_size
    );
    let threads: Vec<JoinHandle<u64>> = (0..num_cpu)
        .map(|_| {
            Builder::new()
                .name("solana-poh".to_string())
                .spawn(move || {
                    let mut v = Hash::default();
                    let start = Instant::now();
                    for _ in 0..hashes_sample_size {
                        v = hash(&v.as_ref());
                    }
                    // Per-thread wall time for the sample, in milliseconds
                    start.elapsed().as_millis() as u64
                })
                .unwrap()
        })
        .collect();

    let avg_elapsed = (threads
        .into_iter()
        .map(|handle| handle.join().unwrap())
        .sum::<u64>())
        / u64::from(num_cpu);
    // Guard against division by zero: with a small sample size every thread
    // can finish in under 1ms, making the truncated average elapsed time 0.
    duration.as_millis() as u64 * hashes_sample_size / avg_elapsed.max(1)
}
#[cfg(test)]
mod tests {
    use crate::poh::{Poh, PohEntry};
    use solana_sdk::hash::{hash, hashv, Hash};

    /// Replays `entries` from `initial_hash` and checks each entry's hash is
    /// reproduced. A mixin of `Some(h)` means the entry came from `record(h)`;
    /// `None` means it was a plain hash/tick. Panics on `num_hashes == 0`.
    fn verify(initial_hash: Hash, entries: &[(PohEntry, Option<Hash>)]) -> bool {
        let mut current_hash = initial_hash;

        for (entry, mixin) in entries {
            assert_ne!(entry.num_hashes, 0);

            // Replay all but the last hash; the last one may include the mixin.
            for _ in 1..entry.num_hashes {
                current_hash = hash(&current_hash.as_ref());
            }
            current_hash = match mixin {
                Some(mixin) => hashv(&[&current_hash.as_ref(), &mixin.as_ref()]),
                None => hash(&current_hash.as_ref()),
            };
            if current_hash != entry.hash {
                return false;
            }
        }

        true
    }

    // Exercises verify() against both generated and hand-built entries,
    // including a deliberately wrong hash (expected to fail verification).
    #[test]
    fn test_poh_verify() {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        let two = hash(&one.as_ref());
        let one_with_zero = hashv(&[&zero.as_ref(), &zero.as_ref()]);

        let mut poh = Poh::new(zero, None);
        assert_eq!(
            verify(
                zero,
                &[
                    (poh.tick().unwrap(), None),
                    (poh.record(zero).unwrap(), Some(zero)),
                    (poh.record(zero).unwrap(), Some(zero)),
                    (poh.tick().unwrap(), None),
                ],
            ),
            true
        );

        assert_eq!(
            verify(
                zero,
                &[(
                    PohEntry {
                        num_hashes: 1,
                        hash: one,
                    },
                    None
                )],
            ),
            true
        );
        assert_eq!(
            verify(
                zero,
                &[(
                    PohEntry {
                        num_hashes: 2,
                        hash: two,
                    },
                    None
                )]
            ),
            true
        );

        assert_eq!(
            verify(
                zero,
                &[(
                    PohEntry {
                        num_hashes: 1,
                        hash: one_with_zero,
                    },
                    Some(zero)
                )]
            ),
            true
        );
        // Wrong hash: `zero` is not the successor of `zero`.
        assert_eq!(
            verify(
                zero,
                &[(
                    PohEntry {
                        num_hashes: 1,
                        hash: zero,
                    },
                    None
                )]
            ),
            false
        );

        assert_eq!(
            verify(
                zero,
                &[
                    (
                        PohEntry {
                            num_hashes: 1,
                            hash: one_with_zero,
                        },
                        Some(zero)
                    ),
                    (
                        PohEntry {
                            num_hashes: 1,
                            hash: hash(&one_with_zero.as_ref()),
                        },
                        None
                    )
                ]
            ),
            true
        );
    }

    // Entries with num_hashes == 0 are invalid; verify() asserts on them.
    #[test]
    #[should_panic]
    fn test_poh_verify_assert() {
        verify(
            Hash::default(),
            &[(
                PohEntry {
                    num_hashes: 0,
                    hash: Hash::default(),
                },
                None,
            )],
        );
    }

    #[test]
    fn test_poh_tick() {
        let mut poh = Poh::new(Hash::default(), Some(2));
        assert_eq!(poh.remaining_hashes, 2);
        assert!(poh.tick().is_none());
        assert_eq!(poh.remaining_hashes, 1);
        assert_matches!(poh.tick(), Some(PohEntry { num_hashes: 2, .. }));
        assert_eq!(poh.remaining_hashes, 2); // Ready for the next tick
    }

    #[test]
    fn test_poh_tick_large_batch() {
        let mut poh = Poh::new(Hash::default(), Some(2));
        assert_eq!(poh.remaining_hashes, 2);
        assert!(poh.hash(1_000_000)); // Stop hashing before the next tick
        assert_eq!(poh.remaining_hashes, 1);
        assert!(poh.hash(1_000_000)); // Does nothing...
        assert_eq!(poh.remaining_hashes, 1);
        poh.tick();
        assert_eq!(poh.remaining_hashes, 2); // Ready for the next tick
    }

    // A single tick on a 2-hash tick is one hash short of the boundary.
    #[test]
    fn test_poh_tick_too_soon() {
        let mut poh = Poh::new(Hash::default(), Some(2));
        assert_eq!(poh.remaining_hashes, 2);
        assert!(poh.tick().is_none());
    }

    #[test]
    fn test_poh_record_not_permitted_at_final_hash() {
        let mut poh = Poh::new(Hash::default(), Some(10));
        assert!(poh.hash(9));
        assert_eq!(poh.remaining_hashes, 1);
        assert!(poh.record(Hash::default()).is_none()); // <-- record() rejected to avoid exceeding hashes_per_tick
        assert_matches!(poh.tick(), Some(PohEntry { num_hashes: 10, .. }));
        assert_matches!(
            poh.record(Hash::default()),
            Some(PohEntry { num_hashes: 1, .. }) // <-- record() ok
        );
        assert_eq!(poh.remaining_hashes, 9);
    }
}

View File

@ -10,11 +10,11 @@
//! For Entries:
//! * recorded entry must be >= WorkingBank::min_tick_height && entry must be < WorkingBank::max_tick_height
//!
use crate::blocktree::Blocktree;
use crate::entry::Entry;
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::poh::Poh;
use crate::result::{Error, Result};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::entry::Entry;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::poh::Poh;
use solana_runtime::bank::Bank;
pub use solana_sdk::clock::Slot;
use solana_sdk::clock::NUM_CONSECUTIVE_LEADER_SLOTS;
@ -459,9 +459,9 @@ impl PohRecorder {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::test_tx::test_tx;
use solana_ledger::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::hash;
use std::sync::mpsc::sync_channel;

View File

@ -109,12 +109,12 @@ impl Service for PohService {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::poh_recorder::WorkingBank;
use crate::result::Result;
use crate::test_tx::test_tx;
use solana_ledger::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_runtime::bank::Bank;
use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;

View File

@ -1,13 +1,10 @@
//! The `repair_service` module implements the tools necessary to generate a thread which
//! regularly finds missing blobs in the ledger and sends repair requests for those blobs
use crate::{
bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta},
cluster_info::ClusterInfo,
cluster_info_repair_listener::ClusterInfoRepairListener,
result::Result,
service::Service,
bank_forks::BankForks, cluster_info::ClusterInfo,
cluster_info_repair_listener::ClusterInfoRepairListener, result::Result, service::Service,
};
use solana_ledger::blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta};
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
collections::BTreeSet,
@ -401,15 +398,15 @@ impl Service for RepairService {
#[cfg(test)]
mod test {
use super::*;
use crate::blocktree::tests::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use crate::blocktree::{get_tmp_ledger_path, Blocktree};
use crate::cluster_info::Node;
use crate::shred::max_ticks_per_n_shreds;
use itertools::Itertools;
use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use solana_ledger::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_ledger::blocktree::{
make_chaining_slot_entries, make_many_slot_entries, make_slot_entries,
};
use solana_ledger::shred::max_ticks_per_n_shreds;
use std::sync::mpsc::channel;
use std::thread::Builder;

View File

@ -1,21 +1,21 @@
//! The `replay_stage` replays transactions broadcast by the leader.
use crate::bank_forks::BankForks;
use crate::blocktree::{Blocktree, BlocktreeError};
use crate::blocktree_processor;
use crate::cluster_info::ClusterInfo;
use crate::confidence::{
AggregateConfidenceService, ConfidenceAggregationData, ForkConfidenceCache,
};
use crate::consensus::{StakeLockout, Tower};
use crate::entry::{Entry, EntrySlice};
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::packet::BlobError;
use crate::poh_recorder::PohRecorder;
use crate::result::{Error, Result};
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use crate::snapshot_package::SnapshotPackageSender;
use solana_ledger::blocktree::{Blocktree, BlocktreeError};
use solana_ledger::entry::{Entry, EntrySlice};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
@ -868,13 +868,13 @@ impl Service for ReplayStage {
#[cfg(test)]
mod test {
use super::*;
use crate::blocktree::tests::make_slot_entries;
use crate::blocktree::{entries_to_test_shreds, get_tmp_ledger_path, BlocktreeError};
use crate::confidence::BankConfidence;
use crate::entry;
use crate::genesis_utils::{create_genesis_block, create_genesis_block_with_leader};
use crate::replay_stage::ReplayStage;
use crate::shred::{Shred, ShredHeader, DATA_COMPLETE_SHRED, SIZE_OF_SHRED_HEADER};
use solana_ledger::blocktree::make_slot_entries;
use solana_ledger::blocktree::{entries_to_test_shreds, get_tmp_ledger_path, BlocktreeError};
use solana_ledger::entry;
use solana_ledger::shred::{Shred, ShredHeader, DATA_COMPLETE_SHRED, SIZE_OF_SHRED_HEADER};
use solana_runtime::genesis_utils::GenesisBlockInfo;
use solana_sdk::hash::{hash, Hash};
use solana_sdk::packet::PACKET_DATA_SIZE;

View File

@ -1,16 +1,13 @@
use crate::blocktree::Blocktree;
use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use crate::cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE};
use crate::contact_info::ContactInfo;
use crate::gossip_service::GossipService;
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::packet::to_shared_blob;
use crate::recycler::Recycler;
use crate::repair_service;
use crate::repair_service::{RepairService, RepairSlotRange, RepairStrategy};
use crate::result::{Error, Result};
use crate::service::Service;
use crate::shred::Shred;
use crate::shred_fetch_stage::ShredFetchStage;
use crate::storage_stage::NUM_STORAGE_SAMPLES;
use crate::streamer::{receiver, responder, PacketReceiver};
@ -24,6 +21,9 @@ use solana_client::rpc_client::RpcClient;
use solana_client::rpc_request::RpcRequest;
use solana_client::thin_client::ThinClient;
use solana_ed25519_dalek as ed25519_dalek;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_netutil::bind_in_range;
use solana_sdk::account_utils::State;
use solana_sdk::client::{AsyncClient, SyncClient};

View File

@ -1,11 +1,11 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.
use crate::blocktree;
use crate::cluster_info;
use crate::packet;
use crate::poh_recorder;
use bincode;
use serde_json;
use solana_ledger::blocktree;
use solana_sdk::transaction;
use std;
use std::any::Any;

View File

@ -2,18 +2,20 @@
use crate::{
bank_forks::BankForks,
blocktree::{Blocktree, CompletedSlotsReceiver},
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
leader_schedule_cache::LeaderScheduleCache,
repair_service::RepairStrategy,
result::{Error, Result},
service::Service,
staking_utils,
streamer::PacketReceiver,
window_service::{should_retransmit_and_persist, WindowService},
};
use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_ledger::{
blocktree::{Blocktree, CompletedSlotsReceiver},
leader_schedule_cache::LeaderScheduleCache,
staking_utils,
};
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_error;
use solana_sdk::epoch_schedule::EpochSchedule;
@ -260,11 +262,11 @@ impl Service for RetransmitStage {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::create_new_tmp_ledger;
use crate::blocktree_processor::{process_blocktree, ProcessOptions};
use crate::contact_info::ContactInfo;
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::packet::{Meta, Packet, Packets};
use solana_ledger::blocktree::create_new_tmp_ledger;
use solana_netutil::find_available_port_in_range;
use solana_sdk::pubkey::Pubkey;

View File

@ -1,12 +1,12 @@
use super::*;
use solana_ledger::blocktree::*;
pub struct RootedSlotIterator<'a> {
next_slots: Vec<u64>,
blocktree: &'a super::Blocktree,
blocktree: &'a Blocktree,
}
impl<'a> RootedSlotIterator<'a> {
pub fn new(start_slot: u64, blocktree: &'a super::Blocktree) -> Result<Self> {
pub fn new(start_slot: u64, blocktree: &'a Blocktree) -> Result<Self> {
if blocktree.is_root(start_slot) {
Ok(Self {
next_slots: vec![start_slot],
@ -18,7 +18,7 @@ impl<'a> RootedSlotIterator<'a> {
}
}
impl<'a> Iterator for RootedSlotIterator<'a> {
type Item = (u64, super::SlotMeta);
type Item = (u64, SlotMeta);
fn next(&mut self) -> Option<Self::Item> {
// Clone b/c passing the closure to the map below requires exclusive access to
@ -53,6 +53,7 @@ impl<'a> Iterator for RootedSlotIterator<'a> {
mod tests {
use super::*;
use crate::blocktree_processor::tests::fill_blocktree_slot_with_ticks;
use solana_sdk::hash::Hash;
#[test]
fn test_rooted_slot_iterator() {

View File

@ -505,7 +505,7 @@ impl RpcSol for RpcSolImpl {
fn get_leader_schedule(&self, meta: Self::Metadata) -> Result<Option<Vec<String>>> {
let bank = meta.request_processor.read().unwrap().bank();
Ok(
crate::leader_schedule_utils::leader_schedule(bank.epoch(), &bank).map(
solana_ledger::leader_schedule_utils::leader_schedule(bank.epoch(), &bank).map(
|leader_schedule| {
leader_schedule
.get_slot_leaders()

File diff suppressed because it is too large Load Diff

View File

@ -6,11 +6,11 @@
use crate::cuda_runtime::PinnedVec;
use crate::packet::{Packet, Packets};
use crate::perf_libs;
use crate::recycler::Recycler;
use crate::result::Result;
use bincode::serialized_size;
use rayon::ThreadPool;
use solana_ledger::perf_libs;
use solana_metrics::inc_new_counter_debug;
use solana_sdk::message::MessageHeader;
use solana_sdk::pubkey::Pubkey;

View File

@ -7,7 +7,6 @@
use crate::cuda_runtime::PinnedVec;
use crate::packet::Packets;
use crate::perf_libs;
use crate::recycler::Recycler;
use crate::result::{Error, Result};
use crate::service::Service;
@ -15,6 +14,7 @@ use crate::sigverify;
use crate::sigverify::TxOffset;
use crate::streamer::{self, PacketReceiver};
use crossbeam_channel::Sender as CrossbeamSender;
use solana_ledger::perf_libs;
use solana_measure::measure::Measure;
use solana_metrics::{datapoint_debug, inc_new_counter_info};
use solana_sdk::timing;

View File

@ -1,326 +0,0 @@
use solana_runtime::bank::Bank;
use solana_sdk::{account::Account, pubkey::Pubkey};
use solana_vote_api::vote_state::VoteState;
use std::{borrow::Borrow, collections::HashMap};
/// Looks through vote accounts, and finds the latest slot that has achieved
/// supermajority lockout
pub fn get_supermajority_slot(bank: &Bank, epoch_height: u64) -> Option<u64> {
    let stakes_and_lockouts = epoch_stakes_and_lockouts(bank, epoch_height);

    // Supermajority is anything strictly above 2/3 of the total stake.
    let total_stake = stakes_and_lockouts
        .iter()
        .map(|(stake, _)| stake)
        .sum::<u64>();
    let supermajority_stake = total_stake * 2 / 3;

    // Filter out the states that don't have a max lockout
    find_supermajority_slot(supermajority_stake, stakes_and_lockouts.iter())
}
/// Maps each vote-account pubkey to its stake, dropping the account data.
pub fn vote_account_stakes(bank: &Bank) -> HashMap<Pubkey, u64> {
    let mut stakes = HashMap::new();
    for (vote_pubkey, (stake, _account)) in bank.vote_accounts() {
        stakes.insert(vote_pubkey, stake);
    }
    stakes
}
/// Collect the staked nodes, as named by staked vote accounts from the given bank
pub fn staked_nodes(bank: &Bank) -> HashMap<Pubkey, u64> {
    let vote_states = to_vote_states(bank.vote_accounts().into_iter());
    to_staked_nodes(vote_states)
}
/// At the specified epoch, collect the delegate account balance and vote states for delegates
/// that have non-zero balance in any of their managed staking accounts
pub fn staked_nodes_at_epoch(bank: &Bank, epoch_height: u64) -> Option<HashMap<Pubkey, u64>> {
    // No epoch stakes recorded for this epoch => None.
    let vote_accounts = bank.epoch_vote_accounts(epoch_height)?;
    Some(to_staked_nodes(to_vote_states(vote_accounts.iter())))
}
// input (vote_pubkey, (stake, vote_account)) => (stake, vote_state)
fn to_vote_states(
    node_staked_accounts: impl Iterator<Item = (impl Borrow<Pubkey>, impl Borrow<(u64, Account)>)>,
) -> impl Iterator<Item = (u64, VoteState)> {
    node_staked_accounts.filter_map(|(_, stake_account)| {
        // Accounts whose data fails to deserialize as a VoteState are skipped.
        let (stake, account) = stake_account.borrow();
        VoteState::deserialize(&account.data)
            .ok()
            .map(|vote_state| (*stake, vote_state))
    })
}
// (stake, vote_state) => (node, stake)
fn to_staked_nodes(
    node_staked_accounts: impl Iterator<Item = (u64, VoteState)>,
) -> HashMap<Pubkey, u64> {
    // Sum stake per node pubkey; a node may back several vote accounts.
    let mut stakes_by_node: HashMap<Pubkey, u64> = HashMap::new();
    for (stake, vote_state) in node_staked_accounts {
        *stakes_by_node.entry(vote_state.node_pubkey).or_insert(0) += stake;
    }
    stakes_by_node
}
// For the given epoch: each staked vote account's (stake, root slot).
// Panics if the bank has no epoch stakes recorded for `epoch_height`.
fn epoch_stakes_and_lockouts(bank: &Bank, epoch_height: u64) -> Vec<(u64, Option<u64>)> {
    let vote_accounts = bank
        .epoch_vote_accounts(epoch_height)
        .expect("Bank state for epoch is missing");

    to_vote_states(vote_accounts.iter())
        .map(|(stake, vote_state)| (stake, vote_state.root_slot))
        .collect()
}
// Returns the highest root slot whose cumulative stake (counting all roots at
// or above it) strictly exceeds `supermajority_stake`, if any.
fn find_supermajority_slot<'a, I>(supermajority_stake: u64, stakes_and_lockouts: I) -> Option<u64>
where
    I: Iterator<Item = &'a (u64, Option<u64>)>,
{
    // Keep only entries that have reached max lockout (i.e. have a root slot).
    let mut rooted: Vec<_> = stakes_and_lockouts
        .filter_map(|&(stake, slot)| slot.map(|s| (stake, s)))
        .collect();

    // Highest root slot first.
    rooted.sort_unstable_by(|a, b| b.1.cmp(&a.1));

    // Walk down the slots, accumulating stake until supermajority is exceeded.
    let mut cumulative_stake = 0;
    for (stake, slot) in rooted {
        cumulative_stake += stake;
        if cumulative_stake > supermajority_stake {
            return Some(slot);
        }
    }
    None
}
#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo, BOOTSTRAP_LEADER_LAMPORTS};
    use solana_sdk::{
        instruction::Instruction,
        pubkey::Pubkey,
        signature::{Keypair, KeypairUtil},
        sysvar::stake_history::{self, StakeHistory},
        transaction::Transaction,
    };
    use solana_stake_api::{
        stake_instruction,
        stake_state::{Authorized, Stake},
    };
    use solana_vote_api::{vote_instruction, vote_state::VoteInit};
    use std::sync::Arc;

    // Convenience: child bank at `slot` with a default (zero) collector id.
    fn new_from_parent(parent: &Arc<Bank>, slot: u64) -> Bank {
        Bank::new_from_parent(parent, &Pubkey::default(), slot)
    }

    /// Creates a vote account for (`vote_pubkey`, `node_pubkey`) and a stake
    /// account delegated to it, both funded with `amount` from `from_account`.
    /// Shared with other modules' tests, hence `pub(crate)`.
    pub(crate) fn setup_vote_and_stake_accounts(
        bank: &Bank,
        from_account: &Keypair,
        vote_pubkey: &Pubkey,
        node_pubkey: &Pubkey,
        amount: u64,
    ) {
        // Signs and processes `ixs` as one transaction paid by keypairs[0].
        fn process_instructions<T: KeypairUtil>(
            bank: &Bank,
            keypairs: &[&T],
            ixs: Vec<Instruction>,
        ) {
            bank.process_transaction(&Transaction::new_signed_with_payer(
                ixs,
                Some(&keypairs[0].pubkey()),
                keypairs,
                bank.last_blockhash(),
            ))
            .unwrap();
        }

        process_instructions(
            bank,
            &[from_account],
            vote_instruction::create_account(
                &from_account.pubkey(),
                vote_pubkey,
                &VoteInit {
                    node_pubkey: *node_pubkey,
                    authorized_voter: *vote_pubkey,
                    authorized_withdrawer: *vote_pubkey,
                    commission: 0,
                },
                amount,
            ),
        );

        let stake_account_keypair = Keypair::new();
        let stake_account_pubkey = stake_account_keypair.pubkey();

        process_instructions(
            bank,
            &[from_account, &stake_account_keypair],
            stake_instruction::create_stake_account_and_delegate_stake(
                &from_account.pubkey(),
                &stake_account_pubkey,
                vote_pubkey,
                &Authorized::auto(&stake_account_pubkey),
                amount,
            ),
        );
    }

    #[test]
    fn test_epoch_stakes_and_lockouts() {
        let stake = BOOTSTRAP_LEADER_LAMPORTS * 100;
        let leader_stake = Stake {
            stake: BOOTSTRAP_LEADER_LAMPORTS,
            activation_epoch: std::u64::MAX, // mark as bootstrap
            ..Stake::default()
        };

        let validator = Keypair::new();

        let GenesisBlockInfo {
            genesis_block,
            mint_keypair,
            ..
        } = create_genesis_block(10_000);

        let bank = Bank::new(&genesis_block);
        let vote_pubkey = Pubkey::new_rand();

        // Give the validator some stake but don't setup a staking account
        // Validator has no lamports staked, so they get filtered out. Only the bootstrap leader
        // created by the genesis block will get included
        bank.transfer(1, &mint_keypair, &validator.pubkey())
            .unwrap();

        // Make a mint vote account. Because the mint has nonzero stake, this
        // should show up in the active set
        setup_vote_and_stake_accounts(
            &bank,
            &mint_keypair,
            &vote_pubkey,
            &mint_keypair.pubkey(),
            stake,
        );

        // simulated stake
        let other_stake = Stake {
            stake,
            activation_epoch: bank.epoch(),
            ..Stake::default()
        };

        let first_leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot());
        // find the first slot in the next leader schedule epoch
        let mut slot = bank.slot();
        loop {
            slot += 1;
            if bank.get_leader_schedule_epoch(slot) != first_leader_schedule_epoch {
                break;
            }
        }
        let bank = new_from_parent(&Arc::new(bank), slot);
        let next_leader_schedule_epoch = bank.get_leader_schedule_epoch(slot);

        let result: Vec<_> = epoch_stakes_and_lockouts(&bank, first_leader_schedule_epoch);
        assert_eq!(
            result,
            vec![(leader_stake.stake(first_leader_schedule_epoch, None), None)]
        );

        // epoch stakes and lockouts are saved off for the future epoch, should
        // match current bank state
        let mut result: Vec<_> = epoch_stakes_and_lockouts(&bank, next_leader_schedule_epoch);
        result.sort();
        let stake_history =
            StakeHistory::from_account(&bank.get_account(&stake_history::id()).unwrap()).unwrap();
        let mut expected = vec![
            (leader_stake.stake(bank.epoch(), Some(&stake_history)), None),
            (other_stake.stake(bank.epoch(), Some(&stake_history)), None),
        ];

        expected.sort();

        assert_eq!(result, expected);
    }

    // Supermajority requires cumulative rooted stake strictly greater than the
    // threshold; entries without a root slot never count.
    #[test]
    fn test_find_supermajority_slot() {
        let supermajority = 10;

        let stakes_and_slots = vec![];
        assert_eq!(
            find_supermajority_slot(supermajority, stakes_and_slots.iter()),
            None
        );

        let stakes_and_slots = vec![(5, None), (5, None)];
        assert_eq!(
            find_supermajority_slot(supermajority, stakes_and_slots.iter()),
            None
        );

        let stakes_and_slots = vec![(5, None), (5, None), (9, Some(2))];
        assert_eq!(
            find_supermajority_slot(supermajority, stakes_and_slots.iter()),
            None
        );

        let stakes_and_slots = vec![(5, None), (5, None), (9, Some(2)), (1, Some(3))];
        assert_eq!(
            find_supermajority_slot(supermajority, stakes_and_slots.iter()),
            None
        );

        let stakes_and_slots = vec![(5, None), (5, None), (9, Some(2)), (2, Some(3))];
        assert_eq!(
            find_supermajority_slot(supermajority, stakes_and_slots.iter()),
            Some(2)
        );

        let stakes_and_slots = vec![(9, Some(2)), (2, Some(3)), (9, None)];
        assert_eq!(
            find_supermajority_slot(supermajority, stakes_and_slots.iter()),
            Some(2)
        );

        let stakes_and_slots = vec![(9, Some(2)), (2, Some(3)), (9, Some(3))];
        assert_eq!(
            find_supermajority_slot(supermajority, stakes_and_slots.iter()),
            Some(3)
        );
    }

    #[test]
    fn test_to_staked_nodes() {
        let mut stakes = Vec::new();
        let node1 = Pubkey::new_rand();

        // Node 1 has stake of 3 (0 + 1 + 2)
        for i in 0..3 {
            stakes.push((
                i,
                VoteState::new(&VoteInit {
                    node_pubkey: node1,
                    ..VoteInit::default()
                }),
            ));
        }

        // Node 2 has stake of 5
        let node2 = Pubkey::new_rand();
        stakes.push((
            5,
            VoteState::new(&VoteInit {
                node_pubkey: node2,
                ..VoteInit::default()
            }),
        ));

        let result = to_staked_nodes(stakes.into_iter());
        assert_eq!(result.len(), 2);
        assert_eq!(result[&node1], 3);
        assert_eq!(result[&node2], 5);
    }
}

View File

@ -3,13 +3,13 @@
// to submit its proof for mining to be rewarded.
use crate::bank_forks::BankForks;
use crate::blocktree::Blocktree;
use crate::chacha_cuda::chacha_cbc_encrypt_file_many_keys;
use crate::cluster_info::ClusterInfo;
use crate::result::{Error, Result};
use crate::service::Service;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use solana_ledger::blocktree::Blocktree;
use solana_runtime::bank::Bank;
use solana_runtime::storage_utils::replicator_accounts;
use solana_sdk::account::Account;
@ -411,7 +411,7 @@ impl StorageStage {
// TODO: cuda required to generate the reference values
// but if it is missing, then we need to take care not to
// process storage mining results.
if crate::perf_libs::api().is_some() {
if solana_ledger::perf_libs::api().is_some() {
// Lock the keys, since this is the IV memory,
// it will be updated in-place by the encryption.
// Should be overwritten by the proof signatures which replace the
@ -629,13 +629,14 @@ impl Service for StorageStage {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::{create_new_tmp_ledger, Blocktree};
use crate::blocktree_processor;
use crate::cluster_info::ClusterInfo;
use crate::contact_info::ContactInfo;
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::service::Service;
use crate::{blocktree_processor, entry};
use rayon::prelude::*;
use solana_ledger::blocktree::{create_new_tmp_ledger, Blocktree};
use solana_ledger::entry;
use solana_runtime::bank::Bank;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::{Hash, Hasher};
@ -750,7 +751,7 @@ mod tests {
.collect::<Vec<_>>();
bank_sender.send(rooted_banks).unwrap();
if crate::perf_libs::api().is_some() {
if solana_ledger::perf_libs::api().is_some() {
for _ in 0..5 {
result = storage_state.get_mining_result(&signature);
if result != Hash::default() {
@ -766,7 +767,7 @@ mod tests {
exit.store(true, Ordering::Relaxed);
storage_stage.join().unwrap();
if crate::perf_libs::api().is_some() {
if solana_ledger::perf_libs::api().is_some() {
assert_ne!(result, Hash::default());
} else {
assert_eq!(result, Hash::default());

View File

@ -2,7 +2,6 @@
//! multi-stage transaction processing pipeline in software.
use crate::banking_stage::BankingStage;
use crate::blocktree::Blocktree;
use crate::broadcast_stage::{BroadcastStage, BroadcastStageType};
use crate::cluster_info::ClusterInfo;
use crate::cluster_info_vote_listener::ClusterInfoVoteListener;
@ -11,6 +10,7 @@ use crate::poh_recorder::{PohRecorder, WorkingBankEntry};
use crate::service::Service;
use crate::sigverify_stage::SigVerifyStage;
use crossbeam_channel::unbounded;
use solana_ledger::blocktree::Blocktree;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};

View File

@ -14,10 +14,8 @@
use crate::bank_forks::BankForks;
use crate::blockstream_service::BlockstreamService;
use crate::blocktree::{Blocktree, CompletedSlotsReceiver};
use crate::cluster_info::ClusterInfo;
use crate::confidence::ForkConfidenceCache;
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::ledger_cleanup_service::LedgerCleanupService;
use crate::poh_recorder::PohRecorder;
use crate::replay_stage::ReplayStage;
@ -27,6 +25,8 @@ use crate::service::Service;
use crate::shred_fetch_stage::ShredFetchStage;
use crate::snapshot_package::SnapshotPackagerService;
use crate::storage_stage::{StorageStage, StorageState};
use solana_ledger::blocktree::{Blocktree, CompletedSlotsReceiver};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
@ -226,9 +226,9 @@ impl Service for Tvu {
pub mod tests {
use super::*;
use crate::banking_stage::create_test_recorder;
use crate::blocktree::create_new_tmp_ledger;
use crate::cluster_info::{ClusterInfo, Node};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use solana_ledger::blocktree::create_new_tmp_ledger;
use solana_runtime::bank::Bank;
use std::sync::atomic::Ordering;

View File

@ -1,14 +1,12 @@
//! The `validator` module hosts all the validator microservices.
use crate::bank_forks::{BankForks, SnapshotConfig};
use crate::blocktree::{Blocktree, CompletedSlotsReceiver};
use crate::blocktree_processor::{self, BankForksInfo};
use crate::broadcast_stage::BroadcastStageType;
use crate::cluster_info::{ClusterInfo, Node};
use crate::confidence::ForkConfidenceCache;
use crate::contact_info::ContactInfo;
use crate::gossip_service::{discover_cluster, GossipService};
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::poh_recorder::PohRecorder;
use crate::poh_service::PohService;
use crate::rpc::JsonRpcConfig;
@ -21,6 +19,8 @@ use crate::snapshot_utils;
use crate::storage_stage::StorageState;
use crate::tpu::Tpu;
use crate::tvu::{Sockets, Tvu};
use solana_ledger::blocktree::{Blocktree, CompletedSlotsReceiver};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_metrics::datapoint_info;
use solana_sdk::clock::{Slot, DEFAULT_SLOTS_PER_TURN};
use solana_sdk::genesis_block::GenesisBlock;
@ -120,7 +120,7 @@ impl Validator {
warn!("vote pubkey: {:?}", vote_account);
warn!(
"CUDA is {}abled",
if crate::perf_libs::api().is_some() {
if solana_ledger::perf_libs::api().is_some() {
"en"
} else {
"dis"
@ -575,8 +575,8 @@ impl Service for Validator {
}
pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
use crate::blocktree::create_new_tmp_ledger;
use crate::genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo};
use solana_ledger::blocktree::create_new_tmp_ledger;
let node_keypair = Arc::new(Keypair::new());
let node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
@ -616,8 +616,8 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree::create_new_tmp_ledger;
use crate::genesis_utils::create_genesis_block_with_leader;
use solana_ledger::blocktree::create_new_tmp_ledger;
use std::fs::remove_dir_all;
#[test]

View File

@ -1,16 +1,16 @@
//! `window_service` handles the data plane incoming blobs, storing them in
//! blocktree and retransmitting where required
//!
use crate::blocktree::{self, Blocktree};
use crate::cluster_info::ClusterInfo;
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::repair_service::{RepairService, RepairStrategy};
use crate::result::{Error, Result};
use crate::service::Service;
use crate::shred::Shred;
use crate::streamer::{PacketReceiver, PacketSender};
use rayon::iter::{IntoParallelRefMutIterator, ParallelIterator};
use rayon::ThreadPool;
use solana_ledger::blocktree::{self, Blocktree};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::bank::Bank;
@ -267,19 +267,19 @@ impl Service for WindowService {
mod test {
use super::*;
use crate::{
blocktree::tests::make_many_slot_entries,
blocktree::{get_tmp_ledger_path, Blocktree},
cluster_info::ClusterInfo,
contact_info::ContactInfo,
entry::{create_ticks, Entry},
genesis_utils::create_genesis_block_with_leader,
packet::{Packet, Packets},
repair_service::RepairSlotRange,
service::Service,
shred::Shredder,
shred::SIZE_OF_SHRED_TYPE,
};
use rand::{seq::SliceRandom, thread_rng};
use solana_ledger::{
blocktree::{get_tmp_ledger_path, make_many_slot_entries, Blocktree},
entry::{create_ticks, Entry},
shred::{Shredder, SIZE_OF_SHRED_TYPE},
};
use solana_sdk::{
epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
hash::Hash,