v1.7: backport new column families from master (#18897)

* backport new column families from master to 1.6 (#18743)

* backporting bank_hash and program_costs column families from master to 1.6 for rocksdb backward compatibility

* missed a line to allow dead code

* include code for purge

* Exclude stubbed ProgramCosts column from compaction (#18840)

Co-authored-by: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com>
This commit is contained in:
Tyera Eulberg
2021-07-26 13:38:47 -06:00
committed by GitHub
parent 04ee86e93c
commit 9511031490
4 changed files with 144 additions and 17 deletions

View File

@ -124,6 +124,10 @@ pub struct BlockstoreSignals {
}
// ledger window
//
// NOTE: allowing dead_code only because stubbing bank_hash_cf and program_cost_cf
// to 1.7 for rocksdb backward compatibility
#[allow(dead_code)]
pub struct Blockstore {
ledger_path: PathBuf,
db: Arc<Database>,
@ -143,6 +147,8 @@ pub struct Blockstore {
blocktime_cf: LedgerColumn<cf::Blocktime>,
perf_samples_cf: LedgerColumn<cf::PerfSamples>,
block_height_cf: LedgerColumn<cf::BlockHeight>,
program_costs_cf: LedgerColumn<cf::ProgramCosts>,
bank_hash_cf: LedgerColumn<cf::BankHash>,
last_root: Arc<RwLock<Slot>>,
insert_shreds_lock: Arc<Mutex<()>>,
pub new_shreds_signals: Vec<SyncSender<bool>>,
@ -342,6 +348,8 @@ impl Blockstore {
let blocktime_cf = db.column();
let perf_samples_cf = db.column();
let block_height_cf = db.column();
let program_costs_cf = db.column();
let bank_hash_cf = db.column();
let db = Arc::new(db);
@ -390,6 +398,8 @@ impl Blockstore {
blocktime_cf,
perf_samples_cf,
block_height_cf,
program_costs_cf,
bank_hash_cf,
new_shreds_signals: vec![],
completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),

View File

@ -135,6 +135,10 @@ impl Blockstore {
.db
.delete_range_cf::<cf::SlotMeta>(&mut write_batch, from_slot, to_slot)
.is_ok()
& self
.db
.delete_range_cf::<cf::BankHash>(&mut write_batch, from_slot, to_slot)
.is_ok()
& self
.db
.delete_range_cf::<cf::Root>(&mut write_batch, from_slot, to_slot)
@ -264,6 +268,10 @@ impl Blockstore {
.orphans_cf
.compact_range(from_slot, to_slot)
.unwrap_or(false)
&& self
.bank_hash_cf
.compact_range(from_slot, to_slot)
.unwrap_or(false)
&& self
.index_cf
.compact_range(from_slot, to_slot)

View File

@ -22,7 +22,7 @@ use solana_sdk::{
};
use solana_storage_proto::convert::generated;
use std::{
collections::HashMap,
collections::{HashMap, HashSet},
ffi::{CStr, CString},
fs,
marker::PhantomData,
@ -47,6 +47,8 @@ const DUPLICATE_SLOTS_CF: &str = "duplicate_slots";
const ERASURE_META_CF: &str = "erasure_meta";
// Column family for orphans data
const ORPHANS_CF: &str = "orphans";
/// Column family for bank hashes
const BANK_HASH_CF: &str = "bank_hashes";
// Column family for root data
const ROOT_CF: &str = "root";
/// Column family for indexes
@ -71,6 +73,8 @@ const BLOCKTIME_CF: &str = "blocktime";
const PERF_SAMPLES_CF: &str = "perf_samples";
/// Column family for BlockHeight
const BLOCK_HEIGHT_CF: &str = "block_height";
/// Column family for ProgramCosts
const PROGRAM_COSTS_CF: &str = "program_costs";
// 1 day is chosen for the same reasoning of DEFAULT_COMPACTION_SLOT_INTERVAL
const PERIODIC_COMPACTION_SECONDS: u64 = 60 * 60 * 24;
@ -131,6 +135,10 @@ pub mod columns {
/// The erasure meta column
pub struct ErasureMeta;
#[derive(Debug)]
/// The bank hash column
///
/// Stubbed into v1.7 for rocksdb backward compatibility with master (see the
/// backport note on `Blockstore`); keyed by slot and holding
/// `blockstore_meta::FrozenHashVersioned` values.
pub struct BankHash;
#[derive(Debug)]
/// The root column
pub struct Root;
@ -174,6 +182,10 @@ pub mod columns {
#[derive(Debug)]
/// The block height column
pub struct BlockHeight;
#[derive(Debug)]
/// The program costs column
///
/// Keyed by program pubkey rather than slot; stubbed for rocksdb backward
/// compatibility and excluded from LedgerCleanupService compaction.
pub struct ProgramCosts;
}
pub enum AccessType {
@ -256,11 +268,7 @@ impl Rocks {
access_type: AccessType,
recovery_mode: Option<BlockstoreRecoveryMode>,
) -> Result<Rocks> {
use columns::{
AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta,
Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta,
TransactionStatus, TransactionStatusIndex,
};
use columns::*;
fs::create_dir_all(&path)?;
@ -296,6 +304,10 @@ impl Rocks {
Orphans::NAME,
get_cf_options::<Orphans>(&access_type, &oldest_slot),
);
let bank_hash_cf_descriptor = ColumnFamilyDescriptor::new(
BankHash::NAME,
get_cf_options::<BankHash>(&access_type, &oldest_slot),
);
let root_cf_descriptor = ColumnFamilyDescriptor::new(
Root::NAME,
get_cf_options::<Root>(&access_type, &oldest_slot),
@ -340,6 +352,10 @@ impl Rocks {
BlockHeight::NAME,
get_cf_options::<BlockHeight>(&access_type, &oldest_slot),
);
let program_costs_cf_descriptor = ColumnFamilyDescriptor::new(
ProgramCosts::NAME,
get_cf_options::<ProgramCosts>(&access_type, &oldest_slot),
);
// Don't forget to add to both run_purge_with_stats() and
// compact_storage() in ledger/src/blockstore/blockstore_purge.rs!!
@ -349,6 +365,7 @@ impl Rocks {
(DuplicateSlots::NAME, duplicate_slots_cf_descriptor),
(ErasureMeta::NAME, erasure_meta_cf_descriptor),
(Orphans::NAME, orphans_cf_descriptor),
(BankHash::NAME, bank_hash_cf_descriptor),
(Root::NAME, root_cf_descriptor),
(Index::NAME, index_cf_descriptor),
(ShredData::NAME, shred_data_cf_descriptor),
@ -363,6 +380,7 @@ impl Rocks {
(Blocktime::NAME, blocktime_cf_descriptor),
(PerfSamples::NAME, perf_samples_cf_descriptor),
(BlockHeight::NAME, block_height_cf_descriptor),
(ProgramCosts::NAME, program_costs_cf_descriptor),
];
let cf_names: Vec<_> = cfs.iter().map(|c| c.0).collect();
@ -403,9 +421,9 @@ impl Rocks {
// this is only needed for LedgerCleanupService. so guard with PrimaryOnly (i.e. running solana-validator)
if matches!(access_type, AccessType::PrimaryOnly) {
for cf_name in cf_names {
// this special column family must be excluded from LedgerCleanupService's rocksdb
// these special column families must be excluded from LedgerCleanupService's rocksdb
// compactions
if cf_name == TransactionStatusIndex::NAME {
if excludes_from_compaction(cf_name) {
continue;
}
@ -461,11 +479,7 @@ impl Rocks {
}
fn columns(&self) -> Vec<&'static str> {
use columns::{
AddressSignatures, BlockHeight, Blocktime, DeadSlots, DuplicateSlots, ErasureMeta,
Index, Orphans, PerfSamples, Rewards, Root, ShredCode, ShredData, SlotMeta,
TransactionStatus, TransactionStatusIndex,
};
use columns::*;
vec![
ErasureMeta::NAME,
@ -473,6 +487,7 @@ impl Rocks {
DuplicateSlots::NAME,
Index::NAME,
Orphans::NAME,
BankHash::NAME,
Root::NAME,
SlotMeta::NAME,
ShredData::NAME,
@ -484,6 +499,7 @@ impl Rocks {
Blocktime::NAME,
PerfSamples::NAME,
BlockHeight::NAME,
ProgramCosts::NAME,
]
}
@ -718,6 +734,14 @@ impl ColumnName for columns::TransactionStatusIndex {
const NAME: &'static str = TRANSACTION_STATUS_INDEX_CF;
}
// BankHash is keyed by slot (blanket SlotColumn impl) and stores serialized
// FrozenHashVersioned records under the "bank_hashes" column family.
impl SlotColumn for columns::BankHash {}
impl ColumnName for columns::BankHash {
    const NAME: &'static str = BANK_HASH_CF;
}
impl TypedColumn for columns::BankHash {
    type Type = blockstore_meta::FrozenHashVersioned;
}
impl SlotColumn for columns::Rewards {}
impl ColumnName for columns::Rewards {
const NAME: &'static str = REWARDS_CF;
@ -750,6 +774,39 @@ impl TypedColumn for columns::BlockHeight {
type Type = u64;
}
impl ColumnName for columns::ProgramCosts {
    const NAME: &'static str = PROGRAM_COSTS_CF;
}
impl TypedColumn for columns::ProgramCosts {
    // Each entry is a serialized ProgramCost (a single u64 cost value).
    type Type = blockstore_meta::ProgramCost;
}
impl Column for columns::ProgramCosts {
    // Unlike most ledger columns, ProgramCosts is keyed by program pubkey,
    // not by slot.
    type Index = Pubkey;

    /// Serializes the pubkey's 32 raw bytes directly as the rocksdb key.
    fn key(pubkey: Pubkey) -> Vec<u8> {
        // The pubkey already is the key bytes; no need to zero-fill a buffer
        // and copy into it.
        pubkey.as_ref().to_vec()
    }

    /// Deserializes a rocksdb key back into the program pubkey.
    fn index(key: &[u8]) -> Self::Index {
        Pubkey::new(&key[0..32])
    }

    // Slot-oriented helpers are intentionally unimplemented: this column has
    // no slot component, so slot-range purge/compaction must never consult it.
    fn primary_index(_index: Self::Index) -> u64 {
        unimplemented!()
    }

    fn slot(_index: Self::Index) -> Slot {
        unimplemented!()
    }

    #[allow(clippy::wrong_self_convention)]
    fn as_index(_index: u64) -> Self::Index {
        Pubkey::default()
    }
}
impl Column for columns::ShredCode {
type Index = (u64, u64);
@ -1262,9 +1319,7 @@ fn get_cf_options<C: 'static + Column + ColumnName>(
// TransactionStatusIndex must be excluded from LedgerCleanupService's rocksdb
// compactions....
if matches!(access_type, AccessType::PrimaryOnly)
&& C::NAME != columns::TransactionStatusIndex::NAME
{
if matches!(access_type, AccessType::PrimaryOnly) && !excludes_from_compaction(C::NAME) {
options.set_compaction_filter_factory(PurgedSlotFilterFactory::<C> {
oldest_slot: oldest_slot.clone(),
name: CString::new(format!("purged_slot_filter_factory({})", C::NAME)).unwrap(),
@ -1304,6 +1359,18 @@ fn get_db_options(access_type: &AccessType) -> Options {
options
}
/// Returns true if the named column family must be excluded from
/// LedgerCleanupService's rocksdb compactions.
///
/// Consulted both when installing periodic compactions in `Rocks::open()`
/// and when attaching the purged-slot compaction filter in
/// `get_cf_options()`.
fn excludes_from_compaction(cf_name: &str) -> bool {
    // Column families that must be excluded from compaction:
    let no_compaction_cfs: HashSet<&'static str> = [
        columns::TransactionStatusIndex::NAME,
        columns::ProgramCosts::NAME,
    ]
    .iter()
    .copied()
    .collect();
    // `contains` is the idiomatic membership test (vs. `get(..).is_some()`).
    no_compaction_cfs.contains(cf_name)
}
#[cfg(test)]
pub mod tests {
use super::*;
@ -1356,4 +1423,14 @@ pub mod tests {
CompactionDecision::Keep
);
}
#[test]
fn test_excludes_from_compaction() {
    // Every column family in this list must be exempt from compaction.
    let excluded = [
        columns::TransactionStatusIndex::NAME,
        columns::ProgramCosts::NAME,
    ];
    for cf_name in excluded.iter() {
        assert!(excludes_from_compaction(cf_name));
    }
    // Anything else stays eligible for compaction.
    assert!(!excludes_from_compaction("something else"));
}
}

View File

@ -1,6 +1,6 @@
use crate::erasure::ErasureConfig;
use serde::{Deserialize, Serialize};
use solana_sdk::clock::Slot;
use solana_sdk::{clock::Slot, hash::Hash};
use std::{collections::BTreeSet, ops::RangeBounds};
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
@ -75,6 +75,33 @@ pub enum ErasureMetaStatus {
StillNeed(usize),
}
#[derive(Deserialize, Serialize, Debug, PartialEq)]
/// Versioned wrapper around a bank's frozen-hash record, so the on-disk
/// format can evolve without breaking deserialization.
pub enum FrozenHashVersioned {
    Current(FrozenHashStatus),
}

impl FrozenHashVersioned {
    /// Returns the bank's frozen hash, independent of record version.
    pub fn frozen_hash(&self) -> Hash {
        match self {
            Self::Current(status) => status.frozen_hash,
        }
    }

    /// Returns whether this frozen hash has been duplicate-confirmed.
    pub fn is_duplicate_confirmed(&self) -> bool {
        match self {
            Self::Current(status) => status.is_duplicate_confirmed,
        }
    }
}
#[derive(Deserialize, Serialize, Debug, PartialEq)]
/// Frozen-hash record for a bank, stored in the BankHash column.
pub struct FrozenHashStatus {
    /// The bank's hash at the time it was frozen.
    pub frozen_hash: Hash,
    /// Whether this hash has been duplicate-confirmed.
    /// NOTE(review): semantics inferred from the field name — confirm
    /// against the code that writes this column.
    pub is_duplicate_confirmed: bool,
}
impl Index {
pub(crate) fn new(slot: Slot) -> Self {
Index {
@ -253,6 +280,11 @@ pub struct PerfSample {
pub sample_period_secs: u16,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Value type for the ProgramCosts column, keyed by program pubkey.
pub struct ProgramCost {
    // Execution cost for the program; units are not defined in this file
    // (presumably compute units) — verify against the writer of this column.
    pub cost: u64,
}
#[cfg(test)]
mod test {
use super::*;