Simplify storage interface in blocktree (#3522)
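This change collapses the per-column-family traits (IDataCf, IErasureCf, IMetaCf) into two generic ones, LedgerColumnFamily for serialized values and LedgerColumnFamilyRaw for raw bytes, adds an IndexColumn trait that maps a typed index such as (slot, index) onto a backend key, and replaces the Database trait's Key/KeyRef associated-type pair with Key (unsized, borrowed) and OwnedKey. A db_imports! macro then selects the RocksDB or KvStore backend at compile time. The sketch below is illustrative only, not part of the patch; it mirrors the Key/OwnedKey borrowing relationship for the RocksDB backend, where Key = [u8] and OwnedKey = Vec<u8>:

// Illustrative sketch: lookups take a borrowed, unsized key, while cursors and
// key() constructors return an owned key that borrows back to it.
use std::borrow::Borrow;

fn get_cf(key: &[u8]) -> Option<Vec<u8>> {
    // stand-in for Database::get_cf(cf, key: &Self::Key)
    Some(key.to_vec())
}

fn main() {
    let owned: Vec<u8> = vec![0u8; 16];  // OwnedKey for the RocksDB backend
    let value = get_cf(owned.borrow());  // Borrow<[u8]> turns &Vec<u8> into &[u8]
    assert_eq!(value.unwrap().len(), 16);
}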
@@ -31,45 +31,29 @@ use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
 use std::sync::Arc;
 
 mod db;
-#[cfg(feature = "kvstore")]
-mod kvs;
-#[cfg(not(feature = "kvstore"))]
-mod rocks;
 
-#[cfg(feature = "kvstore")]
-use self::kvs::{DataCf, ErasureCf, Kvs, MetaCf};
-#[cfg(not(feature = "kvstore"))]
-use self::rocks::{DataCf, ErasureCf, MetaCf, Rocks};
-
-pub use db::{
-    Cursor, Database, IDataCf, IErasureCf, IMetaCf, IWriteBatch, LedgerColumnFamily,
-    LedgerColumnFamilyRaw,
-};
+macro_rules! db_imports {
+    { $mod:ident, $db:ident, $db_path:expr } => {
+        mod $mod;
+
+        pub use db::{
+            Cursor, Database, IndexColumn, IWriteBatch, LedgerColumnFamily,
+            LedgerColumnFamilyRaw,
+        };
+
+        pub use $mod::{$db, ErasureCf, MetaCf, DataCf};
+        pub type BlocktreeRawIterator = <$db as Database>::Cursor;
+        pub type WriteBatch = <$db as Database>::WriteBatch;
+        pub type OwnedKey = <$db as Database>::OwnedKey;
+        pub type Key = <$db as Database>::Key;
+
+        pub const BLOCKTREE_DIRECTORY: &str = $db_path;
+    };
+}
 
 #[cfg(not(feature = "kvstore"))]
-pub type BlocktreeRawIterator = <Rocks as Database>::Cursor;
+db_imports! {rocks, Rocks, "rocksdb"}
 #[cfg(feature = "kvstore")]
-pub type BlocktreeRawIterator = <Kvs as Database>::Cursor;
-
-#[cfg(not(feature = "kvstore"))]
-pub type WriteBatch = <Rocks as Database>::WriteBatch;
-#[cfg(feature = "kvstore")]
-pub type WriteBatch = <Kvs as Database>::WriteBatch;
-
-#[cfg(not(feature = "kvstore"))]
-type KeyRef = <Rocks as Database>::KeyRef;
-#[cfg(feature = "kvstore")]
-type KeyRef = <Kvs as Database>::KeyRef;
-
-#[cfg(not(feature = "kvstore"))]
-pub type Key = <Rocks as Database>::Key;
-#[cfg(feature = "kvstore")]
-pub type Key = <Kvs as Database>::Key;
-
-#[cfg(not(feature = "kvstore"))]
-pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";
-#[cfg(feature = "kvstore")]
-pub const BLOCKTREE_DIRECTORY: &str = "kvstore";
+db_imports! {kvs, Kvs, "kvstore"}
 
 #[derive(Debug)]
 pub enum BlocktreeError {
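For reference, under the default build (kvstore feature off) the invocation db_imports! {rocks, Rocks, "rocksdb"} above expands to roughly the following items. This is a hand expansion of the macro shown in the hunk, not text from the patch:

mod rocks;

pub use db::{
    Cursor, Database, IndexColumn, IWriteBatch, LedgerColumnFamily,
    LedgerColumnFamilyRaw,
};

pub use rocks::{Rocks, ErasureCf, MetaCf, DataCf};
pub type BlocktreeRawIterator = <Rocks as Database>::Cursor;
pub type WriteBatch = <Rocks as Database>::WriteBatch;
pub type OwnedKey = <Rocks as Database>::OwnedKey;
pub type Key = <Rocks as Database>::Key;

pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";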
@@ -161,14 +145,14 @@ impl Blocktree {
     }
 
     pub fn meta(&self, slot: u64) -> Result<Option<SlotMeta>> {
-        self.meta_cf.get(&MetaCf::key(slot))
+        self.meta_cf.get(&MetaCf::key(&slot))
     }
 
     pub fn reset_slot_consumed(&self, slot: u64) -> Result<()> {
-        let meta_key = MetaCf::key(slot);
+        let meta_key = MetaCf::key(&slot);
         if let Some(mut meta) = self.meta_cf.get(&meta_key)? {
             for index in 0..meta.received {
-                self.data_cf.delete_by_slot_index(slot, index)?;
+                self.data_cf.delete_by_index(&(slot, index))?;
             }
             meta.consumed = 0;
             meta.received = 0;
@ -181,12 +165,12 @@ impl Blocktree {
|
|||||||
|
|
||||||
pub fn get_next_slot(&self, slot: u64) -> Result<Option<u64>> {
|
pub fn get_next_slot(&self, slot: u64) -> Result<Option<u64>> {
|
||||||
let mut db_iterator = self.db.raw_iterator_cf(self.meta_cf.handle())?;
|
let mut db_iterator = self.db.raw_iterator_cf(self.meta_cf.handle())?;
|
||||||
db_iterator.seek(&MetaCf::key(slot + 1));
|
db_iterator.seek(&MetaCf::key(&(slot + 1)));
|
||||||
if !db_iterator.valid() {
|
if !db_iterator.valid() {
|
||||||
Ok(None)
|
Ok(None)
|
||||||
} else {
|
} else {
|
||||||
let key = &db_iterator.key().expect("Expected valid key");
|
let key = &db_iterator.key().expect("Expected valid key");
|
||||||
Ok(Some(MetaCf::index_from_key(&key)?))
|
Ok(Some(MetaCf::index(&key)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -341,7 +325,7 @@ impl Blocktree {
|
|||||||
should_signal = should_signal || Self::slot_has_updates(meta, &meta_backup);
|
should_signal = should_signal || Self::slot_has_updates(meta, &meta_backup);
|
||||||
write_batch.put_cf(
|
write_batch.put_cf(
|
||||||
self.meta_cf.handle(),
|
self.meta_cf.handle(),
|
||||||
&MetaCf::key(*slot),
|
&MetaCf::key(slot),
|
||||||
&serialize(&meta)?,
|
&serialize(&meta)?,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
@ -368,7 +352,7 @@ impl Blocktree {
|
|||||||
buf: &mut [u8],
|
buf: &mut [u8],
|
||||||
slot: u64,
|
slot: u64,
|
||||||
) -> Result<(u64, u64)> {
|
) -> Result<(u64, u64)> {
|
||||||
let start_key = DataCf::key(slot, start_index);
|
let start_key = DataCf::key(&(slot, start_index));
|
||||||
let mut db_iterator = self.db.raw_iterator_cf(self.data_cf.handle())?;
|
let mut db_iterator = self.db.raw_iterator_cf(self.data_cf.handle())?;
|
||||||
db_iterator.seek(&start_key);
|
db_iterator.seek(&start_key);
|
||||||
let mut total_blobs = 0;
|
let mut total_blobs = 0;
|
||||||
@ -388,7 +372,7 @@ impl Blocktree {
|
|||||||
// Check key is the next sequential key based on
|
// Check key is the next sequential key based on
|
||||||
// blob index
|
// blob index
|
||||||
let key = &db_iterator.key().expect("Expected valid key");
|
let key = &db_iterator.key().expect("Expected valid key");
|
||||||
let index = DataCf::index_from_key(key)?;
|
let index = DataCf::index(key).1;
|
||||||
if index != expected_index {
|
if index != expected_index {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -421,24 +405,24 @@ impl Blocktree {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_coding_blob_bytes(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
|
pub fn get_coding_blob_bytes(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
|
||||||
self.erasure_cf.get_by_slot_index(slot, index)
|
self.erasure_cf.get_by_index(&(slot, index))
|
||||||
}
|
}
|
||||||
pub fn delete_coding_blob(&self, slot: u64, index: u64) -> Result<()> {
|
pub fn delete_coding_blob(&self, slot: u64, index: u64) -> Result<()> {
|
||||||
self.erasure_cf.delete_by_slot_index(slot, index)
|
self.erasure_cf.delete_by_index(&(slot, index))
|
||||||
}
|
}
|
||||||
pub fn get_data_blob_bytes(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
|
pub fn get_data_blob_bytes(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
|
||||||
self.data_cf.get_by_slot_index(slot, index)
|
self.data_cf.get_by_index(&(slot, index))
|
||||||
}
|
}
|
||||||
pub fn put_coding_blob_bytes(&self, slot: u64, index: u64, bytes: &[u8]) -> Result<()> {
|
pub fn put_coding_blob_bytes(&self, slot: u64, index: u64, bytes: &[u8]) -> Result<()> {
|
||||||
self.erasure_cf.put_by_slot_index(slot, index, bytes)
|
self.erasure_cf.put_by_index(&(slot, index), bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn put_data_raw(&self, key: &KeyRef, value: &[u8]) -> Result<()> {
|
pub fn put_data_raw(&self, key: &Key, value: &[u8]) -> Result<()> {
|
||||||
self.data_cf.put(key, value)
|
self.data_cf.put_bytes(key, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn put_data_blob_bytes(&self, slot: u64, index: u64, bytes: &[u8]) -> Result<()> {
|
pub fn put_data_blob_bytes(&self, slot: u64, index: u64, bytes: &[u8]) -> Result<()> {
|
||||||
self.data_cf.put_by_slot_index(slot, index, bytes)
|
self.data_cf.put_by_index(&(slot, index), bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_data_blob(&self, slot: u64, blob_index: u64) -> Result<Option<Blob>> {
|
pub fn get_data_blob(&self, slot: u64, blob_index: u64) -> Result<Option<Blob>> {
|
||||||
@ -468,9 +452,9 @@ impl Blocktree {
|
|||||||
slot: u64,
|
slot: u64,
|
||||||
start_index: u64,
|
start_index: u64,
|
||||||
end_index: u64,
|
end_index: u64,
|
||||||
key: &dyn Fn(u64, u64) -> Key,
|
key: &dyn Fn(u64, u64) -> OwnedKey,
|
||||||
slot_from_key: &dyn Fn(&KeyRef) -> Result<u64>,
|
slot_from_key: &dyn Fn(&Key) -> u64,
|
||||||
index_from_key: &dyn Fn(&KeyRef) -> Result<u64>,
|
index_from_key: &dyn Fn(&Key) -> u64,
|
||||||
max_missing: usize,
|
max_missing: usize,
|
||||||
) -> Vec<u64> {
|
) -> Vec<u64> {
|
||||||
if start_index >= end_index || max_missing == 0 {
|
if start_index >= end_index || max_missing == 0 {
|
||||||
@ -495,14 +479,12 @@ impl Blocktree {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
let current_key = db_iterator.key().expect("Expect a valid key");
|
let current_key = db_iterator.key().expect("Expect a valid key");
|
||||||
let current_slot = slot_from_key(¤t_key)
|
let current_slot = slot_from_key(¤t_key);
|
||||||
.expect("Expect to be able to parse slot from valid key");
|
|
||||||
let current_index = {
|
let current_index = {
|
||||||
if current_slot > slot {
|
if current_slot > slot {
|
||||||
end_index
|
end_index
|
||||||
} else {
|
} else {
|
||||||
index_from_key(¤t_key)
|
index_from_key(¤t_key)
|
||||||
.expect("Expect to be able to parse index from valid key")
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
let upper_index = cmp::min(current_index, end_index);
|
let upper_index = cmp::min(current_index, end_index);
|
||||||
@ -543,9 +525,9 @@ impl Blocktree {
|
|||||||
slot,
|
slot,
|
||||||
start_index,
|
start_index,
|
||||||
end_index,
|
end_index,
|
||||||
&DataCf::key,
|
&|slot, index| DataCf::key(&(slot, index)),
|
||||||
&DataCf::slot_from_key,
|
&MetaCf::index,
|
||||||
&DataCf::index_from_key,
|
&|key| DataCf::index(key).1,
|
||||||
max_missing,
|
max_missing,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -564,9 +546,9 @@ impl Blocktree {
|
|||||||
slot,
|
slot,
|
||||||
start_index,
|
start_index,
|
||||||
end_index,
|
end_index,
|
||||||
&ErasureCf::key,
|
&|slot, index| ErasureCf::key(&(slot, index)),
|
||||||
&ErasureCf::slot_from_key,
|
&MetaCf::index,
|
||||||
&ErasureCf::index_from_key,
|
&|key| ErasureCf::index(key).1,
|
||||||
max_missing,
|
max_missing,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -661,11 +643,7 @@ impl Blocktree {
|
|||||||
// Write all the newly changed slots in new_chained_slots to the write_batch
|
// Write all the newly changed slots in new_chained_slots to the write_batch
|
||||||
for (slot, meta_copy) in new_chained_slots.iter() {
|
for (slot, meta_copy) in new_chained_slots.iter() {
|
||||||
let meta: &SlotMeta = &RefCell::borrow(&*meta_copy);
|
let meta: &SlotMeta = &RefCell::borrow(&*meta_copy);
|
||||||
write_batch.put_cf(
|
write_batch.put_cf(self.meta_cf.handle(), &MetaCf::key(slot), &serialize(meta)?)?;
|
||||||
self.meta_cf.handle(),
|
|
||||||
&MetaCf::key(*slot),
|
|
||||||
&serialize(meta)?,
|
|
||||||
)?;
|
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -848,7 +826,7 @@ impl Blocktree {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let key = DataCf::key(blob_slot, blob_index);
|
let key = DataCf::key(&(blob_slot, blob_index));
|
||||||
let serialized_blob_data = &blob_to_insert.data[..BLOB_HEADER_SIZE + blob_size];
|
let serialized_blob_data = &blob_to_insert.data[..BLOB_HEADER_SIZE + blob_size];
|
||||||
|
|
||||||
// Commit step: commit all changes to the mutable structures at once, or none at all.
|
// Commit step: commit all changes to the mutable structures at once, or none at all.
|
||||||
@ -892,7 +870,7 @@ impl Blocktree {
|
|||||||
// Try to find the next blob we're looking for in the prev_inserted_blob_datas
|
// Try to find the next blob we're looking for in the prev_inserted_blob_datas
|
||||||
if let Some(prev_blob_data) = prev_inserted_blob_datas.get(&(slot, current_index)) {
|
if let Some(prev_blob_data) = prev_inserted_blob_datas.get(&(slot, current_index)) {
|
||||||
blobs.push(Cow::Borrowed(*prev_blob_data));
|
blobs.push(Cow::Borrowed(*prev_blob_data));
|
||||||
} else if let Some(blob_data) = self.data_cf.get_by_slot_index(slot, current_index)? {
|
} else if let Some(blob_data) = self.data_cf.get_by_index(&(slot, current_index))? {
|
||||||
// Try to find the next blob we're looking for in the database
|
// Try to find the next blob we're looking for in the database
|
||||||
blobs.push(Cow::Owned(blob_data));
|
blobs.push(Cow::Owned(blob_data));
|
||||||
} else {
|
} else {
|
||||||
@ -909,7 +887,7 @@ impl Blocktree {
|
|||||||
// don't count as ticks, even if they're empty entries
|
// don't count as ticks, even if they're empty entries
|
||||||
fn write_genesis_blobs(&self, blobs: &[Blob]) -> Result<()> {
|
fn write_genesis_blobs(&self, blobs: &[Blob]) -> Result<()> {
|
||||||
// TODO: change bootstrap height to number of slots
|
// TODO: change bootstrap height to number of slots
|
||||||
let meta_key = MetaCf::key(0);
|
let meta_key = MetaCf::key(&0);
|
||||||
let mut bootstrap_meta = SlotMeta::new(0, 1);
|
let mut bootstrap_meta = SlotMeta::new(0, 1);
|
||||||
let last = blobs.last().unwrap();
|
let last = blobs.last().unwrap();
|
||||||
|
|
||||||
@ -924,7 +902,7 @@ impl Blocktree {
|
|||||||
&serialize(&bootstrap_meta)?,
|
&serialize(&bootstrap_meta)?,
|
||||||
)?;
|
)?;
|
||||||
for blob in blobs {
|
for blob in blobs {
|
||||||
let key = DataCf::key(blob.slot(), blob.index());
|
let key = DataCf::key(&(blob.slot(), blob.index()));
|
||||||
let serialized_blob_datas = &blob.data[..BLOB_HEADER_SIZE + blob.size()];
|
let serialized_blob_datas = &blob.data[..BLOB_HEADER_SIZE + blob.size()];
|
||||||
batch.put_cf(self.data_cf.handle(), &key, serialized_blob_datas)?;
|
batch.put_cf(self.data_cf.handle(), &key, serialized_blob_datas)?;
|
||||||
}
|
}
|
||||||
@ -1171,7 +1149,7 @@ pub mod tests {
|
|||||||
|
|
||||||
// Test meta column family
|
// Test meta column family
|
||||||
let meta = SlotMeta::new(0, 1);
|
let meta = SlotMeta::new(0, 1);
|
||||||
let meta_key = MetaCf::key(0);
|
let meta_key = MetaCf::key(&0);
|
||||||
ledger.meta_cf.put(&meta_key, &meta).unwrap();
|
ledger.meta_cf.put(&meta_key, &meta).unwrap();
|
||||||
let result = ledger
|
let result = ledger
|
||||||
.meta_cf
|
.meta_cf
|
||||||
@ -1183,12 +1161,15 @@ pub mod tests {
|
|||||||
|
|
||||||
// Test erasure column family
|
// Test erasure column family
|
||||||
let erasure = vec![1u8; 16];
|
let erasure = vec![1u8; 16];
|
||||||
let erasure_key = ErasureCf::key(0, 0);
|
let erasure_key = ErasureCf::key(&(0, 0));
|
||||||
ledger.erasure_cf.put(&erasure_key, &erasure).unwrap();
|
ledger
|
||||||
|
.erasure_cf
|
||||||
|
.put_bytes(&erasure_key[..], &erasure)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
let result = ledger
|
let result = ledger
|
||||||
.erasure_cf
|
.erasure_cf
|
||||||
.get(&erasure_key)
|
.get_bytes(&erasure_key[..])
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.expect("Expected erasure object to exist");
|
.expect("Expected erasure object to exist");
|
||||||
|
|
||||||
@ -1196,12 +1177,12 @@ pub mod tests {
|
|||||||
|
|
||||||
// Test data column family
|
// Test data column family
|
||||||
let data = vec![2u8; 16];
|
let data = vec![2u8; 16];
|
||||||
let data_key = DataCf::key(0, 0);
|
let data_key = DataCf::key(&(0, 0));
|
||||||
ledger.data_cf.put(&data_key, &data).unwrap();
|
ledger.data_cf.put_bytes(&data_key, &data).unwrap();
|
||||||
|
|
||||||
let result = ledger
|
let result = ledger
|
||||||
.data_cf
|
.data_cf
|
||||||
.get(&data_key)
|
.get_bytes(&data_key)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.expect("Expected data object to exist");
|
.expect("Expected data object to exist");
|
||||||
|
|
||||||
@ -1296,7 +1277,7 @@ pub mod tests {
|
|||||||
|
|
||||||
let meta = ledger
|
let meta = ledger
|
||||||
.meta_cf
|
.meta_cf
|
||||||
.get(&MetaCf::key(0))
|
.get(&MetaCf::key(&0))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.expect("Expected new metadata object to be created");
|
.expect("Expected new metadata object to be created");
|
||||||
assert!(meta.consumed == 0 && meta.received == num_entries);
|
assert!(meta.consumed == 0 && meta.received == num_entries);
|
||||||
@ -1311,7 +1292,7 @@ pub mod tests {
|
|||||||
|
|
||||||
let meta = ledger
|
let meta = ledger
|
||||||
.meta_cf
|
.meta_cf
|
||||||
.get(&MetaCf::key(0))
|
.get(&MetaCf::key(&0))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.expect("Expected new metadata object to exist");
|
.expect("Expected new metadata object to exist");
|
||||||
assert_eq!(meta.consumed, num_entries);
|
assert_eq!(meta.consumed, num_entries);
|
||||||
@ -1341,7 +1322,7 @@ pub mod tests {
|
|||||||
|
|
||||||
let meta = ledger
|
let meta = ledger
|
||||||
.meta_cf
|
.meta_cf
|
||||||
.get(&MetaCf::key(0))
|
.get(&MetaCf::key(&0))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.expect("Expected metadata object to exist");
|
.expect("Expected metadata object to exist");
|
||||||
assert_eq!(meta.parent_slot, 0);
|
assert_eq!(meta.parent_slot, 0);
|
||||||
@ -1392,14 +1373,13 @@ pub mod tests {
|
|||||||
.raw_iterator_cf(blocktree.data_cf.handle())
|
.raw_iterator_cf(blocktree.data_cf.handle())
|
||||||
.expect("Expected to be able to open database iterator");
|
.expect("Expected to be able to open database iterator");
|
||||||
|
|
||||||
db_iterator.seek(&DataCf::key(slot, 1));
|
db_iterator.seek(&DataCf::key(&(slot, 1)));
|
||||||
|
|
||||||
// Iterate through ledger
|
// Iterate through ledger
|
||||||
for i in 0..num_entries {
|
for i in 0..num_entries {
|
||||||
assert!(db_iterator.valid());
|
assert!(db_iterator.valid());
|
||||||
let current_key = db_iterator.key().expect("Expected a valid key");
|
let current_key = db_iterator.key().expect("Expected a valid key");
|
||||||
let current_index = DataCf::index_from_key(¤t_key)
|
let current_index = DataCf::index(¤t_key).1;
|
||||||
.expect("Expect to be able to parse index from valid key");
|
|
||||||
assert_eq!(current_index, (1 as u64) << (i * 8));
|
assert_eq!(current_index, (1 as u64) << (i * 8));
|
||||||
db_iterator.next();
|
db_iterator.next();
|
||||||
}
|
}
|
||||||
@ -1519,7 +1499,7 @@ pub mod tests {
|
|||||||
|
|
||||||
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), vec![]);
|
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), vec![]);
|
||||||
|
|
||||||
let meta_key = MetaCf::key(slot);
|
let meta_key = MetaCf::key(&slot);
|
||||||
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
||||||
if num_entries % 2 == 0 {
|
if num_entries % 2 == 0 {
|
||||||
assert_eq!(meta.received, num_entries);
|
assert_eq!(meta.received, num_entries);
|
||||||
@ -1541,7 +1521,7 @@ pub mod tests {
|
|||||||
original_entries,
|
original_entries,
|
||||||
);
|
);
|
||||||
|
|
||||||
let meta_key = MetaCf::key(slot);
|
let meta_key = MetaCf::key(&slot);
|
||||||
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
||||||
assert_eq!(meta.received, num_entries);
|
assert_eq!(meta.received, num_entries);
|
||||||
assert_eq!(meta.consumed, num_entries);
|
assert_eq!(meta.consumed, num_entries);
|
||||||
@ -1594,7 +1574,7 @@ pub mod tests {
|
|||||||
|
|
||||||
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), expected,);
|
assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), expected,);
|
||||||
|
|
||||||
let meta_key = MetaCf::key(0);
|
let meta_key = MetaCf::key(&0);
|
||||||
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
||||||
assert_eq!(meta.consumed, num_unique_entries);
|
assert_eq!(meta.consumed, num_unique_entries);
|
||||||
assert_eq!(meta.received, num_unique_entries);
|
assert_eq!(meta.received, num_unique_entries);
|
||||||
@ -2053,14 +2033,17 @@ pub mod tests {
|
|||||||
assert!(blocktree.get_slots_since(&vec![0]).unwrap().is_empty());
|
assert!(blocktree.get_slots_since(&vec![0]).unwrap().is_empty());
|
||||||
|
|
||||||
let mut meta0 = SlotMeta::new(0, 0);
|
let mut meta0 = SlotMeta::new(0, 0);
|
||||||
blocktree.meta_cf.put_slot_meta(0, &meta0).unwrap();
|
blocktree
|
||||||
|
.meta_cf
|
||||||
|
.put_by_index(&0, &serialize(&meta0).unwrap())
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
// Slot exists, chains to nothing
|
// Slot exists, chains to nothing
|
||||||
let expected: HashMap<u64, Vec<u64>> =
|
let expected: HashMap<u64, Vec<u64>> =
|
||||||
HashMap::from_iter(vec![(0, vec![])].into_iter());
|
HashMap::from_iter(vec![(0, vec![])].into_iter());
|
||||||
assert_eq!(blocktree.get_slots_since(&vec![0]).unwrap(), expected);
|
assert_eq!(blocktree.get_slots_since(&vec![0]).unwrap(), expected);
|
||||||
meta0.next_slots = vec![1, 2];
|
meta0.next_slots = vec![1, 2];
|
||||||
blocktree.meta_cf.put_slot_meta(0, &meta0).unwrap();
|
blocktree.meta_cf.put(&MetaCf::key(&0), &meta0).unwrap();
|
||||||
|
|
||||||
// Slot exists, chains to some other slots
|
// Slot exists, chains to some other slots
|
||||||
let expected: HashMap<u64, Vec<u64>> =
|
let expected: HashMap<u64, Vec<u64>> =
|
||||||
@ -2070,7 +2053,10 @@ pub mod tests {
|
|||||||
|
|
||||||
let mut meta3 = SlotMeta::new(3, 1);
|
let mut meta3 = SlotMeta::new(3, 1);
|
||||||
meta3.next_slots = vec![10, 5];
|
meta3.next_slots = vec![10, 5];
|
||||||
blocktree.meta_cf.put_slot_meta(3, &meta3).unwrap();
|
blocktree
|
||||||
|
.meta_cf
|
||||||
|
.put_by_index(&3, &serialize(&meta3).unwrap())
|
||||||
|
.unwrap();
|
||||||
let expected: HashMap<u64, Vec<u64>> =
|
let expected: HashMap<u64, Vec<u64>> =
|
||||||
HashMap::from_iter(vec![(0, vec![1, 2]), (3, vec![10, 5])].into_iter());
|
HashMap::from_iter(vec![(0, vec![1, 2]), (3, vec![10, 5])].into_iter());
|
||||||
assert_eq!(blocktree.get_slots_since(&vec![0, 1, 3]).unwrap(), expected);
|
assert_eq!(blocktree.get_slots_since(&vec![0, 1, 3]).unwrap(), expected);
|
||||||
@ -2119,7 +2105,7 @@ pub mod tests {
|
|||||||
entries[i as usize]
|
entries[i as usize]
|
||||||
);
|
);
|
||||||
|
|
||||||
let meta_key = MetaCf::key(i);
|
let meta_key = MetaCf::key(&i);
|
||||||
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
let meta = blocktree.meta_cf.get(&meta_key).unwrap().unwrap();
|
||||||
assert_eq!(meta.received, i + 1);
|
assert_eq!(meta.received, i + 1);
|
||||||
assert_eq!(meta.last_index, i);
|
assert_eq!(meta.last_index, i);
|
||||||
@@ -11,8 +11,8 @@ use std::sync::Arc;
 
 pub trait Database: Sized + Send + Sync {
     type Error: Into<Error>;
-    type Key: Borrow<Self::KeyRef>;
-    type KeyRef: ?Sized;
+    type Key: ?Sized;
+    type OwnedKey: Borrow<Self::Key>;
     type ColumnFamily;
     type Cursor: Cursor<Self>;
     type EntryIter: Iterator<Item = Entry>;
@@ -20,11 +20,11 @@ pub trait Database: Sized + Send + Sync {
 
     fn cf_handle(&self, cf: &str) -> Option<Self::ColumnFamily>;
 
-    fn get_cf(&self, cf: Self::ColumnFamily, key: &Self::KeyRef) -> Result<Option<Vec<u8>>>;
+    fn get_cf(&self, cf: Self::ColumnFamily, key: &Self::Key) -> Result<Option<Vec<u8>>>;
 
-    fn put_cf(&self, cf: Self::ColumnFamily, key: &Self::KeyRef, data: &[u8]) -> Result<()>;
+    fn put_cf(&self, cf: Self::ColumnFamily, key: &Self::Key, data: &[u8]) -> Result<()>;
 
-    fn delete_cf(&self, cf: Self::ColumnFamily, key: &Self::KeyRef) -> Result<()>;
+    fn delete_cf(&self, cf: Self::ColumnFamily, key: &Self::Key) -> Result<()>;
 
     fn raw_iterator_cf(&self, cf: Self::ColumnFamily) -> Result<Self::Cursor>;
 
@@ -36,93 +36,25 @@ pub trait Database: Sized + Send + Sync {
 pub trait Cursor<D: Database> {
     fn valid(&self) -> bool;
 
-    fn seek(&mut self, key: &D::KeyRef);
+    fn seek(&mut self, key: &D::Key);
 
     fn seek_to_first(&mut self);
 
     fn next(&mut self);
 
-    fn key(&self) -> Option<D::Key>;
+    fn key(&self) -> Option<D::OwnedKey>;
 
     fn value(&self) -> Option<Vec<u8>>;
 }
 
 pub trait IWriteBatch<D: Database> {
-    fn put_cf(&mut self, cf: D::ColumnFamily, key: &D::KeyRef, data: &[u8]) -> Result<()>;
+    fn put_cf(&mut self, cf: D::ColumnFamily, key: &D::Key, data: &[u8]) -> Result<()>;
 }
 
-pub trait IDataCf<D: Database>: LedgerColumnFamilyRaw<D> {
-    fn new(db: Arc<D>) -> Self;
-
-    fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
-        let key = Self::key(slot, index);
-        self.get(key.borrow())
-    }
-
-    fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
-        let key = Self::key(slot, index);
-        self.delete(&key.borrow())
-    }
-
-    fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
-        let key = Self::key(slot, index);
-        self.put(key.borrow(), serialized_value)
-    }
-
-    fn key(slot: u64, index: u64) -> D::Key;
-
-    fn slot_from_key(key: &D::KeyRef) -> Result<u64>;
-
-    fn index_from_key(key: &D::KeyRef) -> Result<u64>;
-}
-
-pub trait IErasureCf<D: Database>: LedgerColumnFamilyRaw<D> {
-    fn new(db: Arc<D>) -> Self;
-
-    fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
-        let key = Self::key(slot, index);
-        self.delete(key.borrow())
-    }
-
-    fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
-        let key = Self::key(slot, index);
-        self.get(key.borrow())
-    }
-
-    fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
-        let key = Self::key(slot, index);
-        self.put(key.borrow(), serialized_value)
-    }
-
-    fn key(slot: u64, index: u64) -> D::Key;
-
-    fn slot_from_key(key: &D::KeyRef) -> Result<u64>;
-
-    fn index_from_key(key: &D::KeyRef) -> Result<u64>;
-}
-
-pub trait IMetaCf<D: Database>: LedgerColumnFamily<D, ValueType = super::SlotMeta> {
-    fn new(db: Arc<D>) -> Self;
-
-    fn key(slot: u64) -> D::Key;
-
-    fn get_slot_meta(&self, slot: u64) -> Result<Option<super::SlotMeta>> {
-        let key = Self::key(slot);
-        self.get(key.borrow())
-    }
-
-    fn put_slot_meta(&self, slot: u64, slot_meta: &super::SlotMeta) -> Result<()> {
-        let key = Self::key(slot);
-        self.put(key.borrow(), slot_meta)
-    }
-
-    fn index_from_key(key: &D::KeyRef) -> Result<u64>;
-}
-
-pub trait LedgerColumnFamily<D: Database> {
+pub trait LedgerColumnFamily<D: Database>: LedgerColumnFamilyRaw<D> {
     type ValueType: DeserializeOwned + Serialize;
 
-    fn get(&self, key: &D::KeyRef) -> Result<Option<Self::ValueType>> {
+    fn get(&self, key: &D::Key) -> Result<Option<Self::ValueType>> {
         let db = self.db();
         let data_bytes = db.get_cf(self.handle(), key)?;
 
@@ -134,52 +66,30 @@ pub trait LedgerColumnFamily<D: Database> {
         }
     }
 
-    fn get_bytes(&self, key: &D::KeyRef) -> Result<Option<Vec<u8>>> {
-        let db = self.db();
-        let data_bytes = db.get_cf(self.handle(), key)?;
-        Ok(data_bytes.map(|x| x.to_vec()))
-    }
-
-    fn put_bytes(&self, key: &D::KeyRef, serialized_value: &[u8]) -> Result<()> {
-        let db = self.db();
-        db.put_cf(self.handle(), key, &serialized_value)?;
-        Ok(())
-    }
-
-    fn put(&self, key: &D::KeyRef, value: &Self::ValueType) -> Result<()> {
+    fn put(&self, key: &D::Key, value: &Self::ValueType) -> Result<()> {
         let db = self.db();
         let serialized = serialize(value)?;
         db.put_cf(self.handle(), key, &serialized)?;
         Ok(())
     }
-
-    fn delete(&self, key: &D::KeyRef) -> Result<()> {
-        let db = self.db();
-        db.delete_cf(self.handle(), key)?;
-        Ok(())
-    }
-
-    fn db(&self) -> &Arc<D>;
-
-    fn handle(&self) -> D::ColumnFamily;
 }
 
 pub trait LedgerColumnFamilyRaw<D: Database> {
-    fn get(&self, key: &D::KeyRef) -> Result<Option<Vec<u8>>> {
+    fn get_bytes(&self, key: &D::Key) -> Result<Option<Vec<u8>>> {
         let db = self.db();
-        let data_bytes = db.get_cf(self.handle(), key)?;
+        let data_bytes = db.get_cf(self.handle(), key.borrow())?;
         Ok(data_bytes.map(|x| x.to_vec()))
     }
 
-    fn put(&self, key: &D::KeyRef, serialized_value: &[u8]) -> Result<()> {
+    fn put_bytes(&self, key: &D::Key, serialized_value: &[u8]) -> Result<()> {
         let db = self.db();
-        db.put_cf(self.handle(), &key, &serialized_value)?;
+        db.put_cf(self.handle(), key.borrow(), &serialized_value)?;
         Ok(())
     }
 
-    fn delete(&self, key: &D::KeyRef) -> Result<()> {
+    fn delete(&self, key: &D::Key) -> Result<()> {
         let db = self.db();
-        db.delete_cf(self.handle(), &key)?;
+        db.delete_cf(self.handle(), key.borrow())?;
         Ok(())
     }
 
@@ -193,3 +103,27 @@ pub trait LedgerColumnFamilyRaw<D: Database> {
 
     fn db(&self) -> &Arc<D>;
 }
+
+pub trait IndexColumn<D: Database>: LedgerColumnFamilyRaw<D> {
+    type Index;
+
+    fn get_by_index(&self, index: &Self::Index) -> Result<Option<Vec<u8>>> {
+        let db = self.db();
+        let data_bytes = db.get_cf(self.handle(), Self::key(index).borrow())?;
+        Ok(data_bytes.map(|x| x.to_vec()))
+    }
+
+    fn put_by_index(&self, index: &Self::Index, serialized_value: &[u8]) -> Result<()> {
+        let db = self.db();
+        db.put_cf(self.handle(), Self::key(index).borrow(), &serialized_value)?;
+        Ok(())
+    }
+
+    fn delete_by_index(&self, index: &Self::Index) -> Result<()> {
+        self.delete(Self::key(index).borrow())
+    }
+
+    fn index(key: &D::Key) -> Self::Index;
+
+    fn key(index: &Self::Index) -> D::OwnedKey;
+}
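The concrete backends pair this trait with a fixed-width big-endian key layout; the rocks.rs and kvs.rs hunks further down implement IndexColumn::{key, index} exactly that way for the data and erasure columns. A minimal standalone sketch of that (slot, index) <-> key mapping, assuming the byteorder crate already used by this patch, not code from the patch itself:

// Big-endian encoding keeps lexicographic key order equal to numeric
// (slot, index) order, which the raw iterators rely on when seeking.
use byteorder::{BigEndian, ByteOrder};

fn key(idx: &(u64, u64)) -> Vec<u8> {
    let mut key = vec![0u8; 16];
    BigEndian::write_u64(&mut key[0..8], idx.0); // slot
    BigEndian::write_u64(&mut key[8..16], idx.1); // blob index within the slot
    key
}

fn index(key: &[u8]) -> (u64, u64) {
    (
        BigEndian::read_u64(&key[..8]),
        BigEndian::read_u64(&key[8..16]),
    )
}

fn main() {
    let k = key(&(3, 7));
    assert_eq!(index(&k), (3, 7));
    // Ordering property: (3, 7) sorts before (3, 8) and before (4, 0).
    assert!(k < key(&(3, 8)) && k < key(&(4, 0)));
}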
|
@ -1,13 +1,12 @@
|
|||||||
use crate::entry::Entry;
|
use crate::entry::Entry;
|
||||||
use crate::packet::Blob;
|
use crate::packet::Blob;
|
||||||
use crate::result::{Error, Result};
|
use crate::result::{Error, Result};
|
||||||
|
use byteorder::{BigEndian, ByteOrder};
|
||||||
use solana_kvstore::{self as kvstore, Key, KvStore};
|
use solana_kvstore::{self as kvstore, Key, KvStore};
|
||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use super::db::{
|
use super::db::{
|
||||||
Cursor, Database, IDataCf, IErasureCf, IMetaCf, IWriteBatch, LedgerColumnFamily,
|
Cursor, Database, IWriteBatch, IndexColumn, LedgerColumnFamily, LedgerColumnFamilyRaw,
|
||||||
LedgerColumnFamilyRaw,
|
|
||||||
};
|
};
|
||||||
use super::{Blocktree, BlocktreeError};
|
use super::{Blocktree, BlocktreeError};
|
||||||
|
|
||||||
@ -68,7 +67,7 @@ impl Blocktree {
|
|||||||
impl Database for Kvs {
|
impl Database for Kvs {
|
||||||
type Error = kvstore::Error;
|
type Error = kvstore::Error;
|
||||||
type Key = Key;
|
type Key = Key;
|
||||||
type KeyRef = Key;
|
type OwnedKey = Key;
|
||||||
type ColumnFamily = ColumnFamily;
|
type ColumnFamily = ColumnFamily;
|
||||||
type Cursor = KvsCursor;
|
type Cursor = KvsCursor;
|
||||||
type EntryIter = EntryIterator;
|
type EntryIter = EntryIterator;
|
||||||
@ -135,88 +134,6 @@ impl IWriteBatch<Kvs> for KvsWriteBatch {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl IDataCf<Kvs> for DataCf {
|
|
||||||
fn new(db: Arc<Kvs>) -> Self {
|
|
||||||
DataCf { db }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_by_slot_index(&self, _slot: u64, _index: u64) -> Result<Option<Vec<u8>>> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn delete_by_slot_index(&self, _slot: u64, _index: u64) -> Result<()> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn put_by_slot_index(&self, _slot: u64, _index: u64, _serialized_value: &[u8]) -> Result<()> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn key(_slot: u64, _index: u64) -> Key {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn slot_from_key(_key: &Key) -> Result<u64> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn index_from_key(_key: &Key) -> Result<u64> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl IErasureCf<Kvs> for ErasureCf {
|
|
||||||
fn new(db: Arc<Kvs>) -> Self {
|
|
||||||
ErasureCf { db }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn delete_by_slot_index(&self, _slot: u64, _index: u64) -> Result<()> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_by_slot_index(&self, _slot: u64, _index: u64) -> Result<Option<Vec<u8>>> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn put_by_slot_index(&self, _slot: u64, _index: u64, _serialized_value: &[u8]) -> Result<()> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn key(slot: u64, index: u64) -> Key {
|
|
||||||
DataCf::key(slot, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn slot_from_key(key: &Key) -> Result<u64> {
|
|
||||||
DataCf::slot_from_key(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn index_from_key(key: &Key) -> Result<u64> {
|
|
||||||
DataCf::index_from_key(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl IMetaCf<Kvs> for MetaCf {
|
|
||||||
fn new(db: Arc<Kvs>) -> Self {
|
|
||||||
MetaCf { db }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn key(_slot: u64) -> Key {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_slot_meta(&self, _slot: u64) -> Result<Option<super::SlotMeta>> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn put_slot_meta(&self, _slot: u64, _slot_meta: &super::SlotMeta) -> Result<()> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn index_from_key(_key: &Key) -> Result<u64> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl LedgerColumnFamilyRaw<Kvs> for DataCf {
|
impl LedgerColumnFamilyRaw<Kvs> for DataCf {
|
||||||
fn db(&self) -> &Arc<Kvs> {
|
fn db(&self) -> &Arc<Kvs> {
|
||||||
&self.db
|
&self.db
|
||||||
@ -227,6 +144,20 @@ impl LedgerColumnFamilyRaw<Kvs> for DataCf {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl IndexColumn<Kvs> for DataCf {
|
||||||
|
type Index = (u64, u64);
|
||||||
|
|
||||||
|
fn index(key: &Key) -> (u64, u64) {
|
||||||
|
let slot = BigEndian::read_u64(&key.0[8..16]);
|
||||||
|
let index = BigEndian::read_u64(&key.0[16..24]);
|
||||||
|
(slot, index)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key(idx: &(u64, u64)) -> Key {
|
||||||
|
Key::from((0, idx.0, idx.1))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl LedgerColumnFamilyRaw<Kvs> for ErasureCf {
|
impl LedgerColumnFamilyRaw<Kvs> for ErasureCf {
|
||||||
fn db(&self) -> &Arc<Kvs> {
|
fn db(&self) -> &Arc<Kvs> {
|
||||||
&self.db
|
&self.db
|
||||||
@ -237,9 +168,19 @@ impl LedgerColumnFamilyRaw<Kvs> for ErasureCf {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LedgerColumnFamily<Kvs> for MetaCf {
|
impl IndexColumn<Kvs> for ErasureCf {
|
||||||
type ValueType = super::SlotMeta;
|
type Index = (u64, u64);
|
||||||
|
|
||||||
|
fn index(key: &Key) -> (u64, u64) {
|
||||||
|
DataCf::index(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key(idx: &(u64, u64)) -> Key {
|
||||||
|
DataCf::key(idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LedgerColumnFamilyRaw<Kvs> for MetaCf {
|
||||||
fn db(&self) -> &Arc<Kvs> {
|
fn db(&self) -> &Arc<Kvs> {
|
||||||
&self.db
|
&self.db
|
||||||
}
|
}
|
||||||
@ -249,6 +190,24 @@ impl LedgerColumnFamily<Kvs> for MetaCf {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl LedgerColumnFamily<Kvs> for MetaCf {
|
||||||
|
type ValueType = super::SlotMeta;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IndexColumn<Kvs> for MetaCf {
|
||||||
|
type Index = u64;
|
||||||
|
|
||||||
|
fn index(key: &Key) -> u64 {
|
||||||
|
BigEndian::read_u64(&key.0[8..16])
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key(slot: &u64) -> Key {
|
||||||
|
let mut key = Key::default();
|
||||||
|
BigEndian::write_u64(&mut key.0[8..16], *slot);
|
||||||
|
key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl std::convert::From<kvstore::Error> for Error {
|
impl std::convert::From<kvstore::Error> for Error {
|
||||||
fn from(e: kvstore::Error) -> Error {
|
fn from(e: kvstore::Error) -> Error {
|
||||||
Error::BlocktreeError(BlocktreeError::KvsDb(e))
|
Error::BlocktreeError(BlocktreeError::KvsDb(e))
|
||||||
|
@ -4,7 +4,7 @@ use crate::result::{Error, Result};
|
|||||||
|
|
||||||
use bincode::deserialize;
|
use bincode::deserialize;
|
||||||
|
|
||||||
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
|
use byteorder::{BigEndian, ByteOrder};
|
||||||
|
|
||||||
use rocksdb::{
|
use rocksdb::{
|
||||||
self, ColumnFamily, ColumnFamilyDescriptor, DBRawIterator, IteratorMode, Options,
|
self, ColumnFamily, ColumnFamilyDescriptor, DBRawIterator, IteratorMode, Options,
|
||||||
@ -15,13 +15,11 @@ use solana_sdk::hash::Hash;
|
|||||||
|
|
||||||
use std::collections::VecDeque;
|
use std::collections::VecDeque;
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::io;
|
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use super::db::{
|
use super::db::{
|
||||||
Cursor, Database, IDataCf, IErasureCf, IMetaCf, IWriteBatch, LedgerColumnFamily,
|
Cursor, Database, IWriteBatch, IndexColumn, LedgerColumnFamily, LedgerColumnFamilyRaw,
|
||||||
LedgerColumnFamilyRaw,
|
|
||||||
};
|
};
|
||||||
use super::{Blocktree, BlocktreeError};
|
use super::{Blocktree, BlocktreeError};
|
||||||
|
|
||||||
@ -98,13 +96,13 @@ impl Blocktree {
|
|||||||
)?));
|
)?));
|
||||||
|
|
||||||
// Create the metadata column family
|
// Create the metadata column family
|
||||||
let meta_cf = MetaCf::new(db.clone());
|
let meta_cf = MetaCf { db: db.clone() };
|
||||||
|
|
||||||
// Create the data column family
|
// Create the data column family
|
||||||
let data_cf = DataCf::new(db.clone());
|
let data_cf = DataCf { db: db.clone() };
|
||||||
|
|
||||||
// Create the erasure column family
|
// Create the erasure column family
|
||||||
let erasure_cf = ErasureCf::new(db.clone());
|
let erasure_cf = ErasureCf { db: db.clone() };
|
||||||
|
|
||||||
Ok(Blocktree {
|
Ok(Blocktree {
|
||||||
db,
|
db,
|
||||||
@ -167,8 +165,8 @@ impl Blocktree {
|
|||||||
|
|
||||||
impl Database for Rocks {
|
impl Database for Rocks {
|
||||||
type Error = rocksdb::Error;
|
type Error = rocksdb::Error;
|
||||||
type Key = Vec<u8>;
|
type Key = [u8];
|
||||||
type KeyRef = [u8];
|
type OwnedKey = Vec<u8>;
|
||||||
type ColumnFamily = ColumnFamily;
|
type ColumnFamily = ColumnFamily;
|
||||||
type Cursor = DBRawIterator;
|
type Cursor = DBRawIterator;
|
||||||
type EntryIter = EntryIterator;
|
type EntryIter = EntryIterator;
|
||||||
@ -238,106 +236,6 @@ impl IWriteBatch<Rocks> for RWriteBatch {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl IDataCf<Rocks> for DataCf {
|
|
||||||
fn new(db: Arc<Rocks>) -> Self {
|
|
||||||
DataCf { db }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
|
|
||||||
let key = Self::key(slot, index);
|
|
||||||
self.get(&key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
|
|
||||||
let key = Self::key(slot, index);
|
|
||||||
self.delete(&key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
|
|
||||||
let key = Self::key(slot, index);
|
|
||||||
self.put(&key, serialized_value)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn key(slot: u64, index: u64) -> Vec<u8> {
|
|
||||||
let mut key = vec![0u8; 16];
|
|
||||||
BigEndian::write_u64(&mut key[0..8], slot);
|
|
||||||
BigEndian::write_u64(&mut key[8..16], index);
|
|
||||||
key
|
|
||||||
}
|
|
||||||
|
|
||||||
fn slot_from_key(key: &[u8]) -> Result<u64> {
|
|
||||||
let mut rdr = io::Cursor::new(&key[0..8]);
|
|
||||||
let height = rdr.read_u64::<BigEndian>()?;
|
|
||||||
Ok(height)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn index_from_key(key: &[u8]) -> Result<u64> {
|
|
||||||
let mut rdr = io::Cursor::new(&key[8..16]);
|
|
||||||
let index = rdr.read_u64::<BigEndian>()?;
|
|
||||||
Ok(index)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl IErasureCf<Rocks> for ErasureCf {
|
|
||||||
fn new(db: Arc<Rocks>) -> Self {
|
|
||||||
ErasureCf { db }
|
|
||||||
}
|
|
||||||
fn delete_by_slot_index(&self, slot: u64, index: u64) -> Result<()> {
|
|
||||||
let key = Self::key(slot, index);
|
|
||||||
self.delete(&key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_by_slot_index(&self, slot: u64, index: u64) -> Result<Option<Vec<u8>>> {
|
|
||||||
let key = Self::key(slot, index);
|
|
||||||
self.get(&key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn put_by_slot_index(&self, slot: u64, index: u64, serialized_value: &[u8]) -> Result<()> {
|
|
||||||
let key = Self::key(slot, index);
|
|
||||||
self.put(&key, serialized_value)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn key(slot: u64, index: u64) -> Vec<u8> {
|
|
||||||
DataCf::key(slot, index)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn slot_from_key(key: &[u8]) -> Result<u64> {
|
|
||||||
DataCf::slot_from_key(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn index_from_key(key: &[u8]) -> Result<u64> {
|
|
||||||
DataCf::index_from_key(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl IMetaCf<Rocks> for MetaCf {
|
|
||||||
fn new(db: Arc<Rocks>) -> Self {
|
|
||||||
MetaCf { db }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn key(slot: u64) -> Vec<u8> {
|
|
||||||
let mut key = vec![0u8; 8];
|
|
||||||
BigEndian::write_u64(&mut key[0..8], slot);
|
|
||||||
key
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_slot_meta(&self, slot: u64) -> Result<Option<super::SlotMeta>> {
|
|
||||||
let key = Self::key(slot);
|
|
||||||
self.get(&key)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn put_slot_meta(&self, slot: u64, slot_meta: &super::SlotMeta) -> Result<()> {
|
|
||||||
let key = Self::key(slot);
|
|
||||||
self.put(&key, slot_meta)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn index_from_key(key: &[u8]) -> Result<u64> {
|
|
||||||
let mut rdr = io::Cursor::new(&key[..]);
|
|
||||||
let index = rdr.read_u64::<BigEndian>()?;
|
|
||||||
Ok(index)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl LedgerColumnFamilyRaw<Rocks> for DataCf {
|
impl LedgerColumnFamilyRaw<Rocks> for DataCf {
|
||||||
fn db(&self) -> &Arc<Rocks> {
|
fn db(&self) -> &Arc<Rocks> {
|
||||||
&self.db
|
&self.db
|
||||||
@ -348,6 +246,23 @@ impl LedgerColumnFamilyRaw<Rocks> for DataCf {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl IndexColumn<Rocks> for DataCf {
|
||||||
|
type Index = (u64, u64);
|
||||||
|
|
||||||
|
fn index(key: &[u8]) -> (u64, u64) {
|
||||||
|
let slot = BigEndian::read_u64(&key[..8]);
|
||||||
|
let index = BigEndian::read_u64(&key[8..16]);
|
||||||
|
(slot, index)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key(idx: &(u64, u64)) -> Vec<u8> {
|
||||||
|
let mut key = vec![0u8; 16];
|
||||||
|
BigEndian::write_u64(&mut key[0..8], idx.0);
|
||||||
|
BigEndian::write_u64(&mut key[8..16], idx.1);
|
||||||
|
key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl LedgerColumnFamilyRaw<Rocks> for ErasureCf {
|
impl LedgerColumnFamilyRaw<Rocks> for ErasureCf {
|
||||||
fn db(&self) -> &Arc<Rocks> {
|
fn db(&self) -> &Arc<Rocks> {
|
||||||
&self.db
|
&self.db
|
||||||
@ -358,9 +273,19 @@ impl LedgerColumnFamilyRaw<Rocks> for ErasureCf {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LedgerColumnFamily<Rocks> for MetaCf {
|
impl IndexColumn<Rocks> for ErasureCf {
|
||||||
type ValueType = super::SlotMeta;
|
type Index = (u64, u64);
|
||||||
|
|
||||||
|
fn index(key: &[u8]) -> (u64, u64) {
|
||||||
|
DataCf::index(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key(idx: &(u64, u64)) -> Vec<u8> {
|
||||||
|
DataCf::key(idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LedgerColumnFamilyRaw<Rocks> for MetaCf {
|
||||||
fn db(&self) -> &Arc<Rocks> {
|
fn db(&self) -> &Arc<Rocks> {
|
||||||
&self.db
|
&self.db
|
||||||
}
|
}
|
||||||
@ -370,6 +295,24 @@ impl LedgerColumnFamily<Rocks> for MetaCf {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl LedgerColumnFamily<Rocks> for MetaCf {
|
||||||
|
type ValueType = super::SlotMeta;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IndexColumn<Rocks> for MetaCf {
|
||||||
|
type Index = u64;
|
||||||
|
|
||||||
|
fn index(key: &[u8]) -> u64 {
|
||||||
|
BigEndian::read_u64(&key[..8])
|
||||||
|
}
|
||||||
|
|
||||||
|
fn key(slot: &u64) -> Vec<u8> {
|
||||||
|
let mut key = vec![0; 8];
|
||||||
|
BigEndian::write_u64(&mut key[..], *slot);
|
||||||
|
key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl std::convert::From<rocksdb::Error> for Error {
|
impl std::convert::From<rocksdb::Error> for Error {
|
||||||
fn from(e: rocksdb::Error) -> Error {
|
fn from(e: rocksdb::Error) -> Error {
|
||||||
Error::BlocktreeError(BlocktreeError::RocksDb(e))
|
Error::BlocktreeError(BlocktreeError::RocksDb(e))
|
||||||