Move blocktree_processor to solana_ledger (#6460)

* Drop core::result dependency in bank_forks

* Move blocktree_processor into solana_ledger
Greg Fitzgerald
2019-10-20 09:54:38 -06:00
committed by GitHub
parent 31e9074ae5
commit 2636418659
36 changed files with 125 additions and 104 deletions
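For downstream crates the change is mostly an import-path move; a minimal sketch (the old solana_core paths are an assumption based on the commit message, not shown in this diff):

use solana_ledger::bank_forks::BankForks;   // previously solana_core::bank_forks::BankForks (assumed)
use solana_ledger::blocktree_processor;     // previously solana_core::blocktree_processor (assumed)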

ledger/src/bank_forks.rs (new file, 373 lines)

@@ -0,0 +1,373 @@
//! The `bank_forks` module implements `BankForks`, a DAG of checkpointed Banks
use crate::snapshot_package::{SnapshotPackageSendError, SnapshotPackageSender};
use crate::snapshot_utils::{self, SnapshotError};
use log::*;
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_runtime::status_cache::MAX_CACHE_ENTRIES;
use solana_sdk::timing;
use std::collections::{HashMap, HashSet};
use std::ops::Index;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SnapshotConfig {
// Generate a new snapshot every this many slots
pub snapshot_interval_slots: usize,
// Where to store the latest packaged snapshot
pub snapshot_package_output_path: PathBuf,
// Where to place the snapshots for recent slots
pub snapshot_path: PathBuf,
}
#[derive(Debug)]
pub enum BankForksError {
SnapshotError(SnapshotError),
SnapshotPackageSendError(SnapshotPackageSendError),
}
pub type Result<T> = std::result::Result<T, BankForksError>;
impl std::convert::From<SnapshotError> for BankForksError {
fn from(e: SnapshotError) -> BankForksError {
BankForksError::SnapshotError(e)
}
}
impl std::convert::From<SnapshotPackageSendError> for BankForksError {
fn from(e: SnapshotPackageSendError) -> BankForksError {
BankForksError::SnapshotPackageSendError(e)
}
}
pub struct BankForks {
pub banks: HashMap<u64, Arc<Bank>>,
working_bank: Arc<Bank>,
root: u64,
pub snapshot_config: Option<SnapshotConfig>,
last_snapshot_slot: u64,
}
impl Index<u64> for BankForks {
type Output = Arc<Bank>;
fn index(&self, bank_slot: u64) -> &Arc<Bank> {
&self.banks[&bank_slot]
}
}
impl BankForks {
pub fn new(bank_slot: u64, bank: Bank) -> Self {
let mut banks = HashMap::new();
let working_bank = Arc::new(bank);
banks.insert(bank_slot, working_bank.clone());
Self {
banks,
working_bank,
root: 0,
snapshot_config: None,
last_snapshot_slot: bank_slot,
}
}
/// Create a map of bank slot id to the set of ancestors for the bank slot.
pub fn ancestors(&self) -> HashMap<u64, HashSet<u64>> {
let mut ancestors = HashMap::new();
let root = self.root;
for bank in self.banks.values() {
let mut set: HashSet<u64> = bank
.ancestors
.keys()
.filter(|k| **k >= root)
.cloned()
.collect();
set.remove(&bank.slot());
ancestors.insert(bank.slot(), set);
}
ancestors
}
/// Create a map of bank slot id to the set of all of its descendants
#[allow(clippy::or_fun_call)]
pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> {
let mut descendants = HashMap::new();
for bank in self.banks.values() {
let _ = descendants.entry(bank.slot()).or_insert(HashSet::new());
let mut set: HashSet<u64> = bank.ancestors.keys().cloned().collect();
set.remove(&bank.slot());
for parent in set {
descendants
.entry(parent)
.or_insert(HashSet::new())
.insert(bank.slot());
}
}
descendants
}
pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> {
self.banks
.iter()
.filter(|(_, b)| b.is_frozen())
.map(|(k, b)| (*k, b.clone()))
.collect()
}
pub fn active_banks(&self) -> Vec<u64> {
self.banks
.iter()
.filter(|(_, v)| !v.is_frozen())
.map(|(k, _v)| *k)
.collect()
}
pub fn get(&self, bank_slot: u64) -> Option<&Arc<Bank>> {
self.banks.get(&bank_slot)
}
pub fn new_from_banks(initial_forks: &[Arc<Bank>], rooted_path: Vec<u64>) -> Self {
let mut banks = HashMap::new();
let working_bank = initial_forks[0].clone();
// Iterate through the heads of all the different forks
for bank in initial_forks {
banks.insert(bank.slot(), bank.clone());
let parents = bank.parents();
for parent in parents {
if banks.contains_key(&parent.slot()) {
// All ancestors have already been inserted by another fork
break;
}
banks.insert(parent.slot(), parent.clone());
}
}
let root = *rooted_path.last().unwrap();
Self {
root,
banks,
working_bank,
snapshot_config: None,
last_snapshot_slot: root,
}
}
pub fn insert(&mut self, bank: Bank) -> Arc<Bank> {
let bank = Arc::new(bank);
let prev = self.banks.insert(bank.slot(), bank.clone());
assert!(prev.is_none());
self.working_bank = bank.clone();
bank
}
// TODO: really want to kill this...
pub fn working_bank(&self) -> Arc<Bank> {
self.working_bank.clone()
}
pub fn set_root(&mut self, root: u64, snapshot_package_sender: &Option<SnapshotPackageSender>) {
self.root = root;
let set_root_start = Instant::now();
let root_bank = self
.banks
.get(&root)
.expect("root bank didn't exist in bank_forks");
let root_tx_count = root_bank
.parents()
.last()
.map(|bank| bank.transaction_count())
.unwrap_or(0);
root_bank.squash();
let new_tx_count = root_bank.transaction_count();
// Generate a snapshot if snapshots are configured and it's been an appropriate number
// of banks since the last snapshot
if self.snapshot_config.is_some() && snapshot_package_sender.is_some() {
let config = self.snapshot_config.as_ref().unwrap();
info!("setting snapshot root: {}", root);
if root - self.last_snapshot_slot >= config.snapshot_interval_slots as u64 {
let mut snapshot_time = Measure::start("total-snapshot-ms");
let r = self.generate_snapshot(
root,
&root_bank.src.roots(),
snapshot_package_sender.as_ref().unwrap(),
snapshot_utils::get_snapshot_tar_path(&config.snapshot_package_output_path),
);
if r.is_err() {
warn!("Error generating snapshot for bank: {}, err: {:?}", root, r);
} else {
self.last_snapshot_slot = root;
}
// Cleanup outdated snapshots
self.purge_old_snapshots();
snapshot_time.stop();
inc_new_counter_info!("total-snapshot-ms", snapshot_time.as_ms() as usize);
}
}
self.prune_non_root(root);
inc_new_counter_info!(
"bank-forks_set_root_ms",
timing::duration_as_ms(&set_root_start.elapsed()) as usize
);
inc_new_counter_info!(
"bank-forks_set_root_tx_count",
(new_tx_count - root_tx_count) as usize
);
}
pub fn root(&self) -> u64 {
self.root
}
pub fn purge_old_snapshots(&self) {
// Remove outdated snapshots
let config = self.snapshot_config.as_ref().unwrap();
let slot_snapshot_paths = snapshot_utils::get_snapshot_paths(&config.snapshot_path);
let num_to_remove = slot_snapshot_paths.len().saturating_sub(MAX_CACHE_ENTRIES);
for slot_files in &slot_snapshot_paths[..num_to_remove] {
let r = snapshot_utils::remove_snapshot(slot_files.slot, &config.snapshot_path);
if r.is_err() {
warn!("Couldn't remove snapshot at: {:?}", config.snapshot_path);
}
}
}
pub fn generate_snapshot<P: AsRef<Path>>(
&self,
root: u64,
slots_to_snapshot: &[u64],
snapshot_package_sender: &SnapshotPackageSender,
tar_output_file: P,
) -> Result<()> {
let config = self.snapshot_config.as_ref().unwrap();
// Add a snapshot for the new root
let bank = self
.get(root)
.cloned()
.expect("root must exist in BankForks");
let mut add_snapshot_time = Measure::start("add-snapshot-ms");
snapshot_utils::add_snapshot(&config.snapshot_path, &bank)?;
add_snapshot_time.stop();
inc_new_counter_info!("add-snapshot-ms", add_snapshot_time.as_ms() as usize);
// Package the relevant snapshots
let slot_snapshot_paths = snapshot_utils::get_snapshot_paths(&config.snapshot_path);
let latest_slot_snapshot_paths = slot_snapshot_paths
.last()
.expect("no snapshots found in config snapshot_path");
// We only care about the last bank's snapshot.
// We'll ask the bank for MAX_CACHE_ENTRIES (on the rooted path) worth of statuses
let package = snapshot_utils::package_snapshot(
&bank,
latest_slot_snapshot_paths,
tar_output_file,
&config.snapshot_path,
slots_to_snapshot,
)?;
// Send the package to the packaging thread
snapshot_package_sender.send(package)?;
Ok(())
}
fn prune_non_root(&mut self, root: u64) {
let descendants = self.descendants();
self.banks
.retain(|slot, _| slot == &root || descendants[&root].contains(slot));
}
pub fn set_snapshot_config(&mut self, snapshot_config: SnapshotConfig) {
self.snapshot_config = Some(snapshot_config);
}
pub fn snapshot_config(&self) -> &Option<SnapshotConfig> {
&self.snapshot_config
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
#[test]
fn test_bank_forks() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
child_bank.register_tick(&Hash::default());
bank_forks.insert(child_bank);
assert_eq!(bank_forks[1u64].tick_height(), 1);
assert_eq!(bank_forks.working_bank().tick_height(), 1);
}
#[test]
fn test_bank_forks_descendants() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let bank0 = bank_forks[0].clone();
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.insert(bank);
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank);
let descendants = bank_forks.descendants();
let children: HashSet<u64> = [1u64, 2u64].to_vec().into_iter().collect();
assert_eq!(children, *descendants.get(&0).unwrap());
assert!(descendants[&1].is_empty());
assert!(descendants[&2].is_empty());
}
#[test]
fn test_bank_forks_ancestors() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let bank0 = bank_forks[0].clone();
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.insert(bank);
let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2);
bank_forks.insert(bank);
let ancestors = bank_forks.ancestors();
assert!(ancestors[&0].is_empty());
let parents: Vec<u64> = ancestors[&1].iter().cloned().collect();
assert_eq!(parents, vec![0]);
let parents: Vec<u64> = ancestors[&2].iter().cloned().collect();
assert_eq!(parents, vec![0]);
}
#[test]
fn test_bank_forks_frozen_banks() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
bank_forks.insert(child_bank);
assert!(bank_forks.frozen_banks().get(&0).is_some());
assert!(bank_forks.frozen_banks().get(&1).is_none());
}
#[test]
fn test_bank_forks_active_banks() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let mut bank_forks = BankForks::new(0, bank);
let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1);
bank_forks.insert(child_bank);
assert_eq!(bank_forks.active_banks(), vec![1]);
}
}
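The snapshot logic in set_root only fires when a SnapshotConfig and a package sender are present; a minimal sketch of that wiring (not part of this diff; the paths and interval are illustrative, and the receiver side is omitted here):

use std::path::PathBuf;
use std::sync::mpsc::channel;
use solana_ledger::bank_forks::{BankForks, SnapshotConfig};
use solana_ledger::snapshot_package::SnapshotPackage;

fn enable_snapshots(bank_forks: &mut BankForks) {
    bank_forks.set_snapshot_config(SnapshotConfig {
        // Illustrative cadence: snapshot at most once per 100 rooted slots
        snapshot_interval_slots: 100,
        snapshot_package_output_path: PathBuf::from("snapshot-output"),
        snapshot_path: PathBuf::from("snapshot-staging"),
    });
    // The receiver half would normally be handed to a packaging thread that
    // archives each SnapshotPackage it receives.
    let (sender, _receiver) = channel::<SnapshotPackage>();
    // Each time a slot is rooted, pass the sender to set_root(); once
    // snapshot_interval_slots have elapsed since the last snapshot, the root
    // bank is snapshotted and a SnapshotPackage is sent over the channel.
    bank_forks.set_root(1, &Some(sender));
}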

File diff suppressed because it is too large

ledger/src/lib.rs

@@ -1,5 +1,7 @@
pub mod bank_forks;
#[macro_use]
pub mod blocktree;
pub mod blocktree_processor;
pub mod entry;
pub mod erasure;
pub mod genesis_utils;
@@ -8,5 +10,11 @@ pub mod leader_schedule_cache;
pub mod leader_schedule_utils;
pub mod perf_libs;
pub mod poh;
pub mod rooted_slot_iterator;
pub mod shred;
pub mod snapshot_package;
pub mod snapshot_utils;
pub mod staking_utils;
#[macro_use]
extern crate solana_metrics;

ledger/src/rooted_slot_iterator.rs (new file)

@@ -0,0 +1,135 @@
use crate::blocktree::*;
use log::*;
pub struct RootedSlotIterator<'a> {
next_slots: Vec<u64>,
blocktree: &'a Blocktree,
}
impl<'a> RootedSlotIterator<'a> {
pub fn new(start_slot: u64, blocktree: &'a Blocktree) -> Result<Self> {
if blocktree.is_root(start_slot) {
Ok(Self {
next_slots: vec![start_slot],
blocktree,
})
} else {
Err(BlocktreeError::SlotNotRooted)
}
}
}
impl<'a> Iterator for RootedSlotIterator<'a> {
type Item = (u64, SlotMeta);
fn next(&mut self) -> Option<Self::Item> {
// Clone b/c passing the closure to the map below requires exclusive access to
// `self`, which is borrowed here if we don't clone.
let rooted_slot = self
.next_slots
.iter()
.find(|x| self.blocktree.is_root(**x))
.cloned();
rooted_slot
.map(|rooted_slot| {
let slot_meta = self
.blocktree
.meta(rooted_slot)
.expect("Database failure, couldnt fetch SlotMeta");
if slot_meta.is_none() {
warn!("Rooted SlotMeta was deleted in between checking is_root and fetch");
}
slot_meta.map(|slot_meta| {
self.next_slots = slot_meta.next_slots.clone();
(rooted_slot, slot_meta)
})
})
.unwrap_or(None)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::blocktree_processor::fill_blocktree_slot_with_ticks;
use solana_sdk::hash::Hash;
#[test]
fn test_rooted_slot_iterator() {
let blocktree_path = get_tmp_ledger_path("test_rooted_slot_iterator");
let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree.set_roots(&[0]).unwrap();
let ticks_per_slot = 5;
/*
    Build a blocktree in the ledger with the following fork structure:

         slot 0
           |
         slot 1  <-- set_root(true)
         /   \
    slot 2    |
       /      |
    slot 3    |
              |
            slot 4
*/
// Fork 1, ending at slot 3
let last_entry_hash = Hash::default();
let fork_point = 1;
let mut fork_hash = Hash::default();
for slot in 0..=3 {
let parent = {
if slot == 0 {
0
} else {
slot - 1
}
};
let last_entry_hash = fill_blocktree_slot_with_ticks(
&blocktree,
ticks_per_slot,
slot,
parent,
last_entry_hash,
);
if slot == fork_point {
fork_hash = last_entry_hash;
}
}
// Fork 2, ending at slot 4
let _ =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 4, fork_point, fork_hash);
// Set a root
blocktree.set_roots(&[1, 2, 3]).unwrap();
// Trying to get an iterator on a different fork will error
assert!(RootedSlotIterator::new(4, &blocktree).is_err());
// Trying to get an iterator on any slot on the root fork should succeed
let result: Vec<_> = RootedSlotIterator::new(3, &blocktree)
.unwrap()
.into_iter()
.map(|(slot, _)| slot)
.collect();
let expected = vec![3];
assert_eq!(result, expected);
let result: Vec<_> = RootedSlotIterator::new(0, &blocktree)
.unwrap()
.into_iter()
.map(|(slot, _)| slot)
.collect();
let expected = vec![0, 1, 2, 3];
assert_eq!(result, expected);
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
}
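A minimal usage sketch for the iterator above (not part of this diff; assumes `ledger_path` points at an existing ledger in which `start_slot` is rooted):

use std::path::Path;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::rooted_slot_iterator::RootedSlotIterator;

fn walk_rooted_slots(ledger_path: &Path, start_slot: u64) {
    let blocktree = Blocktree::open(ledger_path).expect("failed to open blocktree");
    // `new` returns Err(BlocktreeError::SlotNotRooted) if start_slot is not a root.
    let iter = RootedSlotIterator::new(start_slot, &blocktree).expect("start slot is not rooted");
    for (slot, slot_meta) in iter {
        println!("rooted slot {} -> next slots {:?}", slot, slot_meta.next_slots);
    }
}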

ledger/src/snapshot_package.rs (new file)

@@ -0,0 +1,37 @@
use solana_runtime::accounts_db::AccountStorageEntry;
use solana_runtime::status_cache::SlotDelta;
use solana_sdk::transaction::Result as TransactionResult;
use std::path::PathBuf;
use std::sync::mpsc::{Receiver, SendError, Sender};
use std::sync::Arc;
use tempfile::TempDir;
pub type SnapshotPackageSender = Sender<SnapshotPackage>;
pub type SnapshotPackageReceiver = Receiver<SnapshotPackage>;
pub type SnapshotPackageSendError = SendError<SnapshotPackage>;
pub struct SnapshotPackage {
pub root: u64,
pub slot_deltas: Vec<SlotDelta<TransactionResult<()>>>,
pub snapshot_links: TempDir,
pub storage_entries: Vec<Arc<AccountStorageEntry>>,
pub tar_output_file: PathBuf,
}
impl SnapshotPackage {
pub fn new(
root: u64,
slot_deltas: Vec<SlotDelta<TransactionResult<()>>>,
snapshot_links: TempDir,
storage_entries: Vec<Arc<AccountStorageEntry>>,
tar_output_file: PathBuf,
) -> Self {
Self {
root,
slot_deltas,
snapshot_links,
storage_entries,
tar_output_file,
}
}
}
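The sender half of this channel is held by BankForks (see bank_forks.rs above); a minimal sketch of the receiving side (not part of this diff; the function name is illustrative):

use solana_ledger::snapshot_package::SnapshotPackageReceiver;

fn run_packaging_loop(receiver: SnapshotPackageReceiver) {
    // Block on the channel and archive each package as it arrives.
    while let Ok(package) = receiver.recv() {
        println!(
            "packaging snapshot rooted at {} into {:?}",
            package.root, package.tar_output_file
        );
        // ... tar up package.snapshot_links and package.storage_entries here ...
    }
}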

ledger/src/snapshot_utils.rs (new file)

@@ -0,0 +1,327 @@
use crate::snapshot_package::SnapshotPackage;
use bincode::{deserialize_from, serialize_into};
use bzip2::bufread::BzDecoder;
use fs_extra::dir::CopyOptions;
use log::*;
use solana_measure::measure::Measure;
use solana_runtime::bank::Bank;
use solana_runtime::status_cache::SlotDelta;
use solana_sdk::transaction;
use std::cmp::Ordering;
use std::fs;
use std::fs::File;
use std::io::{BufReader, BufWriter, Error as IOError, ErrorKind};
use std::path::{Path, PathBuf};
use tar::Archive;
pub const SNAPSHOT_STATUS_CACHE_FILE_NAME: &str = "status_cache";
pub const TAR_SNAPSHOTS_DIR: &str = "snapshots";
pub const TAR_ACCOUNTS_DIR: &str = "accounts";
#[derive(PartialEq, Ord, Eq, Debug)]
pub struct SlotSnapshotPaths {
pub slot: u64,
pub snapshot_file_path: PathBuf,
}
#[derive(Debug)]
pub enum SnapshotError {
IO(std::io::Error),
Serialize(std::boxed::Box<bincode::ErrorKind>),
FsExtra(fs_extra::error::Error),
}
pub type Result<T> = std::result::Result<T, SnapshotError>;
impl std::convert::From<std::io::Error> for SnapshotError {
fn from(e: std::io::Error) -> SnapshotError {
SnapshotError::IO(e)
}
}
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for SnapshotError {
fn from(e: std::boxed::Box<bincode::ErrorKind>) -> SnapshotError {
SnapshotError::Serialize(e)
}
}
impl std::convert::From<fs_extra::error::Error> for SnapshotError {
fn from(e: fs_extra::error::Error) -> SnapshotError {
SnapshotError::FsExtra(e)
}
}
impl PartialOrd for SlotSnapshotPaths {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.slot.cmp(&other.slot))
}
}
impl SlotSnapshotPaths {
fn copy_snapshot_directory<P: AsRef<Path>>(&self, snapshot_hardlink_dir: P) -> Result<()> {
// Create a new directory in snapshot_hardlink_dir
let new_slot_hardlink_dir = snapshot_hardlink_dir.as_ref().join(self.slot.to_string());
let _ = fs::remove_dir_all(&new_slot_hardlink_dir);
fs::create_dir_all(&new_slot_hardlink_dir)?;
// Copy the snapshot
fs::copy(
&self.snapshot_file_path,
&new_slot_hardlink_dir.join(self.slot.to_string()),
)?;
Ok(())
}
}
pub fn package_snapshot<P: AsRef<Path>, Q: AsRef<Path>>(
bank: &Bank,
snapshot_files: &SlotSnapshotPaths,
snapshot_package_output_file: P,
snapshot_path: Q,
slots_to_snapshot: &[u64],
) -> Result<SnapshotPackage> {
// Hard link all the snapshots we need for this package
let snapshot_hard_links_dir = tempfile::tempdir_in(snapshot_path)?;
// Get a reference to all the relevant AccountStorageEntries
let account_storage_entries: Vec<_> = bank
.rc
.get_storage_entries()
.into_iter()
.filter(|x| x.fork_id() <= bank.slot())
.collect();
// Create a snapshot package
info!(
"Snapshot for bank: {} has {} account storage entries",
bank.slot(),
account_storage_entries.len()
);
// Any errors from this point on will cause the above SnapshotPackage to drop, clearing
// any temporary state created for the SnapshotPackage (like the snapshot_hard_links_dir)
snapshot_files.copy_snapshot_directory(snapshot_hard_links_dir.path())?;
let package = SnapshotPackage::new(
bank.slot(),
bank.src.slot_deltas(slots_to_snapshot),
snapshot_hard_links_dir,
account_storage_entries,
snapshot_package_output_file.as_ref().to_path_buf(),
);
Ok(package)
}
pub fn get_snapshot_paths<P: AsRef<Path>>(snapshot_path: P) -> Vec<SlotSnapshotPaths>
where
P: std::fmt::Debug,
{
match fs::read_dir(&snapshot_path) {
Ok(paths) => {
let mut names = paths
.filter_map(|entry| {
entry.ok().and_then(|e| {
e.path()
.file_name()
.and_then(|n| n.to_str().map(|s| s.parse::<u64>().ok()))
.unwrap_or(None)
})
})
.map(|slot| {
let snapshot_path = snapshot_path.as_ref().join(slot.to_string());
SlotSnapshotPaths {
slot,
snapshot_file_path: snapshot_path.join(get_snapshot_file_name(slot)),
}
})
.collect::<Vec<SlotSnapshotPaths>>();
names.sort();
names
}
Err(err) => {
info!(
"Unable to read snapshot directory {:?}: {}",
snapshot_path, err
);
vec![]
}
}
}
pub fn add_snapshot<P: AsRef<Path>>(snapshot_path: P, bank: &Bank) -> Result<()> {
let slot = bank.slot();
// snapshot_path/slot
let slot_snapshot_dir = get_bank_snapshot_dir(snapshot_path, slot);
fs::create_dir_all(slot_snapshot_dir.clone())?;
// the snapshot is stored as snapshot_path/slot/slot
let snapshot_file_path = slot_snapshot_dir.join(get_snapshot_file_name(slot));
info!(
"creating snapshot {}, path: {:?}",
bank.slot(),
snapshot_file_path,
);
let snapshot_file = File::create(&snapshot_file_path)?;
// snapshot writer
let mut snapshot_stream = BufWriter::new(snapshot_file);
// Create the snapshot
serialize_into(&mut snapshot_stream, &*bank)?;
let mut bank_rc_serialize = Measure::start("bank_rc_serialize-ms");
serialize_into(&mut snapshot_stream, &bank.rc)?;
bank_rc_serialize.stop();
inc_new_counter_info!("bank-rc-serialize-ms", bank_rc_serialize.as_ms() as usize);
info!(
"successfully created snapshot {}, path: {:?}",
bank.slot(),
snapshot_file_path,
);
Ok(())
}
pub fn remove_snapshot<P: AsRef<Path>>(slot: u64, snapshot_path: P) -> Result<()> {
let slot_snapshot_dir = get_bank_snapshot_dir(&snapshot_path, slot);
// Remove the snapshot directory for this slot
fs::remove_dir_all(slot_snapshot_dir)?;
Ok(())
}
pub fn bank_slot_from_archive<P: AsRef<Path>>(snapshot_tar: P) -> Result<u64> {
let tempdir = tempfile::TempDir::new()?;
untar_snapshot_in(&snapshot_tar, &tempdir)?;
let unpacked_snapshots_dir = tempdir.path().join(TAR_SNAPSHOTS_DIR);
let snapshot_paths = get_snapshot_paths(&unpacked_snapshots_dir);
let last_root_paths = snapshot_paths
.last()
.ok_or_else(|| get_io_error("No snapshots found in snapshots directory"))?;
let file = File::open(&last_root_paths.snapshot_file_path)?;
let mut stream = BufReader::new(file);
let bank: Bank = deserialize_from(&mut stream)?;
Ok(bank.slot())
}
pub fn bank_from_archive<P: AsRef<Path>>(
account_paths: String,
snapshot_path: &PathBuf,
snapshot_tar: P,
) -> Result<Bank> {
// Untar the snapshot into a temp directory under `snapshot_config.snapshot_path()`
let unpack_dir = tempfile::tempdir_in(snapshot_path)?;
untar_snapshot_in(&snapshot_tar, &unpack_dir)?;
let unpacked_accounts_dir = unpack_dir.as_ref().join(TAR_ACCOUNTS_DIR);
let unpacked_snapshots_dir = unpack_dir.as_ref().join(TAR_SNAPSHOTS_DIR);
let bank = rebuild_bank_from_snapshots(
account_paths,
&unpacked_snapshots_dir,
unpacked_accounts_dir,
)?;
if !bank.verify_hash_internal_state() {
panic!("Snapshot bank failed to verify");
}
// Move the unpacked snapshots into `snapshot_path`
let dir_files = fs::read_dir(&unpacked_snapshots_dir).unwrap_or_else(|err| {
panic!(
"Invalid snapshot path {:?}: {}",
unpacked_snapshots_dir, err
)
});
let paths: Vec<PathBuf> = dir_files
.filter_map(|entry| entry.ok().map(|e| e.path()))
.collect();
let mut copy_options = CopyOptions::new();
copy_options.overwrite = true;
fs_extra::move_items(&paths, &snapshot_path, &copy_options)?;
Ok(bank)
}
pub fn get_snapshot_tar_path<P: AsRef<Path>>(snapshot_output_dir: P) -> PathBuf {
snapshot_output_dir.as_ref().join("snapshot.tar.bz2")
}
pub fn untar_snapshot_in<P: AsRef<Path>, Q: AsRef<Path>>(
snapshot_tar: P,
unpack_dir: Q,
) -> Result<()> {
let tar_bz2 = File::open(snapshot_tar)?;
let tar = BzDecoder::new(BufReader::new(tar_bz2));
let mut archive = Archive::new(tar);
archive.unpack(&unpack_dir)?;
Ok(())
}
fn rebuild_bank_from_snapshots<P>(
local_account_paths: String,
unpacked_snapshots_dir: &PathBuf,
append_vecs_path: P,
) -> Result<Bank>
where
P: AsRef<Path>,
{
let mut snapshot_paths = get_snapshot_paths(&unpacked_snapshots_dir);
if snapshot_paths.len() > 1 {
return Err(get_io_error("invalid snapshot format"));
}
let root_paths = snapshot_paths
.pop()
.ok_or_else(|| get_io_error("No snapshots found in snapshots directory"))?;
// Rebuild the root bank
info!("Loading from {:?}", &root_paths.snapshot_file_path);
let file = File::open(&root_paths.snapshot_file_path)?;
let mut stream = BufReader::new(file);
let bank: Bank = deserialize_from(&mut stream)?;
// Rebuild accounts
bank.rc
.accounts_from_stream(&mut stream, local_account_paths, append_vecs_path)?;
// Rebuild status cache
let status_cache_path = unpacked_snapshots_dir.join(SNAPSHOT_STATUS_CACHE_FILE_NAME);
let status_cache = File::open(status_cache_path)?;
let mut stream = BufReader::new(status_cache);
let slot_deltas: Vec<SlotDelta<transaction::Result<()>>> =
deserialize_from(&mut stream).unwrap_or_default();
bank.src.append(&slot_deltas);
Ok(bank)
}
fn get_snapshot_file_name(slot: u64) -> String {
slot.to_string()
}
fn get_bank_snapshot_dir<P: AsRef<Path>>(path: P, slot: u64) -> PathBuf {
path.as_ref().join(slot.to_string())
}
fn get_io_error(error: &str) -> SnapshotError {
warn!("Snapshot Error: {:?}", error);
SnapshotError::IO(IOError::new(ErrorKind::Other, error))
}
pub fn verify_snapshot_tar<P, Q, R>(snapshot_tar: P, snapshots_to_verify: Q, storages_to_verify: R)
where
P: AsRef<Path>,
Q: AsRef<Path>,
R: AsRef<Path>,
{
let temp_dir = tempfile::TempDir::new().unwrap();
let unpack_dir = temp_dir.path();
untar_snapshot_in(snapshot_tar, &unpack_dir).unwrap();
// Check snapshots are the same
let unpacked_snapshots = unpack_dir.join(&TAR_SNAPSHOTS_DIR);
assert!(!dir_diff::is_different(&snapshots_to_verify, unpacked_snapshots).unwrap());
// Check the account entries are the same
let unpacked_accounts = unpack_dir.join(&TAR_ACCOUNTS_DIR);
assert!(!dir_diff::is_different(&storages_to_verify, unpacked_accounts).unwrap());
}