Revert "Revert "Ledger-tool: only require ledger dir when necessary (backport #21575) (#21578)""

This reverts commit eae3166bdc.
This commit is contained in:
Tyera Eulberg
2021-12-16 13:57:24 -07:00
committed by Tyera Eulberg
parent 704d05f52d
commit 9fff4aa8b8
3 changed files with 1535 additions and 1481 deletions

View File

@ -1,23 +1,26 @@
/// The `bigtable` subcommand /// The `bigtable` subcommand
use clap::{ use {
crate::ledger_path::canonicalize_ledger_path,
clap::{
value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand,
}; },
use solana_clap_utils::{ solana_clap_utils::{
input_parsers::pubkey_of, input_parsers::pubkey_of,
input_validators::{is_slot, is_valid_pubkey}, input_validators::{is_slot, is_valid_pubkey},
}; },
use solana_cli_output::{ solana_cli_output::{
display::println_transaction, CliBlock, CliTransaction, CliTransactionConfirmation, display::println_transaction, CliBlock, CliTransaction, CliTransactionConfirmation,
OutputFormat, OutputFormat,
}; },
use solana_ledger::{blockstore::Blockstore, blockstore_db::AccessType}; solana_ledger::{blockstore::Blockstore, blockstore_db::AccessType},
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}; solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature},
use solana_transaction_status::{ConfirmedBlock, EncodedTransaction, UiTransactionEncoding}; solana_transaction_status::{ConfirmedBlock, EncodedTransaction, UiTransactionEncoding},
use std::{ std::{
path::Path, path::Path,
process::exit, process::exit,
result::Result, result::Result,
sync::{atomic::AtomicBool, Arc}, sync::{atomic::AtomicBool, Arc},
},
}; };
async fn upload( async fn upload(
@ -426,8 +429,11 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) {
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).ok(); let ending_slot = value_t!(arg_matches, "ending_slot", Slot).ok();
let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata"); let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata");
let force_reupload = arg_matches.is_present("force_reupload"); let force_reupload = arg_matches.is_present("force_reupload");
let blockstore = let blockstore = crate::open_blockstore(
crate::open_blockstore(ledger_path, AccessType::TryPrimaryThenSecondary, None); &canonicalize_ledger_path(ledger_path),
AccessType::TryPrimaryThenSecondary,
None,
);
runtime.block_on(upload( runtime.block_on(upload(
blockstore, blockstore,

View File

@ -0,0 +1,30 @@
use {
clap::{value_t, ArgMatches},
std::{
fs,
path::{Path, PathBuf},
process::exit,
},
};
/// Extracts the ledger directory from the parsed CLI arguments.
///
/// Reads the argument named `name` as a `String` and converts it to a
/// `PathBuf`. If the argument is absent (or fails to parse), prints the
/// missing-argument error along with the CLI usage text to stderr and
/// terminates the process with exit code 1.
pub fn parse_ledger_path(matches: &ArgMatches<'_>, name: &str) -> PathBuf {
    match value_t!(matches, name, String) {
        Ok(raw_path) => PathBuf::from(raw_path),
        Err(_err) => {
            eprintln!(
                "Error: Missing --ledger <DIR> argument.\n\n{}",
                matches.usage()
            );
            exit(1);
        }
    }
}
/// Resolves `ledger_path` to an absolute, symlink-free path.
///
/// Canonicalizing up front avoids later issues with symlink creation
/// inside the ledger directory. If the path does not exist or cannot be
/// accessed, prints a diagnostic to stderr and terminates the process
/// with exit code 1.
pub fn canonicalize_ledger_path(ledger_path: &Path) -> PathBuf {
    match fs::canonicalize(ledger_path) {
        Ok(canonical) => canonical,
        Err(err) => {
            eprintln!(
                "Unable to access ledger path '{}': {}",
                ledger_path.display(),
                err
            );
            exit(1);
        }
    }
}

View File

@ -1,30 +1,31 @@
#![allow(clippy::integer_arithmetic)] #![allow(clippy::integer_arithmetic)]
use clap::{ use {
crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, clap::{
Arg, ArgMatches, SubCommand, crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App,
}; AppSettings, Arg, ArgMatches, SubCommand,
use dashmap::DashMap; },
use itertools::Itertools; dashmap::DashMap,
use log::*; itertools::Itertools,
use regex::Regex; log::*,
use serde::Serialize; regex::Regex,
use serde_json::json; serde::Serialize,
use solana_clap_utils::{ serde_json::json,
solana_clap_utils::{
input_parsers::{cluster_type_of, pubkey_of, pubkeys_of}, input_parsers::{cluster_type_of, pubkey_of, pubkeys_of},
input_validators::{ input_validators::{
is_parsable, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage, is_parsable, is_pubkey, is_pubkey_or_keypair, is_slot, is_valid_percentage,
}, },
}; },
use solana_ledger::entry::Entry; solana_ledger::entry::Entry,
use solana_ledger::{ solana_ledger::{
ancestor_iterator::AncestorIterator, ancestor_iterator::AncestorIterator,
bank_forks_utils, bank_forks_utils,
blockstore::{create_new_ledger, Blockstore, PurgeType}, blockstore::{create_new_ledger, Blockstore, PurgeType},
blockstore_db::{self, AccessType, BlockstoreRecoveryMode, Column, Database}, blockstore_db::{self, AccessType, BlockstoreRecoveryMode, Column, Database},
blockstore_processor::ProcessOptions, blockstore_processor::ProcessOptions,
shred::Shred, shred::Shred,
}; },
use solana_runtime::{ solana_runtime::{
bank::{Bank, RewardCalculationEvent}, bank::{Bank, RewardCalculationEvent},
bank_forks::{ArchiveFormat, BankForks, SnapshotConfig}, bank_forks::{ArchiveFormat, BankForks, SnapshotConfig},
cost_model::CostModel, cost_model::CostModel,
@ -32,8 +33,8 @@ use solana_runtime::{
hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE},
snapshot_utils, snapshot_utils,
snapshot_utils::{SnapshotVersion, DEFAULT_MAX_SNAPSHOTS_TO_RETAIN}, snapshot_utils::{SnapshotVersion, DEFAULT_MAX_SNAPSHOTS_TO_RETAIN},
}; },
use solana_sdk::{ solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount}, account::{AccountSharedData, ReadableAccount, WritableAccount},
account_utils::StateMut, account_utils::StateMut,
clock::{Epoch, Slot}, clock::{Epoch, Slot},
@ -46,25 +47,28 @@ use solana_sdk::{
shred_version::compute_shred_version, shred_version::compute_shred_version,
stake::{self, state::StakeState}, stake::{self, state::StakeState},
system_program, system_program,
}; },
use solana_stake_program::stake_state::{self, PointValue}; solana_stake_program::stake_state::{self, PointValue},
use solana_vote_program::{ solana_vote_program::{
self, self,
vote_state::{self, VoteState}, vote_state::{self, VoteState},
}; },
use std::{ std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet}, collections::{BTreeMap, BTreeSet, HashMap, HashSet},
ffi::OsStr, ffi::OsStr,
fs::{self, File}, fs::File,
io::{self, stdout, BufRead, BufReader, Write}, io::{self, stdout, BufRead, BufReader, Write},
path::{Path, PathBuf}, path::{Path, PathBuf},
process::{exit, Command, Stdio}, process::{exit, Command, Stdio},
str::FromStr, str::FromStr,
sync::{Arc, RwLock}, sync::{Arc, RwLock},
},
}; };
mod bigtable; mod bigtable;
use bigtable::*; use bigtable::*;
mod ledger_path;
use ledger_path::*;
#[derive(PartialEq)] #[derive(PartialEq)]
enum LedgerOutputMethod { enum LedgerOutputMethod {
@ -1498,25 +1502,7 @@ fn main() {
info!("{} {}", crate_name!(), solana_version::version!()); info!("{} {}", crate_name!(), solana_version::version!());
let ledger_path = PathBuf::from(value_t!(matches, "ledger_path", String).unwrap_or_else( let ledger_path = parse_ledger_path(&matches, "ledger_path");
|_err| {
eprintln!(
"Error: Missing --ledger <DIR> argument.\n\n{}",
matches.usage()
);
exit(1);
},
));
// Canonicalize ledger path to avoid issues with symlink creation
let ledger_path = fs::canonicalize(&ledger_path).unwrap_or_else(|err| {
eprintln!(
"Unable to access ledger path '{}': {}",
ledger_path.display(),
err
);
exit(1);
});
let snapshot_archive_path = value_t!(matches, "snapshot_archive_path", String) let snapshot_archive_path = value_t!(matches, "snapshot_archive_path", String)
.ok() .ok()
@ -1527,8 +1513,12 @@ fn main() {
.map(BlockstoreRecoveryMode::from); .map(BlockstoreRecoveryMode::from);
let verbose_level = matches.occurrences_of("verbose"); let verbose_level = matches.occurrences_of("verbose");
if let ("bigtable", Some(arg_matches)) = matches.subcommand() {
bigtable_process_command(&ledger_path, arg_matches)
} else {
let ledger_path = canonicalize_ledger_path(&ledger_path);
match matches.subcommand() { match matches.subcommand() {
("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches),
("print", Some(arg_matches)) => { ("print", Some(arg_matches)) => {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX);
@ -1554,7 +1544,8 @@ fn main() {
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot);
let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String)); let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String));
let source = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); let source =
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
let target = open_blockstore(&target_db, AccessType::PrimaryOnly, None); let target = open_blockstore(&target_db, AccessType::PrimaryOnly, None);
for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() {
if slot > ending_slot { if slot > ending_slot {
@ -1578,7 +1569,8 @@ fn main() {
} }
("modify-genesis", Some(arg_matches)) => { ("modify-genesis", Some(arg_matches)) => {
let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches); let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
let output_directory = PathBuf::from(arg_matches.value_of("output_directory").unwrap()); let output_directory =
PathBuf::from(arg_matches.value_of("output_directory").unwrap());
if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") { if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") {
genesis_config.cluster_type = cluster_type; genesis_config.cluster_type = cluster_type;
@ -1654,7 +1646,8 @@ fn main() {
} }
let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot);
let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX);
let ledger = open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); let ledger =
open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None);
for (slot, _meta) in ledger for (slot, _meta) in ledger
.slot_meta_iterator(starting_slot) .slot_meta_iterator(starting_slot)
.unwrap() .unwrap()
@ -1844,7 +1837,9 @@ fn main() {
if ancestors.contains(&slot) && !map.contains_key(&slot) { if ancestors.contains(&slot) && !map.contains_key(&slot) {
map.insert(slot, line); map.insert(slot, line);
} }
if slot == ending_slot && frozen.contains_key(&slot) && full.contains_key(&slot) if slot == ending_slot
&& frozen.contains_key(&slot)
&& full.contains_key(&slot)
{ {
break; break;
} }
@ -1928,7 +1923,8 @@ fn main() {
snapshot_archive_path, snapshot_archive_path,
) { ) {
Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => { Ok((bank_forks, _leader_schedule_cache, _snapshot_hash)) => {
let dot = graph_forks(&bank_forks, arg_matches.is_present("include_all_votes")); let dot =
graph_forks(&bank_forks, arg_matches.is_present("include_all_votes"));
let extension = Path::new(&output_file).extension(); let extension = Path::new(&output_file).extension();
let result = if extension == Some(OsStr::new("pdf")) { let result = if extension == Some(OsStr::new("pdf")) {
@ -1987,15 +1983,15 @@ fn main() {
.unwrap_or_default() .unwrap_or_default()
.into_iter() .into_iter()
.collect(); .collect();
let snapshot_version = let snapshot_version = arg_matches.value_of("snapshot_version").map_or(
arg_matches SnapshotVersion::default(),
.value_of("snapshot_version") |s| {
.map_or(SnapshotVersion::default(), |s| {
s.parse::<SnapshotVersion>().unwrap_or_else(|e| { s.parse::<SnapshotVersion>().unwrap_or_else(|e| {
eprintln!("Error: {}", e); eprintln!("Error: {}", e);
exit(1) exit(1)
}) })
}); },
);
let maximum_snapshots_to_retain = let maximum_snapshots_to_retain =
value_t_or_exit!(arg_matches, "maximum_snapshots_to_retain", usize); value_t_or_exit!(arg_matches, "maximum_snapshots_to_retain", usize);
@ -2063,7 +2059,9 @@ fn main() {
child_bank.set_hashes_per_tick(match hashes_per_tick { child_bank.set_hashes_per_tick(match hashes_per_tick {
// Note: Unlike `solana-genesis`, "auto" is not supported here. // Note: Unlike `solana-genesis`, "auto" is not supported here.
"sleep" => None, "sleep" => None,
_ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), _ => {
Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64))
}
}); });
} }
bank = Arc::new(child_bank); bank = Arc::new(child_bank);
@ -2107,7 +2105,8 @@ fn main() {
.into_iter() .into_iter()
{ {
if let Ok(StakeState::Stake(meta, stake)) = account.state() { if let Ok(StakeState::Stake(meta, stake)) = account.state() {
if vote_accounts_to_destake.contains(&stake.delegation.voter_pubkey) if vote_accounts_to_destake
.contains(&stake.delegation.voter_pubkey)
{ {
if verbose_level > 0 { if verbose_level > 0 {
warn!( warn!(
@ -2153,7 +2152,8 @@ fn main() {
let mut bootstrap_validator_pubkeys_iter = let mut bootstrap_validator_pubkeys_iter =
bootstrap_validator_pubkeys.iter(); bootstrap_validator_pubkeys.iter();
loop { loop {
let identity_pubkey = match bootstrap_validator_pubkeys_iter.next() { let identity_pubkey = match bootstrap_validator_pubkeys_iter.next()
{
None => break, None => break,
Some(identity_pubkey) => identity_pubkey, Some(identity_pubkey) => identity_pubkey,
}; };
@ -2321,7 +2321,10 @@ fn main() {
println!(" - slot: {}", slot); println!(" - slot: {}", slot);
println!(" - rent_epoch: {}", account.rent_epoch()); println!(" - rent_epoch: {}", account.rent_epoch());
if !exclude_account_data { if !exclude_account_data {
println!(" - data: '{}'", bs58::encode(account.data()).into_string()); println!(
" - data: '{}'",
bs58::encode(account.data()).into_string()
);
} }
println!(" - data_len: {}", data_len); println!(" - data_len: {}", data_len);
} }
@ -2364,16 +2367,21 @@ fn main() {
println!("Recalculating capitalization"); println!("Recalculating capitalization");
let old_capitalization = bank.set_capitalization(); let old_capitalization = bank.set_capitalization();
if old_capitalization == bank.capitalization() { if old_capitalization == bank.capitalization() {
eprintln!("Capitalization was identical: {}", Sol(old_capitalization)); eprintln!(
"Capitalization was identical: {}",
Sol(old_capitalization)
);
} }
} }
if arg_matches.is_present("warp_epoch") { if arg_matches.is_present("warp_epoch") {
let base_bank = bank; let base_bank = bank;
let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap(); let raw_warp_epoch =
value_t!(arg_matches, "warp_epoch", String).unwrap();
let warp_epoch = if raw_warp_epoch.starts_with('+') { let warp_epoch = if raw_warp_epoch.starts_with('+') {
base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap() base_bank.epoch()
+ value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
} else { } else {
value_t!(arg_matches, "warp_epoch", Epoch).unwrap() value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
}; };
@ -2679,7 +2687,10 @@ fn main() {
for point_detail in point_details { for point_detail in point_details {
let record = InflationRecord { let record = InflationRecord {
cluster_type: format!("{:?}", base_bank.cluster_type()), cluster_type: format!(
"{:?}",
base_bank.cluster_type()
),
rewarded_epoch: base_bank.epoch(), rewarded_epoch: base_bank.epoch(),
account: format!("{}", pubkey), account: format!("{}", pubkey),
owner: format!("{}", base_account.owner()), owner: format!("{}", base_account.owner()),
@ -2710,7 +2721,9 @@ fn main() {
deactivation_epoch: format_or_na( deactivation_epoch: format_or_na(
detail.and_then(|d| d.deactivation_epoch), detail.and_then(|d| d.deactivation_epoch),
), ),
earned_epochs: format_or_na(detail.map(|d| d.epochs)), earned_epochs: format_or_na(
detail.map(|d| d.epochs),
),
epoch: format_or_na(point_detail.map(|d| d.epoch)), epoch: format_or_na(point_detail.map(|d| d.epoch)),
epoch_credits: format_or_na( epoch_credits: format_or_na(
point_detail.map(|d| d.credits), point_detail.map(|d| d.credits),
@ -2736,7 +2749,9 @@ fn main() {
vote_rewards: format_or_na( vote_rewards: format_or_na(
detail.map(|d| d.vote_rewards), detail.map(|d| d.vote_rewards),
), ),
commission: format_or_na(detail.map(|d| d.commission)), commission: format_or_na(
detail.map(|d| d.commission),
),
cluster_rewards: format_or_na( cluster_rewards: format_or_na(
last_point_value last_point_value
.read() .read()
@ -2767,7 +2782,9 @@ fn main() {
} }
} else { } else {
if arg_matches.is_present("recalculate_capitalization") { if arg_matches.is_present("recalculate_capitalization") {
eprintln!("Capitalization isn't verified because it's recalculated"); eprintln!(
"Capitalization isn't verified because it's recalculated"
);
} }
if arg_matches.is_present("inflation") { if arg_matches.is_present("inflation") {
eprintln!( eprintln!(
@ -2963,8 +2980,8 @@ fn main() {
); );
exit(1); exit(1);
} }
let ancestor_iterator = let ancestor_iterator = AncestorIterator::new(start_root, &blockstore)
AncestorIterator::new(start_root, &blockstore).take_while(|&slot| slot >= end_root); .take_while(|&slot| slot >= end_root);
let roots_to_fix: Vec<_> = ancestor_iterator let roots_to_fix: Vec<_> = ancestor_iterator
.filter(|slot| !blockstore.is_root(*slot)) .filter(|slot| !blockstore.is_root(*slot))
.collect(); .collect();
@ -3085,4 +3102,5 @@ fn main() {
} }
_ => unreachable!(), _ => unreachable!(),
}; };
}
} }