Compare commits

19 Commits

SHA1
981294cbc6
ff728e5e56
9aaf41bef2
271eec656c
13d071607f
ffe35d9a10
bb2fb07b39
85fc51dc61
0276b6c4c2
c481e4fe7f
76a3b3ad11
356c663e88
015bbc1e12
454a9f3175
485b3d64a1
5d170d83c0
f54d8ea3ab
ef9f54b3d4
8d0b102b44
Cargo.lock (generated; 74 changed lines)
@@ -1085,6 +1085,17 @@ dependencies = [
  "ieee754",
 ]
 
+[[package]]
+name = "fd-lock"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a15bec795244d49f5ee3024bdc6c3883fb035f7f6601d4a4821c3d5d60784454"
+dependencies = [
+ "failure",
+ "libc",
+ "winapi 0.3.8",
+]
+
 [[package]]
 name = "feature-probe"
 version = "0.1.1"
@@ -2121,16 +2132,6 @@ version = "2.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
 
-[[package]]
-name = "memmap"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
-dependencies = [
- "libc",
- "winapi 0.3.8",
-]
-
 [[package]]
 name = "memmap2"
 version = "0.1.0"
@@ -4162,21 +4163,21 @@ dependencies = [
 
 [[package]]
 name = "solana-frozen-abi"
-version = "1.4.4"
+version = "1.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96b1c4f649f305d1cb1454bd5de691872bc2631b99746341b5af8b4d561dbf06"
+checksum = "9fd05ea1ac578b63449a1a89b62b5f00059dc438f7c143b4dcaf2eec1341e555"
 dependencies = [
  "bs58",
  "bv",
  "generic-array 0.14.3",
  "log 0.4.11",
- "memmap",
+ "memmap2",
  "rustc_version",
  "serde",
  "serde_derive",
  "sha2",
- "solana-frozen-abi-macro 1.4.4",
- "solana-logger 1.4.4",
+ "solana-frozen-abi-macro 1.4.17",
+ "solana-logger 1.4.17",
  "thiserror",
 ]
 
@@ -4200,9 +4201,9 @@ dependencies = [
 
 [[package]]
 name = "solana-frozen-abi-macro"
-version = "1.4.4"
+version = "1.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c84e6316d0b71d60ff34d2cc1a25521f6cb605111590a61e9baa768ac1234d4"
+checksum = "19f67844548a975ef56f712bb8840afcf19e94037b3341174d6edadb7e578351"
 dependencies = [
  "lazy_static",
  "proc-macro2 1.0.24",
@@ -4441,9 +4442,9 @@ dependencies = [
 
 [[package]]
 name = "solana-logger"
-version = "1.4.4"
+version = "1.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2e6d1862202b2f6aba9bd346ef10cdecb29e05948d25ad3a3789f4239000012"
+checksum = "0b97055ab14e7c1f67a3141b066ada44e6dfd1b2424d61ba2411dfd7cab08b69"
 dependencies = [
  "env_logger 0.7.1",
  "lazy_static",
@@ -4596,9 +4597,9 @@ dependencies = [
 
 [[package]]
 name = "solana-program"
-version = "1.4.4"
+version = "1.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8cf8a198b1443fccc12fab0e4927d1a3add3e5e9bd249bb61191c04d47e3c09"
+checksum = "78135c538a4bea743c9703f93f7a5289b26075b92b19de8ebb7e60853732b9a0"
 dependencies = [
  "bincode",
  "bs58",
@@ -4617,10 +4618,10 @@ dependencies = [
  "serde_bytes",
  "serde_derive",
  "sha2",
- "solana-frozen-abi 1.4.4",
- "solana-frozen-abi-macro 1.4.4",
- "solana-logger 1.4.4",
- "solana-sdk-macro 1.4.4",
+ "solana-frozen-abi 1.4.17",
+ "solana-frozen-abi-macro 1.4.17",
+ "solana-logger 1.4.17",
+ "solana-sdk-macro 1.4.17",
  "thiserror",
 ]
 
@@ -4827,9 +4828,9 @@ dependencies = [
 
 [[package]]
 name = "solana-sdk-macro"
-version = "1.4.4"
+version = "1.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8da0dcb182f9631a69b4d6de69f82f0aa8bf2350c666122b9fad99380547ccc"
+checksum = "090e095a5ac39010fa83488dfae422132798e15183d887cc9ab33ed6bb9dab8f"
 dependencies = [
  "bs58",
  "proc-macro2 1.0.24",
@@ -5096,6 +5097,7 @@ dependencies = [
  "chrono",
  "clap",
  "console",
+ "fd-lock",
  "indicatif",
  "libc",
  "log 0.4.11",
@@ -5213,34 +5215,34 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
 
 [[package]]
 name = "spl-associated-token-account"
-version = "1.0.1"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41a25d15fe67b755f95c575ce074e6e39c809fea86b2edb1bf2ae8b0473d5a1d"
+checksum = "4adc47eebe5d2b662cbaaba1843719c28a67e5ec5d0460bc3ca60900a51f74e2"
 dependencies = [
- "solana-program 1.4.4",
+ "solana-program 1.4.17",
  "spl-token",
 ]
 
 [[package]]
 name = "spl-memo"
-version = "2.0.0"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99775feb54f735a6826ea0af500c1f78f7a5974d6b17f1ac586cd114e2da7d80"
+checksum = "fb2b771f6146dec14ef5fbf498f9374652c54badc3befc8c40c1d426dd45d720"
 dependencies = [
- "solana-program 1.4.4",
+ "solana-program 1.4.17",
 ]
 
 [[package]]
 name = "spl-token"
-version = "3.0.0"
+version = "3.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f77fa0b41cbc82d1d7c8f2d914b49e9a1a7b6e32af952d03383fb989c42bc89"
+checksum = "a9774eebb62ff1ff2f5eca112413e476143925a2f5a43cee98fc5d3a6c0eec5c"
 dependencies = [
  "arrayref",
  "num-derive",
  "num-traits",
  "num_enum",
- "solana-program 1.4.4",
+ "solana-program 1.4.17",
  "thiserror",
 ]

@@ -22,7 +22,7 @@ solana-config-program = { path = "../programs/config", version = "1.5.0" }
 solana-sdk = { path = "../sdk", version = "1.5.0" }
 solana-stake-program = { path = "../programs/stake", version = "1.5.0" }
 solana-vote-program = { path = "../programs/vote", version = "1.5.0" }
-spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
+spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
 thiserror = "1.0"
 zstd = "0.5.1"

@@ -78,11 +78,6 @@ cargo_audit_ignores=(
   #
   # Blocked on multiple crates updating `time` to >= 0.2.23
   --ignore RUSTSEC-2020-0071
-
-  # memmap crate is unmaintained
-  #
-  # Blocked on us releasing new solana crates
-  --ignore RUSTSEC-2020-0077
 )
 _ scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"

@@ -2792,7 +2792,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, 'v>
                     .takes_value(false)
                     .help("Use the designated program id, even if the account already holds a large balance of SOL")
             )
-            .arg(commitment_arg_with_default("max")),
+            .arg(commitment_arg_with_default("singleGossip")),
         )
         .subcommand(
             SubCommand::with_name("program-upgrade")

@@ -800,8 +800,9 @@ pub fn process_get_block(
             format!(
                 "◎{:<19.9} {:>13.9}%",
                 lamports_to_sol(reward.post_balance),
-                reward.lamports.abs() as f64
-                    / (reward.post_balance as f64 - reward.lamports as f64)
+                (reward.lamports.abs() as f64
+                    / (reward.post_balance as f64 - reward.lamports as f64))
                     * 100.0
             )
         }
     );

@@ -206,7 +206,10 @@ pub fn parse_args<'a>(
             verbose,
             output_format,
             commitment,
-            send_transaction_config: RpcSendTransactionConfig::default(),
+            send_transaction_config: RpcSendTransactionConfig {
+                preflight_commitment: Some(commitment.commitment),
+                ..RpcSendTransactionConfig::default()
+            },
             address_labels,
         },
         signers,
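
The effect of this hunk, in isolation: preflight simulation now runs at the CLI's configured commitment level instead of the library default, so send and preflight agree. A minimal sketch (field names taken from the hunk above; the solana_client/solana_sdk import paths are assumptions):

use solana_client::rpc_config::RpcSendTransactionConfig;
use solana_sdk::commitment_config::CommitmentConfig;

// Build the send config so preflight uses the same commitment level
// the user selected for the rest of the CLI session.
fn send_config_for(commitment: CommitmentConfig) -> RpcSendTransactionConfig {
    RpcSendTransactionConfig {
        preflight_commitment: Some(commitment.commitment),
        ..RpcSendTransactionConfig::default()
    }
}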

@@ -1662,13 +1662,13 @@ pub(crate) fn fetch_epoch_rewards(
         .find(|reward| reward.pubkey == address.to_string())
     {
         if reward.post_balance > reward.lamports.try_into().unwrap_or(0) {
-            let percent_change = reward.lamports.abs() as f64
+            let rate_change = reward.lamports.abs() as f64
                 / (reward.post_balance as f64 - reward.lamports as f64);

             let apr = wallclock_epoch_duration.map(|wallclock_epoch_duration| {
                 let wallclock_epochs_per_year =
                     (SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;
-                percent_change * wallclock_epochs_per_year
+                rate_change * wallclock_epochs_per_year
             });

             all_epoch_rewards.push(CliEpochReward {
@@ -1676,8 +1676,8 @@ pub(crate) fn fetch_epoch_rewards(
                 effective_slot,
                 amount: reward.lamports.abs() as u64,
                 post_balance: reward.post_balance,
-                percent_change,
-                apr,
+                percent_change: rate_change * 100.0,
+                apr: apr.map(|r| r * 100.0),
             });
         }
     }
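
A standalone sketch of the reward arithmetic above, with made-up numbers: `rate_change` stays a fraction internally and is scaled by 100 only when stored for display. The year-length constant (356) is reproduced exactly as it appears in the hunk:

const SECONDS_PER_DAY: u64 = 24 * 60 * 60;

fn main() {
    let lamports: i64 = 5_000; // reward credited this epoch (hypothetical)
    let post_balance: u64 = 1_005_000; // balance after the reward (hypothetical)
    let wallclock_epoch_duration = 172_800.0; // epoch length in seconds (~2 days)

    // Fractional rate of change over one epoch.
    let rate_change = lamports.abs() as f64 / (post_balance as f64 - lamports as f64);

    // Annualize: per-epoch rate times epochs per year (356 as in the hunk above).
    let wallclock_epochs_per_year = (SECONDS_PER_DAY * 356) as f64 / wallclock_epoch_duration;
    let apr = rate_change * wallclock_epochs_per_year;

    println!("percent_change: {:.4}%", rate_change * 100.0); // 0.5000%
    println!("apr: {:.2}%", apr * 100.0); // 89.00%
}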

@@ -10,6 +10,7 @@ pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_VERIFICATION_FAILURE: i64 = -32003;
 pub const JSON_RPC_SERVER_ERROR_BLOCK_NOT_AVAILABLE: i64 = -32004;
 pub const JSON_RPC_SERVER_ERROR_NODE_UNHEALTHLY: i64 = -32005;
 pub const JSON_RPC_SERVER_ERROR_TRANSACTION_PRECOMPILE_VERIFICATION_FAILURE: i64 = -32006;
+pub const JSON_RPC_SERVER_ERROR_SLOT_SKIPPED: i64 = -32007;

 pub enum RpcCustomError {
     BlockCleanedUp {
@@ -26,6 +27,9 @@ pub enum RpcCustomError {
     },
     RpcNodeUnhealthy,
     TransactionPrecompileVerificationFailure(solana_sdk::transaction::TransactionError),
+    SlotSkipped {
+        slot: Slot,
+    },
 }

 impl From<RpcCustomError> for Error {
@@ -73,6 +77,14 @@ impl From<RpcCustomError> for Error {
                 message: format!("Transaction precompile verification failure {:?}", e),
                 data: None,
             },
+            RpcCustomError::SlotSkipped { slot } => Self {
+                code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_SLOT_SKIPPED),
+                message: format!(
+                    "Slot {} was skipped, or missing due to ledger jump to recent snapshot",
+                    slot
+                ),
+                data: None,
+            },
         }
     }
 }
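
For orientation, a sketch of the error value the new arm produces, written against jsonrpc_core types (an assumption about which crate supplies `Error`/`ErrorCode` here):

use jsonrpc_core::{Error, ErrorCode};

// Construct the error a client sees when it requests a skipped slot.
fn slot_skipped_error(slot: u64) -> Error {
    Error {
        code: ErrorCode::ServerError(-32007), // JSON_RPC_SERVER_ERROR_SLOT_SKIPPED
        message: format!(
            "Slot {} was skipped, or missing due to ledger jump to recent snapshot",
            slot
        ),
        data: None,
    }
}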

@@ -72,7 +72,7 @@ solana-sys-tuner = { path = "../sys-tuner", version = "1.5.0" }
 solana-transaction-status = { path = "../transaction-status", version = "1.5.0" }
 solana-version = { path = "../version", version = "1.5.0" }
 solana-vote-program = { path = "../programs/vote", version = "1.5.0" }
-spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
+spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
 tempfile = "3.1.0"
 thiserror = "1.0"
 tokio = { version = "0.2", features = ["full"] }

@@ -41,6 +41,7 @@ pub mod ledger_cleanup_service;
 pub mod non_circulating_supply;
 pub mod optimistic_confirmation_verifier;
 pub mod optimistically_confirmed_bank_tracker;
+pub mod packet_hasher;
 pub mod ping_pong;
 pub mod poh_recorder;
 pub mod poh_service;

core/src/packet_hasher.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
+// Get a unique hash value for a packet
+// Used in retransmit and shred fetch to prevent dos with same packet data.
+
+use ahash::AHasher;
+use rand::{thread_rng, Rng};
+use solana_perf::packet::Packet;
+use std::hash::Hasher;
+
+#[derive(Clone)]
+pub struct PacketHasher {
+    seed1: u128,
+    seed2: u128,
+}
+
+impl Default for PacketHasher {
+    fn default() -> Self {
+        Self {
+            seed1: thread_rng().gen::<u128>(),
+            seed2: thread_rng().gen::<u128>(),
+        }
+    }
+}
+
+impl PacketHasher {
+    pub fn hash_packet(&self, packet: &Packet) -> u64 {
+        let mut hasher = AHasher::new_with_keys(self.seed1, self.seed2);
+        hasher.write(&packet.data[0..packet.meta.size]);
+        hasher.finish()
+    }
+
+    pub fn reset(&mut self) {
+        *self = Self::default();
+    }
+}
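
A small usage sketch for the new type (the `demo` function is illustrative, not part of the patch): hashes are stable between resets, so a filter can compare them, while a periodic `reset()` re-keys the hash and defeats precomputed collisions:

use solana_perf::packet::Packet;

use crate::packet_hasher::PacketHasher;

fn demo() {
    let mut hasher = PacketHasher::default(); // fresh random seeds

    let mut packet = Packet::default();
    packet.data[0..4].copy_from_slice(&[1, 2, 3, 4]);
    packet.meta.size = 4;

    // Stable while the seeds are fixed, so hashes can serve as filter keys.
    assert_eq!(hasher.hash_packet(&packet), hasher.hash_packet(&packet));

    // Re-key: old hashes are no longer comparable to new ones.
    hasher.reset();
    let _rekeyed = hasher.hash_packet(&packet);
}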

@@ -2558,10 +2558,10 @@ pub(crate) mod tests {

     #[test]
     fn test_replay_commitment_cache() {
-        fn leader_vote(bank: &Arc<Bank>, pubkey: &Pubkey) {
+        fn leader_vote(vote_slot: Slot, bank: &Arc<Bank>, pubkey: &Pubkey) {
             let mut leader_vote_account = bank.get_account(&pubkey).unwrap();
             let mut vote_state = VoteState::from(&leader_vote_account).unwrap();
-            vote_state.process_slot_vote_unchecked(bank.slot());
+            vote_state.process_slot_vote_unchecked(vote_slot);
             let versioned = VoteStateVersions::Current(Box::new(vote_state));
             VoteState::to(&versioned, &mut leader_vote_account).unwrap();
             bank.store_account(&pubkey, &leader_vote_account);
@@ -2581,10 +2581,7 @@ pub(crate) mod tests {
         }
         bank0.freeze();
         let arc_bank0 = Arc::new(bank0);
-        let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(
-            &[arc_bank0.clone()],
-            0,
-        )));
+        let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks(&[arc_bank0], 0)));

         let exit = Arc::new(AtomicBool::new(false));
         let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
@@ -2608,44 +2605,33 @@ pub(crate) mod tests {
             .get_block_commitment(1)
             .is_none());

-        let bank1 = Bank::new_from_parent(&arc_bank0, &Pubkey::default(), arc_bank0.slot() + 1);
-        let _res = bank1.transfer(
-            10,
-            &genesis_config_info.mint_keypair,
-            &solana_sdk::pubkey::new_rand(),
-        );
-        for _ in 0..genesis_config.ticks_per_slot {
-            bank1.register_tick(&Hash::default());
+        for i in 1..=3 {
+            let prev_bank = bank_forks.read().unwrap().get(i - 1).unwrap().clone();
+            let bank = Bank::new_from_parent(&prev_bank, &Pubkey::default(), prev_bank.slot() + 1);
+            let _res = bank.transfer(
+                10,
+                &genesis_config_info.mint_keypair,
+                &solana_sdk::pubkey::new_rand(),
+            );
+            for _ in 0..genesis_config.ticks_per_slot {
+                bank.register_tick(&Hash::default());
+            }
+            bank_forks.write().unwrap().insert(bank);
+            let arc_bank = bank_forks.read().unwrap().get(i).unwrap().clone();
+            leader_vote(i - 1, &arc_bank, &leader_voting_pubkey);
+            ReplayStage::update_commitment_cache(
+                arc_bank.clone(),
+                0,
+                leader_lamports,
+                &lockouts_sender,
+            );
+            arc_bank.freeze();
         }
-        bank1.freeze();
-        bank_forks.write().unwrap().insert(bank1);
-        let arc_bank1 = bank_forks.read().unwrap().get(1).unwrap().clone();
-        leader_vote(&arc_bank1, &leader_voting_pubkey);
-        ReplayStage::update_commitment_cache(
-            arc_bank1.clone(),
-            0,
-            leader_lamports,
-            &lockouts_sender,
-        );
-
-        let bank2 = Bank::new_from_parent(&arc_bank1, &Pubkey::default(), arc_bank1.slot() + 1);
-        let _res = bank2.transfer(
-            10,
-            &genesis_config_info.mint_keypair,
-            &solana_sdk::pubkey::new_rand(),
-        );
-        for _ in 0..genesis_config.ticks_per_slot {
-            bank2.register_tick(&Hash::default());
-        }
-        bank2.freeze();
-        bank_forks.write().unwrap().insert(bank2);
-        let arc_bank2 = bank_forks.read().unwrap().get(2).unwrap().clone();
-        leader_vote(&arc_bank2, &leader_voting_pubkey);
-        ReplayStage::update_commitment_cache(arc_bank2, 0, leader_lamports, &lockouts_sender);
         thread::sleep(Duration::from_millis(200));

         let mut expected0 = BlockCommitment::default();
         expected0.increase_confirmation_stake(2, leader_lamports);
+        expected0.increase_confirmation_stake(3, leader_lamports);
         assert_eq!(
             block_commitment_cache
                 .read()
@@ -1,8 +1,6 @@
 //! The `retransmit_stage` retransmits shreds between validators
 #![allow(clippy::rc_buffer)]

-use crate::shred_fetch_stage::ShredFetchStage;
-use crate::shred_fetch_stage::ShredFetchStats;
 use crate::{
     cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
     cluster_info_vote_listener::VerifiedVoteReceiver,
@@ -15,10 +13,9 @@ use crate::{
     result::{Error, Result},
     window_service::{should_retransmit_and_persist, WindowService},
 };
-use ahash::AHasher;
 use crossbeam_channel::Receiver;
 use lru::LruCache;
-use rand::{thread_rng, Rng};
+use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats};
 use solana_ledger::{
     blockstore::{Blockstore, CompletedSlotsReceiver},
     leader_schedule_cache::LeaderScheduleCache,
@@ -26,14 +23,13 @@ use solana_ledger::{
 };
 use solana_measure::measure::Measure;
 use solana_metrics::inc_new_counter_error;
-use solana_perf::packet::Packets;
+use solana_perf::packet::{Packet, Packets};
 use solana_runtime::bank_forks::BankForks;
 use solana_sdk::clock::{Epoch, Slot};
 use solana_sdk::epoch_schedule::EpochSchedule;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::timing::timestamp;
 use solana_streamer::streamer::PacketReceiver;
-use std::hash::Hasher;
 use std::{
     cmp,
     collections::hash_set::HashSet,
@@ -206,7 +202,42 @@ struct EpochStakesCache {
     stakes_and_index: Vec<(u64, usize)>,
 }

-pub type ShredFilterAndSeeds = (LruCache<(Slot, u32), Vec<u64>>, u128, u128);
+use crate::packet_hasher::PacketHasher;
+// Map of shred (slot, index, is_data) => list of hash values seen for that key.
+pub type ShredFilter = LruCache<(Slot, u32, bool), Vec<u64>>;
+
+pub type ShredFilterAndHasher = (ShredFilter, PacketHasher);
+
+// Return true if shred is already received and should skip retransmit
+fn check_if_already_received(
+    packet: &Packet,
+    shreds_received: &Arc<Mutex<ShredFilterAndHasher>>,
+) -> bool {
+    match get_shred_slot_index_type(packet, &mut ShredFetchStats::default()) {
+        Some(slot_index) => {
+            let mut received = shreds_received.lock().unwrap();
+            let hasher = received.1.clone();
+            if let Some(sent) = received.0.get_mut(&slot_index) {
+                if sent.len() < MAX_DUPLICATE_COUNT {
+                    let hash = hasher.hash_packet(packet);
+                    if sent.contains(&hash) {
+                        return true;
+                    }
+
+                    sent.push(hash);
+                } else {
+                    return true;
+                }
+            } else {
+                let hash = hasher.hash_packet(&packet);
+                received.0.put(slot_index, vec![hash]);
+            }
+
+            false
+        }
+        None => true,
+    }
+}
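
Reduced to its core, the filter above keeps a short list of payload hashes per (slot, index, is_data) key; `MAX_DUPLICATE_COUNT` is not shown in this diff, but the tests further down imply a cap of two entries. A self-contained sketch of that policy:

use lru::LruCache;

const MAX_DUPLICATE_COUNT: usize = 2; // assumed from the tests below

// Returns true when the packet should be skipped: either this exact payload
// was already seen for the key, or the key has already let through the
// maximum number of distinct payloads.
fn should_skip(
    key: (u64, u32, bool),
    payload_hash: u64,
    seen: &mut LruCache<(u64, u32, bool), Vec<u64>>,
) -> bool {
    match seen.get_mut(&key) {
        Some(hashes) if hashes.len() < MAX_DUPLICATE_COUNT => {
            if hashes.contains(&payload_hash) {
                true
            } else {
                hashes.push(payload_hash);
                false
            }
        }
        Some(_) => true, // key saturated: block all further variants
        None => {
            seen.put(key, vec![payload_hash]);
            false
        }
    }
}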

 #[allow(clippy::too_many_arguments)]
 fn retransmit(
@@ -219,7 +250,7 @@ fn retransmit(
     stats: &Arc<RetransmitStats>,
     epoch_stakes_cache: &Arc<RwLock<EpochStakesCache>>,
     last_peer_update: &Arc<AtomicU64>,
-    shreds_received: &Arc<Mutex<ShredFilterAndSeeds>>,
+    shreds_received: &Arc<Mutex<ShredFilterAndHasher>>,
 ) -> Result<()> {
     let timer = Duration::new(1, 0);
     let r_lock = r.lock().unwrap();
@@ -271,8 +302,7 @@ fn retransmit(
         {
             let mut sr = shreds_received.lock().unwrap();
             sr.0.clear();
-            sr.1 = thread_rng().gen::<u128>();
-            sr.2 = thread_rng().gen::<u128>();
+            sr.1.reset();
         }
     }
     let mut peers_len = 0;
@@ -299,33 +329,10 @@ fn retransmit(
             continue;
         }

-        match ShredFetchStage::get_slot_index(packet, &mut ShredFetchStats::default()) {
-            Some(slot_index) => {
-                let mut received = shreds_received.lock().unwrap();
-                let seed1 = received.1;
-                let seed2 = received.2;
-                if let Some(sent) = received.0.get_mut(&slot_index) {
-                    if sent.len() < MAX_DUPLICATE_COUNT {
-                        let mut hasher = AHasher::new_with_keys(seed1, seed2);
-                        hasher.write(&packet.data[0..packet.meta.size]);
-                        let hash = hasher.finish();
-                        if sent.contains(&hash) {
-                            continue;
-                        }
-
-                        sent.push(hash);
-                    } else {
-                        continue;
-                    }
-                } else {
-                    let mut hasher = AHasher::new_with_keys(seed1, seed2);
-                    hasher.write(&packet.data[0..packet.meta.size]);
-                    let hash = hasher.finish();
-                    received.0.put(slot_index, vec![hash]);
-                }
-            }
-            None => continue,
+        if check_if_already_received(packet, shreds_received) {
+            continue;
         }

         let mut compute_turbine_peers = Measure::start("turbine_start");
         let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
             &my_id,
@@ -414,7 +421,10 @@ pub fn retransmitter(
     r: Arc<Mutex<PacketReceiver>>,
 ) -> Vec<JoinHandle<()>> {
     let stats = Arc::new(RetransmitStats::default());
-    let shreds_received = Arc::new(Mutex::new((LruCache::new(DEFAULT_LRU_SIZE), 0, 0)));
+    let shreds_received = Arc::new(Mutex::new((
+        LruCache::new(DEFAULT_LRU_SIZE),
+        PacketHasher::default(),
+    )));
     (0..sockets.len())
         .map(|s| {
             let sockets = sockets.clone();
@@ -568,6 +578,7 @@ mod tests {
     use solana_ledger::blockstore_processor::{process_blockstore, ProcessOptions};
     use solana_ledger::create_new_tmp_ledger;
     use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
+    use solana_ledger::shred::Shred;
     use solana_net_utils::find_available_port_in_range;
     use solana_perf::packet::{Packet, Packets};
     use std::net::{IpAddr, Ipv4Addr};
@@ -616,8 +627,7 @@ mod tests {
         );
         let _thread_hdls = vec![t_retransmit];

-        let mut shred =
-            solana_ledger::shred::Shred::new_from_data(0, 0, 0, None, true, true, 0, 0x20, 0);
+        let mut shred = Shred::new_from_data(0, 0, 0, None, true, true, 0, 0x20, 0);
         let mut packet = Packet::default();
         shred.copy_to_packet(&mut packet);

@@ -642,4 +652,52 @@ mod tests {
         assert_eq!(packets.packets.len(), 1);
         assert_eq!(packets.packets[0].meta.repair, false);
     }

+    #[test]
+    fn test_already_received() {
+        let mut packet = Packet::default();
+        let slot = 1;
+        let index = 5;
+        let version = 0x40;
+        let shred = Shred::new_from_data(slot, index, 0, None, true, true, 0, version, 0);
+        shred.copy_to_packet(&mut packet);
+        let shreds_received = Arc::new(Mutex::new((LruCache::new(100), PacketHasher::default())));
+        // unique shred for (1, 5) should pass
+        assert!(!check_if_already_received(&packet, &shreds_received));
+        // duplicate shred for (1, 5) blocked
+        assert!(check_if_already_received(&packet, &shreds_received));
+
+        let shred = Shred::new_from_data(slot, index, 2, None, true, true, 0, version, 0);
+        shred.copy_to_packet(&mut packet);
+        // first duplicate shred for (1, 5) passed
+        assert!(!check_if_already_received(&packet, &shreds_received));
+        // then blocked
+        assert!(check_if_already_received(&packet, &shreds_received));
+
+        let shred = Shred::new_from_data(slot, index, 8, None, true, true, 0, version, 0);
+        shred.copy_to_packet(&mut packet);
+        // 2nd duplicate shred for (1, 5) blocked
+        assert!(check_if_already_received(&packet, &shreds_received));
+        assert!(check_if_already_received(&packet, &shreds_received));
+
+        let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, 0, version);
+        shred.copy_to_packet(&mut packet);
+        // Coding at (1, 5) passes
+        assert!(!check_if_already_received(&packet, &shreds_received));
+        // then blocked
+        assert!(check_if_already_received(&packet, &shreds_received));
+
+        let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, 0, version);
+        shred.copy_to_packet(&mut packet);
+        // 2nd unique coding at (1, 5) passes
+        assert!(!check_if_already_received(&packet, &shreds_received));
+        // same again is blocked
+        assert!(check_if_already_received(&packet, &shreds_received));
+
+        let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, 0, version);
+        shred.copy_to_packet(&mut packet);
+        // Another unique coding at (1, 5) always blocked
+        assert!(check_if_already_received(&packet, &shreds_received));
+        assert!(check_if_already_received(&packet, &shreds_received));
+    }
 }
|
@@ -601,7 +601,7 @@ impl JsonRpcRequestProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
fn check_blockstore_max_root<T>(
|
||||
fn check_blockstore_root<T>(
|
||||
&self,
|
||||
result: &std::result::Result<T, BlockstoreError>,
|
||||
slot: Slot,
|
||||
@@ -612,7 +612,7 @@ impl JsonRpcRequestProcessor {
|
||||
if result.is_err() {
|
||||
let err = result.as_ref().unwrap_err();
|
||||
debug!(
|
||||
"check_blockstore_max_root, slot: {:?}, max root: {:?}, err: {:?}",
|
||||
"check_blockstore_root, slot: {:?}, max root: {:?}, err: {:?}",
|
||||
slot,
|
||||
self.blockstore.max_root(),
|
||||
err
|
||||
@@ -620,6 +620,9 @@ impl JsonRpcRequestProcessor {
|
||||
if slot >= self.blockstore.max_root() {
|
||||
return Err(RpcCustomError::BlockNotAvailable { slot }.into());
|
||||
}
|
||||
if self.blockstore.is_skipped(slot) {
|
||||
return Err(RpcCustomError::SlotSkipped { slot }.into());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -662,7 +665,7 @@ impl JsonRpcRequestProcessor {
|
||||
.highest_confirmed_root()
|
||||
{
|
||||
let result = self.blockstore.get_confirmed_block(slot);
|
||||
self.check_blockstore_max_root(&result, slot)?;
|
||||
self.check_blockstore_root(&result, slot)?;
|
||||
if result.is_err() {
|
||||
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
|
||||
return Ok(self
|
||||
@@ -768,7 +771,7 @@ impl JsonRpcRequestProcessor {
|
||||
.highest_confirmed_root()
|
||||
{
|
||||
let result = self.blockstore.get_block_time(slot);
|
||||
self.check_blockstore_max_root(&result, slot)?;
|
||||
self.check_blockstore_root(&result, slot)?;
|
||||
if result.is_err() {
|
||||
if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage {
|
||||
return Ok(self
|
||||
|

@@ -1,17 +1,10 @@
 //! The `shred_fetch_stage` pulls shreds from UDP sockets and sends it to a channel.

-use ahash::AHasher;
+use crate::packet_hasher::PacketHasher;
 use lru::LruCache;
-use rand::{thread_rng, Rng};
-use std::hash::Hasher;
-
-use solana_ledger::blockstore::MAX_DATA_SHREDS_PER_SLOT;
-use solana_ledger::shred::{
-    CODING_SHRED, DATA_SHRED, OFFSET_OF_SHRED_INDEX, OFFSET_OF_SHRED_SLOT, OFFSET_OF_SHRED_TYPE,
-    SIZE_OF_SHRED_INDEX, SIZE_OF_SHRED_SLOT,
-};
+use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats};
 use solana_perf::cuda_runtime::PinnedVec;
-use solana_perf::packet::{limited_deserialize, Packet, PacketsRecycler};
+use solana_perf::packet::{Packet, PacketsRecycler};
 use solana_perf::recycler::Recycler;
 use solana_runtime::bank_forks::BankForks;
 use solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT};
@@ -27,48 +20,11 @@ use std::time::Instant;
 const DEFAULT_LRU_SIZE: usize = 10_000;
 pub type ShredsReceived = LruCache<u64, ()>;

-#[derive(Default)]
-pub struct ShredFetchStats {
-    index_overrun: usize,
-    shred_count: usize,
-    index_bad_deserialize: usize,
-    index_out_of_bounds: usize,
-    slot_bad_deserialize: usize,
-    duplicate_shred: usize,
-    slot_out_of_range: usize,
-}
-
 pub struct ShredFetchStage {
     thread_hdls: Vec<JoinHandle<()>>,
 }

 impl ShredFetchStage {
-    pub fn get_slot_index(p: &Packet, stats: &mut ShredFetchStats) -> Option<(u64, u32)> {
-        let index_start = OFFSET_OF_SHRED_INDEX;
-        let index_end = index_start + SIZE_OF_SHRED_INDEX;
-        let slot_start = OFFSET_OF_SHRED_SLOT;
-        let slot_end = slot_start + SIZE_OF_SHRED_SLOT;
-
-        if index_end <= p.meta.size {
-            if let Ok(index) = limited_deserialize::<u32>(&p.data[index_start..index_end]) {
-                if index < MAX_DATA_SHREDS_PER_SLOT as u32 && slot_end <= p.meta.size {
-                    if let Ok(slot) = limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
-                        return Some((slot, index));
-                    } else {
-                        stats.slot_bad_deserialize += 1;
-                    }
-                } else {
-                    stats.index_out_of_bounds += 1;
-                }
-            } else {
-                stats.index_bad_deserialize += 1;
-            }
-        } else {
-            stats.index_overrun += 1;
-        }
-        None
-    }
-
     fn process_packet<F>(
         p: &mut Packet,
         shreds_received: &mut ShredsReceived,
@@ -77,32 +33,24 @@ impl ShredFetchStage {
         last_slot: Slot,
         slots_per_epoch: u64,
         modify: &F,
-        seeds: (u128, u128),
+        packet_hasher: &PacketHasher,
     ) where
         F: Fn(&mut Packet),
     {
         p.meta.discard = true;
-        if let Some((slot, _index)) = Self::get_slot_index(p, stats) {
+        if let Some((slot, _index, _shred_type)) = get_shred_slot_index_type(p, stats) {
             // Seems reasonable to limit shreds to 2 epochs away
-            if slot > last_root
-                && slot < (last_slot + 2 * slots_per_epoch)
-                && p.meta.size > OFFSET_OF_SHRED_TYPE
-            {
-                let shred_type = p.data[OFFSET_OF_SHRED_TYPE];
-                if shred_type == DATA_SHRED || shred_type == CODING_SHRED {
-                    // Shred filter
+            if slot > last_root && slot < (last_slot + 2 * slots_per_epoch) {
+                // Shred filter

-                    let mut hasher = AHasher::new_with_keys(seeds.0, seeds.1);
-                    hasher.write(&p.data[0..p.meta.size]);
-                    let hash = hasher.finish();
+                let hash = packet_hasher.hash_packet(p);

-                    if shreds_received.get(&hash).is_none() {
-                        shreds_received.put(hash, ());
-                        p.meta.discard = false;
-                        modify(p);
-                    } else {
-                        stats.duplicate_shred += 1;
-                    }
+                if shreds_received.get(&hash).is_none() {
+                    shreds_received.put(hash, ());
+                    p.meta.discard = false;
+                    modify(p);
+                } else {
+                    stats.duplicate_shred += 1;
+                }
             } else {
                 stats.slot_out_of_range += 1;
@@ -130,12 +78,12 @@ impl ShredFetchStage {

         let mut last_stats = Instant::now();
         let mut stats = ShredFetchStats::default();
-        let mut seeds = (thread_rng().gen::<u128>(), thread_rng().gen::<u128>());
+        let mut packet_hasher = PacketHasher::default();

         while let Some(mut p) = recvr.iter().next() {
             if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT {
                 last_updated = Instant::now();
-                seeds = (thread_rng().gen::<u128>(), thread_rng().gen::<u128>());
+                packet_hasher.reset();
                 shreds_received.clear();
                 if let Some(bank_forks) = bank_forks.as_ref() {
                     let bank_forks_r = bank_forks.read().unwrap();
@@ -156,7 +104,7 @@ impl ShredFetchStage {
                         last_slot,
                         slots_per_epoch,
                         &modify,
-                        seeds,
+                        &packet_hasher,
                     );
                 });
             if last_stats.elapsed().as_millis() > 1000 {
@@ -274,6 +222,7 @@ impl ShredFetchStage {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use solana_ledger::blockstore::MAX_DATA_SHREDS_PER_SLOT;
     use solana_ledger::shred::Shred;

     #[test]
@@ -287,7 +236,7 @@ mod tests {
         let shred = Shred::new_from_data(slot, 3, 0, None, true, true, 0, 0, 0);
         shred.copy_to_packet(&mut packet);

-        let seeds = (thread_rng().gen::<u128>(), thread_rng().gen::<u128>());
+        let hasher = PacketHasher::default();

         let last_root = 0;
         let last_slot = 100;
@@ -300,7 +249,7 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert!(!packet.meta.discard);

@@ -315,7 +264,7 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert!(!packet.meta.discard);
     }
@@ -329,7 +278,9 @@ mod tests {
         let last_root = 0;
         let last_slot = 100;
         let slots_per_epoch = 10;
-        let seeds = (thread_rng().gen::<u128>(), thread_rng().gen::<u128>());
+
+        let hasher = PacketHasher::default();
+
         // packet size is 0, so cannot get index
         ShredFetchStage::process_packet(
             &mut packet,
@@ -339,7 +290,7 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert_eq!(stats.index_overrun, 1);
         assert!(packet.meta.discard);
@@ -355,7 +306,7 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert!(packet.meta.discard);

@@ -368,7 +319,7 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert!(!packet.meta.discard);

@@ -381,7 +332,7 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert!(packet.meta.discard);

@@ -397,7 +348,7 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert!(packet.meta.discard);

@@ -412,20 +363,8 @@ mod tests {
             last_slot,
             slots_per_epoch,
             &|_p| {},
-            seeds,
+            &hasher,
         );
         assert!(packet.meta.discard);
     }
-
-    #[test]
-    fn test_shred_offsets() {
-        let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
-        let mut packet = Packet::default();
-        shred.copy_to_packet(&mut packet);
-        let mut stats = ShredFetchStats::default();
-        assert_eq!(
-            Some((1, 3)),
-            ShredFetchStage::get_slot_index(&packet, &mut stats)
-        );
-    }
 }
|
@@ -346,6 +346,7 @@ impl TestValidator {
|
||||
compression: CompressionType::NoCompression,
|
||||
snapshot_version: SnapshotVersion::default(),
|
||||
}),
|
||||
enforce_ulimit_nofile: false,
|
||||
..ValidatorConfig::default()
|
||||
};
|
||||
|
||||
|
@@ -341,7 +341,7 @@ pub mod tests {
|
||||
ledger_signal_receiver,
|
||||
completed_slots_receiver,
|
||||
..
|
||||
} = Blockstore::open_with_signal(&blockstore_path, None)
|
||||
} = Blockstore::open_with_signal(&blockstore_path, None, true)
|
||||
.expect("Expected to successfully open ledger");
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let bank = bank_forks.working_bank();
|
||||
|

@@ -86,6 +86,7 @@ pub struct ValidatorConfig {
     pub max_ledger_shreds: Option<u64>,
     pub broadcast_stage_type: BroadcastStageType,
     pub enable_partition: Option<Arc<AtomicBool>>,
+    pub enforce_ulimit_nofile: bool,
     pub fixed_leader_schedule: Option<FixedSchedule>,
     pub wait_for_supermajority: Option<Slot>,
     pub new_hard_forks: Option<Vec<Slot>>,
@@ -123,6 +124,7 @@ impl Default for ValidatorConfig {
             snapshot_config: None,
             broadcast_stage_type: BroadcastStageType::Standard,
             enable_partition: None,
+            enforce_ulimit_nofile: true,
             fixed_leader_schedule: None,
             wait_for_supermajority: None,
             new_hard_forks: None,
@@ -300,6 +302,7 @@ impl Validator {
             ledger_path,
             config.poh_verify,
             &exit,
+            config.enforce_ulimit_nofile,
         );

         let leader_schedule_cache = Arc::new(leader_schedule_cache);
@@ -822,6 +825,7 @@ fn new_banks_from_ledger(
     ledger_path: &Path,
     poh_verify: bool,
     exit: &Arc<AtomicBool>,
+    enforce_ulimit_nofile: bool,
 ) -> (
     GenesisConfig,
     BankForks,
@@ -859,8 +863,12 @@ fn new_banks_from_ledger(
         ledger_signal_receiver,
         completed_slots_receiver,
         ..
-    } = Blockstore::open_with_signal(ledger_path, config.wal_recovery_mode.clone())
-        .expect("Failed to open ledger database");
+    } = Blockstore::open_with_signal(
+        ledger_path,
+        config.wal_recovery_mode.clone(),
+        enforce_ulimit_nofile,
+    )
+    .expect("Failed to open ledger database");
     blockstore.set_no_compaction(config.no_rocksdb_compaction);

     let restored_tower = Tower::restore(ledger_path, &validator_identity);

@@ -24,19 +24,19 @@ The vote signing service consists of a JSON RPC server and a request processor.

 1. Register a new validator node

-   - The request must contain validator's identity \(public key\)
-   - The request must be signed with the validator's private key
-   - The service drops the request if signature of the request cannot be verified
-   - The service creates a new voting asymmetric key for the validator, and returns the public key as a response
-   - If a validator tries to register again, the service returns the public key from the pre-existing keypair
+   - The request must contain validator's identity \(public key\)
+   - The request must be signed with the validator's private key
+   - The service drops the request if signature of the request cannot be verified
+   - The service creates a new voting asymmetric key for the validator, and returns the public key as a response
+   - If a validator tries to register again, the service returns the public key from the pre-existing keypair

-2. Sign a vote
+2. Sign a vote

-   - The request must contain a voting transaction and all verification data
-   - The request must be signed with the validator's private key
-   - The service drops the request if signature of the request cannot be verified
-   - The service verifies the voting data
-   - The service returns a signature for the transaction
+   - The request must contain a voting transaction and all verification data
+   - The request must be signed with the validator's private key
+   - The service drops the request if signature of the request cannot be verified
+   - The service verifies the voting data
+   - The service returns a signature for the transaction
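
The two request flows described above reduce to signature-checked lookups against a per-identity keypair store. A minimal sketch of the register flow (the struct and method names here are illustrative, not the service's actual API):

use std::collections::HashMap;

use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signature, Signer};

struct VoteSigningService {
    // validator identity pubkey -> voting keypair created at registration
    vote_keys: HashMap<Pubkey, Keypair>,
}

impl VoteSigningService {
    /// Returns the voting pubkey, or None when the request signature does
    /// not verify against the claimed identity (the request is dropped).
    fn register(&mut self, identity: Pubkey, request: &[u8], sig: &Signature) -> Option<Pubkey> {
        if !sig.verify(identity.as_ref(), request) {
            return None;
        }
        // Re-registration returns the pre-existing voting pubkey.
        let keypair = self.vote_keys.entry(identity).or_insert_with(Keypair::new);
        Some(keypair.pubkey())
    }
}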

## Validator voting
@@ -39,13 +39,5 @@ url = "2.1.1"
 winapi = "0.3.8"
 winreg = "0.7"

-[[bin]]
-name = "solana-install"
-path = "src/main-install.rs"
-
-[[bin]]
-name = "solana-install-init"
-path = "src/main-install-init.rs"
-
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]

@@ -615,7 +615,7 @@ fn open_blockstore(
     access_type: AccessType,
     wal_recovery_mode: Option<BlockstoreRecoveryMode>,
 ) -> Blockstore {
-    match Blockstore::open_with_access_type(ledger_path, access_type, wal_recovery_mode) {
+    match Blockstore::open_with_access_type(ledger_path, access_type, wal_recovery_mode, true) {
         Ok(blockstore) => blockstore,
         Err(err) => {
             eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err);

@@ -66,10 +66,7 @@ pub fn load(
             compression,
             genesis_config,
             process_options.debug_keys.clone(),
-            Some(&crate::builtins::get(
-                genesis_config.cluster_type,
-                process_options.bpf_jit,
-            )),
+            Some(&crate::builtins::get(process_options.bpf_jit)),
         )
         .expect("Load from snapshot failed");
|
@@ -253,26 +253,33 @@ impl Blockstore {
|
||||
|
||||
/// Opens a Ledger in directory, provides "infinite" window of shreds
|
||||
pub fn open(ledger_path: &Path) -> Result<Blockstore> {
|
||||
Self::do_open(ledger_path, AccessType::PrimaryOnly, None)
|
||||
Self::do_open(ledger_path, AccessType::PrimaryOnly, None, true)
|
||||
}
|
||||
|
||||
pub fn open_with_access_type(
|
||||
ledger_path: &Path,
|
||||
access_type: AccessType,
|
||||
recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
enforce_ulimit_nofile: bool,
|
||||
) -> Result<Blockstore> {
|
||||
Self::do_open(ledger_path, access_type, recovery_mode)
|
||||
Self::do_open(
|
||||
ledger_path,
|
||||
access_type,
|
||||
recovery_mode,
|
||||
enforce_ulimit_nofile,
|
||||
)
|
||||
}
|
||||
|
||||
fn do_open(
|
||||
ledger_path: &Path,
|
||||
access_type: AccessType,
|
||||
recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
enforce_ulimit_nofile: bool,
|
||||
) -> Result<Blockstore> {
|
||||
fs::create_dir_all(&ledger_path)?;
|
||||
let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY);
|
||||
|
||||
adjust_ulimit_nofile()?;
|
||||
adjust_ulimit_nofile(enforce_ulimit_nofile)?;
|
||||
|
||||
// Open the database
|
||||
let mut measure = Measure::start("open");
|
||||
@@ -363,9 +370,14 @@ impl Blockstore {
|
||||
pub fn open_with_signal(
|
||||
ledger_path: &Path,
|
||||
recovery_mode: Option<BlockstoreRecoveryMode>,
|
||||
enforce_ulimit_nofile: bool,
|
||||
) -> Result<BlockstoreSignals> {
|
||||
let mut blockstore =
|
||||
Self::open_with_access_type(ledger_path, AccessType::PrimaryOnly, recovery_mode)?;
|
||||
let mut blockstore = Self::open_with_access_type(
|
||||
ledger_path,
|
||||
AccessType::PrimaryOnly,
|
||||
recovery_mode,
|
||||
enforce_ulimit_nofile,
|
||||
)?;
|
||||
let (ledger_signal_sender, ledger_signal_receiver) = sync_channel(1);
|
||||
let (completed_slots_sender, completed_slots_receiver) =
|
||||
sync_channel(MAX_COMPLETED_SLOTS_IN_CHANNEL);
|
||||
@@ -2649,6 +2661,21 @@ impl Blockstore {
|
||||
matches!(self.db.get::<cf::Root>(slot), Ok(Some(true)))
|
||||
}
|
||||
|
||||
/// Returns true if a slot is between the rooted slot bounds of the ledger, but has not itself
|
||||
/// been rooted. This is either because the slot was skipped, or due to a gap in ledger data,
|
||||
/// as when booting from a newer snapshot.
|
||||
pub fn is_skipped(&self, slot: Slot) -> bool {
|
||||
let lowest_root = self
|
||||
.rooted_slot_iterator(0)
|
||||
.ok()
|
||||
.and_then(|mut iter| iter.next())
|
||||
.unwrap_or_default();
|
||||
match self.db.get::<cf::Root>(slot).ok().flatten() {
|
||||
Some(_) => false,
|
||||
None => slot < self.max_root() && slot > lowest_root,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_roots(&self, rooted_slots: &[u64]) -> Result<()> {
|
||||
let mut write_batch = self.db.batch()?;
|
||||
for slot in rooted_slots {
|
||||
@@ -3267,7 +3294,7 @@ pub fn create_new_ledger(
|
||||
genesis_config.write(&ledger_path)?;
|
||||
|
||||
// Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
|
||||
let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None)?;
|
||||
let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?;
|
||||
let ticks_per_slot = genesis_config.ticks_per_slot;
|
||||
let hashes_per_tick = genesis_config.poh_config.hashes_per_tick.unwrap_or(0);
|
||||
let entries = create_ticks(ticks_per_slot, hashes_per_tick, genesis_config.hash());
|
||||
@@ -3511,12 +3538,12 @@ pub fn make_chaining_slot_entries(
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
fn adjust_ulimit_nofile() -> Result<()> {
|
||||
fn adjust_ulimit_nofile(_enforce_ulimit_nofile: bool) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn adjust_ulimit_nofile() -> Result<()> {
|
||||
fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> {
|
||||
// Rocks DB likes to have many open files. The default open file descriptor limit is
|
||||
// usually not enough
|
||||
let desired_nofile = 500000;
|
||||
@@ -3547,7 +3574,9 @@ fn adjust_ulimit_nofile() -> Result<()> {
|
||||
desired_nofile, desired_nofile,
|
||||
);
|
||||
}
|
||||
return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
|
||||
if enforce_ulimit_nofile {
|
||||
return Err(BlockstoreError::UnableToSetOpenFileDescriptorLimit);
|
||||
}
|
||||
}
|
||||
|
||||
nofile = get_nofile();
|
||||
@@ -4208,7 +4237,7 @@ pub mod tests {
|
||||
fn test_data_set_completed_on_insert() {
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let BlockstoreSignals { blockstore, .. } =
|
||||
Blockstore::open_with_signal(&ledger_path, None).unwrap();
|
||||
Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
|
||||
|
||||
// Create enough entries to fill 2 shreds, only the later one is data complete
|
||||
let slot = 0;
|
||||
@@ -4249,7 +4278,7 @@ pub mod tests {
|
||||
blockstore: ledger,
|
||||
ledger_signal_receiver: recvr,
|
||||
..
|
||||
} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
|
||||
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
|
||||
let ledger = Arc::new(ledger);
|
||||
|
||||
let entries_per_slot = 50;
|
||||
@@ -4333,7 +4362,7 @@ pub mod tests {
|
||||
blockstore: ledger,
|
||||
completed_slots_receiver: recvr,
|
||||
..
|
||||
} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
|
||||
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
|
||||
let ledger = Arc::new(ledger);
|
||||
|
||||
let entries_per_slot = 10;
|
||||
@@ -4359,7 +4388,7 @@ pub mod tests {
|
||||
blockstore: ledger,
|
||||
completed_slots_receiver: recvr,
|
||||
..
|
||||
} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
|
||||
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
|
||||
let ledger = Arc::new(ledger);
|
||||
|
||||
let entries_per_slot = 10;
|
||||
@@ -4403,7 +4432,7 @@ pub mod tests {
|
||||
blockstore: ledger,
|
||||
completed_slots_receiver: recvr,
|
||||
..
|
||||
} = Blockstore::open_with_signal(&ledger_path, None).unwrap();
|
||||
} = Blockstore::open_with_signal(&ledger_path, None, true).unwrap();
|
||||
let ledger = Arc::new(ledger);
|
||||
|
||||
let entries_per_slot = 10;
|
||||
@@ -5523,6 +5552,25 @@ pub mod tests {
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_skipped() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
let roots = vec![2, 4, 7, 12, 15];
|
||||
blockstore.set_roots(&roots).unwrap();
|
||||
|
||||
for i in 0..20 {
|
||||
if i < 2 || roots.contains(&i) || i > 15 {
|
||||
assert!(!blockstore.is_skipped(i));
|
||||
} else {
|
||||
assert!(blockstore.is_skipped(i));
|
||||
}
|
||||
}
|
||||
|
||||
drop(blockstore);
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_iter_bounds() {
|
||||
let blockstore_path = get_tmp_ledger_path!();
|
||||
|

@@ -367,10 +367,7 @@ pub fn process_blockstore(
         account_paths,
         &opts.frozen_accounts,
         opts.debug_keys.clone(),
-        Some(&crate::builtins::get(
-            genesis_config.cluster_type,
-            opts.bpf_jit,
-        )),
+        Some(&crate::builtins::get(opts.bpf_jit)),
     );
     let bank0 = Arc::new(bank0);
     info!("processing ledger for slot 0...");

@@ -2,7 +2,7 @@ use solana_runtime::{
     bank::{Builtin, Builtins},
     builtins::ActivationType,
 };
-use solana_sdk::{feature_set, genesis_config::ClusterType, pubkey::Pubkey};
+use solana_sdk::{feature_set, pubkey::Pubkey};

 macro_rules! to_builtin {
     ($b:expr) => {
@@ -11,47 +11,33 @@ macro_rules! to_builtin {
 }

 /// Builtin programs that are always available
-fn genesis_builtins(cluster_type: ClusterType, bpf_jit: bool) -> Vec<Builtin> {
-    if cluster_type != ClusterType::MainnetBeta {
-        vec![
-            to_builtin!(solana_bpf_loader_deprecated_program!()),
-            if bpf_jit {
-                to_builtin!(solana_bpf_loader_program_with_jit!())
-            } else {
-                to_builtin!(solana_bpf_loader_program!())
-            },
-            if bpf_jit {
-                to_builtin!(solana_bpf_loader_upgradeable_program_with_jit!())
-            } else {
-                to_builtin!(solana_bpf_loader_upgradeable_program!())
-            },
-        ]
-    } else {
-        // Remove this `else` block and the `cluster_type` argument to this function once
-        // `feature_set::bpf_loader2_program::id()` is active on Mainnet Beta
-        vec![to_builtin!(solana_bpf_loader_deprecated_program!())]
-    }
-}
-
-/// Builtin programs activated dynamically by feature
-fn feature_builtins() -> Vec<(Builtin, Pubkey, ActivationType)> {
+fn genesis_builtins(bpf_jit: bool) -> Vec<Builtin> {
     vec![
-        (
-            to_builtin!(solana_bpf_loader_program!()),
-            feature_set::bpf_loader2_program::id(),
-            ActivationType::NewProgram,
-        ),
-        (
-            to_builtin!(solana_bpf_loader_upgradeable_program!()),
-            feature_set::bpf_loader_upgradeable_program::id(),
-            ActivationType::NewProgram,
-        ),
+        to_builtin!(solana_bpf_loader_deprecated_program!()),
+        if bpf_jit {
+            to_builtin!(solana_bpf_loader_program_with_jit!())
+        } else {
+            to_builtin!(solana_bpf_loader_program!())
+        },
     ]
 }

-pub(crate) fn get(cluster_type: ClusterType, bpf_jit: bool) -> Builtins {
+/// Builtin programs activated dynamically by feature
+fn feature_builtins(bpf_jit: bool) -> Vec<(Builtin, Pubkey, ActivationType)> {
+    vec![(
+        if bpf_jit {
+            to_builtin!(solana_bpf_loader_upgradeable_program_with_jit!())
+        } else {
+            to_builtin!(solana_bpf_loader_upgradeable_program!())
+        },
+        feature_set::bpf_loader_upgradeable_program::id(),
+        ActivationType::NewProgram,
+    )]
+}
+
+pub(crate) fn get(bpf_jit: bool) -> Builtins {
     Builtins {
-        genesis_builtins: genesis_builtins(cluster_type, bpf_jit),
-        feature_builtins: feature_builtins(),
+        genesis_builtins: genesis_builtins(bpf_jit),
+        feature_builtins: feature_builtins(bpf_jit),
     }
 }

@@ -1,5 +1,6 @@
 //! The `shred` module defines data structures and methods to pull MTU sized data frames from the network.
 use crate::{
+    blockstore::MAX_DATA_SHREDS_PER_SLOT,
     entry::{create_ticks, Entry},
     erasure::Session,
 };
@@ -12,7 +13,7 @@ use rayon::{
 };
 use serde::{Deserialize, Serialize};
 use solana_measure::measure::Measure;
-use solana_perf::packet::Packet;
+use solana_perf::packet::{limited_deserialize, Packet};
 use solana_rayon_threadlimit::get_thread_count;
 use solana_sdk::{
     clock::Slot,
@@ -309,6 +310,27 @@ impl Shred {
         Ok(shred)
     }

+    pub fn new_empty_coding(
+        slot: Slot,
+        index: u32,
+        fec_set_index: u32,
+        num_data: usize,
+        num_code: usize,
+        position: usize,
+        version: u16,
+    ) -> Self {
+        let (header, coding_header) = Shredder::new_coding_shred_header(
+            slot,
+            index,
+            fec_set_index,
+            num_data,
+            num_code,
+            position,
+            version,
+        );
+        Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header)
+    }
+
     pub fn new_empty_from_header(
         common_header: ShredCommonHeader,
         data_header: DataShredHeader,
@@ -699,7 +721,7 @@ impl Shredder {
         // Create empty coding shreds, with correctly populated headers
         let mut coding_shreds = Vec::with_capacity(num_coding);
         (0..num_coding).for_each(|i| {
-            let (header, coding_header) = Self::new_coding_shred_header(
+            let shred = Shred::new_empty_coding(
                 slot,
                 start_index + i as u32,
                 start_index,
@@ -708,8 +730,6 @@ impl Shredder {
                 i,
                 version,
             );
-            let shred =
-                Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header);
             coding_shreds.push(shred.payload);
         });

@@ -730,7 +750,7 @@ impl Shredder {
             .into_iter()
             .enumerate()
             .map(|(i, payload)| {
-                let (common_header, coding_header) = Self::new_coding_shred_header(
+                let mut shred = Shred::new_empty_coding(
                     slot,
                     start_index + i as u32,
                     start_index,
@@ -739,12 +759,8 @@ impl Shredder {
                     i,
                     version,
                 );
-                Shred {
-                    common_header,
-                    data_header: DataShredHeader::default(),
-                    coding_header,
-                    payload,
-                }
+                shred.payload = payload;
+                shred
             })
             .collect()
     } else {
@@ -963,6 +979,71 @@ impl Shredder {
     }
 }

+#[derive(Default, Debug, Eq, PartialEq)]
+pub struct ShredFetchStats {
+    pub index_overrun: usize,
+    pub shred_count: usize,
+    pub index_bad_deserialize: usize,
+    pub index_out_of_bounds: usize,
+    pub slot_bad_deserialize: usize,
+    pub duplicate_shred: usize,
+    pub slot_out_of_range: usize,
+    pub bad_shred_type: usize,
+}
+
+// Get slot, index, and type from a packet with partial deserialize
+pub fn get_shred_slot_index_type(
+    p: &Packet,
+    stats: &mut ShredFetchStats,
+) -> Option<(Slot, u32, bool)> {
+    let index_start = OFFSET_OF_SHRED_INDEX;
+    let index_end = index_start + SIZE_OF_SHRED_INDEX;
+    let slot_start = OFFSET_OF_SHRED_SLOT;
+    let slot_end = slot_start + SIZE_OF_SHRED_SLOT;
+
+    debug_assert!(index_end > slot_end);
+    debug_assert!(index_end > OFFSET_OF_SHRED_TYPE);
+
+    if index_end > p.meta.size {
+        stats.index_overrun += 1;
+        return None;
+    }
+
+    let index;
+    match limited_deserialize::<u32>(&p.data[index_start..index_end]) {
+        Ok(x) => index = x,
+        Err(_e) => {
+            stats.index_bad_deserialize += 1;
+            return None;
+        }
+    }
+
+    if index >= MAX_DATA_SHREDS_PER_SLOT as u32 {
+        stats.index_out_of_bounds += 1;
+        return None;
+    }
+
+    let slot;
+    match limited_deserialize::<Slot>(&p.data[slot_start..slot_end]) {
+        Ok(x) => {
+            slot = x;
+        }
+        Err(_e) => {
+            stats.slot_bad_deserialize += 1;
+            return None;
+        }
+    }
+
+    let shred_type = p.data[OFFSET_OF_SHRED_TYPE];
+    if shred_type == DATA_SHRED || shred_type == CODING_SHRED {
+        return Some((slot, index, shred_type == DATA_SHRED));
+    } else {
+        stats.bad_shred_type += 1;
+    }
+
+    None
+}
+
 pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option<usize>) -> u64 {
     let ticks = create_ticks(1, 0, Hash::default());
     max_entries_per_n_shred(&ticks[0], num_shreds, shred_data_size)
@@ -1707,4 +1788,60 @@ pub mod tests {
         })
     );
 }

+    #[test]
+    fn test_shred_offsets() {
+        solana_logger::setup();
+        let mut packet = Packet::default();
+        let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0);
+        shred.copy_to_packet(&mut packet);
+        let mut stats = ShredFetchStats::default();
+        let ret = get_shred_slot_index_type(&packet, &mut stats);
+        assert_eq!(Some((1, 3, true)), ret);
+        assert_eq!(stats, ShredFetchStats::default());
+
+        packet.meta.size = OFFSET_OF_SHRED_TYPE;
+        assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
+        assert_eq!(stats.index_overrun, 1);
+
+        packet.meta.size = OFFSET_OF_SHRED_INDEX;
+        assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
+        assert_eq!(stats.index_overrun, 2);
+
+        packet.meta.size = OFFSET_OF_SHRED_INDEX + 1;
+        assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
+        assert_eq!(stats.index_overrun, 3);
+
+        packet.meta.size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX - 1;
+        assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
+        assert_eq!(stats.index_overrun, 4);
+
+        packet.meta.size = OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX;
+        assert_eq!(
+            Some((1, 3, true)),
+            get_shred_slot_index_type(&packet, &mut stats)
+        );
+        assert_eq!(stats.index_overrun, 4);
+
+        let shred = Shred::new_empty_coding(8, 2, 10, 30, 4, 7, 200);
+        shred.copy_to_packet(&mut packet);
+        assert_eq!(
+            Some((8, 2, false)),
+            get_shred_slot_index_type(&packet, &mut stats)
+        );
+
+        let shred = Shred::new_from_data(1, std::u32::MAX - 10, 0, None, true, true, 0, 0, 0);
+        shred.copy_to_packet(&mut packet);
+        assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
+        assert_eq!(1, stats.index_out_of_bounds);
+
+        let (mut header, coding_header) =
+            Shredder::new_coding_shred_header(8, 2, 10, 30, 4, 7, 200);
+        header.shred_type = ShredType(u8::MAX);
+        let shred = Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header);
+        shred.copy_to_packet(&mut packet);
+
+        assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats));
+        assert_eq!(1, stats.bad_shred_type);
+    }
 }
|
@@ -783,6 +783,7 @@ fn test_mainnet_beta_cluster_type() {
        &solana_stake_program::id(),
        &solana_vote_program::id(),
        &solana_sdk::bpf_loader_deprecated::id(),
        &solana_sdk::bpf_loader::id(),
    ]
    .iter()
    {
@@ -798,7 +799,12 @@ fn test_mainnet_beta_cluster_type() {
    }

    // Programs that are not available at epoch 0
    for program_id in [&solana_sdk::bpf_loader::id(), &solana_vest_program::id()].iter() {
    for program_id in [
        &solana_sdk::bpf_loader_upgradeable::id(),
        &solana_vest_program::id(),
    ]
    .iter()
    {
        assert_eq!(
            (
                program_id,
@@ -1657,11 +1663,10 @@ fn test_validator_saves_tower() {
}

fn open_blockstore(ledger_path: &Path) -> Blockstore {
    Blockstore::open_with_access_type(ledger_path, AccessType::PrimaryOnly, None).unwrap_or_else(
        |e| {
    Blockstore::open_with_access_type(ledger_path, AccessType::PrimaryOnly, None, true)
        .unwrap_or_else(|e| {
            panic!("Failed to open ledger at {:?}, err: {}", ledger_path, e);
        },
    )
        })
}

fn purge_slots(blockstore: &Blockstore, start_slot: Slot, slot_count: Slot) {
@@ -1881,6 +1886,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b
        &val_a_ledger_path,
        AccessType::TryPrimaryThenSecondary,
        None,
        true,
    )
    .unwrap();
    let mut ancestors = AncestorIterator::new(last_vote, &blockstore);
@@ -11,6 +11,7 @@ static const uint8_t TEST_PPROGRAM_NOT_EXECUTABLE = 4;
static const uint8_t TEST_EMPTY_ACCOUNTS_SLICE = 5;
static const uint8_t TEST_CAP_SEEDS = 6;
static const uint8_t TEST_CAP_SIGNERS = 7;
static const uint8_t TEST_ALLOC_ACCESS_VIOLATION = 8;

static const int MINT_INDEX = 0;
static const int ARGUMENT_INDEX = 1;
@@ -297,6 +298,7 @@ extern uint64_t entrypoint(const uint8_t *input) {
                                        data, SOL_ARRAY_SIZE(data)};

    sol_assert(SUCCESS == sol_invoke(&instruction, 0, 0));
    break;
  }
  case TEST_CAP_SEEDS: {
    sol_log("Test cap seeds");
@@ -321,6 +323,7 @@ extern uint64_t entrypoint(const uint8_t *input) {
    sol_assert(SUCCESS == sol_invoke_signed(
                              &instruction, accounts, SOL_ARRAY_SIZE(accounts),
                              signers_seeds, SOL_ARRAY_SIZE(signers_seeds)));
    break;
  }
  case TEST_CAP_SIGNERS: {
    sol_log("Test cap signers");
@@ -360,6 +363,46 @@ extern uint64_t entrypoint(const uint8_t *input) {
    sol_assert(SUCCESS == sol_invoke_signed(
                              &instruction, accounts, SOL_ARRAY_SIZE(accounts),
                              signers_seeds, SOL_ARRAY_SIZE(signers_seeds)));
    break;
  }
  case TEST_ALLOC_ACCESS_VIOLATION: {
    sol_log("Test resize violation");
    SolAccountMeta arguments[] = {
        {accounts[FROM_INDEX].key, true, true},
        {accounts[DERIVED_KEY1_INDEX].key, true, true}};
    uint8_t data[4 + 8 + 8 + 32];
    *(uint64_t *)(data + 4) = 42;
    *(uint64_t *)(data + 4 + 8) = MAX_PERMITTED_DATA_INCREASE;
    sol_memcpy(data + 4 + 8 + 8, params.program_id, SIZE_PUBKEY);
    const SolInstruction instruction = {accounts[SYSTEM_PROGRAM_INDEX].key,
                                        arguments, SOL_ARRAY_SIZE(arguments),
                                        data, SOL_ARRAY_SIZE(data)};
    uint8_t seed1[] = {'Y', 'o', 'u', ' ', 'p', 'a', 's', 's',
                       ' ', 'b', 'u', 't', 't', 'e', 'r'};
    const SolSignerSeed seeds1[] = {{seed1, SOL_ARRAY_SIZE(seed1)},
                                    {&bump_seed1, 1}};
    const SolSignerSeeds signers_seeds[] = {{seeds1, SOL_ARRAY_SIZE(seeds1)}};

    SolAccountInfo derived_account = {
        .key = accounts[DERIVED_KEY1_INDEX].key,
        .lamports = accounts[DERIVED_KEY1_INDEX].lamports,
        .data_len = accounts[DERIVED_KEY1_INDEX].data_len,
        // Point to top edge of heap, attempt to allocate into unprivileged
        // memory
        .data = (uint8_t *)0x300007ff8,
        .owner = accounts[DERIVED_KEY1_INDEX].owner,
        .rent_epoch = accounts[DERIVED_KEY1_INDEX].rent_epoch,
        .is_signer = accounts[DERIVED_KEY1_INDEX].is_signer,
        .is_writable = accounts[DERIVED_KEY1_INDEX].is_writable,
        .executable = accounts[DERIVED_KEY1_INDEX].executable,
    };
    const SolAccountInfo invoke_accounts[] = {
        accounts[FROM_INDEX], accounts[SYSTEM_PROGRAM_INDEX], derived_account};
    sol_assert(SUCCESS ==
               sol_invoke_signed(&instruction,
                                 (const SolAccountInfo *)invoke_accounts, 3,
                                 signers_seeds, SOL_ARRAY_SIZE(signers_seeds)));
    break;
  }
  default:
    sol_panic();
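Note on the magic address above: the BPF VM maps the heap at MM_HEAP_START (0x300000000, re-exported by solana_rbpf and imported in the loader changes later in this diff), and the default heap is 32 KiB, so the spoofed data pointer 0x300007ff8 sits exactly 8 bytes below the heap's top edge; any growth past it lands in unmapped, unprivileged memory. A quick check of the arithmetic (a sketch, assuming the default heap size):

    // 0x3_0000_0000 + 32 * 1024 - 8 == 0x3_0000_7ff8
    const MM_HEAP_START: u64 = 0x3_0000_0000;
    const DEFAULT_HEAP_SIZE: u64 = 32 * 1024; // assumption: default compute budget heap
    assert_eq!(MM_HEAP_START + DEFAULT_HEAP_SIZE - 8, 0x3_0000_7ff8);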
programs/bpf/rust/caller_access/Cargo.lock (generated, 3407 lines): diff suppressed because it is too large.
@@ -23,6 +23,7 @@ const TEST_PPROGRAM_NOT_EXECUTABLE: u8 = 4;
const TEST_EMPTY_ACCOUNTS_SLICE: u8 = 5;
const TEST_CAP_SEEDS: u8 = 6;
const TEST_CAP_SIGNERS: u8 = 7;
const TEST_ALLOC_ACCESS_VIOLATION: u8 = 8;

// const MINT_INDEX: usize = 0;
const ARGUMENT_INDEX: usize = 1;
@@ -33,7 +34,7 @@ const INVOKED_PROGRAM_DUP_INDEX: usize = 4;
const DERIVED_KEY1_INDEX: usize = 6;
const DERIVED_KEY2_INDEX: usize = 7;
const DERIVED_KEY3_INDEX: usize = 8;
// const SYSTEM_PROGRAM_INDEX: usize = 9;
const SYSTEM_PROGRAM_INDEX: usize = 9;
const FROM_INDEX: usize = 10;

entrypoint!(process_instruction);
@@ -426,6 +427,72 @@ fn process_instruction(
                ],
            )?;
        }
        TEST_ALLOC_ACCESS_VIOLATION => {
            msg!("Test resize violation");
            let pubkey = *accounts[FROM_INDEX].key;
            let owner = *accounts[FROM_INDEX].owner;
            let ptr = accounts[FROM_INDEX].data.borrow().as_ptr() as u64 as *mut _;
            let len = accounts[FROM_INDEX].data_len();
            let mut data = unsafe { std::slice::from_raw_parts_mut(ptr, len) };
            let mut lamports = accounts[FROM_INDEX].lamports();
            let from_info = AccountInfo::new(
                &pubkey,
                false,
                true,
                &mut lamports,
                &mut data,
                &owner,
                false,
                0,
            );

            let pubkey = *accounts[DERIVED_KEY1_INDEX].key;
            let owner = *accounts[DERIVED_KEY1_INDEX].owner;
            // Point to top edge of heap, attempt to allocate into unprivileged memory
            let mut data = unsafe { std::slice::from_raw_parts_mut(0x300007ff8 as *mut _, 0) };
            let mut lamports = accounts[DERIVED_KEY1_INDEX].lamports();
            let derived_info = AccountInfo::new(
                &pubkey,
                false,
                true,
                &mut lamports,
                &mut data,
                &owner,
                false,
                0,
            );

            let pubkey = *accounts[SYSTEM_PROGRAM_INDEX].key;
            let owner = *accounts[SYSTEM_PROGRAM_INDEX].owner;
            let ptr = accounts[SYSTEM_PROGRAM_INDEX].data.borrow().as_ptr() as u64 as *mut _;
            let len = accounts[SYSTEM_PROGRAM_INDEX].data_len();
            let mut data = unsafe { std::slice::from_raw_parts_mut(ptr, len) };
            let mut lamports = accounts[SYSTEM_PROGRAM_INDEX].lamports();
            let system_info = AccountInfo::new(
                &pubkey,
                false,
                false,
                &mut lamports,
                &mut data,
                &owner,
                true,
                0,
            );

            let instruction = system_instruction::create_account(
                accounts[FROM_INDEX].key,
                accounts[DERIVED_KEY1_INDEX].key,
                42,
                MAX_PERMITTED_DATA_INCREASE as u64,
                program_id,
            );

            invoke_signed(
                &instruction,
                &[system_info.clone(), from_info.clone(), derived_info.clone()],
                &[&[b"You pass butter", &[bump_seed1]]],
            )?;
        }
        _ => panic!(),
    }
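Note: the seeds in the invoke_signed call mirror the C test above: DERIVED_KEY1 is expected to be the program-derived address for the seed b"You pass butter" plus the bump byte passed in as bump_seed1. A minimal sketch of the corresponding derivation (standard solana_program API; not part of this diff):

    use solana_program::pubkey::Pubkey;
    // Sketch: the address the runtime verifies against accounts[DERIVED_KEY1_INDEX].
    let expected = Pubkey::create_program_address(
        &[b"You pass butter", &[bump_seed1]],
        program_id,
    )
    .expect("seed plus bump must land off the ed25519 curve");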
programs/bpf/rust/spoof1/Cargo.lock (generated, 3407 lines): diff suppressed because it is too large.
programs/bpf/rust/spoof1_system/Cargo.lock (generated, 3407 lines): diff suppressed because it is too large.
@@ -8,9 +8,10 @@ use solana_program::{
entrypoint!(process_instruction);
fn process_instruction(
    _program_id: &Pubkey,
    _accounts: &[AccountInfo],
    accounts: &[AccountInfo],
    _instruction_data: &[u8],
) -> ProgramResult {
    msg!("Upgradeable program");
    assert_eq!(accounts.len(), 2);
    Err(42.into())
}

@@ -8,9 +8,10 @@ use solana_program::{
entrypoint!(process_instruction);
fn process_instruction(
    _program_id: &Pubkey,
    _accounts: &[AccountInfo],
    accounts: &[AccountInfo],
    _instruction_data: &[u8],
) -> ProgramResult {
    msg!("Upgraded program");
    assert_eq!(accounts.len(), 2);
    Err(43.into())
}
@@ -565,6 +565,7 @@ fn test_program_bpf_invoke() {
    const TEST_EMPTY_ACCOUNTS_SLICE: u8 = 5;
    const TEST_CAP_SEEDS: u8 = 6;
    const TEST_CAP_SIGNERS: u8 = 7;
    const TEST_ALLOC_ACCESS_VIOLATION: u8 = 8;

    #[allow(dead_code)]
    #[derive(Debug)]
@@ -890,7 +891,7 @@ fn test_program_bpf_invoke() {
    assert_eq!(invoked_programs, vec![]);
    assert_eq!(
        result.unwrap_err(),
        TransactionError::InstructionError(0, InstructionError::Custom(194969602))
        TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
    );
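Note: the old expected error decodes to the loader's custom code, since 194969602 == 0x0b9f0002, the discriminant of BPFLoaderError::VirtualMachineFailedToRunProgram (the enum removed later in this diff); the commit replaces that custom code with the first-class InstructionError::ProgramFailedToComplete. A quick check of the arithmetic:

    // 0x0b9f * 65536 + 2 = 194_969_600 + 2
    assert_eq!(0x0b9f_0002u32, 194_969_602);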
    // Check final state
@@ -905,6 +906,42 @@ fn test_program_bpf_invoke() {
    for i in 0..20 {
        assert_eq!(i as u8, account.data[i]);
    }

    // Attempt to realloc into unauthorized address space
    let account = Account::new(84, 0, &solana_sdk::system_program::id());
    bank.store_account(&from_keypair.pubkey(), &account);
    bank.store_account(&derived_key1, &Account::default());
    let instruction = Instruction::new(
        invoke_program_id,
        &[
            TEST_ALLOC_ACCESS_VIOLATION,
            bump_seed1,
            bump_seed2,
            bump_seed3,
        ],
        account_metas.clone(),
    );
    let message = Message::new(&[instruction], Some(&mint_pubkey));
    let tx = Transaction::new(
        &[
            &mint_keypair,
            &argument_keypair,
            &invoked_argument_keypair,
            &from_keypair,
        ],
        message.clone(),
        bank.last_blockhash(),
    );
    let (result, inner_instructions) = process_transaction_and_record_inner(&bank, tx);
    let invoked_programs: Vec<Pubkey> = inner_instructions[0]
        .iter()
        .map(|ix| message.account_keys[ix.program_id_index as usize].clone())
        .collect();
    assert_eq!(invoked_programs, vec![solana_sdk::system_program::id()]);
    assert_eq!(
        result.unwrap_err(),
        TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
    );
}

    // Check for program id spoofing
@@ -1033,7 +1070,7 @@ fn test_program_bpf_ro_modify() {
    let result = bank_client.send_and_confirm_message(&[&mint_keypair, &test_keypair], message);
    assert_eq!(
        result.unwrap_err().unwrap(),
        TransactionError::InstructionError(0, InstructionError::Custom(0xb9f0002))
        TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
    );

    let instruction = Instruction::new(program_pubkey, &[3_u8], account_metas.clone());
@@ -1041,7 +1078,7 @@ fn test_program_bpf_ro_modify() {
    let result = bank_client.send_and_confirm_message(&[&mint_keypair, &test_keypair], message);
    assert_eq!(
        result.unwrap_err().unwrap(),
        TransactionError::InstructionError(0, InstructionError::Custom(0xb9f0002))
        TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
    );

    let instruction = Instruction::new(program_pubkey, &[4_u8], account_metas.clone());
@@ -1049,7 +1086,7 @@ fn test_program_bpf_ro_modify() {
    let result = bank_client.send_and_confirm_message(&[&mint_keypair, &test_keypair], message);
    assert_eq!(
        result.unwrap_err().unwrap(),
        TransactionError::InstructionError(0, InstructionError::Custom(0xb9f0002))
        TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete)
    );
}
@@ -1446,7 +1483,14 @@ fn test_program_bpf_upgrade() {

    // call upgrade program
    nonce += 1;
    let instruction = Instruction::new(program_id, &[nonce], vec![]);
    let instruction = Instruction::new(
        program_id,
        &[nonce],
        vec![
            AccountMeta::new(clock::id(), false),
            AccountMeta::new(fees::id(), false),
        ],
    );
    let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction);
    assert_eq!(
        result.unwrap_err().unwrap(),
@@ -1464,7 +1508,14 @@ fn test_program_bpf_upgrade() {

    // call upgraded program
    nonce += 1;
    let instruction = Instruction::new(program_id, &[nonce], vec![]);
    let instruction = Instruction::new(
        program_id,
        &[nonce],
        vec![
            AccountMeta::new(clock::id(), false),
            AccountMeta::new(fees::id(), false),
        ],
    );
    let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction);
    assert_eq!(
        result.unwrap_err().unwrap(),
@@ -1492,7 +1543,14 @@ fn test_program_bpf_upgrade() {

    // call original program
    nonce += 1;
    let instruction = Instruction::new(program_id, &[nonce], vec![]);
    let instruction = Instruction::new(
        program_id,
        &[nonce],
        vec![
            AccountMeta::new(clock::id(), false),
            AccountMeta::new(fees::id(), false),
        ],
    );
    let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction);
    assert_eq!(
        result.unwrap_err().unwrap(),
@@ -13,7 +13,6 @@ use crate::{
    serialization::{deserialize_parameters, serialize_parameters},
    syscalls::SyscallError,
};
use num_derive::{FromPrimitive, ToPrimitive};
use solana_rbpf::{
    ebpf::MM_HEAP_START,
    error::{EbpfError, UserDefinedError},
@@ -26,7 +25,6 @@ use solana_sdk::{
    bpf_loader, bpf_loader_deprecated,
    bpf_loader_upgradeable::{self, UpgradeableLoaderState},
    clock::Clock,
    decode_error::DecodeError,
    entrypoint::SUCCESS,
    feature_set::bpf_compute_budget_balancing,
    instruction::InstructionError,
@@ -48,22 +46,6 @@ solana_sdk::declare_builtin!(
    solana_bpf_loader_program::process_instruction
);

/// Errors returned by the BPFLoader if the VM fails to run the program
#[derive(Error, Debug, Clone, PartialEq, FromPrimitive, ToPrimitive)]
pub enum BPFLoaderError {
    #[error("failed to create virtual machine")]
    VirtualMachineCreationFailed = 0x0b9f_0001,
    #[error("virtual machine failed to run the program to completion")]
    VirtualMachineFailedToRunProgram = 0x0b9f_0002,
    #[error("failed to compile program")]
    JustInTimeCompilationFailed = 0x0b9f_0003,
}
impl<E> DecodeError<E> for BPFLoaderError {
    fn type_of() -> &'static str {
        "BPFLoaderError"
    }
}

/// Errors returned by functions the BPF Loader registers with the VM
#[derive(Debug, Error, PartialEq)]
pub enum BPFError {
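Note: with the BPFLoaderError enum removed, each of its custom codes has a first-class successor in InstructionError. A sketch of the mapping inferred from this diff, useful for example when interpreting old transaction history; the function name is hypothetical:

    // Sketch: translate legacy BPF loader custom codes to the new variants.
    fn upgrade_legacy_bpf_loader_error(code: u32) -> Option<InstructionError> {
        match code {
            0x0b9f_0001 => Some(InstructionError::ProgramEnvironmentSetupFailure),
            0x0b9f_0002 => Some(InstructionError::ProgramFailedToComplete),
            0x0b9f_0003 => Some(InstructionError::ProgramFailedToCompile),
            _ => None, // not a BPF loader code
        }
    }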
@@ -107,6 +89,8 @@ pub fn create_and_cache_executor(
    invoke_context: &mut dyn InvokeContext,
    use_jit: bool,
) -> Result<Arc<BPFExecutor>, InstructionError> {
    let logger = invoke_context.get_logger();

    let bpf_compute_budget = invoke_context.get_bpf_compute_budget();
    let mut program = Executable::<BPFError, ThisInstructionMeter>::from_elf(
        data,
@@ -127,11 +111,16 @@ pub fn create_and_cache_executor(
        !invoke_context.is_feature_active(&bpf_compute_budget_balancing::id()),
    )
    .map_err(|e| map_ebpf_error(invoke_context, EbpfError::UserError(e)))?;
    let syscall_registry = syscalls::register_syscalls(invoke_context)
        .map_err(|e| map_ebpf_error(invoke_context, e))?;
    let syscall_registry = syscalls::register_syscalls(invoke_context).map_err(|e| {
        log!(logger, "Failed to register syscalls: {}", e);
        InstructionError::ProgramEnvironmentSetupFailure
    })?;
    program.set_syscall_registry(syscall_registry);
    if use_jit && program.jit_compile().is_err() {
        return Err(BPFLoaderError::JustInTimeCompilationFailed.into());
    if use_jit {
        if let Err(err) = program.jit_compile() {
            log!(logger, "Failed to compile program {:?}", err);
            return Err(InstructionError::ProgramFailedToCompile);
        }
    }
    let executor = Arc::new(BPFExecutor { program });
    invoke_context.add_executor(key, executor.clone());
@@ -239,7 +228,7 @@ fn process_instruction_common(
    let account_iter = &mut keyed_accounts.iter();
    let first_account = next_keyed_account(account_iter)?;
    if first_account.executable()? {
        let (program, offset) = if bpf_loader_upgradeable::check_id(program_id) {
        let (program, keyed_accounts, offset) = if bpf_loader_upgradeable::check_id(program_id) {
            if let UpgradeableLoaderState::Program {
                programdata_address,
            } = first_account.state()?
@@ -251,6 +240,7 @@ fn process_instruction_common(
                }
                (
                    programdata,
                    &keyed_accounts[1..],
                    UpgradeableLoaderState::programdata_data_offset()?,
                )
            } else {
@@ -258,7 +248,7 @@ fn process_instruction_common(
                return Err(InstructionError::InvalidAccountData);
            }
        } else {
            (first_account, 0)
            (first_account, keyed_accounts, 0)
        };

        if program.owner()? != *program_id {
@@ -352,8 +342,11 @@ fn process_loader_upgradeable_instruction(
                log!(logger, "Program account already initialized");
                return Err(InstructionError::AccountAlreadyInitialized);
            }

            if program.lamports()? < rent.minimum_balance(UpgradeableLoaderState::program_len()?) {
            if program.data_len()? < UpgradeableLoaderState::program_len()? {
                log!(logger, "Program account too small");
                return Err(InstructionError::AccountDataTooSmall);
            }
            if program.lamports()? < rent.minimum_balance(program.data_len()?) {
                log!(logger, "Program account not rent-exempt");
                return Err(InstructionError::ExecutableAccountNotRentExempt);
            }
@@ -517,7 +510,8 @@ fn process_loader_upgradeable_instruction(
                use_jit,
            )?;

            // Update the ProgramData account and record the upgraded data
            // Update the ProgramData account, record the upgraded data, and zero
            // the rest

            programdata.set_state(&UpgradeableLoaderState::ProgramData {
                slot: clock.slot,
@@ -526,6 +520,11 @@ fn process_loader_upgradeable_instruction(
            programdata.try_account_ref_mut()?.data
                [programdata_data_offset..programdata_data_offset + buffer_data_len]
                .copy_from_slice(&buffer.try_account_ref()?.data[buffer_data_offset..]);
            for i in &mut programdata.try_account_ref_mut()?.data
                [programdata_data_offset + buffer_data_len..]
            {
                *i = 0
            }

            // Fund ProgramData to rent-exemption, spill the rest
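Note: the zeroing loop matters on upgrade: if the new program is smaller than the previous ProgramData contents, stale trailing bytes are cleared so they cannot be observed later. A sketch of the same step using the slice fill idiom (behaviorally equivalent, assuming Rust 1.50+ where slice::fill is stable):

    // Equivalent to the `for i in ... { *i = 0 }` loop above.
    programdata.try_account_ref_mut()?.data
        [programdata_data_offset + buffer_data_len..]
        .fill(0);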
@@ -682,7 +681,7 @@ impl Executor for BPFExecutor {
            Ok(info) => info,
            Err(e) => {
                log!(logger, "Failed to create BPF VM: {}", e);
                return Err(BPFLoaderError::VirtualMachineCreationFailed.into());
                return Err(InstructionError::ProgramEnvironmentSetupFailure);
            }
        };

@@ -721,7 +720,10 @@ impl Executor for BPFExecutor {
            EbpfError::UserError(BPFError::SyscallError(
                SyscallError::InstructionError(error),
            )) => error,
            _ => BPFLoaderError::VirtualMachineFailedToRunProgram.into(),
            err => {
                log!(logger, "Program failed to complete: {:?}", err);
                InstructionError::ProgramFailedToComplete
            }
        };

        stable_log::program_failure(&logger, program.unsigned_key(), &error);
@@ -1010,7 +1012,7 @@ mod tests {
            Arc::new(FeatureSet::default()),
        );
        assert_eq!(
            Err(InstructionError::Custom(194969602)),
            Err(InstructionError::ProgramFailedToComplete),
            process_instruction(&bpf_loader::id(), &keyed_accounts, &[], &mut invoke_context)
        );
@@ -1549,6 +1551,66 @@ mod tests {
                .unwrap()
        );

        // Test program account not rent exempt because data is larger than needed
        bank.clear_signatures();
        bank.store_account(&buffer_address, &buffer_account);
        bank.store_account(&program_keypair.pubkey(), &Account::default());
        bank.store_account(&programdata_address, &Account::default());
        let mut instructions = bpf_loader_upgradeable::deploy_with_max_program_len(
            &mint_keypair.pubkey(),
            &program_keypair.pubkey(),
            &buffer_address,
            None,
            min_program_balance,
            elf.len(),
        )
        .unwrap();
        instructions[0] = system_instruction::create_account(
            &mint_keypair.pubkey(),
            &program_keypair.pubkey(),
            min_program_balance,
            UpgradeableLoaderState::program_len().unwrap() as u64 + 1,
            &id(),
        );
        let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
        assert_eq!(
            TransactionError::InstructionError(1, InstructionError::ExecutableAccountNotRentExempt),
            bank_client
                .send_and_confirm_message(&[&mint_keypair, &program_keypair], message)
                .unwrap_err()
                .unwrap()
        );

        // Test program account too small
        bank.clear_signatures();
        bank.store_account(&buffer_address, &buffer_account);
        bank.store_account(&program_keypair.pubkey(), &Account::default());
        bank.store_account(&programdata_address, &Account::default());
        let mut instructions = bpf_loader_upgradeable::deploy_with_max_program_len(
            &mint_keypair.pubkey(),
            &program_keypair.pubkey(),
            &buffer_address,
            None,
            min_program_balance,
            elf.len(),
        )
        .unwrap();
        instructions[0] = system_instruction::create_account(
            &mint_keypair.pubkey(),
            &program_keypair.pubkey(),
            min_program_balance,
            UpgradeableLoaderState::program_len().unwrap() as u64 - 1,
            &id(),
        );
        let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
        assert_eq!(
            TransactionError::InstructionError(1, InstructionError::AccountDataTooSmall),
            bank_client
                .send_and_confirm_message(&[&mint_keypair, &program_keypair], message)
                .unwrap_err()
                .unwrap()
        );

        // Test Insufficient payer funds
        bank.clear_signatures();
        bank.store_account(
@@ -827,6 +827,7 @@ struct AccountReferences<'a> {
    lamports: &'a mut u64,
    owner: &'a mut Pubkey,
    data: &'a mut [u8],
    vm_data_addr: u64,
    ref_to_len_in_vm: &'a mut u64,
    serialized_len_ptr: &'a mut u64,
}
@@ -941,7 +942,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> {
                account_info.owner as *const _ as u64,
                self.loader_id,
            )?;
            let (data, ref_to_len_in_vm, serialized_len_ptr) = {
            let (data, vm_data_addr, ref_to_len_in_vm, serialized_len_ptr) = {
                // Double translate data out of RefCell
                let data = *translate_type::<&[u8]>(
                    memory_mapping,
@@ -961,13 +962,15 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> {
                    ref_of_len_in_input_buffer as *const _ as u64,
                    self.loader_id,
                )?;
                let vm_data_addr = data.as_ptr() as u64;
                (
                    translate_slice_mut::<u8>(
                        memory_mapping,
                        data.as_ptr() as u64,
                        vm_data_addr,
                        data.len() as u64,
                        self.loader_id,
                    )?,
                    vm_data_addr,
                    ref_to_len_in_vm,
                    serialized_len_ptr,
                )
@@ -984,6 +987,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedRust<'a> {
                lamports,
                owner,
                data,
                vm_data_addr,
                ref_to_len_in_vm,
                serialized_len_ptr,
            });
@@ -1206,9 +1210,10 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> {
                account_info.owner_addr,
                self.loader_id,
            )?;
            let vm_data_addr = account_info.data_addr;
            let data = translate_slice_mut::<u8>(
                memory_mapping,
                account_info.data_addr,
                vm_data_addr,
                account_info.data_len,
                self.loader_id,
            )?;
@@ -1243,6 +1248,7 @@ impl<'a> SyscallInvokeSigned<'a> for SyscallInvokeSignedC<'a> {
                lamports,
                owner,
                data,
                vm_data_addr,
                ref_to_len_in_vm,
                serialized_len_ptr,
            });
@@ -1408,8 +1414,6 @@ fn call<'a>(
        *account_ref.lamports = account.lamports;
        *account_ref.owner = account.owner;
        if account_ref.data.len() != account.data.len() {
            *account_ref.ref_to_len_in_vm = account.data.len() as u64;
            *account_ref.serialized_len_ptr = account.data.len() as u64;
            if !account_ref.data.is_empty() {
                // Only support for `CreateAccount` at this time.
                // Need a way to limit total realloc size across multiple CPI calls
@@ -1422,6 +1426,14 @@ fn call<'a>(
                    SyscallError::InstructionError(InstructionError::InvalidRealloc).into(),
                );
            }
            let _ = translate(
                memory_mapping,
                AccessType::Store,
                account_ref.vm_data_addr,
                account.data.len() as u64,
            )?;
            *account_ref.ref_to_len_in_vm = account.data.len() as u64;
            *account_ref.serialized_len_ptr = account.data.len() as u64;
        }
        account_ref
            .data
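Note: this is the core of the fix. Before honoring a CPI-driven size change, `call()` now re-translates the recorded vm_data_addr against the MemoryMapping with AccessType::Store for the new length; only then are the VM-visible lengths updated. A condensed sketch of the guard's ordering:

    // Sketch: verify the VM-side region is writable for the new length
    // before committing the updated lengths back into the caller's space.
    // A fabricated AccountInfo whose data pointer escapes the caller's
    // writable region (the TEST_ALLOC_ACCESS_VIOLATION case) fails here
    // with an access violation, surfaced as ProgramFailedToComplete,
    // instead of corrupting unprivileged memory.
    let _ = translate(memory_mapping, AccessType::Store,
                      account_ref.vm_data_addr, account.data.len() as u64)?;
    *account_ref.ref_to_len_in_vm = account.data.len() as u64;
    *account_ref.serialized_len_ptr = account.data.len() as u64;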
@@ -114,7 +114,7 @@ pub const SECONDS_PER_YEAR: f64 = 365.25 * 24.0 * 60.0 * 60.0;
pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5;

type BankStatusCache = StatusCache<Result<()>>;
#[frozen_abi(digest = "9b9RfyiGPNGcMyP78YSD799ghJSTsGvqHTsJtQo8uqGX")]
#[frozen_abi(digest = "GSPuprru1pomsgvopKG7XRWiXdqdXJdLPkgJ2arPbkXM")]
pub type BankSlotDelta = SlotDelta<Result<()>>;
type TransactionAccountRefCells = Vec<Rc<RefCell<Account>>>;
type TransactionLoaderRefCells = Vec<Vec<(Pubkey, RefCell<Account>)>>;
@@ -867,6 +867,8 @@ pub struct Bank {
    pub feature_set: Arc<FeatureSet>,

    pub drop_callback: RwLock<OptionalDropCallback>,

    pub freeze_started: AtomicBool,
}

impl Default for BlockhashQueue {
@@ -1022,6 +1024,7 @@ impl Bank {
                    .as_ref()
                    .map(|drop_callback| drop_callback.clone_box()),
            )),
            freeze_started: AtomicBool::new(false),
        };

        datapoint_info!(
@@ -1145,6 +1148,7 @@ impl Bank {
            transaction_log_collector: new(),
            feature_set: new(),
            drop_callback: RwLock::new(OptionalDropCallback(None)),
            freeze_started: AtomicBool::new(fields.hash != Hash::default()),
        };
        bank.finish_init(genesis_config, additional_builtins);

@@ -1248,6 +1252,10 @@ impl Bank {
        *self.hash.read().unwrap() != Hash::default()
    }

    pub fn freeze_started(&self) -> bool {
        self.freeze_started.load(Relaxed)
    }

    pub fn status_cache_ancestors(&self) -> Vec<u64> {
        let mut roots = self.src.status_cache.read().unwrap().roots().clone();
        let min = roots.iter().min().cloned().unwrap_or(0);
@@ -1941,6 +1949,17 @@ impl Bank {
    }

    pub fn freeze(&self) {
        // This lock prevents any new commits from BankingStage
        // `process_and_record_transactions_locked()` from coming
        // in after the last tick is observed. This is because in
        // BankingStage, any transaction successfully recorded in
        // `record_transactions()` is recorded after this `hash` lock
        // is grabbed. At the time of the successful record,
        // this means the PoH has not yet reached the last tick,
        // so this means freeze() hasn't been called yet. And because
        // BankingStage doesn't release this hash lock until both
        // record and commit are finished, those transactions will be
        // committed before this write lock can be obtained here.
        let mut hash = self.hash.write().unwrap();

        if *hash == Hash::default() {
@@ -1952,6 +1971,7 @@ impl Bank {
            self.run_incinerator();

            // freeze is a one-way trip, idempotent
            self.freeze_started.store(true, Relaxed);
            *hash = self.hash_internal_state();
        }
    }
@@ -2145,7 +2165,7 @@ impl Bank {
        }

        assert!(
            !self.is_frozen(),
            !self.freeze_started(),
            "Can't change frozen bank by adding not-existing new native program ({}, {}). \
             Maybe, inconsistent program activation is detected on snapshot restore?",
            name,
@@ -2272,22 +2292,24 @@ impl Bank {
    /// bank will reject transactions using that `hash`.
    pub fn register_tick(&self, hash: &Hash) {
        assert!(
            !self.is_frozen(),
            "register_tick() working on a frozen bank!"
            !self.freeze_started(),
            "register_tick() working on a bank that is already frozen or is undergoing freezing!"
        );

        inc_new_counter_debug!("bank-register_tick-registered", 1);
        // Grab blockhash lock before incrementing tick height so that replay stage does
        // not attempt to freeze after observing the last tick and before blockhash is
        // updated
        let mut w_blockhash_queue = self.blockhash_queue.write().unwrap();
        let current_tick_height = self.tick_height.fetch_add(1, Relaxed) as u64;
        if self.is_block_boundary(current_tick_height + 1) {
        if self.is_block_boundary(self.tick_height.load(Relaxed) + 1) {
            w_blockhash_queue.register_hash(hash, &self.fee_calculator);
            if self.fix_recent_blockhashes_sysvar_delay() {
                self.update_recent_blockhashes_locked(&w_blockhash_queue);
            }
        }
        // ReplayStage will start computing the accounts delta hash when it
        // detects the tick height has reached the boundary, so the system
        // needs to guarantee all account updates for the slot have been
        // committed before this tick height is incremented (like the blockhash
        // sysvar above)
        self.tick_height.fetch_add(1, Relaxed);
    }
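Note: the two predicates now serve different purposes. `is_frozen()` becomes true only once the final hash has been written, while the new `freeze_started()` flips as soon as freeze() commits to freezing (and is seeded true when a bank is deserialized with a non-default hash), so writers are rejected during the freezing window, not just after it. A minimal sketch of the distinction, under the reading above:

    // Sketch of the three bank states implied by this diff:
    // !freeze_started() && !is_frozen()  -> bank accepts ticks and commits
    //  freeze_started() && !is_frozen()  -> freeze() in progress; register_tick(),
    //                                       commit_transactions(), store_account() assert
    //  freeze_started() &&  is_frozen()  -> final hash written; bank immutable
    if bank.freeze_started() && !bank.is_frozen() {
        // back off: freeze() holds the hash lock and is computing the final hash
    }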
    pub fn is_complete(&self) -> bool {
@@ -3053,8 +3075,8 @@ impl Bank {
        signature_count: u64,
    ) -> TransactionResults {
        assert!(
            !self.is_frozen(),
            "commit_transactions() working on a frozen bank!"
            !self.freeze_started(),
            "commit_transactions() working on a bank that is already frozen or is undergoing freezing!"
        );

        self.increment_transaction_count(tx_count);
@@ -3768,6 +3790,7 @@ impl Bank {
    }

    pub fn store_account(&self, pubkey: &Pubkey, account: &Account) {
        assert!(!self.freeze_started());
        self.rc.accounts.store_slow(self.slot(), pubkey, account);

        if Stakes::is_stake(account) {
@@ -171,6 +171,15 @@ pub enum InstructionError {
    /// Cross-program invocation with unauthorized signer or writable account
    #[error("Cross-program invocation with unauthorized signer or writable account")]
    PrivilegeEscalation,

    #[error("Failed to create program execution environment")]
    ProgramEnvironmentSetupFailure,

    #[error("Program failed to complete")]
    ProgramFailedToComplete,

    #[error("Program failed to compile")]
    ProgramFailedToCompile,
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
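Note: the new variants are appended after PrivilegeEscalation, which under an index-based encoding such as bincode's leaves the existing variants' serialized indices unchanged; clients can now match the loader failures directly instead of decoding custom codes. A small hedged example, where retry_or_report, report, and handle are hypothetical helpers:

    // Sketch: client-side handling of the new first-class loader errors.
    match instruction_error {
        InstructionError::ProgramEnvironmentSetupFailure => retry_or_report("vm setup failed"),
        InstructionError::ProgramFailedToComplete => report("aborted; see program logs"),
        InstructionError::ProgramFailedToCompile => report("jit compilation failed"),
        other => handle(other),
    }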
@@ -29,8 +29,8 @@ solana-sdk = { path = "../sdk", version = "1.5.0" }
solana-stake-program = { path = "../programs/stake", version = "1.5.0" }
solana-transaction-status = { path = "../transaction-status", version = "1.5.0" }
solana-version = { path = "../version", version = "1.5.0" }
spl-associated-token-account-v1-0 = { package = "spl-associated-token-account", version = "=1.0.1" }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
spl-associated-token-account-v1-0 = { package = "spl-associated-token-account", version = "=1.0.2" }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
tempfile = "3.1.0"
thiserror = "1.0"

@@ -22,8 +22,8 @@ solana-sdk = { path = "../sdk", version = "1.5.0" }
solana-runtime = { path = "../runtime", version = "1.5.0" }
solana-stake-program = { path = "../programs/stake", version = "1.5.0" }
solana-vote-program = { path = "../programs/vote", version = "1.5.0" }
spl-memo-v1-0 = { package = "spl-memo", version = "=2.0.0", features = ["no-entrypoint"] }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.0", features = ["no-entrypoint"] }
spl-memo-v1-0 = { package = "spl-memo", version = "=2.0.1", features = ["no-entrypoint"] }
spl-token-v2-0 = { package = "spl-token", version = "=3.0.1", features = ["no-entrypoint"] }
thiserror = "1.0"

[package.metadata.docs.rs]
@@ -14,6 +14,7 @@ base64 = "0.12.3"
clap = "2.33.1"
chrono = { version = "0.4.11", features = ["serde"] }
console = "0.11.3"
fd-lock = "1.1.1"
indicatif = "0.15.0"
log = "0.4.11"
rand = "0.7.0"
@@ -1,6 +1,7 @@
use {
    clap::{value_t_or_exit, App, Arg},
    console::style,
    fd_lock::FdLock,
    indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle},
    solana_clap_utils::{input_parsers::pubkey_of, input_validators::is_pubkey},
    solana_client::{client_error, rpc_client::RpcClient},
@@ -18,9 +19,9 @@ use {
    },
    solana_validator::{start_logger, test_validator::*},
    std::{
        fs,
        fs, io,
        net::{IpAddr, Ipv4Addr, SocketAddr},
        path::PathBuf,
        path::{Path, PathBuf},
        process::exit,
        sync::mpsc::channel,
        thread,
@@ -90,6 +91,16 @@ fn main() {
                .default_value("test-ledger")
                .help("Use DIR as ledger location"),
        )
        .arg(
            Arg::with_name("reset")
                .short("r")
                .long("reset")
                .takes_value(false)
                .help(
                    "Reset the ledger to genesis if it exists. \
                     By default the validator will resume an existing ledger (if present)",
                ),
        )
        .arg(
            Arg::with_name("quiet")
                .short("q")
@@ -132,18 +143,13 @@ fn main() {
    };

    let mint_address = pubkey_of(&matches, "mint_address").unwrap_or_else(|| {
        read_keypair_file(&cli_config.keypair_path)
            .unwrap_or_else(|err| {
                eprintln!(
                    "Error: Unable to read keypair file {}: {}",
                    cli_config.keypair_path, err
                );
                exit(1);
            })
        read_keypair_file(dbg!(&cli_config.keypair_path))
            .unwrap_or_else(|_| Keypair::new())
            .pubkey()
    });

    let ledger_path = value_t_or_exit!(matches, "ledger_path", PathBuf);
    let reset_ledger = matches.is_present("reset");
    let output = if matches.is_present("quiet") {
        Output::None
    } else if matches.is_present("log") {
@@ -188,19 +194,35 @@ fn main() {
        }
    }

    if !ledger_path.exists() {
        fs::create_dir(&ledger_path).unwrap_or_else(|err| {
            eprintln!(
                "Error: Unable to create directory {}: {}",
                ledger_path.display(),
                err
            );
            exit(1);
        });
    }

    let mut ledger_fd_lock = FdLock::new(fs::File::open(&ledger_path).unwrap());
    let _ledger_lock = ledger_fd_lock.try_lock().unwrap_or_else(|_| {
        eprintln!(
            "Error: Unable to lock {} directory. Check if another solana-test-validator is running",
            ledger_path.display()
        );
        exit(1);
    });

    if reset_ledger {
        remove_directory_contents(&ledger_path).unwrap_or_else(|err| {
            eprintln!("Error: Unable to remove {}: {}", ledger_path.display(), err);
            exit(1);
        })
    }

    let validator_log_symlink = ledger_path.join("validator.log");
    let logfile = if output != Output::Log {
        if !ledger_path.exists() {
            fs::create_dir(&ledger_path).unwrap_or_else(|err| {
                eprintln!(
                    "Error: Unable to create directory {}: {}",
                    ledger_path.display(),
                    err
                );
                exit(1);
            })
        }

        let validator_log_with_timestamp = format!(
            "validator-{}.log",
            SystemTime::now()
@@ -330,3 +352,15 @@ fn main() {

    std::thread::park();
}

fn remove_directory_contents(ledger_path: &Path) -> Result<(), io::Error> {
    for entry in fs::read_dir(&ledger_path)? {
        let entry = entry?;
        if entry.metadata()?.is_file() {
            fs::remove_file(&entry.path())?
        } else {
            fs::remove_dir_all(&entry.path())?
        }
    }
    Ok(())
}