* uses enum for shred type

  The current code uses a raw u8, which has no type safety and can hold invalid values:
  https://github.com/solana-labs/solana/blob/66fa062f1/ledger/src/shred.rs#L167

  Checks for invalid shred types are scattered through the code:
  https://github.com/solana-labs/solana/blob/66fa062f1/ledger/src/blockstore.rs#L849-L851
  https://github.com/solana-labs/solana/blob/66fa062f1/ledger/src/shred.rs#L346-L348

  This commit uses an enum with #[repr(u8)] for the shred type. Backward compatibility is maintained by implementing Serialize and Deserialize compatibly with u8, and a test asserts that compatibility.

  (cherry picked from commit 57057f8d39)

  # Conflicts:
  #   core/src/retransmit_stage.rs
  #   gossip/src/cluster_info.rs
  #   ledger/Cargo.toml
  #   ledger/src/blockstore.rs
  #   ledger/src/shred.rs

* changes Blockstore::is_shred_duplicate arg type to ShredType

  (cherry picked from commit 48dfdfb4d5)

  # Conflicts:
  #   ledger/src/blockstore.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
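As a hedged illustration of the approach the message describes, the sketch below is not the upstream code: the discriminant values, trait impls, test name, and the use of bincode are assumptions. It shows a #[repr(u8)] shred-type enum whose serde implementation stays byte-compatible with a raw u8, plus a round-trip test in the spirit of the one the commit mentions.

// A minimal sketch, assuming serde and bincode as dependencies; the exact
// implementation in ledger/src/shred.rs may differ.
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::convert::TryFrom;

#[repr(u8)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ShredType {
    // Illustrative discriminants standing in for the pre-existing u8 constants.
    Data = 0b1010_0101,
    Code = 0b0101_1010,
}

impl TryFrom<u8> for ShredType {
    type Error = String;
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            v if v == ShredType::Data as u8 => Ok(ShredType::Data),
            v if v == ShredType::Code as u8 => Ok(ShredType::Code),
            other => Err(format!("invalid shred type: {}", other)),
        }
    }
}

// Serialize as the raw u8 so the wire format matches what older nodes emit.
impl Serialize for ShredType {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_u8(*self as u8)
    }
}

// Deserialize from the raw u8, rejecting bytes that are not a known variant.
impl<'de> Deserialize<'de> for ShredType {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let value = u8::deserialize(deserializer)?;
        Self::try_from(value).map_err(serde::de::Error::custom)
    }
}

#[test]
fn test_shred_type_round_trips_as_u8() {
    // The enum must encode to the same single byte as the old u8 field,
    // and bytes that are not a valid variant must now fail to deserialize.
    assert_eq!(
        bincode::serialize(&ShredType::Data).unwrap(),
        vec![ShredType::Data as u8]
    );
    assert_eq!(
        bincode::deserialize::<ShredType>(&[ShredType::Code as u8]).unwrap(),
        ShredType::Code
    );
    assert!(bincode::deserialize::<ShredType>(&[0u8]).is_err());
}

With an enum like this, an invalid type byte is rejected once at deserialization instead of being checked ad hoc throughout blockstore and shred handling, which is the scattering the commit message points at.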
@@ -18,7 +18,9 @@ use {
     solana_client::rpc_response::SlotUpdate,
     solana_gossip::cluster_info::{ClusterInfo, DATA_PLANE_FANOUT},
     solana_ledger::{
-        blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
+        blockstore::Blockstore,
+        leader_schedule_cache::LeaderScheduleCache,
+        shred::{Shred, ShredType},
     },
     solana_measure::measure::Measure,
     solana_perf::packet::PacketBatch,

@@ -137,14 +139,14 @@ impl RetransmitStats {
     }
 }
 
-// Map of shred (slot, index, is_data) => list of hash values seen for that key.
-type ShredFilter = LruCache<(Slot, u32, bool), Vec<u64>>;
+// Map of shred (slot, index, type) => list of hash values seen for that key.
+type ShredFilter = LruCache<(Slot, u32, ShredType), Vec<u64>>;
 
 type ShredFilterAndHasher = (ShredFilter, PacketHasher);
 
 // Returns true if shred is already received and should skip retransmit.
 fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
-    let key = (shred.slot(), shred.index(), shred.is_data());
+    let key = (shred.slot(), shred.index(), shred.shred_type());
     let mut shreds_received = shreds_received.lock().unwrap();
     let (cache, hasher) = shreds_received.deref_mut();
     match cache.get_mut(&key) {
@@ -18,7 +18,7 @@ use {
     solana_ledger::{
        blockstore::{self, Blockstore, BlockstoreInsertionMetrics, MAX_DATA_SHREDS_PER_SLOT},
        leader_schedule_cache::LeaderScheduleCache,
-        shred::{Nonce, Shred},
+        shred::{Nonce, Shred, ShredType},
     },
     solana_measure::measure::Measure,
     solana_metrics::{inc_new_counter_debug, inc_new_counter_error},

@@ -161,12 +161,11 @@ impl ReceiveWindowStats {
 }
 
 fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
-    if shred.is_data() {
+    match shred.shred_type() {
         // Only data shreds have parent information
-        blockstore::verify_shred_slots(shred.slot(), shred.parent(), root)
-    } else {
+        ShredType::Data => blockstore::verify_shred_slots(shred.slot(), shred.parent(), root),
         // Filter out outdated coding shreds
-        shred.slot() >= root
+        ShredType::Code => shred.slot() >= root,
     }
 }

@@ -218,8 +217,8 @@ fn run_check_duplicate(
         if let Some(existing_shred_payload) = blockstore.is_shred_duplicate(
             shred_slot,
             shred.index(),
-            &shred.payload,
-            shred.is_data(),
+            shred.payload.clone(),
+            shred.shred_type(),
         ) {
             cluster_info.push_duplicate_shred(&shred, &existing_shred_payload)?;
             blockstore.store_duplicate_slot(