Compare commits

...

9 Commits

Author SHA1 Message Date
f1e9a944ef Invoke on-reboot from cloud startup script to avoid racing with cron (#6579) (#6580)
automerge

(cherry picked from commit 0c14ca58c7)
2019-10-27 13:55:37 -07:00
4cb38ddf01 -a is optional 2019-10-26 22:50:08 -07:00
593fde628c Blocktree metrics (#6527) (#6577)
automerge
2019-10-26 17:00:38 -07:00
34fa025b17 Fix race in blocktree.insert_shreds (#6550) (#6576)
automerge
2019-10-26 04:46:01 -07:00
33843f824a Bootstrap leader's stake is now authorized to the bootstrap leader's identity key (#6571) (#6574)
(cherry picked from commit 68acfd36d0)
2019-10-26 00:05:39 -07:00
542bda0a6f Add NET_NUM_xyz variables 2019-10-25 22:59:54 -07:00
d8bdbbf291 optimize verify_instruction (#6539) (#6572)
automerge
2019-10-25 22:19:16 -07:00
168b0f71f5 Disable sigverify on blockstreamer node
This node gets overloaded at high TPS trying to manage both a validator
and the blockexplorer.  Reduce its workload by turning off sigverify,
which doesn't really matter since this node doesn't even vote
2019-10-25 21:33:32 -07:00
be79d97dde Increase node start stagger (#6566) (#6567)
automerge
2019-10-25 17:20:13 -07:00
11 changed files with 313 additions and 104 deletions

View File

@@ -288,11 +288,15 @@ if ! $skipCreate; then
   echo "--- $cloudProvider.sh create"
   create_args=(
     -p "$netName"
-    -a "$bootstrapValidatorAddress"
     -c "$clientNodeCount"
     -n "$additionalValidatorCount"
     --dedicated
   )
+  if [[ -n $bootstrapValidatorAddress ]]; then
+    create_args+=(-a "$bootstrapValidatorAddress")
+  fi
   # shellcheck disable=SC2206
   create_args+=(${zone_args[@]})

View File

@@ -85,6 +85,7 @@ where
         total_packets += more_packets.packets.len();
         packets.push(more_packets)
     }
     let now = Instant::now();
     inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
@@ -127,7 +128,8 @@ where
             }
         }
-        blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;
+        let blocktree_insert_metrics = blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;
+        blocktree_insert_metrics.report_metrics("recv-window-insert-shreds");
         trace!(
             "Elapsed processing time in recv_window(): {}",

View File

@@ -310,16 +310,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     let bootstrap_storage_keypair = read_keypair_file(bootstrap_storage_keypair_file)?;
     let mint_keypair = read_keypair_file(mint_keypair_file)?;

-    let vote_account = vote_state::create_account(
+    let bootstrap_leader_vote_account = vote_state::create_account(
         &bootstrap_vote_keypair.pubkey(),
         &bootstrap_leader_keypair.pubkey(),
         0,
         1,
     );
-    let stake_account = stake_state::create_account(
-        &bootstrap_stake_keypair.pubkey(),
+    let bootstrap_leader_stake_account = stake_state::create_account(
+        &bootstrap_leader_keypair.pubkey(),
         &bootstrap_vote_keypair.pubkey(),
-        &vote_account,
+        &bootstrap_leader_vote_account,
         bootstrap_leader_stake_lamports,
     );
@@ -335,9 +335,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                 Account::new(bootstrap_leader_lamports, 0, &system_program::id()),
             ),
             // where votes go to
-            (bootstrap_vote_keypair.pubkey(), vote_account),
+            (
+                bootstrap_vote_keypair.pubkey(),
+                bootstrap_leader_vote_account,
+            ),
             // passive bootstrap leader stake
-            (bootstrap_stake_keypair.pubkey(), stake_account),
+            (
+                bootstrap_stake_keypair.pubkey(),
+                bootstrap_leader_stake_account,
+            ),
             (
                 bootstrap_storage_keypair.pubkey(),
                 storage_contract::create_validator_storage_account(

View File

@@ -17,6 +17,7 @@ use rayon::iter::IntoParallelRefIterator;
 use rayon::iter::ParallelIterator;
 use rayon::ThreadPool;
 use rocksdb::DBRawIterator;
+use solana_measure::measure::Measure;
 use solana_metrics::{datapoint_debug, datapoint_error};
 use solana_rayon_threadlimit::get_thread_count;
 use solana_sdk::clock::Slot;
@@ -30,7 +31,7 @@ use std::fs;
 use std::path::{Path, PathBuf};
 use std::rc::Rc;
 use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError};
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, Mutex, RwLock};
 pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";
@@ -55,10 +56,54 @@ pub struct Blocktree {
     data_shred_cf: LedgerColumn<cf::ShredData>,
     code_shred_cf: LedgerColumn<cf::ShredCode>,
     last_root: Arc<RwLock<u64>>,
+    insert_shreds_lock: Arc<Mutex<()>>,
     pub new_shreds_signals: Vec<SyncSender<bool>>,
     pub completed_slots_senders: Vec<SyncSender<Vec<u64>>>,
 }
+
+pub struct BlocktreeInsertionMetrics {
+    pub num_shreds: usize,
+    pub insert_lock_elapsed: u64,
+    pub insert_shreds_elapsed: u64,
+    pub shred_recovery_elapsed: u64,
+    pub chaining_elapsed: u64,
+    pub commit_working_sets_elapsed: u64,
+    pub write_batch_elapsed: u64,
+    pub total_elapsed: u64,
+    pub num_inserted: u64,
+    pub num_recovered: usize,
+}
+
+impl BlocktreeInsertionMetrics {
+    pub fn report_metrics(&self, metric_name: &'static str) {
+        datapoint_info!(
+            metric_name,
+            ("num_shreds", self.num_shreds as i64, i64),
+            ("total_elapsed", self.total_elapsed as i64, i64),
+            ("insert_lock_elapsed", self.insert_lock_elapsed as i64, i64),
+            (
+                "insert_shreds_elapsed",
+                self.insert_shreds_elapsed as i64,
+                i64
+            ),
+            (
+                "shred_recovery_elapsed",
+                self.shred_recovery_elapsed as i64,
+                i64
+            ),
+            ("chaining_elapsed", self.chaining_elapsed as i64, i64),
+            (
+                "commit_working_sets_elapsed",
+                self.commit_working_sets_elapsed as i64,
+                i64
+            ),
+            ("write_batch_elapsed", self.write_batch_elapsed as i64, i64),
+            ("num_inserted", self.num_inserted as i64, i64),
+            ("num_recovered", self.num_recovered as i64, i64),
+        );
+    }
+}

 impl Blocktree {
     /// Opens a Ledger in directory, provides "infinite" window of shreds
     pub fn open(ledger_path: &Path) -> Result<Blocktree> {
@@ -106,6 +151,7 @@ impl Blocktree {
             code_shred_cf,
             new_shreds_signals: vec![],
             completed_slots_senders: vec![],
+            insert_shreds_lock: Arc::new(Mutex::new(())),
             last_root,
         })
     }
@@ -358,7 +404,13 @@
         &self,
         shreds: Vec<Shred>,
         leader_schedule: Option<&Arc<LeaderScheduleCache>>,
-    ) -> Result<()> {
+    ) -> Result<BlocktreeInsertionMetrics> {
+        let mut total_start = Measure::start("Total elapsed");
+        let mut start = Measure::start("Blocktree lock");
+        let _lock = self.insert_shreds_lock.lock().unwrap();
+        start.stop();
+        let insert_lock_elapsed = start.as_us();
+
         let db = &*self.db;
         let mut write_batch = db.batch()?;
@@ -368,26 +420,40 @@
         let mut slot_meta_working_set = HashMap::new();
         let mut index_working_set = HashMap::new();
+        let num_shreds = shreds.len();
+        let mut start = Measure::start("Shred insertion");
+        let mut num_inserted = 0;
         shreds.into_iter().for_each(|shred| {
-            if shred.is_data() {
-                self.check_insert_data_shred(
-                    shred,
-                    &mut index_working_set,
-                    &mut slot_meta_working_set,
-                    &mut write_batch,
-                    &mut just_inserted_data_shreds,
-                );
-            } else if shred.is_code() {
-                self.check_insert_coding_shred(
-                    shred,
-                    &mut erasure_metas,
-                    &mut index_working_set,
-                    &mut write_batch,
-                    &mut just_inserted_coding_shreds,
-                );
+            let insert_success = {
+                if shred.is_data() {
+                    self.check_insert_data_shred(
+                        shred,
+                        &mut index_working_set,
+                        &mut slot_meta_working_set,
+                        &mut write_batch,
+                        &mut just_inserted_data_shreds,
+                    )
+                } else if shred.is_code() {
+                    self.check_insert_coding_shred(
+                        shred,
+                        &mut erasure_metas,
+                        &mut index_working_set,
+                        &mut write_batch,
+                        &mut just_inserted_coding_shreds,
+                    )
+                } else {
+                    panic!("There should be no other case");
+                }
+            };
+            if insert_success {
+                num_inserted += 1;
             }
         });
+        start.stop();
+        let insert_shreds_elapsed = start.as_us();
+        let mut start = Measure::start("Shred recovery");
+        let mut num_recovered = 0;
         if let Some(leader_schedule_cache) = leader_schedule {
             let recovered_data = Self::try_shred_recovery(
                 &db,
@@ -397,6 +463,7 @@
                 &mut just_inserted_coding_shreds,
             );
+            num_recovered = recovered_data.len();
             recovered_data.into_iter().for_each(|shred| {
                 if let Some(leader) = leader_schedule_cache.slot_leader_at(shred.slot(), None) {
                     if shred.verify(&leader) {
@@ -406,15 +473,21 @@
                             &mut slot_meta_working_set,
                             &mut write_batch,
                             &mut just_inserted_coding_shreds,
-                        )
+                        );
                     }
                 }
             });
         }
+        start.stop();
+        let shred_recovery_elapsed = start.as_us();
+        let mut start = Measure::start("Chaining");
         // Handle chaining for the working set
         handle_chaining(&self.db, &mut write_batch, &slot_meta_working_set)?;
+        start.stop();
+        let chaining_elapsed = start.as_us();
+        let mut start = Measure::start("Commit Working Sets");
         let (should_signal, newly_completed_slots) = commit_slot_meta_working_set(
             &slot_meta_working_set,
             &self.completed_slots_senders,
@@ -428,8 +501,13 @@
         for (&slot, index) in index_working_set.iter() {
             write_batch.put::<cf::Index>(slot, index)?;
         }
+        start.stop();
+        let commit_working_sets_elapsed = start.as_us();
+        let mut start = Measure::start("Write Batch");
         self.db.write(write_batch)?;
+        start.stop();
+        let write_batch_elapsed = start.as_us();
         if should_signal {
             for signal in &self.new_shreds_signals {
@@ -444,7 +522,20 @@
             newly_completed_slots,
         )?;

-        Ok(())
+        total_start.stop();
+        Ok(BlocktreeInsertionMetrics {
+            num_shreds,
+            total_elapsed: total_start.as_us(),
+            insert_lock_elapsed,
+            insert_shreds_elapsed,
+            shred_recovery_elapsed,
+            chaining_elapsed,
+            commit_working_sets_elapsed,
+            write_batch_elapsed,
+            num_inserted,
+            num_recovered,
+        })
     }

     fn check_insert_coding_shred(
@@ -454,7 +545,7 @@
         index_working_set: &mut HashMap<u64, Index>,
         write_batch: &mut WriteBatch,
         just_inserted_coding_shreds: &mut HashMap<(u64, u64), Shred>,
-    ) {
+    ) -> bool {
         let slot = shred.slot();
         let shred_index = u64::from(shred.index());
@@ -465,13 +556,16 @@
         // This gives the index of first coding shred in this FEC block
         // So, all coding shreds in a given FEC block will have the same set index
         if Blocktree::should_insert_coding_shred(&shred, index_meta.coding(), &self.last_root) {
-            if let Ok(()) = self.insert_coding_shred(erasure_metas, index_meta, &shred, write_batch)
-            {
-                just_inserted_coding_shreds
-                    .entry((slot, shred_index))
-                    .or_insert_with(|| shred);
-                new_index_meta.map(|n| index_working_set.insert(slot, n));
-            }
+            self.insert_coding_shred(erasure_metas, index_meta, &shred, write_batch)
+                .map(|_| {
+                    just_inserted_coding_shreds
+                        .entry((slot, shred_index))
+                        .or_insert_with(|| shred);
+                    new_index_meta.map(|n| index_working_set.insert(slot, n))
+                })
+                .is_ok()
+        } else {
+            false
         }
     }
@@ -482,7 +576,7 @@
         slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
         write_batch: &mut WriteBatch,
         just_inserted_data_shreds: &mut HashMap<(u64, u64), Shred>,
-    ) {
+    ) -> bool {
         let slot = shred.slot();
         let shred_index = u64::from(shred.index());
         let (index_meta, mut new_index_meta) =
@@ -521,6 +615,8 @@
         if insert_success {
             new_slot_meta_entry.map(|n| slot_meta_working_set.insert(slot, n));
         }
+
+        insert_success
     }

     fn should_insert_coding_shred(

ledger/tests/blocktree.rs (new file, +51)

View File

@@ -0,0 +1,51 @@
#[macro_use]
extern crate solana_ledger;

use solana_ledger::blocktree::{self, get_tmp_ledger_path, Blocktree};
use solana_ledger::entry;
use solana_sdk::hash::Hash;
use std::sync::Arc;
use std::thread::Builder;

#[test]
fn test_multiple_threads_insert_shred() {
    let blocktree_path = get_tmp_ledger_path!();
    let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());

    for _ in 0..100 {
        let num_threads = 10;

        // Create `num_threads` different ticks in slots 1..num_threads + 1, all
        // with parent = slot 0
        let threads: Vec<_> = (0..num_threads)
            .map(|i| {
                let entries = entry::create_ticks(1, Hash::default());
                let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false);
                let blocktree_ = blocktree.clone();
                Builder::new()
                    .name("blocktree-writer".to_string())
                    .spawn(move || {
                        blocktree_.insert_shreds(shreds, None).unwrap();
                    })
                    .unwrap()
            })
            .collect();

        for t in threads {
            t.join().unwrap()
        }

        // Check slot 0 has the correct children
        let mut meta0 = blocktree.meta(0).unwrap().unwrap();
        meta0.next_slots.sort();
        let expected_next_slots: Vec<_> = (1..num_threads + 1).collect();
        assert_eq!(meta0.next_slots, expected_next_slots);

        // Delete slots for next iteration
        blocktree.purge_slots(0, None);
    }

    // Cleanup
    drop(blocktree);
    Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
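
The test above exercises the race fix from #6550 together with the metrics added in #6527: concurrent insert_shreds callers are serialized behind a coarse insert_shreds_lock, and each call reports how long its phases took. Below is a minimal standalone sketch of that pattern using only the standard library; the type, field, and function names are placeholders for illustration, not the crate's actual API.

use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Instant;

// Placeholder metrics struct, mirroring the shape of BlocktreeInsertionMetrics.
#[derive(Debug, Default)]
struct InsertMetrics {
    lock_us: u128,
    write_us: u128,
    num_inserted: u64,
}

// Placeholder store; only the locking/timing pattern matters here.
struct Store {
    // Serializes writers. The real data lives elsewhere (e.g. RocksDB), so a
    // Mutex<()> guard is enough to keep multi-step inserts from interleaving.
    insert_lock: Mutex<()>,
    items: Mutex<Vec<u64>>,
}

impl Store {
    fn insert(&self, batch: Vec<u64>) -> InsertMetrics {
        let mut metrics = InsertMetrics::default();

        let t = Instant::now();
        let _guard = self.insert_lock.lock().unwrap(); // held for the whole insert
        metrics.lock_us = t.elapsed().as_micros();

        let t = Instant::now();
        let mut items = self.items.lock().unwrap();
        for v in batch {
            items.push(v);
            metrics.num_inserted += 1;
        }
        metrics.write_us = t.elapsed().as_micros();
        metrics
    }
}

fn main() {
    let store = Arc::new(Store {
        insert_lock: Mutex::new(()),
        items: Mutex::new(Vec::new()),
    });
    // Several writers race on the same store, as in the test above.
    let handles: Vec<_> = (0..4)
        .map(|i| {
            let store = store.clone();
            thread::spawn(move || store.insert(vec![i; 10]))
        })
        .collect();
    for h in handles {
        println!("{:?}", h.join().unwrap());
    }
    assert_eq!(store.items.lock().unwrap().len(), 40);
}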

View File

@@ -662,6 +662,7 @@ EOF
 set -ex
 if [[ -f /solana-scratch/.instance-startup-complete ]]; then
+  echo reboot
   $(
     cd "$here"/scripts/
     if "$enableGpu"; then
@@ -672,6 +673,9 @@ if [[ -f /solana-scratch/.instance-startup-complete ]]; then
       cat mount-additional-disk.sh
     fi
   )
+  if [[ -x ~solana/solana/on-reboot ]]; then
+    sudo -u solana ~solana/solana/on-reboot
+  fi
   # Skip most setup on instance reboot
   exit 0
@@ -732,6 +736,8 @@
 )
 cat > /etc/motd <<EOM
+See startup script log messages in /var/log/syslog for status:
+  $ sudo cat /var/log/syslog | egrep \\(startup-script\\|cloud-init\)
 $(printNetworkInfo)
 $(creationInfo)
 EOM
@@ -819,7 +825,12 @@ info)
     printf " %-16s | %-15s | %-15s | %s\n" "$nodeType" "$ip" "$ipPrivate" "$zone"
   }
-  if ! $evalInfo; then
+  if $evalInfo; then
+    echo "NET_NUM_VALIDATORS=${#validatorIpList[@]}"
+    echo "NET_NUM_CLIENTS=${#clientIpList[@]}"
+    echo "NET_NUM_BLOCKSTREAMERS=${#blockstreamerIpList[@]}"
+    echo "NET_NUM_ARCHIVERS=${#archiverIpList[@]}"
+  else
     printNode "Node Type" "Public IP" "Private IP" "Zone"
     echo "-------------------+-----------------+-----------------+--------------"
   fi

View File

@@ -722,8 +722,10 @@ deploy() {
       # Stagger additional node start time. If too many nodes start simultaneously
       # the bootstrap node gets more rsync requests from the additional nodes than
       # it can handle.
-      if ((nodeIndex % 2 == 0)); then
+      if ((nodeIndex % 3 == 0)); then
         sleep 2
+      elif ((nodeIndex % 3 == 1)); then
+        sleep 4
       fi
     fi
   done

View File

@@ -76,7 +76,6 @@ now=\$(date -u +"%Y-%m-%dT%H:%M:%SZ")
 ln -sfT validator.log.\$now validator.log
 EOF
 chmod +x ~/solana/on-reboot
-echo "@reboot ~/solana/on-reboot" | crontab -

 GPU_CUDA_OK=false
 GPU_FAIL_IF_NONE=false
@@ -105,7 +104,7 @@ waitForNodeToInit() {
   echo "--- waiting for $hostname to boot up"
   SECONDS=
   while [[ ! -r $initCompleteFile ]]; do
-    if [[ $SECONDS -ge 120 ]]; then
+    if [[ $SECONDS -ge 240 ]]; then
       echo "^^^ +++"
       echo "Error: $initCompleteFile not found in $SECONDS seconds"
       exit 1
@@ -262,6 +261,7 @@ EOF
     args+=(
       --blockstream /tmp/solana-blockstream.sock
       --no-voting
+      --dev-no-sigverify
     )
   else
     args+=(--enable-rpc-exit)

View File

@@ -1,34 +0,0 @@
#![feature(test)]

extern crate test;

use solana_runtime::message_processor::is_zeroed;
use test::Bencher;

const BUFSIZE: usize = 1024 * 1024 + 127;
static BUF0: [u8; BUFSIZE] = [0; BUFSIZE];
static BUF1: [u8; BUFSIZE] = [1; BUFSIZE];

#[bench]
fn bench_is_zeroed(bencher: &mut Bencher) {
    bencher.iter(|| {
        is_zeroed(&BUF0);
    });
}

#[bench]
fn bench_is_zeroed_not(bencher: &mut Bencher) {
    bencher.iter(|| {
        is_zeroed(&BUF1);
    });
}

#[bench]
fn bench_is_zeroed_by_iter(bencher: &mut Bencher) {
    bencher.iter(|| BUF0.iter().all(|item| *item == 0));
}

#[bench]
fn bench_is_zeroed_not_by_iter(bencher: &mut Bencher) {
    bencher.iter(|| BUF1.iter().all(|item| *item == 0));
}

View File

@@ -2,7 +2,9 @@
 extern crate test;

+use log::*;
 use solana_runtime::message_processor::*;
+use solana_sdk::{account::Account, pubkey::Pubkey};
 use test::Bencher;

 #[bench]
@@ -12,3 +14,59 @@ fn bench_has_duplicates(bencher: &mut Bencher) {
         assert!(!has_duplicates(&data));
     })
 }
+
+#[bench]
+fn bench_verify_instruction_data(bencher: &mut Bencher) {
+    solana_logger::setup();
+    let owner = Pubkey::new_rand();
+    let non_owner = Pubkey::new_rand();
+    let pre = Account::new(0, BUFSIZE, &owner);
+    let post = Account::new(0, BUFSIZE, &owner);
+    assert_eq!(verify_instruction(true, &owner, &pre, &post), Ok(()));
+
+    bencher.iter(|| pre.data == post.data);
+    let summary = bencher.bench(|_bencher| {}).unwrap();
+    info!("data compare {} ns/iter", summary.median);
+
+    // this one should be faster
+    bencher.iter(|| {
+        verify_instruction(true, &owner, &pre, &post).unwrap();
+    });
+    let summary = bencher.bench(|_bencher| {}).unwrap();
+    info!("data no change by owner: {} ns/iter", summary.median);
+
+    bencher.iter(|| {
+        verify_instruction(true, &non_owner, &pre, &post).unwrap();
+    });
+    let summary = bencher.bench(|_bencher| {}).unwrap();
+    info!("data no change by non owner: {} ns/iter", summary.median);
+}
+
+const BUFSIZE: usize = 1024 * 1024 + 127;
+static BUF0: [u8; BUFSIZE] = [0; BUFSIZE];
+static BUF1: [u8; BUFSIZE] = [1; BUFSIZE];
+
+#[bench]
+fn bench_is_zeroed(bencher: &mut Bencher) {
+    bencher.iter(|| {
+        is_zeroed(&BUF0);
+    });
+}
+
+#[bench]
+fn bench_is_zeroed_not(bencher: &mut Bencher) {
+    bencher.iter(|| {
+        is_zeroed(&BUF1);
+    });
+}
+
+#[bench]
+fn bench_is_zeroed_by_iter(bencher: &mut Bencher) {
+    bencher.iter(|| BUF0.iter().all(|item| *item == 0));
+}
+
+#[bench]
+fn bench_is_zeroed_not_by_iter(bencher: &mut Bencher) {
+    bencher.iter(|| BUF1.iter().all(|item| *item == 0));
+}

View File

@@ -58,7 +58,7 @@ fn get_subset_unchecked_mut<'a, T>(
         .collect())
 }

-fn verify_instruction(
+pub fn verify_instruction(
     is_debitable: bool,
     program_id: &Pubkey,
     pre: &Account,
@@ -70,26 +70,22 @@ fn verify_instruction(
     // only if the account is credit-debit and
     // only if the data is zero-initialized or empty
     if pre.owner != post.owner
-        && (!is_debitable
-        // line coverage used to get branch coverage
-        || *program_id != pre.owner
-        // line coverage used to get branch coverage
+        && (!is_debitable // line coverage used to get branch coverage
+        || *program_id != pre.owner // line coverage used to get branch coverage
         || !is_zeroed(&post.data))
     {
         return Err(InstructionError::ModifiedProgramId);
     }

     // An account not assigned to the program cannot have its balance decrease.
-    if *program_id != pre.owner
-        // line coverage used to get branch coverage
+    if *program_id != pre.owner // line coverage used to get branch coverage
         && pre.lamports > post.lamports
     {
         return Err(InstructionError::ExternalAccountLamportSpend);
     }

     // The balance of credit-only accounts may only increase.
-    if !is_debitable
-        // line coverage used to get branch coverage
+    if !is_debitable // line coverage used to get branch coverage
         && pre.lamports > post.lamports
     {
         return Err(InstructionError::CreditOnlyLamportSpend);
@@ -98,32 +94,50 @@ fn verify_instruction(
     // Only the system program can change the size of the data
     // and only if the system program owns the account
     if pre.data.len() != post.data.len()
-        && (!system_program::check_id(program_id)
-        // line coverage used to get branch coverage
+        && (!system_program::check_id(program_id) // line coverage used to get branch coverage
         || !system_program::check_id(&pre.owner))
     {
         return Err(InstructionError::AccountDataSizeChanged);
     }

-    // Verify data...
-    if pre.data != post.data {
-        // Credit-only account data may not change.
-        if !is_debitable {
-            return Err(InstructionError::CreditOnlyDataModified);
-        }
-        // For accounts not assigned to the program, the data may not change.
-        if *program_id != pre.owner {
-            return Err(InstructionError::ExternalAccountDataModified);
-        }
-    }
+    enum DataChanged {
+        Unchecked,
+        Checked(bool),
+    };
+
+    // Verify data, remember answer because comparing
+    // a megabyte costs us multiple microseconds...
+    let mut data_changed = DataChanged::Unchecked;
+    let mut data_changed = || -> bool {
+        match data_changed {
+            DataChanged::Unchecked => {
+                let changed = pre.data != post.data;
+                data_changed = DataChanged::Checked(changed);
+                changed
+            }
+            DataChanged::Checked(changed) => changed,
+        }
+    };
+
+    // For accounts not assigned to the program, the data may not change.
+    if *program_id != pre.owner // line coverage used to get branch coverage
+        && data_changed()
+    {
+        return Err(InstructionError::ExternalAccountDataModified);
+    }
+
+    // Credit-only account data may not change.
+    if !is_debitable // line coverage used to get branch coverage
+        && data_changed()
+    {
+        return Err(InstructionError::CreditOnlyDataModified);
+    }

     // executable is one-way (false->true) and
     // only system or the account owner may modify.
     if pre.executable != post.executable
-        && (!is_debitable
-        // line coverage used to get branch coverage
-        || pre.executable
-        // line coverage used to get branch coverage
+        && (!is_debitable // line coverage used to get branch coverage
+        || pre.executable // line coverage used to get branch coverage
         || *program_id != pre.owner)
     {
         return Err(InstructionError::ExecutableModified);
@@ -589,7 +603,6 @@ mod tests {
             verify_instruction(is_debitable, &program_id, &pre, &post)
         };

-        let system_program_id = system_program::id();
         let mallory_program_id = Pubkey::new_rand();

         assert_eq!(
@@ -600,13 +613,13 @@
         assert_eq!(
             change_data(&mallory_program_id, true),
             Err(InstructionError::ExternalAccountDataModified),
-            "malicious Mallory should not be able to change the account data"
+            "non-owner mallory should not be able to change the account data"
         );

         assert_eq!(
-            change_data(&system_program_id, false),
+            change_data(&alice_program_id, false),
             Err(InstructionError::CreditOnlyDataModified),
-            "system program should not be able to change the data if credit-only"
+            "alice isn't allowed to touch a CO account"
         );
     }
@@ -641,7 +654,7 @@
         assert_eq!(
             verify_instruction(true, &alice_program_id, &pre, &post),
             Ok(()),
-            "alice should be able to deduct lamports and the account to bob if the data is zeroed",
+            "alice should be able to deduct lamports and give the account to bob if the data is zeroed",
         );
     }
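
The rewrite above avoids comparing pre.data and post.data more than once per call, since account data can be megabytes. The same memoization idea in a self-contained form, using Option<bool> in place of the DataChanged enum (a sketch with illustrative names and a simplified error type; not the crate's code):

// Minimal sketch of the lazy, memoized data comparison used by verify_instruction,
// with Option<bool> standing in for the DataChanged enum.
fn check(
    is_debitable: bool,
    owned_by_program: bool,
    pre: &[u8],
    post: &[u8],
) -> Result<(), &'static str> {
    let mut cached: Option<bool> = None;
    // The buffers can be megabytes, so run the comparison at most once.
    let mut data_changed = || -> bool { *cached.get_or_insert_with(|| pre != post) };

    if !owned_by_program && data_changed() {
        return Err("external account data modified");
    }
    if !is_debitable && data_changed() {
        return Err("credit-only data modified");
    }
    Ok(())
}

fn main() {
    let pre = vec![0u8; 1024 * 1024];
    let mut post = pre.clone();
    post[0] = 1;

    // The owner of a credit-debit account may change the data.
    assert!(check(true, true, &pre, &post).is_ok());
    // A non-owner may not.
    assert_eq!(
        check(true, false, &pre, &post),
        Err("external account data modified")
    );
    // Neither may anyone touch a credit-only account's data.
    assert_eq!(
        check(false, true, &pre, &post),
        Err("credit-only data modified")
    );
}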