Rename solana-runtime to sealevel (#6239)

automerge
This commit is contained in:
Greg Fitzgerald
2019-10-04 16:02:44 -06:00
committed by Grimes
parent 5617162cb6
commit 2e921437cd
38 changed files with 135 additions and 106 deletions

runtime/.gitignore vendored

@@ -1,2 +0,0 @@
/target/
/farf/


@@ -9,40 +9,8 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
bincode = "1.2.0"
bv = { version = "0.11.0", features = ["serde"] }
byteorder = "1.3.2"
fnv = "1.0.6"
fs_extra = "1.1.0"
lazy_static = "1.4.0"
libc = "0.2.62"
libloading = "0.5.2"
log = "0.4.8"
memmap = "0.6.2"
rand = "0.6.5"
rayon = "1.2.0"
serde = { version = "1.0.101", features = ["rc"] }
serde_derive = "1.0.101"
serde_json = "1.0.41"
solana-logger = { path = "../logger", version = "0.20.0" }
solana-measure = { path = "../measure", version = "0.20.0" }
solana-metrics = { path = "../metrics", version = "0.20.0" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.20.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.20.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.20.0" }
sys-info = "0.5.8"
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
itertools = "0.8.0"
sealevel = { path = "../sealevel", version = "0.20.0" }
[lib]
crate-type = ["lib"]
name = "solana_runtime"
[dev-dependencies]
solana-noop-program = { path = "../programs/noop_program", version = "0.20.0" }


@@ -1,69 +0,0 @@
#![feature(test)]
extern crate test;
use solana_runtime::accounts::{create_test_accounts, Accounts};
use solana_runtime::bank::*;
use solana_sdk::account::Account;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::pubkey::Pubkey;
use std::sync::Arc;
use test::Bencher;
fn deposit_many(bank: &Bank, pubkeys: &mut Vec<Pubkey>, num: usize) {
for t in 0..num {
let pubkey = Pubkey::new_rand();
let account = Account::new((t + 1) as u64, 0, &Account::default().owner);
pubkeys.push(pubkey.clone());
assert!(bank.get_account(&pubkey).is_none());
bank.deposit(&pubkey, (t + 1) as u64);
assert_eq!(bank.get_account(&pubkey).unwrap(), account);
}
}
#[bench]
fn test_accounts_create(bencher: &mut Bencher) {
let (genesis_block, _) = create_genesis_block(10_000);
let bank0 = Bank::new_with_paths(&genesis_block, Some("bench_a0".to_string()));
bencher.iter(|| {
let mut pubkeys: Vec<Pubkey> = vec![];
deposit_many(&bank0, &mut pubkeys, 1000);
});
}
#[bench]
fn test_accounts_squash(bencher: &mut Bencher) {
let (genesis_block, _) = create_genesis_block(100_000);
let mut banks: Vec<Arc<Bank>> = Vec::with_capacity(10);
banks.push(Arc::new(Bank::new_with_paths(
&genesis_block,
Some("bench_a1".to_string()),
)));
let mut pubkeys: Vec<Pubkey> = vec![];
deposit_many(&banks[0], &mut pubkeys, 250000);
banks[0].freeze();
// Measures the performance of the squash operation, which merges this bank's accounts
// with its parent's; the majority of the accounts are already present in the parent bank
// and are moved over to this bank.
bencher.iter(|| {
banks.push(Arc::new(Bank::new_from_parent(
&banks[0],
&Pubkey::default(),
1u64,
)));
for accounts in 0..10000 {
banks[1].deposit(&pubkeys[accounts], (accounts + 1) as u64);
}
banks[1].squash();
});
}
#[bench]
fn test_accounts_hash_internal_state(bencher: &mut Bencher) {
let accounts = Accounts::new(Some("bench_accounts_hash_internal".to_string()));
let mut pubkeys: Vec<Pubkey> = vec![];
create_test_accounts(&accounts, &mut pubkeys, 60000, 0);
bencher.iter(|| {
accounts.hash_internal_state(0);
});
}


@@ -1,46 +0,0 @@
#![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use solana_runtime::accounts_db::AccountInfo;
use solana_runtime::accounts_index::AccountsIndex;
use solana_sdk::pubkey::Pubkey;
use test::Bencher;
#[bench]
fn bench_accounts_index(bencher: &mut Bencher) {
const NUM_PUBKEYS: usize = 10_000;
let pubkeys: Vec<_> = (0..NUM_PUBKEYS)
.into_iter()
.map(|_| Pubkey::new_rand())
.collect();
const NUM_FORKS: u64 = 16;
let mut reclaims = vec![];
let mut index = AccountsIndex::<AccountInfo>::default();
for f in 0..NUM_FORKS {
for _p in 0..NUM_PUBKEYS {
index.insert(f, &pubkeys[_p], AccountInfo::default(), &mut reclaims);
}
}
let mut fork = NUM_FORKS;
let mut root = 0;
bencher.iter(|| {
for _p in 0..NUM_PUBKEYS {
let pubkey = thread_rng().gen_range(0, NUM_PUBKEYS);
index.insert(
fork,
&pubkeys[pubkey],
AccountInfo::default(),
&mut reclaims,
);
reclaims.clear();
}
index.add_root(root);
root += 1;
fork += 1;
});
}


@@ -1,128 +0,0 @@
#![feature(test)]
extern crate test;
use rand::{thread_rng, Rng};
use solana_runtime::append_vec::test_utils::{create_test_account, get_append_vec_path};
use solana_runtime::append_vec::AppendVec;
use solana_sdk::hash::Hash;
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::spawn;
use std::time::Duration;
use test::Bencher;
#[bench]
fn append_vec_append(bencher: &mut Bencher) {
let path = get_append_vec_path("bench_append");
let vec = AppendVec::new(&path.path, true, 64 * 1024);
bencher.iter(|| {
let (meta, account) = create_test_account(0);
if vec
.append_account(meta, &account, Hash::default())
.is_none()
{
vec.reset();
}
});
}
fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> {
(0..size)
.into_iter()
.filter_map(|sample| {
let (meta, account) = create_test_account(sample);
vec.append_account(meta, &account, Hash::default())
.map(|pos| (sample, pos))
})
.collect()
}
#[bench]
fn append_vec_sequential_read(bencher: &mut Bencher) {
let path = get_append_vec_path("seq_read");
let vec = AppendVec::new(&path.path, true, 64 * 1024);
let size = 1_000;
let mut indexes = add_test_accounts(&vec, size);
bencher.iter(|| {
let (sample, pos) = indexes.pop().unwrap();
let (account, _next) = vec.get_account(pos).unwrap();
let (_meta, test) = create_test_account(sample);
assert_eq!(account.data, test.data.as_slice());
indexes.push((sample, pos));
});
}
#[bench]
fn append_vec_random_read(bencher: &mut Bencher) {
let path = get_append_vec_path("random_read");
let vec = AppendVec::new(&path.path, true, 64 * 1024);
let size = 1_000;
let indexes = add_test_accounts(&vec, size);
bencher.iter(|| {
let random_index: usize = thread_rng().gen_range(0, indexes.len());
let (sample, pos) = &indexes[random_index];
let (account, _next) = vec.get_account(*pos).unwrap();
let (_meta, test) = create_test_account(*sample);
assert_eq!(account.data, test.data.as_slice());
});
}
#[bench]
fn append_vec_concurrent_append_read(bencher: &mut Bencher) {
let path = get_append_vec_path("concurrent_read");
let vec = Arc::new(AppendVec::new(&path.path, true, 1024 * 1024));
let vec1 = vec.clone();
let indexes: Arc<Mutex<Vec<(usize, usize)>>> = Arc::new(Mutex::new(vec![]));
let indexes1 = indexes.clone();
spawn(move || loop {
let sample = indexes1.lock().unwrap().len();
let (meta, account) = create_test_account(sample);
if let Some(pos) = vec1.append_account(meta, &account, Hash::default()) {
indexes1.lock().unwrap().push((sample, pos))
} else {
break;
}
});
while indexes.lock().unwrap().is_empty() {
sleep(Duration::from_millis(100));
}
bencher.iter(|| {
let len = indexes.lock().unwrap().len();
let random_index: usize = thread_rng().gen_range(0, len);
let (sample, pos) = indexes.lock().unwrap().get(random_index).unwrap().clone();
let (account, _next) = vec.get_account(pos).unwrap();
let (_meta, test) = create_test_account(sample);
assert_eq!(account.data, test.data.as_slice());
});
}
#[bench]
fn append_vec_concurrent_read_append(bencher: &mut Bencher) {
let path = get_append_vec_path("concurrent_read");
let vec = Arc::new(AppendVec::new(&path.path, true, 1024 * 1024));
let vec1 = vec.clone();
let indexes: Arc<Mutex<Vec<(usize, usize)>>> = Arc::new(Mutex::new(vec![]));
let indexes1 = indexes.clone();
spawn(move || loop {
let len = indexes1.lock().unwrap().len();
if len == 0 {
continue;
}
let random_index: usize = thread_rng().gen_range(0, len + 1);
let (sample, pos) = indexes1
.lock()
.unwrap()
.get(random_index % len)
.unwrap()
.clone();
let (account, _next) = vec1.get_account(pos).unwrap();
let (_meta, test) = create_test_account(sample);
assert_eq!(account.data, test.data.as_slice());
});
bencher.iter(|| {
let sample: usize = thread_rng().gen_range(0, 256);
let (meta, account) = create_test_account(sample);
if let Some(pos) = vec.append_account(meta, &account, Hash::default()) {
indexes.lock().unwrap().push((sample, pos))
}
});
}


@@ -1,175 +0,0 @@
#![feature(test)]
extern crate test;
use log::*;
use solana_runtime::bank::*;
use solana_runtime::bank_client::BankClient;
use solana_runtime::loader_utils::create_invoke_instruction;
use solana_sdk::account::KeyedAccount;
use solana_sdk::client::AsyncClient;
use solana_sdk::client::SyncClient;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::instruction::InstructionError;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::transaction::Transaction;
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use test::Bencher;
const BUILTIN_PROGRAM_ID: [u8; 32] = [
098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
const NOOP_PROGRAM_ID: [u8; 32] = [
098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
];
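// Both constants above are the ASCII bytes of "builtin_program_id" padded with zeros; they
// differ only in the final byte, giving the benches two distinct program ids.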
fn process_instruction(
_program_id: &Pubkey,
_keyed_accounts: &mut [KeyedAccount],
_data: &[u8],
) -> Result<(), InstructionError> {
Ok(())
}
pub fn create_builtin_transactions(
bank_client: &BankClient,
mint_keypair: &Keypair,
) -> Vec<Transaction> {
let program_id = Pubkey::new(&BUILTIN_PROGRAM_ID);
(0..4096)
.into_iter()
.map(|_| {
// Seed the signer account
let rando0 = Keypair::new();
bank_client
.transfer(10_000, &mint_keypair, &rando0.pubkey())
.expect(&format!("{}:{}", line!(), file!()));
let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
Transaction::new_signed_instructions(&[&rando0], vec![instruction], blockhash)
})
.collect()
}
pub fn create_native_loader_transactions(
bank_client: &BankClient,
mint_keypair: &Keypair,
) -> Vec<Transaction> {
let program_id = Pubkey::new(&NOOP_PROGRAM_ID);
(0..4096)
.into_iter()
.map(|_| {
// Seed the signer account
let rando0 = Keypair::new();
bank_client
.transfer(10_000, &mint_keypair, &rando0.pubkey())
.expect(&format!("{}:{}", line!(), file!()));
let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
Transaction::new_signed_instructions(&[&rando0], vec![instruction], blockhash)
})
.collect()
}
fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &Vec<Transaction>) {
let results = bank.process_transactions(&transactions);
assert!(results.iter().all(Result::is_ok));
}
fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &Vec<Transaction>) {
for transaction in transactions.clone() {
bank_client.async_send_transaction(transaction).unwrap();
}
for _ in 0..1_000_000_000_u64 {
if bank
.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
.is_some()
{
break;
}
sleep(Duration::from_nanos(1));
}
if !bank
.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
.unwrap()
.is_ok()
{
error!(
"transaction failed: {:?}",
bank.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
.unwrap()
);
assert!(false);
}
}
fn do_bench_transactions(
bencher: &mut Bencher,
bench_work: &dyn Fn(&Arc<Bank>, &BankClient, &Vec<Transaction>),
create_transactions: &dyn Fn(&BankClient, &Keypair) -> Vec<Transaction>,
) {
solana_logger::setup();
let ns_per_s = 1_000_000_000;
let (mut genesis_block, mint_keypair) = create_genesis_block(100_000_000);
genesis_block.ticks_per_slot = 100;
let mut bank = Bank::new(&genesis_block);
bank.add_instruction_processor(Pubkey::new(&BUILTIN_PROGRAM_ID), process_instruction);
bank.register_native_instruction_processor(
"solana_noop_program",
&Pubkey::new(&NOOP_PROGRAM_ID),
);
let bank = Arc::new(bank);
let bank_client = BankClient::new_shared(&bank);
let transactions = create_transactions(&bank_client, &mint_keypair);
// Do once to fund accounts, load modules, etc...
let results = bank.process_transactions(&transactions);
assert!(results.iter().all(Result::is_ok));
bencher.iter(|| {
// Since bencher runs this multiple times, we need to clear the signatures.
bank.clear_signatures();
bench_work(&bank, &bank_client, &transactions);
});
let summary = bencher.bench(|_bencher| {}).unwrap();
info!(" {:?} transactions", transactions.len());
info!(" {:?} ns/iter median", summary.median as u64);
assert!(0f64 != summary.median);
let tps = transactions.len() as u64 * (ns_per_s / summary.median as u64);
info!(" {:?} TPS", tps);
}
#[bench]
#[ignore]
fn bench_bank_sync_process_builtin_transactions(bencher: &mut Bencher) {
do_bench_transactions(bencher, &sync_bencher, &create_builtin_transactions);
}
#[bench]
#[ignore]
fn bench_bank_sync_process_native_loader_transactions(bencher: &mut Bencher) {
do_bench_transactions(bencher, &sync_bencher, &create_native_loader_transactions);
}
#[bench]
#[ignore]
fn bench_bank_async_process_builtin_transactions(bencher: &mut Bencher) {
do_bench_transactions(bencher, &async_bencher, &create_builtin_transactions);
}
#[bench]
#[ignore]
fn bench_bank_async_process_native_loader_transactions(bencher: &mut Bencher) {
do_bench_transactions(bencher, &async_bencher, &create_native_loader_transactions);
}


@@ -1,100 +0,0 @@
#![feature(test)]
extern crate test;
use bv::BitVec;
use fnv::FnvHasher;
use solana_runtime::bloom::{Bloom, BloomHashIndex};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::Signature;
use std::collections::HashSet;
use std::hash::Hasher;
use test::Bencher;
#[bench]
#[ignore]
fn bench_bits_set(bencher: &mut Bencher) {
let mut bits: BitVec<u8> = BitVec::new_fill(false, 38_340_234 as u64);
let mut hasher = FnvHasher::default();
bencher.iter(|| {
let idx = hasher.finish() % bits.len();
bits.set(idx, true);
hasher.write_u64(idx);
});
// subtract the next bencher result from this one to get a number for raw
// bits.set()
}
#[bench]
#[ignore]
fn bench_bits_set_hasher(bencher: &mut Bencher) {
let bits: BitVec<u8> = BitVec::new_fill(false, 38_340_234 as u64);
let mut hasher = FnvHasher::default();
bencher.iter(|| {
let idx = hasher.finish() % bits.len();
hasher.write_u64(idx);
});
}
#[bench]
#[ignore]
fn bench_sigs_bloom(bencher: &mut Bencher) {
// 1M TPS * 1s (length of block in sigs) == 1M items in filter
// 1.0E-8 false positive rate
// https://hur.st/bloomfilter/?n=1000000&p=1.0E-8&m=&k=
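// Bloom::num_bits(1_000_000f64, 1.0e-8f64) evaluates to 38_340_234, the filter size used below.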
let blockhash = hash(Hash::default().as_ref());
// info!("blockhash = {:?}", blockhash);
let keys = (0..27)
.into_iter()
.map(|i| blockhash.hash_at_index(i))
.collect();
let mut sigs: Bloom<Signature> = Bloom::new(38_340_234, keys);
let mut id = blockhash;
let mut falses = 0;
let mut iterations = 0;
bencher.iter(|| {
id = hash(id.as_ref());
let mut sigbytes = Vec::from(id.as_ref());
id = hash(id.as_ref());
sigbytes.extend(id.as_ref());
let sig = Signature::new(&sigbytes);
if sigs.contains(&sig) {
falses += 1;
}
sigs.add(&sig);
sigs.contains(&sig);
iterations += 1;
});
assert_eq!(falses, 0);
}
#[bench]
#[ignore]
fn bench_sigs_hashmap(bencher: &mut Bencher) {
// same structure as above, but using a HashSet instead of a Bloom filter
let blockhash = hash(Hash::default().as_ref());
// info!("blockhash = {:?}", blockhash);
let mut sigs: HashSet<Signature> = HashSet::new();
let mut id = blockhash;
let mut falses = 0;
let mut iterations = 0;
bencher.iter(|| {
id = hash(id.as_ref());
let mut sigbytes = Vec::from(id.as_ref());
id = hash(id.as_ref());
sigbytes.extend(id.as_ref());
let sig = Signature::new(&sigbytes);
if sigs.contains(&sig) {
falses += 1;
}
sigs.insert(sig);
sigs.contains(&sig);
iterations += 1;
});
assert_eq!(falses, 0);
}


@@ -1,14 +0,0 @@
#![feature(test)]
extern crate test;
use solana_runtime::message_processor::*;
use test::Bencher;
#[bench]
fn bench_has_duplicates(bencher: &mut Bencher) {
bencher.iter(|| {
let data = test::black_box([1, 2, 3]);
assert!(!has_duplicates(&data));
})
}


@@ -1,33 +0,0 @@
#![feature(test)]
extern crate test;
use bincode::serialize;
use solana_runtime::status_cache::*;
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::Signature;
use test::Bencher;
type BankStatusCache = StatusCache<()>;
#[bench]
fn test_statuscache_serialize(bencher: &mut Bencher) {
let mut status_cache = BankStatusCache::default();
status_cache.add_root(0);
status_cache.clear_signatures();
for hash_index in 0..100 {
let blockhash = Hash::new(&vec![hash_index; std::mem::size_of::<Hash>()]);
let mut id = blockhash;
for _ in 0..100 {
id = hash(id.as_ref());
let mut sigbytes = Vec::from(id.as_ref());
id = hash(id.as_ref());
sigbytes.extend(id.as_ref());
let sig = Signature::new(&sigbytes);
status_cache.insert(&blockhash, &sig, 0, ());
}
}
bencher.iter(|| {
let _ = serialize(&status_cache.slot_deltas(&vec![0])).unwrap();
});
}


@@ -1,19 +0,0 @@
#![feature(test)]
extern crate test;
use rand::seq::SliceRandom;
use rand::thread_rng;
use solana_runtime::transaction_utils::OrderedIterator;
use test::Bencher;
#[bench]
fn bench_ordered_iterator_with_order_shuffling(bencher: &mut Bencher) {
let vec: Vec<usize> = (0..100_usize).collect();
bencher.iter(|| {
let mut order: Vec<usize> = (0..100_usize).collect();
order.shuffle(&mut thread_rng());
let _ordered_iterator_resp: Vec<&usize> =
OrderedIterator::new(&vec, Some(&order)).collect();
});
}

runtime/lib.rs Normal file

File diff suppressed because it is too large
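The diff for the new runtime/lib.rs is suppressed by the viewer. Since runtime/Cargo.toml above keeps the library name solana_runtime but now depends only on the sealevel crate (plus a dev-dependency), the solana-runtime package presumably becomes a compatibility shim over sealevel. A minimal sketch of such a shim, assuming sealevel exposes the same public modules, could be as small as:

// runtime/lib.rs -- hypothetical sketch; the actual contents are not shown in this diff
pub use sealevel::*;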

File diff suppressed because it is too large


@@ -1,366 +0,0 @@
use solana_sdk::pubkey::Pubkey;
use std::collections::{HashMap, HashSet};
use std::sync::{RwLock, RwLockReadGuard};
pub type Fork = u64;
type ForkList<T> = Vec<(Fork, T)>;
#[derive(Debug, Default)]
pub struct AccountsIndex<T> {
pub account_maps: HashMap<Pubkey, RwLock<ForkList<T>>>,
pub roots: HashSet<Fork>,
// This value needs to be stored in order to recover the index from the AppendVec
pub last_root: Fork,
}
impl<T: Clone> AccountsIndex<T> {
/// call func with every pubkey and index visible from a given set of ancestors
pub fn scan_accounts<F>(&self, ancestors: &HashMap<Fork, usize>, mut func: F)
where
F: FnMut(&Pubkey, (&T, Fork)) -> (),
{
for (pubkey, list) in self.account_maps.iter() {
let list_r = list.read().unwrap();
if let Some(index) = self.latest_fork(ancestors, &list_r) {
func(pubkey, (&list_r[index].1, list_r[index].0));
}
}
}
// find the latest fork and T in a list for a given ancestor
// returns index into 'list' if found, None if not.
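// e.g. given list == [(0, a), (3, b), (5, c)], roots == {0} and ancestors == {3}: fork 5 is
// neither an ancestor nor a root, so fork 3 wins and Some(1) (its index in 'list') is returned.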
fn latest_fork(&self, ancestors: &HashMap<Fork, usize>, list: &[(Fork, T)]) -> Option<usize> {
let mut max = 0;
let mut rv = None;
for (i, (fork, _t)) in list.iter().rev().enumerate() {
if *fork >= max && (ancestors.get(fork).is_some() || self.is_root(*fork)) {
rv = Some((list.len() - 1) - i);
max = *fork;
}
}
rv
}
/// Get an account
/// The latest account that appears in `ancestors` or `roots` is returned.
pub fn get(
&self,
pubkey: &Pubkey,
ancestors: &HashMap<Fork, usize>,
) -> Option<(RwLockReadGuard<ForkList<T>>, usize)> {
self.account_maps.get(pubkey).and_then(|list| {
let lock = list.read().unwrap();
if let Some(found_index) = self.latest_fork(ancestors, &lock) {
Some((lock, found_index))
} else {
None
}
})
}
pub fn get_max_root(roots: &HashSet<Fork>, fork_vec: &[(Fork, T)]) -> Fork {
let mut max_root = 0;
for (f, _) in fork_vec.iter() {
if *f > max_root && roots.contains(f) {
max_root = *f;
}
}
max_root
}
pub fn insert(
&mut self,
fork: Fork,
pubkey: &Pubkey,
account_info: T,
reclaims: &mut Vec<(Fork, T)>,
) {
let _fork_vec = self
.account_maps
.entry(*pubkey)
.or_insert_with(|| RwLock::new(Vec::with_capacity(32)));
self.update(fork, pubkey, account_info, reclaims);
}
// Try to update an item in account_maps. If the account is not already present,
// the function returns Some(account_info); the caller can then take the write lock
// and perform an 'insert' with the item. It returns None if the item was already
// present and was therefore successfully updated.
pub fn update(
&self,
fork: Fork,
pubkey: &Pubkey,
account_info: T,
reclaims: &mut Vec<(Fork, T)>,
) -> Option<T> {
let roots = &self.roots;
if let Some(lock) = self.account_maps.get(pubkey) {
let mut fork_vec = lock.write().unwrap();
// filter out old entries
reclaims.extend(fork_vec.iter().filter(|(f, _)| *f == fork).cloned());
fork_vec.retain(|(f, _)| *f != fork);
// add the new entry
fork_vec.push((fork, account_info));
let max_root = Self::get_max_root(roots, &fork_vec);
reclaims.extend(
fork_vec
.iter()
.filter(|(fork, _)| Self::can_purge(max_root, *fork))
.cloned(),
);
fork_vec.retain(|(fork, _)| !Self::can_purge(max_root, *fork));
None
} else {
Some(account_info)
}
}
pub fn add_index(&mut self, fork: Fork, pubkey: &Pubkey, account_info: T) {
let entry = self
.account_maps
.entry(*pubkey)
.or_insert_with(|| RwLock::new(vec![]));
entry.write().unwrap().push((fork, account_info));
}
pub fn is_purged(&self, fork: Fork) -> bool {
fork < self.last_root
}
pub fn can_purge(max_root: Fork, fork: Fork) -> bool {
fork < max_root
}
pub fn is_root(&self, fork: Fork) -> bool {
self.roots.contains(&fork)
}
pub fn add_root(&mut self, fork: Fork) {
assert!(
(self.last_root == 0 && fork == 0) || (fork >= self.last_root),
"new roots must be increasing"
);
self.last_root = fork;
self.roots.insert(fork);
}
/// Remove the fork when the storage for the fork is freed
/// Accounts no longer reference this fork.
pub fn cleanup_dead_fork(&mut self, fork: Fork) {
self.roots.remove(&fork);
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::signature::{Keypair, KeypairUtil};
#[test]
fn test_get_empty() {
let key = Keypair::new();
let index = AccountsIndex::<bool>::default();
let ancestors = HashMap::new();
assert!(index.get(&key.pubkey(), &ancestors).is_none());
let mut num = 0;
index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
assert_eq!(num, 0);
}
#[test]
fn test_insert_no_ancestors() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let ancestors = HashMap::new();
assert!(index.get(&key.pubkey(), &ancestors).is_none());
let mut num = 0;
index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
assert_eq!(num, 0);
}
#[test]
fn test_insert_wrong_ancestors() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let ancestors = vec![(1, 1)].into_iter().collect();
assert!(index.get(&key.pubkey(), &ancestors).is_none());
let mut num = 0;
index.scan_accounts(&ancestors, |_pubkey, _index| num += 1);
assert_eq!(num, 0);
}
#[test]
fn test_insert_with_ancestors() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let ancestors = vec![(0, 0)].into_iter().collect();
let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
assert_eq!(list[idx], (0, true));
let mut num = 0;
let mut found_key = false;
index.scan_accounts(&ancestors, |pubkey, _index| {
if pubkey == &key.pubkey() {
found_key = true
};
num += 1
});
assert_eq!(num, 1);
assert!(found_key);
}
#[test]
fn test_is_root() {
let mut index = AccountsIndex::<bool>::default();
assert!(!index.is_root(0));
index.add_root(0);
assert!(index.is_root(0));
}
#[test]
fn test_insert_with_root() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let ancestors = vec![].into_iter().collect();
index.add_root(0);
let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
assert_eq!(list[idx], (0, true));
}
#[test]
fn test_is_purged() {
let mut index = AccountsIndex::<bool>::default();
assert!(!index.is_purged(0));
index.add_root(1);
assert!(index.is_purged(0));
}
#[test]
fn test_max_last_root() {
let mut index = AccountsIndex::<bool>::default();
index.add_root(1);
assert_eq!(index.last_root, 1);
}
#[test]
#[should_panic]
fn test_max_last_root_old() {
let mut index = AccountsIndex::<bool>::default();
index.add_root(1);
index.add_root(0);
}
#[test]
fn test_cleanup_first() {
let mut index = AccountsIndex::<bool>::default();
index.add_root(0);
index.add_root(1);
index.cleanup_dead_fork(0);
assert!(index.is_root(1));
assert!(!index.is_root(0));
}
#[test]
fn test_cleanup_last() {
//this behavior might be undefined, clean up should only occur on older forks
let mut index = AccountsIndex::<bool>::default();
index.add_root(0);
index.add_root(1);
index.cleanup_dead_fork(1);
assert!(!index.is_root(1));
assert!(index.is_root(0));
}
#[test]
fn test_update_last_wins() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let ancestors = vec![(0, 0)].into_iter().collect();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
assert_eq!(list[idx], (0, true));
drop(list);
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), false, &mut gc);
assert_eq!(gc, vec![(0, true)]);
let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
assert_eq!(list[idx], (0, false));
}
#[test]
fn test_update_new_fork() {
solana_logger::setup();
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let ancestors = vec![(0, 0)].into_iter().collect();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
index.insert(1, &key.pubkey(), false, &mut gc);
assert!(gc.is_empty());
let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
assert_eq!(list[idx], (0, true));
let ancestors = vec![(1, 0)].into_iter().collect();
let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
assert_eq!(list[idx], (1, false));
}
#[test]
fn test_update_gc_purged_fork() {
let key = Keypair::new();
let mut index = AccountsIndex::<bool>::default();
let mut gc = Vec::new();
index.insert(0, &key.pubkey(), true, &mut gc);
assert!(gc.is_empty());
index.insert(1, &key.pubkey(), false, &mut gc);
index.insert(2, &key.pubkey(), true, &mut gc);
index.insert(3, &key.pubkey(), true, &mut gc);
index.add_root(0);
index.add_root(1);
index.add_root(3);
index.insert(4, &key.pubkey(), true, &mut gc);
assert_eq!(gc, vec![(0, true), (1, false), (2, true)]);
let ancestors = vec![].into_iter().collect();
let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap();
assert_eq!(list[idx], (3, true));
let mut num = 0;
let mut found_key = false;
index.scan_accounts(&ancestors, |pubkey, _index| {
if pubkey == &key.pubkey() {
found_key = true;
assert_eq!(_index, (&true, 3));
};
num += 1
});
assert_eq!(num, 1);
assert!(found_key);
}
}


@@ -1,524 +0,0 @@
use bincode::{deserialize_from, serialize_into};
use memmap::MmapMut;
use serde::{Deserialize, Serialize};
use solana_sdk::{account::Account, clock::Epoch, hash::Hash, pubkey::Pubkey};
use std::{
fmt,
fs::{create_dir_all, remove_file, OpenOptions},
io,
io::{Cursor, Seek, SeekFrom, Write},
mem,
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
sync::Mutex,
};
//Data is aligned at the next 64 byte offset. Without alignment loading the memory may
//crash on some architectures.
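// e.g. align_up!(0, 8) == 0, align_up!(1, 8) == 8 and align_up!(13, 8) == 16; callers in this
// file pass mem::size_of::<u64>() (8 bytes) as the alignment, and $align must be a power of two.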
macro_rules! align_up {
($addr: expr, $align: expr) => {
($addr + ($align - 1)) & !($align - 1)
};
}
/// Meta contains enough context to recover the index from storage itself
#[derive(Clone, PartialEq, Debug)]
pub struct StoredMeta {
/// global write version
pub write_version: u64,
/// key for the account
pub pubkey: Pubkey,
pub data_len: u64,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default, Eq, PartialEq)]
pub struct AccountMeta {
/// lamports in the account
pub lamports: u64,
/// the program that owns this account. If executable, the program that loads this account.
pub owner: Pubkey,
/// this account's data contains a loaded program (and is now read-only)
pub executable: bool,
/// the epoch at which this account will next owe rent
pub rent_epoch: Epoch,
}
/// References into memory-mapped data
/// The Account is stored separately from its data, so getting the actual account requires a clone
#[derive(PartialEq, Debug)]
pub struct StoredAccount<'a> {
pub meta: &'a StoredMeta,
/// account data
pub account_meta: &'a AccountMeta,
pub data: &'a [u8],
pub offset: usize,
pub hash: &'a Hash,
}
impl<'a> StoredAccount<'a> {
pub fn clone_account(&self) -> Account {
Account {
lamports: self.account_meta.lamports,
owner: self.account_meta.owner,
executable: self.account_meta.executable,
rent_epoch: self.account_meta.rent_epoch,
data: self.data.to_vec(),
hash: *self.hash,
}
}
}
#[derive(Debug)]
#[allow(clippy::mutex_atomic)]
pub struct AppendVec {
path: PathBuf,
map: MmapMut,
// This mutex forces append to be single threaded, but concurrent with reads
#[allow(clippy::mutex_atomic)]
append_offset: Mutex<usize>,
current_len: AtomicUsize,
file_size: u64,
}
impl Drop for AppendVec {
fn drop(&mut self) {
let _ignored = remove_file(&self.path);
}
}
impl AppendVec {
#[allow(clippy::mutex_atomic)]
pub fn new(file: &Path, create: bool, size: usize) -> Self {
if create {
let _ignored = remove_file(file);
if let Some(parent) = file.parent() {
create_dir_all(parent).expect("Create directory failed");
}
}
let mut data = OpenOptions::new()
.read(true)
.write(true)
.create(create)
.open(file)
.map_err(|e| {
let mut msg = format!("in current dir {:?}\n", std::env::current_dir());
for ancestor in file.ancestors() {
msg.push_str(&format!(
"{:?} is {:?}\n",
ancestor,
std::fs::metadata(ancestor)
));
}
panic!(
"{}Unable to {} data file {}, err {:?}",
msg,
if create { "create" } else { "open" },
file.display(),
e
);
})
.unwrap();
data.seek(SeekFrom::Start((size - 1) as u64)).unwrap();
data.write_all(&[0]).unwrap();
data.seek(SeekFrom::Start(0)).unwrap();
data.flush().unwrap();
//UNSAFE: Required to create a Mmap
let map = unsafe { MmapMut::map_mut(&data).expect("failed to map the data file") };
AppendVec {
path: file.to_path_buf(),
map,
// This mutex forces append to be single threaded, but concurrent with reads
// See UNSAFE usage in `append_ptr`
append_offset: Mutex::new(0),
current_len: AtomicUsize::new(0),
file_size: size as u64,
}
}
pub fn flush(&self) -> io::Result<()> {
self.map.flush()
}
#[allow(clippy::mutex_atomic)]
pub fn reset(&self) {
// This mutex forces append to be single threaded, but concurrent with reads
// See UNSAFE usage in `append_ptr`
let mut offset = self.append_offset.lock().unwrap();
self.current_len.store(0, Ordering::Relaxed);
*offset = 0;
}
pub fn len(&self) -> usize {
self.current_len.load(Ordering::Relaxed)
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn capacity(&self) -> u64 {
self.file_size
}
// Get the file path relative to the top level accounts directory
pub fn get_relative_path<P: AsRef<Path>>(append_vec_path: P) -> Option<PathBuf> {
append_vec_path.as_ref().file_name().map(PathBuf::from)
}
pub fn new_relative_path(fork_id: u64, id: usize) -> PathBuf {
PathBuf::from(&format!("{}.{}", fork_id, id))
}
#[allow(clippy::mutex_atomic)]
pub fn set_file<P: AsRef<Path>>(&mut self, path: P) -> io::Result<()> {
self.path = path.as_ref().to_path_buf();
let data = OpenOptions::new()
.read(true)
.write(true)
.create(false)
.open(&path)?;
let map = unsafe { MmapMut::map_mut(&data)? };
self.map = map;
Ok(())
}
fn get_slice(&self, offset: usize, size: usize) -> Option<(&[u8], usize)> {
let len = self.len();
if len < offset + size {
return None;
}
let data = &self.map[offset..offset + size];
//Data is aligned at the next 64 byte offset. Without alignment loading the memory may
//crash on some architectures.
let next = align_up!(offset + size, mem::size_of::<u64>());
Some((
//UNSAFE: This unsafe creates a slice that represents a chunk of self.map memory
//The lifetime of this slice is tied to &self, since it points to self.map memory
unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, size) },
next,
))
}
fn append_ptr(&self, offset: &mut usize, src: *const u8, len: usize) {
//Data is aligned at the next 64 byte offset. Without alignment loading the memory may
//crash on some architectures.
let pos = align_up!(*offset as usize, mem::size_of::<u64>());
let data = &self.map[pos..(pos + len)];
//UNSAFE: This mut append is safe because only 1 thread can append at a time
//Mutex<append_offset> guarantees exclusive write access to the memory occupied in
//the range.
unsafe {
let dst = data.as_ptr() as *mut u8;
std::ptr::copy(src, dst, len);
};
*offset = pos + len;
}
fn append_ptrs_locked(&self, offset: &mut usize, vals: &[(*const u8, usize)]) -> Option<usize> {
let mut end = *offset;
for val in vals {
//Data is aligned at the next 64 byte offset. Without alignment loading the memory may
//crash on some architectures.
end = align_up!(end, mem::size_of::<u64>());
end += val.1;
}
if (self.file_size as usize) < end {
return None;
}
//Data is aligned at the next 64 byte offset. Without alignment loading the memory may
//crash on some architectures.
let pos = align_up!(*offset, mem::size_of::<u64>());
for val in vals {
self.append_ptr(offset, val.0, val.1)
}
self.current_len.store(*offset, Ordering::Relaxed);
Some(pos)
}
fn get_type<'a, T>(&self, offset: usize) -> Option<(&'a T, usize)> {
let (data, next) = self.get_slice(offset, mem::size_of::<T>())?;
let ptr: *const T = data.as_ptr() as *const T;
//UNSAFE: The cast is safe because the slice is aligned and fits into the memory
//and the lifetime of the &T is tied to self, which holds the underlying memory map
Some((unsafe { &*ptr }, next))
}
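// An account entry in the map is laid out as: StoredMeta | AccountMeta | Hash | data
// (meta.data_len bytes), with each piece aligned up to the next u64 boundary by
// append_ptrs_locked; get_account below walks the same layout.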
pub fn get_account<'a>(&'a self, offset: usize) -> Option<(StoredAccount<'a>, usize)> {
let (meta, next): (&'a StoredMeta, _) = self.get_type(offset)?;
let (account_meta, next): (&'a AccountMeta, _) = self.get_type(next)?;
let (hash, next): (&'a Hash, _) = self.get_type(next)?;
let (data, next) = self.get_slice(next, meta.data_len as usize)?;
Some((
StoredAccount {
meta,
account_meta,
data,
offset,
hash,
},
next,
))
}
pub fn get_account_test(&self, offset: usize) -> Option<(StoredMeta, Account)> {
let (stored_account, _) = self.get_account(offset)?;
let meta = stored_account.meta.clone();
Some((meta, stored_account.clone_account()))
}
pub fn get_path(&self) -> PathBuf {
self.path.clone()
}
pub fn accounts<'a>(&'a self, mut start: usize) -> Vec<StoredAccount<'a>> {
let mut accounts = vec![];
while let Some((account, next)) = self.get_account(start) {
accounts.push(account);
start = next;
}
accounts
}
#[allow(clippy::mutex_atomic)]
pub fn append_accounts(
&self,
accounts: &[(StoredMeta, &Account)],
hashes: &[Hash],
) -> Vec<usize> {
let mut offset = self.append_offset.lock().unwrap();
let mut rv = vec![];
for ((stored_meta, account), hash) in accounts.iter().zip(hashes) {
let meta_ptr = stored_meta as *const StoredMeta;
let account_meta = AccountMeta {
lamports: account.lamports,
owner: account.owner,
executable: account.executable,
rent_epoch: account.rent_epoch,
};
let account_meta_ptr = &account_meta as *const AccountMeta;
let data_len = stored_meta.data_len as usize;
let data_ptr = account.data.as_ptr();
let hash_ptr = hash.as_ref().as_ptr();
let ptrs = [
(meta_ptr as *const u8, mem::size_of::<StoredMeta>()),
(account_meta_ptr as *const u8, mem::size_of::<AccountMeta>()),
(hash_ptr as *const u8, mem::size_of::<Hash>()),
(data_ptr, data_len),
];
if let Some(res) = self.append_ptrs_locked(&mut offset, &ptrs) {
rv.push(res)
} else {
break;
}
}
rv
}
pub fn append_account(
&self,
storage_meta: StoredMeta,
account: &Account,
hash: Hash,
) -> Option<usize> {
self.append_accounts(&[(storage_meta, account)], &[hash])
.first()
.cloned()
}
}
pub mod test_utils {
use super::StoredMeta;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use std::fs::create_dir_all;
use std::path::PathBuf;
pub struct TempFile {
pub path: PathBuf,
}
impl Drop for TempFile {
fn drop(&mut self) {
let mut path = PathBuf::new();
std::mem::swap(&mut path, &mut self.path);
let _ignored = std::fs::remove_file(path);
}
}
pub fn get_append_vec_dir() -> String {
std::env::var("FARF_DIR").unwrap_or_else(|_| "farf/append_vec_tests".to_string())
}
pub fn get_append_vec_path(path: &str) -> TempFile {
let out_dir = get_append_vec_dir();
let rand_string: String = thread_rng().sample_iter(&Alphanumeric).take(30).collect();
let dir = format!("{}/{}", out_dir, rand_string);
let mut buf = PathBuf::new();
buf.push(&format!("{}/{}", dir, path));
create_dir_all(dir).expect("Create directory failed");
TempFile { path: buf }
}
pub fn create_test_account(sample: usize) -> (StoredMeta, Account) {
let data_len = sample % 256;
let mut account = Account::new(sample as u64, 0, &Pubkey::default());
account.data = (0..data_len).map(|_| data_len as u8).collect();
let stored_meta = StoredMeta {
write_version: 0,
pubkey: Pubkey::default(),
data_len: data_len as u64,
};
(stored_meta, account)
}
}
#[allow(clippy::mutex_atomic)]
impl Serialize for AppendVec {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
use serde::ser::Error;
let len = std::mem::size_of::<usize>() as u64;
let mut buf = vec![0u8; len as usize];
let mut wr = Cursor::new(&mut buf[..]);
serialize_into(&mut wr, &(self.current_len.load(Ordering::Relaxed) as u64))
.map_err(Error::custom)?;
let len = wr.position() as usize;
serializer.serialize_bytes(&wr.into_inner()[..len])
}
}
struct AppendVecVisitor;
impl<'a> serde::de::Visitor<'a> for AppendVecVisitor {
type Value = AppendVec;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Expecting AppendVec")
}
#[allow(clippy::mutex_atomic)]
// Note this does not initialize a valid Mmap in the AppendVec, needs to be done
// externally
fn visit_bytes<E>(self, data: &[u8]) -> std::result::Result<Self::Value, E>
where
E: serde::de::Error,
{
use serde::de::Error;
let mut rd = Cursor::new(&data[..]);
let current_len: usize = deserialize_from(&mut rd).map_err(Error::custom)?;
let map = MmapMut::map_anon(1).map_err(|e| Error::custom(e.to_string()))?;
Ok(AppendVec {
path: PathBuf::from(String::default()),
map,
append_offset: Mutex::new(current_len),
current_len: AtomicUsize::new(current_len),
file_size: current_len as u64,
})
}
}
impl<'de> Deserialize<'de> for AppendVec {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: ::serde::Deserializer<'de>,
{
deserializer.deserialize_bytes(AppendVecVisitor)
}
}
#[cfg(test)]
pub mod tests {
use super::test_utils::*;
use super::*;
use log::*;
use rand::{thread_rng, Rng};
use solana_sdk::timing::duration_as_ms;
use std::time::Instant;
impl AppendVec {
fn append_account_test(&self, data: &(StoredMeta, Account)) -> Option<usize> {
self.append_account(data.0.clone(), &data.1, Hash::default())
}
}
#[test]
fn test_append_vec_one() {
let path = get_append_vec_path("test_append");
let av = AppendVec::new(&path.path, true, 1024 * 1024);
let account = create_test_account(0);
let index = av.append_account_test(&account).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account);
}
#[test]
fn test_append_vec_data() {
let path = get_append_vec_path("test_append_data");
let av = AppendVec::new(&path.path, true, 1024 * 1024);
let account = create_test_account(5);
let index = av.append_account_test(&account).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account);
let account1 = create_test_account(6);
let index1 = av.append_account_test(&account1).unwrap();
assert_eq!(av.get_account_test(index).unwrap(), account);
assert_eq!(av.get_account_test(index1).unwrap(), account1);
}
#[test]
fn test_append_vec_append_many() {
let path = get_append_vec_path("test_append_many");
let av = AppendVec::new(&path.path, true, 1024 * 1024);
let size = 1000;
let mut indexes = vec![];
let now = Instant::now();
for sample in 0..size {
let account = create_test_account(sample);
let pos = av.append_account_test(&account).unwrap();
assert_eq!(av.get_account_test(pos).unwrap(), account);
indexes.push(pos)
}
trace!("append time: {} ms", duration_as_ms(&now.elapsed()),);
let now = Instant::now();
for _ in 0..size {
let sample = thread_rng().gen_range(0, indexes.len());
let account = create_test_account(sample);
assert_eq!(av.get_account_test(indexes[sample]).unwrap(), account);
}
trace!("random read time: {} ms", duration_as_ms(&now.elapsed()),);
let now = Instant::now();
assert_eq!(indexes.len(), size);
assert_eq!(indexes[0], 0);
let mut accounts = av.accounts(indexes[0]);
assert_eq!(accounts.len(), size);
for (sample, v) in accounts.iter_mut().enumerate() {
let account = create_test_account(sample);
let recovered = v.clone_account();
assert_eq!(recovered, account.1)
}
trace!(
"sequential read time: {} ms",
duration_as_ms(&now.elapsed()),
);
}
#[test]
fn test_relative_path() {
let relative_path = AppendVec::new_relative_path(0, 2);
let full_path = Path::new("/tmp").join(&relative_path);
assert_eq!(
relative_path,
AppendVec::get_relative_path(full_path).unwrap()
);
}
}

File diff suppressed because it is too large


@@ -1,246 +0,0 @@
use crate::bank::Bank;
use solana_sdk::account::Account;
use solana_sdk::client::{AsyncClient, Client, SyncClient};
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::hash::Hash;
use solana_sdk::instruction::Instruction;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signature;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_instruction;
use solana_sdk::transaction::{self, Transaction};
use solana_sdk::transport::{Result, TransportError};
use std::io;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::sync::Mutex;
use std::thread::{sleep, Builder};
use std::time::{Duration, Instant};
pub struct BankClient {
bank: Arc<Bank>,
transaction_sender: Mutex<Sender<Transaction>>,
}
impl Client for BankClient {
fn tpu_addr(&self) -> String {
"Local BankClient".to_string()
}
}
impl AsyncClient for BankClient {
fn async_send_transaction(&self, transaction: Transaction) -> io::Result<Signature> {
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
let transaction_sender = self.transaction_sender.lock().unwrap();
transaction_sender.send(transaction).unwrap();
Ok(signature)
}
fn async_send_message(
&self,
keypairs: &[&Keypair],
message: Message,
recent_blockhash: Hash,
) -> io::Result<Signature> {
let transaction = Transaction::new(&keypairs, message, recent_blockhash);
self.async_send_transaction(transaction)
}
fn async_send_instruction(
&self,
keypair: &Keypair,
instruction: Instruction,
recent_blockhash: Hash,
) -> io::Result<Signature> {
let message = Message::new(vec![instruction]);
self.async_send_message(&[keypair], message, recent_blockhash)
}
/// Transfer `lamports` from `keypair` to `pubkey`
fn async_transfer(
&self,
lamports: u64,
keypair: &Keypair,
pubkey: &Pubkey,
recent_blockhash: Hash,
) -> io::Result<Signature> {
let transfer_instruction =
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
self.async_send_instruction(keypair, transfer_instruction, recent_blockhash)
}
}
impl SyncClient for BankClient {
fn send_message(&self, keypairs: &[&Keypair], message: Message) -> Result<Signature> {
let blockhash = self.bank.last_blockhash();
let transaction = Transaction::new(&keypairs, message, blockhash);
self.bank.process_transaction(&transaction)?;
Ok(transaction.signatures.get(0).cloned().unwrap_or_default())
}
/// Create and process a transaction from a single instruction.
fn send_instruction(&self, keypair: &Keypair, instruction: Instruction) -> Result<Signature> {
let message = Message::new(vec![instruction]);
self.send_message(&[keypair], message)
}
/// Transfer `lamports` from `keypair` to `pubkey`
fn transfer(&self, lamports: u64, keypair: &Keypair, pubkey: &Pubkey) -> Result<Signature> {
let transfer_instruction =
system_instruction::transfer(&keypair.pubkey(), pubkey, lamports);
self.send_instruction(keypair, transfer_instruction)
}
fn get_account_data(&self, pubkey: &Pubkey) -> Result<Option<Vec<u8>>> {
Ok(self.bank.get_account(pubkey).map(|account| account.data))
}
fn get_account(&self, pubkey: &Pubkey) -> Result<Option<Account>> {
Ok(self.bank.get_account(pubkey))
}
fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
Ok(self.bank.get_balance(pubkey))
}
fn get_recent_blockhash(&self) -> Result<(Hash, FeeCalculator)> {
Ok(self.bank.last_blockhash_with_fee_calculator())
}
fn get_signature_status(
&self,
signature: &Signature,
) -> Result<Option<transaction::Result<()>>> {
Ok(self.bank.get_signature_status(signature))
}
fn get_slot(&self) -> Result<u64> {
Ok(self.bank.slot())
}
fn get_transaction_count(&self) -> Result<u64> {
Ok(self.bank.transaction_count())
}
fn poll_for_signature_confirmation(
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> Result<usize> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
let response = self.bank.get_signature_confirmation_status(signature);
if let Some((confirmations, res)) = response {
if res.is_ok() {
if confirmed_blocks != confirmations {
now = Instant::now();
confirmed_blocks = confirmations;
}
if confirmations >= min_confirmed_blocks {
break;
}
}
};
if now.elapsed().as_secs() > 15 {
// TODO: Return a better error.
return Err(TransportError::IoError(io::Error::new(
io::ErrorKind::Other,
"signature not found",
)));
}
sleep(Duration::from_millis(250));
}
Ok(confirmed_blocks)
}
fn poll_for_signature(&self, signature: &Signature) -> Result<()> {
let now = Instant::now();
loop {
let response = self.bank.get_signature_status(signature);
if let Some(res) = response {
if res.is_ok() {
break;
}
}
if now.elapsed().as_secs() > 15 {
// TODO: Return a better error.
return Err(TransportError::IoError(io::Error::new(
io::ErrorKind::Other,
"signature not found",
)));
}
sleep(Duration::from_millis(250));
}
Ok(())
}
fn get_new_blockhash(&self, blockhash: &Hash) -> Result<(Hash, FeeCalculator)> {
let (last_blockhash, fee_calculator) = self.get_recent_blockhash()?;
if last_blockhash != *blockhash {
Ok((last_blockhash, fee_calculator))
} else {
Err(TransportError::IoError(io::Error::new(
io::ErrorKind::Other,
"Unable to get new blockhash",
)))
}
}
}
impl BankClient {
fn run(bank: &Bank, transaction_receiver: Receiver<Transaction>) {
while let Ok(tx) = transaction_receiver.recv() {
let _ = bank.process_transaction(&tx);
}
}
pub fn new_shared(bank: &Arc<Bank>) -> Self {
let (transaction_sender, transaction_receiver) = channel();
let transaction_sender = Mutex::new(transaction_sender);
let thread_bank = bank.clone();
let bank = bank.clone();
Builder::new()
.name("solana-bank-client".to_string())
.spawn(move || Self::run(&thread_bank, transaction_receiver))
.unwrap();
Self {
bank,
transaction_sender,
}
}
pub fn new(bank: Bank) -> Self {
Self::new_shared(&Arc::new(bank))
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::instruction::AccountMeta;
#[test]
fn test_bank_client_new_with_keypairs() {
let (genesis_block, john_doe_keypair) = create_genesis_block(10_000);
let john_pubkey = john_doe_keypair.pubkey();
let jane_doe_keypair = Keypair::new();
let jane_pubkey = jane_doe_keypair.pubkey();
let doe_keypairs = vec![&john_doe_keypair, &jane_doe_keypair];
let bank = Bank::new(&genesis_block);
let bank_client = BankClient::new(bank);
// Create 2-2 Multisig Transfer instruction.
let bob_pubkey = Pubkey::new_rand();
let mut transfer_instruction = system_instruction::transfer(&john_pubkey, &bob_pubkey, 42);
transfer_instruction
.accounts
.push(AccountMeta::new(jane_pubkey, true));
let message = Message::new(vec![transfer_instruction]);
bank_client.send_message(&doe_keypairs, message).unwrap();
assert_eq!(bank_client.get_balance(&bob_pubkey).unwrap(), 42);
}
}


@@ -1,153 +0,0 @@
use serde::{Deserialize, Serialize};
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::hash::Hash;
use solana_sdk::timing::timestamp;
use std::collections::HashMap;
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
struct HashAge {
fee_calculator: FeeCalculator,
hash_height: u64,
timestamp: u64,
}
/// Low memory overhead, so can be cloned for every checkpoint
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlockhashQueue {
/// updated whenever a hash is registered
hash_height: u64,
/// last hash to be registered
last_hash: Option<Hash>,
ages: HashMap<Hash, HashAge>,
/// hashes older than `max_age` will be dropped from the queue
max_age: usize,
}
impl BlockhashQueue {
pub fn new(max_age: usize) -> Self {
Self {
ages: HashMap::new(),
hash_height: 0,
last_hash: None,
max_age,
}
}
#[allow(dead_code)]
pub fn hash_height(&self) -> u64 {
self.hash_height
}
pub fn last_hash(&self) -> Hash {
self.last_hash.expect("no hash has been set")
}
pub fn get_fee_calculator(&self, hash: &Hash) -> Option<&FeeCalculator> {
self.ages.get(hash).map(|hash_age| &hash_age.fee_calculator)
}
/// Check if the age of the hash is within the max_age
/// return false for any hashes with an age above max_age
pub fn check_hash_age(&self, hash: &Hash, max_age: usize) -> bool {
let hash_age = self.ages.get(hash);
match hash_age {
Some(age) => self.hash_height - age.hash_height <= max_age as u64,
_ => false,
}
}
/// check if hash is valid
#[cfg(test)]
pub fn check_hash(&self, hash: Hash) -> bool {
self.ages.get(&hash).is_some()
}
pub fn genesis_hash(&mut self, hash: &Hash, fee_calculator: &FeeCalculator) {
self.ages.insert(
*hash,
HashAge {
fee_calculator: fee_calculator.clone(),
hash_height: 0,
timestamp: timestamp(),
},
);
self.last_hash = Some(*hash);
}
fn check_age(hash_height: u64, max_age: usize, age: &HashAge) -> bool {
hash_height - age.hash_height <= max_age as u64
}
pub fn register_hash(&mut self, hash: &Hash, fee_calculator: &FeeCalculator) {
self.hash_height += 1;
let hash_height = self.hash_height;
// this clean up can be deferred until sigs gets larger
// because we verify age.nth every place we check for validity
let max_age = self.max_age;
if self.ages.len() >= max_age {
self.ages
.retain(|_, age| Self::check_age(hash_height, max_age, age));
}
self.ages.insert(
*hash,
HashAge {
fee_calculator: fee_calculator.clone(),
hash_height,
timestamp: timestamp(),
},
);
self.last_hash = Some(*hash);
}
/// Maps a hash height to a timestamp
pub fn hash_height_to_timestamp(&self, hash_height: u64) -> Option<u64> {
for age in self.ages.values() {
if age.hash_height == hash_height {
return Some(age.timestamp);
}
}
None
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use solana_sdk::hash::hash;
#[test]
fn test_register_hash() {
let last_hash = Hash::default();
let mut hash_queue = BlockhashQueue::new(100);
assert!(!hash_queue.check_hash(last_hash));
hash_queue.register_hash(&last_hash, &FeeCalculator::default());
assert!(hash_queue.check_hash(last_hash));
assert_eq!(hash_queue.hash_height(), 1);
}
#[test]
fn test_reject_old_last_hash() {
let mut hash_queue = BlockhashQueue::new(100);
let last_hash = hash(&serialize(&0).unwrap());
for i in 0..102 {
let last_hash = hash(&serialize(&i).unwrap());
hash_queue.register_hash(&last_hash, &FeeCalculator::default());
}
// Assert we're no longer able to use the oldest hash.
assert!(!hash_queue.check_hash(last_hash));
}
/// test that when max age is 0, a valid last_hash still passes the age check
#[test]
fn test_queue_init_blockhash() {
let last_hash = Hash::default();
let mut hash_queue = BlockhashQueue::new(100);
hash_queue.register_hash(&last_hash, &FeeCalculator::default());
assert_eq!(last_hash, hash_queue.last_hash());
assert!(hash_queue.check_hash_age(&last_hash, 0));
}
}


@@ -1,160 +0,0 @@
//! Simple Bloom Filter
use bv::BitVec;
use fnv::FnvHasher;
use rand::{self, Rng};
use serde::{Deserialize, Serialize};
use std::cmp;
use std::hash::Hasher;
use std::marker::PhantomData;
/// Generate a stable hash of `self` for each `hash_index`
/// A best effort is made to keep each hash unique.
pub trait BloomHashIndex {
fn hash_at_index(&self, hash_index: u64) -> u64;
}
#[derive(Serialize, Deserialize, Default, Clone, Debug, PartialEq)]
pub struct Bloom<T: BloomHashIndex> {
pub keys: Vec<u64>,
pub bits: BitVec<u64>,
num_bits_set: u64,
_phantom: PhantomData<T>,
}
impl<T: BloomHashIndex> Bloom<T> {
pub fn new(num_bits: usize, keys: Vec<u64>) -> Self {
let bits = BitVec::new_fill(false, num_bits as u64);
Bloom {
keys,
bits,
num_bits_set: 0,
_phantom: PhantomData::default(),
}
}
/// create a filter of optimal size for `num_items` given the target `false_rate`;
/// the keys are randomized for picking data out of a collision resistant hash of size
/// `keysize` bytes
/// https://hur.st/bloomfilter/
pub fn random(num_items: usize, false_rate: f64, max_bits: usize) -> Self {
let m = Self::num_bits(num_items as f64, false_rate);
let num_bits = cmp::max(1, cmp::min(m as usize, max_bits));
let num_keys = Self::num_keys(num_bits as f64, num_items as f64) as usize;
let keys: Vec<u64> = (0..num_keys).map(|_| rand::thread_rng().gen()).collect();
Self::new(num_bits, keys)
}
pub fn num_bits(num_items: f64, false_rate: f64) -> f64 {
let n = num_items;
let p = false_rate;
((n * p.ln()) / (1f64 / 2f64.powf(2f64.ln())).ln()).ceil()
}
pub fn num_keys(num_bits: f64, num_items: f64) -> f64 {
let n = num_items;
let m = num_bits;
1f64.max(((m / n) * 2f64.ln()).round())
}
fn pos(&self, key: &T, k: u64) -> u64 {
key.hash_at_index(k) % self.bits.len()
}
pub fn clear(&mut self) {
self.bits = BitVec::new_fill(false, self.bits.len());
self.num_bits_set = 0;
}
pub fn add(&mut self, key: &T) {
for k in &self.keys {
let pos = self.pos(key, *k);
if !self.bits.get(pos) {
self.num_bits_set += 1;
self.bits.set(pos, true);
}
}
}
pub fn contains(&self, key: &T) -> bool {
for k in &self.keys {
let pos = self.pos(key, *k);
if !self.bits.get(pos) {
return false;
}
}
true
}
}
fn slice_hash(slice: &[u8], hash_index: u64) -> u64 {
let mut hasher = FnvHasher::with_key(hash_index);
hasher.write(slice);
hasher.finish()
}
impl<T: AsRef<[u8]>> BloomHashIndex for T {
fn hash_at_index(&self, hash_index: u64) -> u64 {
slice_hash(self.as_ref(), hash_index)
}
}
#[cfg(test)]
mod test {
use super::*;
use solana_sdk::hash::{hash, Hash};
#[test]
fn test_bloom_filter() {
//empty
let bloom: Bloom<Hash> = Bloom::random(0, 0.1, 100);
assert_eq!(bloom.keys.len(), 0);
assert_eq!(bloom.bits.len(), 1);
//normal
let bloom: Bloom<Hash> = Bloom::random(10, 0.1, 100);
assert_eq!(bloom.keys.len(), 3);
assert_eq!(bloom.bits.len(), 48);
//saturated
let bloom: Bloom<Hash> = Bloom::random(100, 0.1, 100);
assert_eq!(bloom.keys.len(), 1);
assert_eq!(bloom.bits.len(), 100);
}
#[test]
fn test_add_contains() {
let mut bloom: Bloom<Hash> = Bloom::random(100, 0.1, 100);
//known keys to avoid false positives in the test
bloom.keys = vec![0, 1, 2, 3];
let key = hash(b"hello");
assert!(!bloom.contains(&key));
bloom.add(&key);
assert!(bloom.contains(&key));
let key = hash(b"world");
assert!(!bloom.contains(&key));
bloom.add(&key);
assert!(bloom.contains(&key));
}
#[test]
fn test_random() {
let mut b1: Bloom<Hash> = Bloom::random(10, 0.1, 100);
let mut b2: Bloom<Hash> = Bloom::random(10, 0.1, 100);
b1.keys.sort();
b2.keys.sort();
assert_ne!(b1.keys, b2.keys);
}
// Bloom filter math in python
// n number of items
// p false rate
// m number of bits
// k number of keys
//
// n = ceil(m / (-k / log(1 - exp(log(p) / k))))
// p = pow(1 - exp(-k / (m / n)), k)
// m = ceil((n * log(p)) / log(1 / pow(2, log(2))));
// k = round((m / n) * log(2));
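// e.g. n = 100, p = 0.1: m = num_bits(100.0, 0.1) = 480 (asserted in test_filter_math below)
// and k = num_keys(480.0, 100.0) = round(4.8 * log(2)) = 3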
#[test]
fn test_filter_math() {
assert_eq!(Bloom::<Hash>::num_bits(100f64, 0.1f64) as u64, 480u64);
assert_eq!(Bloom::<Hash>::num_bits(100f64, 0.01f64) as u64, 959u64);
assert_eq!(Bloom::<Hash>::num_keys(1000f64, 50f64) as u64, 14u64);
assert_eq!(Bloom::<Hash>::num_keys(2000f64, 50f64) as u64, 28u64);
assert_eq!(Bloom::<Hash>::num_keys(2000f64, 25f64) as u64, 55u64);
//ensure min keys is 1
assert_eq!(Bloom::<Hash>::num_keys(20f64, 1000f64) as u64, 1u64);
}
}


@@ -1,169 +0,0 @@
use solana_vote_api::vote_state::MAX_LOCKOUT_HISTORY;
pub const MINIMUM_SLOTS_PER_EPOCH: u64 = (MAX_LOCKOUT_HISTORY + 1) as u64;
#[derive(Default, Debug, Clone, Copy, PartialEq, Deserialize, Serialize)]
pub struct EpochSchedule {
/// The maximum number of slots in each epoch.
pub slots_per_epoch: u64,
/// A number of slots before slot_index 0. Used to calculate finalized staked nodes.
pub stakers_slot_offset: u64,
/// basically: log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
pub first_normal_epoch: u64,
/// basically: MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)
pub first_normal_slot: u64,
}
impl EpochSchedule {
pub fn new(slots_per_epoch: u64, stakers_slot_offset: u64, warmup: bool) -> Self {
assert!(slots_per_epoch >= MINIMUM_SLOTS_PER_EPOCH as u64);
let (first_normal_epoch, first_normal_slot) = if warmup {
let next_power_of_two = slots_per_epoch.next_power_of_two();
let log2_slots_per_epoch = next_power_of_two
.trailing_zeros()
.saturating_sub(MINIMUM_SLOTS_PER_EPOCH.trailing_zeros());
(
u64::from(log2_slots_per_epoch),
next_power_of_two.saturating_sub(MINIMUM_SLOTS_PER_EPOCH),
)
} else {
(0, 0)
};
EpochSchedule {
slots_per_epoch,
stakers_slot_offset,
first_normal_epoch,
first_normal_slot,
}
}
/// get the length of the given epoch (in slots)
pub fn get_slots_in_epoch(&self, epoch: u64) -> u64 {
if epoch < self.first_normal_epoch {
2u64.pow(epoch as u32 + MINIMUM_SLOTS_PER_EPOCH.trailing_zeros() as u32)
} else {
self.slots_per_epoch
}
}
/// get the epoch for which the given slot should save off
/// information about stakers
pub fn get_stakers_epoch(&self, slot: u64) -> u64 {
if slot < self.first_normal_slot {
// until we get to normal slots, behave as if stakers_slot_offset == slots_per_epoch
self.get_epoch_and_slot_index(slot).0 + 1
} else {
self.first_normal_epoch
+ (slot - self.first_normal_slot + self.stakers_slot_offset) / self.slots_per_epoch
}
}
/// get epoch for the given slot
pub fn get_epoch(&self, slot: u64) -> u64 {
self.get_epoch_and_slot_index(slot).0
}
/// get epoch and offset into the epoch for the given slot
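/// Worked example (assuming MINIMUM_SLOTS_PER_EPOCH == 32): warmup epochs hold 32, 64,
/// 128, ... slots, so epoch boundaries fall at 32 * (2^epoch - 1); slot 40 gives
/// (40 + 32 + 1).next_power_of_two() == 128, i.e. epoch 1, offset 40 - (64 - 32) == 8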
pub fn get_epoch_and_slot_index(&self, slot: u64) -> (u64, u64) {
if slot < self.first_normal_slot {
let epoch = (slot + MINIMUM_SLOTS_PER_EPOCH + 1)
.next_power_of_two()
.trailing_zeros()
- MINIMUM_SLOTS_PER_EPOCH.trailing_zeros()
- 1;
let epoch_len = 2u64.pow(epoch + MINIMUM_SLOTS_PER_EPOCH.trailing_zeros());
(
u64::from(epoch),
slot - (epoch_len - MINIMUM_SLOTS_PER_EPOCH),
)
} else {
(
self.first_normal_epoch + ((slot - self.first_normal_slot) / self.slots_per_epoch),
(slot - self.first_normal_slot) % self.slots_per_epoch,
)
}
}
pub fn get_first_slot_in_epoch(&self, epoch: u64) -> u64 {
if epoch <= self.first_normal_epoch {
(2u64.pow(epoch as u32) - 1) * MINIMUM_SLOTS_PER_EPOCH
} else {
(epoch - self.first_normal_epoch) * self.slots_per_epoch + self.first_normal_slot
}
}
pub fn get_last_slot_in_epoch(&self, epoch: u64) -> u64 {
self.get_first_slot_in_epoch(epoch) + self.get_slots_in_epoch(epoch) - 1
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_epoch_schedule() {
// one week of slots at 8 ticks/slot, 10 ticks/sec is
// (1 * 7 * 24 * 4500u64).next_power_of_two();
// test values between MINIMUM_SLOTS_PER_EPOCH and MINIMUM_SLOTS_PER_EPOCH * 16, should cover a good mix
for slots_per_epoch in MINIMUM_SLOTS_PER_EPOCH..=MINIMUM_SLOTS_PER_EPOCH * 16 {
let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
assert_eq!(epoch_schedule.get_first_slot_in_epoch(0), 0);
assert_eq!(
epoch_schedule.get_last_slot_in_epoch(0),
MINIMUM_SLOTS_PER_EPOCH - 1
);
let mut last_stakers = 0;
let mut last_epoch = 0;
let mut last_slots_in_epoch = MINIMUM_SLOTS_PER_EPOCH;
for slot in 0..(2 * slots_per_epoch) {
// verify that stakers_epoch is continuous over the warmup
// and into the first normal epoch
let stakers = epoch_schedule.get_stakers_epoch(slot);
if stakers != last_stakers {
assert_eq!(stakers, last_stakers + 1);
last_stakers = stakers;
}
let (epoch, offset) = epoch_schedule.get_epoch_and_slot_index(slot);
// verify that epoch increases continuously
if epoch != last_epoch {
assert_eq!(epoch, last_epoch + 1);
last_epoch = epoch;
assert_eq!(epoch_schedule.get_first_slot_in_epoch(epoch), slot);
assert_eq!(epoch_schedule.get_last_slot_in_epoch(epoch - 1), slot - 1);
// verify that slots in an epoch double continuously
// until they reach slots_per_epoch
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
if slots_in_epoch != last_slots_in_epoch {
if slots_in_epoch != slots_per_epoch {
assert_eq!(slots_in_epoch, last_slots_in_epoch * 2);
}
}
last_slots_in_epoch = slots_in_epoch;
}
// verify that the slot offset is less than slots_in_epoch
assert!(offset < last_slots_in_epoch);
}
// assert that these changed ;)
assert!(last_stakers != 0);
assert!(last_epoch != 0);
// assert that we got to "normal" mode
assert!(last_slots_in_epoch == slots_per_epoch);
}
}
}

View File

@ -1,85 +0,0 @@
use solana_sdk::{
account::Account,
fee_calculator::FeeCalculator,
genesis_block::{Builder, GenesisBlock},
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
system_program::{self, solana_system_program},
};
use solana_stake_api::stake_state;
use solana_vote_api::vote_state;
// The default stake placed with the bootstrap leader
pub const BOOTSTRAP_LEADER_LAMPORTS: u64 = 42;
pub struct GenesisBlockInfo {
pub genesis_block: GenesisBlock,
pub mint_keypair: Keypair,
pub voting_keypair: Keypair,
}
pub fn create_genesis_block(mint_lamports: u64) -> GenesisBlockInfo {
create_genesis_block_with_leader(mint_lamports, &Pubkey::new_rand(), 0)
}
pub fn create_genesis_block_with_leader(
mint_lamports: u64,
bootstrap_leader_pubkey: &Pubkey,
bootstrap_leader_stake_lamports: u64,
) -> GenesisBlockInfo {
let bootstrap_leader_lamports = BOOTSTRAP_LEADER_LAMPORTS; // TODO: pass this in as an argument?
let mint_keypair = Keypair::new();
let voting_keypair = Keypair::new();
let staking_keypair = Keypair::new();
// TODO: de-duplicate the stake... once passive staking is fully implemented
let vote_account = vote_state::create_account(
&voting_keypair.pubkey(),
&bootstrap_leader_pubkey,
0,
bootstrap_leader_stake_lamports,
);
let stake_account = stake_state::create_account(
&staking_keypair.pubkey(),
&voting_keypair.pubkey(),
&vote_account,
bootstrap_leader_stake_lamports,
);
let mut builder = Builder::new()
.accounts(&[
// the mint
(
mint_keypair.pubkey(),
Account::new(mint_lamports, 0, &system_program::id()),
),
// node needs an account to issue votes and storage proofs from; this will require
// airdrops at some point to cover fees...
(
*bootstrap_leader_pubkey,
Account::new(bootstrap_leader_lamports, 0, &system_program::id()),
),
// where votes go to
(voting_keypair.pubkey(), vote_account),
// passive bootstrap leader stake, duplicates above temporarily
(staking_keypair.pubkey(), stake_account),
])
// Bare minimum program set
.native_instruction_processors(&[
solana_system_program(),
solana_bpf_loader_program!(),
solana_vote_program!(),
solana_stake_program!(),
])
.fee_calculator(FeeCalculator::new(0)); // most tests don't want fees
builder = solana_stake_api::genesis(builder);
builder = solana_storage_api::rewards_pools::genesis(builder);
GenesisBlockInfo {
genesis_block: builder.build(),
mint_keypair,
voting_keypair,
}
}

View File

@ -1,39 +1,5 @@
pub mod accounts;
pub mod accounts_db;
pub mod accounts_index;
pub mod append_vec;
pub mod bank;
pub mod bank_client;
mod blockhash_queue;
pub mod bloom;
pub mod epoch_schedule;
pub mod genesis_utils;
pub mod loader_utils;
pub mod message_processor;
mod native_loader;
pub mod rent_collector;
mod serde_utils;
pub mod stakes;
pub mod status_cache;
pub mod storage_utils;
mod system_instruction_processor;
pub mod transaction_batch;
pub mod transaction_utils;
#[macro_use]
extern crate solana_metrics;
#[macro_use]
extern crate solana_vote_program;
#[macro_use]
extern crate solana_stake_program;
#[macro_use]
extern crate solana_bpf_loader_program;
#[macro_use]
extern crate serde_derive;
extern crate fs_extra;
extern crate tempfile;
pub use sealevel::{
accounts, accounts_db, accounts_index, append_vec, bank, bank_client, bloom, epoch_schedule,
genesis_utils, loader_utils, message_processor, rent_collector, stakes, status_cache,
storage_utils, transaction_batch, transaction_utils,
};

View File

@ -1,60 +0,0 @@
use serde::Serialize;
use solana_sdk::client::Client;
use solana_sdk::instruction::{AccountMeta, Instruction};
use solana_sdk::loader_instruction;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_instruction;
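// Deploys a program in three steps: create the program account owned by the loader, write
// the program bytes in transaction-sized chunks, then finalize it so it can be invoked.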
pub fn load_program<T: Client>(
bank_client: &T,
from_keypair: &Keypair,
loader_pubkey: &Pubkey,
program: Vec<u8>,
) -> Pubkey {
let program_keypair = Keypair::new();
let program_pubkey = program_keypair.pubkey();
let instruction = system_instruction::create_account(
&from_keypair.pubkey(),
&program_pubkey,
1,
program.len() as u64,
loader_pubkey,
);
bank_client
.send_instruction(&from_keypair, instruction)
.unwrap();
let chunk_size = 256; // Size of chunk just needs to fit into tx
let mut offset = 0;
for chunk in program.chunks(chunk_size) {
let instruction =
loader_instruction::write(&program_pubkey, loader_pubkey, offset, chunk.to_vec());
let message = Message::new_with_payer(vec![instruction], Some(&from_keypair.pubkey()));
bank_client
.send_message(&[from_keypair, &program_keypair], message)
.unwrap();
offset += chunk_size as u32;
}
let instruction = loader_instruction::finalize(&program_pubkey, loader_pubkey);
let message = Message::new_with_payer(vec![instruction], Some(&from_keypair.pubkey()));
bank_client
.send_message(&[from_keypair, &program_keypair], message)
.unwrap();
program_pubkey
}
// Return an Instruction that invokes `program_id` with `data` and requires
// a signature from `from_pubkey`.
pub fn create_invoke_instruction<T: Serialize>(
from_pubkey: Pubkey,
program_id: Pubkey,
data: &T,
) -> Instruction {
let account_metas = vec![AccountMeta::new(from_pubkey, true)];
Instruction::new(program_id, data, account_metas)
}

View File

@ -1,678 +0,0 @@
use crate::native_loader;
use crate::system_instruction_processor;
use serde::{Deserialize, Serialize};
use solana_sdk::account::{
create_keyed_credit_only_accounts, Account, KeyedAccount, LamportCredit,
};
use solana_sdk::instruction::{CompiledInstruction, InstructionError};
use solana_sdk::instruction_processor_utils;
use solana_sdk::loader_instruction::LoaderInstruction;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::system_program;
use solana_sdk::transaction::TransactionError;
use std::collections::HashMap;
use std::io::Write;
use std::sync::RwLock;
#[cfg(unix)]
use libloading::os::unix::*;
#[cfg(windows)]
use libloading::os::windows::*;
/// Return true if the slice has any duplicate elements
pub fn has_duplicates<T: PartialEq>(xs: &[T]) -> bool {
// Note: This is an O(n^2) algorithm, but requires no heap allocations. The benchmark
// `bench_has_duplicates` in benches/message_processor.rs shows that this implementation is
// ~50 times faster than using HashSet for very short slices.
for i in 1..xs.len() {
if xs[i..].contains(&xs[i - 1]) {
return true;
}
}
false
}
/// Get mut references to a subset of elements.
fn get_subset_unchecked_mut<'a, T>(
xs: &'a mut [T],
indexes: &[u8],
) -> Result<Vec<&'a mut T>, InstructionError> {
// Since the compiler doesn't know the indexes are unique, dereferencing
// multiple mut elements is assumed to be unsafe. If, however, all
// indexes are unique, it's perfectly safe. The returned elements will share
// the lifetime of the input slice.
// Make certain there are no duplicate indexes. If there are, return an error
// because we can't return multiple mut references to the same element.
if has_duplicates(indexes) {
return Err(InstructionError::DuplicateAccountIndex);
}
Ok(indexes
.iter()
.map(|i| {
let ptr = &mut xs[*i as usize] as *mut T;
unsafe { &mut *ptr }
})
.collect())
}
fn verify_instruction(
is_debitable: bool,
program_id: &Pubkey,
pre: &Account,
post: &Account,
) -> Result<(), InstructionError> {
// Verify the transaction
// Make sure that program_id is still the same or this was just assigned by the system program,
// but even the system program can't touch a credit-only account
if pre.owner != post.owner && (!is_debitable || !system_program::check_id(&program_id)) {
return Err(InstructionError::ModifiedProgramId);
}
// For accounts unassigned to the program, the individual balance of each account cannot decrease.
if *program_id != post.owner && pre.lamports > post.lamports {
return Err(InstructionError::ExternalAccountLamportSpend);
}
// The balance of credit-only accounts may only increase
if !is_debitable && pre.lamports > post.lamports {
return Err(InstructionError::CreditOnlyLamportSpend);
}
// For accounts unassigned to the program, the data may not change.
if *program_id != post.owner && !system_program::check_id(&program_id) && pre.data != post.data
{
return Err(InstructionError::ExternalAccountDataModified);
}
// Credit-only account data may not change.
if !is_debitable && pre.data != post.data {
return Err(InstructionError::CreditOnlyDataModified);
}
// executable is one-way (false->true) and
// only system or the account owner may modify
if pre.executable != post.executable
&& (!is_debitable
|| pre.executable
|| *program_id != post.owner && !system_program::check_id(&program_id))
{
return Err(InstructionError::ExecutableModified);
}
// no one modifies rent_epoch (yet)
if pre.rent_epoch != post.rent_epoch {
return Err(InstructionError::RentEpochModified);
}
Ok(())
}
/// Return instruction data to pass to process_instruction().
/// When a loader is detected, the instruction data is wrapped with a LoaderInstruction
/// to signal to the loader that the instruction data should be used as arguments when
/// invoking a "main()" function.
fn get_loader_instruction_data<'a>(
loaders: &[(Pubkey, Account)],
ix_data: &'a [u8],
loader_ix_data: &'a mut Vec<u8>,
) -> &'a [u8] {
if loaders.len() > 1 {
let ix = LoaderInstruction::InvokeMain {
data: ix_data.to_vec(),
};
let ix_data = bincode::serialize(&ix).unwrap();
loader_ix_data.write_all(&ix_data).unwrap();
loader_ix_data
} else {
ix_data
}
}
pub type ProcessInstruction =
fn(&Pubkey, &mut [KeyedAccount], &[u8]) -> Result<(), InstructionError>;
pub type SymbolCache = RwLock<HashMap<Vec<u8>, Symbol<instruction_processor_utils::Entrypoint>>>;
#[derive(Serialize, Deserialize)]
pub struct MessageProcessor {
#[serde(skip)]
instruction_processors: Vec<(Pubkey, ProcessInstruction)>,
#[serde(skip)]
symbol_cache: SymbolCache,
}
impl Default for MessageProcessor {
fn default() -> Self {
let instruction_processors: Vec<(Pubkey, ProcessInstruction)> = vec![(
system_program::id(),
system_instruction_processor::process_instruction,
)];
Self {
instruction_processors,
symbol_cache: RwLock::new(HashMap::new()),
}
}
}
impl MessageProcessor {
/// Add a static entrypoint to intercept instructions before the dynamic loader.
pub fn add_instruction_processor(
&mut self,
program_id: Pubkey,
process_instruction: ProcessInstruction,
) {
self.instruction_processors
.push((program_id, process_instruction));
}
/// Process an instruction
/// This method calls the instruction's program entrypoint method
fn process_instruction(
&self,
message: &Message,
instruction: &CompiledInstruction,
executable_accounts: &mut [(Pubkey, Account)],
program_accounts: &mut [&mut Account],
) -> Result<(), InstructionError> {
let program_id = instruction.program_id(&message.account_keys);
let mut loader_ix_data = vec![];
let ix_data = get_loader_instruction_data(
executable_accounts,
&instruction.data,
&mut loader_ix_data,
);
let mut keyed_accounts = create_keyed_credit_only_accounts(executable_accounts);
let mut keyed_accounts2: Vec<_> = instruction
.accounts
.iter()
.map(|&index| {
let index = index as usize;
let key = &message.account_keys[index];
let is_debitable = message.is_debitable(index);
(
key,
index < message.header.num_required_signatures as usize,
is_debitable,
)
})
.zip(program_accounts.iter_mut())
.map(|((key, is_signer, is_debitable), account)| {
if is_debitable {
KeyedAccount::new(key, is_signer, account)
} else {
KeyedAccount::new_credit_only(key, is_signer, account)
}
})
.collect();
keyed_accounts.append(&mut keyed_accounts2);
assert!(
keyed_accounts[0].account.executable,
"loader not executable"
);
let loader_id = keyed_accounts[0].unsigned_key();
for (id, process_instruction) in &self.instruction_processors {
if id == loader_id {
return process_instruction(&program_id, &mut keyed_accounts[1..], &ix_data);
}
}
native_loader::invoke_entrypoint(
&program_id,
&mut keyed_accounts,
ix_data,
&self.symbol_cache,
)
}
/// Execute an instruction
/// This method calls the instruction's program entrypoint method and verifies that the result of
/// the call does not violate the bank's accounting rules.
/// The accounts are committed back to the bank only if this function returns Ok(_).
fn execute_instruction(
&self,
message: &Message,
instruction: &CompiledInstruction,
executable_accounts: &mut [(Pubkey, Account)],
program_accounts: &mut [&mut Account],
credits: &mut [&mut LamportCredit],
) -> Result<(), InstructionError> {
let program_id = instruction.program_id(&message.account_keys);
assert_eq!(instruction.accounts.len(), program_accounts.len());
// TODO: the runtime should be checking read/write access to memory
// we are trusting the hard-coded programs not to clobber or allocate
let pre_total: u128 = program_accounts
.iter()
.map(|a| u128::from(a.lamports))
.sum();
#[allow(clippy::map_clone)]
let pre_accounts: Vec<_> = program_accounts
.iter_mut()
.map(|account| account.clone()) // cloned() doesn't work on & &
.collect();
self.process_instruction(message, instruction, executable_accounts, program_accounts)?;
// Verify the instruction
for (pre_account, (i, post_account, is_debitable)) in
pre_accounts
.iter()
.zip(program_accounts.iter().enumerate().map(|(i, account)| {
(
i,
account,
message.is_debitable(instruction.accounts[i] as usize),
)
}))
{
verify_instruction(is_debitable, &program_id, pre_account, post_account)?;
if !is_debitable {
*credits[i] += post_account.lamports - pre_account.lamports;
}
}
// The total sum of all the lamports in all the accounts cannot change.
let post_total: u128 = program_accounts
.iter()
.map(|a| u128::from(a.lamports))
.sum();
if pre_total != post_total {
return Err(InstructionError::UnbalancedInstruction);
}
Ok(())
}
/// Process a message.
/// This method calls each instruction in the message over the set of loaded Accounts
/// The accounts are committed back to the bank only if every instruction succeeds
pub fn process_message(
&self,
message: &Message,
loaders: &mut [Vec<(Pubkey, Account)>],
accounts: &mut [Account],
credits: &mut [LamportCredit],
) -> Result<(), TransactionError> {
for (instruction_index, instruction) in message.instructions.iter().enumerate() {
let executable_index = message
.program_position(instruction.program_id_index as usize)
.ok_or(TransactionError::InvalidAccountIndex)?;
let executable_accounts = &mut loaders[executable_index];
let mut program_accounts = get_subset_unchecked_mut(accounts, &instruction.accounts)
.map_err(|err| TransactionError::InstructionError(instruction_index as u8, err))?;
// TODO: `get_subset_unchecked_mut` panics on an index out of bounds if an executable
// account is also included as a regular account for an instruction, because the
// executable account is not passed in as part of the accounts slice
let mut instruction_credits = get_subset_unchecked_mut(credits, &instruction.accounts)
.map_err(|err| TransactionError::InstructionError(instruction_index as u8, err))?;
self.execute_instruction(
message,
instruction,
executable_accounts,
&mut program_accounts,
&mut instruction_credits,
)
.map_err(|err| TransactionError::InstructionError(instruction_index as u8, err))?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::instruction::{AccountMeta, Instruction, InstructionError};
use solana_sdk::message::Message;
use solana_sdk::native_loader::create_loadable_account;
#[test]
fn test_has_duplicates() {
assert!(!has_duplicates(&[1, 2]));
assert!(has_duplicates(&[1, 2, 1]));
}
#[test]
fn test_get_subset_unchecked_mut() {
assert_eq!(
get_subset_unchecked_mut(&mut [7, 8], &[0]).unwrap(),
vec![&mut 7]
);
assert_eq!(
get_subset_unchecked_mut(&mut [7, 8], &[0, 1]).unwrap(),
vec![&mut 7, &mut 8]
);
}
#[test]
fn test_get_subset_unchecked_mut_duplicate_index() {
// Duplicate indexes are rejected with an error rather than a panic.
assert_eq!(
get_subset_unchecked_mut(&mut [7, 8], &[0, 0]).unwrap_err(),
InstructionError::DuplicateAccountIndex
);
}
#[test]
#[should_panic]
fn test_get_subset_unchecked_mut_out_of_bounds() {
// This panics, because it assumes bounds validation is done elsewhere.
get_subset_unchecked_mut(&mut [7, 8], &[2]).unwrap();
}
#[test]
fn test_verify_instruction_change_program_id() {
fn change_program_id(
ix: &Pubkey,
pre: &Pubkey,
post: &Pubkey,
is_debitable: bool,
) -> Result<(), InstructionError> {
verify_instruction(
is_debitable,
&ix,
&Account::new(0, 0, pre),
&Account::new(0, 0, post),
)
}
let system_program_id = system_program::id();
let alice_program_id = Pubkey::new_rand();
let mallory_program_id = Pubkey::new_rand();
assert_eq!(
change_program_id(
&system_program_id,
&system_program_id,
&alice_program_id,
true
),
Ok(()),
"system program should be able to change the account owner"
);
assert_eq!(
change_program_id(&system_program_id, &system_program_id, &alice_program_id, false),
Err(InstructionError::ModifiedProgramId),
"system program should not be able to change the account owner of a credit only account"
);
assert_eq!(
change_program_id(
&mallory_program_id,
&system_program_id,
&alice_program_id,
true
),
Err(InstructionError::ModifiedProgramId),
"malicious Mallory should not be able to change the account owner"
);
}
#[test]
fn test_verify_instruction_change_executable() {
let alice_program_id = Pubkey::new_rand();
let change_executable = |program_id: &Pubkey,
is_debitable: bool,
pre_executable: bool,
post_executable: bool|
-> Result<(), InstructionError> {
let pre = Account {
owner: alice_program_id,
executable: pre_executable,
..Account::default()
};
let post = Account {
owner: alice_program_id,
executable: post_executable,
..Account::default()
};
verify_instruction(is_debitable, &program_id, &pre, &post)
};
let mallory_program_id = Pubkey::new_rand();
let system_program_id = system_program::id();
assert_eq!(
change_executable(&system_program_id, true, false, true),
Ok(()),
"system program should be able to change executable"
);
assert_eq!(
change_executable(&alice_program_id, true, false, true),
Ok(()),
"alice program should be able to change executable"
);
assert_eq!(
change_executable(&system_program_id, false, false, true),
Err(InstructionError::ExecutableModified),
"system program can't modify executable of credit-only accounts"
);
assert_eq!(
change_executable(&system_program_id, true, true, false),
Err(InstructionError::ExecutableModified),
"system program can't reverse executable"
);
assert_eq!(
change_executable(&mallory_program_id, true, false, true),
Err(InstructionError::ExecutableModified),
"malicious Mallory should not be able to change the account executable"
);
}
#[test]
fn test_verify_instruction_change_data() {
let alice_program_id = Pubkey::new_rand();
let change_data =
|program_id: &Pubkey, is_debitable: bool| -> Result<(), InstructionError> {
let pre = Account::new(0, 0, &alice_program_id);
let post = Account::new_data(0, &[42], &alice_program_id).unwrap();
verify_instruction(is_debitable, &program_id, &pre, &post)
};
let system_program_id = system_program::id();
let mallory_program_id = Pubkey::new_rand();
assert_eq!(
change_data(&system_program_id, true),
Ok(()),
"system program should be able to change the data"
);
assert_eq!(
change_data(&alice_program_id, true),
Ok(()),
"alice program should be able to change the data"
);
assert_eq!(
change_data(&mallory_program_id, true),
Err(InstructionError::ExternalAccountDataModified),
"malicious Mallory should not be able to change the account data"
);
assert_eq!(
change_data(&system_program_id, false),
Err(InstructionError::CreditOnlyDataModified),
"system program should not be able to change the data if credit-only"
);
}
#[test]
fn test_verify_instruction_rent_epoch() {
let alice_program_id = Pubkey::new_rand();
let pre = Account::new(0, 0, &alice_program_id);
let mut post = Account::new(0, 0, &alice_program_id);
assert_eq!(
verify_instruction(false, &system_program::id(), &pre, &post),
Ok(()),
"nothing changed!"
);
post.rent_epoch += 1;
assert_eq!(
verify_instruction(false, &system_program::id(), &pre, &post),
Err(InstructionError::RentEpochModified),
"no one touches rent_epoch"
);
}
#[test]
fn test_verify_instruction_credit_only() {
let alice_program_id = Pubkey::new_rand();
let pre = Account::new(42, 0, &alice_program_id);
let post = Account::new(0, 0, &alice_program_id);
assert_eq!(
verify_instruction(false, &system_program::id(), &pre, &post),
Err(InstructionError::ExternalAccountLamportSpend),
"debit should fail, even if system program"
);
assert_eq!(
verify_instruction(false, &alice_program_id, &pre, &post,),
Err(InstructionError::CreditOnlyLamportSpend),
"debit should fail, even if owning program"
);
}
#[test]
fn test_process_message_credit_only_handling() {
#[derive(Serialize, Deserialize)]
enum MockSystemInstruction {
Correct { lamports: u64 },
AttemptDebit { lamports: u64 },
Misbehave { lamports: u64 },
}
fn mock_system_process_instruction(
_program_id: &Pubkey,
keyed_accounts: &mut [KeyedAccount],
data: &[u8],
) -> Result<(), InstructionError> {
if let Ok(instruction) = bincode::deserialize(data) {
match instruction {
MockSystemInstruction::Correct { lamports } => {
keyed_accounts[0].account.lamports -= lamports;
keyed_accounts[1].account.lamports += lamports;
Ok(())
}
MockSystemInstruction::AttemptDebit { lamports } => {
keyed_accounts[0].account.lamports += lamports;
keyed_accounts[1].account.lamports -= lamports;
Ok(())
}
// Credit a credit-only account for more lamports than debited
MockSystemInstruction::Misbehave { lamports } => {
keyed_accounts[0].account.lamports -= lamports;
keyed_accounts[1].account.lamports = 2 * lamports;
Ok(())
}
}
} else {
Err(InstructionError::InvalidInstructionData)
}
}
let mock_system_program_id = Pubkey::new(&[2u8; 32]);
let mut message_processor = MessageProcessor::default();
message_processor
.add_instruction_processor(mock_system_program_id, mock_system_process_instruction);
let mut accounts: Vec<Account> = Vec::new();
let account = Account::new(100, 1, &mock_system_program_id);
accounts.push(account);
let account = Account::new(0, 1, &mock_system_program_id);
accounts.push(account);
let mut loaders: Vec<Vec<(Pubkey, Account)>> = Vec::new();
let account = create_loadable_account("mock_system_program");
loaders.push(vec![(mock_system_program_id, account)]);
let from_pubkey = Pubkey::new_rand();
let to_pubkey = Pubkey::new_rand();
let account_metas = vec![
AccountMeta::new(from_pubkey, true),
AccountMeta::new_credit_only(to_pubkey, false),
];
let message = Message::new(vec![Instruction::new(
mock_system_program_id,
&MockSystemInstruction::Correct { lamports: 50 },
account_metas.clone(),
)]);
let mut deltas = vec![0, 0];
let result =
message_processor.process_message(&message, &mut loaders, &mut accounts, &mut deltas);
assert_eq!(result, Ok(()));
assert_eq!(accounts[0].lamports, 50);
assert_eq!(accounts[1].lamports, 50);
assert_eq!(deltas, vec![0, 50]);
let message = Message::new(vec![Instruction::new(
mock_system_program_id,
&MockSystemInstruction::AttemptDebit { lamports: 50 },
account_metas.clone(),
)]);
let mut deltas = vec![0, 0];
let result =
message_processor.process_message(&message, &mut loaders, &mut accounts, &mut deltas);
assert_eq!(
result,
Err(TransactionError::InstructionError(
0,
InstructionError::CreditOnlyLamportSpend
))
);
let message = Message::new(vec![Instruction::new(
mock_system_program_id,
&MockSystemInstruction::Misbehave { lamports: 50 },
account_metas,
)]);
let mut deltas = vec![0, 0];
let result =
message_processor.process_message(&message, &mut loaders, &mut accounts, &mut deltas);
assert_eq!(
result,
Err(TransactionError::InstructionError(
0,
InstructionError::UnbalancedInstruction
))
);
}
#[test]
fn test_get_loader_instruction_data() {
// First ensure the ix_data is unaffected if not invoking via a loader.
let ix_data = [1];
let mut loader_ix_data = vec![];
let native_pubkey = Pubkey::new_rand();
let native_loader = (native_pubkey, Account::new(0, 0, &native_pubkey));
assert_eq!(
get_loader_instruction_data(&[native_loader.clone()], &ix_data, &mut loader_ix_data),
&ix_data
);
// Now ensure the ix_data is wrapped when there's a loader present.
let acme_pubkey = Pubkey::new_rand();
let acme_loader = (acme_pubkey, Account::new(0, 0, &native_pubkey));
let expected_ix = LoaderInstruction::InvokeMain {
data: ix_data.to_vec(),
};
let expected_ix_data = bincode::serialize(&expected_ix).unwrap();
assert_eq!(
get_loader_instruction_data(
&[native_loader.clone(), acme_loader.clone()],
&ix_data,
&mut loader_ix_data
),
&expected_ix_data[..]
);
// Note there was an allocation in the input vector.
assert_eq!(loader_ix_data, expected_ix_data);
}
}

View File

@ -1,116 +0,0 @@
//! Native loader
use crate::message_processor::SymbolCache;
#[cfg(unix)]
use libloading::os::unix::*;
#[cfg(windows)]
use libloading::os::windows::*;
use log::*;
use solana_sdk::account::KeyedAccount;
use solana_sdk::instruction::InstructionError;
use solana_sdk::instruction_processor_utils;
use solana_sdk::pubkey::Pubkey;
use std::env;
use std::path::PathBuf;
use std::str;
/// Dynamic link library prefixes
#[cfg(unix)]
const PLATFORM_FILE_PREFIX_NATIVE: &str = "lib";
#[cfg(windows)]
const PLATFORM_FILE_PREFIX_NATIVE: &str = "";
/// Dynamic link library file extension specific to the platform
#[cfg(any(target_os = "macos", target_os = "ios"))]
const PLATFORM_FILE_EXTENSION_NATIVE: &str = "dylib";
/// Dynamic link library file extension specific to the platform
#[cfg(all(unix, not(any(target_os = "macos", target_os = "ios"))))]
const PLATFORM_FILE_EXTENSION_NATIVE: &str = "so";
/// Dynamic link library file extension specific to the platform
#[cfg(windows)]
const PLATFORM_FILE_EXTENSION_NATIVE: &str = "dll";
fn create_path(name: &str) -> PathBuf {
let current_exe = env::current_exe()
.unwrap_or_else(|e| panic!("create_path(\"{}\"): current exe not found: {:?}", name, e));
let current_exe_directory = PathBuf::from(current_exe.parent().unwrap_or_else(|| {
panic!(
"create_path(\"{}\"): no parent directory of {:?}",
name, current_exe,
)
}));
let library_file_name = PathBuf::from(PLATFORM_FILE_PREFIX_NATIVE.to_string() + name)
.with_extension(PLATFORM_FILE_EXTENSION_NATIVE);
// Check the current_exe directory for the library as `cargo test` binaries are run
// from the deps/ subdirectory
let file_path = current_exe_directory.join(&library_file_name);
if file_path.exists() {
file_path
} else {
// `cargo build` places dependent libraries in the deps/ subdirectory
current_exe_directory.join("deps").join(library_file_name)
}
}
#[cfg(windows)]
fn library_open(path: &PathBuf) -> std::io::Result<Library> {
Library::new(path)
}
#[cfg(not(windows))]
fn library_open(path: &PathBuf) -> std::io::Result<Library> {
// TODO linux tls bug can cause crash on dlclose(), workaround by never unloading
Library::open(Some(path), libc::RTLD_NODELETE | libc::RTLD_NOW)
}
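/// Look up the native program named by the first keyed account's data, loading the shared
/// library and caching its entrypoint on first use, then call it with the remaining accounts.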
pub fn invoke_entrypoint(
program_id: &Pubkey,
keyed_accounts: &mut [KeyedAccount],
ix_data: &[u8],
symbol_cache: &SymbolCache,
) -> Result<(), InstructionError> {
// dispatch it
let (names, params) = keyed_accounts.split_at_mut(1);
let name_vec = &names[0].account.data;
if let Some(entrypoint) = symbol_cache.read().unwrap().get(name_vec) {
unsafe {
return entrypoint(program_id, params, ix_data);
}
}
let name = match str::from_utf8(name_vec) {
Ok(v) => v,
Err(e) => {
warn!("Invalid UTF-8 sequence: {}", e);
return Err(InstructionError::GenericError);
}
};
trace!("Call native {:?}", name);
let path = create_path(&name);
match library_open(&path) {
Ok(library) => unsafe {
let entrypoint: Symbol<instruction_processor_utils::Entrypoint> =
match library.get(instruction_processor_utils::ENTRYPOINT.as_bytes()) {
Ok(s) => s,
Err(e) => {
warn!(
"{:?}: Unable to find {:?} in program",
e,
instruction_processor_utils::ENTRYPOINT
);
return Err(InstructionError::GenericError);
}
};
let ret = entrypoint(program_id, params, ix_data);
symbol_cache
.write()
.unwrap()
.insert(name_vec.to_vec(), entrypoint);
ret
},
Err(e) => {
warn!("Unable to load: {:?}", e);
Err(InstructionError::GenericError)
}
}
}

View File

@ -1,65 +0,0 @@
//! calculate and collect rent from Accounts
use crate::epoch_schedule::EpochSchedule;
use solana_sdk::{account::Account, clock::Epoch, rent_calculator::RentCalculator};
#[derive(Default, Serialize, Deserialize, Clone)]
pub struct RentCollector {
pub epoch: Epoch,
pub epoch_schedule: EpochSchedule,
pub slots_per_year: f64,
pub rent_calculator: RentCalculator,
}
impl RentCollector {
pub fn new(
epoch: Epoch,
epoch_schedule: &EpochSchedule,
slots_per_year: f64,
rent_calculator: &RentCalculator,
) -> Self {
Self {
epoch,
epoch_schedule: *epoch_schedule,
slots_per_year,
rent_calculator: *rent_calculator,
}
}
pub fn clone_with_epoch(&self, epoch: Epoch) -> Self {
Self {
epoch,
..self.clone()
}
}
// updates this account's lamports and rent_epoch and returns
// the rent collected, if any
//
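// Some((account, rent)) keeps the account (rent_epoch advanced, rent deducted when due);
// None means the account cannot cover the rent due.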
pub fn update(&self, mut account: Account) -> Option<(Account, u64)> {
if account.data.is_empty() || account.rent_epoch > self.epoch {
Some((account, 0))
} else {
let slots_elapsed: u64 = (account.rent_epoch..=self.epoch)
.map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1))
.sum();
let (rent_due, exempt) = self.rent_calculator.due(
account.lamports,
account.data.len(),
slots_elapsed as f64 / self.slots_per_year,
);
if exempt || rent_due != 0 {
if account.lamports > rent_due {
account.rent_epoch = self.epoch + 1;
account.lamports -= rent_due;
Some((account, rent_due))
} else {
None
}
} else {
// maybe collect rent later, leave account alone
Some((account, 0))
}
}
}
}

View File

@ -1,62 +0,0 @@
use std::fmt;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
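// Serde helpers for atomics: serialization snapshots the current value with a Relaxed load,
// deserialization wraps the decoded value in a fresh atomic.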
struct U64Visitor;
impl<'a> serde::de::Visitor<'a> for U64Visitor {
type Value = u64;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Expecting u64")
}
fn visit_u64<E>(self, data: u64) -> std::result::Result<u64, E>
where
E: serde::de::Error,
{
Ok(data)
}
}
pub fn deserialize_atomicu64<'de, D>(d: D) -> Result<AtomicU64, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let value = d.deserialize_u64(U64Visitor)?;
Ok(AtomicU64::new(value))
}
pub fn serialize_atomicu64<S>(x: &AtomicU64, s: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
s.serialize_u64(x.load(Ordering::Relaxed))
}
struct BoolVisitor;
impl<'a> serde::de::Visitor<'a> for BoolVisitor {
type Value = bool;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Expecting bool")
}
fn visit_bool<E>(self, data: bool) -> std::result::Result<bool, E>
where
E: serde::de::Error,
{
Ok(data)
}
}
pub fn deserialize_atomicbool<'de, D>(d: D) -> Result<AtomicBool, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let value = d.deserialize_bool(BoolVisitor)?;
Ok(AtomicBool::new(value))
}
pub fn serialize_atomicbool<S>(x: &AtomicBool, s: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
s.serialize_bool(x.load(Ordering::Relaxed))
}

View File

@ -1,517 +0,0 @@
//! Stakes serve as a cache of stake and vote accounts to derive
//! node stakes
use solana_sdk::account::Account;
use solana_sdk::clock::Epoch;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::sysvar::stake_history::StakeHistory;
use solana_stake_api::stake_state::{new_stake_history_entry, StakeState};
use solana_vote_api::vote_state::VoteState;
use std::collections::HashMap;
#[derive(Default, Clone, PartialEq, Debug, Deserialize, Serialize)]
pub struct Stakes {
/// vote accounts
vote_accounts: HashMap<Pubkey, (u64, Account)>,
/// stake_accounts
stake_accounts: HashMap<Pubkey, Account>,
/// unclaimed points.
// a point is a credit multiplied by the stake
points: u64,
/// current epoch, used to calculate current stake
epoch: Epoch,
/// history of staking levels
stake_history: StakeHistory,
}
impl Stakes {
pub fn history(&self) -> &StakeHistory {
&self.stake_history
}
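/// Produce a copy of these stakes as of `epoch`: fold the current epoch's activations into
/// the stake history, then recompute each vote account's effective stake at the new epoch.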
pub fn clone_with_epoch(&self, epoch: Epoch) -> Self {
if self.epoch == epoch {
self.clone()
} else {
let mut stake_history = self.stake_history.clone();
stake_history.add(
self.epoch,
new_stake_history_entry(
self.epoch,
self.stake_accounts
.iter()
.filter_map(|(_pubkey, stake_account)| {
StakeState::stake_from(stake_account)
})
.collect::<Vec<_>>()
.iter(),
Some(&self.stake_history),
),
);
Stakes {
stake_accounts: self.stake_accounts.clone(),
points: self.points,
epoch,
vote_accounts: self
.vote_accounts
.iter()
.map(|(pubkey, (_stake, account))| {
(
*pubkey,
(
self.calculate_stake(pubkey, epoch, Some(&stake_history)),
account.clone(),
),
)
})
.collect(),
stake_history,
}
}
}
// sum the stakes that point to the given voter_pubkey
fn calculate_stake(
&self,
voter_pubkey: &Pubkey,
epoch: Epoch,
stake_history: Option<&StakeHistory>,
) -> u64 {
self.stake_accounts
.iter()
.map(|(_, stake_account)| {
StakeState::stake_from(stake_account).map_or(0, |stake| {
if stake.voter_pubkey(epoch) == voter_pubkey {
stake.stake(epoch, stake_history)
} else {
0
}
})
})
.sum()
}
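/// Note: `&&` binds tighter than `||` below, so the data-length check only applies to
/// stake-program accounts, not vote accounts.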
pub fn is_stake(account: &Account) -> bool {
solana_vote_api::check_id(&account.owner)
|| solana_stake_api::check_id(&account.owner)
&& account.data.len() >= std::mem::size_of::<StakeState>()
}
pub fn store(&mut self, pubkey: &Pubkey, account: &Account) {
if solana_vote_api::check_id(&account.owner) {
if account.lamports == 0 {
self.vote_accounts.remove(pubkey);
} else {
let old = self.vote_accounts.get(pubkey);
let stake = old.map_or_else(
|| self.calculate_stake(pubkey, self.epoch, Some(&self.stake_history)),
|v| v.0,
);
// count any increase in points, can only go forward
let old_credits = old
.and_then(|(_stake, old_account)| VoteState::credits_from(old_account))
.unwrap_or(0);
let credits = VoteState::credits_from(account).unwrap_or(old_credits);
self.points += credits.saturating_sub(old_credits) * stake;
self.vote_accounts.insert(*pubkey, (stake, account.clone()));
}
} else if solana_stake_api::check_id(&account.owner) {
// old_stake is stake lamports and voter_pubkey from the pre-store() version
let old_stake = self.stake_accounts.get(pubkey).and_then(|old_account| {
StakeState::stake_from(old_account).map(|stake| {
(
*stake.voter_pubkey(self.epoch),
stake.stake(self.epoch, Some(&self.stake_history)),
)
})
});
let stake = StakeState::stake_from(account).map(|stake| {
(
*stake.voter_pubkey(self.epoch),
if account.lamports != 0 {
stake.stake(self.epoch, Some(&self.stake_history))
} else {
0
},
)
});
// if adjustments need to be made...
if stake != old_stake {
if let Some((voter_pubkey, stake)) = old_stake {
self.vote_accounts
.entry(voter_pubkey)
.and_modify(|e| e.0 -= stake);
}
if let Some((voter_pubkey, stake)) = stake {
self.vote_accounts
.entry(voter_pubkey)
.and_modify(|e| e.0 += stake);
}
}
if account.lamports == 0 {
self.stake_accounts.remove(pubkey);
} else {
self.stake_accounts.insert(*pubkey, account.clone());
}
}
}
pub fn vote_accounts(&self) -> &HashMap<Pubkey, (u64, Account)> {
&self.vote_accounts
}
pub fn stake_accounts(&self) -> &HashMap<Pubkey, Account> {
&self.stake_accounts
}
pub fn rewards_pools(&self) -> impl Iterator<Item = (&Pubkey, &Account)> {
self.stake_accounts
.iter()
.filter(|(_key, account)| match StakeState::from(account) {
Some(StakeState::RewardsPool) => true,
_ => false,
})
}
pub fn highest_staked_node(&self) -> Option<Pubkey> {
self.vote_accounts
.iter()
.max_by(|(_ak, av), (_bk, bv)| av.0.cmp(&bv.0))
.and_then(|(_k, (_stake, account))| VoteState::from(account))
.map(|vote_state| vote_state.node_pubkey)
}
/// currently unclaimed points
pub fn points(&self) -> u64 {
self.points
}
/// "claims" points, resets points to 0
pub fn claim_points(&mut self) -> u64 {
let points = self.points;
self.points = 0;
points
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use solana_sdk::pubkey::Pubkey;
use solana_stake_api::stake_state;
use solana_vote_api::vote_state::{self, VoteState, MAX_LOCKOUT_HISTORY};
// set up some dummies for a staked node (( vote ) ( stake ))
pub fn create_staked_node_accounts(stake: u64) -> ((Pubkey, Account), (Pubkey, Account)) {
let vote_pubkey = Pubkey::new_rand();
let vote_account = vote_state::create_account(&vote_pubkey, &Pubkey::new_rand(), 0, 1);
(
(vote_pubkey, vote_account),
create_stake_account(stake, &vote_pubkey),
)
}
// add stake to a vote_pubkey ( stake )
pub fn create_stake_account(stake: u64, vote_pubkey: &Pubkey) -> (Pubkey, Account) {
let stake_pubkey = Pubkey::new_rand();
(
stake_pubkey,
stake_state::create_account(
&stake_pubkey,
&vote_pubkey,
&vote_state::create_account(&vote_pubkey, &Pubkey::new_rand(), 0, 1),
stake,
),
)
}
#[test]
fn test_stakes_basic() {
for i in 0..4 {
let mut stakes = Stakes::default();
stakes.epoch = i;
let ((vote_pubkey, vote_account), (stake_pubkey, mut stake_account)) =
create_staked_node_accounts(10);
stakes.store(&vote_pubkey, &vote_account);
stakes.store(&stake_pubkey, &stake_account);
let stake = StakeState::stake_from(&stake_account).unwrap();
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(
vote_accounts.get(&vote_pubkey).unwrap().0,
stake.stake(i, None)
);
}
stake_account.lamports = 42;
stakes.store(&stake_pubkey, &stake_account);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(
vote_accounts.get(&vote_pubkey).unwrap().0,
stake.stake(i, None)
); // stays old stake, because only 10 is activated
}
// activate more
let (_stake_pubkey, mut stake_account) = create_stake_account(42, &vote_pubkey);
stakes.store(&stake_pubkey, &stake_account);
let stake = StakeState::stake_from(&stake_account).unwrap();
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(
vote_accounts.get(&vote_pubkey).unwrap().0,
stake.stake(i, None)
); // now stake of 42 is activated
}
stake_account.lamports = 0;
stakes.store(&stake_pubkey, &stake_account);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0);
}
}
}
#[test]
fn test_stakes_highest() {
let mut stakes = Stakes::default();
assert_eq!(stakes.highest_staked_node(), None);
let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
create_staked_node_accounts(10);
stakes.store(&vote_pubkey, &vote_account);
stakes.store(&stake_pubkey, &stake_account);
let ((vote11_pubkey, vote11_account), (stake11_pubkey, stake11_account)) =
create_staked_node_accounts(20);
stakes.store(&vote11_pubkey, &vote11_account);
stakes.store(&stake11_pubkey, &stake11_account);
let vote11_node_pubkey = VoteState::from(&vote11_account).unwrap().node_pubkey;
assert_eq!(stakes.highest_staked_node(), Some(vote11_node_pubkey))
}
#[test]
fn test_stakes_points() {
let mut stakes = Stakes::default();
stakes.epoch = 4;
let stake = 42;
assert_eq!(stakes.points(), 0);
assert_eq!(stakes.claim_points(), 0);
assert_eq!(stakes.claim_points(), 0);
let ((vote_pubkey, mut vote_account), (stake_pubkey, stake_account)) =
create_staked_node_accounts(stake);
stakes.store(&vote_pubkey, &vote_account);
stakes.store(&stake_pubkey, &stake_account);
assert_eq!(stakes.points(), 0);
assert_eq!(stakes.claim_points(), 0);
let mut vote_state = VoteState::from(&vote_account).unwrap();
for i in 0..MAX_LOCKOUT_HISTORY + 42 {
vote_state.process_slot_vote_unchecked(i as u64);
vote_state.to(&mut vote_account).unwrap();
stakes.store(&vote_pubkey, &vote_account);
assert_eq!(stakes.points(), vote_state.credits() * stake);
}
vote_account.lamports = 0;
stakes.store(&vote_pubkey, &vote_account);
assert_eq!(stakes.points(), vote_state.credits() * stake);
assert_eq!(stakes.claim_points(), vote_state.credits() * stake);
assert_eq!(stakes.claim_points(), 0);
assert_eq!(stakes.claim_points(), 0);
// points come out of nowhere, but don't care here ;)
vote_account.lamports = 1;
stakes.store(&vote_pubkey, &vote_account);
assert_eq!(stakes.points(), vote_state.credits() * stake);
// test going backwards, should never go backwards
let old_vote_state = vote_state;
let vote_account = vote_state::create_account(&vote_pubkey, &Pubkey::new_rand(), 0, 1);
stakes.store(&vote_pubkey, &vote_account);
assert_eq!(stakes.points(), old_vote_state.credits() * stake);
}
#[test]
fn test_stakes_vote_account_disappear_reappear() {
let mut stakes = Stakes::default();
stakes.epoch = 4;
let ((vote_pubkey, mut vote_account), (stake_pubkey, stake_account)) =
create_staked_node_accounts(10);
stakes.store(&vote_pubkey, &vote_account);
stakes.store(&stake_pubkey, &stake_account);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10);
}
vote_account.lamports = 0;
stakes.store(&vote_pubkey, &vote_account);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_none());
}
vote_account.lamports = 1;
stakes.store(&vote_pubkey, &vote_account);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10);
}
}
#[test]
fn test_stakes_change_delegate() {
let mut stakes = Stakes::default();
stakes.epoch = 4;
let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
create_staked_node_accounts(10);
let ((vote_pubkey2, vote_account2), (_stake_pubkey2, stake_account2)) =
create_staked_node_accounts(10);
stakes.store(&vote_pubkey, &vote_account);
stakes.store(&vote_pubkey2, &vote_account2);
// delegates to vote_pubkey
stakes.store(&stake_pubkey, &stake_account);
let stake = StakeState::stake_from(&stake_account).unwrap();
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(
vote_accounts.get(&vote_pubkey).unwrap().0,
stake.stake(stakes.epoch, Some(&stakes.stake_history))
);
assert!(vote_accounts.get(&vote_pubkey2).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey2).unwrap().0, 0);
}
// delegates to vote_pubkey2
stakes.store(&stake_pubkey, &stake_account2);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0);
assert!(vote_accounts.get(&vote_pubkey2).is_some());
assert_eq!(
vote_accounts.get(&vote_pubkey2).unwrap().0,
stake.stake(stakes.epoch, Some(&stakes.stake_history))
);
}
}
#[test]
fn test_stakes_multiple_stakers() {
let mut stakes = Stakes::default();
stakes.epoch = 4;
let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
create_staked_node_accounts(10);
let (stake_pubkey2, stake_account2) = create_stake_account(10, &vote_pubkey);
stakes.store(&vote_pubkey, &vote_account);
// delegates to vote_pubkey
stakes.store(&stake_pubkey, &stake_account);
stakes.store(&stake_pubkey2, &stake_account2);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 20);
}
}
#[test]
fn test_clone_with_epoch() {
let mut stakes = Stakes::default();
let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
create_staked_node_accounts(10);
stakes.store(&vote_pubkey, &vote_account);
stakes.store(&stake_pubkey, &stake_account);
let stake = StakeState::stake_from(&stake_account).unwrap();
{
let vote_accounts = stakes.vote_accounts();
assert_eq!(
vote_accounts.get(&vote_pubkey).unwrap().0,
stake.stake(stakes.epoch, Some(&stakes.stake_history))
);
}
let stakes = stakes.clone_with_epoch(3);
{
let vote_accounts = stakes.vote_accounts();
assert_eq!(
vote_accounts.get(&vote_pubkey).unwrap().0,
stake.stake(stakes.epoch, Some(&stakes.stake_history))
);
}
}
#[test]
fn test_stakes_not_delegate() {
let mut stakes = Stakes::default();
stakes.epoch = 4;
let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) =
create_staked_node_accounts(10);
stakes.store(&vote_pubkey, &vote_account);
stakes.store(&stake_pubkey, &stake_account);
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10);
}
// not a stake account, and whacks above entry
stakes.store(&stake_pubkey, &Account::new(1, 0, &solana_stake_api::id()));
{
let vote_accounts = stakes.vote_accounts();
assert!(vote_accounts.get(&vote_pubkey).is_some());
assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0);
}
}
}

View File

@ -1,437 +0,0 @@
use log::*;
use rand::{thread_rng, Rng};
use serde::Serialize;
use solana_sdk::clock::{Slot, MAX_RECENT_BLOCKHASHES};
use solana_sdk::hash::Hash;
use solana_sdk::signature::Signature;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};
pub const MAX_CACHE_ENTRIES: usize = MAX_RECENT_BLOCKHASHES;
const CACHED_SIGNATURE_SIZE: usize = 20;
// Store forks in a single chunk of memory to avoid another lookup.
pub type ForkStatus<T> = Vec<(Slot, T)>;
type SignatureSlice = [u8; CACHED_SIGNATURE_SIZE];
type SignatureMap<T> = HashMap<SignatureSlice, ForkStatus<T>>;
// Map of Hash and signature status
pub type SignatureStatus<T> = Arc<Mutex<HashMap<Hash, (usize, Vec<(SignatureSlice, T)>)>>>;
// A map from blockhash to the highest fork it has been observed on, the signature slice
// offset, and a map from signature slice to fork status for that signature
type StatusMap<T> = HashMap<Hash, (Slot, usize, SignatureMap<T>)>;
// A map of signatures recorded in each fork; used to serialize for snapshots easily.
// Doesn't store a `SlotDelta` in it because the bool `root` is usually set much later
type SlotDeltaMap<T> = HashMap<Slot, SignatureStatus<T>>;
// The signature statuses added during a slot, can be used to build on top of a status cache or to
// construct a new one. Usually derived from a status cache's `SlotDeltaMap`
pub type SlotDelta<T> = (Slot, bool, SignatureStatus<T>);
#[derive(Clone, Debug)]
pub struct StatusCache<T: Serialize + Clone> {
cache: StatusMap<T>,
roots: HashSet<Slot>,
/// all signatures seen during a fork/slot
slot_deltas: SlotDeltaMap<T>,
}
impl<T: Serialize + Clone> Default for StatusCache<T> {
fn default() -> Self {
Self {
cache: HashMap::default(),
// 0 is always a root
roots: [0].iter().cloned().collect(),
slot_deltas: HashMap::default(),
}
}
}
impl<T: Serialize + Clone + PartialEq> PartialEq for StatusCache<T> {
fn eq(&self, other: &Self) -> bool {
self.roots == other.roots
&& self.cache.iter().all(|(hash, (slot, sig_index, sig_map))| {
if let Some((other_slot, other_sig_index, other_sig_map)) = other.cache.get(hash) {
if slot == other_slot && sig_index == other_sig_index {
return sig_map.iter().all(|(slice, fork_map)| {
if let Some(other_fork_map) = other_sig_map.get(slice) {
// all this work just to compare the highest forks in the fork map
// per signature
return fork_map.last() == other_fork_map.last();
}
false
});
}
}
false
})
}
}
impl<T: Serialize + Clone> StatusCache<T> {
/// Check if the signature from a transaction is in any of the forks in the ancestors set.
pub fn get_signature_status(
&self,
sig: &Signature,
transaction_blockhash: &Hash,
ancestors: &HashMap<Slot, usize>,
) -> Option<(Slot, T)> {
let map = self.cache.get(transaction_blockhash)?;
let (_, index, sigmap) = map;
let mut sig_slice = [0u8; CACHED_SIGNATURE_SIZE];
sig_slice.clone_from_slice(&sig.as_ref()[*index..*index + CACHED_SIGNATURE_SIZE]);
if let Some(stored_forks) = sigmap.get(&sig_slice) {
let res = stored_forks
.iter()
.filter(|(f, _)| ancestors.get(f).is_some() || self.roots.get(f).is_some())
.nth(0)
.cloned();
if res.is_some() {
return res;
}
}
None
}
/// TODO: wallets should send the Transaction's recent blockhash as well
pub fn get_signature_status_slow(
&self,
sig: &Signature,
ancestors: &HashMap<Slot, usize>,
) -> Option<(usize, T)> {
trace!("get_signature_status_slow");
let mut keys = vec![];
let mut val: Vec<_> = self.cache.iter().map(|(k, _)| *k).collect();
keys.append(&mut val);
for blockhash in keys.iter() {
trace!("get_signature_status_slow: trying {}", blockhash);
if let Some((forkid, res)) = self.get_signature_status(sig, blockhash, ancestors) {
trace!("get_signature_status_slow: got {}", forkid);
return ancestors
.get(&forkid)
.map(|id| (*id, res.clone()))
.or_else(|| Some((ancestors.len(), res)));
}
}
None
}
/// Add a known root fork. Roots are always valid ancestors.
/// After MAX_CACHE_ENTRIES, roots are removed, and any old signatures are cleared.
pub fn add_root(&mut self, fork: Slot) {
self.roots.insert(fork);
self.purge_roots();
}
pub fn roots(&self) -> &HashSet<u64> {
&self.roots
}
/// Insert a new signature for a specific slot.
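/// Only a random `CACHED_SIGNATURE_SIZE`-byte slice of the signature is cached; the slice
/// offset is chosen once per blockhash, keeping the map small at the cost of a negligible
/// collision chance.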
pub fn insert(&mut self, transaction_blockhash: &Hash, sig: &Signature, slot: Slot, res: T) {
let sig_index: usize;
if let Some(sig_map) = self.cache.get(transaction_blockhash) {
sig_index = sig_map.1;
} else {
sig_index =
thread_rng().gen_range(0, std::mem::size_of::<Hash>() - CACHED_SIGNATURE_SIZE);
}
let sig_map =
self.cache
.entry(*transaction_blockhash)
.or_insert((slot, sig_index, HashMap::new()));
sig_map.0 = std::cmp::max(slot, sig_map.0);
let index = sig_map.1;
let mut sig_slice = [0u8; CACHED_SIGNATURE_SIZE];
sig_slice.clone_from_slice(&sig.as_ref()[index..index + CACHED_SIGNATURE_SIZE]);
self.insert_with_slice(transaction_blockhash, slot, sig_index, sig_slice, res);
}
pub fn purge_roots(&mut self) {
if self.roots.len() > MAX_CACHE_ENTRIES {
if let Some(min) = self.roots.iter().min().cloned() {
self.roots.remove(&min);
self.cache.retain(|_, (fork, _, _)| *fork > min);
self.slot_deltas.retain(|slot, _| *slot > min);
}
}
}
/// Clear for testing
pub fn clear_signatures(&mut self) {
for v in self.cache.values_mut() {
v.2 = HashMap::new();
}
self.slot_deltas
.iter_mut()
.for_each(|(_, status)| status.lock().unwrap().clear());
}
// returns the signature statuses for each slot in the slots provided
pub fn slot_deltas(&self, slots: &[Slot]) -> Vec<SlotDelta<T>> {
let empty = Arc::new(Mutex::new(HashMap::new()));
slots
.iter()
.map(|slot| {
(
*slot,
self.roots.contains(slot),
self.slot_deltas.get(slot).unwrap_or_else(|| &empty).clone(),
)
})
.collect()
}
// replaying deltas into a status_cache allows "appending" data
pub fn append(&mut self, slot_deltas: &[SlotDelta<T>]) {
for (slot, is_root, statuses) in slot_deltas {
statuses
.lock()
.unwrap()
.iter()
.for_each(|(tx_hash, (sig_index, statuses))| {
for (sig_slice, res) in statuses.iter() {
self.insert_with_slice(&tx_hash, *slot, *sig_index, *sig_slice, res.clone())
}
});
if *is_root {
self.add_root(*slot);
}
}
}
pub fn from_slot_deltas(slot_deltas: &[SlotDelta<T>]) -> Self {
// play all deltas back into the status cache
let mut me = Self::default();
me.append(slot_deltas);
me
}
fn insert_with_slice(
&mut self,
transaction_blockhash: &Hash,
slot: Slot,
sig_index: usize,
sig_slice: [u8; CACHED_SIGNATURE_SIZE],
res: T,
) {
let sig_map =
self.cache
.entry(*transaction_blockhash)
.or_insert((slot, sig_index, HashMap::new()));
sig_map.0 = std::cmp::max(slot, sig_map.0);
let sig_forks = sig_map.2.entry(sig_slice).or_insert_with(|| vec![]);
sig_forks.push((slot, res.clone()));
let slot_deltas = self.slot_deltas.entry(slot).or_default();
let mut fork_entry = slot_deltas.lock().unwrap();
let (_, hash_entry) = fork_entry
.entry(*transaction_blockhash)
.or_insert((sig_index, vec![]));
hash_entry.push((sig_slice, res))
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::hash::hash;
type BankStatusCache = StatusCache<()>;
#[test]
fn test_empty_has_no_sigs() {
let sig = Signature::default();
let blockhash = hash(Hash::default().as_ref());
let status_cache = BankStatusCache::default();
assert_eq!(
status_cache.get_signature_status(&sig, &blockhash, &HashMap::new()),
None
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &HashMap::new()),
None
);
}
#[test]
fn test_find_sig_with_ancestor_fork() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = vec![(0, 1)].into_iter().collect();
status_cache.insert(&blockhash, &sig, 0, ());
assert_eq!(
status_cache.get_signature_status(&sig, &blockhash, &ancestors),
Some((0, ()))
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
Some((1, ()))
);
}
#[test]
fn test_find_sig_without_ancestor_fork() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = HashMap::new();
status_cache.insert(&blockhash, &sig, 1, ());
assert_eq!(
status_cache.get_signature_status(&sig, &blockhash, &ancestors),
None
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
None
);
}
#[test]
fn test_find_sig_with_root_ancestor_fork() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = HashMap::new();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.add_root(0);
assert_eq!(
status_cache.get_signature_status(&sig, &blockhash, &ancestors),
Some((0, ()))
);
}
#[test]
fn test_find_sig_with_root_ancestor_fork_max_len() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = vec![(2, 2)].into_iter().collect();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.add_root(0);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
Some((ancestors.len(), ()))
);
}
#[test]
fn test_insert_picks_latest_blockhash_fork() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = vec![(0, 0)].into_iter().collect();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, &sig, 1, ());
for i in 0..(MAX_CACHE_ENTRIES + 1) {
status_cache.add_root(i as u64);
}
assert!(status_cache
.get_signature_status(&sig, &blockhash, &ancestors)
.is_some());
}
#[test]
fn test_root_expires() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = HashMap::new();
status_cache.insert(&blockhash, &sig, 0, ());
for i in 0..(MAX_CACHE_ENTRIES + 1) {
status_cache.add_root(i as u64);
}
assert_eq!(
status_cache.get_signature_status(&sig, &blockhash, &ancestors),
None
);
assert_eq!(
status_cache.get_signature_status_slow(&sig, &ancestors),
None
);
}
#[test]
fn test_clear_signatures_sigs_are_gone() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = HashMap::new();
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.add_root(0);
status_cache.clear_signatures();
assert_eq!(
status_cache.get_signature_status(&sig, &blockhash, &ancestors),
None
);
}
#[test]
fn test_clear_signatures_insert_works() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let ancestors = HashMap::new();
status_cache.add_root(0);
status_cache.clear_signatures();
status_cache.insert(&blockhash, &sig, 0, ());
assert!(status_cache
.get_signature_status(&sig, &blockhash, &ancestors)
.is_some());
}
#[test]
fn test_signatures_slice() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
status_cache.clear_signatures();
status_cache.insert(&blockhash, &sig, 0, ());
let (_, index, sig_map) = status_cache.cache.get(&blockhash).unwrap();
let mut sig_slice = [0u8; CACHED_SIGNATURE_SIZE];
sig_slice.clone_from_slice(&sig.as_ref()[*index..*index + CACHED_SIGNATURE_SIZE]);
assert!(sig_map.get(&sig_slice).is_some());
}
#[test]
fn test_slot_deltas() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
status_cache.clear_signatures();
status_cache.insert(&blockhash, &sig, 0, ());
let slot_deltas = status_cache.slot_deltas(&[0]);
let cache = StatusCache::from_slot_deltas(&slot_deltas);
assert_eq!(cache, status_cache);
let slot_deltas = cache.slot_deltas(&[0]);
let cache = StatusCache::from_slot_deltas(&slot_deltas);
assert_eq!(cache, status_cache);
}
#[test]
fn test_roots_deltas() {
let sig = Signature::default();
let mut status_cache = BankStatusCache::default();
let blockhash = hash(Hash::default().as_ref());
let blockhash2 = hash(blockhash.as_ref());
status_cache.insert(&blockhash, &sig, 0, ());
status_cache.insert(&blockhash, &sig, 1, ());
status_cache.insert(&blockhash2, &sig, 1, ());
for i in 0..(MAX_CACHE_ENTRIES + 1) {
status_cache.add_root(i as u64);
}
let slots: Vec<_> = (0_u64..MAX_CACHE_ENTRIES as u64 + 1).collect();
let slot_deltas = status_cache.slot_deltas(&slots);
let cache = StatusCache::from_slot_deltas(&slot_deltas);
assert_eq!(cache, status_cache);
}
#[test]
fn test_age_sanity() {
assert!(MAX_CACHE_ENTRIES <= MAX_RECENT_BLOCKHASHES);
}
}
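// A minimal sketch of the intended snapshot round-trip, mirroring `test_slot_deltas`
// above. It relies on the `Hash`, `Signature`, and `StatusCache` items already in
// scope in this file; the function itself is illustrative, not part of the API.
fn snapshot_round_trip() {
    let mut cache: StatusCache<()> = StatusCache::default();
    let blockhash = Hash::default();
    // Record a signature status for slot 0 and then root the slot.
    cache.insert(&blockhash, &Signature::default(), 0, ());
    cache.add_root(0);
    // Persist only the per-slot deltas, then replay them into a fresh cache.
    let deltas = cache.slot_deltas(&[0]);
    let restored = StatusCache::from_slot_deltas(&deltas);
    assert_eq!(restored, cache);
}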

View File

@ -1,233 +0,0 @@
use crate::bank::Bank;
use solana_sdk::account::Account;
use solana_sdk::account_utils::State;
use solana_sdk::pubkey::Pubkey;
use solana_storage_api::storage_contract::StorageContract;
use std::collections::{HashMap, HashSet};
#[derive(Default, Clone, PartialEq, Debug, Deserialize, Serialize)]
pub struct StorageAccounts {
    /// pubkeys of validator storage accounts (credits are tracked in `points`)
    validator_accounts: HashSet<Pubkey>,
    /// pubkeys of replicator storage accounts (credits are tracked in `points`)
    replicator_accounts: HashSet<Pubkey>,
/// unclaimed points.
// 1 point == 1 storage account credit
points: HashMap<Pubkey, u64>,
}
pub fn is_storage(account: &Account) -> bool {
solana_storage_api::check_id(&account.owner)
}
impl StorageAccounts {
pub fn store(&mut self, pubkey: &Pubkey, account: &Account) {
if let Ok(storage_state) = account.state() {
if let StorageContract::ReplicatorStorage { credits, .. } = storage_state {
if account.lamports == 0 {
self.replicator_accounts.remove(pubkey);
} else {
self.replicator_accounts.insert(*pubkey);
self.points.insert(*pubkey, credits.current_epoch);
}
} else if let StorageContract::ValidatorStorage { credits, .. } = storage_state {
if account.lamports == 0 {
self.validator_accounts.remove(pubkey);
} else {
self.validator_accounts.insert(*pubkey);
self.points.insert(*pubkey, credits.current_epoch);
}
}
};
}
/// currently unclaimed points
pub fn points(&self) -> u64 {
self.points.values().sum()
}
/// "claims" points, resets points to 0
pub fn claim_points(&mut self) -> u64 {
let points = self.points();
self.points.clear();
points
}
}
pub fn validator_accounts(bank: &Bank) -> HashMap<Pubkey, Account> {
bank.storage_accounts()
.validator_accounts
.iter()
.filter_map(|account_id| {
bank.get_account(account_id)
                .map(|account| (*account_id, account))
})
.collect()
}
pub fn replicator_accounts(bank: &Bank) -> HashMap<Pubkey, Account> {
bank.storage_accounts()
.replicator_accounts
.iter()
.filter_map(|account_id| {
bank.get_account(account_id)
                .map(|account| (*account_id, account))
})
.collect()
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use crate::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::message::Message;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_storage_api::{
storage_contract::{StorageAccount, STORAGE_ACCOUNT_SPACE},
storage_instruction::{self, StorageAccountType},
storage_processor,
};
use std::sync::Arc;
#[test]
fn test_store_and_recover() {
let (genesis_block, mint_keypair) = create_genesis_block(1000);
let mint_pubkey = mint_keypair.pubkey();
let replicator_keypair = Keypair::new();
let replicator_pubkey = replicator_keypair.pubkey();
let validator_keypair = Keypair::new();
let validator_pubkey = validator_keypair.pubkey();
let mut bank = Bank::new(&genesis_block);
bank.add_instruction_processor(
solana_storage_api::id(),
storage_processor::process_instruction,
);
let bank = Arc::new(bank);
let bank_client = BankClient::new_shared(&bank);
bank_client
.transfer(10, &mint_keypair, &replicator_pubkey)
.unwrap();
let message = Message::new(storage_instruction::create_storage_account(
&mint_pubkey,
&Pubkey::default(),
&replicator_pubkey,
1,
StorageAccountType::Replicator,
));
bank_client.send_message(&[&mint_keypair], message).unwrap();
bank_client
.transfer(10, &mint_keypair, &validator_pubkey)
.unwrap();
let message = Message::new(storage_instruction::create_storage_account(
&mint_pubkey,
&Pubkey::default(),
&validator_pubkey,
1,
StorageAccountType::Validator,
));
bank_client.send_message(&[&mint_keypair], message).unwrap();
assert_eq!(validator_accounts(bank.as_ref()).len(), 1);
assert_eq!(replicator_accounts(bank.as_ref()).len(), 1);
}
#[test]
fn test_points() {
// note: storage_points == storage_credits
let credits = 42;
let mut storage_accounts = StorageAccounts::default();
assert_eq!(storage_accounts.points(), 0);
assert_eq!(storage_accounts.claim_points(), 0);
// create random validator and replicator accounts with `credits`
let ((validator_pubkey, validator_account), (replicator_pubkey, replicator_account)) =
create_storage_accounts_with_credits(credits);
storage_accounts.store(&validator_pubkey, &validator_account);
storage_accounts.store(&replicator_pubkey, &replicator_account);
// check that 2x credits worth of points are available
assert_eq!(storage_accounts.points(), credits * 2);
let ((validator_pubkey, validator_account), (replicator_pubkey, mut replicator_account)) =
create_storage_accounts_with_credits(credits);
storage_accounts.store(&validator_pubkey, &validator_account);
storage_accounts.store(&replicator_pubkey, &replicator_account);
// check that 4x credits worth of points are available
assert_eq!(storage_accounts.points(), credits * 2 * 2);
storage_accounts.store(&validator_pubkey, &validator_account);
storage_accounts.store(&replicator_pubkey, &replicator_account);
// check that storing again has no effect
assert_eq!(storage_accounts.points(), credits * 2 * 2);
let storage_contract = &mut replicator_account.state().unwrap();
if let StorageContract::ReplicatorStorage {
credits: account_credits,
..
} = storage_contract
{
account_credits.current_epoch += 1;
}
replicator_account.set_state(storage_contract).unwrap();
storage_accounts.store(&replicator_pubkey, &replicator_account);
// check that incremental store increases credits
assert_eq!(storage_accounts.points(), credits * 2 * 2 + 1);
assert_eq!(storage_accounts.claim_points(), credits * 2 * 2 + 1);
// check that once redeemed, the points are gone
assert_eq!(storage_accounts.claim_points(), 0);
}
pub fn create_storage_accounts_with_credits(
credits: u64,
) -> ((Pubkey, Account), (Pubkey, Account)) {
let validator_pubkey = Pubkey::new_rand();
let replicator_pubkey = Pubkey::new_rand();
let mut validator_account =
Account::new(1, STORAGE_ACCOUNT_SPACE as usize, &solana_storage_api::id());
let mut validator = StorageAccount::new(validator_pubkey, &mut validator_account);
validator
.initialize_storage(validator_pubkey, StorageAccountType::Validator)
.unwrap();
let storage_contract = &mut validator_account.state().unwrap();
if let StorageContract::ValidatorStorage {
credits: account_credits,
..
} = storage_contract
{
account_credits.current_epoch = credits;
}
validator_account.set_state(storage_contract).unwrap();
let mut replicator_account =
Account::new(1, STORAGE_ACCOUNT_SPACE as usize, &solana_storage_api::id());
let mut replicator = StorageAccount::new(replicator_pubkey, &mut replicator_account);
replicator
.initialize_storage(replicator_pubkey, StorageAccountType::Replicator)
.unwrap();
let storage_contract = &mut replicator_account.state().unwrap();
if let StorageContract::ReplicatorStorage {
credits: account_credits,
..
} = storage_contract
{
account_credits.current_epoch = credits;
}
replicator_account.set_state(storage_contract).unwrap();
(
(validator_pubkey, validator_account),
(replicator_pubkey, replicator_account),
)
}
}
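// A small sketch of how a rewards pass might consume storage points using the
// `StorageAccounts` type above; the function name and flow are illustrative.
fn claim_epoch_points(storage_accounts: &mut StorageAccounts) -> u64 {
    // Points accumulate as accounts are `store()`d with growing epoch credits...
    let available = storage_accounts.points();
    // ...and are drained exactly once per rewards pass.
    let claimed = storage_accounts.claim_points();
    debug_assert_eq!(available, claimed);
    claimed
}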

View File

@ -1,366 +0,0 @@
use log::*;
use solana_sdk::account::KeyedAccount;
use solana_sdk::instruction::InstructionError;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::system_instruction::{SystemError, SystemInstruction};
use solana_sdk::system_program;
use solana_sdk::sysvar;
const FROM_ACCOUNT_INDEX: usize = 0;
const TO_ACCOUNT_INDEX: usize = 1;
fn create_system_account(
keyed_accounts: &mut [KeyedAccount],
lamports: u64,
space: u64,
program_id: &Pubkey,
) -> Result<(), SystemError> {
if !system_program::check_id(&keyed_accounts[FROM_ACCOUNT_INDEX].account.owner) {
debug!(
"CreateAccount: invalid account[from] owner {} ",
&keyed_accounts[FROM_ACCOUNT_INDEX].account.owner
);
return Err(SystemError::SourceNotSystemAccount);
}
if !keyed_accounts[TO_ACCOUNT_INDEX].account.data.is_empty()
|| !system_program::check_id(&keyed_accounts[TO_ACCOUNT_INDEX].account.owner)
{
debug!(
"CreateAccount: invalid argument; account {} already in use",
keyed_accounts[TO_ACCOUNT_INDEX].unsigned_key()
);
return Err(SystemError::AccountAlreadyInUse);
}
if sysvar::check_id(&program_id) {
debug!(
"CreateAccount: invalid argument; program id {} invalid",
program_id
);
return Err(SystemError::InvalidProgramId);
}
if sysvar::is_sysvar_id(&keyed_accounts[TO_ACCOUNT_INDEX].unsigned_key()) {
debug!(
"CreateAccount: invalid argument; account id {} invalid",
            keyed_accounts[TO_ACCOUNT_INDEX].unsigned_key()
);
return Err(SystemError::InvalidAccountId);
}
if lamports > keyed_accounts[FROM_ACCOUNT_INDEX].account.lamports {
debug!(
"CreateAccount: insufficient lamports ({}, need {})",
keyed_accounts[FROM_ACCOUNT_INDEX].account.lamports, lamports
);
return Err(SystemError::ResultWithNegativeLamports);
}
keyed_accounts[FROM_ACCOUNT_INDEX].account.lamports -= lamports;
keyed_accounts[TO_ACCOUNT_INDEX].account.lamports += lamports;
keyed_accounts[TO_ACCOUNT_INDEX].account.owner = *program_id;
keyed_accounts[TO_ACCOUNT_INDEX].account.data = vec![0; space as usize];
keyed_accounts[TO_ACCOUNT_INDEX].account.executable = false;
Ok(())
}
fn assign_account_to_program(
keyed_accounts: &mut [KeyedAccount],
program_id: &Pubkey,
) -> Result<(), SystemError> {
keyed_accounts[FROM_ACCOUNT_INDEX].account.owner = *program_id;
Ok(())
}
fn transfer_lamports(
keyed_accounts: &mut [KeyedAccount],
lamports: u64,
) -> Result<(), SystemError> {
if lamports > keyed_accounts[FROM_ACCOUNT_INDEX].account.lamports {
debug!(
"Transfer: insufficient lamports ({}, need {})",
keyed_accounts[FROM_ACCOUNT_INDEX].account.lamports, lamports
);
return Err(SystemError::ResultWithNegativeLamports);
}
keyed_accounts[FROM_ACCOUNT_INDEX].account.lamports -= lamports;
keyed_accounts[TO_ACCOUNT_INDEX].account.lamports += lamports;
Ok(())
}
pub fn process_instruction(
_program_id: &Pubkey,
keyed_accounts: &mut [KeyedAccount],
data: &[u8],
) -> Result<(), InstructionError> {
if let Ok(instruction) = bincode::deserialize(data) {
trace!("process_instruction: {:?}", instruction);
trace!("keyed_accounts: {:?}", keyed_accounts);
        // All system instructions require that account_keys[0] be a signer
if keyed_accounts[FROM_ACCOUNT_INDEX].signer_key().is_none() {
debug!("account[from] is unsigned");
return Err(InstructionError::MissingRequiredSignature);
}
match instruction {
SystemInstruction::CreateAccount {
lamports,
space,
program_id,
} => create_system_account(keyed_accounts, lamports, space, &program_id),
SystemInstruction::Assign { program_id } => {
if !system_program::check_id(&keyed_accounts[FROM_ACCOUNT_INDEX].account.owner) {
return Err(InstructionError::IncorrectProgramId);
}
assign_account_to_program(keyed_accounts, &program_id)
}
SystemInstruction::Transfer { lamports } => transfer_lamports(keyed_accounts, lamports),
}
.map_err(|e| InstructionError::CustomError(e as u32))
} else {
debug!("Invalid instruction data: {:?}", data);
Err(InstructionError::InvalidInstructionData)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::bank::Bank;
use crate::bank_client::BankClient;
use bincode::serialize;
use solana_sdk::account::Account;
use solana_sdk::client::SyncClient;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::instruction::{AccountMeta, Instruction, InstructionError};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_program;
use solana_sdk::transaction::TransactionError;
#[test]
fn test_create_system_account() {
let new_program_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &system_program::id());
let to = Pubkey::new_rand();
let mut to_account = Account::new(0, 0, &Pubkey::default());
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new(&to, false, &mut to_account),
];
create_system_account(&mut keyed_accounts, 50, 2, &new_program_owner).unwrap();
let from_lamports = from_account.lamports;
let to_lamports = to_account.lamports;
let to_owner = to_account.owner;
let to_data = to_account.data.clone();
assert_eq!(from_lamports, 50);
assert_eq!(to_lamports, 50);
assert_eq!(to_owner, new_program_owner);
assert_eq!(to_data, [0, 0]);
}
#[test]
fn test_create_negative_lamports() {
// Attempt to create account with more lamports than remaining in from_account
let new_program_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &system_program::id());
let to = Pubkey::new_rand();
let mut to_account = Account::new(0, 0, &Pubkey::default());
let unchanged_account = to_account.clone();
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new(&to, false, &mut to_account),
];
let result = create_system_account(&mut keyed_accounts, 150, 2, &new_program_owner);
assert_eq!(result, Err(SystemError::ResultWithNegativeLamports));
let from_lamports = from_account.lamports;
assert_eq!(from_lamports, 100);
assert_eq!(to_account, unchanged_account);
}
#[test]
fn test_create_already_owned() {
// Attempt to create system account in account already owned by another program
let new_program_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &system_program::id());
let original_program_owner = Pubkey::new(&[5; 32]);
let owned_key = Pubkey::new_rand();
let mut owned_account = Account::new(0, 0, &original_program_owner);
let unchanged_account = owned_account.clone();
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new(&owned_key, false, &mut owned_account),
];
let result = create_system_account(&mut keyed_accounts, 50, 2, &new_program_owner);
assert_eq!(result, Err(SystemError::AccountAlreadyInUse));
let from_lamports = from_account.lamports;
assert_eq!(from_lamports, 100);
assert_eq!(owned_account, unchanged_account);
}
#[test]
fn test_create_sysvar_invalid_id() {
        // Attempt to create accounts using a sysvar-owned program id or a sysvar account id
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &system_program::id());
let to = Pubkey::new_rand();
let mut to_account = Account::default();
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new(&to, false, &mut to_account),
];
// fail to create a sysvar::id() owned account
let result = create_system_account(&mut keyed_accounts, 50, 2, &sysvar::id());
assert_eq!(result, Err(SystemError::InvalidProgramId));
let to = sysvar::fees::id();
let mut to_account = Account::default();
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new(&to, false, &mut to_account),
];
// fail to create an account with a sysvar id
let result = create_system_account(&mut keyed_accounts, 50, 2, &system_program::id());
assert_eq!(result, Err(SystemError::InvalidAccountId));
let from_lamports = from_account.lamports;
assert_eq!(from_lamports, 100);
}
#[test]
fn test_create_data_populated() {
// Attempt to create system account in account with populated data
let new_program_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &system_program::id());
let populated_key = Pubkey::new_rand();
let mut populated_account = Account {
data: vec![0, 1, 2, 3],
..Account::default()
};
let unchanged_account = populated_account.clone();
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new(&populated_key, false, &mut populated_account),
];
let result = create_system_account(&mut keyed_accounts, 50, 2, &new_program_owner);
assert_eq!(result, Err(SystemError::AccountAlreadyInUse));
assert_eq!(from_account.lamports, 100);
assert_eq!(populated_account, unchanged_account);
}
#[test]
fn test_create_not_system_account() {
let other_program = Pubkey::new(&[9; 32]);
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &other_program);
let to = Pubkey::new_rand();
let mut to_account = Account::new(0, 0, &Pubkey::default());
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new(&to, false, &mut to_account),
];
let result = create_system_account(&mut keyed_accounts, 50, 2, &other_program);
assert_eq!(result, Err(SystemError::SourceNotSystemAccount));
}
#[test]
fn test_assign_account_to_program() {
let new_program_owner = Pubkey::new(&[9; 32]);
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &system_program::id());
let mut keyed_accounts = [KeyedAccount::new(&from, true, &mut from_account)];
assign_account_to_program(&mut keyed_accounts, &new_program_owner).unwrap();
let from_owner = from_account.owner;
assert_eq!(from_owner, new_program_owner);
// Attempt to assign account not owned by system program
let another_program_owner = Pubkey::new(&[8; 32]);
keyed_accounts = [KeyedAccount::new(&from, true, &mut from_account)];
let instruction = SystemInstruction::Assign {
program_id: another_program_owner,
};
let data = serialize(&instruction).unwrap();
let result = process_instruction(&system_program::id(), &mut keyed_accounts, &data);
assert_eq!(result, Err(InstructionError::IncorrectProgramId));
assert_eq!(from_account.owner, new_program_owner);
}
#[test]
fn test_transfer_lamports() {
let from = Pubkey::new_rand();
let mut from_account = Account::new(100, 0, &Pubkey::new(&[2; 32])); // account owner should not matter
let to = Pubkey::new_rand();
let mut to_account = Account::new(1, 0, &Pubkey::new(&[3; 32])); // account owner should not matter
let mut keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new_credit_only(&to, false, &mut to_account),
];
transfer_lamports(&mut keyed_accounts, 50).unwrap();
let from_lamports = from_account.lamports;
let to_lamports = to_account.lamports;
assert_eq!(from_lamports, 50);
assert_eq!(to_lamports, 51);
// Attempt to move more lamports than remaining in from_account
keyed_accounts = [
KeyedAccount::new(&from, true, &mut from_account),
KeyedAccount::new_credit_only(&to, false, &mut to_account),
];
let result = transfer_lamports(&mut keyed_accounts, 100);
assert_eq!(result, Err(SystemError::ResultWithNegativeLamports));
assert_eq!(from_account.lamports, 50);
assert_eq!(to_account.lamports, 51);
}
#[test]
fn test_system_unsigned_transaction() {
let (genesis_block, alice_keypair) = create_genesis_block(100);
let alice_pubkey = alice_keypair.pubkey();
let mallory_keypair = Keypair::new();
let mallory_pubkey = mallory_keypair.pubkey();
        // Fund mallory's account to bypass the AccountNotFound error
let bank = Bank::new(&genesis_block);
let bank_client = BankClient::new(bank);
bank_client
.transfer(50, &alice_keypair, &mallory_pubkey)
.unwrap();
// Erroneously sign transaction with recipient account key
// No signature case is tested by bank `test_zero_signatures()`
let account_metas = vec![
AccountMeta::new(alice_pubkey, false),
AccountMeta::new(mallory_pubkey, true),
];
let malicious_instruction = Instruction::new(
system_program::id(),
&SystemInstruction::Transfer { lamports: 10 },
account_metas,
);
assert_eq!(
bank_client
.send_instruction(&mallory_keypair, malicious_instruction)
.unwrap_err()
.unwrap(),
TransactionError::InstructionError(0, InstructionError::MissingRequiredSignature)
);
assert_eq!(bank_client.get_balance(&alice_pubkey).unwrap(), 50);
assert_eq!(bank_client.get_balance(&mallory_pubkey).unwrap(), 50);
}
}
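// A minimal sketch of driving `process_instruction` directly with a serialized
// `Transfer`, in the style of the unit tests above. It reuses the `use` items at
// the top of this file plus `solana_sdk::account::Account`; keys and amounts are
// illustrative.
use solana_sdk::account::Account;

fn transfer_directly() {
    let from = Pubkey::new_rand();
    let to = Pubkey::new_rand();
    let mut from_account = Account::new(100, 0, &system_program::id());
    let mut to_account = Account::new(0, 0, &system_program::id());
    let mut keyed_accounts = [
        // The funding account must be a signer or the processor rejects the instruction.
        KeyedAccount::new(&from, true, &mut from_account),
        KeyedAccount::new(&to, false, &mut to_account),
    ];
    let data = bincode::serialize(&SystemInstruction::Transfer { lamports: 25 }).unwrap();
    process_instruction(&system_program::id(), &mut keyed_accounts, &data).unwrap();
    assert_eq!(from_account.lamports, 75);
    assert_eq!(to_account.lamports, 25);
}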

View File

@ -1,106 +0,0 @@
use crate::bank::Bank;
use solana_sdk::transaction::{Result, Transaction};
// Represents the results of trying to lock a set of accounts
pub struct TransactionBatch<'a, 'b> {
lock_results: Vec<Result<()>>,
bank: &'a Bank,
transactions: &'b [Transaction],
iteration_order: Option<Vec<usize>>,
pub(crate) needs_unlock: bool,
}
impl<'a, 'b> TransactionBatch<'a, 'b> {
pub fn new(
lock_results: Vec<Result<()>>,
bank: &'a Bank,
transactions: &'b [Transaction],
iteration_order: Option<Vec<usize>>,
) -> Self {
assert_eq!(lock_results.len(), transactions.len());
if let Some(iteration_order) = &iteration_order {
assert_eq!(transactions.len(), iteration_order.len());
}
Self {
lock_results,
bank,
transactions,
iteration_order,
needs_unlock: true,
}
}
pub fn lock_results(&self) -> &Vec<Result<()>> {
&self.lock_results
}
pub fn transactions(&self) -> &[Transaction] {
self.transactions
}
pub fn iteration_order(&self) -> Option<&[usize]> {
self.iteration_order.as_ref().map(|v| v.as_slice())
}
pub fn bank(&self) -> &Bank {
self.bank
}
}
// Unlock all locked accounts in destructor.
impl<'a, 'b> Drop for TransactionBatch<'a, 'b> {
fn drop(&mut self) {
self.bank.unlock_accounts(self)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_block_with_leader, GenesisBlockInfo};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction;
#[test]
fn test_transaction_batch() {
let (bank, txs) = setup();
// Test getting locked accounts
let batch = bank.prepare_batch(&txs, None);
// Grab locks
assert!(batch.lock_results().iter().all(|x| x.is_ok()));
// Trying to grab locks again should fail
let batch2 = bank.prepare_batch(&txs, None);
assert!(batch2.lock_results().iter().all(|x| x.is_err()));
// Drop the first set of locks
drop(batch);
// Now grabbing locks should work again
let batch2 = bank.prepare_batch(&txs, None);
assert!(batch2.lock_results().iter().all(|x| x.is_ok()));
}
fn setup() -> (Bank, Vec<Transaction>) {
let dummy_leader_pubkey = Pubkey::new_rand();
let GenesisBlockInfo {
genesis_block,
mint_keypair,
..
} = create_genesis_block_with_leader(500, &dummy_leader_pubkey, 100);
let bank = Bank::new(&genesis_block);
let pubkey = Pubkey::new_rand();
let keypair2 = Keypair::new();
let pubkey2 = Pubkey::new_rand();
let txs = vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_block.hash()),
system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_block.hash()),
];
(bank, txs)
}
}
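// A sketch of the intended locking pattern: the accounts referenced by a batch stay
// locked for the batch's lifetime and are released in `Drop`, so scoping the batch
// is the unlock. `bank` and `txs` are assumed to come from a setup like `setup()` above.
fn process_scoped(bank: &Bank, txs: &[Transaction]) {
    {
        let batch = bank.prepare_batch(txs, None);
        // Execute against `batch` while the account locks are held...
        assert!(batch.lock_results().iter().all(|r| r.is_ok()));
    } // ...then the batch drops, `Bank::unlock_accounts` runs, and the locks are freed.
    // A second batch over the same transactions can now take the locks again.
    let batch2 = bank.prepare_batch(txs, None);
    assert!(batch2.lock_results().iter().all(|r| r.is_ok()));
}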

View File

@ -1,73 +0,0 @@
use std::ops::Index;
/// OrderedIterator allows iterating over a slice in a caller-specified order
pub struct OrderedIterator<'a, T: 'a> {
element_order: Option<&'a [usize]>,
current: usize,
vec: &'a [T],
}
impl<'a, T> OrderedIterator<'a, T> {
pub fn new(vec: &'a [T], element_order: Option<&'a [usize]>) -> OrderedIterator<'a, T> {
if let Some(custom_order) = element_order {
            assert_eq!(custom_order.len(), vec.len());
}
OrderedIterator {
element_order,
current: 0,
vec,
}
}
}
impl<'a, T> Iterator for OrderedIterator<'a, T> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.current >= self.vec.len() {
None
} else {
            let index = if let Some(custom_order) = self.element_order {
                custom_order[self.current]
            } else {
                self.current
            };
self.current += 1;
Some(self.vec.index(index))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_ordered_iterator_custom_order() {
let vec: Vec<usize> = vec![1, 2, 3, 4];
let custom_order: Vec<usize> = vec![3, 1, 0, 2];
let ordered_iterator = OrderedIterator::new(&vec, Some(&custom_order));
let expected_response: Vec<usize> = vec![4, 2, 1, 3];
let resp: Vec<(&usize, &usize)> = ordered_iterator
.zip(expected_response.iter())
.filter(|(actual_elem, expected_elem)| *actual_elem == *expected_elem)
.collect();
assert_eq!(resp.len(), custom_order.len());
}
#[test]
fn test_ordered_iterator_original_order() {
let vec: Vec<usize> = vec![1, 2, 3, 4];
let ordered_iterator = OrderedIterator::new(&vec, None);
let resp: Vec<(&usize, &usize)> = ordered_iterator
.zip(vec.iter())
.filter(|(actual_elem, expected_elem)| *actual_elem == *expected_elem)
.collect();
assert_eq!(resp.len(), vec.len());
}
}
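// A small sketch of the intended use: visit elements in a caller-supplied order
// without reordering the underlying slice. The helper name is illustrative.
fn collect_in_order<T: Clone>(items: &[T], order: Option<&[usize]>) -> Vec<T> {
    // `order`, when present, must be the same length as `items` (asserted in `new`).
    OrderedIterator::new(items, order).cloned().collect()
}
// e.g. collect_in_order(&[10, 20, 30], Some(&[2, 0, 1])) yields vec![30, 10, 20].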

View File

@ -1,24 +0,0 @@
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_runtime::loader_utils::create_invoke_instruction;
use solana_sdk::client::SyncClient;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::KeypairUtil;
#[test]
fn test_program_native_noop() {
solana_logger::setup();
let (genesis_block, alice_keypair) = create_genesis_block(50);
let program_id = Pubkey::new_rand();
let bank = Bank::new(&genesis_block);
bank.register_native_instruction_processor("solana_noop_program", &program_id);
// Call user program
let instruction = create_invoke_instruction(alice_keypair.pubkey(), program_id, &1u8);
let bank_client = BankClient::new(bank);
bank_client
.send_instruction(&alice_keypair, instruction)
.unwrap();
}