Compare commits

...

42 Commits

Author SHA1 Message Date
d9a9d6547f Fix incorrectly signed CrdsValues (#6696) (#6699)
automerge
2019-11-03 10:57:59 -08:00
c86bf60a40 Send repairman shreds to the repair socket (bp #6671) (#6673)
automerge
2019-11-01 17:20:48 -07:00
03ed4b868d Get Azure provider working again (#6659) (#6688)
automerge
2019-11-01 14:33:13 -07:00
de83bce0ce Rework get_slot_meta (#6642) (#6665)
automerge
2019-10-31 22:24:47 -07:00
8b494272bf Have cargo-install-all.sh also look in program target dirs for so's (#6631) (#6666)
automerge
2019-10-31 16:02:17 -07:00
ee5c890c5d Fix check in should_insert_data_shred (#6649) (#6650)
automerge
2019-10-31 00:12:59 -07:00
a4f5397ea4 log bench (#6643) (#6648)
automerge
2019-10-30 20:39:00 -07:00
66f3b4a3d6 Add ~/.cargo/bin to PATH (#6641) (#6646)
automerge
2019-10-30 20:15:34 -07:00
3a4cd94391 Fix PohRecorder Metrics (#6644) (#6645)
automerge
2019-10-30 19:41:35 -07:00
f4658f3be2 Install ag on nodes (#6634) (#6638)
automerge

(cherry picked from commit 7bb224f54a)
2019-10-30 19:41:18 -07:00
41c70b9f41 Rework get_index_meta (#6636) (#6639)
automerge
2019-10-30 17:41:59 -07:00
d1c92db7ab Ignore flaky move test (#6616) (#6637)
automerge
2019-10-30 17:09:14 -07:00
a8721a5e19 Add --no-multi-client (#6624) (#6633)
automerge
2019-10-30 15:29:54 -07:00
dec9d00a64 Remove stray println 2019-10-30 14:44:51 -07:00
09252ef084 Bump jsonrpc-http-server from 14.0.1 to 14.0.3 (#6597) (#6622)
Bumps [jsonrpc-http-server](https://github.com/paritytech/jsonrpc) from 14.0.1 to 14.0.3.
- [Release notes](https://github.com/paritytech/jsonrpc/releases)
- [Commits](https://github.com/paritytech/jsonrpc/compare/v14.0.1...v14.0.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-10-30 10:29:03 -07:00
c9d568c910 Re-enable tests (#6615) (#6620)
automerge
2019-10-29 22:11:40 -07:00
b054f5f12d Remove move feature (#6605) (#6612)
automerge
2019-10-29 20:50:32 -07:00
23b4df0fef Update cargo versions to 0.20.2 (#6613) 2019-10-29 18:42:31 -06:00
ca35841cb1 Fix Weighted Best calculation (#6606) (#6610)
automerge

(cherry picked from commit ef3aa2731c)
2019-10-29 17:16:02 -07:00
33d77357bf Expand CF's (#6528) (#6609)
automerge
2019-10-29 16:47:43 -07:00
22e84abe5a Fixup running-validator docs (bp #6607) (#6608)
* Fixup running-validator docs (#6607)

* Fixup validator docs

* Remove $

(cherry picked from commit 4156cea704)
2019-10-29 16:23:32 -07:00
9b532b16a9 Add Ramp TPS table 2019-10-29 16:19:29 -07:00
c5a98a5b57 Update libra to new fork (#6523) (#6601)
automerge
2019-10-29 14:04:02 -07:00
22d60d496b Remove unstable default-run directive (#6599) (#6600)
automerge
2019-10-29 11:09:44 -07:00
8243792430 Ensure nofiles is not capped at 1024 on a node reboot 2019-10-28 23:16:55 -07:00
1d1d85e0c5 0.20.0 -> 0.20.1 2019-10-28 22:27:48 -07:00
9b0e40d1dc Revert "nodns"
This reverts commit cd2c09c473.
2019-10-28 21:01:36 -07:00
a231fbe978 Ensure redis-server is started on a reboot 2019-10-28 20:59:06 -07:00
cd2c09c473 nodns 2019-10-28 20:59:05 -07:00
774cd48cb1 Implement allowing validator boot failure into automation (#6589) (#6593)
automerge
2019-10-28 16:25:00 -07:00
d580603cd4 Run localnet in development mode (#6587) (#6592)
automerge
2019-10-28 15:57:53 -07:00
f0c931ea84 Demote blocktree metrics log level (#6590) (#6591)
automerge
2019-10-28 15:17:49 -07:00
74b2eb4328 Update install instructions to point at 0.20.0 (#6585) 2019-10-28 12:26:18 -06:00
f1e9a944ef Invoke on-reboot from cloud startup script to avoid racing with cron (#6579) (#6580)
automerge

(cherry picked from commit 0c14ca58c7)
2019-10-27 13:55:37 -07:00
4cb38ddf01 -a is optional 2019-10-26 22:50:08 -07:00
593fde628c Blocktree metrics (#6527) (#6577)
automerge
2019-10-26 17:00:38 -07:00
34fa025b17 Fix race in blocktree.insert_shreds (#6550) (#6576)
automerge
2019-10-26 04:46:01 -07:00
33843f824a Bootstrap leader's stake is now authorized to the bootstrap leader's identity key (#6571) (#6574)
(cherry picked from commit 68acfd36d0)
2019-10-26 00:05:39 -07:00
542bda0a6f Add NET_NUM_xyz variables 2019-10-25 22:59:54 -07:00
d8bdbbf291 optimize verify_instruction (#6539) (#6572)
automerge
2019-10-25 22:19:16 -07:00
168b0f71f5 Disable sigverify on blockstreamer node
This node gets overloaded at high TPS trying to manage both a validator
and the blockexplorer. Reduce its workload by turning off sigverify,
which doesn't really matter since this node doesn't even vote
2019-10-25 21:33:32 -07:00
be79d97dde Increase node start stagger (#6566) (#6567)
automerge
2019-10-25 17:20:13 -07:00
142 changed files with 14072 additions and 2157 deletions

Cargo.lock (generated, 1540 lines changed)

File diff suppressed because it is too large.

View File

@@ -34,9 +34,6 @@ members = [
     "programs/exchange_api",
     "programs/exchange_program",
     "programs/failure_program",
-    "programs/move_loader_api",
-    "programs/move_loader_program",
-    "programs/librapay_api",
     "programs/noop_program",
     "programs/stake_api",
     "programs/stake_program",
@@ -61,4 +58,7 @@ members = [
 exclude = [
     "programs/bpf",
+    "programs/move_loader_api",
+    "programs/move_loader_program",
+    "programs/librapay_api",
 ]

View File

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-archiver"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,9 +10,9 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 console = "0.9.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }

View File

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
 [dependencies]
 log = "0.4.6"
 rayon = "1.2.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-ledger = { path = "../ledger", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-ledger = { path = "../ledger", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-measure = { path = "../measure", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
 rand = "0.6.5"
 crossbeam-channel = "0.3"

View File

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -24,16 +24,16 @@ serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
 # solana-runtime = { path = "../solana/runtime"}
-solana-core = { path = "../core", version = "0.20.0" }
-solana-genesis = { path = "../genesis", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-genesis = { path = "../genesis", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.2" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
 untrusted = "0.7.0"
 ws = "0.9.1"

View File

@@ -2,13 +2,13 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }

View File

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -16,23 +16,23 @@ serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-genesis = { path = "../genesis", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-librapay-api = { path = "../programs/librapay_api", version = "0.20.0", optional = true }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
-solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
-solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-genesis = { path = "../genesis", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
+solana-librapay-api = { path = "../programs/librapay_api", version = "0.20.2", optional = true }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-measure = { path = "../measure", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
+solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.2", optional = true }
+solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.2", optional = true }
 [dev-dependencies]
 serial_test = "0.2.0"
 serial_test_derive = "0.2.0"
 [features]
-move = ["solana-core/move", "solana-librapay-api", "solana-move-loader-program", "solana-move-loader-api"]
+move = ["solana-librapay-api", "solana-move-loader-program", "solana-move-loader-api"]

View File

@@ -18,7 +18,7 @@ use solana_sdk::{
     pubkey::Pubkey,
     signature::{Keypair, KeypairUtil},
     system_instruction, system_transaction,
-    timing::{duration_as_ms, duration_as_s, timestamp},
+    timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
     transaction::Transaction,
 };
 use std::{
@@ -158,12 +158,13 @@ where
     let mut reclaim_lamports_back_to_source_account = false;
     let mut i = keypair0_balance;
     let mut blockhash = Hash::default();
-    let mut blockhash_time = Instant::now();
+    let mut blockhash_time;
     while start.elapsed() < duration {
         // ping-pong between source and destination accounts for each loop iteration
         // this seems to be faster than trying to determine the balance of individual
         // accounts
         let len = tx_count as usize;
+        blockhash_time = Instant::now();
         if let Ok((new_blockhash, _fee_calculator)) = client.get_new_blockhash(&blockhash) {
             blockhash = new_blockhash;
         } else {
@@ -173,13 +174,19 @@ where
             sleep(Duration::from_millis(100));
             continue;
         }
-        info!(
-            "Took {} ms for new blockhash",
-            duration_as_ms(&blockhash_time.elapsed())
+        datapoint_debug!(
+            "bench-tps-get_blockhash",
+            ("duration", duration_as_us(&blockhash_time.elapsed()), i64)
         );
         blockhash_time = Instant::now();
         let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
         metrics_submit_lamport_balance(balance);
+        datapoint_debug!(
+            "bench-tps-get_balance",
+            ("duration", duration_as_us(&blockhash_time.elapsed()), i64)
+        );
         generate_txs(
             &shared_txs,
             &blockhash,
@@ -367,7 +374,7 @@ fn generate_txs(
     );
     datapoint_debug!(
         "bench-tps-generate_txs",
-        ("duration", duration_as_ms(&duration), i64)
+        ("duration", duration_as_us(&duration), i64)
     );
     let sz = transactions.len() / threads;
@@ -432,7 +439,7 @@ fn do_tx_transfers<T: Client>(
         );
         datapoint_debug!(
             "bench-tps-do_tx_transfers",
-            ("duration", duration_as_ms(&transfer_start.elapsed()), i64),
+            ("duration", duration_as_us(&transfer_start.elapsed()), i64),
             ("count", tx_len, i64)
         );
     }
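The bench.rs hunks above replace an `info!` log line with `datapoint_debug!` metrics and switch the reported durations from milliseconds to microseconds. Below is a rough standalone sketch of the same measurement pattern using only the Rust standard library; the `report_duration` helper is a made-up stand-in for the metrics macro and is not part of the Solana codebase.

```rust
use std::time::Instant;

// Illustrative stand-in for a metrics sink such as the `datapoint_debug!` macro.
fn report_duration(name: &str, micros: u128) {
    println!("metric={} duration_us={}", name, micros);
}

fn main() {
    // Time one step and report it in microseconds, mirroring the
    // duration_as_ms -> duration_as_us switch in the hunks above.
    let mut step_start = Instant::now();
    let _work: u64 = (0..1_000_000u64).sum(); // placeholder for fetching a new blockhash
    report_duration("bench-tps-get_blockhash", step_start.elapsed().as_micros());

    // Re-arm the timer before the next measured step, as the patch does
    // before querying the balance, so each metric covers only its own operation.
    step_start = Instant::now();
    let _balance = 42u64; // placeholder for a balance query
    report_duration("bench-tps-get_balance", step_start.elapsed().as_micros());
}
```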

View File

@@ -21,6 +21,7 @@ pub struct Config {
     pub write_to_client_file: bool,
     pub read_from_client_file: bool,
     pub target_lamports_per_signature: u64,
+    pub multi_client: bool,
     pub use_move: bool,
     pub num_lamports_per_account: u64,
 }
@@ -41,6 +42,7 @@ impl Default for Config {
             write_to_client_file: false,
             read_from_client_file: false,
             target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
+            multi_client: true,
             use_move: false,
             num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
         }
@@ -108,6 +110,11 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .long("use-move")
                 .help("Use Move language transactions to perform transfers."),
         )
+        .arg(
+            Arg::with_name("no-multi-client")
+                .long("no-multi-client")
+                .help("Disable multi-client support, only transact with the entrypoint."),
+        )
         .arg(
             Arg::with_name("tx_count")
                 .long("tx_count")
@@ -229,6 +236,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     }
     args.use_move = matches.is_present("use-move");
+    args.multi_client = !matches.is_present("no-multi-client");
     if let Some(v) = matches.value_of("num_lamports_per_account") {
         args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");
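The cli.rs hunks wire a negative `--no-multi-client` flag into a positive `multi_client` config field. Here is a minimal, self-contained sketch of that clap 2.x pattern (clap 2.33 appears in the Cargo.toml diffs above); the binary name and the `Config` struct below are illustrative only, not the actual bench-tps config.

```rust
use clap::{App, Arg};

struct Config {
    multi_client: bool,
}

fn main() {
    let matches = App::new("bench-sketch")
        .arg(
            Arg::with_name("no-multi-client")
                .long("no-multi-client")
                .help("Disable multi-client support, only transact with the entrypoint."),
        )
        .get_matches();

    // The flag is "negative" on the command line, so invert it to get the
    // positive setting stored in the config, as the extract_args change does.
    let config = Config {
        multi_client: !matches.is_present("no-multi-client"),
    };
    println!("multi_client = {}", config.multi_client);
}
```

Passing `--no-multi-client` flips the field to `false`; omitting the flag keeps the default `true`, matching the `Default` impl change above.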

View File

@@ -1,7 +1,7 @@
 use log::*;
 use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
 use solana_bench_tps::cli;
-use solana_core::gossip_service::{discover_cluster, get_multi_client};
+use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
 use solana_genesis::Base64Account;
 use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -29,6 +29,7 @@ fn main() {
         read_from_client_file,
         target_lamports_per_signature,
         use_move,
+        multi_client,
         num_lamports_per_account,
         ..
     } = &cli_config;
@@ -70,15 +71,19 @@ fn main() {
         exit(1);
     });
-    let (client, num_clients) = get_multi_client(&nodes);
+    let client = if *multi_client {
+        let (client, num_clients) = get_multi_client(&nodes);
         if nodes.len() < num_clients {
             eprintln!(
                 "Error: Insufficient nodes discovered. Expecting {} or more",
                 num_nodes
             );
             exit(1);
         }
+        client
+    } else {
+        get_client(&nodes)
+    };
     let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
         let path = Path::new(&client_ids_and_stake_file);
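In main.rs the new flag then selects between an aggregated multi-node client and a plain entrypoint client. The sketch below reproduces only that control flow with placeholder types; the real code uses `get_client` and `get_multi_client` from `solana_core::gossip_service`, which return Solana clients rather than the toy struct used here.

```rust
// Placeholder client type; the real ones come from solana_core::gossip_service.
struct Client {
    label: &'static str,
}

fn get_multi_client(nodes: &[&str]) -> (Client, usize) {
    (Client { label: "multi" }, nodes.len())
}

fn get_client(_nodes: &[&str]) -> Client {
    Client { label: "entrypoint-only" }
}

fn main() {
    let multi_client = true; // would come from the parsed CLI config
    let num_nodes = 3;
    let nodes = ["entrypoint.testnet", "node-1", "node-2"];

    let client = if multi_client {
        let (client, num_clients) = get_multi_client(&nodes);
        // Sanity-check that enough nodes were discovered before using the
        // aggregate client, mirroring the shape of the diff above.
        if nodes.len() < num_clients {
            eprintln!(
                "Error: Insufficient nodes discovered. Expecting {} or more",
                num_nodes
            );
            std::process::exit(1);
        }
        client
    } else {
        get_client(&nodes)
    };
    println!("using the {} client", client.label);
}
```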

View File

@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
 ## Usage
 ### solana-cli
 ```text
-solana-cli 0.20.0
+solana-cli 0.20.2
 Blockchain, Rebuilt for Scale
 USAGE:

View File

@@ -126,7 +126,7 @@ The result field will be a JSON object with the following sub fields:
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
 // Result
-{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
+{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.20.2,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
 ```
 ### getBalance
@@ -729,7 +729,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
 #### Notification Format:
 ```bash
-{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
+{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.20.2,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
 ```
 ### accountUnsubscribe
@@ -787,7 +787,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
 * `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
 ```bash
-{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
+{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.20.2,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
 ```
 ### programUnsubscribe

View File

@@ -11,7 +11,7 @@ This document proposes an easy to use software install and updater that can be u
 The easiest install method for supported platforms:
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh
 ```
 This script will check github for the latest tagged release and download and run the `solana-install-init` binary from there.
@@ -20,7 +20,7 @@ If additional arguments need to be specified during the installation, the follow
 ```bash
 $ init_args=.... # arguments for `solana-install-init ...`
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - ${init_args}
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s - ${init_args}
 ```
 ### Fetch and run a pre-built installer from a Github release
@@ -28,7 +28,7 @@ $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install
 With a well-known release URL, a pre-built binary can be obtained for supported platforms:
 ```bash
-$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.18.0/solana-install-init-x86_64-apple-darwin
+$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.20.2/solana-install-init-x86_64-apple-darwin
 $ chmod +x ./solana-install-init
 $ ./solana-install-init --help
 ```

View File

@@ -29,7 +29,7 @@ Before starting an archiver node, sanity check that the cluster is accessible to
 Fetch the current transaction count over JSON RPC:
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
 ```
 Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.
@@ -47,13 +47,13 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
 #### Linux and mac OS
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s
 ```
 Alternatively build the `solana-install` program from source and run the following command to obtain the same result:
 ```bash
-$ solana-install init
+solana-install init
 ```
 #### Windows
@@ -71,9 +71,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
 ```bash
-$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 #### mac OS
@@ -81,9 +81,9 @@ $ export PATH=$PWD/bin:$PATH
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:
 ```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-apple-darwin.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 #### Windows
@@ -95,7 +95,7 @@ Download the binaries by navigating to [https://github.com/solana-labs/solana/re
 Try running following command to join the gossip network and view all the other nodes in the cluster:
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 # Press ^C to exit
 ```
@@ -104,8 +104,8 @@ Now configure the keypairs for your archiver by running:
 Navigate to the solana install location and open a cmd prompt
 ```bash
-$ solana-keygen new -o archiver-keypair.json
-$ solana-keygen new -o storage-keypair.json
+solana-keygen new -o archiver-keypair.json
+solana-keygen new -o storage-keypair.json
 ```
 Use solana-keygen to show the public keys for each of the keypairs, they will be needed in the next step:
@@ -114,23 +114,23 @@ Use solana-keygen to show the public keys for each of the keypairs, they will be
 ```bash
 # The archiver's identity
-$ solana-keygen pubkey archiver-keypair.json
-$ solana-keygen pubkey storage-keypair.json
+solana-keygen pubkey archiver-keypair.json
+solana-keygen pubkey storage-keypair.json
 ```
 * Linux and mac OS
 \`\`\`bash
-$ export ARCHIVER\_IDENTITY=$\(solana-keygen pubkey archiver-keypair.json\)
-$ export STORAGE\_IDENTITY=$\(solana-keygen pubkey storage-keypair.json\)
+export ARCHIVER\_IDENTITY=$\(solana-keygen pubkey archiver-keypair.json\)
+export STORAGE\_IDENTITY=$\(solana-keygen pubkey storage-keypair.json\)
 ```text
 Then set up the storage accounts for your archiver by running:
 ```bash
-$ solana --keypair archiver-keypair.json airdrop 100000 lamports
-$ solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
+solana --keypair archiver-keypair.json airdrop 100000 lamports
+solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
 ```
 Note: Every time the testnet restarts, run the steps to setup the archiver accounts again.
@@ -138,7 +138,7 @@ Note: Every time the testnet restarts, run the steps to setup the archiver accou
 To start the archiver:
 ```bash
-$ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
+solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
 ```
 ## Verify Archiver Setup
@@ -146,12 +146,11 @@ $ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypa
 From another console, confirm the IP address and **identity pubkey** of your archiver is visible in the gossip network by running:
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 ```
 Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:
 ```bash
-$ solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
+solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
 ```

View File

@@ -7,13 +7,13 @@ You can publish your validator information to the chain to be publicly visible t
 Run the solana CLI to populate a validator info account:
 ```bash
-$ solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
+solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
 ```
 For details about optional fields for VALIDATOR\_INFO\_ARGS:
 ```bash
-$ solana validator-info publish --help
+solana validator-info publish --help
 ```
 ## Keybase
@@ -33,4 +33,3 @@ Including a Keybase username allows client applications \(like the Solana Networ
 3. Add or update your `solana validator-info` with your Keybase username. The
    CLI will verify the `validator-<PUBKEY>` file

View File

@@ -5,13 +5,13 @@
 The **identity pubkey** for your validator can also be found by running:
 ```bash
-$ solana-keygen pubkey ~/validator-keypair.json
+solana-keygen pubkey ~/validator-keypair.json
 ```
 From another console, confirm the IP address and **identity pubkey** of your validator is visible in the gossip network by running:
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 ```
 ## Check Vote Activity
@@ -19,13 +19,13 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
 The vote pubkey for the validator can be found by running:
 ```bash
-$ solana-keygen pubkey ~/validator-vote-keypair.json
+solana-keygen pubkey ~/validator-vote-keypair.json
 ```
 Provide the **vote pubkey** to the `solana show-vote-account` command to view the recent voting activity from your validator:
 ```bash
-$ solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
+solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
 ```
 ## Check Your Balance
@@ -33,7 +33,7 @@ $ solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
 Your account balance should decrease by the transaction fee amount as your validator submits votes, and increase after serving as the leader. Pass the `--lamports` are to observe in finer detail:
 ```bash
-$ solana balance --lamports
+solana balance --lamports
 ```
 ## Check Slot Number
@@ -41,13 +41,13 @@ $ solana balance --lamports
 After your validator boots, it may take some time to catch up with the cluster. Use the `get-slot` command to view the current slot that the cluster is processing:
 ```bash
-$ solana get-slot
+solana get-slot
 ```
 The current slot that your validator is processing can then been seen with:
 ```bash
-$ solana --url http://127.0.0.1:8899 get-slot
+solana --url http://127.0.0.1:8899 get-slot
 ```
 Until your validator has caught up, it will not be able to vote successfully and stake cannot be delegated to it.
@@ -60,11 +60,11 @@ There are several useful JSON-RPC endpoints for monitoring your validator on the
 ```bash
 # Similar to solana-gossip, you should see your validator in the list of cluster nodes
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
 # If your validator is properly voting, it should appear in the list of `current` vote accounts. If staked, `stake` should be > 0
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
 # Returns the current leader schedule
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
 # Returns info about the current epoch. slotIndex should progress on subsequent calls.
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://testnet.solana.com:8899
 ```
@@ -76,9 +76,9 @@ Metrics are available for local monitoring of your validator.
 Docker must be installed and the current user added to the docker group. Then download `solana-metrics.tar.bz2` from the Github Release and run
 ```bash
-$ tar jxf solana-metrics.tar.bz2
-$ cd solana-metrics/
-$ ./start.sh
+tar jxf solana-metrics.tar.bz2
+cd solana-metrics/
+./start.sh
 ```
 A local InfluxDB and Grafana instance is now running on your machine. Define `SOLANA_METRICS_CONFIG` in your environment as described at the end of the `start.sh` output and restart your validator.
@@ -92,6 +92,5 @@ Log messages emitted by your validator include a timestamp. When sharing logs wi
 To make it easier to compare logs between different sources we request that everybody use Pacific Time on their validator nodes. In Linux this can be accomplished by running:
 ```bash
-$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
+sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
 ```

View File

@@ -5,13 +5,13 @@
 The `solana-install` tool can be used to easily install and upgrade the validator software on Linux x86\_64 and mac OS systems.
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s
 ```
 Alternatively build the `solana-install` program from source and run the following command to obtain the same result:
 ```bash
-$ solana-install init
+solana-install init
 ```
 After a successful install, `solana-install update` may be used to easily update the cluster software to a newer version at any time.
@@ -25,9 +25,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
 ```bash
-$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 ### mac OS
@@ -35,9 +35,9 @@ $ export PATH=$PWD/bin:$PATH
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:
 ```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-apple-darwin.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 ## Build From Source
@@ -45,7 +45,6 @@ $ export PATH=$PWD/bin:$PATH
 If you are unable to use the prebuilt binaries or prefer to build it yourself from source, navigate to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), and download the **Source Code** archive. Extract the code and build the binaries with:
 ```bash
-$ ./scripts/cargo-install-all.sh .
-$ export PATH=$PWD/bin:$PATH
+./scripts/cargo-install-all.sh .
+export PATH=$PWD/bin:$PATH
 ```

View File

@@ -7,14 +7,14 @@ Adding stake can be accomplished by using the `solana` CLI
 First create a stake account keypair with `solana-keygen`:
 ```bash
-$ solana-keygen new -o ~/validator-config/stake-keypair.json
+solana-keygen new -o ~/validator-stake-keypair.json
 ```
-and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 42 lamports:
+and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 4242 lamports:
 ```bash
-$ solana create-stake-account ~/validator-config/stake-keypair.json 42 lamports
-$ solana delegate-stake ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
+solana create-stake-account ~/validator-stake-keypair.json 4242 lamports
+solana delegate-stake ~/validator-stake-keypair.json ~/validator-vote-keypair.json
 ```
 Note that stakes need to warm up, and warmup increments are applied at Epoch boundaries, so it can take an hour or more for the change to fully take effect.
@@ -22,13 +22,13 @@ Note that stakes need to warm up, and warmup increments are applied at Epoch bou
 Stakes can be re-delegated to another node at any time with the same command:
 ```bash
-$ solana delegate-stake ~/validator-config/stake-keypair.json ~/some-other-validator-vote-keypair.json
+solana delegate-stake ~/validator-stake-keypair.json ~/some-other-validator-vote-keypair.json
 ```
 Assuming the node is voting, now you're up and running and generating validator rewards. You'll want to periodically redeem/claim your rewards:
 ```bash
-$ solana redeem-vote-credits ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
+solana redeem-vote-credits ~/validator-stake-keypair.json ~/validator-vote-keypair.json
 ```
 The rewards lamports earned are split between your stake account and the vote account according to the commission rate set in the vote account. Rewards can only be earned while the validator is up and running. Further, once staked, the validator becomes an important part of the network. In order to safely remove a validator from the network, first deactivate its stake.
@@ -36,7 +36,7 @@ The rewards lamports earned are split between your stake account and the vote ac
 Stake can be deactivated by running:
 ```bash
-$ solana deactivate-stake ~/validator-config/stake-keypair.json
+solana deactivate-stake ~/validator-stake-keypair.json
 ```
 The stake will cool down, deactivate over time. While cooling down, your stake will continue to earn rewards. Only after stake cooldown is it safe to turn off your validator or withdraw it from the network. Cooldown may take several epochs to complete, depending on active stake and the size of your stake.

View File

@@ -7,7 +7,7 @@ Before attaching a validator node, sanity check that the cluster is accessible t
 Fetch the current transaction count over JSON RPC:
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
 ```
 Inspect the network explorer at [https://explorer.solana.com/](https://explorer.solana.com/) for activity.
@@ -19,16 +19,16 @@ View the [metrics dashboard](https://metrics.solana.com:3000/d/testnet-beta/test
 Sanity check that you are able to interact with the cluster by receiving a small airdrop of lamports from the testnet drone:
 ```bash
-$ solana set --url http://testnet.solana.com:8899
-$ solana get
-$ solana airdrop 123 lamports
-$ solana balance --lamports
+solana set --url http://testnet.solana.com:8899
+solana get
+solana airdrop 123 lamports
+solana balance --lamports
 ```
 Also try running following command to join the gossip network and view all the other nodes in the cluster:
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 # Press ^C to exit
 ```
@@ -37,7 +37,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
 Create an identity keypair for your validator by running:
 ```bash
-$ solana-keygen new -o ~/validator-keypair.json
+solana-keygen new -o ~/validator-keypair.json
 ```
 ### Wallet Configuration
@@ -45,30 +45,30 @@ $ solana-keygen new -o ~/validator-keypair.json
 You can set solana configuration to use your validator keypair for all following commands:
 ```bash
-$ solana set --keypair ~/validator-keypair.json
+solana set --keypair ~/validator-keypair.json
 ```
 **All following solana commands assume you have set `--keypair` config to** your validator identity keypair.\*\* If you haven't, you will need to add the `--keypair` argument to each command, like:
 ```bash
-$ solana --keypair ~/validator-keypair.json airdrop 1000 lamports
+solana --keypair ~/validator-keypair.json airdrop 10
 ```
 \(You can always override the set configuration by explicitly passing the `--keypair` argument with a command.\)
 ### Validator Start
-Airdrop yourself some lamports to get started:
+Airdrop yourself some SOL to get started:
 ```bash
-$ solana airdrop 1000 lamports
+solana airdrop 10
 ```
 Your validator will need a vote account. Create it now with the following commands:
 ```bash
-$ solana-keygen new -o ~/validator-vote-keypair.json
-$ solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json 1 lamports
+solana-keygen new -o ~/validator-vote-keypair.json
+solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json
 ```
 Then use one of the following commands, depending on your installation choice, to start the node:
@@ -76,19 +76,19 @@ Then use one of the following commands, depending on your installation choice, t
 If this is a `solana-install`-installation:
 ```bash
-$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
+solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```
 Alternatively, the `solana-install run` command can be used to run the validator node while periodically checking for and applying software updates:
 ```bash
-$ solana-install run solana-validator -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
+solana-install run solana-validator -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```
 If you built from source:
 ```bash
-$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
+NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```
 ### Enabling CUDA
@@ -98,7 +98,7 @@ If your machine has a GPU with CUDA installed \(Linux-only currently\), include
 Or if you built from source, define the SOLANA\_CUDA flag in your environment _before_ running any of the previously mentioned commands
 ```bash
-$ export SOLANA_CUDA=1
+export SOLANA_CUDA=1
 ```
 When your validator is started look for the following log message to indicate that CUDA is enabled: `"[<timestamp> solana::validator] CUDA is enabled"`
@@ -110,4 +110,3 @@ By default the validator will dynamically select available network ports in the
 ### Limiting ledger size to conserve disk space
 By default the validator will retain the full ledger. To conserve disk space start the validator with the `--limit-ledger-size`, which will instruct the validator to only retain the last couple hours of ledger.

View File

@@ -15,8 +15,8 @@ Prior to mainnet, the testnets may be running different versions of solana softw
 You can submit a JSON-RPC request to see the specific version of the cluster.
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
-{"jsonrpc":"2.0","result":{"solana-core":"0.18.0-pre1"},"id":1}
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
+{"jsonrpc":"2.0","result":{"solana-core":"0.20.2"},"id":1}
 ```
 ## Using a Different Testnet
@@ -28,17 +28,17 @@ This guide is written in the context of testnet.solana.com, our most stable clus
 If you are bootstrapping with `solana-install`, you can specify the release tag or named channel to install to match your desired testnet.
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - 0.18.0
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s - 0.20.2
 ```
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - beta
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s - beta
 ```
 Similarly, you can add this argument to the `solana-install` command if you've built the program from source:
 ```bash
-$ solana-install init 0.18.0
+solana-install init 0.20.2
 ```
 If you are downloading pre-compiled binaries or building from source, simply choose the release matching your desired testnet.
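
For illustration, a minimal sketch of the build-from-source route pinned to the same release tag used in the install examples above; the final build command is an assumption (a plain Cargo release build) and the project's own build scripts may be the preferred path.

```bash
# Sketch: fetch the source and check out the tag matching the target testnet.
git clone https://github.com/solana-labs/solana.git
cd solana
git checkout v0.20.2
# Assumption: a standard Cargo release build; the repository's own build
# scripts may differ.
cargo build --release
```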
@@ -48,14 +48,14 @@ If you are downloading pre-compiled binaries or building from source, simply cho
 The Solana CLI tool points at testnet.solana.com by default. Include a `--url` argument to point at a different testnet. For instance:
 ```bash
-$ solana --url http://beta.testnet.solana.com:8899 balance
+solana --url http://beta.testnet.solana.com:8899 balance
 ```
 The solana cli includes `get` and `set` configuration commands to automatically set the `--url` argument for future cli commands. For example:
 ```bash
-$ solana set --url http://beta.testnet.solana.com:8899
-$ solana balance # Same result as command above
+solana set --url http://beta.testnet.solana.com:8899
+solana balance # Same result as command above
 ```
 \(You can always override the set configuration by explicitly passing the `--url` argument with a command.\)
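
As a small illustration of that override, assuming the `set` command above has already pointed the saved configuration at beta.testnet.solana.com:

```bash
# The saved config targets beta.testnet.solana.com (see `solana set` above);
# an explicit --url wins for this one invocation only.
solana --url http://testnet.solana.com:8899 balance
```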
@@ -63,12 +63,11 @@ $ solana balance # Same result as command above
 Solana-gossip and solana-validator commands already require an explicit `--entrypoint` argument. Simply replace testnet.solana.com in the examples with an alternate url to interact with a different testnet. For example:
 ```bash
-$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
+solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
 ```
 You can also submit JSON-RPC requests to a different testnet, like:
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
 ```


@@ -1,6 +1,6 @@
 [package]
 name = "solana-chacha-sys"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana chacha-sys"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"


@@ -31,10 +31,11 @@ testName=$(basename "$0" .sh)
 case $testName in
 test-stable)
 echo "Executing $testName"
-_ cargo +"$rust_stable" build --tests --bins ${V:+--verbose}
 _ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
 _ cargo +"$rust_stable" test --manifest-path local_cluster/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
+_ cargo +"$rust_stable" test --manifest-path programs/move_loader_api/Cargo.toml ${V:+--verbose} -- --nocapture
+_ cargo +"$rust_stable" test --manifest-path programs/move_loader_program/Cargo.toml ${V:+--verbose} -- --nocapture
+_ cargo +"$rust_stable" test --manifest-path programs/librapay_api/Cargo.toml ${V:+--verbose} -- --nocapture
 ;;
 test-stable-perf)
 echo "Executing $testName"


@@ -288,11 +288,15 @@ if ! $skipCreate; then
 echo "--- $cloudProvider.sh create"
 create_args=(
 -p "$netName"
--a "$bootstrapValidatorAddress"
 -c "$clientNodeCount"
 -n "$additionalValidatorCount"
 --dedicated
 )
+if [[ -n $bootstrapValidatorAddress ]]; then
+create_args+=(-a "$bootstrapValidatorAddress")
+fi
 # shellcheck disable=SC2206
 create_args+=(${zone_args[@]})


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -26,23 +26,23 @@ serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
-solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
-solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.20.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
+solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
+solana-vote-signer = { path = "../vote-signer", version = "0.20.2" }
 url = "2.1.0"
 [dev-dependencies]
-solana-core = { path = "../core", version = "0.20.0" }
-solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-budget-program = { path = "../programs/budget_program", version = "0.20.2" }
 [[bin]]
 name = "solana"


@@ -329,7 +329,6 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
 }
 ("balance", Some(matches)) => {
 let pubkey = pubkey_of(&matches, "pubkey");
-println!("{:?}", pubkey);
 Ok(CliCommandInfo {
 command: CliCommand::Balance {
 pubkey,


@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tl
 serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
 [dev-dependencies]
 jsonrpc-core = "14.0.3"
-jsonrpc-http-server = "14.0.1"
-solana-logger = { path = "../logger", version = "0.20.0" }
+jsonrpc-http-server = "14.0.3"
+solana-logger = { path = "../logger", version = "0.20.2" }


@@ -1,7 +1,7 @@
 [package]
 name = "solana-core"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.20.0"
+version = "0.20.2"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -15,7 +15,6 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
 [features]
 pin_gpu_memory = []
-move = []
 [dependencies]
 bincode = "1.2.0"
@@ -30,7 +29,7 @@ indexmap = "1.1"
 itertools = "0.8.0"
 jsonrpc-core = "14.0.3"
 jsonrpc-derive = "14.0.3"
-jsonrpc-http-server = "14.0.1"
+jsonrpc-http-server = "14.0.3"
 jsonrpc-pubsub = "14.0.3"
 jsonrpc-ws-server = "14.0.3"
 lazy_static = "1.4.0"
@@ -45,25 +44,25 @@ rayon = "1.2.0"
 serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
-solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
-solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
-solana-chacha-sys = { path = "../chacha-sys", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
+solana-budget-program = { path = "../programs/budget_program", version = "0.20.2" }
+solana-chacha-sys = { path = "../chacha-sys", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
 solana-ed25519-dalek = "0.2.0"
-solana-ledger = { path = "../ledger", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-merkle-tree = { path = "../merkle-tree", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
-solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
-solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.20.0" }
+solana-ledger = { path = "../ledger", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-merkle-tree = { path = "../merkle-tree", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-measure = { path = "../measure", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
+solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
+solana-storage-program = { path = "../programs/storage_program", version = "0.20.2" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
+solana-vote-signer = { path = "../vote-signer", version = "0.20.2" }
 symlink = "0.1.0"
 sys-info = "0.5.8"
 tempfile = "3.1.0"
@@ -72,7 +71,7 @@ tokio-codec = "0.1"
 tokio-fs = "0.1"
 tokio-io = "0.1"
 untrusted = "0.7.0"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.2" }
 reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
 [dev-dependencies]


@@ -12,17 +12,20 @@
 //! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
 //!
 //! Bank needs to provide an interface for us to query the stake weight
-use crate::contact_info::ContactInfo;
-use crate::crds_gossip::CrdsGossip;
-use crate::crds_gossip_error::CrdsGossipError;
-use crate::crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS};
-use crate::crds_value::{CrdsValue, CrdsValueLabel, EpochSlots, Vote};
-use crate::packet::{to_shared_blob, Blob, Packet, SharedBlob};
-use crate::repair_service::RepairType;
-use crate::result::{Error, Result};
-use crate::sendmmsg::{multicast, send_mmsg};
-use crate::streamer::{BlobReceiver, BlobSender};
-use crate::weighted_shuffle::{weighted_best, weighted_shuffle};
+use crate::crds_value::CrdsValue;
+use crate::{
+    contact_info::ContactInfo,
+    crds_gossip::CrdsGossip,
+    crds_gossip_error::CrdsGossipError,
+    crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
+    crds_value::{CrdsData, CrdsValueLabel, EpochSlots, Vote},
+    packet::{to_shared_blob, Blob, Packet, SharedBlob},
+    repair_service::RepairType,
+    result::{Error, Result},
+    sendmmsg::{multicast, send_mmsg},
+    streamer::{BlobReceiver, BlobSender},
+    weighted_shuffle::{weighted_best, weighted_shuffle},
+};
 use bincode::{deserialize, serialize, serialized_size};
 use core::cmp;
 use itertools::Itertools;
@@ -195,8 +198,8 @@ impl ClusterInfo {
 pub fn insert_self(&mut self, contact_info: ContactInfo) {
 if self.id() == contact_info.id {
-let mut value = CrdsValue::ContactInfo(contact_info.clone());
-value.sign(&self.keypair);
+let value =
+CrdsValue::new_signed(CrdsData::ContactInfo(contact_info.clone()), &self.keypair);
 let _ = self.gossip.crds.insert(value, timestamp());
 }
 }
@@ -205,8 +208,7 @@
 let mut my_data = self.my_data();
 let now = timestamp();
 my_data.wallclock = now;
-let mut entry = CrdsValue::ContactInfo(my_data);
-entry.sign(&self.keypair);
+let entry = CrdsValue::new_signed(CrdsData::ContactInfo(my_data), &self.keypair);
 self.gossip.refresh_push_active_set(stakes);
 self.gossip
 .process_push_message(&self.id(), vec![entry], now);
@@ -214,8 +216,7 @@
 // TODO kill insert_info, only used by tests
 pub fn insert_info(&mut self, contact_info: ContactInfo) {
-let mut value = CrdsValue::ContactInfo(contact_info);
-value.sign(&self.keypair);
+let value = CrdsValue::new_signed(CrdsData::ContactInfo(contact_info), &self.keypair);
 let _ = self.gossip.crds.insert(value, timestamp());
 }
@@ -297,8 +298,10 @@ impl ClusterInfo {
 pub fn push_epoch_slots(&mut self, id: Pubkey, root: u64, slots: BTreeSet<u64>) {
 let now = timestamp();
-let mut entry = CrdsValue::EpochSlots(EpochSlots::new(id, root, slots, now));
-entry.sign(&self.keypair);
+let entry = CrdsValue::new_signed(
+CrdsData::EpochSlots(EpochSlots::new(id, root, slots, now)),
+&self.keypair,
+);
 self.gossip
 .process_push_message(&self.id(), vec![entry], now);
 }
@@ -306,8 +309,7 @@
 pub fn push_vote(&mut self, vote: Transaction) {
 let now = timestamp();
 let vote = Vote::new(&self.id(), vote, now);
-let mut entry = CrdsValue::Vote(vote);
-entry.sign(&self.keypair);
+let entry = CrdsValue::new_signed(CrdsData::Vote(vote), &self.keypair);
 self.gossip
 .process_push_message(&self.id(), vec![entry], now);
 }
@@ -915,7 +917,7 @@
 .expect("unable to serialize default filter") as usize;
 let protocol = Protocol::PullRequest(
 CrdsFilter::default(),
-CrdsValue::ContactInfo(ContactInfo::default()),
+CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())),
 );
 let protocol_size =
 serialized_size(&protocol).expect("unable to serialize gossip protocol") as usize;
@@ -1161,9 +1163,7 @@
 1
 );
 } else if caller.contact_info().is_some() {
-if caller.contact_info().unwrap().pubkey()
-== me.read().unwrap().gossip.id
-{
+if caller.contact_info().unwrap().id == me.read().unwrap().gossip.id {
 warn!("PullRequest ignored, I'm talking to myself");
 inc_new_counter_debug!("cluster_info-window-request-loopback", 1);
 } else {
@@ -1509,6 +1509,7 @@
 daddr,
 daddr,
 daddr,
+daddr,
 timestamp(),
 );
 (node, gossip_socket, Some(ip_echo))
@@ -1529,6 +1530,7 @@
 daddr,
 daddr,
 daddr,
+daddr,
 timestamp(),
 );
 (node, gossip_socket, None)
@@ -1612,6 +1614,7 @@ impl Node {
 gossip.local_addr().unwrap(),
 tvu.local_addr().unwrap(),
 tvu_forwards.local_addr().unwrap(),
+repair.local_addr().unwrap(),
 empty,
 empty,
 storage.local_addr().unwrap(),
@@ -1658,6 +1661,7 @@
 gossip_addr,
 tvu.local_addr().unwrap(),
 tvu_forwards.local_addr().unwrap(),
+repair.local_addr().unwrap(),
 tpu.local_addr().unwrap(),
 tpu_forwards.local_addr().unwrap(),
 storage.local_addr().unwrap(),
@@ -1719,7 +1723,7 @@ impl Node {
 let (_, retransmit_sockets) =
 multi_bind_in_range(port_range, 8).expect("retransmit multi_bind");
-let (_, repair) = Self::bind(port_range);
+let (repair_port, repair) = Self::bind(port_range);
 let (_, broadcast) = Self::bind(port_range);
 let info = ContactInfo::new(
@@ -1727,6 +1731,7 @@
 SocketAddr::new(gossip_addr.ip(), gossip_port),
 SocketAddr::new(gossip_addr.ip(), tvu_port),
 SocketAddr::new(gossip_addr.ip(), tvu_forwards_port),
+SocketAddr::new(gossip_addr.ip(), repair_port),
 SocketAddr::new(gossip_addr.ip(), tpu_port),
 SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
 socketaddr_any!(),
@@ -1884,6 +1889,7 @@ mod tests {
 socketaddr!([127, 0, 0, 1], 1239),
 socketaddr!([127, 0, 0, 1], 1240),
 socketaddr!([127, 0, 0, 1], 1241),
+socketaddr!([127, 0, 0, 1], 1242),
 0,
 );
 cluster_info.insert_info(nxt.clone());
@@ -1904,6 +1910,7 @@
 socketaddr!([127, 0, 0, 1], 1239),
 socketaddr!([127, 0, 0, 1], 1240),
 socketaddr!([127, 0, 0, 1], 1241),
+socketaddr!([127, 0, 0, 1], 1242),
 0,
 );
 cluster_info.insert_info(nxt);
@@ -1941,6 +1948,7 @@
 socketaddr!("127.0.0.1:1239"),
 socketaddr!("127.0.0.1:1240"),
 socketaddr!("127.0.0.1:1241"),
+socketaddr!("127.0.0.1:1242"),
 0,
 );
 let rv = ClusterInfo::run_window_request(
@@ -2376,7 +2384,8 @@ mod tests {
 }
 // now add this message back to the table and make sure after the next pull, the entrypoint is unset
-let entrypoint_crdsvalue = CrdsValue::ContactInfo(entrypoint.clone());
+let entrypoint_crdsvalue =
+CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
 let cluster_info = Arc::new(RwLock::new(cluster_info));
 ClusterInfo::handle_pull_response(
 &cluster_info,
@@ -2393,7 +2402,7 @@
 #[test]
 fn test_split_messages_small() {
-let value = CrdsValue::ContactInfo(ContactInfo::default());
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 test_split_messages(value);
 }
@@ -2403,13 +2412,12 @@
 for i in 0..128 {
 btree_slots.insert(i);
 }
-let value = CrdsValue::EpochSlots(EpochSlots {
+let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots {
 from: Pubkey::default(),
 root: 0,
 slots: btree_slots,
-signature: Signature::default(),
 wallclock: 0,
-});
+}));
 test_split_messages(value);
 }
@@ -2433,7 +2441,7 @@
 }
 fn check_pull_request_size(filter: CrdsFilter) {
-let value = CrdsValue::ContactInfo(ContactInfo::default());
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 let protocol = Protocol::PullRequest(filter, value.clone());
 assert!(serialized_size(&protocol).unwrap() <= PACKET_DATA_SIZE as u64);
 }


@@ -210,13 +210,13 @@ impl ClusterInfoRepairListener {
 for (repairee_pubkey, repairee_epoch_slots) in repairees {
 let repairee_root = repairee_epoch_slots.root;
-let repairee_tvu = {
+let repairee_repair_addr = {
 let r_cluster_info = cluster_info.read().unwrap();
 let contact_info = r_cluster_info.get_contact_info_for_node(repairee_pubkey);
-contact_info.map(|c| c.tvu)
+contact_info.map(|c| c.repair)
 };
-if let Some(repairee_tvu) = repairee_tvu {
+if let Some(repairee_addr) = repairee_repair_addr {
 // For every repairee, get the set of repairmen who are responsible for
 let mut eligible_repairmen = Self::find_eligible_repairmen(
 my_pubkey,
@@ -242,7 +242,7 @@
 &repairee_epoch_slots,
 &eligible_repairmen,
 socket,
-&repairee_tvu,
+&repairee_addr,
 NUM_SLOTS_PER_UPDATE,
 epoch_schedule,
 );
@@ -261,7 +261,7 @@
 repairee_epoch_slots: &EpochSlots,
 eligible_repairmen: &[&Pubkey],
 socket: &UdpSocket,
-repairee_tvu: &SocketAddr,
+repairee_addr: &SocketAddr,
 num_slots_to_repair: usize,
 epoch_schedule: &EpochSchedule,
 ) -> Result<()> {
@@ -320,7 +320,7 @@
 .get_data_shred(slot, blob_index as u64)
 .expect("Failed to read data blob from blocktree")
 {
-socket.send_to(&blob_data[..], repairee_tvu)?;
+socket.send_to(&blob_data[..], repairee_addr)?;
 total_data_blobs_sent += 1;
 }
@@ -328,7 +328,7 @@
 .get_coding_shred(slot, blob_index as u64)
 .expect("Failed to read coding blob from blocktree")
 {
-socket.send_to(&coding_bytes[..], repairee_tvu)?;
+socket.send_to(&coding_bytes[..], repairee_addr)?;
 total_coding_blobs_sent += 1;
 }
 }


@@ -1,12 +1,9 @@
-use bincode::serialize;
 use solana_sdk::pubkey::Pubkey;
 #[cfg(test)]
 use solana_sdk::rpc_port;
 #[cfg(test)]
 use solana_sdk::signature::{Keypair, KeypairUtil};
-use solana_sdk::signature::{Signable, Signature};
 use solana_sdk::timing::timestamp;
-use std::borrow::Cow;
 use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
 use std::net::{IpAddr, SocketAddr};
@@ -14,14 +11,14 @@ use std::net::{IpAddr, SocketAddr};
 #[derive(Serialize, Deserialize, Clone, Debug)]
 pub struct ContactInfo {
 pub id: Pubkey,
-/// signature of this ContactInfo
-pub signature: Signature,
 /// gossip address
 pub gossip: SocketAddr,
 /// address to connect to for replication
 pub tvu: SocketAddr,
-/// address to forward blobs to
+/// address to forward shreds to
 pub tvu_forwards: SocketAddr,
+/// address to send repairs to
+pub repair: SocketAddr,
 /// transactions address
 pub tpu: SocketAddr,
 /// address to forward unprocessed transactions to
@@ -80,13 +77,13 @@ impl Default for ContactInfo {
 gossip: socketaddr_any!(),
 tvu: socketaddr_any!(),
 tvu_forwards: socketaddr_any!(),
+repair: socketaddr_any!(),
 tpu: socketaddr_any!(),
 tpu_forwards: socketaddr_any!(),
 storage_addr: socketaddr_any!(),
 rpc: socketaddr_any!(),
 rpc_pubsub: socketaddr_any!(),
 wallclock: 0,
-signature: Signature::default(),
 }
 }
 }
@@ -98,6 +95,7 @@ impl ContactInfo {
 gossip: SocketAddr,
 tvu: SocketAddr,
 tvu_forwards: SocketAddr,
+repair: SocketAddr,
 tpu: SocketAddr,
 tpu_forwards: SocketAddr,
 storage_addr: SocketAddr,
@@ -107,10 +105,10 @@
 ) -> Self {
 Self {
 id: *id,
-signature: Signature::default(),
 gossip,
 tvu,
 tvu_forwards,
+repair,
 tpu,
 tpu_forwards,
 storage_addr,
@@ -131,6 +129,7 @@ impl ContactInfo {
 socketaddr!("127.0.0.1:1239"),
 socketaddr!("127.0.0.1:1240"),
 socketaddr!("127.0.0.1:1241"),
+socketaddr!("127.0.0.1:1242"),
 now,
 )
 }
@@ -150,6 +149,7 @@ impl ContactInfo {
 addr,
 addr,
 addr,
+addr,
 0,
 )
 }
@@ -167,6 +167,7 @@ impl ContactInfo {
 let tvu_addr = next_port(&bind_addr, 2);
 let tpu_forwards_addr = next_port(&bind_addr, 3);
 let tvu_forwards_addr = next_port(&bind_addr, 4);
+let repair = next_port(&bind_addr, 5);
 let rpc_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
 let rpc_pubsub_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
 Self::new(
@@ -174,6 +175,7 @@ impl ContactInfo {
 gossip_addr,
 tvu_addr,
 tvu_forwards_addr,
+repair,
 tpu_addr,
 tpu_forwards_addr,
 "0.0.0.0:0".parse().unwrap(),
@@ -202,6 +204,7 @@ impl ContactInfo {
 daddr,
 daddr,
 daddr,
+daddr,
 timestamp(),
 )
 }
@@ -232,49 +235,6 @@ impl ContactInfo {
 }
 }
-impl Signable for ContactInfo {
-fn pubkey(&self) -> Pubkey {
-self.id
-}
-fn signable_data(&self) -> Cow<[u8]> {
-#[derive(Serialize)]
-struct SignData {
-id: Pubkey,
-gossip: SocketAddr,
-tvu: SocketAddr,
-tpu: SocketAddr,
-tpu_forwards: SocketAddr,
-storage_addr: SocketAddr,
-rpc: SocketAddr,
-rpc_pubsub: SocketAddr,
-wallclock: u64,
-}
-let me = self;
-let data = SignData {
-id: me.id,
-gossip: me.gossip,
-tvu: me.tvu,
-tpu: me.tpu,
-storage_addr: me.storage_addr,
-tpu_forwards: me.tpu_forwards,
-rpc: me.rpc,
-rpc_pubsub: me.rpc_pubsub,
-wallclock: me.wallclock,
-};
-Cow::Owned(serialize(&data).expect("failed to serialize ContactInfo"))
-}
-fn get_signature(&self) -> Signature {
-self.signature
-}
-fn set_signature(&mut self, signature: Signature) {
-self.signature = signature
-}
-}
 #[cfg(test)]
 mod tests {
 use super::*;


@@ -165,11 +165,12 @@ impl Crds {
 mod test {
 use super::*;
 use crate::contact_info::ContactInfo;
+use crate::crds_value::CrdsData;
 #[test]
 fn test_insert() {
 let mut crds = Crds::default();
-let val = CrdsValue::ContactInfo(ContactInfo::default());
+let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 assert_eq!(crds.insert(val.clone(), 0).ok(), Some(None));
 assert_eq!(crds.table.len(), 1);
 assert!(crds.table.contains_key(&val.label()));
@@ -178,7 +179,7 @@ mod test {
 #[test]
 fn test_update_old() {
 let mut crds = Crds::default();
-let val = CrdsValue::ContactInfo(ContactInfo::default());
+let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 assert_eq!(crds.insert(val.clone(), 0), Ok(None));
 assert_eq!(crds.insert(val.clone(), 1), Err(CrdsError::InsertFailed));
 assert_eq!(crds.table[&val.label()].local_timestamp, 0);
@@ -186,9 +187,15 @@
 #[test]
 fn test_update_new() {
 let mut crds = Crds::default();
-let original = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0));
+let original = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::default(),
+0,
+)));
 assert_matches!(crds.insert(original.clone(), 0), Ok(_));
-let val = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 1));
+let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::default(),
+1,
+)));
 assert_eq!(
 crds.insert(val.clone(), 1).unwrap().unwrap().value,
 original
@@ -198,14 +205,17 @@
 #[test]
 fn test_update_timestamp() {
 let mut crds = Crds::default();
-let val = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0));
+let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::default(),
+0,
+)));
 assert_eq!(crds.insert(val.clone(), 0), Ok(None));
 crds.update_label_timestamp(&val.label(), 1);
 assert_eq!(crds.table[&val.label()].local_timestamp, 1);
 assert_eq!(crds.table[&val.label()].insert_timestamp, 0);
-let val2 = CrdsValue::ContactInfo(ContactInfo::default());
+let val2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 assert_eq!(val2.label().pubkey(), val.label().pubkey());
 assert_matches!(crds.insert(val2.clone(), 0), Ok(Some(_)));
@@ -221,7 +231,7 @@
 let mut ci = ContactInfo::default();
 ci.wallclock += 1;
-let val3 = CrdsValue::ContactInfo(ci);
+let val3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
 assert_matches!(crds.insert(val3.clone(), 3), Ok(Some(_)));
 assert_eq!(crds.table[&val2.label()].local_timestamp, 3);
 assert_eq!(crds.table[&val2.label()].insert_timestamp, 3);
@@ -229,7 +239,7 @@
 #[test]
 fn test_find_old_records() {
 let mut crds = Crds::default();
-let val = CrdsValue::ContactInfo(ContactInfo::default());
+let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 assert_eq!(crds.insert(val.clone(), 1), Ok(None));
 assert!(crds.find_old_labels(0).is_empty());
@@ -239,7 +249,7 @@
 #[test]
 fn test_remove() {
 let mut crds = Crds::default();
-let val = CrdsValue::ContactInfo(ContactInfo::default());
+let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 assert_matches!(crds.insert(val.clone(), 1), Ok(_));
 assert_eq!(crds.find_old_labels(1), vec![val.label()]);
@@ -248,7 +258,7 @@
 }
 #[test]
 fn test_equal() {
-let val = CrdsValue::ContactInfo(ContactInfo::default());
+let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
 let v1 = VersionedCrdsValue::new(1, val.clone());
 let v2 = VersionedCrdsValue::new(1, val);
 assert_eq!(v1, v2);
@@ -258,12 +268,15 @@
 fn test_hash_order() {
 let v1 = VersionedCrdsValue::new(
 1,
-CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0)),
+CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::default(),
+0,
+))),
 );
 let v2 = VersionedCrdsValue::new(1, {
 let mut contact_info = ContactInfo::new_localhost(&Pubkey::default(), 0);
 contact_info.rpc = socketaddr!("0.0.0.0:0");
-CrdsValue::ContactInfo(contact_info)
+CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info))
 });
 assert_eq!(v1.value.label(), v2.value.label());
@@ -285,11 +298,17 @@
 fn test_wallclock_order() {
 let v1 = VersionedCrdsValue::new(
 1,
-CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 1)),
+CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::default(),
+1,
+))),
 );
 let v2 = VersionedCrdsValue::new(
 1,
-CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0)),
+CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::default(),
+0,
+))),
 );
 assert_eq!(v1.value.label(), v2.value.label());
 assert!(v1 > v2);
@@ -301,11 +320,17 @@
 fn test_label_order() {
 let v1 = VersionedCrdsValue::new(
 1,
-CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
+CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+))),
 );
 let v2 = VersionedCrdsValue::new(
 1,
-CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
+CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+))),
 );
 assert_ne!(v1, v2);
 assert!(!(v1 == v2));


@@ -9,7 +9,6 @@ use crate::crds_gossip_pull::{CrdsFilter, CrdsGossipPull};
 use crate::crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE};
 use crate::crds_value::{CrdsValue, CrdsValueLabel};
 use solana_sdk::pubkey::Pubkey;
-use solana_sdk::signature::Signable;
 use std::collections::{HashMap, HashSet};
 ///The min size for bloom filters
@@ -204,6 +203,7 @@ pub fn get_weight(max_weight: f32, time_since_last_selected: u32, stake: f32) ->
 mod test {
 use super::*;
 use crate::contact_info::ContactInfo;
+use crate::crds_value::CrdsData;
 use solana_sdk::hash::hash;
 use solana_sdk::timing::timestamp;
@@ -216,7 +216,10 @@
 let prune_pubkey = Pubkey::new(&[2; 32]);
 crds_gossip
 .crds
-.insert(CrdsValue::ContactInfo(ci.clone()), 0)
+.insert(
+CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
+0,
+)
 .unwrap();
 crds_gossip.refresh_push_active_set(&HashMap::new());
 let now = timestamp();


@@ -294,6 +294,7 @@ impl CrdsGossipPull {
 mod test {
 use super::*;
 use crate::contact_info::ContactInfo;
+use crate::crds_value::CrdsData;
 use itertools::Itertools;
 use solana_sdk::hash::hash;
 use solana_sdk::packet::PACKET_DATA_SIZE;
@@ -303,10 +304,16 @@
 let mut crds = Crds::default();
 let mut stakes = HashMap::new();
 let node = CrdsGossipPull::default();
-let me = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 crds.insert(me.clone(), 0).unwrap();
 for i in 1..=30 {
-let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let id = entry.label().pubkey();
 crds.insert(entry.clone(), 0).unwrap();
 stakes.insert(id, i * 100);
@@ -325,7 +332,10 @@
 #[test]
 fn test_new_pull_request() {
 let mut crds = Crds::default();
-let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let id = entry.label().pubkey();
 let node = CrdsGossipPull::default();
 assert_eq!(
@@ -339,7 +349,10 @@
 Err(CrdsGossipError::NoPeers)
 );
-let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 crds.insert(new.clone(), 0).unwrap();
 let req = node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE);
 let (to, _, self_info) = req.unwrap();
@@ -350,13 +363,22 @@
 #[test]
 fn test_new_mark_creation_time() {
 let mut crds = Crds::default();
-let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let node_pubkey = entry.label().pubkey();
 let mut node = CrdsGossipPull::default();
 crds.insert(entry.clone(), 0).unwrap();
-let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 crds.insert(old.clone(), 0).unwrap();
-let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 crds.insert(new.clone(), 0).unwrap();
 // set request creation time to max_value
@@ -380,11 +402,17 @@
 #[test]
 fn test_process_pull_request() {
 let mut node_crds = Crds::default();
-let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let node_pubkey = entry.label().pubkey();
 let node = CrdsGossipPull::default();
 node_crds.insert(entry.clone(), 0).unwrap();
-let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 node_crds.insert(new.clone(), 0).unwrap();
 let req = node.new_pull_request(
 &node_crds,
@@ -419,22 +447,32 @@
 #[test]
 fn test_process_pull_request_response() {
 let mut node_crds = Crds::default();
-let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let node_pubkey = entry.label().pubkey();
 let mut node = CrdsGossipPull::default();
 node_crds.insert(entry.clone(), 0).unwrap();
-let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 node_crds.insert(new.clone(), 0).unwrap();
 let mut dest = CrdsGossipPull::default();
 let mut dest_crds = Crds::default();
 let new_id = Pubkey::new_rand();
-let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&new_id, 1));
+let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&new_id, 1,
+)));
 dest_crds.insert(new.clone(), 0).unwrap();
 // node contains a key from the dest node, but at an older local timestamp
-let same_key = CrdsValue::ContactInfo(ContactInfo::new_localhost(&new_id, 0));
+let same_key = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&new_id, 0,
+)));
 assert_eq!(same_key.label(), new.label());
 assert!(same_key.wallclock() < new.wallclock());
 node_crds.insert(same_key.clone(), 0).unwrap();
@@ -494,12 +532,18 @@
 #[test]
 fn test_gossip_purge() {
 let mut node_crds = Crds::default();
-let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let node_label = entry.label();
 let node_pubkey = node_label.pubkey();
 let mut node = CrdsGossipPull::default();
 node_crds.insert(entry.clone(), 0).unwrap();
-let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 node_crds.insert(old.clone(), 0).unwrap();
 let value_hash = node_crds.lookup_versioned(&old.label()).unwrap().value_hash;


@@ -340,7 +340,7 @@ impl CrdsGossipPush {
 mod test {
 use super::*;
 use crate::contact_info::ContactInfo;
-use solana_sdk::signature::Signable;
+use crate::crds_value::CrdsData;
 #[test]
 fn test_prune() {
@@ -353,7 +353,9 @@
 stakes.insert(self_id, 100);
 stakes.insert(origin, 100);
-let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&origin, 0));
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&origin, 0,
+)));
 let label = value.label();
 let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
 let mut low_staked_set = HashSet::new();
@@ -395,7 +397,10 @@
 fn test_process_push() {
 let mut crds = Crds::default();
 let mut push = CrdsGossipPush::default();
-let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let label = value.label();
 // push a new message
 assert_eq!(
@@ -416,7 +421,7 @@
 let mut push = CrdsGossipPush::default();
 let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
 ci.wallclock = 1;
-let value = CrdsValue::ContactInfo(ci.clone());
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
 // push a new message
 assert_eq!(
@@ -426,7 +431,7 @@
 // push an old version
 ci.wallclock = 0;
-let value = CrdsValue::ContactInfo(ci.clone());
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
 assert_eq!(
 push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
 Err(CrdsGossipError::PushMessageOldVersion)
@@ -441,7 +446,7 @@
 // push a version to far in the future
 ci.wallclock = timeout + 1;
-let value = CrdsValue::ContactInfo(ci.clone());
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
 assert_eq!(
 push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
 Err(CrdsGossipError::PushMessageTimeout)
@@ -449,7 +454,7 @@
 // push a version to far in the past
 ci.wallclock = 0;
-let value = CrdsValue::ContactInfo(ci.clone());
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
 assert_eq!(
 push.process_push_message(&mut crds, &Pubkey::default(), value, timeout + 1),
 Err(CrdsGossipError::PushMessageTimeout)
@@ -461,7 +466,7 @@
 let mut push = CrdsGossipPush::default();
 let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
 ci.wallclock = 0;
-let value_old = CrdsValue::ContactInfo(ci.clone());
+let value_old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
 // push a new message
 assert_eq!(
@@ -471,7 +476,7 @@
 // push an old version
 ci.wallclock = 1;
-let value = CrdsValue::ContactInfo(ci.clone());
+let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
 assert_eq!(
 push.process_push_message(&mut crds, &Pubkey::default(), value, 0)
 .unwrap()
@@ -492,13 +497,19 @@
 solana_logger::setup();
 let mut crds = Crds::default();
 let mut push = CrdsGossipPush::default();
-let value1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let value1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
 push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
 assert!(push.active_set.get(&value1.label().pubkey()).is_some());
-let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 assert!(push.active_set.get(&value2.label().pubkey()).is_none());
 assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
 for _ in 0..30 {
@@ -510,7 +521,9 @@
 assert!(push.active_set.get(&value2.label().pubkey()).is_some());
 for _ in 0..push.num_active {
-let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(
+ContactInfo::new_localhost(&Pubkey::new_rand(), 0),
+));
 assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
 }
 push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
@@ -523,8 +536,10 @@
 let push = CrdsGossipPush::default();
 let mut stakes = HashMap::new();
 for i in 1..=100 {
-let peer =
-CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), time));
+let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+time,
+)));
 let id = peer.label().pubkey();
 crds.insert(peer.clone(), time).unwrap();
 stakes.insert(id, i * 100);
@@ -542,11 +557,17 @@
 fn test_new_push_messages() {
 let mut crds = Crds::default();
 let mut push = CrdsGossipPush::default();
-let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
 push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
-let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 let mut expected = HashMap::new();
 expected.insert(peer.label().pubkey(), vec![new_msg.clone()]);
 assert_eq!(
@@ -560,11 +581,20 @@
 fn test_personalized_push_messages() {
 let mut crds = Crds::default();
 let mut push = CrdsGossipPush::default();
-let peer_1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 assert_eq!(crds.insert(peer_1.clone(), 0), Ok(None));
-let peer_2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 assert_eq!(crds.insert(peer_2.clone(), 0), Ok(None));
-let peer_3 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
+let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
+&Pubkey::new_rand(),
+0,
+)));
 assert_eq!(
 push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
 Ok(None)
@@ -572,7 +602,10 @@
 push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
 // push 3's contact info to 1 and 2 and 3
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&peer_3.pubkey(), 0)); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&peer_3.pubkey(),
0,
)));
let mut expected = HashMap::new(); let mut expected = HashMap::new();
expected.insert(peer_1.pubkey(), vec![new_msg.clone()]); expected.insert(peer_1.pubkey(), vec![new_msg.clone()]);
expected.insert(peer_2.pubkey(), vec![new_msg.clone()]); expected.insert(peer_2.pubkey(), vec![new_msg.clone()]);
@ -583,11 +616,17 @@ mod test {
fn test_process_prune() { fn test_process_prune() {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None)); assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1); push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let expected = HashMap::new(); let expected = HashMap::new();
assert_eq!( assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0), push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
@ -600,13 +639,16 @@ mod test {
fn test_purge_old_pending_push_messages() { fn test_purge_old_pending_push_messages() {
let mut crds = Crds::default(); let mut crds = Crds::default();
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None)); assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1); push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1; ci.wallclock = 1;
let new_msg = CrdsValue::ContactInfo(ci.clone()); let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
let expected = HashMap::new(); let expected = HashMap::new();
assert_eq!( assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 1), push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 1),
@ -622,7 +664,7 @@ mod test {
let mut push = CrdsGossipPush::default(); let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0); let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 0; ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone()); let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
let label = value.label(); let label = value.label();
// push a new message // push a new message
assert_eq!( assert_eq!(


@ -8,9 +8,34 @@ use std::collections::BTreeSet;
use std::fmt; use std::fmt;
/// CrdsValue that is replicated across the cluster /// CrdsValue that is replicated across the cluster
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct CrdsValue {
pub signature: Signature,
pub data: CrdsData,
}
impl Signable for CrdsValue {
fn pubkey(&self) -> Pubkey {
self.pubkey()
}
fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(serialize(&self.data).expect("failed to serialize CrdsData"))
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature
}
}
/// CrdsData that defines the different types of items CrdsValues can hold
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum CrdsValue { pub enum CrdsData {
/// * Merge Strategy - Latest wallclock is picked /// * Merge Strategy - Latest wallclock is picked
ContactInfo(ContactInfo), ContactInfo(ContactInfo),
/// * Merge Strategy - Latest wallclock is picked /// * Merge Strategy - Latest wallclock is picked
@ -24,7 +49,6 @@ pub struct EpochSlots {
pub from: Pubkey, pub from: Pubkey,
pub root: u64, pub root: u64,
pub slots: BTreeSet<u64>, pub slots: BTreeSet<u64>,
pub signature: Signature,
pub wallclock: u64, pub wallclock: u64,
} }
@ -34,46 +58,15 @@ impl EpochSlots {
from, from,
root, root,
slots, slots,
signature: Signature::default(),
wallclock, wallclock,
} }
} }
} }
impl Signable for EpochSlots {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
#[derive(Serialize)]
struct SignData<'a> {
root: u64,
slots: &'a BTreeSet<u64>,
wallclock: u64,
}
let data = SignData {
root: self.root,
slots: &self.slots,
wallclock: self.wallclock,
};
Cow::Owned(serialize(&data).expect("unable to serialize EpochSlots"))
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Vote { pub struct Vote {
pub from: Pubkey, pub from: Pubkey,
pub transaction: Transaction, pub transaction: Transaction,
pub signature: Signature,
pub wallclock: u64, pub wallclock: u64,
} }
@ -82,39 +75,11 @@ impl Vote {
Self { Self {
from: *from, from: *from,
transaction, transaction,
signature: Signature::default(),
wallclock, wallclock,
} }
} }
} }
impl Signable for Vote {
fn pubkey(&self) -> Pubkey {
self.from
}
fn signable_data(&self) -> Cow<[u8]> {
#[derive(Serialize)]
struct SignData<'a> {
transaction: &'a Transaction,
wallclock: u64,
}
let data = SignData {
transaction: &self.transaction,
wallclock: self.wallclock,
};
Cow::Owned(serialize(&data).expect("unable to serialize Vote"))
}
fn get_signature(&self) -> Signature {
self.signature
}
fn set_signature(&mut self, signature: Signature) {
self.signature = signature
}
}
/// Type of the replicated value /// Type of the replicated value
/// These are labels for values in a record that is associated with `Pubkey` /// These are labels for values in a record that is associated with `Pubkey`
#[derive(PartialEq, Hash, Eq, Clone, Debug)] #[derive(PartialEq, Hash, Eq, Clone, Debug)]
@ -145,40 +110,57 @@ impl CrdsValueLabel {
} }
impl CrdsValue { impl CrdsValue {
pub fn new_unsigned(data: CrdsData) -> Self {
Self {
signature: Signature::default(),
data,
}
}
pub fn new_signed(data: CrdsData, keypair: &Keypair) -> Self {
let mut value = Self::new_unsigned(data);
value.sign(keypair);
value
}
/// Totally unsecure unverfiable wallclock of the node that generated this message /// Totally unsecure unverfiable wallclock of the node that generated this message
/// Latest wallclock is always picked. /// Latest wallclock is always picked.
/// This is used to time out push messages. /// This is used to time out push messages.
pub fn wallclock(&self) -> u64 { pub fn wallclock(&self) -> u64 {
match self { match &self.data {
CrdsValue::ContactInfo(contact_info) => contact_info.wallclock, CrdsData::ContactInfo(contact_info) => contact_info.wallclock,
CrdsValue::Vote(vote) => vote.wallclock, CrdsData::Vote(vote) => vote.wallclock,
CrdsValue::EpochSlots(vote) => vote.wallclock, CrdsData::EpochSlots(vote) => vote.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
match &self.data {
CrdsData::ContactInfo(contact_info) => contact_info.id,
CrdsData::Vote(vote) => vote.from,
CrdsData::EpochSlots(slots) => slots.from,
} }
} }
pub fn label(&self) -> CrdsValueLabel { pub fn label(&self) -> CrdsValueLabel {
match self { match &self.data {
CrdsValue::ContactInfo(contact_info) => { CrdsData::ContactInfo(_) => CrdsValueLabel::ContactInfo(self.pubkey()),
CrdsValueLabel::ContactInfo(contact_info.pubkey()) CrdsData::Vote(_) => CrdsValueLabel::Vote(self.pubkey()),
} CrdsData::EpochSlots(_) => CrdsValueLabel::EpochSlots(self.pubkey()),
CrdsValue::Vote(vote) => CrdsValueLabel::Vote(vote.pubkey()),
CrdsValue::EpochSlots(slots) => CrdsValueLabel::EpochSlots(slots.pubkey()),
} }
} }
pub fn contact_info(&self) -> Option<&ContactInfo> { pub fn contact_info(&self) -> Option<&ContactInfo> {
match self { match &self.data {
CrdsValue::ContactInfo(contact_info) => Some(contact_info), CrdsData::ContactInfo(contact_info) => Some(contact_info),
_ => None, _ => None,
} }
} }
pub fn vote(&self) -> Option<&Vote> { pub fn vote(&self) -> Option<&Vote> {
match self { match &self.data {
CrdsValue::Vote(vote) => Some(vote), CrdsData::Vote(vote) => Some(vote),
_ => None, _ => None,
} }
} }
pub fn epoch_slots(&self) -> Option<&EpochSlots> { pub fn epoch_slots(&self) -> Option<&EpochSlots> {
match self { match &self.data {
CrdsValue::EpochSlots(slots) => Some(slots), CrdsData::EpochSlots(slots) => Some(slots),
_ => None, _ => None,
} }
} }
@ -197,48 +179,6 @@ impl CrdsValue {
} }
} }
impl Signable for CrdsValue {
fn sign(&mut self, keypair: &Keypair) {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.sign(keypair),
CrdsValue::Vote(vote) => vote.sign(keypair),
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.sign(keypair),
};
}
fn verify(&self) -> bool {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.verify(),
CrdsValue::Vote(vote) => vote.verify(),
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.verify(),
}
}
fn pubkey(&self) -> Pubkey {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.pubkey(),
CrdsValue::Vote(vote) => vote.pubkey(),
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.pubkey(),
}
}
fn signable_data(&self) -> Cow<[u8]> {
unimplemented!()
}
fn get_signature(&self) -> Signature {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.get_signature(),
CrdsValue::Vote(vote) => vote.get_signature(),
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.get_signature(),
}
}
fn set_signature(&mut self, _: Signature) {
unimplemented!()
}
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
@ -263,17 +203,23 @@ mod test {
} }
#[test] #[test]
fn test_keys_and_values() { fn test_keys_and_values() {
let v = CrdsValue::ContactInfo(ContactInfo::default()); let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(v.wallclock(), 0); assert_eq!(v.wallclock(), 0);
let key = v.clone().contact_info().unwrap().id; let key = v.clone().contact_info().unwrap().id;
assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key)); assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));
let v = CrdsValue::Vote(Vote::new(&Pubkey::default(), test_tx(), 0)); let v =
CrdsValue::new_unsigned(CrdsData::Vote(Vote::new(&Pubkey::default(), test_tx(), 0)));
assert_eq!(v.wallclock(), 0); assert_eq!(v.wallclock(), 0);
let key = v.clone().vote().unwrap().from; let key = v.clone().vote().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::Vote(key)); assert_eq!(v.label(), CrdsValueLabel::Vote(key));
let v = CrdsValue::EpochSlots(EpochSlots::new(Pubkey::default(), 0, BTreeSet::new(), 0)); let v = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots::new(
Pubkey::default(),
0,
BTreeSet::new(),
0,
)));
assert_eq!(v.wallclock(), 0); assert_eq!(v.wallclock(), 0);
let key = v.clone().epoch_slots().unwrap().from; let key = v.clone().epoch_slots().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::EpochSlots(key)); assert_eq!(v.label(), CrdsValueLabel::EpochSlots(key));
@ -282,13 +228,24 @@ mod test {
fn test_signature() { fn test_signature() {
let keypair = Keypair::new(); let keypair = Keypair::new();
let wrong_keypair = Keypair::new(); let wrong_keypair = Keypair::new();
let mut v = let mut v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
CrdsValue::ContactInfo(ContactInfo::new_localhost(&keypair.pubkey(), timestamp())); &keypair.pubkey(),
timestamp(),
)));
verify_signatures(&mut v, &keypair, &wrong_keypair); verify_signatures(&mut v, &keypair, &wrong_keypair);
v = CrdsValue::Vote(Vote::new(&keypair.pubkey(), test_tx(), timestamp())); v = CrdsValue::new_unsigned(CrdsData::Vote(Vote::new(
&keypair.pubkey(),
test_tx(),
timestamp(),
)));
verify_signatures(&mut v, &keypair, &wrong_keypair); verify_signatures(&mut v, &keypair, &wrong_keypair);
let btreeset: BTreeSet<u64> = vec![1, 2, 3, 6, 8].into_iter().collect(); let btreeset: BTreeSet<u64> = vec![1, 2, 3, 6, 8].into_iter().collect();
v = CrdsValue::EpochSlots(EpochSlots::new(keypair.pubkey(), 0, btreeset, timestamp())); v = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots::new(
keypair.pubkey(),
0,
btreeset,
timestamp(),
)));
verify_signatures(&mut v, &keypair, &wrong_keypair); verify_signatures(&mut v, &keypair, &wrong_keypair);
} }
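Taken together, these crds_value changes move signing off the individual variants: a CrdsValue now carries a single Signature over its serialized CrdsData, and callers build values through new_unsigned or new_signed. A minimal sketch of the new call pattern, assuming the import paths used by the tests in this diff (solana_core::crds_value, solana_sdk::signature, solana_sdk::timing); paths outside the diff are an assumption, not part of the change:

use solana_core::contact_info::ContactInfo;
use solana_core::crds_value::{CrdsData, CrdsValue};
use solana_sdk::signature::{Keypair, KeypairUtil, Signable};
use solana_sdk::timing::timestamp;

fn example() {
    // Wrap a ContactInfo in the new CrdsData enum and sign it once.
    let keypair = Keypair::new();
    let ci = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
    let value = CrdsValue::new_signed(CrdsData::ContactInfo(ci), &keypair);

    // The single signature covers the serialized CrdsData payload.
    assert!(value.verify());
    assert_eq!(value.pubkey(), keypair.pubkey());
    assert_eq!(value.label().pubkey(), keypair.pubkey());
}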


@ -313,7 +313,7 @@ impl PohRecorder {
let poh_entry = self.poh.lock().unwrap().tick(); let poh_entry = self.poh.lock().unwrap().tick();
inc_new_counter_warn!( inc_new_counter_warn!(
"poh_recorder-tick_lock_contention", "poh_recorder-tick_lock_contention",
timing::duration_as_ms(&now.elapsed()) as usize timing::duration_as_us(&now.elapsed()) as usize
); );
let now = Instant::now(); let now = Instant::now();
if let Some(poh_entry) = poh_entry { if let Some(poh_entry) = poh_entry {
@ -323,7 +323,7 @@ impl PohRecorder {
if self.leader_first_tick_height.is_none() { if self.leader_first_tick_height.is_none() {
inc_new_counter_warn!( inc_new_counter_warn!(
"poh_recorder-tick_overhead", "poh_recorder-tick_overhead",
timing::duration_as_ms(&now.elapsed()) as usize timing::duration_as_us(&now.elapsed()) as usize
); );
return; return;
} }
@ -339,7 +339,7 @@ impl PohRecorder {
} }
inc_new_counter_warn!( inc_new_counter_warn!(
"poh_recorder-tick_overhead", "poh_recorder-tick_overhead",
timing::duration_as_ms(&now.elapsed()) as usize timing::duration_as_us(&now.elapsed()) as usize
); );
} }
@ -363,20 +363,29 @@ impl PohRecorder {
return Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached)); return Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached));
} }
let now = Instant::now(); {
if let Some(poh_entry) = self.poh.lock().unwrap().record(mixin) { let now = Instant::now();
let mut poh_lock = self.poh.lock().unwrap();
inc_new_counter_warn!( inc_new_counter_warn!(
"poh_recorder-record_lock_contention", "poh_recorder-record_lock_contention",
timing::duration_as_ms(&now.elapsed()) as usize timing::duration_as_us(&now.elapsed()) as usize
); );
let entry = Entry { let now = Instant::now();
num_hashes: poh_entry.num_hashes, let res = poh_lock.record(mixin);
hash: poh_entry.hash, inc_new_counter_warn!(
transactions, "poh_recorder-record_ms",
}; timing::duration_as_us(&now.elapsed()) as usize
self.sender );
.send((working_bank.bank.clone(), (entry, self.tick_height)))?; if let Some(poh_entry) = res {
return Ok(()); let entry = Entry {
num_hashes: poh_entry.num_hashes,
hash: poh_entry.hash,
transactions,
};
self.sender
.send((working_bank.bank.clone(), (entry, self.tick_height)))?;
return Ok(());
}
} }
// record() might fail if the next PoH hash needs to be a tick. But that's ok, tick() // record() might fail if the next PoH hash needs to be a tick. But that's ok, tick()
// and re-record() // and re-record()
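The record() rework above narrows the lock scope and splits the measurement into lock-contention time and record time, both now reported in microseconds. A rough illustration of that pattern using only std types; timed_record, poh_mutex, and do_record are placeholder names for this sketch, not part of the diff:

use std::sync::Mutex;
use std::time::Instant;

fn timed_record<T, R>(poh_mutex: &Mutex<T>, do_record: impl Fn(&mut T) -> R) -> (R, u128, u128) {
    // Time spent waiting for the lock (what *_lock_contention measures above).
    let now = Instant::now();
    let mut guard = poh_mutex.lock().unwrap();
    let lock_wait_us = now.elapsed().as_micros();

    // Time spent doing the record work while holding the lock.
    let now = Instant::now();
    let result = do_record(&mut guard);
    let record_us = now.elapsed().as_micros();

    (result, lock_wait_us, record_us)
}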


@ -9,25 +9,26 @@ use std::ops::Div;
/// Returns a list of indexes shuffled based on the input weights /// Returns a list of indexes shuffled based on the input weights
/// Note - The sum of all weights must not exceed `u64::MAX` /// Note - The sum of all weights must not exceed `u64::MAX`
pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize> pub fn weighted_shuffle<T>(weights: Vec<T>, mut rng: ChaChaRng) -> Vec<usize>
where where
T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive, T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
{ {
let mut rng = rng;
let total_weight: T = weights.clone().into_iter().sum(); let total_weight: T = weights.clone().into_iter().sum();
weights weights
.into_iter() .into_iter()
.enumerate() .enumerate()
.map(|(i, v)| { .map(|(i, v)| {
// This generates an "inverse" weight but it avoids floating point math
let x = (total_weight / v) let x = (total_weight / v)
.to_u64() .to_u64()
.expect("values > u64::max are not supported"); .expect("values > u64::max are not supported");
( (
i, i,
// capture the u64 into u128s to prevent overflow // capture the u64 into u128s to prevent overflow
(&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x), rng.gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
) )
}) })
// sort in ascending order
.sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val)) .sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val))
.map(|x| x.0) .map(|x| x.0)
.collect() .collect()
@ -35,22 +36,23 @@ where
/// Returns the highest index after computing a weighted shuffle. /// Returns the highest index after computing a weighted shuffle.
/// Saves doing any sorting for O(n) max calculation. /// Saves doing any sorting for O(n) max calculation.
pub fn weighted_best(weights_and_indicies: &[(u64, usize)], rng: ChaChaRng) -> usize { pub fn weighted_best(weights_and_indexes: &[(u64, usize)], mut rng: ChaChaRng) -> usize {
let mut rng = rng; if weights_and_indexes.is_empty() {
if weights_and_indicies.is_empty() {
return 0; return 0;
} }
let total_weight: u64 = weights_and_indicies.iter().map(|x| x.0).sum(); let total_weight: u64 = weights_and_indexes.iter().map(|x| x.0).sum();
let mut best_weight = 0; let mut lowest_weight = std::u128::MAX;
let mut best_index = 0; let mut best_index = 0;
for v in weights_and_indicies { for v in weights_and_indexes {
// This generates an "inverse" weight but it avoids floating point math
let x = (total_weight / v.0) let x = (total_weight / v.0)
.to_u64() .to_u64()
.expect("values > u64::max are not supported"); .expect("values > u64::max are not supported");
// capture the u64 into u128s to prevent overflow // capture the u64 into u128s to prevent overflow
let weight = (&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x); let computed_weight = rng.gen_range(1, u128::from(std::u16::MAX)) * u128::from(x);
if weight > best_weight { // The highest input weight maps to the lowest computed weight
best_weight = weight; if computed_weight < lowest_weight {
lowest_weight = computed_weight;
best_index = v.1; best_index = v.1;
} }
} }
@ -120,9 +122,12 @@ mod tests {
#[test] #[test]
fn test_weighted_best() { fn test_weighted_best() {
let mut weights = vec![(std::u32::MAX as u64, 0); 3]; let weights_and_indexes: Vec<_> = vec![100u64, 1000, 10_000, 10]
weights.push((1, 5)); .into_iter()
let best = weighted_best(&weights, ChaChaRng::from_seed([0x5b; 32])); .enumerate()
assert_eq!(best, 5); .map(|(i, weight)| (weight, i))
.collect();
let best_index = weighted_best(&weights_and_indexes, ChaChaRng::from_seed([0x5b; 32]));
assert_eq!(best_index, 2);
} }
} }
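The weighted_best fix above hinges on the inverse-weight trick: each entry is scored by total_weight / weight, so a larger input weight yields a smaller computed value, and the winner is the entry with the lowest computed weight. A small sketch of that arithmetic, using the same weights as the updated test (the function name here is illustrative only):

fn inverse_weights(weights: &[u64]) -> Vec<u64> {
    let total: u64 = weights.iter().sum();
    // Integer division keeps the math in u64 and avoids floating point.
    weights.iter().map(|w| total / w).collect()
}

fn main() {
    // Weights 100, 1000, 10_000, 10 sum to 11_110.
    let inv = inverse_weights(&[100, 1000, 10_000, 10]);
    // The heaviest entry (10_000) maps to the smallest inverse value (1),
    // which is why weighted_best now keeps the entry with the lowest
    // computed weight rather than the highest.
    assert_eq!(inv, vec![111, 11, 1, 1111]);
}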


@ -85,6 +85,7 @@ where
total_packets += more_packets.packets.len(); total_packets += more_packets.packets.len();
packets.push(more_packets) packets.push(more_packets)
} }
let now = Instant::now(); let now = Instant::now();
inc_new_counter_debug!("streamer-recv_window-recv", total_packets); inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
@ -127,7 +128,8 @@ where
} }
} }
blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?; let blocktree_insert_metrics = blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;
blocktree_insert_metrics.report_metrics("recv-window-insert-shreds");
trace!( trace!(
"Elapsed processing time in recv_window(): {}", "Elapsed processing time in recv_window(): {}",


@ -6,8 +6,8 @@ use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*; use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError; use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS; use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValue;
use solana_core::crds_value::CrdsValueLabel; use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
use solana_sdk::hash::hash; use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp; use solana_sdk::timing::timestamp;
@ -72,10 +72,16 @@ fn stakes(network: &Network) -> HashMap<Pubkey, u64> {
} }
fn star_network_create(num: usize) -> Network { fn star_network_create(num: usize) -> Network {
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let mut network: HashMap<_, _> = (1..num) let mut network: HashMap<_, _> = (1..num)
.map(|_| { .map(|_| {
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let id = new.label().pubkey(); let id = new.label().pubkey();
let mut node = CrdsGossip::default(); let mut node = CrdsGossip::default();
node.crds.insert(new.clone(), 0).unwrap(); node.crds.insert(new.clone(), 0).unwrap();
@ -93,14 +99,20 @@ fn star_network_create(num: usize) -> Network {
} }
fn rstar_network_create(num: usize) -> Network { fn rstar_network_create(num: usize) -> Network {
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let mut origin = CrdsGossip::default(); let mut origin = CrdsGossip::default();
let id = entry.label().pubkey(); let id = entry.label().pubkey();
origin.crds.insert(entry.clone(), 0).unwrap(); origin.crds.insert(entry.clone(), 0).unwrap();
origin.set_self(&id); origin.set_self(&id);
let mut network: HashMap<_, _> = (1..num) let mut network: HashMap<_, _> = (1..num)
.map(|_| { .map(|_| {
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let id = new.label().pubkey(); let id = new.label().pubkey();
let mut node = CrdsGossip::default(); let mut node = CrdsGossip::default();
node.crds.insert(new.clone(), 0).unwrap(); node.crds.insert(new.clone(), 0).unwrap();
@ -116,7 +128,10 @@ fn rstar_network_create(num: usize) -> Network {
fn ring_network_create(num: usize) -> Network { fn ring_network_create(num: usize) -> Network {
let mut network: HashMap<_, _> = (0..num) let mut network: HashMap<_, _> = (0..num)
.map(|_| { .map(|_| {
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let id = new.label().pubkey(); let id = new.label().pubkey();
let mut node = CrdsGossip::default(); let mut node = CrdsGossip::default();
node.crds.insert(new.clone(), 0).unwrap(); node.crds.insert(new.clone(), 0).unwrap();
@ -147,7 +162,10 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
let num = stakes.len(); let num = stakes.len();
let mut network: HashMap<_, _> = (0..num) let mut network: HashMap<_, _> = (0..num)
.map(|n| { .map(|n| {
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)); let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let id = new.label().pubkey(); let id = new.label().pubkey();
let mut node = CrdsGossip::default(); let mut node = CrdsGossip::default();
node.crds.insert(new.clone(), 0).unwrap(); node.crds.insert(new.clone(), 0).unwrap();
@ -219,7 +237,11 @@ fn network_simulator(network: &mut Network, max_convergance: f64) {
.and_then(|v| v.contact_info().cloned()) .and_then(|v| v.contact_info().cloned())
.unwrap(); .unwrap();
m.wallclock = now; m.wallclock = now;
node.process_push_message(&Pubkey::default(), vec![CrdsValue::ContactInfo(m)], now); node.process_push_message(
&Pubkey::default(),
vec![CrdsValue::new_unsigned(CrdsData::ContactInfo(m))],
now,
);
}); });
// push for a bit // push for a bit
let (queue_size, bytes_tx) = network_run_push(network, start, end); let (queue_size, bytes_tx) = network_run_push(network, start, end);
@ -547,7 +569,10 @@ fn test_prune_errors() {
let prune_pubkey = Pubkey::new(&[2; 32]); let prune_pubkey = Pubkey::new(&[2; 32]);
crds_gossip crds_gossip
.crds .crds
.insert(CrdsValue::ContactInfo(ci.clone()), 0) .insert(
CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
0,
)
.unwrap(); .unwrap();
crds_gossip.refresh_push_active_set(&HashMap::new()); crds_gossip.refresh_push_active_set(&HashMap::new());
let now = timestamp(); let now = timestamp();


@ -1,6 +1,6 @@
[package] [package]
name = "solana-crate-features" name = "solana-crate-features"
version = "0.20.0" version = "0.20.2"
description = "Solana Crate Features" description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"


@ -1,6 +1,6 @@
[package] [package]
name = "solana-drone" name = "solana-drone"
version = "0.20.0" version = "0.20.2"
description = "Solana Drone" description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -19,9 +19,9 @@ clap = "2.33"
log = "0.4.8" log = "0.4.8"
serde = "1.0.101" serde = "1.0.101"
serde_derive = "1.0.101" serde_derive = "1.0.101"
solana-logger = { path = "../logger", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.2" }
solana-metrics = { path = "../metrics", version = "0.20.0" } solana-metrics = { path = "../metrics", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
tokio = "0.1" tokio = "0.1"
tokio-codec = "0.1" tokio-codec = "0.1"


@ -1,6 +1,6 @@
[package] [package]
name = "solana-fixed-buf" name = "solana-fixed-buf"
version = "0.20.0" version = "0.20.2"
description = "A fixed-size byte array that supports bincode serde" description = "A fixed-size byte array that supports bincode serde"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-genesis" name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.20.0" version = "0.20.2"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -15,10 +15,10 @@ serde = "1.0.101"
serde_derive = "1.0.101" serde_derive = "1.0.101"
serde_json = "1.0.41" serde_json = "1.0.41"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" } solana-genesis-programs = { path = "../genesis_programs", version = "0.20.2" }
solana-ledger = { path = "../ledger", version = "0.20.0" } solana-ledger = { path = "../ledger", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" } solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" } solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
tempfile = "3.1.0" tempfile = "3.1.0"


@ -310,16 +310,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let bootstrap_storage_keypair = read_keypair_file(bootstrap_storage_keypair_file)?; let bootstrap_storage_keypair = read_keypair_file(bootstrap_storage_keypair_file)?;
let mint_keypair = read_keypair_file(mint_keypair_file)?; let mint_keypair = read_keypair_file(mint_keypair_file)?;
let vote_account = vote_state::create_account( let bootstrap_leader_vote_account = vote_state::create_account(
&bootstrap_vote_keypair.pubkey(), &bootstrap_vote_keypair.pubkey(),
&bootstrap_leader_keypair.pubkey(), &bootstrap_leader_keypair.pubkey(),
0, 0,
1, 1,
); );
let stake_account = stake_state::create_account( let bootstrap_leader_stake_account = stake_state::create_account(
&bootstrap_stake_keypair.pubkey(), &bootstrap_leader_keypair.pubkey(),
&bootstrap_vote_keypair.pubkey(), &bootstrap_vote_keypair.pubkey(),
&vote_account, &bootstrap_leader_vote_account,
bootstrap_leader_stake_lamports, bootstrap_leader_stake_lamports,
); );
@ -335,9 +335,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
Account::new(bootstrap_leader_lamports, 0, &system_program::id()), Account::new(bootstrap_leader_lamports, 0, &system_program::id()),
), ),
// where votes go to // where votes go to
(bootstrap_vote_keypair.pubkey(), vote_account), (
bootstrap_vote_keypair.pubkey(),
bootstrap_leader_vote_account,
),
// passive bootstrap leader stake // passive bootstrap leader stake
(bootstrap_stake_keypair.pubkey(), stake_account), (
bootstrap_stake_keypair.pubkey(),
bootstrap_leader_stake_account,
),
( (
bootstrap_storage_keypair.pubkey(), bootstrap_storage_keypair.pubkey(),
storage_contract::create_validator_storage_account( storage_contract::create_validator_storage_account(


@ -1,6 +1,6 @@
[package] [package]
name = "solana-genesis-programs" name = "solana-genesis-programs"
version = "0.20.0" version = "0.20.2"
description = "Solana genesis programs" description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -10,30 +10,25 @@ edition = "2018"
[dependencies] [dependencies]
log = { version = "0.4.8" } log = { version = "0.4.8" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.20.0" } solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.20.2" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.20.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.20.2" }
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" } solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" } solana-budget-program = { path = "../programs/budget_program", version = "0.20.2" }
solana-config-api = { path = "../programs/config_api", version = "0.20.0" } solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
solana-config-program = { path = "../programs/config_program", version = "0.20.0" } solana-config-program = { path = "../programs/config_program", version = "0.20.2" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" } solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.2" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" } solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.2" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true } solana-runtime = { path = "../runtime", version = "0.20.2" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true } solana-sdk = { path = "../sdk", version = "0.20.2" }
solana-runtime = { path = "../runtime", version = "0.20.0" } solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-stake-program = { path = "../programs/stake_program", version = "0.20.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" } solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
solana-stake-program = { path = "../programs/stake_program", version = "0.20.0" } solana-storage-program = { path = "../programs/storage_program", version = "0.20.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" } solana-vest-api = { path = "../programs/vest_api", version = "0.20.2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" } solana-vest-program = { path = "../programs/vest_program", version = "0.20.2" }
solana-vest-api = { path = "../programs/vest_api", version = "0.20.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
solana-vest-program = { path = "../programs/vest_program", version = "0.20.0" } solana-vote-program = { path = "../programs/vote_program", version = "0.20.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.20.0" }
[lib] [lib]
crate-type = ["lib"] crate-type = ["lib"]
name = "solana_genesis_programs" name = "solana_genesis_programs"
[features]
move = ["solana-move-loader-program", "solana-move-loader-api"]


@ -1,6 +1,6 @@
use solana_sdk::{ use solana_sdk::{
clock::Epoch, genesis_block::OperatingMode, pubkey::Pubkey, clock::Epoch, genesis_block::OperatingMode, move_loader::solana_move_loader_program,
system_program::solana_system_program, pubkey::Pubkey, system_program::solana_system_program,
}; };
#[macro_use] #[macro_use]
@ -11,9 +11,6 @@ extern crate solana_budget_program;
extern crate solana_config_program; extern crate solana_config_program;
#[macro_use] #[macro_use]
extern crate solana_exchange_program; extern crate solana_exchange_program;
#[cfg(feature = "move")]
#[macro_use]
extern crate solana_move_loader_program;
#[macro_use] #[macro_use]
extern crate solana_stake_program; extern crate solana_stake_program;
#[macro_use] #[macro_use]
@ -42,8 +39,7 @@ pub fn get(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(String, P
// Programs that are only available in Development mode // Programs that are only available in Development mode
solana_budget_program!(), solana_budget_program!(),
solana_exchange_program!(), solana_exchange_program!(),
#[cfg(feature = "move")] solana_move_loader_program(),
solana_move_loader_program!(),
]) ])
} else { } else {
None None
@ -107,7 +103,7 @@ mod tests {
#[test] #[test]
fn test_development_programs() { fn test_development_programs() {
assert_eq!(get(OperatingMode::Development, 0).unwrap().len(), 9); assert_eq!(get(OperatingMode::Development, 0).unwrap().len(), 10);
assert_eq!(get(OperatingMode::Development, 1), None); assert_eq!(get(OperatingMode::Development, 1), None);
} }


@ -3,18 +3,18 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-gossip" name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.20.0" version = "0.20.2"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.33.0" clap = "2.33.0"
solana-core = { path = "../core", version = "0.20.0" } solana-core = { path = "../core", version = "0.20.2" }
solana-client = { path = "../client", version = "0.20.0" } solana-client = { path = "../client", version = "0.20.2" }
solana-logger = { path = "../logger", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.2" }
solana-netutil = { path = "../netutil", version = "0.20.0" } solana-netutil = { path = "../netutil", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }


@ -3,11 +3,10 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-install" name = "solana-install"
description = "The solana cluster software installer" description = "The solana cluster software installer"
version = "0.20.0" version = "0.20.2"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
default-run = "solana-install"
[dependencies] [dependencies]
atty = "0.2.11" atty = "0.2.11"
@ -29,10 +28,10 @@ serde = "1.0.101"
serde_derive = "1.0.101" serde_derive = "1.0.101"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
sha2 = "0.8.0" sha2 = "0.8.0"
solana-client = { path = "../client", version = "0.20.0" } solana-client = { path = "../client", version = "0.20.2" }
solana-config-api = { path = "../programs/config_api", version = "0.20.0" } solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
solana-logger = { path = "../logger", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
tar = "0.4.26" tar = "0.4.26"
tempdir = "0.3.7" tempdir = "0.3.7"
url = "2.1.0" url = "2.1.0"


@ -1,6 +1,6 @@
[package] [package]
name = "solana-keygen" name = "solana-keygen"
version = "0.20.0" version = "0.20.2"
description = "Solana key generation utility" description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -15,7 +15,7 @@ edition = "2018"
clap = "2.33" clap = "2.33"
dirs = "2.0.2" dirs = "2.0.2"
rpassword = "4.0" rpassword = "4.0"
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
tiny-bip39 = "0.6.2" tiny-bip39 = "0.6.2"
[[bin]] [[bin]]


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-ledger-tool" name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.20.0" version = "0.20.2"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -15,10 +15,10 @@ serde = "1.0.101"
serde_derive = "1.0.101" serde_derive = "1.0.101"
serde_json = "1.0.41" serde_json = "1.0.41"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
solana-ledger = { path = "../ledger", version = "0.20.0" } solana-ledger = { path = "../ledger", version = "0.20.2" }
solana-logger = { path = "../logger", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.2" }
solana-runtime = { path = "../runtime", version = "0.20.0" } solana-runtime = { path = "../runtime", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
[dev-dependencies] [dev-dependencies]
assert_cmd = "0.11" assert_cmd = "0.11"


@ -1,6 +1,6 @@
[package] [package]
name = "solana-ledger" name = "solana-ledger"
version = "0.20.0" version = "0.20.2"
description = "Solana ledger" description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -26,16 +26,16 @@ rayon = "1.2.0"
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] } reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
serde = "1.0.101" serde = "1.0.101"
serde_derive = "1.0.101" serde_derive = "1.0.101"
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" } solana-genesis-programs = { path = "../genesis_programs", version = "0.20.2" }
solana-logger = { path = "../logger", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.2" }
solana-measure = { path = "../measure", version = "0.20.0" } solana-measure = { path = "../measure", version = "0.20.2" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.0" } solana-merkle-tree = { path = "../merkle-tree", version = "0.20.2" }
solana-metrics = { path = "../metrics", version = "0.20.0" } solana-metrics = { path = "../metrics", version = "0.20.2" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" } solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.2" }
solana-runtime = { path = "../runtime", version = "0.20.0" } solana-runtime = { path = "../runtime", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" } solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
sys-info = "0.5.8" sys-info = "0.5.8"
tar = "0.4.26" tar = "0.4.26"
tempfile = "3.1.0" tempfile = "3.1.0"
@ -49,7 +49,7 @@ features = ["lz4"]
[dev-dependencies] [dev-dependencies]
matches = "0.1.6" matches = "0.1.6"
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" } solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
[lib] [lib]
crate-type = ["lib"] crate-type = ["lib"]


@ -17,6 +17,7 @@ use rayon::iter::IntoParallelRefIterator;
use rayon::iter::ParallelIterator; use rayon::iter::ParallelIterator;
use rayon::ThreadPool; use rayon::ThreadPool;
use rocksdb::DBRawIterator; use rocksdb::DBRawIterator;
use solana_measure::measure::Measure;
use solana_metrics::{datapoint_debug, datapoint_error}; use solana_metrics::{datapoint_debug, datapoint_error};
use solana_rayon_threadlimit::get_thread_count; use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::clock::Slot; use solana_sdk::clock::Slot;
@ -30,7 +31,7 @@ use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::rc::Rc; use std::rc::Rc;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError}; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, Mutex, RwLock};
pub const BLOCKTREE_DIRECTORY: &str = "rocksdb"; pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";
@ -41,7 +42,6 @@ thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000; pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000;
pub type SlotMetaWorkingSetEntry = (Rc<RefCell<SlotMeta>>, Option<SlotMeta>);
pub type CompletedSlotsReceiver = Receiver<Vec<u64>>; pub type CompletedSlotsReceiver = Receiver<Vec<u64>>;
// ledger window // ledger window
@ -55,10 +55,80 @@ pub struct Blocktree {
data_shred_cf: LedgerColumn<cf::ShredData>, data_shred_cf: LedgerColumn<cf::ShredData>,
code_shred_cf: LedgerColumn<cf::ShredCode>, code_shred_cf: LedgerColumn<cf::ShredCode>,
last_root: Arc<RwLock<u64>>, last_root: Arc<RwLock<u64>>,
insert_shreds_lock: Arc<Mutex<()>>,
pub new_shreds_signals: Vec<SyncSender<bool>>, pub new_shreds_signals: Vec<SyncSender<bool>>,
pub completed_slots_senders: Vec<SyncSender<Vec<u64>>>, pub completed_slots_senders: Vec<SyncSender<Vec<u64>>>,
} }
pub struct IndexMetaWorkingSetEntry {
index: Index,
// true only if at least one shred for this Index was inserted since the time this
// struct was created
did_insert_occur: bool,
}
pub struct SlotMetaWorkingSetEntry {
new_slot_meta: Rc<RefCell<SlotMeta>>,
old_slot_meta: Option<SlotMeta>,
// True only if at least one shred for this SlotMeta was inserted since the time this
// struct was created.
did_insert_occur: bool,
}
pub struct BlocktreeInsertionMetrics {
pub num_shreds: usize,
pub insert_lock_elapsed: u64,
pub insert_shreds_elapsed: u64,
pub shred_recovery_elapsed: u64,
pub chaining_elapsed: u64,
pub commit_working_sets_elapsed: u64,
pub write_batch_elapsed: u64,
pub total_elapsed: u64,
pub num_inserted: u64,
pub num_recovered: usize,
pub index_meta_time: u64,
}
impl SlotMetaWorkingSetEntry {
fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self {
Self {
new_slot_meta,
old_slot_meta,
did_insert_occur: false,
}
}
}
impl BlocktreeInsertionMetrics {
pub fn report_metrics(&self, metric_name: &'static str) {
datapoint_debug!(
metric_name,
("num_shreds", self.num_shreds as i64, i64),
("total_elapsed", self.total_elapsed as i64, i64),
("insert_lock_elapsed", self.insert_lock_elapsed as i64, i64),
(
"insert_shreds_elapsed",
self.insert_shreds_elapsed as i64,
i64
),
(
"shred_recovery_elapsed",
self.shred_recovery_elapsed as i64,
i64
),
("chaining_elapsed", self.chaining_elapsed as i64, i64),
(
"commit_working_sets_elapsed",
self.commit_working_sets_elapsed as i64,
i64
),
("write_batch_elapsed", self.write_batch_elapsed as i64, i64),
("num_inserted", self.num_inserted as i64, i64),
("num_recovered", self.num_recovered as i64, i64),
);
}
}
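// Usage sketch (not part of this diff): insert_shreds() now returns
// BlocktreeInsertionMetrics, and the caller decides when to report them,
// mirroring the window_service change earlier in this compare view.
// `blocktree`, `shreds`, and `leader_schedule_cache` are assumed to exist
// in the calling context:
//
//     let metrics = blocktree.insert_shreds(shreds, Some(&leader_schedule_cache))?;
//     metrics.report_metrics("recv-window-insert-shreds");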
impl Blocktree { impl Blocktree {
/// Opens a Ledger in directory, provides "infinite" window of shreds /// Opens a Ledger in directory, provides "infinite" window of shreds
pub fn open(ledger_path: &Path) -> Result<Blocktree> { pub fn open(ledger_path: &Path) -> Result<Blocktree> {
@ -106,6 +176,7 @@ impl Blocktree {
code_shred_cf, code_shred_cf,
new_shreds_signals: vec![], new_shreds_signals: vec![],
completed_slots_senders: vec![], completed_slots_senders: vec![],
insert_shreds_lock: Arc::new(Mutex::new(())),
last_root, last_root,
}) })
} }
@ -259,7 +330,7 @@ impl Blocktree {
fn try_shred_recovery( fn try_shred_recovery(
db: &Database, db: &Database,
erasure_metas: &HashMap<(u64, u64), ErasureMeta>, erasure_metas: &HashMap<(u64, u64), ErasureMeta>,
index_working_set: &HashMap<u64, Index>, index_working_set: &HashMap<u64, IndexMetaWorkingSetEntry>,
prev_inserted_datas: &mut HashMap<(u64, u64), Shred>, prev_inserted_datas: &mut HashMap<(u64, u64), Shred>,
prev_inserted_codes: &mut HashMap<(u64, u64), Shred>, prev_inserted_codes: &mut HashMap<(u64, u64), Shred>,
) -> Vec<Shred> { ) -> Vec<Shred> {
@ -284,7 +355,8 @@ impl Blocktree {
); );
}; };
let index = index_working_set.get(&slot).expect("Index"); let index_meta_entry = index_working_set.get(&slot).expect("Index");
let index = &index_meta_entry.index;
match erasure_meta.status(&index) { match erasure_meta.status(&index) {
ErasureMetaStatus::CanRecover => { ErasureMetaStatus::CanRecover => {
// Find shreds for this erasure set and try recovery // Find shreds for this erasure set and try recovery
@ -358,7 +430,13 @@ impl Blocktree {
&self, &self,
shreds: Vec<Shred>, shreds: Vec<Shred>,
leader_schedule: Option<&Arc<LeaderScheduleCache>>, leader_schedule: Option<&Arc<LeaderScheduleCache>>,
) -> Result<()> { ) -> Result<BlocktreeInsertionMetrics> {
let mut total_start = Measure::start("Total elapsed");
let mut start = Measure::start("Blocktree lock");
let _lock = self.insert_shreds_lock.lock().unwrap();
start.stop();
let insert_lock_elapsed = start.as_us();
let db = &*self.db; let db = &*self.db;
let mut write_batch = db.batch()?; let mut write_batch = db.batch()?;
@ -368,26 +446,43 @@ impl Blocktree {
let mut slot_meta_working_set = HashMap::new(); let mut slot_meta_working_set = HashMap::new();
let mut index_working_set = HashMap::new(); let mut index_working_set = HashMap::new();
let num_shreds = shreds.len();
let mut start = Measure::start("Shred insertion");
let mut num_inserted = 0;
let mut index_meta_time = 0;
shreds.into_iter().for_each(|shred| { shreds.into_iter().for_each(|shred| {
if shred.is_data() { let insert_success = {
self.check_insert_data_shred( if shred.is_data() {
shred, self.check_insert_data_shred(
&mut index_working_set, shred,
&mut slot_meta_working_set, &mut index_working_set,
&mut write_batch, &mut slot_meta_working_set,
&mut just_inserted_data_shreds, &mut write_batch,
); &mut just_inserted_data_shreds,
} else if shred.is_code() { &mut index_meta_time,
self.check_insert_coding_shred( )
shred, } else if shred.is_code() {
&mut erasure_metas, self.check_insert_coding_shred(
&mut index_working_set, shred,
&mut write_batch, &mut erasure_metas,
&mut just_inserted_coding_shreds, &mut index_working_set,
); &mut write_batch,
&mut just_inserted_coding_shreds,
&mut index_meta_time,
)
} else {
panic!("There should be no other case");
}
};
if insert_success {
num_inserted += 1;
} }
}); });
start.stop();
let insert_shreds_elapsed = start.as_us();
let mut start = Measure::start("Shred recovery");
let mut num_recovered = 0;
if let Some(leader_schedule_cache) = leader_schedule { if let Some(leader_schedule_cache) = leader_schedule {
let recovered_data = Self::try_shred_recovery( let recovered_data = Self::try_shred_recovery(
&db, &db,
@ -397,6 +492,7 @@ impl Blocktree {
&mut just_inserted_coding_shreds, &mut just_inserted_coding_shreds,
); );
num_recovered = recovered_data.len();
recovered_data.into_iter().for_each(|shred| { recovered_data.into_iter().for_each(|shred| {
if let Some(leader) = leader_schedule_cache.slot_leader_at(shred.slot(), None) { if let Some(leader) = leader_schedule_cache.slot_leader_at(shred.slot(), None) {
if shred.verify(&leader) { if shred.verify(&leader) {
@ -406,15 +502,23 @@ impl Blocktree {
&mut slot_meta_working_set, &mut slot_meta_working_set,
&mut write_batch, &mut write_batch,
&mut just_inserted_coding_shreds, &mut just_inserted_coding_shreds,
) &mut index_meta_time,
);
} }
} }
}); });
} }
start.stop();
let shred_recovery_elapsed = start.as_us();
// Handle chaining for the working set let mut start = Measure::start("Shred recovery");
handle_chaining(&self.db, &mut write_batch, &slot_meta_working_set)?; // Handle chaining for the members of the slot_meta_working_set that were inserted into,
// drop the others
handle_chaining(&self.db, &mut write_batch, &mut slot_meta_working_set)?;
start.stop();
let chaining_elapsed = start.as_us();
let mut start = Measure::start("Commit Working Sets");
let (should_signal, newly_completed_slots) = commit_slot_meta_working_set( let (should_signal, newly_completed_slots) = commit_slot_meta_working_set(
&slot_meta_working_set, &slot_meta_working_set,
&self.completed_slots_senders, &self.completed_slots_senders,
@ -425,11 +529,18 @@ impl Blocktree {
write_batch.put::<cf::ErasureMeta>((slot, set_index), &erasure_meta)?; write_batch.put::<cf::ErasureMeta>((slot, set_index), &erasure_meta)?;
} }
for (&slot, index) in index_working_set.iter() { for (&slot, index_working_set_entry) in index_working_set.iter() {
write_batch.put::<cf::Index>(slot, index)?; if index_working_set_entry.did_insert_occur {
write_batch.put::<cf::Index>(slot, &index_working_set_entry.index)?;
}
} }
start.stop();
let commit_working_sets_elapsed = start.as_us();
let mut start = Measure::start("Write Batch");
self.db.write(write_batch)?; self.db.write(write_batch)?;
start.stop();
let write_batch_elapsed = start.as_us();
if should_signal { if should_signal {
for signal in &self.new_shreds_signals { for signal in &self.new_shreds_signals {
@ -444,82 +555,96 @@ impl Blocktree {
newly_completed_slots, newly_completed_slots,
)?; )?;
Ok(()) total_start.stop();
Ok(BlocktreeInsertionMetrics {
num_shreds,
total_elapsed: total_start.as_us(),
insert_lock_elapsed,
insert_shreds_elapsed,
shred_recovery_elapsed,
chaining_elapsed,
commit_working_sets_elapsed,
write_batch_elapsed,
num_inserted,
num_recovered,
index_meta_time,
})
} }
     fn check_insert_coding_shred(
         &self,
         shred: Shred,
         erasure_metas: &mut HashMap<(u64, u64), ErasureMeta>,
-        index_working_set: &mut HashMap<u64, Index>,
+        index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
         write_batch: &mut WriteBatch,
         just_inserted_coding_shreds: &mut HashMap<(u64, u64), Shred>,
-    ) {
+        index_meta_time: &mut u64,
+    ) -> bool {
         let slot = shred.slot();
         let shred_index = u64::from(shred.index());
-        let (index_meta, mut new_index_meta) =
-            get_index_meta_entry(&self.db, slot, index_working_set);
-        let index_meta = index_meta.unwrap_or_else(|| new_index_meta.as_mut().unwrap());
+        let index_meta_working_set_entry =
+            get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
+        let index_meta = &mut index_meta_working_set_entry.index;
         // This gives the index of first coding shred in this FEC block
         // So, all coding shreds in a given FEC block will have the same set index
         if Blocktree::should_insert_coding_shred(&shred, index_meta.coding(), &self.last_root) {
-            if let Ok(()) = self.insert_coding_shred(erasure_metas, index_meta, &shred, write_batch)
-            {
-                just_inserted_coding_shreds
-                    .entry((slot, shred_index))
-                    .or_insert_with(|| shred);
-                new_index_meta.map(|n| index_working_set.insert(slot, n));
-            }
+            self.insert_coding_shred(erasure_metas, index_meta, &shred, write_batch)
+                .map(|_| {
+                    // Insert was a success!
+                    just_inserted_coding_shreds
+                        .entry((slot, shred_index))
+                        .or_insert_with(|| shred);
+                    index_meta_working_set_entry.did_insert_occur = true;
+                })
+                .is_ok()
+        } else {
+            false
         }
     }
     fn check_insert_data_shred(
         &self,
         shred: Shred,
-        index_working_set: &mut HashMap<u64, Index>,
+        index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
         slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
         write_batch: &mut WriteBatch,
         just_inserted_data_shreds: &mut HashMap<(u64, u64), Shred>,
-    ) {
+        index_meta_time: &mut u64,
+    ) -> bool {
         let slot = shred.slot();
         let shred_index = u64::from(shred.index());
-        let (index_meta, mut new_index_meta) =
-            get_index_meta_entry(&self.db, slot, index_working_set);
-        let (slot_meta_entry, mut new_slot_meta_entry) =
-            get_slot_meta_entry(&self.db, slot_meta_working_set, slot, shred.parent());
-        let insert_success = {
-            let index_meta = index_meta.unwrap_or_else(|| new_index_meta.as_mut().unwrap());
-            let entry = slot_meta_entry.unwrap_or_else(|| new_slot_meta_entry.as_mut().unwrap());
-            let mut slot_meta = entry.0.borrow_mut();
-            if Blocktree::should_insert_data_shred(
-                &shred,
-                &slot_meta,
-                index_meta.data(),
-                &self.last_root,
-            ) {
-                if let Ok(()) = self.insert_data_shred(
-                    &mut slot_meta,
-                    index_meta.data_mut(),
-                    &shred,
-                    write_batch,
-                ) {
-                    just_inserted_data_shreds.insert((slot, shred_index), shred);
-                    new_index_meta.map(|n| index_working_set.insert(slot, n));
-                    true
-                } else {
-                    false
-                }
-            } else {
-                false
-            }
-        };
-        if insert_success {
-            new_slot_meta_entry.map(|n| slot_meta_working_set.insert(slot, n));
-        }
+        let index_meta_working_set_entry =
+            get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
+        let index_meta = &mut index_meta_working_set_entry.index;
+        let slot_meta_entry =
+            get_slot_meta_entry(&self.db, slot_meta_working_set, slot, shred.parent());
+        let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();
+        if Blocktree::should_insert_data_shred(
+            &shred,
+            slot_meta,
+            index_meta.data(),
+            &self.last_root,
+        ) {
+            if let Ok(()) =
+                self.insert_data_shred(slot_meta, index_meta.data_mut(), &shred, write_batch)
+            {
+                just_inserted_data_shreds.insert((slot, shred_index), shred);
+                index_meta_working_set_entry.did_insert_occur = true;
+                slot_meta_entry.did_insert_occur = true;
+                true
+            } else {
+                false
+            }
+        } else {
+            false
+        }
     }
@@ -647,9 +772,7 @@
         }
         let last_root = *last_root.read().unwrap();
-        verify_shred_slots(slot, slot_meta.parent_slot, last_root);
-        true
+        verify_shred_slots(slot, slot_meta.parent_slot, last_root)
     }
     fn insert_data_shred(
@@ -661,7 +784,6 @@
     ) -> Result<()> {
         let slot = shred.slot();
         let index = u64::from(shred.index());
-        let parent = shred.parent();
         let last_in_slot = if shred.last_in_slot() {
             debug!("got last in slot");
@@ -677,9 +799,8 @@
             false
         };
-        if is_orphan(slot_meta) {
-            slot_meta.parent_slot = parent;
-        }
+        // Parent for slot meta should have been set by this point
+        assert!(!is_orphan(slot_meta));
         let data_cf = self.db.column::<cf::ShredData>();
@@ -1218,22 +1339,24 @@ fn update_slot_meta(
 fn get_index_meta_entry<'a>(
     db: &Database,
     slot: u64,
-    index_working_set: &'a mut HashMap<u64, Index>,
-) -> (Option<&'a mut Index>, Option<Index>) {
+    index_working_set: &'a mut HashMap<u64, IndexMetaWorkingSetEntry>,
+    index_meta_time: &mut u64,
+) -> &'a mut IndexMetaWorkingSetEntry {
     let index_cf = db.column::<cf::Index>();
-    index_working_set
-        .get_mut(&slot)
-        .map(|i| (Some(i), None))
-        .unwrap_or_else(|| {
-            let newly_inserted_meta = Some(
-                index_cf
-                    .get(slot)
-                    .unwrap()
-                    .unwrap_or_else(|| Index::new(slot)),
-            );
-            (None, newly_inserted_meta)
-        })
+    let mut total_start = Measure::start("Total elapsed");
+    let res = index_working_set.entry(slot).or_insert_with(|| {
+        let newly_inserted_meta = index_cf
+            .get(slot)
+            .unwrap()
+            .unwrap_or_else(|| Index::new(slot));
+        IndexMetaWorkingSetEntry {
+            index: newly_inserted_meta,
+            did_insert_occur: false,
+        }
+    });
+    total_start.stop();
+    *index_meta_time += total_start.as_us();
+    res
 }
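IndexMetaWorkingSetEntry is not shown in this diff; a minimal sketch consistent with how it is built and consumed here would be:

struct IndexMetaWorkingSetEntry {
    // Working copy of the per-slot shred Index, loaded from the index column family
    // (or freshly created) the first time a slot is touched in this insert call.
    index: Index,
    // Set to true only when a shred actually lands in this slot, so untouched
    // entries can be skipped when the working set is committed to the write batch.
    did_insert_occur: bool,
}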
 fn get_slot_meta_entry<'a>(
@@ -1241,39 +1364,30 @@ fn get_slot_meta_entry<'a>(
     slot_meta_working_set: &'a mut HashMap<u64, SlotMetaWorkingSetEntry>,
     slot: u64,
     parent_slot: u64,
-) -> (
-    Option<&'a mut SlotMetaWorkingSetEntry>,
-    Option<SlotMetaWorkingSetEntry>,
-) {
+) -> &'a mut SlotMetaWorkingSetEntry {
     let meta_cf = db.column::<cf::SlotMeta>();
     // Check if we've already inserted the slot metadata for this blob's slot
-    slot_meta_working_set
-        .get_mut(&slot)
-        .map(|s| (Some(s), None))
-        .unwrap_or_else(|| {
-            // Store a 2-tuple of the metadata (working copy, backup copy)
-            if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") {
-                let backup = Some(meta.clone());
-                // If parent_slot == std::u64::MAX, then this is one of the orphans inserted
-                // during the chaining process, see the function find_slot_meta_in_cached_state()
-                // for details. Slots that are orphans are missing a parent_slot, so we should
-                // fill in the parent now that we know it.
-                if is_orphan(&meta) {
-                    meta.parent_slot = parent_slot;
-                }
-                (None, Some((Rc::new(RefCell::new(meta)), backup)))
-            } else {
-                (
-                    None,
-                    Some((
-                        Rc::new(RefCell::new(SlotMeta::new(slot, parent_slot))),
-                        None,
-                    )),
-                )
-            }
-        })
+    slot_meta_working_set.entry(slot).or_insert_with(|| {
+        // Store a 2-tuple of the metadata (working copy, backup copy)
+        if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") {
+            let backup = Some(meta.clone());
+            // If parent_slot == std::u64::MAX, then this is one of the orphans inserted
+            // during the chaining process, see the function find_slot_meta_in_cached_state()
+            // for details. Slots that are orphans are missing a parent_slot, so we should
+            // fill in the parent now that we know it.
+            if is_orphan(&meta) {
+                meta.parent_slot = parent_slot;
+            }
+            SlotMetaWorkingSetEntry::new(Rc::new(RefCell::new(meta)), backup)
+        } else {
+            SlotMetaWorkingSetEntry::new(
+                Rc::new(RefCell::new(SlotMeta::new(slot, parent_slot))),
+                None,
+            )
+        }
+    })
 }
 fn is_valid_write_to_slot_0(slot_to_write: u64, parent_slot: u64, last_root: u64) -> bool {
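SlotMetaWorkingSetEntry is likewise defined elsewhere; from the constructor calls above and the field accesses in commit_slot_meta_working_set and handle_chaining below, its shape is roughly (a sketch, with the `new` signature assumed):

struct SlotMetaWorkingSetEntry {
    new_slot_meta: Rc<RefCell<SlotMeta>>, // working copy, shared with the chaining code
    old_slot_meta: Option<SlotMeta>,      // backup of the SlotMeta that was already on disk
    did_insert_occur: bool,               // true once a data shred is inserted for this slot
}

impl SlotMetaWorkingSetEntry {
    // Assumed constructor, matching the two call sites in get_slot_meta_entry above.
    fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self {
        Self {
            new_slot_meta,
            old_slot_meta,
            did_insert_occur: false,
        }
    }
}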
@@ -1327,8 +1441,11 @@ fn commit_slot_meta_working_set(
     // Check if any metadata was changed, if so, insert the new version of the
     // metadata into the write batch
-    for (slot, (meta, meta_backup)) in slot_meta_working_set.iter() {
-        let meta: &SlotMeta = &RefCell::borrow(&*meta);
+    for (slot, slot_meta_entry) in slot_meta_working_set.iter() {
+        // Any slot that wasn't written to should have been filtered out by now.
+        assert!(slot_meta_entry.did_insert_occur);
+        let meta: &SlotMeta = &RefCell::borrow(&*slot_meta_entry.new_slot_meta);
+        let meta_backup = &slot_meta_entry.old_slot_meta;
         if !completed_slots_senders.is_empty() && is_newly_completed_slot(meta, meta_backup) {
             newly_completed_slots.push(*slot);
         }
@@ -1388,8 +1505,8 @@ fn find_slot_meta_in_cached_state<'a>(
     chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
     slot: u64,
 ) -> Result<Option<Rc<RefCell<SlotMeta>>>> {
-    if let Some((entry, _)) = working_set.get(&slot) {
-        Ok(Some(entry.clone()))
+    if let Some(entry) = working_set.get(&slot) {
+        Ok(Some(entry.new_slot_meta.clone()))
     } else if let Some(entry) = chained_slots.get(&slot) {
         Ok(Some(entry.clone()))
     } else {
@@ -1401,12 +1518,14 @@ fn find_slot_meta_in_cached_state<'a>(
 fn handle_chaining(
     db: &Database,
     write_batch: &mut WriteBatch,
-    working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
+    working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
 ) -> Result<()> {
+    // Handle chaining for all the SlotMetas that were inserted into
+    working_set.retain(|_, entry| entry.did_insert_occur);
     let mut new_chained_slots = HashMap::new();
-    let working_set_slots: Vec<_> = working_set.iter().map(|s| *s.0).collect();
+    let working_set_slots: Vec<_> = working_set.keys().collect();
     for slot in working_set_slots {
-        handle_chaining_for_slot(db, write_batch, working_set, &mut new_chained_slots, slot)?;
+        handle_chaining_for_slot(db, write_batch, working_set, &mut new_chained_slots, *slot)?;
     }
     // Write all the newly changed slots in new_chained_slots to the write_batch
@@ -1424,10 +1543,13 @@ fn handle_chaining_for_slot(
     new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
     slot: u64,
 ) -> Result<()> {
-    let (meta, meta_backup) = working_set
+    let slot_meta_entry = working_set
         .get(&slot)
         .expect("Slot must exist in the working_set hashmap");
+    let meta = &slot_meta_entry.new_slot_meta;
+    let meta_backup = &slot_meta_entry.old_slot_meta;
     {
         let mut meta_mut = meta.borrow_mut();
         let was_orphan_slot = meta_backup.is_some() && is_orphan(meta_backup.as_ref().unwrap());
@@ -3659,5 +3781,37 @@ pub mod tests {
                 .expect("Expected successful write of shreds");
             assert!(blocktree.get_slot_entries(slot, 0, None).is_err());
         }
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
fn test_no_insert_but_modify_slot_meta() {
// This test verifies that the SlotMeta is still updated correctly when a shred
// is filtered out by the insertion checks
let (shreds0, _) = make_slot_entries(0, 0, 200);
let blocktree_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Insert the first 5 shreds; we don't have an "is_last" shred yet
blocktree
.insert_shreds(shreds0[0..5].to_vec(), None)
.unwrap();
// Insert a repeated shred for slot 's'; it should get ignored. Also insert
// shreds that chain to 's'; the update should then show up in the SlotMeta
// for 's'.
let (mut shreds2, _) = make_slot_entries(2, 0, 200);
let (mut shreds3, _) = make_slot_entries(3, 0, 200);
shreds2.push(shreds0[1].clone());
shreds3.insert(0, shreds0[1].clone());
blocktree.insert_shreds(shreds2, None).unwrap();
let slot_meta = blocktree.meta(0).unwrap().unwrap();
assert_eq!(slot_meta.next_slots, vec![2]);
blocktree.insert_shreds(shreds3, None).unwrap();
let slot_meta = blocktree.meta(0).unwrap().unwrap();
assert_eq!(slot_meta.next_slots, vec![2, 3]);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
}


@@ -19,7 +19,6 @@ use std::sync::Arc;
 // A good value for this is the number of cores on the machine
 const TOTAL_THREADS: i32 = 8;
 const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
-const MIN_WRITE_BUFFER_SIZE: u64 = 64 * 1024; // 64KB
 // Column family for metadata about a leader slot
 const META_CF: &str = "meta";
@@ -129,22 +128,18 @@ impl Rocks {
         let db_options = get_db_options();
         // Column family names
-        let meta_cf_descriptor =
-            ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(SlotMeta::NAME));
+        let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
         let dead_slots_cf_descriptor =
-            ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(DeadSlots::NAME));
+            ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options());
         let erasure_meta_cf_descriptor =
-            ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(ErasureMeta::NAME));
-        let orphans_cf_descriptor =
-            ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(Orphans::NAME));
-        let root_cf_descriptor =
-            ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(Root::NAME));
-        let index_cf_descriptor =
-            ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(Index::NAME));
+            ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
+        let orphans_cf_descriptor = ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options());
+        let root_cf_descriptor = ColumnFamilyDescriptor::new(Root::NAME, get_cf_options());
+        let index_cf_descriptor = ColumnFamilyDescriptor::new(Index::NAME, get_cf_options());
         let shred_data_cf_descriptor =
-            ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options(ShredData::NAME));
+            ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options());
         let shred_code_cf_descriptor =
-            ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options(ShredCode::NAME));
+            ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options());
         let cfs = vec![
             meta_cf_descriptor,
@@ -679,27 +674,13 @@
     }
 }
-fn get_cf_options(name: &'static str) -> Options {
-    use columns::{ErasureMeta, Index, ShredCode, ShredData};
+fn get_cf_options() -> Options {
     let mut options = Options::default();
-    match name {
-        ShredCode::NAME | ShredData::NAME | Index::NAME | ErasureMeta::NAME => {
-            // 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM
-            options.set_max_write_buffer_number(8);
-            options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
-            options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
-            options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
-        }
-        _ => {
-            // We want smaller CFs to flush faster. This results in more WAL files but lowers
-            // overall WAL space utilization and increases flush frequency
-            options.set_write_buffer_size(MIN_WRITE_BUFFER_SIZE as usize);
-            options.set_target_file_size_base(MIN_WRITE_BUFFER_SIZE);
-            options.set_max_bytes_for_level_base(MIN_WRITE_BUFFER_SIZE);
-            options.set_level_zero_file_num_compaction_trigger(1);
-        }
-    }
+    // 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM
+    options.set_max_write_buffer_number(8);
+    options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
+    options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
+    options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
     options
 }

ledger/tests/blocktree.rs (new file, +51 lines)

@ -0,0 +1,51 @@
#[macro_use]
extern crate solana_ledger;
use solana_ledger::blocktree::{self, get_tmp_ledger_path, Blocktree};
use solana_ledger::entry;
use solana_sdk::hash::Hash;
use std::sync::Arc;
use std::thread::Builder;
#[test]
fn test_multiple_threads_insert_shred() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
for _ in 0..100 {
let num_threads = 10;
// Create `num_threads` different ticks in slots 1..num_threads + 1, all
// with parent = slot 0
let threads: Vec<_> = (0..num_threads)
.map(|i| {
let entries = entry::create_ticks(1, Hash::default());
let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false);
let blocktree_ = blocktree.clone();
Builder::new()
.name("blocktree-writer".to_string())
.spawn(move || {
blocktree_.insert_shreds(shreds, None).unwrap();
})
.unwrap()
})
.collect();
for t in threads {
t.join().unwrap()
}
// Check slot 0 has the correct children
let mut meta0 = blocktree.meta(0).unwrap().unwrap();
meta0.next_slots.sort();
let expected_next_slots: Vec<_> = (1..num_threads + 1).collect();
assert_eq!(meta0.next_slots, expected_next_slots);
// Delete slots for next iteration
blocktree.purge_slots(0, None);
}
// Cleanup
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-local-cluster" name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.20.0" version = "0.20.2"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -11,29 +11,29 @@ homepage = "https://solana.com/"
[dependencies] [dependencies]
log = "0.4.8" log = "0.4.8"
rand = "0.6.5" rand = "0.6.5"
solana-bench-exchange = { path = "../bench-exchange", version = "0.20.0" } solana-bench-exchange = { path = "../bench-exchange", version = "0.20.2" }
solana-bench-tps = { path = "../bench-tps", version = "0.20.0" } solana-bench-tps = { path = "../bench-tps", version = "0.20.2" }
solana-config-api = { path = "../programs/config_api", version = "0.20.0" } solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
solana-core = { path = "../core", version = "0.20.0" } solana-core = { path = "../core", version = "0.20.2" }
solana-client = { path = "../client", version = "0.20.0" } solana-client = { path = "../client", version = "0.20.2" }
solana-drone = { path = "../drone", version = "0.20.0" } solana-drone = { path = "../drone", version = "0.20.2" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" } solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.2" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" } solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.2" }
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" } solana-genesis-programs = { path = "../genesis_programs", version = "0.20.2" }
solana-ledger = { path = "../ledger", version = "0.20.0" } solana-ledger = { path = "../ledger", version = "0.20.2" }
solana-logger = { path = "../logger", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.2" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true } solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.2", optional = true }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true } solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.2", optional = true }
solana-runtime = { path = "../runtime", version = "0.20.0" } solana-runtime = { path = "../runtime", version = "0.20.2" }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" } solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" } solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" } solana-storage-program = { path = "../programs/storage_program", version = "0.20.2" }
solana-vest-api = { path = "../programs/vest_api", version = "0.20.0" } solana-vest-api = { path = "../programs/vest_api", version = "0.20.2" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
symlink = "0.1.0" symlink = "0.1.0"
tempfile = "3.1.0" tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" } solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.2" }
[dev-dependencies] [dev-dependencies]
serial_test = "0.2.0" serial_test = "0.2.0"


@@ -29,7 +29,6 @@ extern crate solana_drone;
 extern crate solana_exchange_program;
 #[cfg(feature = "move")]
-#[macro_use]
 #[cfg(test)]
 extern crate solana_move_loader_program;


@ -7,14 +7,14 @@ use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
use solana_core::validator::ValidatorConfig; use solana_core::validator::ValidatorConfig;
use solana_drone::drone::run_local_drone; use solana_drone::drone::run_local_drone;
#[cfg(feature = "move")] #[cfg(feature = "move")]
use solana_move_loader_program; use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, KeypairUtil}; use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
use std::time::Duration; use std::time::Duration;
fn test_bench_tps_local_cluster(config: Config) { fn test_bench_tps_local_cluster(config: Config) {
#[cfg(feature = "move")] #[cfg(feature = "move")]
let native_instruction_processors = vec![solana_move_loader_program!()]; let native_instruction_processors = vec![solana_move_loader_program()];
#[cfg(not(feature = "move"))] #[cfg(not(feature = "move"))]
let native_instruction_processors = vec![]; let native_instruction_processors = vec![];
@ -71,12 +71,12 @@ fn test_bench_tps_local_cluster_solana() {
test_bench_tps_local_cluster(config); test_bench_tps_local_cluster(config);
} }
#[ignore]
#[test] #[test]
#[serial]
fn test_bench_tps_local_cluster_move() { fn test_bench_tps_local_cluster_move() {
let mut config = Config::default(); let mut config = Config::default();
config.tx_count = 100; config.tx_count = 100;
config.duration = Duration::from_secs(20); config.duration = Duration::from_secs(30);
config.use_move = true; config.use_move = true;
test_bench_tps_local_cluster(config); test_bench_tps_local_cluster(config);


@ -1,6 +1,6 @@
[package] [package]
name = "solana-logger" name = "solana-logger"
version = "0.20.0" version = "0.20.2"
description = "Solana Logger" description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"


@ -1,7 +1,7 @@
[package] [package]
name = "solana-measure" name = "solana-measure"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.20.0" version = "0.20.2"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "../README.md" readme = "../README.md"
@ -11,4 +11,4 @@ license = "Apache-2.0"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }


@ -1,6 +1,6 @@
[package] [package]
name = "solana-merkle-tree" name = "solana-merkle-tree"
version = "0.20.0" version = "0.20.2"
description = "Solana Merkle Tree" description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
[dev-dependencies] [dev-dependencies]
hex = "0.4.0" hex = "0.4.0"


@ -1,6 +1,6 @@
[package] [package]
name = "solana-metrics" name = "solana-metrics"
version = "0.20.0" version = "0.20.2"
description = "Solana Metrics" description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -13,7 +13,7 @@ env_logger = "0.7.1"
lazy_static = "1.4.0" lazy_static = "1.4.0"
log = "0.4.8" log = "0.4.8"
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] } reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }
solana-sdk = { path = "../sdk", version = "0.20.0" } solana-sdk = { path = "../sdk", version = "0.20.2" }
sys-info = "0.5.8" sys-info = "0.5.8"
[dev-dependencies] [dev-dependencies]


@ -15,8 +15,8 @@
"editable": true, "editable": true,
"gnetId": null, "gnetId": null,
"graphTooltip": 0, "graphTooltip": 0,
"id": 1038, "id": 1069,
"iteration": 1571342384471, "iteration": 1572390574899,
"links": [ "links": [
{ {
"asDropdown": true, "asDropdown": true,
@ -4317,7 +4317,7 @@
"measurement": "cluster_info-vote-count", "measurement": "cluster_info-vote-count",
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "autogen", "policy": "autogen",
"query": "SELECT \"duration\" / 1000 as \"Generation Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-generate_txs\" WHERE $timeFilter fill(null)\n\n\n\n\n", "query": "SELECT mean(\"duration\") as \"Generation Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-generate_txs\" WHERE $timeFilter GROUP BY time(1s)\n\n\n\n\n",
"rawQuery": true, "rawQuery": true,
"refId": "A", "refId": "A",
"resultFormat": "time_series", "resultFormat": "time_series",
@ -4354,7 +4354,7 @@
], ],
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "default", "policy": "default",
"query": "SELECT \"duration\" / 1000 as \"Transmit Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-do_tx_transfers\" WHERE $timeFilter fill(null)", "query": "SELECT mean(\"duration\") as \"Transmit Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-do_tx_transfers\" WHERE $timeFilter GROUP BY time(1s)",
"rawQuery": true, "rawQuery": true,
"refId": "B", "refId": "B",
"resultFormat": "time_series", "resultFormat": "time_series",
@ -4391,7 +4391,7 @@
], ],
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "default", "policy": "default",
"query": "SELECT \"duration\" / 1000 as \"Barrier Transaction Confirmation Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-send_barrier_transaction\" WHERE $timeFilter fill(null)", "query": "SELECT mean(\"duration\") as \"Get Blockhash\" FROM \"$testnet\".\"autogen\".\"bench-tps-get_blockhash\" WHERE $timeFilter GROUP BY time(1s)",
"rawQuery": true, "rawQuery": true,
"refId": "C", "refId": "C",
"resultFormat": "time_series", "resultFormat": "time_series",
@ -4410,6 +4410,43 @@
] ]
], ],
"tags": [] "tags": []
},
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT mean(\"duration\") as \"Get Balance\" FROM \"$testnet\".\"autogen\".\"bench-tps-get_balance\" WHERE $timeFilter GROUP BY time(1s)",
"rawQuery": true,
"refId": "D",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [],
"type": "mean"
}
]
],
"tags": []
} }
], ],
"thresholds": [], "thresholds": [],
@ -4432,7 +4469,7 @@
}, },
"yaxes": [ "yaxes": [
{ {
"format": "short", "format": "µs",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@ -4453,13 +4490,98 @@
"alignLevel": null "alignLevel": null
} }
}, },
{
"columns": [],
"datasource": "$datasource",
"fontSize": "100%",
"gridPos": {
"h": 5,
"w": 24,
"x": 0,
"y": 50
},
"id": 68,
"links": [],
"pageSize": null,
"scroll": true,
"showHeader": true,
"sort": {
"col": null,
"desc": false
},
"styles": [
{
"alias": "Time",
"dateFormat": "YYYY-MM-DD HH:mm:ss",
"pattern": "Time",
"type": "date"
},
{
"alias": "",
"colorMode": null,
"colors": [
"rgba(245, 54, 54, 0.9)",
"rgba(237, 129, 40, 0.89)",
"rgba(50, 172, 45, 0.97)"
],
"decimals": 2,
"pattern": "/.*/",
"thresholds": [],
"type": "number",
"unit": "short"
}
],
"targets": [
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"hide": false,
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT * FROM \"$testnet\".\"autogen\".\"ramp-tps\" WHERE $timeFilter ORDER BY time DESC ",
"rawQuery": true,
"refId": "A",
"resultFormat": "table",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [],
"type": "mean"
}
]
],
"tags": []
}
],
"title": "Ramp TPS Events",
"transform": "table",
"type": "table"
},
{ {
"collapsed": false, "collapsed": false,
"gridPos": { "gridPos": {
"h": 1, "h": 1,
"w": 24, "w": 24,
"x": 0, "x": 0,
"y": 50 "y": 55
}, },
"id": 38, "id": 38,
"panels": [], "panels": [],
@ -4477,7 +4599,7 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 0, "x": 0,
"y": 51 "y": 56
}, },
"id": 39, "id": 39,
"legend": { "legend": {
@ -4628,7 +4750,7 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 12, "x": 12,
"y": 51 "y": 56
}, },
"id": 40, "id": 40,
"legend": { "legend": {
@ -4811,7 +4933,7 @@
"h": 1, "h": 1,
"w": 24, "w": 24,
"x": 0, "x": 0,
"y": 56 "y": 61
}, },
"id": 41, "id": 41,
"panels": [], "panels": [],
@ -4829,9 +4951,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 0, "x": 0,
"y": 57 "y": 62
}, },
"id": 50, "id": 42,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -5148,9 +5270,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 8, "x": 8,
"y": 57 "y": 62
}, },
"id": 47, "id": 43,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -5506,7 +5628,7 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 16, "x": 16,
"y": 57 "y": 62
}, },
"id": 44, "id": 44,
"legend": { "legend": {
@ -5818,9 +5940,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 0, "x": 0,
"y": 63 "y": 68
}, },
"id": 42, "id": 45,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -6248,9 +6370,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 8, "x": 8,
"y": 63 "y": 68
}, },
"id": 46, "id": 47,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -6291,7 +6413,7 @@
], ],
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "default", "policy": "default",
"query": "SELECT sum(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)", "query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true, "rawQuery": true,
"refId": "F", "refId": "F",
"resultFormat": "time_series", "resultFormat": "time_series",
@ -6328,7 +6450,7 @@
], ],
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "default", "policy": "default",
"query": "SELECT sum(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-record_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)", "query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-record_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true, "rawQuery": true,
"refId": "A", "refId": "A",
"resultFormat": "time_series", "resultFormat": "time_series",
@ -6365,7 +6487,7 @@
], ],
"orderByTime": "ASC", "orderByTime": "ASC",
"policy": "default", "policy": "default",
"query": "SELECT sum(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_overhead\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)", "query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_overhead\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true, "rawQuery": true,
"refId": "B", "refId": "B",
"resultFormat": "time_series", "resultFormat": "time_series",
@ -6384,6 +6506,43 @@
] ]
], ],
"tags": [] "tags": []
},
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-record_ms\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [],
"type": "mean"
}
]
],
"tags": []
} }
], ],
"thresholds": [], "thresholds": [],
@ -6405,7 +6564,7 @@
}, },
"yaxes": [ "yaxes": [
{ {
"format": "ms", "format": "µs",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@ -6437,9 +6596,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 16, "x": 16,
"y": 63 "y": 68
}, },
"id": 56, "id": 47,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -6558,9 +6717,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 16, "x": 16,
"y": 68 "y": 73
}, },
"id": 55, "id": 48,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -6714,9 +6873,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 0, "x": 0,
"y": 69 "y": 74
}, },
"id": 48, "id": 49,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -6870,9 +7029,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 8, "x": 8,
"y": 69 "y": 74
}, },
"id": 49, "id": 50,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -6985,9 +7144,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 16, "x": 16,
"y": 73 "y": 78
}, },
"id": 53, "id": 51,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -7189,9 +7348,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 0, "x": 0,
"y": 74 "y": 79
}, },
"id": 45, "id": 52,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -7491,9 +7650,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 8, "x": 8,
"y": 74 "y": 79
}, },
"id": 51, "id": 53,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -7639,9 +7798,9 @@
"h": 1, "h": 1,
"w": 24, "w": 24,
"x": 0, "x": 0,
"y": 79 "y": 84
}, },
"id": 57, "id": 54,
"panels": [], "panels": [],
"title": "Tower Consensus", "title": "Tower Consensus",
"type": "row" "type": "row"
@ -7662,9 +7821,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 0, "x": 0,
"y": 80 "y": 85
}, },
"id": 58, "id": 55,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -7822,9 +7981,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 8, "x": 8,
"y": 80 "y": 85
}, },
"id": 54, "id": 56,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -7982,9 +8141,9 @@
"h": 5, "h": 5,
"w": 8, "w": 8,
"x": 16, "x": 16,
"y": 80 "y": 85
}, },
"id": 59, "id": 57,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -8167,9 +8326,9 @@
"h": 1, "h": 1,
"w": 24, "w": 24,
"x": 0, "x": 0,
"y": 85 "y": 90
}, },
"id": 60, "id": 58,
"panels": [], "panels": [],
"repeat": null, "repeat": null,
"title": "IP Network", "title": "IP Network",
@ -8186,9 +8345,9 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 0, "x": 0,
"y": 86 "y": 91
}, },
"id": 61, "id": 59,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -8419,9 +8578,9 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 12, "x": 12,
"y": 86 "y": 91
}, },
"id": 62, "id": 60,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -8572,9 +8731,9 @@
"h": 1, "h": 1,
"w": 24, "w": 24,
"x": 0, "x": 0,
"y": 91 "y": 96
}, },
"id": 63, "id": 61,
"panels": [], "panels": [],
"title": "Signature Verification", "title": "Signature Verification",
"type": "row" "type": "row"
@ -8590,9 +8749,9 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 0, "x": 0,
"y": 92 "y": 97
}, },
"id": 64, "id": 62,
"legend": { "legend": {
"avg": false, "avg": false,
"current": false, "current": false,
@ -8711,9 +8870,9 @@
"h": 5, "h": 5,
"w": 12, "w": 12,
"x": 12, "x": 12,
"y": 92 "y": 97
}, },
"id": 65, "id": 63,
"legend": { "legend": {
"alignAsTable": false, "alignAsTable": false,
"avg": false, "avg": false,
@ -8860,9 +9019,9 @@
"h": 1, "h": 1,
"w": 24, "w": 24,
"x": 0, "x": 0,
"y": 97 "y": 102
}, },
"id": 66, "id": 64,
"panels": [], "panels": [],
"title": "Snapshots", "title": "Snapshots",
"type": "row" "type": "row"
@ -8878,9 +9037,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 0, "x": 0,
"y": 98 "y": 103
}, },
"id": 67, "id": 65,
"legend": { "legend": {
"avg": false, "avg": false,
"current": false, "current": false,
@ -9070,9 +9229,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 8, "x": 8,
"y": 98 "y": 103
}, },
"id": 68, "id": 66,
"legend": { "legend": {
"avg": false, "avg": false,
"current": false, "current": false,
@ -9262,9 +9421,9 @@
"h": 6, "h": 6,
"w": 8, "w": 8,
"x": 16, "x": 16,
"y": 98 "y": 103
}, },
"id": 69, "id": 67,
"legend": { "legend": {
"avg": false, "avg": false,
"current": false, "current": false,
@ -9543,5 +9702,5 @@
"timezone": "", "timezone": "",
"title": "Testnet Monitor (edge)", "title": "Testnet Monitor (edge)",
"uid": "testnet-edge", "uid": "testnet-edge",
"version": 6 "version": 2
} }


@ -662,6 +662,7 @@ EOF
set -ex set -ex
if [[ -f /solana-scratch/.instance-startup-complete ]]; then if [[ -f /solana-scratch/.instance-startup-complete ]]; then
echo reboot
$( $(
cd "$here"/scripts/ cd "$here"/scripts/
if "$enableGpu"; then if "$enableGpu"; then
@ -671,7 +672,12 @@ if [[ -f /solana-scratch/.instance-startup-complete ]]; then
if [[ -n $validatorAdditionalDiskSizeInGb ]]; then if [[ -n $validatorAdditionalDiskSizeInGb ]]; then
cat mount-additional-disk.sh cat mount-additional-disk.sh
fi fi
cat ../../scripts/ulimit-n.sh
) )
if [[ -x ~solana/solana/on-reboot ]]; then
sudo -u solana ~solana/solana/on-reboot
fi
# Skip most setup on instance reboot # Skip most setup on instance reboot
exit 0 exit 0
@ -712,6 +718,7 @@ $(
create-solana-user.sh \ create-solana-user.sh \
solana-user-authorized_keys.sh \ solana-user-authorized_keys.sh \
add-testnet-solana-user-authorized_keys.sh \ add-testnet-solana-user-authorized_keys.sh \
install-ag.sh \
install-certbot.sh \ install-certbot.sh \
install-earlyoom.sh \ install-earlyoom.sh \
install-libssl-compatability.sh \ install-libssl-compatability.sh \
@ -732,6 +739,8 @@ $(
) )
cat > /etc/motd <<EOM cat > /etc/motd <<EOM
See startup script log messages in /var/log/syslog for status:
$ sudo cat /var/log/syslog | egrep \\(startup-script\\|cloud-init\)
$(printNetworkInfo) $(printNetworkInfo)
$(creationInfo) $(creationInfo)
EOM EOM
@ -819,7 +828,12 @@ info)
printf " %-16s | %-15s | %-15s | %s\n" "$nodeType" "$ip" "$ipPrivate" "$zone" printf " %-16s | %-15s | %-15s | %s\n" "$nodeType" "$ip" "$ipPrivate" "$zone"
} }
-  if ! $evalInfo; then
+  if $evalInfo; then
+    echo "NET_NUM_VALIDATORS=${#validatorIpList[@]}"
+    echo "NET_NUM_CLIENTS=${#clientIpList[@]}"
+    echo "NET_NUM_BLOCKSTREAMERS=${#blockstreamerIpList[@]}"
+    echo "NET_NUM_ARCHIVERS=${#archiverIpList[@]}"
+  else
     printNode "Node Type" "Public IP" "Private IP" "Zone"
     echo "-------------------+-----------------+-----------------+--------------"
   fi


@ -722,9 +722,7 @@ deploy() {
# Stagger additional node start time. If too many nodes start simultaneously # Stagger additional node start time. If too many nodes start simultaneously
# the bootstrap node gets more rsync requests from the additional nodes than # the bootstrap node gets more rsync requests from the additional nodes than
# it can handle. # it can handle.
-      if ((nodeIndex % 2 == 0)); then
-        sleep 2
-      fi
+      sleep 2
fi fi
done done


@ -76,7 +76,6 @@ now=\$(date -u +"%Y-%m-%dT%H:%M:%SZ")
ln -sfT validator.log.\$now validator.log ln -sfT validator.log.\$now validator.log
EOF EOF
chmod +x ~/solana/on-reboot chmod +x ~/solana/on-reboot
echo "@reboot ~/solana/on-reboot" | crontab -
GPU_CUDA_OK=false GPU_CUDA_OK=false
GPU_FAIL_IF_NONE=false GPU_FAIL_IF_NONE=false
@@ -105,7 +104,7 @@ waitForNodeToInit() {
   echo "--- waiting for $hostname to boot up"
   SECONDS=
   while [[ ! -r $initCompleteFile ]]; do
-    if [[ $SECONDS -ge 120 ]]; then
+    if [[ $SECONDS -ge 240 ]]; then
       echo "^^^ +++"
       echo "Error: $initCompleteFile not found in $SECONDS seconds"
       exit 1
@ -262,6 +261,7 @@ EOF
args+=( args+=(
--blockstream /tmp/solana-blockstream.sock --blockstream /tmp/solana-blockstream.sock
--no-voting --no-voting
--dev-no-sigverify
) )
else else
args+=(--enable-rpc-exit) args+=(--enable-rpc-exit)


@@ -123,10 +123,49 @@ cloud_FindInstance() {
 #
 # This function will be called before |cloud_CreateInstances|
 cloud_Initialize() {
-  declare networkName="$1"
-  # ec2-provider.sh creates firewall rules programmatically, should do the same
-  # here.
-  echo "Note: one day create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists"
+  declare resourceGroup="$1"
+  declare location="$2"
+  declare nsgName=${resourceGroup}-nsg
# Check if resource group exists. If not, create it.
(
set -x
numGroup=$(az group list --query "length([?name=='$resourceGroup'])")
if [[ $numGroup -eq 0 ]]; then
echo Resource Group "$resourceGroup" does not exist. Creating it now.
az group create --name "$resourceGroup" --location "$location"
else
echo Resource group "$resourceGroup" already exists.
az group show --name "$resourceGroup"
fi
az network nsg create --name "$nsgName" --resource-group "$resourceGroup"
)
create_nsg_rule() {
ruleName="$1"
ports="$2"
access="$3"
protocol="$4"
priority="$5"
(
set -x
az network nsg rule create -g "${resourceGroup}" --nsg-name "${nsgName}" -n "${ruleName}" \
--priority "${priority}" --source-address-prefixes "*" --source-port-ranges "*" \
--destination-address-prefixes "*" --destination-port-ranges "${ports}" --access "${access}" \
--protocol "${protocol}"
)
}
create_nsg_rule "InboundTCP" "8000-10000" "Allow" "Tcp" 1000
create_nsg_rule "InboundUDP" "8000-10000" "Allow" "Udp" 1001
create_nsg_rule "InboundHTTP" "80" "Allow" "Tcp" 1002
create_nsg_rule "InboundNetworkExplorerAPI" "3001" "Allow" "Tcp" 1003
create_nsg_rule "InboundDrone" "9900" "Allow" "Tcp" 1004
create_nsg_rule "InboundJsonRpc" "8899-8900" "Allow" "Tcp" 1005
create_nsg_rule "InboundRsync" "873" "Allow" "Tcp" 1006
create_nsg_rule "InboundStun" "3478" "Allow" "Udp" 1007
create_nsg_rule "InboundSSH" "22" "Allow" "Tcp" 1008
} }
# #
@ -175,6 +214,7 @@ cloud_CreateInstances() {
nodes+=("$node") nodes+=("$node")
done done
fi fi
nsgName=${networkName}-nsg
declare -a args declare -a args
args=( args=(
@ -184,6 +224,7 @@ cloud_CreateInstances() {
--size "$machineType" --size "$machineType"
--location "$zone" --location "$zone"
--generate-ssh-keys --generate-ssh-keys
--nsg "$nsgName"
) )
if [[ -n $optionalBootDiskSize ]]; then if [[ -n $optionalBootDiskSize ]]; then
@ -219,27 +260,17 @@ cloud_CreateInstances() {
( (
set -x set -x
# 1: Check if resource group exists. If not, create it.
numGroup=$(az group list --query "length([?name=='$networkName'])")
if [[ $numGroup -eq 0 ]]; then
echo Resource Group "$networkName" does not exist. Creating it now.
az group create --name "$networkName" --location "$zone"
else
echo Resource group "$networkName" already exists.
az group show --name "$networkName"
fi
# 2: For node in numNodes, create VM and put the creation process in the background with --no-wait # For node in numNodes, create VM and put the creation process in the background with --no-wait
for nodeName in "${nodes[@]}"; do for nodeName in "${nodes[@]}"; do
az vm create --name "$nodeName" "${args[@]}" --no-wait az vm create --name "$nodeName" "${args[@]}" --no-wait
done done
for nodeName in "${nodes[@]}"; do
az vm wait --created --name "$nodeName" --resource-group "$networkName" --verbose --timeout 600
done
# 3. If GPU is to be enabled, wait until nodes are created, then install the appropriate extension # If GPU is to be enabled, install the appropriate extension
if $enableGpu; then if $enableGpu; then
for nodeName in "${nodes[@]}"; do
az vm wait --created --name "$nodeName" --resource-group "$networkName" --verbose --timeout 600
done
for nodeName in "${nodes[@]}"; do for nodeName in "${nodes[@]}"; do
az vm extension set \ az vm extension set \
--resource-group "$networkName" \ --resource-group "$networkName" \
@ -250,7 +281,7 @@ cloud_CreateInstances() {
--no-wait --no-wait
done done
# 4. Wait until all nodes have GPU extension installed # Wait until all nodes have GPU extension installed
for nodeName in "${nodes[@]}"; do for nodeName in "${nodes[@]}"; do
az vm wait --updated --name "$nodeName" --resource-group "$networkName" --verbose --timeout 600 az vm wait --updated --name "$nodeName" --resource-group "$networkName" --verbose --timeout 600
done done


@ -17,6 +17,7 @@ else
[[ -r /solana-scratch/id_ecdsa.pub ]] || exit 1 [[ -r /solana-scratch/id_ecdsa.pub ]] || exit 1
sudo -u solana bash -c " sudo -u solana bash -c "
echo 'PATH=\"/home/solana/.cargo/bin:$PATH\"' > /home/solana/.profile
mkdir -p /home/solana/.ssh/ mkdir -p /home/solana/.ssh/
cd /home/solana/.ssh/ cd /home/solana/.ssh/
cp /solana-scratch/id_ecdsa.pub authorized_keys cp /solana-scratch/id_ecdsa.pub authorized_keys

net/scripts/install-ag.sh (new executable file, +9 lines)

@ -0,0 +1,9 @@
#!/usr/bin/env bash
#
set -ex
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
apt-get update
apt-get --assume-yes install silversearcher-ag


@ -6,3 +6,5 @@ set -ex
add-apt-repository -y ppa:chris-lea/redis-server add-apt-repository -y ppa:chris-lea/redis-server
apt-get --assume-yes install redis apt-get --assume-yes install redis
systemctl enable redis-server.service


@ -1,6 +1,6 @@
[package] [package]
name = "solana-netutil" name = "solana-netutil"
version = "0.20.0" version = "0.20.2"
description = "Solana Network Utilities" description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -18,7 +18,7 @@ rand = "0.6.1"
serde = "1.0.101" serde = "1.0.101"
serde_derive = "1.0.101" serde_derive = "1.0.101"
socket2 = "0.3.11" socket2 = "0.3.11"
solana-logger = { path = "../logger", version = "0.20.0" } solana-logger = { path = "../logger", version = "0.20.2" }
tokio = "0.1" tokio = "0.1"
tokio-codec = "0.1" tokio-codec = "0.1"


@ -1,7 +1,7 @@
[package] [package]
name = "solana-bpf-programs" name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.20.0" version = "0.20.2"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "README.md" readme = "README.md"
@ -22,10 +22,10 @@ walkdir = "2"
bincode = "1.1.4" bincode = "1.1.4"
byteorder = "1.3.2" byteorder = "1.3.2"
elf = "0.0.10" elf = "0.0.10"
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.0" } solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.2" }
solana-logger = { path = "../../logger", version = "0.20.0" } solana-logger = { path = "../../logger", version = "0.20.2" }
solana-runtime = { path = "../../runtime", version = "0.20.0" } solana-runtime = { path = "../../runtime", version = "0.20.2" }
solana-sdk = { path = "../../sdk", version = "0.20.0" } solana-sdk = { path = "../../sdk", version = "0.20.2" }
solana_rbpf = "=0.1.19" solana_rbpf = "=0.1.19"
[[bench]] [[bench]]


@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-128bit" name = "solana-bpf-rust-128bit"
version = "0.20.0" version = "0.20.2"
description = "Solana BPF test program written in Rust" description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false } solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.20.0" } solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.20.2" }
[dev_dependencies] [dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" } solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
[features] [features]
program = ["solana-sdk/program"] program = ["solana-sdk/program"]


@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-128bit-dep" name = "solana-bpf-rust-128bit-dep"
version = "0.20.0" version = "0.20.2"
description = "Solana BPF test program written in Rust" description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false } solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
[dev_dependencies] [dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" } solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
[features] [features]
program = ["solana-sdk/program"] program = ["solana-sdk/program"]


@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-alloc" name = "solana-bpf-rust-alloc"
version = "0.20.0" version = "0.20.2"
description = "Solana BPF test program written in Rust" description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false } solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
[dev_dependencies] [dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" } solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
[features] [features]
program = ["solana-sdk/program"] program = ["solana-sdk/program"]


@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-dep-crate" name = "solana-bpf-rust-dep-crate"
version = "0.20.0" version = "0.20.2"
description = "Solana BPF test program written in Rust" description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -13,10 +13,10 @@ edition = "2018"
[dependencies] [dependencies]
byteorder = { version = "1", default-features = false } byteorder = { version = "1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false } solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
[dev_dependencies] [dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" } solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
[features] [features]
program = ["solana-sdk/program"] program = ["solana-sdk/program"]


@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-external-spend" name = "solana-bpf-rust-external-spend"
version = "0.20.0" version = "0.20.2"
description = "Solana BPF test program written in Rust" description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false } solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
[dev_dependencies] [dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" } solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
[features] [features]
program = ["solana-sdk/program"] program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-iter"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-many-args"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
-solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.20.0" }
+solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.20.2" }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-many-args-dep"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-noop"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-panic"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-param-passing"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
-solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "0.20.0" }
+solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "0.20.2" }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-param-passing-dep"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
 [package]
 name = "solana-bpf-rust-sysval"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF test program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
 edition = "2018"
 [dependencies]
-solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
+solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
 [dev_dependencies]
-solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
+solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
 [features]
 program = ["solana-sdk/program"]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-loader-api"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF Loader"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ byteorder = "1.3.2"
 libc = "0.2.65"
 log = "0.4.8"
 serde = "1.0.101"
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 solana_rbpf = "=0.1.19"
 [lib]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-loader-program"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana BPF Loader"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ edition = "2018"
 [dependencies]
 log = "0.4.8"
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
-solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.0" }
+solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.2" }
 [lib]
 crate-type = ["lib", "cdylib"]
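
Most of the native program crates in this diff, including the loader above, end with the same [lib] stanza. Building both "lib" and "cdylib" lets other workspace crates link the crate as ordinary Rust code while also emitting a C-compatible shared object that can be loaded dynamically at runtime. A minimal sketch, with a hypothetical crate name and path that are not taken from this diff:

    [package]
    name = "my-native-program"    # hypothetical, for illustration only
    version = "0.20.2"
    edition = "2018"

    [dependencies]
    solana-sdk = { path = "../../sdk", version = "0.20.2" }

    [lib]
    # "lib" keeps the crate usable as a normal dependency;
    # "cdylib" additionally produces a dynamically loadable shared object.
    crate-type = ["lib", "cdylib"]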


@@ -1,6 +1,6 @@
 [package]
 name = "solana-btc-spv-api"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Bitcoin spv parsing program api"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -16,7 +16,7 @@ num-derive = "0.3"
 num-traits = "0.2"
 serde = "1.0.101"
 serde_derive = "1.0.101"
-solana-sdk = { path = "../../sdk", version = "0.20.0"}
+solana-sdk = { path = "../../sdk", version = "0.20.2"}
 hex = "0.3.2"
 [lib]


@@ -1,6 +1,6 @@
 [package]
 name = "btc_spv_bin"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Bitcoin spv parsing program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"


@@ -1,6 +1,6 @@
 [package]
 name = "solana-bitcoin-spv-program"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Bitcoin spv parsing program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -16,8 +16,8 @@ num-derive = "0.3"
 num-traits = "0.2"
 serde = "1.0.101"
 serde_derive = "1.0.101"
-solana-sdk = { path = "../../sdk", version = "0.20.0"}
+solana-sdk = { path = "../../sdk", version = "0.20.2"}
-solana-btc-spv-api = { path = "../btc_spv_api", version = "0.20.0"}
+solana-btc-spv-api = { path = "../btc_spv_api", version = "0.20.2"}
 [lib]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-budget-api"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Budget program API"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -16,10 +16,10 @@ num-derive = "0.3"
 num-traits = "0.2"
 serde = "1.0.101"
 serde_derive = "1.0.101"
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.20.0" }
+solana-runtime = { path = "../../runtime", version = "0.20.2" }
 [lib]
 crate-type = ["lib"]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-budget-program"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana budget program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ edition = "2018"
 [dependencies]
 log = "0.4.8"
-solana-budget-api = { path = "../budget_api", version = "0.20.0" }
+solana-budget-api = { path = "../budget_api", version = "0.20.2" }
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 [lib]
 crate-type = ["lib", "cdylib"]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-config-api"
-version = "0.20.0"
+version = "0.20.2"
 description = "config program API"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ bincode = "1.2.0"
 log = "0.4.8"
 serde = "1.0.101"
 serde_derive = "1.0.101"
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 [lib]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-config-program"
-version = "0.20.0"
+version = "0.20.2"
 description = "config program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ edition = "2018"
 [dependencies]
 log = "0.4.8"
-solana-config-api = { path = "../config_api", version = "0.20.0" }
+solana-config-api = { path = "../config_api", version = "0.20.2" }
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 [lib]
 crate-type = ["lib", "cdylib"]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-config-tests"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana config api tests"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ bincode = "1.2.0"
 log = "0.4.8"
 serde = "1.0.101"
 serde_derive = "1.0.101"
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
-solana-config-api = { path = "../config_api", version = "0.20.0" }
+solana-config-api = { path = "../config_api", version = "0.20.2" }
-solana-config-program = { path = "../config_program", version = "0.20.0" }
+solana-config-program = { path = "../config_program", version = "0.20.2" }
 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.20.0" }
+solana-runtime = { path = "../../runtime", version = "0.20.2" }
 assert_matches = "1.3.0"


@@ -1,6 +1,6 @@
 [package]
 name = "solana-exchange-api"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Exchange program API"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ bincode = "1.2.0"
 log = "0.4.8"
 serde = "1.0.101"
 serde_derive = "1.0.101"
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-metrics = { path = "../../metrics", version = "0.20.0" }
+solana-metrics = { path = "../../metrics", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.20.0" }
+solana-runtime = { path = "../../runtime", version = "0.20.2" }
 [lib]
 crate-type = ["lib"]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-exchange-program"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana exchange program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ edition = "2018"
 [dependencies]
 log = "0.4.8"
-solana-exchange-api = { path = "../exchange_api", version = "0.20.0" }
+solana-exchange-api = { path = "../exchange_api", version = "0.20.2" }
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 [lib]
 crate-type = ["lib", "cdylib"]


@@ -1,6 +1,6 @@
 [package]
 name = "solana-failure-program"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana failure program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,10 +10,10 @@ edition = "2018"
 [dependencies]
 log = "0.4.8"
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.20.0" }
+solana-runtime = { path = "../../runtime", version = "0.20.2" }
 [lib]
 crate-type = ["cdylib"]

programs/librapay_api/.gitignore (vendored, new file, 1 line)

@@ -0,0 +1 @@
+/target/

programs/librapay_api/Cargo.lock (generated, 4155 lines)

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
 [package]
 name = "solana-librapay-api"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Libra Payment"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,12 +11,12 @@ edition = "2018"
 [dependencies]
 bincode = "1.2.0"
 log = "0.4.8"
-solana-logger = { path = "../../logger", version = "0.20.0" }
+solana-logger = { path = "../../logger", version = "0.20.2" }
-solana-sdk = { path = "../../sdk", version = "0.20.0" }
+solana-sdk = { path = "../../sdk", version = "0.20.2" }
-solana-runtime = { path = "../../runtime", version = "0.20.0" }
+solana-runtime = { path = "../../runtime", version = "0.20.2" }
-types = { version = "0.0.0", package = "solana_libra_types" }
+types = { version = "0.0.1-sol4", package = "solana_libra_types" }
-language_e2e_tests = { version = "0.0.0", package = "solana_libra_language_e2e_tests" }
+language_e2e_tests = { version = "0.0.1-sol4", package = "solana_libra_language_e2e_tests" }
-solana-move-loader-api = { path = "../move_loader_api", version = "0.20.0" }
+solana-move-loader-api = { path = "../move_loader_api", version = "0.20.2" }
 [lib]
 crate-type = ["lib"]
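
Besides the 0.20.2 bump, the librapay manifest above moves its Libra dependencies from the placeholder 0.0.0 releases to the forked 0.0.1-sol4 pre-releases. Under standard Cargo semver rules a pre-release version is only selected when the requirement itself names a pre-release, so spelling out "0.0.1-sol4" effectively pins the fork. A sketch of the relevant dependency table; the two requirement lines are copied from the diff above, while the comments reflect general Cargo behavior rather than anything stated in this changeset:

    [dependencies]
    # A plain requirement such as "0.0.1" would not match "0.0.1-sol4",
    # because Cargo skips pre-release versions unless one is named explicitly.
    types = { version = "0.0.1-sol4", package = "solana_libra_types" }
    language_e2e_tests = { version = "0.0.1-sol4", package = "solana_libra_language_e2e_tests" }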

Some files were not shown because too many files have changed in this diff.