Compare commits

Comparing document-r...v0.20.2 (42 commits)

| SHA1 |
| --- |
| d9a9d6547f |
| c86bf60a40 |
| 03ed4b868d |
| de83bce0ce |
| 8b494272bf |
| ee5c890c5d |
| a4f5397ea4 |
| 66f3b4a3d6 |
| 3a4cd94391 |
| f4658f3be2 |
| 41c70b9f41 |
| d1c92db7ab |
| a8721a5e19 |
| dec9d00a64 |
| 09252ef084 |
| c9d568c910 |
| b054f5f12d |
| 23b4df0fef |
| ca35841cb1 |
| 33d77357bf |
| 22e84abe5a |
| 9b532b16a9 |
| c5a98a5b57 |
| 22d60d496b |
| 8243792430 |
| 1d1d85e0c5 |
| 9b0e40d1dc |
| a231fbe978 |
| cd2c09c473 |
| 774cd48cb1 |
| d580603cd4 |
| f0c931ea84 |
| 74b2eb4328 |
| f1e9a944ef |
| 4cb38ddf01 |
| 593fde628c |
| 34fa025b17 |
| 33843f824a |
| 542bda0a6f |
| d8bdbbf291 |
| 168b0f71f5 |
| be79d97dde |
Cargo.lock (generated): 1540 changed lines; file diff suppressed because it is too large.
@@ -34,9 +34,6 @@ members = [
     "programs/exchange_api",
     "programs/exchange_program",
     "programs/failure_program",
-    "programs/move_loader_api",
-    "programs/move_loader_program",
-    "programs/librapay_api",
     "programs/noop_program",
     "programs/stake_api",
     "programs/stake_program",

@@ -61,4 +58,7 @@ members = [
 
 exclude = [
     "programs/bpf",
+    "programs/move_loader_api",
+    "programs/move_loader_program",
+    "programs/librapay_api",
 ]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-archiver"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

@@ -10,9 +10,9 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 console = "0.9.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
 [dependencies]
 log = "0.4.6"
 rayon = "1.2.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-ledger = { path = "../ledger", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-ledger = { path = "../ledger", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-measure = { path = "../measure", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
 rand = "0.6.5"
 crossbeam-channel = "0.3"
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

@@ -24,16 +24,16 @@ serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
 # solana-runtime = { path = "../solana/runtime"}
-solana-core = { path = "../core", version = "0.20.0" }
-solana-genesis = { path = "../genesis", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-genesis = { path = "../genesis", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.2" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
 untrusted = "0.7.0"
 ws = "0.9.1"
@@ -2,13 +2,13 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 
 [dependencies]
 clap = "2.33.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

@@ -16,23 +16,23 @@ serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-genesis = { path = "../genesis", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-librapay-api = { path = "../programs/librapay_api", version = "0.20.0", optional = true }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
-solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
-solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-genesis = { path = "../genesis", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
+solana-librapay-api = { path = "../programs/librapay_api", version = "0.20.2", optional = true }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-measure = { path = "../measure", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
+solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.2", optional = true }
+solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.2", optional = true }
 
 [dev-dependencies]
 serial_test = "0.2.0"
 serial_test_derive = "0.2.0"
 
 [features]
-move = ["solana-core/move", "solana-librapay-api", "solana-move-loader-program", "solana-move-loader-api"]
+move = ["solana-librapay-api", "solana-move-loader-program", "solana-move-loader-api"]
@@ -18,7 +18,7 @@ use solana_sdk::{
     pubkey::Pubkey,
     signature::{Keypair, KeypairUtil},
     system_instruction, system_transaction,
-    timing::{duration_as_ms, duration_as_s, timestamp},
+    timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
     transaction::Transaction,
 };
 use std::{

@@ -158,12 +158,13 @@ where
     let mut reclaim_lamports_back_to_source_account = false;
     let mut i = keypair0_balance;
     let mut blockhash = Hash::default();
-    let mut blockhash_time = Instant::now();
+    let mut blockhash_time;
     while start.elapsed() < duration {
         // ping-pong between source and destination accounts for each loop iteration
         // this seems to be faster than trying to determine the balance of individual
         // accounts
         let len = tx_count as usize;
+        blockhash_time = Instant::now();
         if let Ok((new_blockhash, _fee_calculator)) = client.get_new_blockhash(&blockhash) {
             blockhash = new_blockhash;
         } else {

@@ -173,13 +174,19 @@ where
             sleep(Duration::from_millis(100));
             continue;
         }
-        info!(
-            "Took {} ms for new blockhash",
-            duration_as_ms(&blockhash_time.elapsed())
+        datapoint_debug!(
+            "bench-tps-get_blockhash",
+            ("duration", duration_as_us(&blockhash_time.elapsed()), i64)
         );
 
+        blockhash_time = Instant::now();
         let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
         metrics_submit_lamport_balance(balance);
+        datapoint_debug!(
+            "bench-tps-get_balance",
+            ("duration", duration_as_us(&blockhash_time.elapsed()), i64)
+        );
 
         generate_txs(
             &shared_txs,
             &blockhash,

@@ -367,7 +374,7 @@ fn generate_txs(
     );
     datapoint_debug!(
         "bench-tps-generate_txs",
-        ("duration", duration_as_ms(&duration), i64)
+        ("duration", duration_as_us(&duration), i64)
     );
 
     let sz = transactions.len() / threads;

@@ -432,7 +439,7 @@ fn do_tx_transfers<T: Client>(
         );
         datapoint_debug!(
             "bench-tps-do_tx_transfers",
-            ("duration", duration_as_ms(&transfer_start.elapsed()), i64),
+            ("duration", duration_as_us(&transfer_start.elapsed()), i64),
             ("count", tx_len, i64)
         );
     }
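The hunks above switch the bench-tps metrics from milliseconds to microseconds and reset the timer between the blockhash and balance fetches so each RPC call is measured separately. A minimal, std-only sketch of that measurement pattern; the helper below mirrors `solana_sdk::timing::duration_as_us`, and the sleep stands in for the RPC call being timed:

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

/// Whole microseconds in a Duration, mirroring solana_sdk::timing::duration_as_us.
fn duration_as_us(d: &Duration) -> u64 {
    d.as_secs() * 1_000_000 + u64::from(d.subsec_micros())
}

fn main() {
    let blockhash_time = Instant::now();
    sleep(Duration::from_millis(2)); // stand-in for client.get_new_blockhash(...)
    // bench.rs reports this value via
    // datapoint_debug!("bench-tps-get_blockhash", ("duration", duration_as_us(...), i64))
    println!("duration_us = {}", duration_as_us(&blockhash_time.elapsed()));
}
```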
@@ -21,6 +21,7 @@ pub struct Config {
     pub write_to_client_file: bool,
     pub read_from_client_file: bool,
     pub target_lamports_per_signature: u64,
+    pub multi_client: bool,
     pub use_move: bool,
     pub num_lamports_per_account: u64,
 }

@@ -41,6 +42,7 @@ impl Default for Config {
             write_to_client_file: false,
             read_from_client_file: false,
             target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
+            multi_client: true,
             use_move: false,
             num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
         }

@@ -108,6 +110,11 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .long("use-move")
                 .help("Use Move language transactions to perform transfers."),
         )
+        .arg(
+            Arg::with_name("no-multi-client")
+                .long("no-multi-client")
+                .help("Disable multi-client support, only transact with the entrypoint."),
+        )
         .arg(
             Arg::with_name("tx_count")
                 .long("tx_count")

@@ -229,6 +236,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     }
 
     args.use_move = matches.is_present("use-move");
+    args.multi_client = !matches.is_present("no-multi-client");
 
     if let Some(v) = matches.value_of("num_lamports_per_account") {
         args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");
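The `no-multi-client` switch added above follows the common clap 2.x pattern of a negative flag that turns off a default-on setting. A self-contained sketch under that assumption (clap 2.33 is the version pinned elsewhere in this diff; the app name is illustrative):

```rust
use clap::{App, Arg};

fn main() {
    // Mirrors the flag definition added to build_args() above.
    let matches = App::new("bench-tps-sketch")
        .arg(
            Arg::with_name("no-multi-client")
                .long("no-multi-client")
                .help("Disable multi-client support, only transact with the entrypoint."),
        )
        .get_matches();

    // Presence of the negative flag flips the positive setting off, as in extract_args().
    let multi_client = !matches.is_present("no-multi-client");
    println!("multi_client = {}", multi_client);
}
```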
@@ -1,7 +1,7 @@
 use log::*;
 use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
 use solana_bench_tps::cli;
-use solana_core::gossip_service::{discover_cluster, get_multi_client};
+use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
 use solana_genesis::Base64Account;
 use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::signature::{Keypair, KeypairUtil};

@@ -29,6 +29,7 @@ fn main() {
         read_from_client_file,
         target_lamports_per_signature,
         use_move,
+        multi_client,
         num_lamports_per_account,
         ..
     } = &cli_config;

@@ -70,15 +71,19 @@ fn main() {
         exit(1);
     });
 
-    let (client, num_clients) = get_multi_client(&nodes);
-
-    if nodes.len() < num_clients {
-        eprintln!(
-            "Error: Insufficient nodes discovered. Expecting {} or more",
-            num_nodes
-        );
-        exit(1);
-    }
+    let client = if *multi_client {
+        let (client, num_clients) = get_multi_client(&nodes);
+        if nodes.len() < num_clients {
+            eprintln!(
+                "Error: Insufficient nodes discovered. Expecting {} or more",
+                num_nodes
+            );
+            exit(1);
+        }
+        client
+    } else {
+        get_client(&nodes)
+    };
 
     let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
         let path = Path::new(&client_ids_and_stake_file);
@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
 ## Usage
 ### solana-cli
 ```text
-solana-cli 0.20.0
+solana-cli 0.20.2
 Blockchain, Rebuilt for Scale
 
 USAGE:
@@ -126,7 +126,7 @@ The result field will be a JSON object with the following sub fields:
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
 
 // Result
-{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
+{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.20.2,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
 ```
 
 ### getBalance

@@ -729,7 +729,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
 #### Notification Format:
 
 ```bash
-{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
+{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.20.2,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
 ```
 
 ### accountUnsubscribe

@@ -787,7 +787,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
 * `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
 
 ```bash
-{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
+{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.20.2,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
 ```
 
 ### programUnsubscribe
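As an aside, the same `getAccountInfo` request shown in the curl example above can be issued from Rust with the `reqwest` crate (0.9 is the version pinned in client/Cargo.toml later in this diff). This is an illustrative sketch, not part of the change, and it assumes reqwest's default blocking client:

```rust
use reqwest::header::CONTENT_TYPE;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Same pubkey as the documentation example above.
    let body = r#"{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}"#;
    let mut response = reqwest::Client::new()
        .post("http://localhost:8899")
        .header(CONTENT_TYPE, "application/json")
        .body(body)
        .send()?;
    println!("{}", response.text()?);
    Ok(())
}
```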
@@ -11,7 +11,7 @@ This document proposes an easy to use software install and updater that can be u
 The easiest install method for supported platforms:
 
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh
 ```
 
 This script will check github for the latest tagged release and download and run the `solana-install-init` binary from there.

@@ -20,7 +20,7 @@ If additional arguments need to be specified during the installation, the follow
 
 ```bash
 $ init_args=.... # arguments for `solana-install-init ...`
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - ${init_args}
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s - ${init_args}
 ```
 
 ### Fetch and run a pre-built installer from a Github release

@@ -28,7 +28,7 @@ $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install
 With a well-known release URL, a pre-built binary can be obtained for supported platforms:
 
 ```bash
-$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.18.0/solana-install-init-x86_64-apple-darwin
+$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.20.2/solana-install-init-x86_64-apple-darwin
 $ chmod +x ./solana-install-init
 $ ./solana-install-init --help
 ```
@@ -29,7 +29,7 @@ Before starting an archiver node, sanity check that the cluster is accessible to
 Fetch the current transaction count over JSON RPC:
 
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
 ```
 
 Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.

@@ -47,13 +47,13 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
 #### Linux and mac OS
 
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s
 ```
 
 Alternatively build the `solana-install` program from source and run the following command to obtain the same result:
 
 ```bash
-$ solana-install init
+solana-install init
 ```
 
 #### Windows

@@ -71,9 +71,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
 
 ```bash
-$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 
 #### mac OS

@@ -81,9 +81,9 @@ $ export PATH=$PWD/bin:$PATH
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:
 
 ```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-apple-darwin.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 
 #### Windows

@@ -95,7 +95,7 @@ Download the binaries by navigating to [https://github.com/solana-labs/solana/re
 Try running following command to join the gossip network and view all the other nodes in the cluster:
 
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 # Press ^C to exit
 ```
 

@@ -104,8 +104,8 @@ Now configure the keypairs for your archiver by running:
 Navigate to the solana install location and open a cmd prompt
 
 ```bash
-$ solana-keygen new -o archiver-keypair.json
-$ solana-keygen new -o storage-keypair.json
+solana-keygen new -o archiver-keypair.json
+solana-keygen new -o storage-keypair.json
 ```
 
 Use solana-keygen to show the public keys for each of the keypairs, they will be needed in the next step:

@@ -114,23 +114,23 @@ Use solana-keygen to show the public keys for each of the keypairs, they will be
 
 ```bash
 # The archiver's identity
-$ solana-keygen pubkey archiver-keypair.json
-$ solana-keygen pubkey storage-keypair.json
+solana-keygen pubkey archiver-keypair.json
+solana-keygen pubkey storage-keypair.json
 ```
 
 * Linux and mac OS
 
 \`\`\`bash
 
-$ export ARCHIVER\_IDENTITY=$\(solana-keygen pubkey archiver-keypair.json\)
+export ARCHIVER\_IDENTITY=$\(solana-keygen pubkey archiver-keypair.json\)
 
-$ export STORAGE\_IDENTITY=$\(solana-keygen pubkey storage-keypair.json\)
+export STORAGE\_IDENTITY=$\(solana-keygen pubkey storage-keypair.json\)
 
 ```text
 Then set up the storage accounts for your archiver by running:
 ```bash
-$ solana --keypair archiver-keypair.json airdrop 100000 lamports
-$ solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
+solana --keypair archiver-keypair.json airdrop 100000 lamports
+solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
 ```
 
 Note: Every time the testnet restarts, run the steps to setup the archiver accounts again.

@@ -138,7 +138,7 @@ Note: Every time the testnet restarts, run the steps to setup the archiver accou
 To start the archiver:
 
 ```bash
-$ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
+solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
 ```
 
 ## Verify Archiver Setup

@@ -146,12 +146,11 @@ $ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypa
 From another console, confirm the IP address and **identity pubkey** of your archiver is visible in the gossip network by running:
 
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 ```
 
 Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:
 
 ```bash
-$ solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
+solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
 ```
-
@@ -7,13 +7,13 @@ You can publish your validator information to the chain to be publicly visible t
 Run the solana CLI to populate a validator info account:
 
 ```bash
-$ solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
+solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
 ```
 
 For details about optional fields for VALIDATOR\_INFO\_ARGS:
 
 ```bash
-$ solana validator-info publish --help
+solana validator-info publish --help
 ```
 
 ## Keybase

@@ -33,4 +33,3 @@ Including a Keybase username allows client applications \(like the Solana Networ
 3. Add or update your `solana validator-info` with your Keybase username. The
 
    CLI will verify the `validator-<PUBKEY>` file
-
@@ -5,13 +5,13 @@
 The **identity pubkey** for your validator can also be found by running:
 
 ```bash
-$ solana-keygen pubkey ~/validator-keypair.json
+solana-keygen pubkey ~/validator-keypair.json
 ```
 
 From another console, confirm the IP address and **identity pubkey** of your validator is visible in the gossip network by running:
 
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 ```
 
 ## Check Vote Activity

@@ -19,13 +19,13 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
 The vote pubkey for the validator can be found by running:
 
 ```bash
-$ solana-keygen pubkey ~/validator-vote-keypair.json
+solana-keygen pubkey ~/validator-vote-keypair.json
 ```
 
 Provide the **vote pubkey** to the `solana show-vote-account` command to view the recent voting activity from your validator:
 
 ```bash
-$ solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
+solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
 ```
 
 ## Check Your Balance

@@ -33,7 +33,7 @@ $ solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
 Your account balance should decrease by the transaction fee amount as your validator submits votes, and increase after serving as the leader. Pass the `--lamports` are to observe in finer detail:
 
 ```bash
-$ solana balance --lamports
+solana balance --lamports
 ```
 
 ## Check Slot Number

@@ -41,13 +41,13 @@ $ solana balance --lamports
 After your validator boots, it may take some time to catch up with the cluster. Use the `get-slot` command to view the current slot that the cluster is processing:
 
 ```bash
-$ solana get-slot
+solana get-slot
 ```
 
 The current slot that your validator is processing can then been seen with:
 
 ```bash
-$ solana --url http://127.0.0.1:8899 get-slot
+solana --url http://127.0.0.1:8899 get-slot
 ```
 
 Until your validator has caught up, it will not be able to vote successfully and stake cannot be delegated to it.

@@ -60,11 +60,11 @@ There are several useful JSON-RPC endpoints for monitoring your validator on the
 
 ```bash
 # Similar to solana-gossip, you should see your validator in the list of cluster nodes
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
 # If your validator is properly voting, it should appear in the list of `current` vote accounts. If staked, `stake` should be > 0
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
 # Returns the current leader schedule
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
 # Returns info about the current epoch. slotIndex should progress on subsequent calls.
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://testnet.solana.com:8899
 ```

@@ -76,9 +76,9 @@ Metrics are available for local monitoring of your validator.
 Docker must be installed and the current user added to the docker group. Then download `solana-metrics.tar.bz2` from the Github Release and run
 
 ```bash
-$ tar jxf solana-metrics.tar.bz2
-$ cd solana-metrics/
-$ ./start.sh
+tar jxf solana-metrics.tar.bz2
+cd solana-metrics/
+./start.sh
 ```
 
 A local InfluxDB and Grafana instance is now running on your machine. Define `SOLANA_METRICS_CONFIG` in your environment as described at the end of the `start.sh` output and restart your validator.

@@ -92,6 +92,5 @@ Log messages emitted by your validator include a timestamp. When sharing logs wi
 To make it easier to compare logs between different sources we request that everybody use Pacific Time on their validator nodes. In Linux this can be accomplished by running:
 
 ```bash
-$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
+sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
 ```
-
@@ -5,13 +5,13 @@
 The `solana-install` tool can be used to easily install and upgrade the validator software on Linux x86\_64 and mac OS systems.
 
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s
 ```
 
 Alternatively build the `solana-install` program from source and run the following command to obtain the same result:
 
 ```bash
-$ solana-install init
+solana-install init
 ```
 
 After a successful install, `solana-install update` may be used to easily update the cluster software to a newer version at any time.

@@ -25,9 +25,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
 
 ```bash
-$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 
 ### mac OS

@@ -35,9 +35,9 @@ $ export PATH=$PWD/bin:$PATH
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:
 
 ```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-apple-darwin.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```
 
 ## Build From Source

@@ -45,7 +45,6 @@ $ export PATH=$PWD/bin:$PATH
 If you are unable to use the prebuilt binaries or prefer to build it yourself from source, navigate to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), and download the **Source Code** archive. Extract the code and build the binaries with:
 
 ```bash
-$ ./scripts/cargo-install-all.sh .
-$ export PATH=$PWD/bin:$PATH
+./scripts/cargo-install-all.sh .
+export PATH=$PWD/bin:$PATH
 ```
-
@@ -7,14 +7,14 @@ Adding stake can be accomplished by using the `solana` CLI
 First create a stake account keypair with `solana-keygen`:
 
 ```bash
-$ solana-keygen new -o ~/validator-config/stake-keypair.json
+solana-keygen new -o ~/validator-stake-keypair.json
 ```
 
-and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 42 lamports:
+and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 4242 lamports:
 
 ```bash
-$ solana create-stake-account ~/validator-config/stake-keypair.json 42 lamports
-$ solana delegate-stake ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
+solana create-stake-account ~/validator-stake-keypair.json 4242 lamports
+solana delegate-stake ~/validator-stake-keypair.json ~/validator-vote-keypair.json
 ```
 
 Note that stakes need to warm up, and warmup increments are applied at Epoch boundaries, so it can take an hour or more for the change to fully take effect.

@@ -22,13 +22,13 @@ Note that stakes need to warm up, and warmup increments are applied at Epoch bou
 Stakes can be re-delegated to another node at any time with the same command:
 
 ```bash
-$ solana delegate-stake ~/validator-config/stake-keypair.json ~/some-other-validator-vote-keypair.json
+solana delegate-stake ~/validator-stake-keypair.json ~/some-other-validator-vote-keypair.json
 ```
 
 Assuming the node is voting, now you're up and running and generating validator rewards. You'll want to periodically redeem/claim your rewards:
 
 ```bash
-$ solana redeem-vote-credits ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
+solana redeem-vote-credits ~/validator-stake-keypair.json ~/validator-vote-keypair.json
 ```
 
 The rewards lamports earned are split between your stake account and the vote account according to the commission rate set in the vote account. Rewards can only be earned while the validator is up and running. Further, once staked, the validator becomes an important part of the network. In order to safely remove a validator from the network, first deactivate its stake.

@@ -36,7 +36,7 @@ The rewards lamports earned are split between your stake account and the vote ac
 Stake can be deactivated by running:
 
 ```bash
-$ solana deactivate-stake ~/validator-config/stake-keypair.json
+solana deactivate-stake ~/validator-stake-keypair.json
 ```
 
 The stake will cool down, deactivate over time. While cooling down, your stake will continue to earn rewards. Only after stake cooldown is it safe to turn off your validator or withdraw it from the network. Cooldown may take several epochs to complete, depending on active stake and the size of your stake.
@@ -7,7 +7,7 @@ Before attaching a validator node, sanity check that the cluster is accessible t
 Fetch the current transaction count over JSON RPC:
 
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
 ```
 
 Inspect the network explorer at [https://explorer.solana.com/](https://explorer.solana.com/) for activity.

@@ -19,16 +19,16 @@ View the [metrics dashboard](https://metrics.solana.com:3000/d/testnet-beta/test
 Sanity check that you are able to interact with the cluster by receiving a small airdrop of lamports from the testnet drone:
 
 ```bash
-$ solana set --url http://testnet.solana.com:8899
-$ solana get
-$ solana airdrop 123 lamports
-$ solana balance --lamports
+solana set --url http://testnet.solana.com:8899
+solana get
+solana airdrop 123 lamports
+solana balance --lamports
 ```
 
 Also try running following command to join the gossip network and view all the other nodes in the cluster:
 
 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 # Press ^C to exit
 ```
 

@@ -37,7 +37,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
 Create an identity keypair for your validator by running:
 
 ```bash
-$ solana-keygen new -o ~/validator-keypair.json
+solana-keygen new -o ~/validator-keypair.json
 ```
 
 ### Wallet Configuration

@@ -45,30 +45,30 @@ $ solana-keygen new -o ~/validator-keypair.json
 You can set solana configuration to use your validator keypair for all following commands:
 
 ```bash
-$ solana set --keypair ~/validator-keypair.json
+solana set --keypair ~/validator-keypair.json
 ```
 
 **All following solana commands assume you have set `--keypair` config to** your validator identity keypair.\*\* If you haven't, you will need to add the `--keypair` argument to each command, like:
 
 ```bash
-$ solana --keypair ~/validator-keypair.json airdrop 1000 lamports
+solana --keypair ~/validator-keypair.json airdrop 10
 ```
 
 \(You can always override the set configuration by explicitly passing the `--keypair` argument with a command.\)
 
 ### Validator Start
 
-Airdrop yourself some lamports to get started:
+Airdrop yourself some SOL to get started:
 
 ```bash
-$ solana airdrop 1000 lamports
+solana airdrop 10
 ```
 
 Your validator will need a vote account. Create it now with the following commands:
 
 ```bash
-$ solana-keygen new -o ~/validator-vote-keypair.json
-$ solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json 1 lamports
+solana-keygen new -o ~/validator-vote-keypair.json
+solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json
 ```
 
 Then use one of the following commands, depending on your installation choice, to start the node:

@@ -76,19 +76,19 @@ Then use one of the following commands, depending on your installation choice, t
 If this is a `solana-install`-installation:
 
 ```bash
-$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
+solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```
 
 Alternatively, the `solana-install run` command can be used to run the validator node while periodically checking for and applying software updates:
 
 ```bash
-$ solana-install run solana-validator -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
+solana-install run solana-validator -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```
 
 If you built from source:
 
 ```bash
-$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
+NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```
 
 ### Enabling CUDA

@@ -98,7 +98,7 @@ If your machine has a GPU with CUDA installed \(Linux-only currently\), include
 Or if you built from source, define the SOLANA\_CUDA flag in your environment _before_ running any of the previously mentioned commands
 
 ```bash
-$ export SOLANA_CUDA=1
+export SOLANA_CUDA=1
 ```
 
 When your validator is started look for the following log message to indicate that CUDA is enabled: `"[<timestamp> solana::validator] CUDA is enabled"`

@@ -110,4 +110,3 @@ By default the validator will dynamically select available network ports in the
 ### Limiting ledger size to conserve disk space
 
 By default the validator will retain the full ledger. To conserve disk space start the validator with the `--limit-ledger-size`, which will instruct the validator to only retain the last couple hours of ledger.
-
@@ -15,8 +15,8 @@ Prior to mainnet, the testnets may be running different versions of solana softw
 You can submit a JSON-RPC request to see the specific version of the cluster.
 
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
-{"jsonrpc":"2.0","result":{"solana-core":"0.18.0-pre1"},"id":1}
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
+{"jsonrpc":"2.0","result":{"solana-core":"0.20.2"},"id":1}
 ```
 
 ## Using a Different Testnet

@@ -28,17 +28,17 @@ This guide is written in the context of testnet.solana.com, our most stable clus
 If you are bootstrapping with `solana-install`, you can specify the release tag or named channel to install to match your desired testnet.
 
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - 0.18.0
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s - 0.20.2
 ```
 
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - beta
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.2/install/solana-install-init.sh | sh -s - beta
 ```
 
 Similarly, you can add this argument to the `solana-install` command if you've built the program from source:
 
 ```bash
-$ solana-install init 0.18.0
+solana-install init 0.20.2
 ```
 
 If you are downloading pre-compiled binaries or building from source, simply choose the release matching your desired testnet.

@@ -48,14 +48,14 @@ If you are downloading pre-compiled binaries or building from source, simply cho
 The Solana CLI tool points at testnet.solana.com by default. Include a `--url` argument to point at a different testnet. For instance:
 
 ```bash
-$ solana --url http://beta.testnet.solana.com:8899 balance
+solana --url http://beta.testnet.solana.com:8899 balance
 ```
 
 The solana cli includes `get` and `set` configuration commands to automatically set the `--url` argument for future cli commands. For example:
 
 ```bash
-$ solana set --url http://beta.testnet.solana.com:8899
-$ solana balance # Same result as command above
+solana set --url http://beta.testnet.solana.com:8899
+solana balance # Same result as command above
 ```
 
 \(You can always override the set configuration by explicitly passing the `--url` argument with a command.\)

@@ -63,12 +63,11 @@ $ solana balance # Same result as command above
 Solana-gossip and solana-validator commands already require an explicit `--entrypoint` argument. Simply replace testnet.solana.com in the examples with an alternate url to interact with a different testnet. For example:
 
 ```bash
-$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
+solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
 ```
 
 You can also submit JSON-RPC requests to a different testnet, like:
 
 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
 ```
-
@@ -1,6 +1,6 @@
 [package]
 name = "solana-chacha-sys"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana chacha-sys"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -31,10 +31,11 @@ testName=$(basename "$0" .sh)
 case $testName in
 test-stable)
   echo "Executing $testName"
 
   _ cargo +"$rust_stable" build --tests --bins ${V:+--verbose}
   _ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
+  _ cargo +"$rust_stable" test --manifest-path local_cluster/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
   _ cargo +"$rust_stable" test --manifest-path programs/move_loader_api/Cargo.toml ${V:+--verbose} -- --nocapture
   _ cargo +"$rust_stable" test --manifest-path programs/move_loader_program/Cargo.toml ${V:+--verbose} -- --nocapture
   _ cargo +"$rust_stable" test --manifest-path programs/librapay_api/Cargo.toml ${V:+--verbose} -- --nocapture
   ;;
 test-stable-perf)
   echo "Executing $testName"
@@ -288,11 +288,15 @@ if ! $skipCreate; then
   echo "--- $cloudProvider.sh create"
   create_args=(
     -p "$netName"
-    -a "$bootstrapValidatorAddress"
     -c "$clientNodeCount"
     -n "$additionalValidatorCount"
     --dedicated
   )
 
+  if [[ -n $bootstrapValidatorAddress ]]; then
+    create_args+=(-a "$bootstrapValidatorAddress")
+  fi
+
   # shellcheck disable=SC2206
   create_args+=(${zone_args[@]})
 
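The script change above passes `-a` only when a bootstrap validator address was actually supplied, instead of always appending a possibly empty value. The same guard, expressed as a small self-contained Rust sketch (the function and variable names are illustrative, not part of the change):

```rust
// Build an argument list, adding -a only when an address is present.
fn create_args(net_name: &str, bootstrap_validator_address: Option<&str>) -> Vec<String> {
    let mut args = vec!["-p".to_string(), net_name.to_string(), "--dedicated".to_string()];
    if let Some(addr) = bootstrap_validator_address {
        args.push("-a".to_string());
        args.push(addr.to_string());
    }
    args
}

fn main() {
    println!("{:?}", create_args("testnet", None));
    println!("{:?}", create_args("testnet", Some("10.0.0.1")));
}
```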
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-cli"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.20.0"
+version = "0.20.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

@@ -26,23 +26,23 @@ serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
-solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
-solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.20.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
+solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
+solana-vote-signer = { path = "../vote-signer", version = "0.20.2" }
 url = "2.1.0"
 
 [dev-dependencies]
-solana-core = { path = "../core", version = "0.20.0" }
-solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.2" }
+solana-budget-program = { path = "../programs/budget_program", version = "0.20.2" }
 
 [[bin]]
 name = "solana"
@@ -329,7 +329,6 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
         }
         ("balance", Some(matches)) => {
             let pubkey = pubkey_of(&matches, "pubkey");
-            println!("{:?}", pubkey);
             Ok(CliCommandInfo {
                 command: CliCommand::Balance {
                     pubkey,
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "0.20.0"
+version = "0.20.2"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"

@@ -19,10 +19,10 @@ reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tl
 serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
 
 [dev-dependencies]
 jsonrpc-core = "14.0.3"
-jsonrpc-http-server = "14.0.1"
-solana-logger = { path = "../logger", version = "0.20.0" }
+jsonrpc-http-server = "14.0.3"
+solana-logger = { path = "../logger", version = "0.20.2" }
@@ -1,7 +1,7 @@
 [package]
 name = "solana-core"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.20.0"
+version = "0.20.2"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"

@@ -15,7 +15,6 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
 
 [features]
 pin_gpu_memory = []
-move = []
 
 [dependencies]
 bincode = "1.2.0"

@@ -30,7 +29,7 @@ indexmap = "1.1"
 itertools = "0.8.0"
 jsonrpc-core = "14.0.3"
 jsonrpc-derive = "14.0.3"
-jsonrpc-http-server = "14.0.1"
+jsonrpc-http-server = "14.0.3"
 jsonrpc-pubsub = "14.0.3"
 jsonrpc-ws-server = "14.0.3"
 lazy_static = "1.4.0"

@@ -45,25 +44,25 @@ rayon = "1.2.0"
 serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
-solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
-solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
-solana-chacha-sys = { path = "../chacha-sys", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
+solana-budget-program = { path = "../programs/budget_program", version = "0.20.2" }
+solana-chacha-sys = { path = "../chacha-sys", version = "0.20.2" }
+solana-client = { path = "../client", version = "0.20.2" }
+solana-drone = { path = "../drone", version = "0.20.2" }
 solana-ed25519-dalek = "0.2.0"
-solana-ledger = { path = "../ledger", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-merkle-tree = { path = "../merkle-tree", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
-solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
-solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.20.0" }
+solana-ledger = { path = "../ledger", version = "0.20.2" }
+solana-logger = { path = "../logger", version = "0.20.2" }
+solana-merkle-tree = { path = "../merkle-tree", version = "0.20.2" }
+solana-metrics = { path = "../metrics", version = "0.20.2" }
+solana-measure = { path = "../measure", version = "0.20.2" }
+solana-netutil = { path = "../netutil", version = "0.20.2" }
+solana-runtime = { path = "../runtime", version = "0.20.2" }
+solana-sdk = { path = "../sdk", version = "0.20.2" }
+solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
+solana-storage-program = { path = "../programs/storage_program", version = "0.20.2" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
+solana-vote-signer = { path = "../vote-signer", version = "0.20.2" }
 symlink = "0.1.0"
 sys-info = "0.5.8"
 tempfile = "3.1.0"

@@ -72,7 +71,7 @@ tokio-codec = "0.1"
 tokio-fs = "0.1"
 tokio-io = "0.1"
 untrusted = "0.7.0"
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.2" }
 reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
 
 [dev-dependencies]
@ -12,17 +12,20 @@
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
//!
//! Bank needs to provide an interface for us to query the stake weight
use crate::contact_info::ContactInfo;
use crate::crds_gossip::CrdsGossip;
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS};
use crate::crds_value::{CrdsValue, CrdsValueLabel, EpochSlots, Vote};
use crate::packet::{to_shared_blob, Blob, Packet, SharedBlob};
use crate::repair_service::RepairType;
use crate::result::{Error, Result};
use crate::sendmmsg::{multicast, send_mmsg};
use crate::streamer::{BlobReceiver, BlobSender};
use crate::weighted_shuffle::{weighted_best, weighted_shuffle};
use crate::crds_value::CrdsValue;
use crate::{
contact_info::ContactInfo,
crds_gossip::CrdsGossip,
crds_gossip_error::CrdsGossipError,
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
crds_value::{CrdsData, CrdsValueLabel, EpochSlots, Vote},
packet::{to_shared_blob, Blob, Packet, SharedBlob},
repair_service::RepairType,
result::{Error, Result},
sendmmsg::{multicast, send_mmsg},
streamer::{BlobReceiver, BlobSender},
weighted_shuffle::{weighted_best, weighted_shuffle},
};
use bincode::{deserialize, serialize, serialized_size};
use core::cmp;
use itertools::Itertools;
@ -195,8 +198,8 @@ impl ClusterInfo {

pub fn insert_self(&mut self, contact_info: ContactInfo) {
if self.id() == contact_info.id {
let mut value = CrdsValue::ContactInfo(contact_info.clone());
value.sign(&self.keypair);
let value =
CrdsValue::new_signed(CrdsData::ContactInfo(contact_info.clone()), &self.keypair);
let _ = self.gossip.crds.insert(value, timestamp());
}
}
@ -205,8 +208,7 @@ impl ClusterInfo {
let mut my_data = self.my_data();
let now = timestamp();
my_data.wallclock = now;
let mut entry = CrdsValue::ContactInfo(my_data);
entry.sign(&self.keypair);
let entry = CrdsValue::new_signed(CrdsData::ContactInfo(my_data), &self.keypair);
self.gossip.refresh_push_active_set(stakes);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
@ -214,8 +216,7 @@ impl ClusterInfo {

// TODO kill insert_info, only used by tests
pub fn insert_info(&mut self, contact_info: ContactInfo) {
let mut value = CrdsValue::ContactInfo(contact_info);
value.sign(&self.keypair);
let value = CrdsValue::new_signed(CrdsData::ContactInfo(contact_info), &self.keypair);
let _ = self.gossip.crds.insert(value, timestamp());
}

@ -297,8 +298,10 @@ impl ClusterInfo {

pub fn push_epoch_slots(&mut self, id: Pubkey, root: u64, slots: BTreeSet<u64>) {
let now = timestamp();
let mut entry = CrdsValue::EpochSlots(EpochSlots::new(id, root, slots, now));
entry.sign(&self.keypair);
let entry = CrdsValue::new_signed(
CrdsData::EpochSlots(EpochSlots::new(id, root, slots, now)),
&self.keypair,
);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
}
@ -306,8 +309,7 @@ impl ClusterInfo {
pub fn push_vote(&mut self, vote: Transaction) {
let now = timestamp();
let vote = Vote::new(&self.id(), vote, now);
let mut entry = CrdsValue::Vote(vote);
entry.sign(&self.keypair);
let entry = CrdsValue::new_signed(CrdsData::Vote(vote), &self.keypair);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
}
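Every hunk above makes the same substitution: instead of building a `CrdsValue` variant and then calling `sign()` on it, the value is signed at construction time with `CrdsValue::new_signed`. A minimal sketch of the new pattern, assuming it is compiled inside `solana-core` where these modules are visible:

```rust
// Sketch only: mirrors the insert_self/push_vote hunks above.
use crate::contact_info::ContactInfo;
use crate::crds_value::{CrdsData, CrdsValue};
use solana_sdk::signature::{Keypair, KeypairUtil, Signable};
use solana_sdk::timing::timestamp;

fn make_signed_contact_info(keypair: &Keypair) -> CrdsValue {
    // Build the payload, then sign it in one step; the signature now lives on
    // the CrdsValue wrapper rather than on the inner ContactInfo.
    let ci = ContactInfo::new_localhost(&keypair.pubkey(), timestamp());
    let value = CrdsValue::new_signed(CrdsData::ContactInfo(ci), keypair);
    debug_assert!(value.verify());
    value
}
```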
@ -915,7 +917,7 @@ impl ClusterInfo {
|
||||
.expect("unable to serialize default filter") as usize;
|
||||
let protocol = Protocol::PullRequest(
|
||||
CrdsFilter::default(),
|
||||
CrdsValue::ContactInfo(ContactInfo::default()),
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())),
|
||||
);
|
||||
let protocol_size =
|
||||
serialized_size(&protocol).expect("unable to serialize gossip protocol") as usize;
|
||||
@ -1161,9 +1163,7 @@ impl ClusterInfo {
|
||||
1
|
||||
);
|
||||
} else if caller.contact_info().is_some() {
|
||||
if caller.contact_info().unwrap().pubkey()
|
||||
== me.read().unwrap().gossip.id
|
||||
{
|
||||
if caller.contact_info().unwrap().id == me.read().unwrap().gossip.id {
|
||||
warn!("PullRequest ignored, I'm talking to myself");
|
||||
inc_new_counter_debug!("cluster_info-window-request-loopback", 1);
|
||||
} else {
|
||||
@ -1509,6 +1509,7 @@ impl ClusterInfo {
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
timestamp(),
|
||||
);
|
||||
(node, gossip_socket, Some(ip_echo))
|
||||
@ -1529,6 +1530,7 @@ impl ClusterInfo {
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
timestamp(),
|
||||
);
|
||||
(node, gossip_socket, None)
|
||||
@ -1612,6 +1614,7 @@ impl Node {
|
||||
gossip.local_addr().unwrap(),
|
||||
tvu.local_addr().unwrap(),
|
||||
tvu_forwards.local_addr().unwrap(),
|
||||
repair.local_addr().unwrap(),
|
||||
empty,
|
||||
empty,
|
||||
storage.local_addr().unwrap(),
|
||||
@ -1658,6 +1661,7 @@ impl Node {
|
||||
gossip_addr,
|
||||
tvu.local_addr().unwrap(),
|
||||
tvu_forwards.local_addr().unwrap(),
|
||||
repair.local_addr().unwrap(),
|
||||
tpu.local_addr().unwrap(),
|
||||
tpu_forwards.local_addr().unwrap(),
|
||||
storage.local_addr().unwrap(),
|
||||
@ -1719,7 +1723,7 @@ impl Node {
|
||||
let (_, retransmit_sockets) =
|
||||
multi_bind_in_range(port_range, 8).expect("retransmit multi_bind");
|
||||
|
||||
let (_, repair) = Self::bind(port_range);
|
||||
let (repair_port, repair) = Self::bind(port_range);
|
||||
let (_, broadcast) = Self::bind(port_range);
|
||||
|
||||
let info = ContactInfo::new(
|
||||
@ -1727,6 +1731,7 @@ impl Node {
|
||||
SocketAddr::new(gossip_addr.ip(), gossip_port),
|
||||
SocketAddr::new(gossip_addr.ip(), tvu_port),
|
||||
SocketAddr::new(gossip_addr.ip(), tvu_forwards_port),
|
||||
SocketAddr::new(gossip_addr.ip(), repair_port),
|
||||
SocketAddr::new(gossip_addr.ip(), tpu_port),
|
||||
SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
|
||||
socketaddr_any!(),
|
||||
@ -1884,6 +1889,7 @@ mod tests {
|
||||
socketaddr!([127, 0, 0, 1], 1239),
|
||||
socketaddr!([127, 0, 0, 1], 1240),
|
||||
socketaddr!([127, 0, 0, 1], 1241),
|
||||
socketaddr!([127, 0, 0, 1], 1242),
|
||||
0,
|
||||
);
|
||||
cluster_info.insert_info(nxt.clone());
|
||||
@ -1904,6 +1910,7 @@ mod tests {
|
||||
socketaddr!([127, 0, 0, 1], 1239),
|
||||
socketaddr!([127, 0, 0, 1], 1240),
|
||||
socketaddr!([127, 0, 0, 1], 1241),
|
||||
socketaddr!([127, 0, 0, 1], 1242),
|
||||
0,
|
||||
);
|
||||
cluster_info.insert_info(nxt);
|
||||
@ -1941,6 +1948,7 @@ mod tests {
|
||||
socketaddr!("127.0.0.1:1239"),
|
||||
socketaddr!("127.0.0.1:1240"),
|
||||
socketaddr!("127.0.0.1:1241"),
|
||||
socketaddr!("127.0.0.1:1242"),
|
||||
0,
|
||||
);
|
||||
let rv = ClusterInfo::run_window_request(
|
||||
@ -2376,7 +2384,8 @@ mod tests {
|
||||
}
|
||||
|
||||
// now add this message back to the table and make sure after the next pull, the entrypoint is unset
|
||||
let entrypoint_crdsvalue = CrdsValue::ContactInfo(entrypoint.clone());
|
||||
let entrypoint_crdsvalue =
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
ClusterInfo::handle_pull_response(
|
||||
&cluster_info,
|
||||
@ -2393,7 +2402,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_split_messages_small() {
|
||||
let value = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
test_split_messages(value);
|
||||
}
|
||||
|
||||
@ -2403,13 +2412,12 @@ mod tests {
|
||||
for i in 0..128 {
|
||||
btree_slots.insert(i);
|
||||
}
|
||||
let value = CrdsValue::EpochSlots(EpochSlots {
|
||||
let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots {
|
||||
from: Pubkey::default(),
|
||||
root: 0,
|
||||
slots: btree_slots,
|
||||
signature: Signature::default(),
|
||||
wallclock: 0,
|
||||
});
|
||||
}));
|
||||
test_split_messages(value);
|
||||
}
|
||||
|
||||
@ -2433,7 +2441,7 @@ mod tests {
|
||||
}
|
||||
|
||||
fn check_pull_request_size(filter: CrdsFilter) {
|
||||
let value = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
let protocol = Protocol::PullRequest(filter, value.clone());
|
||||
assert!(serialized_size(&protocol).unwrap() <= PACKET_DATA_SIZE as u64);
|
||||
}
|
||||
|
@ -210,13 +210,13 @@ impl ClusterInfoRepairListener {
|
||||
for (repairee_pubkey, repairee_epoch_slots) in repairees {
|
||||
let repairee_root = repairee_epoch_slots.root;
|
||||
|
||||
let repairee_tvu = {
|
||||
let repairee_repair_addr = {
|
||||
let r_cluster_info = cluster_info.read().unwrap();
|
||||
let contact_info = r_cluster_info.get_contact_info_for_node(repairee_pubkey);
|
||||
contact_info.map(|c| c.tvu)
|
||||
contact_info.map(|c| c.repair)
|
||||
};
|
||||
|
||||
if let Some(repairee_tvu) = repairee_tvu {
|
||||
if let Some(repairee_addr) = repairee_repair_addr {
|
||||
// For every repairee, get the set of repairmen who are responsible for
|
||||
let mut eligible_repairmen = Self::find_eligible_repairmen(
|
||||
my_pubkey,
|
||||
@ -242,7 +242,7 @@ impl ClusterInfoRepairListener {
|
||||
&repairee_epoch_slots,
|
||||
&eligible_repairmen,
|
||||
socket,
|
||||
&repairee_tvu,
|
||||
&repairee_addr,
|
||||
NUM_SLOTS_PER_UPDATE,
|
||||
epoch_schedule,
|
||||
);
|
||||
@ -261,7 +261,7 @@ impl ClusterInfoRepairListener {
|
||||
repairee_epoch_slots: &EpochSlots,
|
||||
eligible_repairmen: &[&Pubkey],
|
||||
socket: &UdpSocket,
|
||||
repairee_tvu: &SocketAddr,
|
||||
repairee_addr: &SocketAddr,
|
||||
num_slots_to_repair: usize,
|
||||
epoch_schedule: &EpochSchedule,
|
||||
) -> Result<()> {
|
||||
@ -320,7 +320,7 @@ impl ClusterInfoRepairListener {
|
||||
.get_data_shred(slot, blob_index as u64)
|
||||
.expect("Failed to read data blob from blocktree")
|
||||
{
|
||||
socket.send_to(&blob_data[..], repairee_tvu)?;
|
||||
socket.send_to(&blob_data[..], repairee_addr)?;
|
||||
total_data_blobs_sent += 1;
|
||||
}
|
||||
|
||||
@ -328,7 +328,7 @@ impl ClusterInfoRepairListener {
|
||||
.get_coding_shred(slot, blob_index as u64)
|
||||
.expect("Failed to read coding blob from blocktree")
|
||||
{
|
||||
socket.send_to(&coding_bytes[..], repairee_tvu)?;
|
||||
socket.send_to(&coding_bytes[..], repairee_addr)?;
|
||||
total_coding_blobs_sent += 1;
|
||||
}
|
||||
}
|
||||
|
@ -1,12 +1,9 @@
|
||||
use bincode::serialize;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
#[cfg(test)]
|
||||
use solana_sdk::rpc_port;
|
||||
#[cfg(test)]
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::signature::{Signable, Signature};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::borrow::Cow;
|
||||
use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
|
||||
@ -14,14 +11,14 @@ use std::net::{IpAddr, SocketAddr};
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct ContactInfo {
|
||||
pub id: Pubkey,
|
||||
/// signature of this ContactInfo
|
||||
pub signature: Signature,
|
||||
/// gossip address
|
||||
pub gossip: SocketAddr,
|
||||
/// address to connect to for replication
|
||||
pub tvu: SocketAddr,
|
||||
/// address to forward blobs to
|
||||
/// address to forward shreds to
|
||||
pub tvu_forwards: SocketAddr,
|
||||
/// address to send repairs to
|
||||
pub repair: SocketAddr,
|
||||
/// transactions address
|
||||
pub tpu: SocketAddr,
|
||||
/// address to forward unprocessed transactions to
|
||||
@ -80,13 +77,13 @@ impl Default for ContactInfo {
|
||||
gossip: socketaddr_any!(),
|
||||
tvu: socketaddr_any!(),
|
||||
tvu_forwards: socketaddr_any!(),
|
||||
repair: socketaddr_any!(),
|
||||
tpu: socketaddr_any!(),
|
||||
tpu_forwards: socketaddr_any!(),
|
||||
storage_addr: socketaddr_any!(),
|
||||
rpc: socketaddr_any!(),
|
||||
rpc_pubsub: socketaddr_any!(),
|
||||
wallclock: 0,
|
||||
signature: Signature::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -98,6 +95,7 @@ impl ContactInfo {
|
||||
gossip: SocketAddr,
|
||||
tvu: SocketAddr,
|
||||
tvu_forwards: SocketAddr,
|
||||
repair: SocketAddr,
|
||||
tpu: SocketAddr,
|
||||
tpu_forwards: SocketAddr,
|
||||
storage_addr: SocketAddr,
|
||||
@ -107,10 +105,10 @@ impl ContactInfo {
|
||||
) -> Self {
|
||||
Self {
|
||||
id: *id,
|
||||
signature: Signature::default(),
|
||||
gossip,
|
||||
tvu,
|
||||
tvu_forwards,
|
||||
repair,
|
||||
tpu,
|
||||
tpu_forwards,
|
||||
storage_addr,
|
||||
@ -131,6 +129,7 @@ impl ContactInfo {
|
||||
socketaddr!("127.0.0.1:1239"),
|
||||
socketaddr!("127.0.0.1:1240"),
|
||||
socketaddr!("127.0.0.1:1241"),
|
||||
socketaddr!("127.0.0.1:1242"),
|
||||
now,
|
||||
)
|
||||
}
|
||||
@ -150,6 +149,7 @@ impl ContactInfo {
|
||||
addr,
|
||||
addr,
|
||||
addr,
|
||||
addr,
|
||||
0,
|
||||
)
|
||||
}
|
||||
@ -167,6 +167,7 @@ impl ContactInfo {
|
||||
let tvu_addr = next_port(&bind_addr, 2);
|
||||
let tpu_forwards_addr = next_port(&bind_addr, 3);
|
||||
let tvu_forwards_addr = next_port(&bind_addr, 4);
|
||||
let repair = next_port(&bind_addr, 5);
|
||||
let rpc_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
|
||||
let rpc_pubsub_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
|
||||
Self::new(
|
||||
@ -174,6 +175,7 @@ impl ContactInfo {
|
||||
gossip_addr,
|
||||
tvu_addr,
|
||||
tvu_forwards_addr,
|
||||
repair,
|
||||
tpu_addr,
|
||||
tpu_forwards_addr,
|
||||
"0.0.0.0:0".parse().unwrap(),
|
||||
@ -202,6 +204,7 @@ impl ContactInfo {
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
timestamp(),
|
||||
)
|
||||
}
|
||||
@ -232,49 +235,6 @@ impl ContactInfo {
|
||||
}
|
||||
}
|
||||
|
||||
impl Signable for ContactInfo {
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
self.id
|
||||
}
|
||||
|
||||
fn signable_data(&self) -> Cow<[u8]> {
|
||||
#[derive(Serialize)]
|
||||
struct SignData {
|
||||
id: Pubkey,
|
||||
gossip: SocketAddr,
|
||||
tvu: SocketAddr,
|
||||
tpu: SocketAddr,
|
||||
tpu_forwards: SocketAddr,
|
||||
storage_addr: SocketAddr,
|
||||
rpc: SocketAddr,
|
||||
rpc_pubsub: SocketAddr,
|
||||
wallclock: u64,
|
||||
}
|
||||
|
||||
let me = self;
|
||||
let data = SignData {
|
||||
id: me.id,
|
||||
gossip: me.gossip,
|
||||
tvu: me.tvu,
|
||||
tpu: me.tpu,
|
||||
storage_addr: me.storage_addr,
|
||||
tpu_forwards: me.tpu_forwards,
|
||||
rpc: me.rpc,
|
||||
rpc_pubsub: me.rpc_pubsub,
|
||||
wallclock: me.wallclock,
|
||||
};
|
||||
Cow::Owned(serialize(&data).expect("failed to serialize ContactInfo"))
|
||||
}
|
||||
|
||||
fn get_signature(&self) -> Signature {
|
||||
self.signature
|
||||
}
|
||||
|
||||
fn set_signature(&mut self, signature: Signature) {
|
||||
self.signature = signature
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@ -165,11 +165,12 @@ impl Crds {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_value::CrdsData;
|
||||
|
||||
#[test]
|
||||
fn test_insert() {
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(crds.insert(val.clone(), 0).ok(), Some(None));
|
||||
assert_eq!(crds.table.len(), 1);
|
||||
assert!(crds.table.contains_key(&val.label()));
|
||||
@ -178,7 +179,7 @@ mod test {
|
||||
#[test]
|
||||
fn test_update_old() {
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
|
||||
assert_eq!(crds.insert(val.clone(), 1), Err(CrdsError::InsertFailed));
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 0);
|
||||
@ -186,9 +187,15 @@ mod test {
|
||||
#[test]
|
||||
fn test_update_new() {
|
||||
let mut crds = Crds::default();
|
||||
let original = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0));
|
||||
let original = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::default(),
|
||||
0,
|
||||
)));
|
||||
assert_matches!(crds.insert(original.clone(), 0), Ok(_));
|
||||
let val = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 1));
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::default(),
|
||||
1,
|
||||
)));
|
||||
assert_eq!(
|
||||
crds.insert(val.clone(), 1).unwrap().unwrap().value,
|
||||
original
|
||||
@ -198,14 +205,17 @@ mod test {
|
||||
#[test]
|
||||
fn test_update_timestamp() {
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0));
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::default(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
|
||||
|
||||
crds.update_label_timestamp(&val.label(), 1);
|
||||
assert_eq!(crds.table[&val.label()].local_timestamp, 1);
|
||||
assert_eq!(crds.table[&val.label()].insert_timestamp, 0);
|
||||
|
||||
let val2 = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let val2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(val2.label().pubkey(), val.label().pubkey());
|
||||
assert_matches!(crds.insert(val2.clone(), 0), Ok(Some(_)));
|
||||
|
||||
@ -221,7 +231,7 @@ mod test {
|
||||
|
||||
let mut ci = ContactInfo::default();
|
||||
ci.wallclock += 1;
|
||||
let val3 = CrdsValue::ContactInfo(ci);
|
||||
let val3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
|
||||
assert_matches!(crds.insert(val3.clone(), 3), Ok(Some(_)));
|
||||
assert_eq!(crds.table[&val2.label()].local_timestamp, 3);
|
||||
assert_eq!(crds.table[&val2.label()].insert_timestamp, 3);
|
||||
@ -229,7 +239,7 @@ mod test {
|
||||
#[test]
|
||||
fn test_find_old_records() {
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(crds.insert(val.clone(), 1), Ok(None));
|
||||
|
||||
assert!(crds.find_old_labels(0).is_empty());
|
||||
@ -239,7 +249,7 @@ mod test {
|
||||
#[test]
|
||||
fn test_remove() {
|
||||
let mut crds = Crds::default();
|
||||
let val = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_matches!(crds.insert(val.clone(), 1), Ok(_));
|
||||
|
||||
assert_eq!(crds.find_old_labels(1), vec![val.label()]);
|
||||
@ -248,7 +258,7 @@ mod test {
|
||||
}
|
||||
#[test]
|
||||
fn test_equal() {
|
||||
let val = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
let v1 = VersionedCrdsValue::new(1, val.clone());
|
||||
let v2 = VersionedCrdsValue::new(1, val);
|
||||
assert_eq!(v1, v2);
|
||||
@ -258,12 +268,15 @@ mod test {
|
||||
fn test_hash_order() {
|
||||
let v1 = VersionedCrdsValue::new(
|
||||
1,
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0)),
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::default(),
|
||||
0,
|
||||
))),
|
||||
);
|
||||
let v2 = VersionedCrdsValue::new(1, {
|
||||
let mut contact_info = ContactInfo::new_localhost(&Pubkey::default(), 0);
|
||||
contact_info.rpc = socketaddr!("0.0.0.0:0");
|
||||
CrdsValue::ContactInfo(contact_info)
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info))
|
||||
});
|
||||
|
||||
assert_eq!(v1.value.label(), v2.value.label());
|
||||
@ -285,11 +298,17 @@ mod test {
|
||||
fn test_wallclock_order() {
|
||||
let v1 = VersionedCrdsValue::new(
|
||||
1,
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 1)),
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::default(),
|
||||
1,
|
||||
))),
|
||||
);
|
||||
let v2 = VersionedCrdsValue::new(
|
||||
1,
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0)),
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::default(),
|
||||
0,
|
||||
))),
|
||||
);
|
||||
assert_eq!(v1.value.label(), v2.value.label());
|
||||
assert!(v1 > v2);
|
||||
@ -301,11 +320,17 @@ mod test {
|
||||
fn test_label_order() {
|
||||
let v1 = VersionedCrdsValue::new(
|
||||
1,
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
))),
|
||||
);
|
||||
let v2 = VersionedCrdsValue::new(
|
||||
1,
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
))),
|
||||
);
|
||||
assert_ne!(v1, v2);
|
||||
assert!(!(v1 == v2));
|
||||
|
@ -9,7 +9,6 @@ use crate::crds_gossip_pull::{CrdsFilter, CrdsGossipPull};
|
||||
use crate::crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE};
|
||||
use crate::crds_value::{CrdsValue, CrdsValueLabel};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::Signable;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
///The min size for bloom filters
|
||||
@ -204,6 +203,7 @@ pub fn get_weight(max_weight: f32, time_since_last_selected: u32, stake: f32) ->
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_value::CrdsData;
|
||||
use solana_sdk::hash::hash;
|
||||
use solana_sdk::timing::timestamp;
|
||||
|
||||
@ -216,7 +216,10 @@ mod test {
|
||||
let prune_pubkey = Pubkey::new(&[2; 32]);
|
||||
crds_gossip
|
||||
.crds
|
||||
.insert(CrdsValue::ContactInfo(ci.clone()), 0)
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
|
||||
0,
|
||||
)
|
||||
.unwrap();
|
||||
crds_gossip.refresh_push_active_set(&HashMap::new());
|
||||
let now = timestamp();
|
||||
|
@ -294,6 +294,7 @@ impl CrdsGossipPull {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::crds_value::CrdsData;
|
||||
use itertools::Itertools;
|
||||
use solana_sdk::hash::hash;
|
||||
use solana_sdk::packet::PACKET_DATA_SIZE;
|
||||
@ -303,10 +304,16 @@ mod test {
|
||||
let mut crds = Crds::default();
|
||||
let mut stakes = HashMap::new();
|
||||
let node = CrdsGossipPull::default();
|
||||
let me = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
crds.insert(me.clone(), 0).unwrap();
|
||||
for i in 1..=30 {
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let id = entry.label().pubkey();
|
||||
crds.insert(entry.clone(), 0).unwrap();
|
||||
stakes.insert(id, i * 100);
|
||||
@ -325,7 +332,10 @@ mod test {
|
||||
#[test]
|
||||
fn test_new_pull_request() {
|
||||
let mut crds = Crds::default();
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let id = entry.label().pubkey();
|
||||
let node = CrdsGossipPull::default();
|
||||
assert_eq!(
|
||||
@ -339,7 +349,10 @@ mod test {
|
||||
Err(CrdsGossipError::NoPeers)
|
||||
);
|
||||
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
crds.insert(new.clone(), 0).unwrap();
|
||||
let req = node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE);
|
||||
let (to, _, self_info) = req.unwrap();
|
||||
@ -350,13 +363,22 @@ mod test {
|
||||
#[test]
|
||||
fn test_new_mark_creation_time() {
|
||||
let mut crds = Crds::default();
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let mut node = CrdsGossipPull::default();
|
||||
crds.insert(entry.clone(), 0).unwrap();
|
||||
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
crds.insert(old.clone(), 0).unwrap();
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
crds.insert(new.clone(), 0).unwrap();
|
||||
|
||||
// set request creation time to max_value
|
||||
@ -380,11 +402,17 @@ mod test {
|
||||
#[test]
|
||||
fn test_process_pull_request() {
|
||||
let mut node_crds = Crds::default();
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let node = CrdsGossipPull::default();
|
||||
node_crds.insert(entry.clone(), 0).unwrap();
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
node_crds.insert(new.clone(), 0).unwrap();
|
||||
let req = node.new_pull_request(
|
||||
&node_crds,
|
||||
@ -419,22 +447,32 @@ mod test {
|
||||
#[test]
|
||||
fn test_process_pull_request_response() {
|
||||
let mut node_crds = Crds::default();
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let node_pubkey = entry.label().pubkey();
|
||||
let mut node = CrdsGossipPull::default();
|
||||
node_crds.insert(entry.clone(), 0).unwrap();
|
||||
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
node_crds.insert(new.clone(), 0).unwrap();
|
||||
|
||||
let mut dest = CrdsGossipPull::default();
|
||||
let mut dest_crds = Crds::default();
|
||||
let new_id = Pubkey::new_rand();
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&new_id, 1));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&new_id, 1,
|
||||
)));
|
||||
dest_crds.insert(new.clone(), 0).unwrap();
|
||||
|
||||
// node contains a key from the dest node, but at an older local timestamp
|
||||
let same_key = CrdsValue::ContactInfo(ContactInfo::new_localhost(&new_id, 0));
|
||||
let same_key = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&new_id, 0,
|
||||
)));
|
||||
assert_eq!(same_key.label(), new.label());
|
||||
assert!(same_key.wallclock() < new.wallclock());
|
||||
node_crds.insert(same_key.clone(), 0).unwrap();
|
||||
@ -494,12 +532,18 @@ mod test {
|
||||
#[test]
|
||||
fn test_gossip_purge() {
|
||||
let mut node_crds = Crds::default();
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let node_label = entry.label();
|
||||
let node_pubkey = node_label.pubkey();
|
||||
let mut node = CrdsGossipPull::default();
|
||||
node_crds.insert(entry.clone(), 0).unwrap();
|
||||
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
node_crds.insert(old.clone(), 0).unwrap();
|
||||
let value_hash = node_crds.lookup_versioned(&old.label()).unwrap().value_hash;
|
||||
|
||||
|
@ -340,7 +340,7 @@ impl CrdsGossipPush {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use solana_sdk::signature::Signable;
|
||||
use crate::crds_value::CrdsData;
|
||||
|
||||
#[test]
|
||||
fn test_prune() {
|
||||
@ -353,7 +353,9 @@ mod test {
|
||||
stakes.insert(self_id, 100);
|
||||
stakes.insert(origin, 100);
|
||||
|
||||
let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&origin, 0));
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&origin, 0,
|
||||
)));
|
||||
let label = value.label();
|
||||
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
|
||||
let mut low_staked_set = HashSet::new();
|
||||
@ -395,7 +397,10 @@ mod test {
|
||||
fn test_process_push() {
|
||||
let mut crds = Crds::default();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let label = value.label();
|
||||
// push a new message
|
||||
assert_eq!(
|
||||
@ -416,7 +421,7 @@ mod test {
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
|
||||
ci.wallclock = 1;
|
||||
let value = CrdsValue::ContactInfo(ci.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
|
||||
// push a new message
|
||||
assert_eq!(
|
||||
@ -426,7 +431,7 @@ mod test {
|
||||
|
||||
// push an old version
|
||||
ci.wallclock = 0;
|
||||
let value = CrdsValue::ContactInfo(ci.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
|
||||
Err(CrdsGossipError::PushMessageOldVersion)
|
||||
@ -441,7 +446,7 @@ mod test {
|
||||
|
||||
// push a version to far in the future
|
||||
ci.wallclock = timeout + 1;
|
||||
let value = CrdsValue::ContactInfo(ci.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
|
||||
Err(CrdsGossipError::PushMessageTimeout)
|
||||
@ -449,7 +454,7 @@ mod test {
|
||||
|
||||
// push a version to far in the past
|
||||
ci.wallclock = 0;
|
||||
let value = CrdsValue::ContactInfo(ci.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), value, timeout + 1),
|
||||
Err(CrdsGossipError::PushMessageTimeout)
|
||||
@ -461,7 +466,7 @@ mod test {
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
|
||||
ci.wallclock = 0;
|
||||
let value_old = CrdsValue::ContactInfo(ci.clone());
|
||||
let value_old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
|
||||
// push a new message
|
||||
assert_eq!(
|
||||
@ -471,7 +476,7 @@ mod test {
|
||||
|
||||
// push an old version
|
||||
ci.wallclock = 1;
|
||||
let value = CrdsValue::ContactInfo(ci.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), value, 0)
|
||||
.unwrap()
|
||||
@ -492,13 +497,19 @@ mod test {
|
||||
solana_logger::setup();
|
||||
let mut crds = Crds::default();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let value1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let value1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
|
||||
assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
|
||||
assert!(push.active_set.get(&value1.label().pubkey()).is_some());
|
||||
let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
|
||||
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
|
||||
for _ in 0..30 {
|
||||
@ -510,7 +521,9 @@ mod test {
|
||||
assert!(push.active_set.get(&value2.label().pubkey()).is_some());
|
||||
|
||||
for _ in 0..push.num_active {
|
||||
let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(
|
||||
ContactInfo::new_localhost(&Pubkey::new_rand(), 0),
|
||||
));
|
||||
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
|
||||
}
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
@ -523,8 +536,10 @@ mod test {
|
||||
let push = CrdsGossipPush::default();
|
||||
let mut stakes = HashMap::new();
|
||||
for i in 1..=100 {
|
||||
let peer =
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), time));
|
||||
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
time,
|
||||
)));
|
||||
let id = peer.label().pubkey();
|
||||
crds.insert(peer.clone(), time).unwrap();
|
||||
stakes.insert(id, i * 100);
|
||||
@ -542,11 +557,17 @@ mod test {
|
||||
fn test_new_push_messages() {
|
||||
let mut crds = Crds::default();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
|
||||
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let mut expected = HashMap::new();
|
||||
expected.insert(peer.label().pubkey(), vec![new_msg.clone()]);
|
||||
assert_eq!(
|
||||
@ -560,11 +581,20 @@ mod test {
|
||||
fn test_personalized_push_messages() {
|
||||
let mut crds = Crds::default();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let peer_1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer_1.clone(), 0), Ok(None));
|
||||
let peer_2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer_2.clone(), 0), Ok(None));
|
||||
let peer_3 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
|
||||
Ok(None)
|
||||
@ -572,7 +602,10 @@ mod test {
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
|
||||
// push 3's contact info to 1 and 2 and 3
|
||||
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&peer_3.pubkey(), 0));
|
||||
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&peer_3.pubkey(),
|
||||
0,
|
||||
)));
|
||||
let mut expected = HashMap::new();
|
||||
expected.insert(peer_1.pubkey(), vec![new_msg.clone()]);
|
||||
expected.insert(peer_2.pubkey(), vec![new_msg.clone()]);
|
||||
@ -583,11 +616,17 @@ mod test {
|
||||
fn test_process_prune() {
|
||||
let mut crds = Crds::default();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
|
||||
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let expected = HashMap::new();
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
|
||||
@ -600,13 +639,16 @@ mod test {
|
||||
fn test_purge_old_pending_push_messages() {
|
||||
let mut crds = Crds::default();
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
|
||||
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
|
||||
|
||||
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
|
||||
ci.wallclock = 1;
|
||||
let new_msg = CrdsValue::ContactInfo(ci.clone());
|
||||
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
let expected = HashMap::new();
|
||||
assert_eq!(
|
||||
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 1),
|
||||
@ -622,7 +664,7 @@ mod test {
|
||||
let mut push = CrdsGossipPush::default();
|
||||
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
|
||||
ci.wallclock = 0;
|
||||
let value = CrdsValue::ContactInfo(ci.clone());
|
||||
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
|
||||
let label = value.label();
|
||||
// push a new message
|
||||
assert_eq!(
|
||||
|
@ -8,9 +8,34 @@ use std::collections::BTreeSet;
use std::fmt;

/// CrdsValue that is replicated across the cluster
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct CrdsValue {
pub signature: Signature,
pub data: CrdsData,
}

impl Signable for CrdsValue {
fn pubkey(&self) -> Pubkey {
self.pubkey()
}

fn signable_data(&self) -> Cow<[u8]> {
Cow::Owned(serialize(&self.data).expect("failed to serialize CrdsData"))
}

fn get_signature(&self) -> Signature {
self.signature
}

fn set_signature(&mut self, signature: Signature) {
self.signature = signature
}
}

/// CrdsData that defines the different types of items CrdsValues can hold
#[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum CrdsValue {
pub enum CrdsData {
/// * Merge Strategy - Latest wallclock is picked
ContactInfo(ContactInfo),
/// * Merge Strategy - Latest wallclock is picked
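With the signature hoisted onto the wrapper struct, a single `Signable` implementation now covers every `CrdsData` variant by signing its bincode serialization. A short sketch of the sign/verify round trip, again assuming it sits inside `solana-core` next to `crds_value.rs`:

```rust
// Sketch only: exercises the Signable impl shown above.
use crate::contact_info::ContactInfo;
use crate::crds_value::{CrdsData, CrdsValue};
use solana_sdk::signature::{Keypair, KeypairUtil, Signable};

fn sign_and_verify() {
    let keypair = Keypair::new();
    // new_unsigned leaves the signature at Signature::default(); sign() then
    // covers only the serialized CrdsData, exactly what signable_data() returns.
    let mut value = CrdsValue::new_unsigned(CrdsData::ContactInfo(
        ContactInfo::new_localhost(&keypair.pubkey(), 0),
    ));
    value.sign(&keypair);
    assert!(value.verify());
}
```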
@ -24,7 +49,6 @@ pub struct EpochSlots {
|
||||
pub from: Pubkey,
|
||||
pub root: u64,
|
||||
pub slots: BTreeSet<u64>,
|
||||
pub signature: Signature,
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
@ -34,46 +58,15 @@ impl EpochSlots {
|
||||
from,
|
||||
root,
|
||||
slots,
|
||||
signature: Signature::default(),
|
||||
wallclock,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Signable for EpochSlots {
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
self.from
|
||||
}
|
||||
|
||||
fn signable_data(&self) -> Cow<[u8]> {
|
||||
#[derive(Serialize)]
|
||||
struct SignData<'a> {
|
||||
root: u64,
|
||||
slots: &'a BTreeSet<u64>,
|
||||
wallclock: u64,
|
||||
}
|
||||
let data = SignData {
|
||||
root: self.root,
|
||||
slots: &self.slots,
|
||||
wallclock: self.wallclock,
|
||||
};
|
||||
Cow::Owned(serialize(&data).expect("unable to serialize EpochSlots"))
|
||||
}
|
||||
|
||||
fn get_signature(&self) -> Signature {
|
||||
self.signature
|
||||
}
|
||||
|
||||
fn set_signature(&mut self, signature: Signature) {
|
||||
self.signature = signature;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
pub struct Vote {
|
||||
pub from: Pubkey,
|
||||
pub transaction: Transaction,
|
||||
pub signature: Signature,
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
@ -82,39 +75,11 @@ impl Vote {
|
||||
Self {
|
||||
from: *from,
|
||||
transaction,
|
||||
signature: Signature::default(),
|
||||
wallclock,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Signable for Vote {
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
self.from
|
||||
}
|
||||
|
||||
fn signable_data(&self) -> Cow<[u8]> {
|
||||
#[derive(Serialize)]
|
||||
struct SignData<'a> {
|
||||
transaction: &'a Transaction,
|
||||
wallclock: u64,
|
||||
}
|
||||
let data = SignData {
|
||||
transaction: &self.transaction,
|
||||
wallclock: self.wallclock,
|
||||
};
|
||||
Cow::Owned(serialize(&data).expect("unable to serialize Vote"))
|
||||
}
|
||||
|
||||
fn get_signature(&self) -> Signature {
|
||||
self.signature
|
||||
}
|
||||
|
||||
fn set_signature(&mut self, signature: Signature) {
|
||||
self.signature = signature
|
||||
}
|
||||
}
|
||||
|
||||
/// Type of the replicated value
|
||||
/// These are labels for values in a record that is associated with `Pubkey`
|
||||
#[derive(PartialEq, Hash, Eq, Clone, Debug)]
|
||||
@ -145,40 +110,57 @@ impl CrdsValueLabel {
|
||||
}
|
||||
|
||||
impl CrdsValue {
|
||||
pub fn new_unsigned(data: CrdsData) -> Self {
|
||||
Self {
|
||||
signature: Signature::default(),
|
||||
data,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_signed(data: CrdsData, keypair: &Keypair) -> Self {
|
||||
let mut value = Self::new_unsigned(data);
|
||||
value.sign(keypair);
|
||||
value
|
||||
}
|
||||
/// Totally unsecure unverfiable wallclock of the node that generated this message
|
||||
/// Latest wallclock is always picked.
|
||||
/// This is used to time out push messages.
|
||||
pub fn wallclock(&self) -> u64 {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => contact_info.wallclock,
|
||||
CrdsValue::Vote(vote) => vote.wallclock,
|
||||
CrdsValue::EpochSlots(vote) => vote.wallclock,
|
||||
match &self.data {
|
||||
CrdsData::ContactInfo(contact_info) => contact_info.wallclock,
|
||||
CrdsData::Vote(vote) => vote.wallclock,
|
||||
CrdsData::EpochSlots(vote) => vote.wallclock,
|
||||
}
|
||||
}
|
||||
pub fn pubkey(&self) -> Pubkey {
|
||||
match &self.data {
|
||||
CrdsData::ContactInfo(contact_info) => contact_info.id,
|
||||
CrdsData::Vote(vote) => vote.from,
|
||||
CrdsData::EpochSlots(slots) => slots.from,
|
||||
}
|
||||
}
|
||||
pub fn label(&self) -> CrdsValueLabel {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => {
|
||||
CrdsValueLabel::ContactInfo(contact_info.pubkey())
|
||||
}
|
||||
CrdsValue::Vote(vote) => CrdsValueLabel::Vote(vote.pubkey()),
|
||||
CrdsValue::EpochSlots(slots) => CrdsValueLabel::EpochSlots(slots.pubkey()),
|
||||
match &self.data {
|
||||
CrdsData::ContactInfo(_) => CrdsValueLabel::ContactInfo(self.pubkey()),
|
||||
CrdsData::Vote(_) => CrdsValueLabel::Vote(self.pubkey()),
|
||||
CrdsData::EpochSlots(_) => CrdsValueLabel::EpochSlots(self.pubkey()),
|
||||
}
|
||||
}
|
||||
pub fn contact_info(&self) -> Option<&ContactInfo> {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => Some(contact_info),
|
||||
match &self.data {
|
||||
CrdsData::ContactInfo(contact_info) => Some(contact_info),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub fn vote(&self) -> Option<&Vote> {
|
||||
match self {
|
||||
CrdsValue::Vote(vote) => Some(vote),
|
||||
match &self.data {
|
||||
CrdsData::Vote(vote) => Some(vote),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
pub fn epoch_slots(&self) -> Option<&EpochSlots> {
|
||||
match self {
|
||||
CrdsValue::EpochSlots(slots) => Some(slots),
|
||||
match &self.data {
|
||||
CrdsData::EpochSlots(slots) => Some(slots),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@ -197,48 +179,6 @@ impl CrdsValue {
|
||||
}
|
||||
}
|
||||
|
||||
impl Signable for CrdsValue {
|
||||
fn sign(&mut self, keypair: &Keypair) {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => contact_info.sign(keypair),
|
||||
CrdsValue::Vote(vote) => vote.sign(keypair),
|
||||
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.sign(keypair),
|
||||
};
|
||||
}
|
||||
|
||||
fn verify(&self) -> bool {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => contact_info.verify(),
|
||||
CrdsValue::Vote(vote) => vote.verify(),
|
||||
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.verify(),
|
||||
}
|
||||
}
|
||||
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => contact_info.pubkey(),
|
||||
CrdsValue::Vote(vote) => vote.pubkey(),
|
||||
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.pubkey(),
|
||||
}
|
||||
}
|
||||
|
||||
fn signable_data(&self) -> Cow<[u8]> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
fn get_signature(&self) -> Signature {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => contact_info.get_signature(),
|
||||
CrdsValue::Vote(vote) => vote.get_signature(),
|
||||
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.get_signature(),
|
||||
}
|
||||
}
|
||||
|
||||
fn set_signature(&mut self, _: Signature) {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
@ -263,17 +203,23 @@ mod test {
|
||||
}
|
||||
#[test]
|
||||
fn test_keys_and_values() {
|
||||
let v = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().contact_info().unwrap().id;
|
||||
assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));
|
||||
|
||||
let v = CrdsValue::Vote(Vote::new(&Pubkey::default(), test_tx(), 0));
|
||||
let v =
|
||||
CrdsValue::new_unsigned(CrdsData::Vote(Vote::new(&Pubkey::default(), test_tx(), 0)));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().vote().unwrap().from;
|
||||
assert_eq!(v.label(), CrdsValueLabel::Vote(key));
|
||||
|
||||
let v = CrdsValue::EpochSlots(EpochSlots::new(Pubkey::default(), 0, BTreeSet::new(), 0));
|
||||
let v = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots::new(
|
||||
Pubkey::default(),
|
||||
0,
|
||||
BTreeSet::new(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().epoch_slots().unwrap().from;
|
||||
assert_eq!(v.label(), CrdsValueLabel::EpochSlots(key));
|
||||
@ -282,13 +228,24 @@ mod test {
|
||||
fn test_signature() {
|
||||
let keypair = Keypair::new();
|
||||
let wrong_keypair = Keypair::new();
|
||||
let mut v =
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&keypair.pubkey(), timestamp()));
|
||||
let mut v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&keypair.pubkey(),
|
||||
timestamp(),
|
||||
)));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
v = CrdsValue::Vote(Vote::new(&keypair.pubkey(), test_tx(), timestamp()));
|
||||
v = CrdsValue::new_unsigned(CrdsData::Vote(Vote::new(
|
||||
&keypair.pubkey(),
|
||||
test_tx(),
|
||||
timestamp(),
|
||||
)));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
let btreeset: BTreeSet<u64> = vec![1, 2, 3, 6, 8].into_iter().collect();
|
||||
v = CrdsValue::EpochSlots(EpochSlots::new(keypair.pubkey(), 0, btreeset, timestamp()));
|
||||
v = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots::new(
|
||||
keypair.pubkey(),
|
||||
0,
|
||||
btreeset,
|
||||
timestamp(),
|
||||
)));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
}
|
||||
|
||||
|
@ -313,7 +313,7 @@ impl PohRecorder {
let poh_entry = self.poh.lock().unwrap().tick();
inc_new_counter_warn!(
"poh_recorder-tick_lock_contention",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
let now = Instant::now();
if let Some(poh_entry) = poh_entry {
@ -323,7 +323,7 @@ impl PohRecorder {
if self.leader_first_tick_height.is_none() {
inc_new_counter_warn!(
"poh_recorder-tick_overhead",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
return;
}
@ -339,7 +339,7 @@ impl PohRecorder {
}
inc_new_counter_warn!(
"poh_recorder-tick_overhead",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
}

@ -363,20 +363,29 @@ impl PohRecorder {
return Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached));
}

let now = Instant::now();
if let Some(poh_entry) = self.poh.lock().unwrap().record(mixin) {
{
let now = Instant::now();
let mut poh_lock = self.poh.lock().unwrap();
inc_new_counter_warn!(
"poh_recorder-record_lock_contention",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
let entry = Entry {
num_hashes: poh_entry.num_hashes,
hash: poh_entry.hash,
transactions,
};
self.sender
.send((working_bank.bank.clone(), (entry, self.tick_height)))?;
return Ok(());
let now = Instant::now();
let res = poh_lock.record(mixin);
inc_new_counter_warn!(
"poh_recorder-record_ms",
timing::duration_as_us(&now.elapsed()) as usize
);
if let Some(poh_entry) = res {
let entry = Entry {
num_hashes: poh_entry.num_hashes,
hash: poh_entry.hash,
transactions,
};
self.sender
.send((working_bank.bank.clone(), (entry, self.tick_height)))?;
return Ok(());
}
}
// record() might fail if the next PoH hash needs to be a tick. But that's ok, tick()
// and re-record()

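The counters above move from millisecond to microsecond resolution, so short lock waits no longer round down to zero, and `record()` now takes the PoH lock once instead of re-locking inside the branch. A standalone sketch of the same measurement idea, with a plain `std::sync::Mutex` standing in for the PoH lock:

```rust
// Sketch only: not the PohRecorder itself, just the timing pattern it uses.
use solana_sdk::timing::duration_as_us;
use std::sync::Mutex;
use std::time::Instant;

fn timed_lock<T>(lock: &Mutex<T>) -> u64 {
    let now = Instant::now();
    let _guard = lock.lock().unwrap(); // any contention shows up in this wait
    // duration_as_ms would report 0 for sub-millisecond waits; microseconds keep the signal.
    duration_as_us(&now.elapsed())
}
```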
@@ -9,25 +9,26 @@ use std::ops::Div;

/// Returns a list of indexes shuffled based on the input weights
/// Note - The sum of all weights must not exceed `u64::MAX`
pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize>
pub fn weighted_shuffle<T>(weights: Vec<T>, mut rng: ChaChaRng) -> Vec<usize>
where
T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
{
let mut rng = rng;
let total_weight: T = weights.clone().into_iter().sum();
weights
.into_iter()
.enumerate()
.map(|(i, v)| {
// This generates an "inverse" weight but it avoids floating point math
let x = (total_weight / v)
.to_u64()
.expect("values > u64::max are not supported");
(
i,
// capture the u64 into u128s to prevent overflow
(&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
rng.gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
)
})
// sort in ascending order
.sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val))
.map(|x| x.0)
.collect()

@@ -35,22 +36,23 @@ where

/// Returns the highest index after computing a weighted shuffle.
/// Saves doing any sorting for O(n) max calculation.
pub fn weighted_best(weights_and_indicies: &[(u64, usize)], rng: ChaChaRng) -> usize {
let mut rng = rng;
if weights_and_indicies.is_empty() {
pub fn weighted_best(weights_and_indexes: &[(u64, usize)], mut rng: ChaChaRng) -> usize {
if weights_and_indexes.is_empty() {
return 0;
}
let total_weight: u64 = weights_and_indicies.iter().map(|x| x.0).sum();
let mut best_weight = 0;
let total_weight: u64 = weights_and_indexes.iter().map(|x| x.0).sum();
let mut lowest_weight = std::u128::MAX;
let mut best_index = 0;
for v in weights_and_indicies {
for v in weights_and_indexes {
// This generates an "inverse" weight but it avoids floating point math
let x = (total_weight / v.0)
.to_u64()
.expect("values > u64::max are not supported");
// capture the u64 into u128s to prevent overflow
let weight = (&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x);
if weight > best_weight {
best_weight = weight;
let computed_weight = rng.gen_range(1, u128::from(std::u16::MAX)) * u128::from(x);
// The highest input weight maps to the lowest computed weight
if computed_weight < lowest_weight {
lowest_weight = computed_weight;
best_index = v.1;
}
}

@@ -120,9 +122,12 @@ mod tests {

#[test]
fn test_weighted_best() {
let mut weights = vec![(std::u32::MAX as u64, 0); 3];
weights.push((1, 5));
let best = weighted_best(&weights, ChaChaRng::from_seed([0x5b; 32]));
assert_eq!(best, 5);
let weights_and_indexes: Vec<_> = vec![100u64, 1000, 10_000, 10]
.into_iter()
.enumerate()
.map(|(i, weight)| (weight, i))
.collect();
let best_index = weighted_best(&weights_and_indexes, ChaChaRng::from_seed([0x5b; 32]));
assert_eq!(best_index, 2);
}
}
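The updated test leans on the inverse-weight trick documented in weighted_shuffle and weighted_best: each weight w is mapped to a key r * (total / w), so the heaviest item tends to receive the smallest key and wins the ascending comparison, all in integer math. A minimal std-only sketch of that mapping, with a fixed r standing in for the ChaChaRng sample and the same weights as the test:

```rust
// Map each weight to an "inverse" key: larger weight => smaller key on average.
// u128 math keeps the multiplication from overflowing, mirroring the comment above.
fn inverse_weight_keys(weights: &[u64], r: u128) -> Vec<(usize, u128)> {
    let total: u64 = weights.iter().sum();
    let mut keyed: Vec<(usize, u128)> = weights
        .iter()
        .enumerate()
        .map(|(i, &w)| (i, r * u128::from(total / w)))
        .collect();
    keyed.sort_by_key(|&(_, key)| key); // ascending: heaviest items sort first
    keyed
}

fn main() {
    // With any fixed r, the heaviest weight (10_000) gets the smallest key,
    // matching assert_eq!(best_index, 2) in the test above.
    let order = inverse_weight_keys(&[100, 1000, 10_000, 10], 7);
    assert_eq!(order[0].0, 2);
    println!("{:?}", order);
}
```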
@@ -85,6 +85,7 @@ where
total_packets += more_packets.packets.len();
packets.push(more_packets)
}

let now = Instant::now();
inc_new_counter_debug!("streamer-recv_window-recv", total_packets);

@@ -127,7 +128,8 @@ where
}
}

blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;
let blocktree_insert_metrics = blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;
blocktree_insert_metrics.report_metrics("recv-window-insert-shreds");

trace!(
"Elapsed processing time in recv_window(): {}",
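The hunk above changes the window service to capture the value returned by insert_shreds and report it, rather than discarding it. A small sketch of that call pattern, using illustrative stand-in types rather than the real blocktree API surface:

```rust
// Hypothetical stand-ins for BlocktreeInsertionMetrics and insert_shreds,
// just to show the "return metrics, then report them" shape of the change.
#[derive(Default, Debug)]
struct InsertionMetrics {
    num_shreds: usize,
    total_elapsed_us: u64,
}

impl InsertionMetrics {
    fn report_metrics(&self, name: &str) {
        println!(
            "{} num_shreds={} total_elapsed_us={}",
            name, self.num_shreds, self.total_elapsed_us
        );
    }
}

fn insert_shreds(shreds: Vec<u32>) -> Result<InsertionMetrics, ()> {
    Ok(InsertionMetrics { num_shreds: shreds.len(), total_elapsed_us: 0 })
}

fn main() -> Result<(), ()> {
    let metrics = insert_shreds(vec![1, 2, 3])?;
    metrics.report_metrics("recv-window-insert-shreds");
    Ok(())
}
```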
@ -6,8 +6,8 @@ use solana_core::contact_info::ContactInfo;
|
||||
use solana_core::crds_gossip::*;
|
||||
use solana_core::crds_gossip_error::CrdsGossipError;
|
||||
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
|
||||
use solana_core::crds_value::CrdsValue;
|
||||
use solana_core::crds_value::CrdsValueLabel;
|
||||
use solana_core::crds_value::{CrdsData, CrdsValue};
|
||||
use solana_sdk::hash::hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::timestamp;
|
||||
@ -72,10 +72,16 @@ fn stakes(network: &Network) -> HashMap<Pubkey, u64> {
|
||||
}
|
||||
|
||||
fn star_network_create(num: usize) -> Network {
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let mut network: HashMap<_, _> = (1..num)
|
||||
.map(|_| {
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let id = new.label().pubkey();
|
||||
let mut node = CrdsGossip::default();
|
||||
node.crds.insert(new.clone(), 0).unwrap();
|
||||
@ -93,14 +99,20 @@ fn star_network_create(num: usize) -> Network {
|
||||
}
|
||||
|
||||
fn rstar_network_create(num: usize) -> Network {
|
||||
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let mut origin = CrdsGossip::default();
|
||||
let id = entry.label().pubkey();
|
||||
origin.crds.insert(entry.clone(), 0).unwrap();
|
||||
origin.set_self(&id);
|
||||
let mut network: HashMap<_, _> = (1..num)
|
||||
.map(|_| {
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let id = new.label().pubkey();
|
||||
let mut node = CrdsGossip::default();
|
||||
node.crds.insert(new.clone(), 0).unwrap();
|
||||
@ -116,7 +128,10 @@ fn rstar_network_create(num: usize) -> Network {
|
||||
fn ring_network_create(num: usize) -> Network {
|
||||
let mut network: HashMap<_, _> = (0..num)
|
||||
.map(|_| {
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let id = new.label().pubkey();
|
||||
let mut node = CrdsGossip::default();
|
||||
node.crds.insert(new.clone(), 0).unwrap();
|
||||
@ -147,7 +162,10 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
|
||||
let num = stakes.len();
|
||||
let mut network: HashMap<_, _> = (0..num)
|
||||
.map(|n| {
|
||||
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
|
||||
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
)));
|
||||
let id = new.label().pubkey();
|
||||
let mut node = CrdsGossip::default();
|
||||
node.crds.insert(new.clone(), 0).unwrap();
|
||||
@ -219,7 +237,11 @@ fn network_simulator(network: &mut Network, max_convergance: f64) {
|
||||
.and_then(|v| v.contact_info().cloned())
|
||||
.unwrap();
|
||||
m.wallclock = now;
|
||||
node.process_push_message(&Pubkey::default(), vec![CrdsValue::ContactInfo(m)], now);
|
||||
node.process_push_message(
|
||||
&Pubkey::default(),
|
||||
vec![CrdsValue::new_unsigned(CrdsData::ContactInfo(m))],
|
||||
now,
|
||||
);
|
||||
});
|
||||
// push for a bit
|
||||
let (queue_size, bytes_tx) = network_run_push(network, start, end);
|
||||
@ -547,7 +569,10 @@ fn test_prune_errors() {
|
||||
let prune_pubkey = Pubkey::new(&[2; 32]);
|
||||
crds_gossip
|
||||
.crds
|
||||
.insert(CrdsValue::ContactInfo(ci.clone()), 0)
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
|
||||
0,
|
||||
)
|
||||
.unwrap();
|
||||
crds_gossip.refresh_push_active_set(&HashMap::new());
|
||||
let now = timestamp();
|
||||
|
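These test hunks replace direct construction of CrdsValue::ContactInfo(...) with CrdsValue::new_unsigned(CrdsData::ContactInfo(...)), so the data enum now sits inside a value wrapper built through a constructor. A rough sketch of that wrapping pattern with simplified, hypothetical types (the real CrdsValue carries a signature and far richer data):

```rust
// Simplified stand-ins: the point is only the constructor-based wrapping.
#[derive(Clone, Debug)]
enum CrdsData {
    ContactInfo(String),
}

#[derive(Clone, Debug)]
struct CrdsValue {
    signature: Option<Vec<u8>>, // left unsigned by new_unsigned()
    data: CrdsData,
}

impl CrdsValue {
    fn new_unsigned(data: CrdsData) -> Self {
        CrdsValue { signature: None, data }
    }
}

fn main() {
    // Before: CrdsValue::ContactInfo(info); after: wrap the data enum explicitly.
    let value = CrdsValue::new_unsigned(CrdsData::ContactInfo("127.0.0.1:8000".to_string()));
    println!("{:?}", value);
}
```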
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-drone"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Drone"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,9 +19,9 @@ clap = "2.33"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.2" }
|
||||
solana-metrics = { path = "../metrics", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-fixed-buf"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "A fixed-size byte array that supports bincode serde"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -15,10 +15,10 @@ serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
serde_json = "1.0.41"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" }
|
||||
solana-ledger = { path = "../ledger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.2" }
|
||||
solana-ledger = { path = "../ledger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
|
||||
tempfile = "3.1.0"
|
||||
|
@@ -310,16 +310,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let bootstrap_storage_keypair = read_keypair_file(bootstrap_storage_keypair_file)?;
let mint_keypair = read_keypair_file(mint_keypair_file)?;

let vote_account = vote_state::create_account(
let bootstrap_leader_vote_account = vote_state::create_account(
&bootstrap_vote_keypair.pubkey(),
&bootstrap_leader_keypair.pubkey(),
0,
1,
);
let stake_account = stake_state::create_account(
&bootstrap_stake_keypair.pubkey(),
let bootstrap_leader_stake_account = stake_state::create_account(
&bootstrap_leader_keypair.pubkey(),
&bootstrap_vote_keypair.pubkey(),
&vote_account,
&bootstrap_leader_vote_account,
bootstrap_leader_stake_lamports,
);

@@ -335,9 +335,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
Account::new(bootstrap_leader_lamports, 0, &system_program::id()),
),
// where votes go to
(bootstrap_vote_keypair.pubkey(), vote_account),
(
bootstrap_vote_keypair.pubkey(),
bootstrap_leader_vote_account,
),
// passive bootstrap leader stake
(bootstrap_stake_keypair.pubkey(), stake_account),
(
bootstrap_stake_keypair.pubkey(),
bootstrap_leader_stake_account,
),
(
bootstrap_storage_keypair.pubkey(),
storage_contract::create_validator_storage_account(
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-genesis-programs"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana genesis programs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,30 +10,25 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4.8" }
|
||||
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.20.0" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.20.0" }
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
|
||||
solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
|
||||
solana-config-program = { path = "../programs/config_program", version = "0.20.0" }
|
||||
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
|
||||
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
|
||||
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
|
||||
solana-stake-program = { path = "../programs/stake_program", version = "0.20.0" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
|
||||
solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" }
|
||||
solana-vest-api = { path = "../programs/vest_api", version = "0.20.0" }
|
||||
solana-vest-program = { path = "../programs/vest_program", version = "0.20.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
|
||||
solana-vote-program = { path = "../programs/vote_program", version = "0.20.0" }
|
||||
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.20.2" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.20.2" }
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
|
||||
solana-budget-program = { path = "../programs/budget_program", version = "0.20.2" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
|
||||
solana-config-program = { path = "../programs/config_program", version = "0.20.2" }
|
||||
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.2" }
|
||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.2" }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
|
||||
solana-stake-program = { path = "../programs/stake_program", version = "0.20.2" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
|
||||
solana-storage-program = { path = "../programs/storage_program", version = "0.20.2" }
|
||||
solana-vest-api = { path = "../programs/vest_api", version = "0.20.2" }
|
||||
solana-vest-program = { path = "../programs/vest_program", version = "0.20.2" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
|
||||
solana-vote-program = { path = "../programs/vote_program", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
name = "solana_genesis_programs"
|
||||
|
||||
[features]
|
||||
move = ["solana-move-loader-program", "solana-move-loader-api"]
|
||||
|
@@ -1,6 +1,6 @@
use solana_sdk::{
clock::Epoch, genesis_block::OperatingMode, pubkey::Pubkey,
system_program::solana_system_program,
clock::Epoch, genesis_block::OperatingMode, move_loader::solana_move_loader_program,
pubkey::Pubkey, system_program::solana_system_program,
};

#[macro_use]

@@ -11,9 +11,6 @@ extern crate solana_budget_program;
extern crate solana_config_program;
#[macro_use]
extern crate solana_exchange_program;
#[cfg(feature = "move")]
#[macro_use]
extern crate solana_move_loader_program;
#[macro_use]
extern crate solana_stake_program;
#[macro_use]

@@ -42,8 +39,7 @@ pub fn get(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(String, P
// Programs that are only available in Development mode
solana_budget_program!(),
solana_exchange_program!(),
#[cfg(feature = "move")]
solana_move_loader_program!(),
solana_move_loader_program(),
])
} else {
None

@@ -107,7 +103,7 @@ mod tests {

#[test]
fn test_development_programs() {
assert_eq!(get(OperatingMode::Development, 0).unwrap().len(), 9);
assert_eq!(get(OperatingMode::Development, 0).unwrap().len(), 10);
assert_eq!(get(OperatingMode::Development, 1), None);
}
@ -3,18 +3,18 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-core = { path = "../core", version = "0.20.0" }
|
||||
solana-client = { path = "../client", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-core = { path = "../core", version = "0.20.2" }
|
||||
solana-client = { path = "../client", version = "0.20.2" }
|
||||
solana-logger = { path = "../logger", version = "0.20.2" }
|
||||
solana-netutil = { path = "../netutil", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
|
||||
|
||||
|
||||
|
@ -3,11 +3,10 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-install"
|
||||
description = "The solana cluster software installer"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
default-run = "solana-install"
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.11"
|
||||
@ -29,10 +28,10 @@ serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
serde_yaml = "0.8.11"
|
||||
sha2 = "0.8.0"
|
||||
solana-client = { path = "../client", version = "0.20.0" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-client = { path = "../client", version = "0.20.2" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
|
||||
solana-logger = { path = "../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
tar = "0.4.26"
|
||||
tempdir = "0.3.7"
|
||||
url = "2.1.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-keygen"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana key generation utility"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -15,7 +15,7 @@ edition = "2018"
|
||||
clap = "2.33"
|
||||
dirs = "2.0.2"
|
||||
rpassword = "4.0"
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
tiny-bip39 = "0.6.2"
|
||||
|
||||
[[bin]]
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-ledger-tool"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -15,10 +15,10 @@ serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
serde_json = "1.0.41"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-ledger = { path = "../ledger", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-ledger = { path = "../ledger", version = "0.20.2" }
|
||||
solana-logger = { path = "../logger", version = "0.20.2" }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "0.11"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-ledger"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana ledger"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -26,16 +26,16 @@ rayon = "1.2.0"
|
||||
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-measure = { path = "../measure", version = "0.20.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.20.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.2" }
|
||||
solana-logger = { path = "../logger", version = "0.20.2" }
|
||||
solana-measure = { path = "../measure", version = "0.20.2" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.2" }
|
||||
solana-metrics = { path = "../metrics", version = "0.20.2" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.2" }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
|
||||
sys-info = "0.5.8"
|
||||
tar = "0.4.26"
|
||||
tempfile = "3.1.0"
|
||||
@ -49,7 +49,7 @@ features = ["lz4"]
|
||||
|
||||
[dev-dependencies]
|
||||
matches = "0.1.6"
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -17,6 +17,7 @@ use rayon::iter::IntoParallelRefIterator;
|
||||
use rayon::iter::ParallelIterator;
|
||||
use rayon::ThreadPool;
|
||||
use rocksdb::DBRawIterator;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{datapoint_debug, datapoint_error};
|
||||
use solana_rayon_threadlimit::get_thread_count;
|
||||
use solana_sdk::clock::Slot;
|
||||
@ -30,7 +31,7 @@ use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::rc::Rc;
|
||||
use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
|
||||
pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";
|
||||
|
||||
@ -41,7 +42,6 @@ thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::
|
||||
|
||||
pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000;
|
||||
|
||||
pub type SlotMetaWorkingSetEntry = (Rc<RefCell<SlotMeta>>, Option<SlotMeta>);
|
||||
pub type CompletedSlotsReceiver = Receiver<Vec<u64>>;
|
||||
|
||||
// ledger window
|
||||
@@ -55,10 +55,80 @@ pub struct Blocktree {
data_shred_cf: LedgerColumn<cf::ShredData>,
code_shred_cf: LedgerColumn<cf::ShredCode>,
last_root: Arc<RwLock<u64>>,
insert_shreds_lock: Arc<Mutex<()>>,
pub new_shreds_signals: Vec<SyncSender<bool>>,
pub completed_slots_senders: Vec<SyncSender<Vec<u64>>>,
}

pub struct IndexMetaWorkingSetEntry {
index: Index,
// true only if at least one shred for this Index was inserted since the time this
// struct was created
did_insert_occur: bool,
}

pub struct SlotMetaWorkingSetEntry {
new_slot_meta: Rc<RefCell<SlotMeta>>,
old_slot_meta: Option<SlotMeta>,
// True only if at least one shred for this SlotMeta was inserted since the time this
// struct was created.
did_insert_occur: bool,
}

pub struct BlocktreeInsertionMetrics {
pub num_shreds: usize,
pub insert_lock_elapsed: u64,
pub insert_shreds_elapsed: u64,
pub shred_recovery_elapsed: u64,
pub chaining_elapsed: u64,
pub commit_working_sets_elapsed: u64,
pub write_batch_elapsed: u64,
pub total_elapsed: u64,
pub num_inserted: u64,
pub num_recovered: usize,
pub index_meta_time: u64,
}

impl SlotMetaWorkingSetEntry {
fn new(new_slot_meta: Rc<RefCell<SlotMeta>>, old_slot_meta: Option<SlotMeta>) -> Self {
Self {
new_slot_meta,
old_slot_meta,
did_insert_occur: false,
}
}
}

impl BlocktreeInsertionMetrics {
pub fn report_metrics(&self, metric_name: &'static str) {
datapoint_debug!(
metric_name,
("num_shreds", self.num_shreds as i64, i64),
("total_elapsed", self.total_elapsed as i64, i64),
("insert_lock_elapsed", self.insert_lock_elapsed as i64, i64),
(
"insert_shreds_elapsed",
self.insert_shreds_elapsed as i64,
i64
),
(
"shred_recovery_elapsed",
self.shred_recovery_elapsed as i64,
i64
),
("chaining_elapsed", self.chaining_elapsed as i64, i64),
(
"commit_working_sets_elapsed",
self.commit_working_sets_elapsed as i64,
i64
),
("write_batch_elapsed", self.write_batch_elapsed as i64, i64),
("num_inserted", self.num_inserted as i64, i64),
("num_recovered", self.num_recovered as i64, i64),
);
}
}
||||
|
||||
impl Blocktree {
|
||||
/// Opens a Ledger in directory, provides "infinite" window of shreds
|
||||
pub fn open(ledger_path: &Path) -> Result<Blocktree> {
|
||||
@ -106,6 +176,7 @@ impl Blocktree {
|
||||
code_shred_cf,
|
||||
new_shreds_signals: vec![],
|
||||
completed_slots_senders: vec![],
|
||||
insert_shreds_lock: Arc::new(Mutex::new(())),
|
||||
last_root,
|
||||
})
|
||||
}
|
||||
@ -259,7 +330,7 @@ impl Blocktree {
|
||||
fn try_shred_recovery(
|
||||
db: &Database,
|
||||
erasure_metas: &HashMap<(u64, u64), ErasureMeta>,
|
||||
index_working_set: &HashMap<u64, Index>,
|
||||
index_working_set: &HashMap<u64, IndexMetaWorkingSetEntry>,
|
||||
prev_inserted_datas: &mut HashMap<(u64, u64), Shred>,
|
||||
prev_inserted_codes: &mut HashMap<(u64, u64), Shred>,
|
||||
) -> Vec<Shred> {
|
||||
@ -284,7 +355,8 @@ impl Blocktree {
|
||||
);
|
||||
};
|
||||
|
||||
let index = index_working_set.get(&slot).expect("Index");
|
||||
let index_meta_entry = index_working_set.get(&slot).expect("Index");
|
||||
let index = &index_meta_entry.index;
|
||||
match erasure_meta.status(&index) {
|
||||
ErasureMetaStatus::CanRecover => {
|
||||
// Find shreds for this erasure set and try recovery
|
||||
@ -358,7 +430,13 @@ impl Blocktree {
|
||||
&self,
|
||||
shreds: Vec<Shred>,
|
||||
leader_schedule: Option<&Arc<LeaderScheduleCache>>,
|
||||
) -> Result<()> {
|
||||
) -> Result<BlocktreeInsertionMetrics> {
|
||||
let mut total_start = Measure::start("Total elapsed");
|
||||
let mut start = Measure::start("Blocktree lock");
|
||||
let _lock = self.insert_shreds_lock.lock().unwrap();
|
||||
start.stop();
|
||||
let insert_lock_elapsed = start.as_us();
|
||||
|
||||
let db = &*self.db;
|
||||
let mut write_batch = db.batch()?;
|
||||
|
||||
@ -368,26 +446,43 @@ impl Blocktree {
|
||||
let mut slot_meta_working_set = HashMap::new();
|
||||
let mut index_working_set = HashMap::new();
|
||||
|
||||
let num_shreds = shreds.len();
|
||||
let mut start = Measure::start("Shred insertion");
|
||||
let mut num_inserted = 0;
|
||||
let mut index_meta_time = 0;
|
||||
shreds.into_iter().for_each(|shred| {
|
||||
if shred.is_data() {
|
||||
self.check_insert_data_shred(
|
||||
shred,
|
||||
&mut index_working_set,
|
||||
&mut slot_meta_working_set,
|
||||
&mut write_batch,
|
||||
&mut just_inserted_data_shreds,
|
||||
);
|
||||
} else if shred.is_code() {
|
||||
self.check_insert_coding_shred(
|
||||
shred,
|
||||
&mut erasure_metas,
|
||||
&mut index_working_set,
|
||||
&mut write_batch,
|
||||
&mut just_inserted_coding_shreds,
|
||||
);
|
||||
let insert_success = {
|
||||
if shred.is_data() {
|
||||
self.check_insert_data_shred(
|
||||
shred,
|
||||
&mut index_working_set,
|
||||
&mut slot_meta_working_set,
|
||||
&mut write_batch,
|
||||
&mut just_inserted_data_shreds,
|
||||
&mut index_meta_time,
|
||||
)
|
||||
} else if shred.is_code() {
|
||||
self.check_insert_coding_shred(
|
||||
shred,
|
||||
&mut erasure_metas,
|
||||
&mut index_working_set,
|
||||
&mut write_batch,
|
||||
&mut just_inserted_coding_shreds,
|
||||
&mut index_meta_time,
|
||||
)
|
||||
} else {
|
||||
panic!("There should be no other case");
|
||||
}
|
||||
};
|
||||
if insert_success {
|
||||
num_inserted += 1;
|
||||
}
|
||||
});
|
||||
start.stop();
|
||||
|
||||
let insert_shreds_elapsed = start.as_us();
|
||||
let mut start = Measure::start("Shred recovery");
|
||||
let mut num_recovered = 0;
|
||||
if let Some(leader_schedule_cache) = leader_schedule {
|
||||
let recovered_data = Self::try_shred_recovery(
|
||||
&db,
|
||||
@ -397,6 +492,7 @@ impl Blocktree {
|
||||
&mut just_inserted_coding_shreds,
|
||||
);
|
||||
|
||||
num_recovered = recovered_data.len();
|
||||
recovered_data.into_iter().for_each(|shred| {
|
||||
if let Some(leader) = leader_schedule_cache.slot_leader_at(shred.slot(), None) {
|
||||
if shred.verify(&leader) {
|
||||
@ -406,15 +502,23 @@ impl Blocktree {
|
||||
&mut slot_meta_working_set,
|
||||
&mut write_batch,
|
||||
&mut just_inserted_coding_shreds,
|
||||
)
|
||||
&mut index_meta_time,
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
start.stop();
|
||||
let shred_recovery_elapsed = start.as_us();
|
||||
|
||||
// Handle chaining for the working set
|
||||
handle_chaining(&self.db, &mut write_batch, &slot_meta_working_set)?;
|
||||
let mut start = Measure::start("Shred recovery");
|
||||
// Handle chaining for the members of the slot_meta_working_set that were inserted into,
|
||||
// drop the others
|
||||
handle_chaining(&self.db, &mut write_batch, &mut slot_meta_working_set)?;
|
||||
start.stop();
|
||||
let chaining_elapsed = start.as_us();
|
||||
|
||||
let mut start = Measure::start("Commit Working Sets");
|
||||
let (should_signal, newly_completed_slots) = commit_slot_meta_working_set(
|
||||
&slot_meta_working_set,
|
||||
&self.completed_slots_senders,
|
||||
@ -425,11 +529,18 @@ impl Blocktree {
|
||||
write_batch.put::<cf::ErasureMeta>((slot, set_index), &erasure_meta)?;
|
||||
}
|
||||
|
||||
for (&slot, index) in index_working_set.iter() {
|
||||
write_batch.put::<cf::Index>(slot, index)?;
|
||||
for (&slot, index_working_set_entry) in index_working_set.iter() {
|
||||
if index_working_set_entry.did_insert_occur {
|
||||
write_batch.put::<cf::Index>(slot, &index_working_set_entry.index)?;
|
||||
}
|
||||
}
|
||||
start.stop();
|
||||
let commit_working_sets_elapsed = start.as_us();
|
||||
|
||||
let mut start = Measure::start("Write Batch");
|
||||
self.db.write(write_batch)?;
|
||||
start.stop();
|
||||
let write_batch_elapsed = start.as_us();
|
||||
|
||||
if should_signal {
|
||||
for signal in &self.new_shreds_signals {
|
||||
@ -444,82 +555,96 @@ impl Blocktree {
|
||||
newly_completed_slots,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
total_start.stop();
|
||||
|
||||
Ok(BlocktreeInsertionMetrics {
|
||||
num_shreds,
|
||||
total_elapsed: total_start.as_us(),
|
||||
insert_lock_elapsed,
|
||||
insert_shreds_elapsed,
|
||||
shred_recovery_elapsed,
|
||||
chaining_elapsed,
|
||||
commit_working_sets_elapsed,
|
||||
write_batch_elapsed,
|
||||
num_inserted,
|
||||
num_recovered,
|
||||
index_meta_time,
|
||||
})
|
||||
}
|
||||
|
||||
fn check_insert_coding_shred(
|
||||
&self,
|
||||
shred: Shred,
|
||||
erasure_metas: &mut HashMap<(u64, u64), ErasureMeta>,
|
||||
index_working_set: &mut HashMap<u64, Index>,
|
||||
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
|
||||
write_batch: &mut WriteBatch,
|
||||
just_inserted_coding_shreds: &mut HashMap<(u64, u64), Shred>,
|
||||
) {
|
||||
index_meta_time: &mut u64,
|
||||
) -> bool {
|
||||
let slot = shred.slot();
|
||||
let shred_index = u64::from(shred.index());
|
||||
|
||||
let (index_meta, mut new_index_meta) =
|
||||
get_index_meta_entry(&self.db, slot, index_working_set);
|
||||
let index_meta_working_set_entry =
|
||||
get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
|
||||
|
||||
let index_meta = index_meta.unwrap_or_else(|| new_index_meta.as_mut().unwrap());
|
||||
let index_meta = &mut index_meta_working_set_entry.index;
|
||||
// This gives the index of first coding shred in this FEC block
|
||||
// So, all coding shreds in a given FEC block will have the same set index
|
||||
if Blocktree::should_insert_coding_shred(&shred, index_meta.coding(), &self.last_root) {
|
||||
if let Ok(()) = self.insert_coding_shred(erasure_metas, index_meta, &shred, write_batch)
|
||||
{
|
||||
just_inserted_coding_shreds
|
||||
.entry((slot, shred_index))
|
||||
.or_insert_with(|| shred);
|
||||
new_index_meta.map(|n| index_working_set.insert(slot, n));
|
||||
}
|
||||
self.insert_coding_shred(erasure_metas, index_meta, &shred, write_batch)
|
||||
.map(|_| {
|
||||
// Insert was a success!
|
||||
just_inserted_coding_shreds
|
||||
.entry((slot, shred_index))
|
||||
.or_insert_with(|| shred);
|
||||
|
||||
index_meta_working_set_entry.did_insert_occur = true;
|
||||
})
|
||||
.is_ok()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn check_insert_data_shred(
|
||||
&self,
|
||||
shred: Shred,
|
||||
index_working_set: &mut HashMap<u64, Index>,
|
||||
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
|
||||
slot_meta_working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
|
||||
write_batch: &mut WriteBatch,
|
||||
just_inserted_data_shreds: &mut HashMap<(u64, u64), Shred>,
|
||||
) {
|
||||
index_meta_time: &mut u64,
|
||||
) -> bool {
|
||||
let slot = shred.slot();
|
||||
let shred_index = u64::from(shred.index());
|
||||
let (index_meta, mut new_index_meta) =
|
||||
get_index_meta_entry(&self.db, slot, index_working_set);
|
||||
let (slot_meta_entry, mut new_slot_meta_entry) =
|
||||
|
||||
let index_meta_working_set_entry =
|
||||
get_index_meta_entry(&self.db, slot, index_working_set, index_meta_time);
|
||||
|
||||
let index_meta = &mut index_meta_working_set_entry.index;
|
||||
let slot_meta_entry =
|
||||
get_slot_meta_entry(&self.db, slot_meta_working_set, slot, shred.parent());
|
||||
|
||||
let insert_success = {
|
||||
let index_meta = index_meta.unwrap_or_else(|| new_index_meta.as_mut().unwrap());
|
||||
let entry = slot_meta_entry.unwrap_or_else(|| new_slot_meta_entry.as_mut().unwrap());
|
||||
let mut slot_meta = entry.0.borrow_mut();
|
||||
let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut();
|
||||
|
||||
if Blocktree::should_insert_data_shred(
|
||||
&shred,
|
||||
&slot_meta,
|
||||
index_meta.data(),
|
||||
&self.last_root,
|
||||
) {
|
||||
if let Ok(()) = self.insert_data_shred(
|
||||
&mut slot_meta,
|
||||
index_meta.data_mut(),
|
||||
&shred,
|
||||
write_batch,
|
||||
) {
|
||||
just_inserted_data_shreds.insert((slot, shred_index), shred);
|
||||
new_index_meta.map(|n| index_working_set.insert(slot, n));
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
if Blocktree::should_insert_data_shred(
|
||||
&shred,
|
||||
slot_meta,
|
||||
index_meta.data(),
|
||||
&self.last_root,
|
||||
) {
|
||||
if let Ok(()) =
|
||||
self.insert_data_shred(slot_meta, index_meta.data_mut(), &shred, write_batch)
|
||||
{
|
||||
just_inserted_data_shreds.insert((slot, shred_index), shred);
|
||||
index_meta_working_set_entry.did_insert_occur = true;
|
||||
slot_meta_entry.did_insert_occur = true;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if insert_success {
|
||||
new_slot_meta_entry.map(|n| slot_meta_working_set.insert(slot, n));
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
@ -647,9 +772,7 @@ impl Blocktree {
|
||||
}
|
||||
|
||||
let last_root = *last_root.read().unwrap();
|
||||
verify_shred_slots(slot, slot_meta.parent_slot, last_root);
|
||||
|
||||
true
|
||||
verify_shred_slots(slot, slot_meta.parent_slot, last_root)
|
||||
}
|
||||
|
||||
fn insert_data_shred(
|
||||
@ -661,7 +784,6 @@ impl Blocktree {
|
||||
) -> Result<()> {
|
||||
let slot = shred.slot();
|
||||
let index = u64::from(shred.index());
|
||||
let parent = shred.parent();
|
||||
|
||||
let last_in_slot = if shred.last_in_slot() {
|
||||
debug!("got last in slot");
|
||||
@ -677,9 +799,8 @@ impl Blocktree {
|
||||
false
|
||||
};
|
||||
|
||||
if is_orphan(slot_meta) {
|
||||
slot_meta.parent_slot = parent;
|
||||
}
|
||||
// Parent for slot meta should have been set by this point
|
||||
assert!(!is_orphan(slot_meta));
|
||||
|
||||
let data_cf = self.db.column::<cf::ShredData>();
|
||||
|
||||
@@ -1218,22 +1339,24 @@ fn update_slot_meta(
fn get_index_meta_entry<'a>(
db: &Database,
slot: u64,
index_working_set: &'a mut HashMap<u64, Index>,
) -> (Option<&'a mut Index>, Option<Index>) {
index_working_set: &'a mut HashMap<u64, IndexMetaWorkingSetEntry>,
index_meta_time: &mut u64,
) -> &'a mut IndexMetaWorkingSetEntry {
let index_cf = db.column::<cf::Index>();
index_working_set
.get_mut(&slot)
.map(|i| (Some(i), None))
.unwrap_or_else(|| {
let newly_inserted_meta = Some(
index_cf
.get(slot)
.unwrap()
.unwrap_or_else(|| Index::new(slot)),
);

(None, newly_inserted_meta)
})
let mut total_start = Measure::start("Total elapsed");
let res = index_working_set.entry(slot).or_insert_with(|| {
let newly_inserted_meta = index_cf
.get(slot)
.unwrap()
.unwrap_or_else(|| Index::new(slot));
IndexMetaWorkingSetEntry {
index: newly_inserted_meta,
did_insert_occur: false,
}
});
total_start.stop();
*index_meta_time += total_start.as_us();
res
}
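The rewrite above swaps the old (Option<&mut Index>, Option<Index>) return pair for a single mutable reference obtained through HashMap::entry and or_insert_with, so callers no longer have to re-insert a newly created entry by hand. A compact std-only sketch of that pattern with an illustrative entry type:

```rust
use std::collections::HashMap;

// Illustrative stand-in for IndexMetaWorkingSetEntry.
#[derive(Debug, Default)]
struct WorkingSetEntry {
    index: u64,
    did_insert_occur: bool,
}

fn get_entry(working_set: &mut HashMap<u64, WorkingSetEntry>, slot: u64) -> &mut WorkingSetEntry {
    // Look the slot up once; lazily load-or-create the entry exactly once.
    working_set
        .entry(slot)
        .or_insert_with(|| WorkingSetEntry { index: slot, did_insert_occur: false })
}

fn main() {
    let mut working_set: HashMap<u64, WorkingSetEntry> = HashMap::new();
    get_entry(&mut working_set, 3).did_insert_occur = true;
    assert!(working_set[&3].did_insert_occur);
    println!("{:?}", working_set);
}
```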
fn get_slot_meta_entry<'a>(
|
||||
@ -1241,39 +1364,30 @@ fn get_slot_meta_entry<'a>(
|
||||
slot_meta_working_set: &'a mut HashMap<u64, SlotMetaWorkingSetEntry>,
|
||||
slot: u64,
|
||||
parent_slot: u64,
|
||||
) -> (
|
||||
Option<&'a mut SlotMetaWorkingSetEntry>,
|
||||
Option<SlotMetaWorkingSetEntry>,
|
||||
) {
|
||||
) -> &'a mut SlotMetaWorkingSetEntry {
|
||||
let meta_cf = db.column::<cf::SlotMeta>();
|
||||
|
||||
// Check if we've already inserted the slot metadata for this blob's slot
|
||||
slot_meta_working_set
|
||||
.get_mut(&slot)
|
||||
.map(|s| (Some(s), None))
|
||||
.unwrap_or_else(|| {
|
||||
// Store a 2-tuple of the metadata (working copy, backup copy)
|
||||
if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") {
|
||||
let backup = Some(meta.clone());
|
||||
// If parent_slot == std::u64::MAX, then this is one of the orphans inserted
|
||||
// during the chaining process, see the function find_slot_meta_in_cached_state()
|
||||
// for details. Slots that are orphans are missing a parent_slot, so we should
|
||||
// fill in the parent now that we know it.
|
||||
if is_orphan(&meta) {
|
||||
meta.parent_slot = parent_slot;
|
||||
}
|
||||
|
||||
(None, Some((Rc::new(RefCell::new(meta)), backup)))
|
||||
} else {
|
||||
(
|
||||
None,
|
||||
Some((
|
||||
Rc::new(RefCell::new(SlotMeta::new(slot, parent_slot))),
|
||||
None,
|
||||
)),
|
||||
)
|
||||
slot_meta_working_set.entry(slot).or_insert_with(|| {
|
||||
// Store a 2-tuple of the metadata (working copy, backup copy)
|
||||
if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") {
|
||||
let backup = Some(meta.clone());
|
||||
// If parent_slot == std::u64::MAX, then this is one of the orphans inserted
|
||||
// during the chaining process, see the function find_slot_meta_in_cached_state()
|
||||
// for details. Slots that are orphans are missing a parent_slot, so we should
|
||||
// fill in the parent now that we know it.
|
||||
if is_orphan(&meta) {
|
||||
meta.parent_slot = parent_slot;
|
||||
}
|
||||
})
|
||||
|
||||
SlotMetaWorkingSetEntry::new(Rc::new(RefCell::new(meta)), backup)
|
||||
} else {
|
||||
SlotMetaWorkingSetEntry::new(
|
||||
Rc::new(RefCell::new(SlotMeta::new(slot, parent_slot))),
|
||||
None,
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn is_valid_write_to_slot_0(slot_to_write: u64, parent_slot: u64, last_root: u64) -> bool {
|
||||
@ -1327,8 +1441,11 @@ fn commit_slot_meta_working_set(
|
||||
|
||||
// Check if any metadata was changed, if so, insert the new version of the
|
||||
// metadata into the write batch
|
||||
for (slot, (meta, meta_backup)) in slot_meta_working_set.iter() {
|
||||
let meta: &SlotMeta = &RefCell::borrow(&*meta);
|
||||
for (slot, slot_meta_entry) in slot_meta_working_set.iter() {
|
||||
// Any slot that wasn't written to should have been filtered out by now.
|
||||
assert!(slot_meta_entry.did_insert_occur);
|
||||
let meta: &SlotMeta = &RefCell::borrow(&*slot_meta_entry.new_slot_meta);
|
||||
let meta_backup = &slot_meta_entry.old_slot_meta;
|
||||
if !completed_slots_senders.is_empty() && is_newly_completed_slot(meta, meta_backup) {
|
||||
newly_completed_slots.push(*slot);
|
||||
}
|
||||
@ -1388,8 +1505,8 @@ fn find_slot_meta_in_cached_state<'a>(
|
||||
chained_slots: &'a HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
||||
slot: u64,
|
||||
) -> Result<Option<Rc<RefCell<SlotMeta>>>> {
|
||||
if let Some((entry, _)) = working_set.get(&slot) {
|
||||
Ok(Some(entry.clone()))
|
||||
if let Some(entry) = working_set.get(&slot) {
|
||||
Ok(Some(entry.new_slot_meta.clone()))
|
||||
} else if let Some(entry) = chained_slots.get(&slot) {
|
||||
Ok(Some(entry.clone()))
|
||||
} else {
|
||||
@ -1401,12 +1518,14 @@ fn find_slot_meta_in_cached_state<'a>(
|
||||
fn handle_chaining(
|
||||
db: &Database,
|
||||
write_batch: &mut WriteBatch,
|
||||
working_set: &HashMap<u64, SlotMetaWorkingSetEntry>,
|
||||
working_set: &mut HashMap<u64, SlotMetaWorkingSetEntry>,
|
||||
) -> Result<()> {
|
||||
// Handle chaining for all the SlotMetas that were inserted into
|
||||
working_set.retain(|_, entry| entry.did_insert_occur);
|
||||
let mut new_chained_slots = HashMap::new();
|
||||
let working_set_slots: Vec<_> = working_set.iter().map(|s| *s.0).collect();
|
||||
let working_set_slots: Vec<_> = working_set.keys().collect();
|
||||
for slot in working_set_slots {
|
||||
handle_chaining_for_slot(db, write_batch, working_set, &mut new_chained_slots, slot)?;
|
||||
handle_chaining_for_slot(db, write_batch, working_set, &mut new_chained_slots, *slot)?;
|
||||
}
|
||||
|
||||
// Write all the newly changed slots in new_chained_slots to the write_batch
|
||||
@ -1424,10 +1543,13 @@ fn handle_chaining_for_slot(
|
||||
new_chained_slots: &mut HashMap<u64, Rc<RefCell<SlotMeta>>>,
|
||||
slot: u64,
|
||||
) -> Result<()> {
|
||||
let (meta, meta_backup) = working_set
|
||||
let slot_meta_entry = working_set
|
||||
.get(&slot)
|
||||
.expect("Slot must exist in the working_set hashmap");
|
||||
|
||||
let meta = &slot_meta_entry.new_slot_meta;
|
||||
let meta_backup = &slot_meta_entry.old_slot_meta;
|
||||
|
||||
{
|
||||
let mut meta_mut = meta.borrow_mut();
|
||||
let was_orphan_slot = meta_backup.is_some() && is_orphan(meta_backup.as_ref().unwrap());
|
||||
@ -3659,5 +3781,37 @@ pub mod tests {
|
||||
.expect("Expected successful write of shreds");
|
||||
assert!(blocktree.get_slot_entries(slot, 0, None).is_err());
|
||||
}
|
||||
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_insert_but_modify_slot_meta() {
|
||||
// This tests correctness of the SlotMeta in various cases in which a shred
|
||||
// that gets filtered out by checks
|
||||
let (shreds0, _) = make_slot_entries(0, 0, 200);
|
||||
let blocktree_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blocktree = Blocktree::open(&blocktree_path).unwrap();
|
||||
|
||||
// Insert the first 5 shreds, we don't have a "is_last" shred yet
|
||||
blocktree
|
||||
.insert_shreds(shreds0[0..5].to_vec(), None)
|
||||
.unwrap();
|
||||
|
||||
// Insert a repetitive shred for slot 's', should get ignored, but also
|
||||
// insert shreds that chains to 's', should see the update in the SlotMeta
|
||||
// for 's'.
|
||||
let (mut shreds2, _) = make_slot_entries(2, 0, 200);
|
||||
let (mut shreds3, _) = make_slot_entries(3, 0, 200);
|
||||
shreds2.push(shreds0[1].clone());
|
||||
shreds3.insert(0, shreds0[1].clone());
|
||||
blocktree.insert_shreds(shreds2, None).unwrap();
|
||||
let slot_meta = blocktree.meta(0).unwrap().unwrap();
|
||||
assert_eq!(slot_meta.next_slots, vec![2]);
|
||||
blocktree.insert_shreds(shreds3, None).unwrap();
|
||||
let slot_meta = blocktree.meta(0).unwrap().unwrap();
|
||||
assert_eq!(slot_meta.next_slots, vec![2, 3]);
|
||||
}
|
||||
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
|
||||
}
|
||||
}
|
||||
|
@@ -19,7 +19,6 @@ use std::sync::Arc;
// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
const MIN_WRITE_BUFFER_SIZE: u64 = 64 * 1024; // 64KB

// Column family for metadata about a leader slot
const META_CF: &str = "meta";

@@ -129,22 +128,18 @@ impl Rocks {
let db_options = get_db_options();

// Column family names
let meta_cf_descriptor =
ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(SlotMeta::NAME));
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
let dead_slots_cf_descriptor =
ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(DeadSlots::NAME));
ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options());
let erasure_meta_cf_descriptor =
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(ErasureMeta::NAME));
let orphans_cf_descriptor =
ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(Orphans::NAME));
let root_cf_descriptor =
ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(Root::NAME));
let index_cf_descriptor =
ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(Index::NAME));
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
let orphans_cf_descriptor = ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options());
let root_cf_descriptor = ColumnFamilyDescriptor::new(Root::NAME, get_cf_options());
let index_cf_descriptor = ColumnFamilyDescriptor::new(Index::NAME, get_cf_options());
let shred_data_cf_descriptor =
ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options(ShredData::NAME));
ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options());
let shred_code_cf_descriptor =
ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options(ShredCode::NAME));
ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options());

let cfs = vec![
meta_cf_descriptor,

@@ -679,27 +674,13 @@ impl<'a> WriteBatch<'a> {
}
}

fn get_cf_options(name: &'static str) -> Options {
use columns::{ErasureMeta, Index, ShredCode, ShredData};

fn get_cf_options() -> Options {
let mut options = Options::default();
match name {
ShredCode::NAME | ShredData::NAME | Index::NAME | ErasureMeta::NAME => {
// 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM
options.set_max_write_buffer_number(8);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
}
_ => {
// We want smaller CFs to flush faster. This results in more WAL files but lowers
// overall WAL space utilization and increases flush frequency
options.set_write_buffer_size(MIN_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MIN_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MIN_WRITE_BUFFER_SIZE);
options.set_level_zero_file_num_compaction_trigger(1);
}
}
// 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM
options.set_max_write_buffer_number(8);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
options
}
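The consolidated get_cf_options applies the larger sizing to every column family, and the comment's budget (256MB write buffers, up to 8 per column, six such columns) can be checked directly. A quick illustrative calculation, not part of the diff:

```rust
fn main() {
    const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB, as in the diff
    let per_column = MAX_WRITE_BUFFER_SIZE * 8; // up to 8 write buffers per column = 2GB
    let six_columns = per_column * 6; // six columns sized this way = 12GB
    println!(
        "{} GB per column, {} GB across six columns",
        per_column >> 30,
        six_columns >> 30
    );
}
```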
ledger/tests/blocktree.rs (new file, 51 lines)

@@ -0,0 +1,51 @@
#[macro_use]
extern crate solana_ledger;

use solana_ledger::blocktree::{self, get_tmp_ledger_path, Blocktree};
use solana_ledger::entry;
use solana_sdk::hash::Hash;
use std::sync::Arc;
use std::thread::Builder;

#[test]
fn test_multiple_threads_insert_shred() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());

for _ in 0..100 {
let num_threads = 10;

// Create `num_threads` different ticks in slots 1..num_threads + 1, all
// with parent = slot 0
let threads: Vec<_> = (0..num_threads)
.map(|i| {
let entries = entry::create_ticks(1, Hash::default());
let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false);
let blocktree_ = blocktree.clone();
Builder::new()
.name("blocktree-writer".to_string())
.spawn(move || {
blocktree_.insert_shreds(shreds, None).unwrap();
})
.unwrap()
})
.collect();

for t in threads {
t.join().unwrap()
}

// Check slot 0 has the correct children
let mut meta0 = blocktree.meta(0).unwrap().unwrap();
meta0.next_slots.sort();
let expected_next_slots: Vec<_> = (1..num_threads + 1).collect();
assert_eq!(meta0.next_slots, expected_next_slots);

// Delete slots for next iteration
blocktree.purge_slots(0, None);
}

// Cleanup
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
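This new test hammers insert_shreds from ten threads at once, which is exactly what the insert_shreds_lock added to Blocktree is there to serialize. A minimal std-only sketch of that idea, with stand-in names (the real lock guards blocktree metadata and RocksDB write batches, not a plain Vec):

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let insert_lock = Arc::new(Mutex::new(())); // stands in for insert_shreds_lock
    let next_slots = Arc::new(Mutex::new(Vec::new())); // stands in for shared slot metadata

    let handles: Vec<_> = (1..=10u64)
        .map(|slot| {
            let (lock, slots) = (Arc::clone(&insert_lock), Arc::clone(&next_slots));
            thread::spawn(move || {
                let _guard = lock.lock().unwrap(); // one inserter mutates metadata at a time
                slots.lock().unwrap().push(slot);
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    let mut written = next_slots.lock().unwrap().clone();
    written.sort();
    assert_eq!(written, (1..=10u64).collect::<Vec<_>>());
}
```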
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-local-cluster"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -11,29 +11,29 @@ homepage = "https://solana.com/"
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
rand = "0.6.5"
|
||||
solana-bench-exchange = { path = "../bench-exchange", version = "0.20.0" }
|
||||
solana-bench-tps = { path = "../bench-tps", version = "0.20.0" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
|
||||
solana-core = { path = "../core", version = "0.20.0" }
|
||||
solana-client = { path = "../client", version = "0.20.0" }
|
||||
solana-drone = { path = "../drone", version = "0.20.0" }
|
||||
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" }
|
||||
solana-ledger = { path = "../ledger", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
|
||||
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
|
||||
solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" }
|
||||
solana-vest-api = { path = "../programs/vest_api", version = "0.20.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
|
||||
solana-bench-exchange = { path = "../bench-exchange", version = "0.20.2" }
|
||||
solana-bench-tps = { path = "../bench-tps", version = "0.20.2" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.2" }
|
||||
solana-core = { path = "../core", version = "0.20.2" }
|
||||
solana-client = { path = "../client", version = "0.20.2" }
|
||||
solana-drone = { path = "../drone", version = "0.20.2" }
|
||||
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.2" }
|
||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.2" }
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.2" }
|
||||
solana-ledger = { path = "../ledger", version = "0.20.2" }
|
||||
solana-logger = { path = "../logger", version = "0.20.2" }
|
||||
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.2", optional = true }
|
||||
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.2", optional = true }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.2" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.2" }
|
||||
solana-storage-api = { path = "../programs/storage_api", version = "0.20.2" }
|
||||
solana-storage-program = { path = "../programs/storage_program", version = "0.20.2" }
|
||||
solana-vest-api = { path = "../programs/vest_api", version = "0.20.2" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.2" }
|
||||
symlink = "0.1.0"
|
||||
tempfile = "3.1.0"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.2.0"
|
||||
|
@@ -29,7 +29,6 @@ extern crate solana_drone;
extern crate solana_exchange_program;

#[cfg(feature = "move")]
#[macro_use]
#[cfg(test)]
extern crate solana_move_loader_program;

@@ -7,14 +7,14 @@ use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
use solana_core::validator::ValidatorConfig;
use solana_drone::drone::run_local_drone;
#[cfg(feature = "move")]
use solana_move_loader_program;
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::mpsc::channel;
use std::time::Duration;

fn test_bench_tps_local_cluster(config: Config) {
#[cfg(feature = "move")]
let native_instruction_processors = vec![solana_move_loader_program!()];
let native_instruction_processors = vec![solana_move_loader_program()];

#[cfg(not(feature = "move"))]
let native_instruction_processors = vec![];

@@ -71,12 +71,12 @@ fn test_bench_tps_local_cluster_solana() {
test_bench_tps_local_cluster(config);
}

#[ignore]
#[test]
#[serial]
fn test_bench_tps_local_cluster_move() {
let mut config = Config::default();
config.tx_count = 100;
config.duration = Duration::from_secs(20);
config.duration = Duration::from_secs(30);
config.use_move = true;

test_bench_tps_local_cluster(config);
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-logger"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Logger"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-measure"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -11,4 +11,4 @@ license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-merkle-tree"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Merkle Tree"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -9,7 +9,7 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-metrics"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Metrics"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,7 +13,7 @@ env_logger = "0.7.1"
|
||||
lazy_static = "1.4.0"
|
||||
log = "0.4.8"
|
||||
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.2" }
|
||||
sys-info = "0.5.8"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@ -15,8 +15,8 @@
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": 1038,
|
||||
"iteration": 1571342384471,
|
||||
"id": 1069,
|
||||
"iteration": 1572390574899,
|
||||
"links": [
|
||||
{
|
||||
"asDropdown": true,
|
||||
@ -4317,7 +4317,7 @@
|
||||
"measurement": "cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT \"duration\" / 1000 as \"Generation Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-generate_txs\" WHERE $timeFilter fill(null)\n\n\n\n\n",
|
||||
"query": "SELECT mean(\"duration\") as \"Generation Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-generate_txs\" WHERE $timeFilter GROUP BY time(1s)\n\n\n\n\n",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
@ -4354,7 +4354,7 @@
|
||||
],
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT \"duration\" / 1000 as \"Transmit Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-do_tx_transfers\" WHERE $timeFilter fill(null)",
|
||||
"query": "SELECT mean(\"duration\") as \"Transmit Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-do_tx_transfers\" WHERE $timeFilter GROUP BY time(1s)",
|
||||
"rawQuery": true,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
@ -4391,7 +4391,7 @@
|
||||
],
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT \"duration\" / 1000 as \"Barrier Transaction Confirmation Time\" FROM \"$testnet\".\"autogen\".\"bench-tps-send_barrier_transaction\" WHERE $timeFilter fill(null)",
|
||||
"query": "SELECT mean(\"duration\") as \"Get Blockhash\" FROM \"$testnet\".\"autogen\".\"bench-tps-get_blockhash\" WHERE $timeFilter GROUP BY time(1s)",
|
||||
"rawQuery": true,
|
||||
"refId": "C",
|
||||
"resultFormat": "time_series",
|
||||
@ -4410,6 +4410,43 @@
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"duration\") as \"Get Balance\" FROM \"$testnet\".\"autogen\".\"bench-tps-get_balance\" WHERE $timeFilter GROUP BY time(1s)",
|
||||
"rawQuery": true,
|
||||
"refId": "D",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"value"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
@ -4432,7 +4469,7 @@
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"format": "µs",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
@ -4453,13 +4490,98 @@
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"columns": [],
|
||||
"datasource": "$datasource",
|
||||
"fontSize": "100%",
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 50
|
||||
},
|
||||
"id": 68,
|
||||
"links": [],
|
||||
"pageSize": null,
|
||||
"scroll": true,
|
||||
"showHeader": true,
|
||||
"sort": {
|
||||
"col": null,
|
||||
"desc": false
|
||||
},
|
||||
"styles": [
|
||||
{
|
||||
"alias": "Time",
|
||||
"dateFormat": "YYYY-MM-DD HH:mm:ss",
|
||||
"pattern": "Time",
|
||||
"type": "date"
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"colorMode": null,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"decimals": 2,
|
||||
"pattern": "/.*/",
|
||||
"thresholds": [],
|
||||
"type": "number",
|
||||
"unit": "short"
|
||||
}
|
||||
],
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"hide": false,
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT * FROM \"$testnet\".\"autogen\".\"ramp-tps\" WHERE $timeFilter ORDER BY time DESC ",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "table",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"value"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"title": "Ramp TPS Events",
|
||||
"transform": "table",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 50
|
||||
"y": 55
|
||||
},
|
||||
"id": 38,
|
||||
"panels": [],
|
||||
@ -4477,7 +4599,7 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 51
|
||||
"y": 56
|
||||
},
|
||||
"id": 39,
|
||||
"legend": {
|
||||
@ -4628,7 +4750,7 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 51
|
||||
"y": 56
|
||||
},
|
||||
"id": 40,
|
||||
"legend": {
|
||||
@ -4811,7 +4933,7 @@
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 56
|
||||
"y": 61
|
||||
},
|
||||
"id": 41,
|
||||
"panels": [],
|
||||
@ -4829,9 +4951,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 57
|
||||
"y": 62
|
||||
},
|
||||
"id": 50,
|
||||
"id": 42,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5148,9 +5270,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 8,
|
||||
"y": 57
|
||||
"y": 62
|
||||
},
|
||||
"id": 47,
|
||||
"id": 43,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5506,7 +5628,7 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 16,
|
||||
"y": 57
|
||||
"y": 62
|
||||
},
|
||||
"id": 44,
|
||||
"legend": {
|
||||
@ -5818,9 +5940,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 63
|
||||
"y": 68
|
||||
},
|
||||
"id": 42,
|
||||
"id": 45,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6248,9 +6370,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 8,
|
||||
"y": 63
|
||||
"y": 68
|
||||
},
|
||||
"id": 46,
|
||||
"id": 47,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6291,7 +6413,7 @@
|
||||
],
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT sum(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "F",
|
||||
"resultFormat": "time_series",
|
||||
@ -6328,7 +6450,7 @@
|
||||
],
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT sum(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-record_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-record_lock_contention\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
@ -6365,7 +6487,7 @@
|
||||
],
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT sum(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_overhead\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-tick_overhead\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
@ -6384,6 +6506,43 @@
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT mean(\"count\") FROM \"$testnet\".\"autogen\".\"poh_recorder-record_ms\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "C",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"value"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
@ -6405,7 +6564,7 @@
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "ms",
|
||||
"format": "µs",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
@ -6437,9 +6596,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 16,
|
||||
"y": 63
|
||||
"y": 68
|
||||
},
|
||||
"id": 56,
|
||||
"id": 47,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6558,9 +6717,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 16,
|
||||
"y": 68
|
||||
"y": 73
|
||||
},
|
||||
"id": 55,
|
||||
"id": 48,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6714,9 +6873,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 69
|
||||
"y": 74
|
||||
},
|
||||
"id": 48,
|
||||
"id": 49,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6870,9 +7029,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 8,
|
||||
"y": 69
|
||||
"y": 74
|
||||
},
|
||||
"id": 49,
|
||||
"id": 50,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6985,9 +7144,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 16,
|
||||
"y": 73
|
||||
"y": 78
|
||||
},
|
||||
"id": 53,
|
||||
"id": 51,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7189,9 +7348,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 74
|
||||
"y": 79
|
||||
},
|
||||
"id": 45,
|
||||
"id": 52,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7491,9 +7650,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 8,
|
||||
"y": 74
|
||||
"y": 79
|
||||
},
|
||||
"id": 51,
|
||||
"id": 53,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7639,9 +7798,9 @@
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 79
|
||||
"y": 84
|
||||
},
|
||||
"id": 57,
|
||||
"id": 54,
|
||||
"panels": [],
|
||||
"title": "Tower Consensus",
|
||||
"type": "row"
|
||||
@ -7662,9 +7821,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 80
|
||||
"y": 85
|
||||
},
|
||||
"id": 58,
|
||||
"id": 55,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7822,9 +7981,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 8,
|
||||
"y": 80
|
||||
"y": 85
|
||||
},
|
||||
"id": 54,
|
||||
"id": 56,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7982,9 +8141,9 @@
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 16,
|
||||
"y": 80
|
||||
"y": 85
|
||||
},
|
||||
"id": 59,
|
||||
"id": 57,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8167,9 +8326,9 @@
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 85
|
||||
"y": 90
|
||||
},
|
||||
"id": 60,
|
||||
"id": 58,
|
||||
"panels": [],
|
||||
"repeat": null,
|
||||
"title": "IP Network",
|
||||
@ -8186,9 +8345,9 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 86
|
||||
"y": 91
|
||||
},
|
||||
"id": 61,
|
||||
"id": 59,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8419,9 +8578,9 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 86
|
||||
"y": 91
|
||||
},
|
||||
"id": 62,
|
||||
"id": 60,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8572,9 +8731,9 @@
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 91
|
||||
"y": 96
|
||||
},
|
||||
"id": 63,
|
||||
"id": 61,
|
||||
"panels": [],
|
||||
"title": "Signature Verification",
|
||||
"type": "row"
|
||||
@ -8590,9 +8749,9 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 92
|
||||
"y": 97
|
||||
},
|
||||
"id": 64,
|
||||
"id": 62,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -8711,9 +8870,9 @@
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 92
|
||||
"y": 97
|
||||
},
|
||||
"id": 65,
|
||||
"id": 63,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8860,9 +9019,9 @@
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 97
|
||||
"y": 102
|
||||
},
|
||||
"id": 66,
|
||||
"id": 64,
|
||||
"panels": [],
|
||||
"title": "Snapshots",
|
||||
"type": "row"
|
||||
@ -8878,9 +9037,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 98
|
||||
"y": 103
|
||||
},
|
||||
"id": 67,
|
||||
"id": 65,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -9070,9 +9229,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 8,
|
||||
"y": 98
|
||||
"y": 103
|
||||
},
|
||||
"id": 68,
|
||||
"id": 66,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -9262,9 +9421,9 @@
|
||||
"h": 6,
|
||||
"w": 8,
|
||||
"x": 16,
|
||||
"y": 98
|
||||
"y": 103
|
||||
},
|
||||
"id": 69,
|
||||
"id": 67,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -9543,5 +9702,5 @@
|
||||
"timezone": "",
|
||||
"title": "Testnet Monitor (edge)",
|
||||
"uid": "testnet-edge",
|
||||
"version": 6
|
||||
}
|
||||
"version": 2
|
||||
}
|
||||
|
16
net/gce.sh
@ -662,6 +662,7 @@ EOF
|
||||
set -ex
|
||||
|
||||
if [[ -f /solana-scratch/.instance-startup-complete ]]; then
|
||||
echo reboot
|
||||
$(
|
||||
cd "$here"/scripts/
|
||||
if "$enableGpu"; then
|
||||
@ -671,7 +672,12 @@ if [[ -f /solana-scratch/.instance-startup-complete ]]; then
|
||||
if [[ -n $validatorAdditionalDiskSizeInGb ]]; then
|
||||
cat mount-additional-disk.sh
|
||||
fi
|
||||
|
||||
cat ../../scripts/ulimit-n.sh
|
||||
)
|
||||
if [[ -x ~solana/solana/on-reboot ]]; then
|
||||
sudo -u solana ~solana/solana/on-reboot
|
||||
fi
|
||||
|
||||
# Skip most setup on instance reboot
|
||||
exit 0
|
||||
@ -712,6 +718,7 @@ $(
|
||||
create-solana-user.sh \
|
||||
solana-user-authorized_keys.sh \
|
||||
add-testnet-solana-user-authorized_keys.sh \
|
||||
install-ag.sh \
|
||||
install-certbot.sh \
|
||||
install-earlyoom.sh \
|
||||
install-libssl-compatability.sh \
|
||||
@ -732,6 +739,8 @@ $(
|
||||
)
|
||||
|
||||
cat > /etc/motd <<EOM
|
||||
See startup script log messages in /var/log/syslog for status:
|
||||
$ sudo cat /var/log/syslog | egrep \\(startup-script\\|cloud-init\)
|
||||
$(printNetworkInfo)
|
||||
$(creationInfo)
|
||||
EOM
|
||||
@ -819,7 +828,12 @@ info)
|
||||
printf " %-16s | %-15s | %-15s | %s\n" "$nodeType" "$ip" "$ipPrivate" "$zone"
|
||||
}
|
||||
|
||||
if ! $evalInfo; then
|
||||
if $evalInfo; then
|
||||
echo "NET_NUM_VALIDATORS=${#validatorIpList[@]}"
|
||||
echo "NET_NUM_CLIENTS=${#clientIpList[@]}"
|
||||
echo "NET_NUM_BLOCKSTREAMERS=${#blockstreamerIpList[@]}"
|
||||
echo "NET_NUM_ARCHIVERS=${#archiverIpList[@]}"
|
||||
else
|
||||
printNode "Node Type" "Public IP" "Private IP" "Zone"
|
||||
echo "-------------------+-----------------+-----------------+--------------"
|
||||
fi
|
||||
|
@ -722,9 +722,7 @@ deploy() {
|
||||
# Stagger additional node start time. If too many nodes start simultaneously
|
||||
# the bootstrap node gets more rsync requests from the additional nodes than
|
||||
# it can handle.
|
||||
if ((nodeIndex % 2 == 0)); then
|
||||
sleep 2
|
||||
fi
|
||||
sleep 2
|
||||
fi
|
||||
done
|
||||
|
||||
|
@ -76,7 +76,6 @@ now=\$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
ln -sfT validator.log.\$now validator.log
|
||||
EOF
|
||||
chmod +x ~/solana/on-reboot
|
||||
echo "@reboot ~/solana/on-reboot" | crontab -
|
||||
|
||||
GPU_CUDA_OK=false
|
||||
GPU_FAIL_IF_NONE=false
|
||||
@ -105,7 +104,7 @@ waitForNodeToInit() {
|
||||
echo "--- waiting for $hostname to boot up"
|
||||
SECONDS=
|
||||
while [[ ! -r $initCompleteFile ]]; do
|
||||
if [[ $SECONDS -ge 120 ]]; then
|
||||
if [[ $SECONDS -ge 240 ]]; then
|
||||
echo "^^^ +++"
|
||||
echo "Error: $initCompleteFile not found in $SECONDS seconds"
|
||||
exit 1
|
||||
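The hunk above raises the `waitForNodeToInit` timeout from 120 to 240 seconds. A hedged Rust sketch of the same poll-until-deadline pattern (illustrative only; the marker-file path is hypothetical and the real logic lives in the shell script):

use std::path::Path;
use std::thread;
use std::time::{Duration, Instant};

/// Poll for a marker file and give up after a deadline (240 s here,
/// matching the bumped timeout). Illustrative sketch only.
fn wait_for_node_to_init(init_complete_file: &Path) -> Result<(), String> {
    let deadline = Instant::now() + Duration::from_secs(240);
    while !init_complete_file.exists() {
        if Instant::now() >= deadline {
            return Err(format!(
                "Error: {} not found in 240 seconds",
                init_complete_file.display()
            ));
        }
        thread::sleep(Duration::from_secs(1));
    }
    Ok(())
}

fn main() {
    // Hypothetical marker path used only for this sketch.
    if let Err(e) = wait_for_node_to_init(Path::new("init-complete.marker")) {
        eprintln!("{}", e);
        std::process::exit(1);
    }
}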
@ -262,6 +261,7 @@ EOF
|
||||
args+=(
|
||||
--blockstream /tmp/solana-blockstream.sock
|
||||
--no-voting
|
||||
--dev-no-sigverify
|
||||
)
|
||||
else
|
||||
args+=(--enable-rpc-exit)
|
||||
|
@ -123,10 +123,49 @@ cloud_FindInstance() {
|
||||
#
|
||||
# This function will be called before |cloud_CreateInstances|
|
||||
cloud_Initialize() {
|
||||
declare networkName="$1"
|
||||
# ec2-provider.sh creates firewall rules programmatically, should do the same
|
||||
# here.
|
||||
echo "Note: one day create $networkName firewall rules programmatically instead of assuming the 'testnet' tag exists"
|
||||
declare resourceGroup="$1"
|
||||
declare location="$2"
|
||||
declare nsgName=${resourceGroup}-nsg
|
||||
|
||||
# Check if resource group exists. If not, create it.
|
||||
(
|
||||
set -x
|
||||
numGroup=$(az group list --query "length([?name=='$resourceGroup'])")
|
||||
if [[ $numGroup -eq 0 ]]; then
|
||||
echo Resource Group "$resourceGroup" does not exist. Creating it now.
|
||||
az group create --name "$resourceGroup" --location "$location"
|
||||
else
|
||||
echo Resource group "$resourceGroup" already exists.
|
||||
az group show --name "$resourceGroup"
|
||||
fi
|
||||
|
||||
az network nsg create --name "$nsgName" --resource-group "$resourceGroup"
|
||||
)
|
||||
|
||||
create_nsg_rule() {
|
||||
ruleName="$1"
|
||||
ports="$2"
|
||||
access="$3"
|
||||
protocol="$4"
|
||||
priority="$5"
|
||||
(
|
||||
set -x
|
||||
az network nsg rule create -g "${resourceGroup}" --nsg-name "${nsgName}" -n "${ruleName}" \
|
||||
--priority "${priority}" --source-address-prefixes "*" --source-port-ranges "*" \
|
||||
--destination-address-prefixes "*" --destination-port-ranges "${ports}" --access "${access}" \
|
||||
--protocol "${protocol}"
|
||||
)
|
||||
}
|
||||
|
||||
create_nsg_rule "InboundTCP" "8000-10000" "Allow" "Tcp" 1000
|
||||
create_nsg_rule "InboundUDP" "8000-10000" "Allow" "Udp" 1001
|
||||
create_nsg_rule "InboundHTTP" "80" "Allow" "Tcp" 1002
|
||||
create_nsg_rule "InboundNetworkExplorerAPI" "3001" "Allow" "Tcp" 1003
|
||||
create_nsg_rule "InboundDrone" "9900" "Allow" "Tcp" 1004
|
||||
create_nsg_rule "InboundJsonRpc" "8899-8900" "Allow" "Tcp" 1005
|
||||
create_nsg_rule "InboundRsync" "873" "Allow" "Tcp" 1006
|
||||
create_nsg_rule "InboundStun" "3478" "Allow" "Udp" 1007
|
||||
create_nsg_rule "InboundSSH" "22" "Allow" "Tcp" 1008
|
||||
}
|
||||
|
||||
#
|
||||
@ -175,6 +214,7 @@ cloud_CreateInstances() {
|
||||
nodes+=("$node")
|
||||
done
|
||||
fi
|
||||
nsgName=${networkName}-nsg
|
||||
|
||||
declare -a args
|
||||
args=(
|
||||
@ -184,6 +224,7 @@ cloud_CreateInstances() {
|
||||
--size "$machineType"
|
||||
--location "$zone"
|
||||
--generate-ssh-keys
|
||||
--nsg "$nsgName"
|
||||
)
|
||||
|
||||
if [[ -n $optionalBootDiskSize ]]; then
|
||||
@ -219,27 +260,17 @@ cloud_CreateInstances() {
|
||||
|
||||
(
|
||||
set -x
|
||||
# 1: Check if resource group exists. If not, create it.
|
||||
numGroup=$(az group list --query "length([?name=='$networkName'])")
|
||||
if [[ $numGroup -eq 0 ]]; then
|
||||
echo Resource Group "$networkName" does not exist. Creating it now.
|
||||
az group create --name "$networkName" --location "$zone"
|
||||
else
|
||||
echo Resource group "$networkName" already exists.
|
||||
az group show --name "$networkName"
|
||||
fi
|
||||
|
||||
# 2: For node in numNodes, create VM and put the creation process in the background with --no-wait
|
||||
# For node in numNodes, create VM and put the creation process in the background with --no-wait
|
||||
for nodeName in "${nodes[@]}"; do
|
||||
az vm create --name "$nodeName" "${args[@]}" --no-wait
|
||||
done
|
||||
for nodeName in "${nodes[@]}"; do
|
||||
az vm wait --created --name "$nodeName" --resource-group "$networkName" --verbose --timeout 600
|
||||
done
|
||||
|
||||
# 3. If GPU is to be enabled, wait until nodes are created, then install the appropriate extension
|
||||
# If GPU is to be enabled, install the appropriate extension
|
||||
if $enableGpu; then
|
||||
for nodeName in "${nodes[@]}"; do
|
||||
az vm wait --created --name "$nodeName" --resource-group "$networkName" --verbose --timeout 600
|
||||
done
|
||||
|
||||
for nodeName in "${nodes[@]}"; do
|
||||
az vm extension set \
|
||||
--resource-group "$networkName" \
|
||||
@ -250,7 +281,7 @@ cloud_CreateInstances() {
|
||||
--no-wait
|
||||
done
|
||||
|
||||
# 4. Wait until all nodes have GPU extension installed
|
||||
# Wait until all nodes have GPU extension installed
|
||||
for nodeName in "${nodes[@]}"; do
|
||||
az vm wait --updated --name "$nodeName" --resource-group "$networkName" --verbose --timeout 600
|
||||
done
|
||||
|
@ -17,6 +17,7 @@ else
|
||||
[[ -r /solana-scratch/id_ecdsa.pub ]] || exit 1
|
||||
|
||||
sudo -u solana bash -c "
|
||||
echo 'PATH=\"/home/solana/.cargo/bin:$PATH\"' > /home/solana/.profile
|
||||
mkdir -p /home/solana/.ssh/
|
||||
cd /home/solana/.ssh/
|
||||
cp /solana-scratch/id_ecdsa.pub authorized_keys
|
||||
|
9
net/scripts/install-ag.sh
Executable file
@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
set -ex
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
apt-get update
|
||||
apt-get --assume-yes install silversearcher-ag
|
@ -6,3 +6,5 @@ set -ex
|
||||
|
||||
add-apt-repository -y ppa:chris-lea/redis-server
|
||||
apt-get --assume-yes install redis
|
||||
|
||||
systemctl enable redis-server.service
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-netutil"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Network Utilities"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -18,7 +18,7 @@ rand = "0.6.1"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
socket2 = "0.3.11"
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.2" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-bpf-programs"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "README.md"
|
||||
@ -22,10 +22,10 @@ walkdir = "2"
|
||||
bincode = "1.1.4"
|
||||
byteorder = "1.3.2"
|
||||
elf = "0.0.10"
|
||||
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.2" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
solana_rbpf = "=0.1.19"
|
||||
|
||||
[[bench]]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-128bit"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,11 +12,11 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.20.2" }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-128bit-dep"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-alloc"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-dep-crate"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,10 +13,10 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
byteorder = { version = "1", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-external-spend"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-iter"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-many-args"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,11 +12,11 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.20.2" }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-many-args-dep"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-noop"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-panic"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-param-passing"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,11 +12,11 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "0.20.2" }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-param-passing-dep"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
[package]
|
||||
name = "solana-bpf-rust-sysval"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF test program written in Rust"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.0", default-features = false }
|
||||
solana-sdk = { path = "../../../../sdk/", version = "0.20.2", default-features = false }
|
||||
|
||||
[dev_dependencies]
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.0" }
|
||||
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.20.2" }
|
||||
|
||||
[features]
|
||||
program = ["solana-sdk/program"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-bpf-loader-api"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF Loader"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -14,8 +14,8 @@ byteorder = "1.3.2"
|
||||
libc = "0.2.65"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.101"
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
solana_rbpf = "=0.1.19"
|
||||
|
||||
[lib]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-bpf-loader-program"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana BPF Loader"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,9 +10,9 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib", "cdylib"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-btc-spv-api"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Bitcoin spv parsing program api"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -16,7 +16,7 @@ num-derive = "0.3"
|
||||
num-traits = "0.2"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0"}
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2"}
|
||||
hex = "0.3.2"
|
||||
|
||||
[lib]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "btc_spv_bin"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Bitcoin spv parsing program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-bitcoin-spv-program"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Bitcoin spv parsing program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -16,8 +16,8 @@ num-derive = "0.3"
|
||||
num-traits = "0.2"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0"}
|
||||
solana-btc-spv-api = { path = "../btc_spv_api", version = "0.20.0"}
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2"}
|
||||
solana-btc-spv-api = { path = "../btc_spv_api", version = "0.20.2"}
|
||||
|
||||
|
||||
[lib]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-budget-api"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Budget program API"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -16,10 +16,10 @@ num-derive = "0.3"
|
||||
num-traits = "0.2"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.0" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-budget-program"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana budget program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,9 +10,9 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-budget-api = { path = "../budget_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-budget-api = { path = "../budget_api", version = "0.20.2" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib", "cdylib"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-config-api"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "config program API"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,7 +13,7 @@ bincode = "1.2.0"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
|
||||
|
||||
[lib]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-config-program"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "config program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,9 +10,9 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-config-api = { path = "../config_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-config-api = { path = "../config_api", version = "0.20.2" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib", "cdylib"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-config-tests"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana config api tests"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,12 +13,12 @@ bincode = "1.2.0"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-config-api = { path = "../config_api", version = "0.20.0" }
|
||||
solana-config-program = { path = "../config_program", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
solana-config-api = { path = "../config_api", version = "0.20.2" }
|
||||
solana-config-program = { path = "../config_program", version = "0.20.2" }
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.0" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.2" }
|
||||
assert_matches = "1.3.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-exchange-api"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Exchange program API"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,12 +13,12 @@ bincode = "1.2.0"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-metrics = { path = "../../metrics", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-metrics = { path = "../../metrics", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.0" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-exchange-program"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana exchange program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,9 +10,9 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-exchange-api = { path = "../exchange_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-exchange-api = { path = "../exchange_api", version = "0.20.2" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib", "cdylib"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-failure-program"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana failure program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,10 +10,10 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.8"
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.0" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib"]
|
||||
|
1
programs/librapay_api/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
/target/
|
4155
programs/librapay_api/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-librapay-api"
|
||||
version = "0.20.0"
|
||||
version = "0.20.2"
|
||||
description = "Solana Libra Payment"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -11,12 +11,12 @@ edition = "2018"
|
||||
[dependencies]
|
||||
bincode = "1.2.0"
|
||||
log = "0.4.8"
|
||||
solana-logger = { path = "../../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.0" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.0" }
|
||||
types = { version = "0.0.0", package = "solana_libra_types" }
|
||||
language_e2e_tests = { version = "0.0.0", package = "solana_libra_language_e2e_tests" }
|
||||
solana-move-loader-api = { path = "../move_loader_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../../logger", version = "0.20.2" }
|
||||
solana-sdk = { path = "../../sdk", version = "0.20.2" }
|
||||
solana-runtime = { path = "../../runtime", version = "0.20.2" }
|
||||
types = { version = "0.0.1-sol4", package = "solana_libra_types" }
|
||||
language_e2e_tests = { version = "0.0.1-sol4", package = "solana_libra_language_e2e_tests" }
|
||||
solana-move-loader-api = { path = "../move_loader_api", version = "0.20.2" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
Some files were not shown because too many files have changed in this diff