Compare commits
85 Commits
f9d9c1fcbf
7c59c105cf
a8ea9f2738
651f87a937
88f8e2f332
a2cb289503
89bd9d5b72
7edaaeb2a1
1c3ade80c2
3606d51507
281fd88ea7
ee6b625c13
4cc1b85376
f8312ce125
6a4cd02f64
50f238d900
23e3f4e8a2
27f70dfa49
72d366a84e
2da9de8861
f4288961d5
143ad436cf
0a9fbc3e4c
7aa091bf8c
91d8bfa828
c501c19750
acd55660da
855bd7d3b8
a2e9d8e0bf
81dbe3c49b
086e20f6c7
d08a810c08
400610bf6a
f759ac3a8d
558411364e
d0b5be3051
dc6da6fcca
8ae11a74fa
11f0333728
aac74d2357
508abcf4ed
6dbb6c7fe2
2f58658f61
0ec7ff5e2f
4d49820188
6e51babff9
872cf100d7
78cc4e644c
81c0152187
4779625f23
3c0b03ba84
c53f163ef5
ca35854417
ab1fda2a54
a6ec77c230
1d7894f1be
4866a1fc39
60c5e59a5e
fd93bdadf6
6089db2a07
462d0cfc6c
e6d6fc4391
092556ae5e
ad9fa54a47
2d68170747
20f3d18458
be79efe9b7
5db377f743
9c2f45a1e0
8646918d00
7c44fc3561
686403eb1d
6cf9b60a9c
aca142df16
b2582196db
85a77bec5f
e781cbf4ba
59956e4543
303417f981
fea03fdf33
e8160efc46
e0ba0d581c
36eda29fc9
2ec73db6bd
ef6ce2765e
@@ -4,7 +4,7 @@ version: '{build}'
 branches:
   only:
     - master
-    - /v[0-9.]+/
+    - /^v[0-9.]+/

 cache:
   - '%USERPROFILE%\.cargo'
@@ -17,7 +17,7 @@ script:
 branches:
   only:
     - master
-    - /v.*/
+    - /^v\d+\.\d+(\.\d+)?(-\S*)?$/

 notifications:
   slack:
Cargo.lock (generated): 652 changes; file diff suppressed because it is too large.
@@ -41,6 +41,7 @@ members = [
     "runtime",
     "sdk",
     "upload-perf",
+    "validator-info",
     "vote-signer",
     "wallet",
 ]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.16.0"
+version = "0.16.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -24,16 +24,16 @@ serde_derive = "1.0.92"
 serde_json = "1.0.39"
 serde_yaml = "0.8.9"
 # solana-runtime = { path = "../solana/runtime"}
-solana = { path = "../core", version = "0.16.0" }
-solana-client = { path = "../client", version = "0.16.0" }
-solana-drone = { path = "../drone", version = "0.16.0" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.0" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
-solana-logger = { path = "../logger", version = "0.16.0" }
-solana-metrics = { path = "../metrics", version = "0.16.0" }
-solana-netutil = { path = "../netutil", version = "0.16.0" }
-solana-runtime = { path = "../runtime", version = "0.16.0" }
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana = { path = "../core", version = "0.16.5" }
+solana-client = { path = "../client", version = "0.16.5" }
+solana-drone = { path = "../drone", version = "0.16.5" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.5" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.5" }
+solana-logger = { path = "../logger", version = "0.16.5" }
+solana-metrics = { path = "../metrics", version = "0.16.5" }
+solana-netutil = { path = "../netutil", version = "0.16.5" }
+solana-runtime = { path = "../runtime", version = "0.16.5" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }
 untrusted = "0.6.2"
 ws = "0.8.1"
@@ -2,16 +2,16 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.16.0"
+version = "0.16.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
 clap = "2.33.0"
-solana = { path = "../core", version = "0.16.0" }
-solana-logger = { path = "../logger", version = "0.16.0" }
-solana-netutil = { path = "../netutil", version = "0.16.0" }
+solana = { path = "../core", version = "0.16.5" }
+solana-logger = { path = "../logger", version = "0.16.5" }
+solana-netutil = { path = "../netutil", version = "0.16.5" }

 [features]
 cuda = ["solana/cuda"]
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.16.0"
+version = "0.16.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -15,14 +15,14 @@ serde = "1.0.92"
 serde_derive = "1.0.92"
 serde_json = "1.0.39"
 serde_yaml = "0.8.9"
-solana = { path = "../core", version = "0.16.0" }
-solana-client = { path = "../client", version = "0.16.0" }
-solana-drone = { path = "../drone", version = "0.16.0" }
-solana-logger = { path = "../logger", version = "0.16.0" }
-solana-metrics = { path = "../metrics", version = "0.16.0" }
-solana-netutil = { path = "../netutil", version = "0.16.0" }
-solana-runtime = { path = "../runtime", version = "0.16.0" }
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana = { path = "../core", version = "0.16.5" }
+solana-client = { path = "../client", version = "0.16.5" }
+solana-drone = { path = "../drone", version = "0.16.5" }
+solana-logger = { path = "../logger", version = "0.16.5" }
+solana-metrics = { path = "../metrics", version = "0.16.5" }
+solana-netutil = { path = "../netutil", version = "0.16.5" }
+solana-runtime = { path = "../runtime", version = "0.16.5" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }

 [features]
 cuda = ["solana/cuda"]
@@ -346,10 +346,12 @@ pub fn fund_keys<T: Client>(
     source: &Keypair,
     dests: &[Keypair],
     total: u64,
-    lamports_per_signature: u64,
+    max_fee: u64,
+    mut extra: u64,
 ) {
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
     let mut notfunded: Vec<&Keypair> = dests.iter().collect();
+    let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);

     println!("funding keys {}", dests.len());
     while !notfunded.is_empty() {
@@ -362,7 +364,8 @@ pub fn fund_keys<T: Client>(
                 break;
             }
             let start = notfunded.len() - max_units as usize;
-            let per_unit = (f.1 - max_units * lamports_per_signature) / max_units;
+            let fees = if extra > 0 { max_fee } else { 0 };
+            let per_unit = (f.1 - lamports_per_account - fees) / max_units;
             let moves: Vec<_> = notfunded[start..]
                 .iter()
                 .map(|k| (k.pubkey(), per_unit))
@@ -374,6 +377,7 @@ pub fn fund_keys<T: Client>(
             if !moves.is_empty() {
                 to_fund.push((f.0, moves));
             }
+            extra -= 1;
         }

         // try to transfer a "few" at a time with recent blockhash
@@ -582,16 +586,20 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
     i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
 }

-pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> Vec<Keypair> {
+pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
     let mut seed = [0u8; 32];
     seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
     let mut rnd = GenKeys::new(seed);

-    let mut total_keys = 1;
+    let mut total_keys = 0;
+    let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded
+    let mut delta = 1;
     while total_keys < count {
-        total_keys *= MAX_SPENDS_PER_TX;
+        extra += delta;
+        delta *= MAX_SPENDS_PER_TX;
+        total_keys += delta;
     }
-    rnd.gen_n_keypairs(total_keys)
+    (rnd.gen_n_keypairs(total_keys), extra)
 }

 pub fn generate_and_fund_keypairs<T: Client>(
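The reworked `generate_keypairs` above replaces the old `total_keys *= MAX_SPENDS_PER_TX` loop with a running geometric series: `total_keys` accumulates one fan-out level at a time, and `extra` counts the keypairs at the levels above the leaves, each of which must also be funded for one transaction fee. A small self-contained sketch of that arithmetic, using an illustrative fan-out of 4 rather than the crate's actual `MAX_SPENDS_PER_TX` value, shows how the two return values relate:

```rust
// Editorial sketch of the fan-out arithmetic above; the fan-out of 4 is an
// assumed, illustrative value, not the constant defined in bench-tps.
const MAX_SPENDS_PER_TX: u64 = 4;

// Returns (total keypairs generated, number of fee-paying fan-out senders).
fn fan_out(count: u64) -> (u64, u64) {
    let mut total_keys = 0;
    let mut extra = 0;
    let mut delta = 1;
    while total_keys < count {
        extra += delta; // every key at this level sends one fee-paying transaction
        delta *= MAX_SPENDS_PER_TX; // the next level is MAX_SPENDS_PER_TX times wider
        total_keys += delta;
    }
    (total_keys, extra)
}

fn main() {
    // Asking for 20 keys with a fan-out of 4 runs the loop twice:
    // 4 + 16 = 20 generated keys and 1 + 4 = 5 fee-paying senders.
    assert_eq!(fan_out(20), (20, 5));
    println!("{:?}", fan_out(20));
}
```

With `count = 20` the loop runs twice, yielding 20 generated keypairs and 5 fee-paying senders (the source plus the first fan-out level).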
@@ -602,8 +610,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
     lamports_per_account: u64,
 ) -> Result<(Vec<Keypair>, u64)> {
     info!("Creating {} keypairs...", tx_count * 2);
-    let mut keypairs = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
+    let (mut keypairs, extra) = generate_keypairs(funding_pubkey, tx_count as u64 * 2);

     info!("Get lamports...");

     // Sample the first keypair, see if it has lamports, if so then resume.
@@ -614,19 +621,21 @@ pub fn generate_and_fund_keypairs<T: Client>(

     if lamports_per_account > last_keypair_balance {
         let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
-        let extra =
+        let account_desired_balance =
             lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
-        let total = extra * (keypairs.len() as u64);
+        let extra_fees = extra * fee_calculator.max_lamports_per_signature;
+        let total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
         if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
             airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
         }
        info!("adding more lamports {}", account_desired_balance);
-        info!("adding more lamports {}", extra);
+        info!("adding more lamports {}", account_desired_balance);
         fund_keys(
             client,
             funding_pubkey,
             &keypairs,
             total,
             fee_calculator.max_lamports_per_signature,
+            extra,
         );
     }

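The rewritten sizing above budgets one signature fee on top of every account's target balance and then adds the fees burned by the intermediate fan-out transactions. A minimal sketch of the same arithmetic with hypothetical inputs (the 20-lamport target and 11-lamport fee mirror the new test added below; the `extra` of 5 is an assumed fan-out count, not a value taken from the diff):

```rust
// Editorial worked example of the airdrop sizing above; all values are
// illustrative rather than read from a live cluster.
fn main() {
    let lamports_per_account: u64 = 20; // desired balance per keypair
    let last_keypair_balance: u64 = 0; // nothing funded yet
    let max_lamports_per_signature: u64 = 11; // fee reported by the FeeCalculator
    let num_keypairs: u64 = 20; // keypairs.len()
    let extra: u64 = 5; // assumed number of intermediate fee-paying senders

    let account_desired_balance =
        lamports_per_account - last_keypair_balance + max_lamports_per_signature;
    let extra_fees = extra * max_lamports_per_signature;
    let total = account_desired_balance * (1 + num_keypairs) + extra_fees;

    // (20 - 0 + 11) * 21 + 5 * 11 = 651 + 55 = 706 lamports to airdrop.
    assert_eq!(total, 706);
    println!("airdrop total: {}", total);
}
```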
@@ -647,6 +656,7 @@ mod tests {
     use solana_runtime::bank::Bank;
     use solana_runtime::bank_client::BankClient;
     use solana_sdk::client::SyncClient;
+    use solana_sdk::fee_calculator::FeeCalculator;
     use solana_sdk::genesis_block::create_genesis_block;
     use std::sync::mpsc::channel;
@@ -735,7 +745,33 @@ mod tests {
             generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();

         for kp in &keypairs {
-            assert!(client.get_balance(&kp.pubkey()).unwrap() >= lamports);
+            assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
+        }
+    }
+
+    #[test]
+    fn test_bench_tps_fund_keys_with_fees() {
+        let (mut genesis_block, id) = create_genesis_block(10_000);
+        let fee_calculator = FeeCalculator::new(11);
+        genesis_block.fee_calculator = fee_calculator;
+        let bank = Bank::new(&genesis_block);
+        let client = BankClient::new(bank);
+        let tx_count = 10;
+        let lamports = 20;
+
+        let (keypairs, _keypair_balance) =
+            generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
+
+        let max_fee = client
+            .get_recent_blockhash()
+            .unwrap()
+            .1
+            .max_lamports_per_signature;
+        for kp in &keypairs {
+            assert_eq!(
+                client.get_balance(&kp.pubkey()).unwrap(),
+                lamports + max_fee
+            );
         }
     }
 }
@@ -40,7 +40,7 @@ fn main() {
     } = cli_config;

     if write_to_client_file {
-        let keypairs = generate_keypairs(&id, tx_count as u64 * 2);
+        let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2);
         let num_accounts = keypairs.len() as u64;
         let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
         let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
|
@ -25,6 +25,7 @@ Methods
|
|||||||
* [getAccountInfo](#getaccountinfo)
|
* [getAccountInfo](#getaccountinfo)
|
||||||
* [getBalance](#getbalance)
|
* [getBalance](#getbalance)
|
||||||
* [getClusterNodes](#getclusternodes)
|
* [getClusterNodes](#getclusternodes)
|
||||||
|
* [getProgramAccounts](#getprogramaccounts)
|
||||||
* [getRecentBlockhash](#getrecentblockhash)
|
* [getRecentBlockhash](#getrecentblockhash)
|
||||||
* [getSignatureStatus](#getsignaturestatus)
|
* [getSignatureStatus](#getsignaturestatus)
|
||||||
* [getSlotLeader](#getslotleader)
|
* [getSlotLeader](#getslotleader)
|
||||||
@@ -96,6 +97,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
 {"jsonrpc":"2.0","result":true,"id":1}
 ```

+---
+
+### getAccountInfo
+Returns all information associated with the account of provided Pubkey
+
+##### Parameters:
+* `string` - Pubkey of account to query, as base-58 encoded string
+
+##### Results:
+The result field will be a JSON object with the following sub fields:
+
+* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
+* `owner`, array of 32 bytes representing the program this account has been assigned to
+* `data`, array of bytes representing any data associated with the account
+* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
+
+##### Example:
+```bash
+// Request
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
+
+// Result
+{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
+```
+
 ---

 ### getBalance
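For readers following the getAccountInfo docs added above from Rust rather than curl, a minimal sketch of fetching the same fields might look like the following. It assumes the `get_account` helper on `RpcClient` that appears later in this compare and the usual `solana_client`/`solana_sdk` module paths; it is an editorial illustration, not part of the diff:

```rust
// Editorial sketch; assumes a previously constructed RpcClient.
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn show_account(client: &RpcClient, pubkey: &Pubkey) -> std::io::Result<()> {
    // The returned Account mirrors the JSON fields documented above.
    let account = client.get_account(pubkey)?;
    println!("lamports:   {}", account.lamports);
    println!("owner:      {}", account.owner);
    println!("data bytes: {}", account.data.len());
    println!("executable: {}", account.executable);
    Ok(())
}
```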
@@ -142,28 +169,29 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "

 ---

-### getAccountInfo
-Returns all information associated with the account of provided Pubkey
+### getProgramAccounts
+Returns all accounts owned by the provided program Pubkey

 ##### Parameters:
-* `string` - Pubkey of account to query, as base-58 encoded string
+* `string` - Pubkey of program, as base-58 encoded string

 ##### Results:
-The result field will be a JSON object with the following sub fields:
+The result field will be an array of arrays. Each sub array will contain:
+* `string` - a the account Pubkey as base-58 encoded string
+and a JSON object, with the following sub fields:

 * `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
 * `owner`, array of 32 bytes representing the program this account has been assigned to
 * `data`, array of bytes representing any data associated with the account
 * `executable`, boolean indicating if the account contains a program (and is strictly read-only)
-* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all

 ##### Example:
 ```bash
 // Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
+{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]]]},"id":1}
 ```

 ---
@@ -402,7 +430,7 @@ for a given account public key changes

 ##### Notification Format:
 ```bash
-{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
+{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
 ```

 ---
|
@ -74,8 +74,8 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
|
|||||||
software on Linux x86_64 and mac OS systems.
|
software on Linux x86_64 and mac OS systems.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
|
$ export SOLANA_RELEASE=v0.16.5 # skip this line to install the latest release
|
||||||
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
|
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.5/install/solana-install-init.sh | sh -s
|
||||||
```
|
```
|
||||||
|
|
||||||
Alternatively build the `solana-install` program from source and run the
|
Alternatively build the `solana-install` program from source and run the
|
||||||
@@ -240,3 +240,34 @@ A local InfluxDB and Grafana instance is now running on your machine. Define
 `start.sh` output and restart your validator.

 Metrics should now be streaming and visible from your local Grafana dashboard.
+
+#### Publishing Validator Info
+
+You can publish your validator information to the chain to be publicly visible
+to other users.
+
+Run the solana-validator-info CLI to populate a validator-info account:
+```bash
+$ solana-validator-info publish ~/validator-keypair.json <VALIDATOR_NAME> <VALIDATOR_INFO_ARGS>
+```
+Optional fields for VALIDATOR_INFO_ARGS:
+* Website
+* Keybase ID
+* Details
+
+##### Keybase
+
+Including a Keybase ID allows client applications (like the Solana Network
+Explorer) to automatically pull in your validator public profile, including
+cryptographic proofs, brand identity, etc. To connect your validator pubkey with
+Keybase:
+
+1. Join https://keybase.io/ and complete the profile for your validator
+2. Add your validator **identity pubkey** to Keybase:
+  * Create an empty file on your local computer called `solana_pubkey_<PUBKEY>`
+  * In Keybase, navigate to the Files section, and upload your pubkey file to
+    your public folder: `/keybase/public/<KEYBASE_ID>`
+  * To check your pubkey, ensure you can successfully browse to
+    `https://keybase.pub/<KEYBASE_ID>/solana_pubkey_<PUBKEY>`
+3. Add or update your `solana-validator-info` with your Keybase ID. The CLI will
+   verify the `solana_pubkey_<PUBKEY>` file
@@ -1,6 +1,6 @@
 [package]
 name = "solana-chacha-sys"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana chacha-sys"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -3,7 +3,7 @@ steps:
     timeout_in_minutes: 20
     name: "publish docker"
   - command: "ci/publish-crate.sh"
-    timeout_in_minutes: 40
+    timeout_in_minutes: 60
     name: "publish crate"
    branches: "!master"
  - command: "ci/publish-bpf-sdk.sh"
@@ -33,9 +33,15 @@ if [[ -n $CI ]]; then
     export CI_PULL_REQUEST=
   fi
   export CI_OS_NAME=linux
+  if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
+    # The solana-secondary pipeline should use the slug of the pipeline that
+    # triggered it
+    export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG
+  else
   export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
+  fi
   # TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into
-  # the solana-secondary builder
+  # the solana-secondary pipeline
   if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
     export CI_TAG=$TRIGGERED_BUILDKITE_TAG
   else
@@ -30,12 +30,24 @@ cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
 Cargo_tomls=$(ci/order-crates-for-publishing.py)

 for Cargo_toml in $Cargo_tomls; do
-  echo "-- $Cargo_toml"
+  echo "--- $Cargo_toml"
   grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || {
     echo "Error: $Cargo_toml version is not $expectedCrateVersion"
     exit 1
   }

+  crate_name=$(grep -m 1 '^name = ' "$Cargo_toml" | cut -f 3 -d ' ' | tr -d \")
+
+  if grep -q "^publish = false" "$Cargo_toml"; then
+    echo "$crate_name is is marked as unpublishable"
+    continue
+  fi
+
+  if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
+    echo "${crate_name} version ${expectedCrateVersion} is already on crates.io"
+    continue
+  fi
+
   (
     set -x
     crate=$(dirname "$Cargo_toml")
@@ -45,15 +57,26 @@ for Cargo_toml in $Cargo_tomls; do
     ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
   ) || true # <-- Don't fail.  We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues

-  # shellcheck disable=SC2086
-  crate_name=$(grep -m 1 '^name = ' $Cargo_toml | cut -f 3 -d ' ' | tr -d \")
   numRetries=30
   for ((i = 1 ; i <= numRetries ; i++)); do
     echo "Attempt ${i} of ${numRetries}"
-    # shellcheck disable=SC2086
-    if [[ $(is_crate_version_uploaded $crate_name $expectedCrateVersion) = True ]] ; then
-      echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io"
-      break
+    if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
+      echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io REST API"
+
+      really_uploaded=0
+      (
+        set -x
+        rm -rf crate-test
+        cargo init crate-test
+        cd crate-test/
+        echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
+        echo "[workspace]" >> Cargo.toml
+        cargo check
+      ) && really_uploaded=1
+      if ((really_uploaded)); then
+        break;
+      fi
+      echo "${crate_name} not yet available for download from crates.io"
     fi
     echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io.  Sleeping for 2 seconds."
     sleep 2
@@ -120,16 +120,16 @@ if [[ "$CI_OS_NAME" = linux ]]; then
   MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
 fi

-echo --- Saving build artifacts
 source ci/upload-ci-artifact.sh
-upload-ci-artifact solana-release-$TARGET.tar.bz2
-
-if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
-  echo Skipped due to DO_NOT_PUBLISH_TAR
-  exit 0
-fi

 for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
+  upload-ci-artifact "$file"
+
+  if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
+    echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
+    continue
+  fi
+
   if [[ -n $BUILDKITE ]]; then
     echo --- AWS S3 Store: "$file"
     (
@@ -24,6 +24,12 @@ blockstreamer=false
 deployUpdateManifest=true
 fetchLogs=true
 maybeHashesPerTick=
+maybeStakeNodesInGenesisBlock=
+maybeExternalPrimordialAccountsFile=
+maybeLamports=
+maybeLetsEncrypt=
+maybeFullnodeAdditionalDiskSize=
+maybeNoSnapshot=

 usage() {
   exitcode=0
@@ -62,11 +68,24 @@ Deploys a CD testnet
   -s  - Skip start.  Nodes will still be created or configured, but network software will not be started.
   -S  - Stop network software without tearing down nodes.
   -f  - Discard validator nodes that didn't bootup successfully
-  -w  - Skip time-consuming "bells and whistles" that are
-        unnecessary for a high-node count demo testnet
+  --stake-internal-nodes NUM_LAMPORTS
+   - Amount to stake internal nodes. If set, airdrops are disabled.
+  --external-accounts-file FILE_PATH
+   - Path to external Primordial Accounts file, if it exists.
   --hashes-per-tick NUM_HASHES|sleep|auto
    - Override the default --hashes-per-tick for the cluster
+  --lamports NUM_LAMPORTS
+   - Specify the number of lamports to mint (default 100000000000000)
+  --skip-deploy-update
+   - If set, will skip software update deployment
+  --skip-remote-log-retrieval
+   - If set, will not fetch logs from remote nodes
+  --letsencrypt [dns name]
+   - Attempt to generate a TLS certificate using this DNS name
+  --fullnode-additional-disk-size-gb [number]
+   - Size of additional disk in GB for all fullnodes
+  --no-snapshot
+   - If set, disables booting validators from a snapshot

 Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
       metrics
@@ -82,6 +101,33 @@ while [[ -n $1 ]]; do
     if [[ $1 = --hashes-per-tick ]]; then
       maybeHashesPerTick="$1 $2"
       shift 2
+    elif [[ $1 = --lamports ]]; then
+      maybeLamports="$1 $2"
+      shift 2
+    elif [[ $1 = --stake-internal-nodes ]]; then
+      maybeStakeNodesInGenesisBlock="$1 $2"
+      shift 2
+    elif [[ $1 = --external-accounts-file ]]; then
+      maybeExternalPrimordialAccountsFile="$1 $2"
+      shift 2
+    elif [[ $1 = --skip-deploy-update ]]; then
+      deployUpdateManifest=false
+      shift 1
+    elif [[ $1 = --skip-remote-log-retrieval ]]; then
+      fetchLogs=false
+      shift 1
+    elif [[ $1 = --letsencrypt ]]; then
+      maybeLetsEncrypt="$1 $2"
+      shift 2
+    elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
+      maybeFullnodeAdditionalDiskSize="$1 $2"
+      shift 2
+    elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
+      shortArgs+=("$1")
+      shift
+    elif [[ $1 = --no-snapshot ]]; then
+      maybeNoSnapshot="$1"
+      shift 1
     else
       usage "Unknown long option: $1"
     fi
@@ -228,6 +274,11 @@ if ! $skipCreate; then
   # shellcheck disable=SC2206
   create_args+=(${zone_args[@]})

+  if [[ -n $maybeLetsEncrypt ]]; then
+    # shellcheck disable=SC2206 # Do not want to quote $maybeLetsEncrypt
+    create_args+=($maybeLetsEncrypt)
+  fi
+
   if $blockstreamer; then
     create_args+=(-u)
   fi
@@ -256,6 +307,11 @@ if ! $skipCreate; then
     create_args+=(-f)
   fi

+  if [[ -n $maybeFullnodeAdditionalDiskSize ]]; then
+    # shellcheck disable=SC2206 # Do not want to quote
+    create_args+=($maybeFullnodeAdditionalDiskSize)
+  fi
+
   time net/"$cloudProvider".sh create "${create_args[@]}"
 else
   echo "--- $cloudProvider.sh config"
@@ -318,7 +374,6 @@ if ! $skipStart; then
     # shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick
     args+=($maybeHashesPerTick)
   fi
-
   if $reuseLedger; then
     args+=(-r)
   fi
@@ -334,7 +389,24 @@ if ! $skipStart; then
     args+=(--deploy-update windows)
   fi

-  # shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables
+  if [[ -n $maybeStakeNodesInGenesisBlock ]]; then
+    # shellcheck disable=SC2206 # Do not want to quote $maybeStakeNodesInGenesisBlock
+    args+=($maybeStakeNodesInGenesisBlock)
+  fi
+  if [[ -n $maybeExternalPrimordialAccountsFile ]]; then
+    # shellcheck disable=SC2206 # Do not want to quote $maybeExternalPrimordialAccountsFile
+    args+=($maybeExternalPrimordialAccountsFile)
+  fi
+  if [[ -n $maybeLamports ]]; then
+    # shellcheck disable=SC2206 # Do not want to quote $maybeLamports
+    args+=($maybeLamports)
+  fi
+
+  if [[ -n $maybeNoSnapshot ]]; then
+    # shellcheck disable=SC2206
+    args+=($maybeNoSnapshot)
+  fi
+
   time net/net.sh "${args[@]}"
 ) || ok=false

@@ -44,6 +44,8 @@ steps:
           value: "testnet-beta-perf"
         - label: "testnet-demo"
           value: "testnet-demo"
+        - label: "tds"
+          value: "tds"
   - select: "Operation"
     key: "testnet-operation"
     default: "sanity-or-restart"
@@ -153,6 +155,10 @@ testnet-demo)
   : "${GCE_NODE_COUNT:=150}"
   : "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
   ;;
+tds)
+  CHANNEL_OR_TAG=beta
+  CHANNEL_BRANCH=$BETA_CHANNEL
+  ;;
 *)
   echo "Error: Invalid TESTNET=$TESTNET"
   exit 1
@@ -287,6 +293,14 @@ sanity() {
       $ok
     )
     ;;
+  tds)
+    (
+      set -x
+      NO_LEDGER_VERIFY=1 \
+        NO_VALIDATOR_SANITY=1 \
+        ci/testnet-sanity.sh tds-solana-com gce "${GCE_ZONES[0]}" -f
+    )
+    ;;
   *)
     echo "Error: Invalid TESTNET=$TESTNET"
     exit 1
@@ -321,7 +335,8 @@ deploy() {
     (
       set -x
       ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
-        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \
+        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
+        -a eipalloc-0ccd4f2239886fa94 --letsencrypt edge.testnet.solana.com \
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
@@ -347,7 +362,8 @@ deploy() {
       set -x
       NO_VALIDATOR_SANITY=1 \
       ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \
-        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
+        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
+        -a eipalloc-0f286cf8a0771ce35 --letsencrypt beta.testnet.solana.com \
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
@@ -378,7 +394,8 @@ deploy() {

       # shellcheck disable=SC2068
       ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
-        -t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f -a eipalloc-0fa502bf95f6f18b2 \
+        -t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f \
+        -a eipalloc-0fa502bf95f6f18b2 --letsencrypt testnet.solana.com \
         ${skipCreate:+-e} \
         ${maybeSkipStart:+-s} \
         ${maybeStop:+-S} \
@@ -424,7 +441,9 @@ deploy() {
       NO_LEDGER_VERIFY=1 \
       NO_VALIDATOR_SANITY=1 \
       ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
-        -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f -w \
+        -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f \
+        --skip-deploy-update \
+        --skip-remote-log-retrieval \
         -a demo-testnet-solana-com \
         ${skipCreate:+-e} \
         ${maybeSkipStart:+-s} \
@@ -436,7 +455,9 @@ deploy() {
       NO_LEDGER_VERIFY=1 \
       NO_VALIDATOR_SANITY=1 \
       ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \
-        -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x -w \
+        -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x \
+        --skip-deploy-update \
+        --skip-remote-log-retrieval \
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
@@ -444,6 +465,106 @@ deploy() {
       fi
     )
     ;;
+  tds)
+    (
+      set -x
+
+      # Allow cluster configuration to be overridden from env vars
+
+      if [[ -z $TDS_ZONES ]]; then
+        TDS_ZONES="us-west1-a,us-central1-a,europe-west4-a"
+      fi
+      GCE_CLOUD_ZONES=(); while read -r -d, ; do GCE_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TDS_ZONES},"
+
+      if [[ -z $TDS_NODE_COUNT ]]; then
+        TDS_NODE_COUNT="3"
+      fi
+
+      if [[ -z $TDS_CLIENT_COUNT ]]; then
+        TDS_CLIENT_COUNT="1"
+      fi
+
+      if [[ -z $ENABLE_GPU ]]; then
+        maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
+      elif [[ $ENABLE_GPU == skip ]]; then
+        maybeGpu=()
+      else
+        maybeGpu=(-G "${ENABLE_GPU}")
+      fi
+
+      if [[ -z $HASHES_PER_TICK ]]; then
+        maybeHashesPerTick="--hashes-per-tick auto"
+      elif [[ $HASHES_PER_TICK == skip ]]; then
+        maybeHashesPerTick=""
+      else
+        maybeHashesPerTick="--hashes-per-tick ${HASHES_PER_TICK}"
+      fi
+
+      if [[ -z $STAKE_INTERNAL_NODES ]]; then
+        maybeStakeInternalNodes="--stake-internal-nodes 1000000000000"
+      elif [[ $STAKE_INTERNAL_NODES == skip ]]; then
+        maybeStakeInternalNodes=""
+      else
+        maybeStakeInternalNodes="--stake-internal-nodes ${STAKE_INTERNAL_NODES}"
+      fi
+
+      EXTERNAL_ACCOUNTS_FILE=/tmp/validator.yml
+      if [[ -z $EXTERNAL_ACCOUNTS_FILE_URL ]]; then
+        EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/stage1/validator.yml
+        wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
+        maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
+      elif [[ $EXTERNAL_ACCOUNTS_FILE_URL == skip ]]; then
+        maybeExternalAccountsFile=""
+      else
+        EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/stage1/validator.yml
+        wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
+        maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
+      fi
+
+      if [[ -z $LAMPORTS ]]; then
+        maybeLamports="--lamports 8589934592000000000"
+      elif [[ $LAMPORTS == skip ]]; then
+        maybeLamports=""
+      else
+        maybeLamports="--lamports ${LAMPORTS}"
+      fi
+
+      if [[ -z $ADDITIONAL_DISK_SIZE_GB ]]; then
+        maybeAdditionalDisk="--fullnode-additional-disk-size-gb 32000"
+      elif [[ $ADDITIONAL_DISK_SIZE_GB == skip ]]; then
+        maybeAdditionalDisk=""
+      else
+        maybeAdditionalDisk="--fullnode-additional-disk-size-gb ${ADDITIONAL_DISK_SIZE_GB}"
+      fi
+
+
+      # Multiple V100 GPUs are available in us-west1, us-central1 and europe-west4
+      # shellcheck disable=SC2068
+      # shellcheck disable=SC2086
+      NO_LEDGER_VERIFY=1 \
+        NO_VALIDATOR_SANITY=1 \
+        ci/testnet-deploy.sh -p tds-solana-com -C gce \
+        "${maybeGpu[@]}" \
+        -d pd-ssd \
+        ${GCE_CLOUD_ZONES[@]/#/-z } \
+        -t "$CHANNEL_OR_TAG" \
+        -n ${TDS_NODE_COUNT} \
+        -c ${TDS_CLIENT_COUNT} \
+        -P -u \
+        -a tds-solana-com --letsencrypt tds.solana.com \
+        ${maybeHashesPerTick} \
+        ${skipCreate:+-e} \
+        ${skipStart:+-s} \
+        ${maybeStop:+-S} \
+        ${maybeDelete:+-D} \
+        ${maybeStakeInternalNodes} \
+        ${maybeExternalAccountsFile} \
+        ${maybeLamports} \
+        ${maybeAdditionalDisk} \
+        --skip-deploy-update \
+        --no-snapshot
+    )
+    ;;
   *)
     echo "Error: Invalid TESTNET=$TESTNET"
     exit 1
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ reqwest = "0.9.18"
 serde = "1.0.92"
 serde_derive = "1.0.92"
 serde_json = "1.0.39"
-solana-netutil = { path = "../netutil", version = "0.16.0" }
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana-netutil = { path = "../netutil", version = "0.16.5" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }

 [dev-dependencies]
 jsonrpc-core = "12.0.0"
 jsonrpc-http-server = "12.0.0"
-solana-logger = { path = "../logger", version = "0.16.0" }
+solana-logger = { path = "../logger", version = "0.16.5" }
@@ -274,6 +274,39 @@ impl RpcClient {
         self.get_account(pubkey).map(|account| account.lamports)
     }

+    pub fn get_program_accounts(&self, pubkey: &Pubkey) -> io::Result<Vec<(Pubkey, Account)>> {
+        let params = json!([format!("{}", pubkey)]);
+        let response = self
+            .client
+            .send(&RpcRequest::GetProgramAccounts, Some(params), 0)
+            .map_err(|err| {
+                io::Error::new(
+                    io::ErrorKind::Other,
+                    format!("AccountNotFound: pubkey={}: {}", pubkey, err),
+                )
+            })?;
+
+        let accounts: Vec<(String, Account)> =
+            serde_json::from_value::<Vec<(String, Account)>>(response).map_err(|err| {
+                io::Error::new(
+                    io::ErrorKind::Other,
+                    format!("GetProgramAccounts parse failure: {:?}", err),
+                )
+            })?;
+
+        let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
+        for (string, account) in accounts.into_iter() {
+            let pubkey = string.parse().map_err(|err| {
+                io::Error::new(
+                    io::ErrorKind::Other,
+                    format!("GetProgramAccounts parse failure: {:?}", err),
+                )
+            })?;
+            pubkey_accounts.push((pubkey, account));
+        }
+        Ok(pubkey_accounts)
+    }
+
     /// Request the transaction count. If the response packet is dropped by the network,
     /// this method will try again 5 times.
     pub fn get_transaction_count(&self) -> io::Result<u64> {
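For callers of the client crate, a minimal usage sketch of the new method might look like the following. It assumes the usual `solana_client::rpc_client::RpcClient` and `solana_sdk::pubkey::Pubkey` paths and an already-constructed client; only the `get_program_accounts` signature shown in the hunk above is taken from the diff itself:

```rust
// Editorial sketch; module paths and the pre-built client are assumptions.
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn dump_program_accounts(client: &RpcClient, program_id: &Pubkey) -> std::io::Result<()> {
    // get_program_accounts returns every (Pubkey, Account) pair owned by `program_id`.
    for (pubkey, account) in client.get_program_accounts(program_id)? {
        println!(
            "{}: {} lamports, {} bytes of data, executable: {}",
            pubkey,
            account.lamports,
            account.data.len(),
            account.executable
        );
    }
    Ok(())
}
```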
@@ -10,6 +10,7 @@ pub enum RpcRequest {
     GetBalance,
     GetClusterNodes,
     GetNumBlocksSinceSignatureConfirmation,
+    GetProgramAccounts,
     GetRecentBlockhash,
     GetSignatureStatus,
     GetSlot,
@@ -38,6 +39,7 @@ impl RpcRequest {
             RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
                 "getNumBlocksSinceSignatureConfirmation"
             }
+            RpcRequest::GetProgramAccounts => "getProgramAccounts",
             RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
             RpcRequest::GetSignatureStatus => "getSignatureStatus",
             RpcRequest::GetSlot => "getSlot",
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.16.0"
+version = "0.16.5"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "../README.md"
@@ -45,27 +45,27 @@ rocksdb = "0.11.0"
 serde = "1.0.92"
 serde_derive = "1.0.92"
 serde_json = "1.0.39"
-solana-budget-api = { path = "../programs/budget_api", version = "0.16.0" }
-solana-budget-program = { path = "../programs/budget_program", version = "0.16.0" }
-solana-chacha-sys = { path = "../chacha-sys", version = "0.16.0" }
-solana-client = { path = "../client", version = "0.16.0" }
-solana-config-program = { path = "../programs/config_program", version = "0.16.0" }
-solana-drone = { path = "../drone", version = "0.16.0" }
+solana-budget-api = { path = "../programs/budget_api", version = "0.16.5" }
+solana-budget-program = { path = "../programs/budget_program", version = "0.16.5" }
+solana-chacha-sys = { path = "../chacha-sys", version = "0.16.5" }
+solana-client = { path = "../client", version = "0.16.5" }
+solana-config-program = { path = "../programs/config_program", version = "0.16.5" }
+solana-drone = { path = "../drone", version = "0.16.5" }
 solana-ed25519-dalek = "0.2.0"
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
-solana-kvstore = { path = "../kvstore", version = "0.16.0", optional = true }
-solana-logger = { path = "../logger", version = "0.16.0" }
-solana-metrics = { path = "../metrics", version = "0.16.0" }
-solana-netutil = { path = "../netutil", version = "0.16.0" }
-solana-runtime = { path = "../runtime", version = "0.16.0" }
-solana-sdk = { path = "../sdk", version = "0.16.0" }
-solana-stake-api = { path = "../programs/stake_api", version = "0.16.0" }
-solana-stake-program = { path = "../programs/stake_program", version = "0.16.0" }
-solana-storage-api = { path = "../programs/storage_api", version = "0.16.0" }
-solana-storage-program = { path = "../programs/storage_program", version = "0.16.0" }
-solana-vote-api = { path = "../programs/vote_api", version = "0.16.0" }
-solana-vote-program = { path = "../programs/vote_program", version = "0.16.0" }
-solana-vote-signer = { path = "../vote-signer", version = "0.16.0" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.5" }
+solana-kvstore = { path = "../kvstore", version = "0.16.5", optional = true }
+solana-logger = { path = "../logger", version = "0.16.5" }
+solana-metrics = { path = "../metrics", version = "0.16.5" }
+solana-netutil = { path = "../netutil", version = "0.16.5" }
+solana-runtime = { path = "../runtime", version = "0.16.5" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }
+solana-stake-api = { path = "../programs/stake_api", version = "0.16.5" }
+solana-stake-program = { path = "../programs/stake_program", version = "0.16.5" }
+solana-storage-api = { path = "../programs/storage_api", version = "0.16.5" }
+solana-storage-program = { path = "../programs/storage_program", version = "0.16.5" }
+solana-vote-api = { path = "../programs/vote_api", version = "0.16.5" }
+solana-vote-program = { path = "../programs/vote_program", version = "0.16.5" }
+solana-vote-signer = { path = "../vote-signer", version = "0.16.5" }
 sys-info = "0.5.7"
 tokio = "0.1"
 tokio-codec = "0.1"
@@ -329,8 +329,9 @@ impl BankForks {
         names.sort();
         let mut bank_maps = vec![];
         let status_cache_rc = StatusCacheRc::default();
+        let id = (names[names.len() - 1] + 1) as usize;
         let mut bank0 =
-            Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc);
+            Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc, id);
         bank0.freeze();
         let bank_root = BankForks::load_snapshots(
             &names,
@@ -418,7 +418,7 @@ impl BankingStage {
        // the likelihood of any single thread getting starved and processing old ids.
        // TODO: Banking stage threads should be prioritized to complete faster then this queue
        // expires.
-       let (loaded_accounts, results) =
+       let (loaded_accounts, results, tx_count, signature_count) =
            bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2);
        let load_execute_time = now.elapsed();

@@ -432,7 +432,7 @@ impl BankingStage {
        let commit_time = {
            let now = Instant::now();
-           bank.commit_transactions(txs, &loaded_accounts, &results);
+           bank.commit_transactions(txs, &loaded_accounts, &results, tx_count, signature_count);
            now.elapsed()
        };
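In the two hunks above, load_and_execute_transactions now also reports how many transactions and signatures were executed, and commit_transactions receives those totals instead of deriving them again. A minimal standalone sketch of that tallying step; the helper and names here are illustrative, not the Bank API:

// Hedged sketch: `tally` and `sigs_per_tx` are hypothetical. The idea is that
// counts gathered while executing the batch can be handed straight to the
// commit step rather than recomputed there.
fn tally(sigs_per_tx: &[usize], results: &[Result<(), ()>]) -> (u64, u64) {
    let mut tx_count = 0u64;
    let mut signature_count = 0u64;
    for (sigs, res) in sigs_per_tx.iter().zip(results) {
        if res.is_ok() {
            tx_count += 1; // one successfully executed transaction
            signature_count += *sigs as u64; // all signatures it carried
        }
    }
    (tx_count, signature_count)
}

fn main() {
    let sigs_per_tx = [1usize, 2, 1];
    let results = [Ok(()), Err(()), Ok(())];
    let (tx_count, signature_count) = tally(&sigs_per_tx, &results);
    assert_eq!((tx_count, signature_count), (2, 2));
    // In the diff, these totals become the extra arguments to commit_transactions().
}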
@@ -722,70 +722,6 @@ impl Blocktree {
        iter.map(|(_, blob_data)| Blob::new(&blob_data))
    }

-   /// Return an iterator for all the entries in the given file.
-   pub fn read_ledger(&self) -> Result<impl Iterator<Item = Entry>> {
-       use crate::entry::EntrySlice;
-       use std::collections::VecDeque;
-
-       struct EntryIterator {
-           db_iterator: Cursor<cf::Data>,
-
-           // TODO: remove me when replay_stage is iterating by block (Blocktree)
-           // this verification is duplicating that of replay_stage, which
-           // can do this in parallel
-           blockhash: Option<Hash>,
-           // https://github.com/rust-rocksdb/rust-rocksdb/issues/234
-           // rocksdb issue: the _blocktree member must be lower in the struct to prevent a crash
-           // when the db_iterator member above is dropped.
-           // _blocktree is unused, but dropping _blocktree results in a broken db_iterator
-           // you have to hold the database open in order to iterate over it, and in order
-           // for db_iterator to be able to run Drop
-           // _blocktree: Blocktree,
-           entries: VecDeque<Entry>,
-       }
-
-       impl Iterator for EntryIterator {
-           type Item = Entry;
-
-           fn next(&mut self) -> Option<Entry> {
-               if !self.entries.is_empty() {
-                   return Some(self.entries.pop_front().unwrap());
-               }
-
-               if self.db_iterator.valid() {
-                   if let Some(value) = self.db_iterator.value_bytes() {
-                       if let Ok(next_entries) =
-                           deserialize::<Vec<Entry>>(&value[BLOB_HEADER_SIZE..])
-                       {
-                           if let Some(blockhash) = self.blockhash {
-                               if !next_entries.verify(&blockhash) {
-                                   return None;
-                               }
-                           }
-                           self.db_iterator.next();
-                           if next_entries.is_empty() {
-                               return None;
-                           }
-                           self.entries = VecDeque::from(next_entries);
-                           let entry = self.entries.pop_front().unwrap();
-                           self.blockhash = Some(entry.hash);
-                           return Some(entry);
-                       }
-                   }
-               }
-               None
-           }
-       }
-       let mut db_iterator = self.db.cursor::<cf::Data>()?;
-
-       db_iterator.seek_to_first();
-       Ok(EntryIterator {
-           entries: VecDeque::new(),
-           db_iterator,
-           blockhash: None,
-       })
-   }
-
    pub fn get_slot_entries_with_blob_count(
        &self,
        slot: u64,
@@ -1662,9 +1598,7 @@ pub fn tmp_copy_blocktree(from: &str, name: &str) -> String {
#[cfg(test)]
pub mod tests {
    use super::*;
-   use crate::entry::{
-       create_ticks, make_tiny_test_entries, make_tiny_test_entries_from_hash, Entry, EntrySlice,
-   };
+   use crate::entry::{create_ticks, make_tiny_test_entries, Entry, EntrySlice};
    use crate::erasure::{CodingGenerator, NUM_CODING, NUM_DATA};
    use crate::packet;
    use rand::seq::SliceRandom;
@@ -2192,59 +2126,6 @@ pub mod tests {
        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
    }

-   #[test]
-   pub fn test_genesis_and_entry_iterator() {
-       let entries = make_tiny_test_entries_from_hash(&Hash::default(), 10);
-
-       let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
-       {
-           genesis(&ledger_path, &Keypair::new(), &entries).unwrap();
-
-           let ledger = Blocktree::open(&ledger_path).expect("open failed");
-
-           let read_entries: Vec<Entry> =
-               ledger.read_ledger().expect("read_ledger failed").collect();
-           assert!(read_entries.verify(&Hash::default()));
-           assert_eq!(entries, read_entries);
-       }
-
-       Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
-   }
-   #[test]
-   pub fn test_entry_iterator_up_to_consumed() {
-       let entries = make_tiny_test_entries_from_hash(&Hash::default(), 3);
-       let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
-       {
-           // put entries except last 2 into ledger
-           genesis(&ledger_path, &Keypair::new(), &entries[..entries.len() - 2]).unwrap();
-
-           let ledger = Blocktree::open(&ledger_path).expect("open failed");
-
-           // now write the last entry, ledger has a hole in it one before the end
-           // +-+-+-+-+-+-+-+ +-+
-           // | | | | | | | | | |
-           // +-+-+-+-+-+-+-+ +-+
-           ledger
-               .write_entries(
-                   0u64,
-                   0,
-                   (entries.len() - 1) as u64,
-                   16,
-                   &entries[entries.len() - 1..],
-               )
-               .unwrap();
-
-           let read_entries: Vec<Entry> =
-               ledger.read_ledger().expect("read_ledger failed").collect();
-           assert!(read_entries.verify(&Hash::default()));
-
-           // enumeration should stop at the hole
-           assert_eq!(entries[..entries.len() - 2].to_vec(), read_entries);
-       }
-
-       Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
-   }
-
    #[test]
    pub fn test_new_blobs_signal() {
        // Initialize ledger
@@ -142,6 +142,7 @@ pub fn process_blocktree(
    genesis_block: &GenesisBlock,
    blocktree: &Blocktree,
    account_paths: Option<String>,
+   verify_ledger: bool,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
    let now = Instant::now();
    info!("processing ledger...");

@@ -204,7 +205,7 @@ pub fn process_blocktree(
        }

        if !entries.is_empty() {
-           if !entries.verify(&last_entry_hash) {
+           if verify_ledger && !entries.verify(&last_entry_hash) {
                warn!(
                    "Ledger proof of history failed at slot: {}, entry: {}",
                    slot, entry_height

@@ -373,7 +374,7 @@ pub mod tests {
        fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);

        let (mut _bank_forks, bank_forks_info, _) =
-           process_blocktree(&genesis_block, &blocktree, None).unwrap();
+           process_blocktree(&genesis_block, &blocktree, None, true).unwrap();

        assert_eq!(bank_forks_info.len(), 1);
        assert_eq!(

@@ -432,7 +433,7 @@ pub mod tests {
        blocktree.set_roots(&[4, 1, 0]).unwrap();

        let (bank_forks, bank_forks_info, _) =
-           process_blocktree(&genesis_block, &blocktree, None).unwrap();
+           process_blocktree(&genesis_block, &blocktree, None, true).unwrap();

        assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root

@@ -506,7 +507,7 @@ pub mod tests {
        blocktree.set_roots(&[0, 1]).unwrap();

        let (bank_forks, bank_forks_info, _) =
-           process_blocktree(&genesis_block, &blocktree, None).unwrap();
+           process_blocktree(&genesis_block, &blocktree, None, true).unwrap();

        assert_eq!(bank_forks_info.len(), 2); // There are two forks
        assert_eq!(

@@ -587,7 +588,7 @@ pub mod tests {

        // Check that we can properly restart the ledger / leader scheduler doesn't fail
        let (bank_forks, bank_forks_info, _) =
-           process_blocktree(&genesis_block, &blocktree, None).unwrap();
+           process_blocktree(&genesis_block, &blocktree, None, true).unwrap();

        assert_eq!(bank_forks_info.len(), 1); // There is one fork
        assert_eq!(

@@ -723,7 +724,7 @@ pub mod tests {
            .unwrap();
        let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
        let (bank_forks, bank_forks_info, _) =
-           process_blocktree(&genesis_block, &blocktree, None).unwrap();
+           process_blocktree(&genesis_block, &blocktree, None, true).unwrap();

        assert_eq!(bank_forks_info.len(), 1);
        assert_eq!(bank_forks.root(), 0);

@@ -754,7 +755,7 @@ pub mod tests {

        let blocktree = Blocktree::open(&ledger_path).unwrap();
        let (bank_forks, bank_forks_info, _) =
-           process_blocktree(&genesis_block, &blocktree, None).unwrap();
+           process_blocktree(&genesis_block, &blocktree, None, true).unwrap();

        assert_eq!(bank_forks_info.len(), 1);
        assert_eq!(
@@ -133,7 +133,7 @@ mod tests {
        hasher.hash(&buf[..size]);

        // golden needs to be updated if blob stuff changes....
-       let golden: Hash = "E2HZjSC6VgH4nmEiTbMDATTeBcFjwSYz7QYvU7doGNhD"
+       let golden: Hash = "37YzrTgiFRGQG1EoMZVecnGqxEK7UGxEQeBSdGMJcKqp"
            .parse()
            .unwrap();

@@ -748,7 +748,7 @@ impl ClusterInfo {

    /// retransmit messages to a list of nodes
    /// # Remarks
-   /// We need to avoid having obj locked while doing any io, such as the `send_to`
+   /// We need to avoid having obj locked while doing a io, such as the `send_to`
    pub fn retransmit_to(
        obj: &Arc<RwLock<Self>>,
        peers: &[ContactInfo],

@@ -1092,7 +1092,7 @@ impl ClusterInfo {
        if caller.contact_info().is_none() {
            return vec![];
        }
-       let mut from = caller.contact_info().cloned().unwrap();
+       let from = caller.contact_info().unwrap();
        if from.id == self_id {
            warn!(
                "PullRequest ignored, I'm talking to myself: me={} remoteme={}",

@@ -1110,15 +1110,10 @@ impl ClusterInfo {
        let len = data.len();
        trace!("get updates since response {}", len);
        let rsp = Protocol::PullResponse(self_id, data);
-       // The remote node may not know its public IP:PORT. Record what it looks like to us.
-       // This may or may not be correct for everybody, but it's better than leaving the remote with
-       // an unspecified address in our table
-       if from.gossip.ip().is_unspecified() {
-           inc_new_counter_debug!("cluster_info-window-request-updates-unspec-gossip", 1);
-           from.gossip = *from_addr;
-       }
+       // The remote node may not know its public IP:PORT. Instead of responding to the caller's
+       // gossip addr, respond to the origin addr.
        inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
-       to_shared_blob(rsp, from.gossip).ok().into_iter().collect()
+       to_shared_blob(rsp, *from_addr).ok().into_iter().collect()
    }
    fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
        let len = data.len();
@@ -260,14 +260,16 @@ impl ClusterInfoRepairListener {
        num_slots_to_repair: usize,
        epoch_schedule: &EpochSchedule,
    ) -> Result<()> {
-       let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root + 1);
+       let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root);

        if slot_iter.is_err() {
-           warn!("Root for repairee is on different fork OR replay_stage hasn't marked this slot as root yet");
+           info!(
+               "Root for repairee is on different fork. My root: {}, repairee_root: {}",
+               my_root, repairee_epoch_slots.root
+           );
            return Ok(());
        }

-       let slot_iter = slot_iter?;
+       let mut slot_iter = slot_iter?;

        let mut total_data_blobs_sent = 0;
        let mut total_coding_blobs_sent = 0;

@@ -276,6 +278,10 @@ impl ClusterInfoRepairListener {
            epoch_schedule.get_stakers_epoch(repairee_epoch_slots.root);
        let max_confirmed_repairee_slot =
            epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch);

+       // Skip the first slot in the iterator because we know it's the root slot which the repairee
+       // already has
+       slot_iter.next();
        for (slot, slot_meta) in slot_iter {
            if slot > my_root
                || num_slots_repaired >= num_slots_to_repair
@@ -226,11 +226,13 @@ impl CodingGenerator {
            let index = data_blob.index();
            let slot = data_blob.slot();
            let id = data_blob.id();
+           let version = data_blob.version();

            let mut coding_blob = Blob::default();
            coding_blob.set_index(index);
            coding_blob.set_slot(slot);
            coding_blob.set_id(&id);
+           coding_blob.set_version(version);
            coding_blob.set_size(max_data_size);
            coding_blob.set_coding();

@@ -176,6 +176,7 @@ impl LocalCluster {
            &leader_voting_keypair,
            &leader_storage_keypair,
            None,
+           true,
            &config.validator_configs[0],
        );

@@ -308,6 +309,7 @@ impl LocalCluster {
            &voting_keypair,
            &storage_keypair,
            Some(&self.entry_point_info),
+           true,
            &validator_config,
        );

@@ -561,6 +563,7 @@ impl Cluster for LocalCluster {
            &fullnode_info.voting_keypair,
            &fullnode_info.storage_keypair,
            None,
+           true,
            config,
        );

@@ -341,7 +341,8 @@ macro_rules! range {
const SIGNATURE_RANGE: std::ops::Range<usize> = range!(0, Signature);
const FORWARDED_RANGE: std::ops::Range<usize> = range!(SIGNATURE_RANGE.end, bool);
const PARENT_RANGE: std::ops::Range<usize> = range!(FORWARDED_RANGE.end, u64);
-const SLOT_RANGE: std::ops::Range<usize> = range!(PARENT_RANGE.end, u64);
+const VERSION_RANGE: std::ops::Range<usize> = range!(PARENT_RANGE.end, u64);
+const SLOT_RANGE: std::ops::Range<usize> = range!(VERSION_RANGE.end, u64);
const INDEX_RANGE: std::ops::Range<usize> = range!(SLOT_RANGE.end, u64);
const ID_RANGE: std::ops::Range<usize> = range!(INDEX_RANGE.end, Pubkey);
const FLAGS_RANGE: std::ops::Range<usize> = range!(ID_RANGE.end, u32);

@@ -391,6 +392,12 @@ impl Blob {
    pub fn set_parent(&mut self, ix: u64) {
        LittleEndian::write_u64(&mut self.data[PARENT_RANGE], ix);
    }
+   pub fn version(&self) -> u64 {
+       LittleEndian::read_u64(&self.data[VERSION_RANGE])
+   }
+   pub fn set_version(&mut self, version: u64) {
+       LittleEndian::write_u64(&mut self.data[VERSION_RANGE], version);
+   }
    pub fn slot(&self) -> u64 {
        LittleEndian::read_u64(&self.data[SLOT_RANGE])
    }

@@ -862,4 +869,12 @@ mod tests {
        b.sign(&k);
        assert!(b.verify());
    }

+   #[test]
+   fn test_version() {
+       let mut b = Blob::default();
+       assert_eq!(b.version(), 0);
+       b.set_version(1);
+       assert_eq!(b.version(), 1);
+   }
}
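The new VERSION field is carved out of the blob header as a fixed byte range and read/written little-endian, which shifts every later range (SLOT, INDEX, ID, FLAGS) by eight bytes; that is why the chacha golden hash changed earlier in this diff and why CodingGenerator now copies the version onto coding blobs. A self-contained sketch of the same fixed-offset accessor pattern, with an illustrative layout rather than the real Blob header:

use std::convert::TryInto;
use std::ops::Range;

// Illustrative layout only -- the real header lives in packet.rs and is built
// with the range! macro over the actual field types.
const PARENT_RANGE: Range<usize> = 0..8;
const VERSION_RANGE: Range<usize> = 8..16; // the newly inserted field
const SLOT_RANGE: Range<usize> = 16..24;   // everything after it shifts by 8 bytes

struct Header {
    data: [u8; 24],
}

impl Header {
    fn set_version(&mut self, version: u64) {
        // same little-endian write as LittleEndian::write_u64 in the diff
        self.data[VERSION_RANGE].copy_from_slice(&version.to_le_bytes());
    }
    fn version(&self) -> u64 {
        u64::from_le_bytes(self.data[VERSION_RANGE].try_into().unwrap())
    }
}

fn main() {
    let mut h = Header { data: [0u8; 24] };
    assert_eq!(h.version(), 0);
    h.set_version(1);
    assert_eq!(h.version(), 1);
}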
@@ -23,7 +23,6 @@ use solana_sdk::account_utils::State;
use solana_sdk::client::{AsyncClient, SyncClient};
use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::message::Message;
-use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::timing::timestamp;
use solana_sdk::transaction::Transaction;

@@ -303,7 +302,7 @@ impl Replicator {
        })
    }

-   pub fn run(&mut self, mining_pool_pubkey: Pubkey) {
+   pub fn run(&mut self) {
        info!("waiting for ledger download");
        self.thread_handles.pop().unwrap().join().unwrap();
        self.encrypt_ledger()

@@ -330,11 +329,11 @@ impl Replicator {
            }
        };
        self.blockhash = storage_blockhash;
-       self.redeem_rewards(&mining_pool_pubkey);
+       self.redeem_rewards();
    }
}

-   fn redeem_rewards(&self, mining_pool_pubkey: &Pubkey) {
+   fn redeem_rewards(&self) {
        let nodes = self.cluster_info.read().unwrap().tvu_peers();
        let client = crate::gossip_service::get_client(&nodes);

@@ -347,7 +346,6 @@ impl Replicator {
            let ix = storage_instruction::claim_reward(
                &self.keypair.pubkey(),
                &self.storage_keypair.pubkey(),
-               mining_pool_pubkey,
            );
            let message = Message::new_with_payer(vec![ix], Some(&self.keypair.pubkey()));
            if let Err(e) = client.send_message(&[&self.keypair], message) {

@@ -468,7 +466,15 @@ impl Replicator {
        // check if the storage account exists
        let balance = client.poll_get_balance(&storage_keypair.pubkey());
        if balance.is_err() || balance.unwrap() == 0 {
-           let (blockhash, _fee_calculator) = client.get_recent_blockhash().expect("blockhash");
+           let blockhash = match client.get_recent_blockhash() {
+               Ok((blockhash, _)) => blockhash,
+               Err(_) => {
+                   return Err(Error::IO(<io::Error>::new(
+                       io::ErrorKind::Other,
+                       "unable to get recent blockhash, can't submit proof",
+                   )))
+               }
+           };

            let ix = storage_instruction::create_replicator_storage_account(
                &keypair.pubkey(),

@@ -495,16 +501,25 @@ impl Replicator {
        // No point if we've got no storage account...
        let nodes = self.cluster_info.read().unwrap().tvu_peers();
        let client = crate::gossip_service::get_client(&nodes);
-       assert!(
-           client
-               .poll_get_balance(&self.storage_keypair.pubkey())
-               .unwrap()
-               > 0
-       );
+       let storage_balance = client.poll_get_balance(&self.storage_keypair.pubkey());
+       if storage_balance.is_err() || storage_balance.unwrap() == 0 {
+           error!("Unable to submit mining proof, no storage account");
+           return;
+       }
        // ...or no lamports for fees
-       assert!(client.poll_get_balance(&self.keypair.pubkey()).unwrap() > 0);
+       let balance = client.poll_get_balance(&self.keypair.pubkey());
+       if balance.is_err() || balance.unwrap() == 0 {
+           error!("Unable to submit mining proof, insufficient Replicator Account balance");
+           return;
+       }

-       let (blockhash, _) = client.get_recent_blockhash().expect("No recent blockhash");
+       let blockhash = match client.get_recent_blockhash() {
+           Ok((blockhash, _)) => blockhash,
+           Err(_) => {
+               error!("unable to get recent blockhash, can't submit proof");
+               return;
+           }
+       };
        let instruction = storage_instruction::mining_proof(
            &self.storage_keypair.pubkey(),
            self.sha_state,

@@ -518,14 +533,14 @@ impl Replicator {
            message,
            blockhash,
        );
-       client
-           .send_and_confirm_transaction(
+       if let Err(err) = client.send_and_confirm_transaction(
            &[&self.keypair, &self.storage_keypair],
            &mut transaction,
            10,
            0,
-           )
-           .expect("transfer didn't work!");
+       ) {
+           error!("Error: {:?}; while sending mining proof", err);
+       }
    }

    pub fn close(self) {
@@ -70,6 +70,15 @@ impl JsonRpcRequestProcessor {
        .ok_or_else(Error::invalid_request)
    }

+   pub fn get_program_accounts(&self, program_id: &Pubkey) -> Result<Vec<(String, Account)>> {
+       Ok(self
+           .bank()
+           .get_program_accounts(&program_id)
+           .into_iter()
+           .map(|(pubkey, account)| (pubkey.to_string(), account))
+           .collect())
+   }
+
    pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
        self.bank().get_balance(&pubkey)
    }

@@ -196,8 +205,8 @@ pub struct RpcVoteAccountInfo {
    /// The current stake, in lamports, delegated to this vote account
    pub stake: u64,

-   /// A 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout
-   pub commission: u32,
+   /// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
+   pub commission: u8,
}

#[rpc(server)]

@@ -210,6 +219,9 @@ pub trait RpcSol {
    #[rpc(meta, name = "getAccountInfo")]
    fn get_account_info(&self, _: Self::Metadata, _: String) -> Result<Account>;

+   #[rpc(meta, name = "getProgramAccounts")]
+   fn get_program_accounts(&self, _: Self::Metadata, _: String) -> Result<Vec<(String, Account)>>;
+
    #[rpc(meta, name = "getBalance")]
    fn get_balance(&self, _: Self::Metadata, _: String) -> Result<u64>;

@@ -297,6 +309,19 @@ impl RpcSol for RpcSolImpl {
        .get_account_info(&pubkey)
    }

+   fn get_program_accounts(
+       &self,
+       meta: Self::Metadata,
+       id: String,
+   ) -> Result<Vec<(String, Account)>> {
+       debug!("get_program_accounts rpc request received: {:?}", id);
+       let program_id = verify_pubkey(id)?;
+       meta.request_processor
+           .read()
+           .unwrap()
+           .get_program_accounts(&program_id)
+   }
+
    fn get_balance(&self, meta: Self::Metadata, id: String) -> Result<u64> {
        debug!("get_balance rpc request received: {:?}", id);
        let pubkey = verify_pubkey(id)?;
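The new getProgramAccounts method reuses the JSON-RPC envelope of the existing endpoints. Judging from the handler above and the test added further down, a request for every account owned by a program would look like the following, where the params value is a placeholder base-58 program id:

{"jsonrpc":"2.0","id":1,"method":"getProgramAccounts","params":["<base-58 program id>"]}

The result is a list of [account pubkey, account] pairs, each account carrying its owner, lamports, data, and executable fields.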
@@ -535,7 +560,7 @@ mod tests {

    fn start_rpc_handler_with_tx(
        pubkey: &Pubkey,
-   ) -> (MetaIoHandler<Meta>, Meta, Hash, Keypair, Pubkey) {
+   ) -> (MetaIoHandler<Meta>, Meta, Arc<Bank>, Hash, Keypair, Pubkey) {
        let (bank_forks, alice) = new_bank_forks();
        let bank = bank_forks.read().unwrap().working_bank();
        let exit = Arc::new(AtomicBool::new(false));

@@ -567,7 +592,7 @@ mod tests {
            request_processor,
            cluster_info,
        };
-       (io, meta, blockhash, alice, leader.id)
+       (io, meta, bank, blockhash, alice, leader.id)
    }

    #[test]

@@ -595,7 +620,8 @@ mod tests {
    #[test]
    fn test_rpc_get_balance() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#,

@@ -613,7 +639,8 @@ mod tests {
    #[test]
    fn test_rpc_get_cluster_nodes() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, _blockhash, _alice, leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, _blockhash, _alice, leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}}"#);
        let res = io.handle_request_sync(&req, meta);

@@ -633,7 +660,8 @@ mod tests {
    #[test]
    fn test_rpc_get_slot_leader() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}}"#);
        let res = io.handle_request_sync(&req, meta);

@@ -649,7 +677,8 @@ mod tests {
    #[test]
    fn test_rpc_get_tx_count() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#);
        let res = io.handle_request_sync(&req, meta);

@@ -664,7 +693,8 @@ mod tests {
    #[test]
    fn test_rpc_get_total_supply() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTotalSupply"}}"#);
        let rep = io.handle_request_sync(&req, meta);

@@ -689,7 +719,8 @@ mod tests {
    #[test]
    fn test_rpc_get_account_info() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#,

@@ -713,10 +744,46 @@ mod tests {
        assert_eq!(expected, result);
    }

+   #[test]
+   fn test_rpc_get_program_accounts() {
+       let bob = Keypair::new();
+       let (io, meta, bank, blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob.pubkey());
+
+       let new_program_id = Pubkey::new_rand();
+       let tx = system_transaction::assign(&bob, blockhash, &new_program_id);
+       bank.process_transaction(&tx).unwrap();
+       let req = format!(
+           r#"{{"jsonrpc":"2.0","id":1,"method":"getProgramAccounts","params":["{}"]}}"#,
+           new_program_id
+       );
+       let res = io.handle_request_sync(&req, meta);
+       let expected = format!(
+           r#"{{
+               "jsonrpc":"2.0",
+               "result":[["{}", {{
+                   "owner": {:?},
+                   "lamports": 20,
+                   "data": [],
+                   "executable": false
+               }}]],
+               "id":1}}
+           "#,
+           bob.pubkey(),
+           new_program_id.as_ref()
+       );
+       let expected: Response =
+           serde_json::from_str(&expected).expect("expected response deserialization");
+       let result: Response = serde_json::from_str(&res.expect("actual response"))
+           .expect("actual response deserialization");
+       assert_eq!(expected, result);
+   }
+
    #[test]
    fn test_rpc_confirm_tx() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, blockhash, alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, blockhash, alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);
        let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);

        let req = format!(

@@ -735,7 +802,8 @@ mod tests {
    #[test]
    fn test_rpc_get_signature_status() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, blockhash, alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, blockhash, alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);
        let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);

        let req = format!(

@@ -799,7 +867,8 @@ mod tests {
    #[test]
    fn test_rpc_get_recent_blockhash() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}}"#);
        let res = io.handle_request_sync(&req, meta);

@@ -824,7 +893,8 @@ mod tests {
    #[test]
    fn test_rpc_fail_request_airdrop() {
        let bob_pubkey = Pubkey::new_rand();
-       let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
+       let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
+           start_rpc_handler_with_tx(&bob_pubkey);

        // Expect internal error because no drone is available
        let req = format!(
@@ -84,6 +84,7 @@ impl Validator {
        voting_keypair: &Arc<Keypair>,
        storage_keypair: &Arc<Keypair>,
        entrypoint_info_option: Option<&ContactInfo>,
+       verify_ledger: bool,
        config: &ValidatorConfig,
    ) -> Self {
        warn!("CUDA is {}abled", if cfg!(cuda) { "en" } else { "dis" });

@@ -104,6 +105,7 @@ impl Validator {
            ledger_path,
            config.account_paths.clone(),
            config.snapshot_path.clone(),
+           verify_ledger,
        );

        let leader_schedule_cache = Arc::new(leader_schedule_cache);

@@ -301,6 +303,7 @@ fn get_bank_forks(
    blocktree: &Blocktree,
    account_paths: Option<String>,
    snapshot_path: Option<String>,
+   verify_ledger: bool,
) -> (BankForks, Vec<BankForksInfo>, LeaderScheduleCache) {
    if snapshot_path.is_some() {
        let bank_forks =

@@ -318,7 +321,12 @@ fn get_bank_forks(
        }
    }
    let (mut bank_forks, bank_forks_info, leader_schedule_cache) =
-       blocktree_processor::process_blocktree(&genesis_block, &blocktree, account_paths)
+       blocktree_processor::process_blocktree(
+           &genesis_block,
+           &blocktree,
+           account_paths,
+           verify_ledger,
+       )
            .expect("process_blocktree failed");
    if snapshot_path.is_some() {
        bank_forks.set_snapshot_config(snapshot_path);

@@ -331,6 +339,7 @@ pub fn new_banks_from_blocktree(
    blocktree_path: &str,
    account_paths: Option<String>,
    snapshot_path: Option<String>,
+   verify_ledger: bool,
) -> (
    BankForks,
    Vec<BankForksInfo>,

@@ -347,8 +356,13 @@ pub fn new_banks_from_blocktree(
        Blocktree::open_with_signal(blocktree_path)
            .expect("Expected to successfully open database ledger");

-   let (bank_forks, bank_forks_info, leader_schedule_cache) =
-       get_bank_forks(&genesis_block, &blocktree, account_paths, snapshot_path);
+   let (bank_forks, bank_forks_info, leader_schedule_cache) = get_bank_forks(
+       &genesis_block,
+       &blocktree,
+       account_paths,
+       snapshot_path,
+       verify_ledger,
+   );

    (
        bank_forks,

@@ -412,6 +426,7 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, String) {
        &voting_keypair,
        &storage_keypair,
        None,
+       true,
        &ValidatorConfig::default(),
    );
    discover_cluster(&contact_info.gossip, 1).expect("Node startup failed");

@@ -447,6 +462,7 @@ mod tests {
        &voting_keypair,
        &storage_keypair,
        Some(&leader_node.info),
+       true,
        &ValidatorConfig::default(),
    );
    validator.close().unwrap();

@@ -478,6 +494,7 @@ mod tests {
        &voting_keypair,
        &storage_keypair,
        Some(&leader_node.info),
+       true,
        &ValidatorConfig::default(),
    )
})
|
@ -7,6 +7,8 @@ use rand_chacha::ChaChaRng;
|
|||||||
use std::iter;
|
use std::iter;
|
||||||
use std::ops::Div;
|
use std::ops::Div;
|
||||||
|
|
||||||
|
/// Returns a list of indexes shuffled based on the input weights
|
||||||
|
/// Note - The sum of all weights must not exceed `u64::MAX`
|
||||||
pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize>
|
pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize>
|
||||||
where
|
where
|
||||||
T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
|
T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
|
||||||
@ -17,10 +19,13 @@ where
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.enumerate()
|
.enumerate()
|
||||||
.map(|(i, v)| {
|
.map(|(i, v)| {
|
||||||
let x = (total_weight / v).to_u32().unwrap();
|
let x = (total_weight / v)
|
||||||
|
.to_u64()
|
||||||
|
.expect("values > u64::max are not supported");
|
||||||
(
|
(
|
||||||
i,
|
i,
|
||||||
(&mut rng).gen_range(1, u64::from(std::u16::MAX)) * u64::from(x),
|
// capture the u64 into u128s to prevent overflow
|
||||||
|
(&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
.sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val))
|
.sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val))
|
||||||
@ -73,4 +78,18 @@ mod tests {
|
|||||||
assert_eq!(x, y);
|
assert_eq!(x, y);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_weighted_shuffle_imbalanced() {
|
||||||
|
let mut weights = vec![std::u32::MAX as u64; 3];
|
||||||
|
weights.push(1);
|
||||||
|
let shuffle = weighted_shuffle(weights.clone(), ChaChaRng::from_seed([0x5a; 32]));
|
||||||
|
shuffle.into_iter().for_each(|x| {
|
||||||
|
if x == weights.len() - 1 {
|
||||||
|
assert_eq!(weights[x], 1);
|
||||||
|
} else {
|
||||||
|
assert_eq!(weights[x], std::u32::MAX as u64);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
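The widening matters because the per-item key is a random factor just below u16::MAX multiplied by total_weight / v; with badly imbalanced weights that intermediate no longer fits the old u32, and for large weight sums it can exceed u64 as well. A small sketch of the arithmetic, using the same imbalanced weights as the new test:

fn main() {
    // Same shape as test_weighted_shuffle_imbalanced: three huge weights, one tiny one.
    let mut weights: Vec<u64> = vec![u64::from(std::u32::MAX); 3];
    weights.push(1);
    let total_weight: u64 = weights.iter().sum();

    // Old code did (total_weight / v).to_u32().unwrap(); for v == 1 the quotient
    // is roughly 1.3e10, which does not fit in a u32, so the unwrap would panic.
    assert!(total_weight / weights[3] > u64::from(std::u32::MAX));

    // New code keeps the quotient as a u64 and does the scaling in u128, so the
    // product with the random factor (< u16::MAX) cannot wrap.
    let x = u128::from(total_weight / weights[3]);
    let scaled = u128::from(std::u16::MAX) * x;
    assert_eq!(scaled, u128::from(std::u16::MAX) * u128::from(total_weight));
}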
|
@ -118,6 +118,7 @@ fn test_leader_failure_4() {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
#[test]
|
#[test]
|
||||||
|
#[ignore]
|
||||||
fn test_two_unbalanced_stakes() {
|
fn test_two_unbalanced_stakes() {
|
||||||
solana_logger::setup();
|
solana_logger::setup();
|
||||||
let mut validator_config = ValidatorConfig::default();
|
let mut validator_config = ValidatorConfig::default();
|
||||||
|
@ -98,7 +98,7 @@ fn test_replay() {
|
|||||||
completed_slots_receiver,
|
completed_slots_receiver,
|
||||||
leader_schedule_cache,
|
leader_schedule_cache,
|
||||||
_,
|
_,
|
||||||
) = validator::new_banks_from_blocktree(&blocktree_path, None, None);
|
) = validator::new_banks_from_blocktree(&blocktree_path, None, None, true);
|
||||||
let working_bank = bank_forks.working_bank();
|
let working_bank = bank_forks.working_bank();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
working_bank.get_balance(&mint_keypair.pubkey()),
|
working_bank.get_balance(&mint_keypair.pubkey()),
|
||||||
|
@@ -1,6 +1,6 @@
 [package]
 name = "solana-drone"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana Drone"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"

@@ -20,9 +20,9 @@ clap = "2.33"
 log = "0.4.2"
 serde = "1.0.92"
 serde_derive = "1.0.92"
-solana-logger = { path = "../logger", version = "0.16.0" }
+solana-logger = { path = "../logger", version = "0.16.5" }
-solana-metrics = { path = "../metrics", version = "0.16.0" }
+solana-metrics = { path = "../metrics", version = "0.16.5" }
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }
 tokio = "0.1"
 tokio-codec = "0.1"

|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-genesis"
|
name = "solana-genesis"
|
||||||
description = "Blockchain, Rebuilt for Scale"
|
description = "Blockchain, Rebuilt for Scale"
|
||||||
version = "0.16.0"
|
version = "0.16.5"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@ -15,24 +15,24 @@ serde = "1.0.92"
|
|||||||
serde_derive = "1.0.92"
|
serde_derive = "1.0.92"
|
||||||
serde_json = "1.0.39"
|
serde_json = "1.0.39"
|
||||||
serde_yaml = "0.8.9"
|
serde_yaml = "0.8.9"
|
||||||
solana = { path = "../core", version = "0.16.0" }
|
solana = { path = "../core", version = "0.16.5" }
|
||||||
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.16.0" }
|
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.16.5" }
|
||||||
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.16.0" }
|
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.16.5" }
|
||||||
solana-budget-api = { path = "../programs/budget_api", version = "0.16.0" }
|
solana-budget-api = { path = "../programs/budget_api", version = "0.16.5" }
|
||||||
solana-budget-program = { path = "../programs/budget_program", version = "0.16.0" }
|
solana-budget-program = { path = "../programs/budget_program", version = "0.16.5" }
|
||||||
solana-config-api = { path = "../programs/config_api", version = "0.16.0" }
|
solana-config-api = { path = "../programs/config_api", version = "0.16.5" }
|
||||||
solana-config-program = { path = "../programs/config_program", version = "0.16.0" }
|
solana-config-program = { path = "../programs/config_program", version = "0.16.5" }
|
||||||
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.0" }
|
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.5" }
|
||||||
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
|
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.5" }
|
||||||
solana-sdk = { path = "../sdk", version = "0.16.0" }
|
solana-sdk = { path = "../sdk", version = "0.16.5" }
|
||||||
solana-stake-api = { path = "../programs/stake_api", version = "0.16.0" }
|
solana-stake-api = { path = "../programs/stake_api", version = "0.16.5" }
|
||||||
solana-stake-program = { path = "../programs/stake_program", version = "0.16.0" }
|
solana-stake-program = { path = "../programs/stake_program", version = "0.16.5" }
|
||||||
solana-storage-api = { path = "../programs/storage_api", version = "0.16.0" }
|
solana-storage-api = { path = "../programs/storage_api", version = "0.16.5" }
|
||||||
solana-storage-program = { path = "../programs/storage_program", version = "0.16.0" }
|
solana-storage-program = { path = "../programs/storage_program", version = "0.16.5" }
|
||||||
solana-token-api = { path = "../programs/token_api", version = "0.16.0" }
|
solana-token-api = { path = "../programs/token_api", version = "0.16.5" }
|
||||||
solana-token-program = { path = "../programs/token_program", version = "0.16.0" }
|
solana-token-program = { path = "../programs/token_program", version = "0.16.5" }
|
||||||
solana-vote-api = { path = "../programs/vote_api", version = "0.16.0" }
|
solana-vote-api = { path = "../programs/vote_api", version = "0.16.5" }
|
||||||
solana-vote-program = { path = "../programs/vote_program", version = "0.16.0" }
|
solana-vote-program = { path = "../programs/vote_program", version = "0.16.5" }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
hashbrown = "0.3.0"
|
hashbrown = "0.3.0"
|
||||||
|
@ -147,14 +147,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
|||||||
.required(true)
|
.required(true)
|
||||||
.help("Path to file containing the bootstrap leader's storage keypair"),
|
.help("Path to file containing the bootstrap leader's storage keypair"),
|
||||||
)
|
)
|
||||||
.arg(
|
|
||||||
Arg::with_name("storage_mining_pool_lamports")
|
|
||||||
.long("storage-mining-pool-lamports")
|
|
||||||
.value_name("LAMPORTS")
|
|
||||||
.takes_value(true)
|
|
||||||
.required(true)
|
|
||||||
.help("Number of lamports to assign to the storage mining pool"),
|
|
||||||
)
|
|
||||||
.arg(
|
.arg(
|
||||||
Arg::with_name("bootstrap_leader_lamports")
|
Arg::with_name("bootstrap_leader_lamports")
|
||||||
.long("bootstrap-leader-lamports")
|
.long("bootstrap-leader-lamports")
|
||||||
@ -261,7 +253,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
|||||||
let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64);
|
let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64);
|
||||||
let bootstrap_leader_stake_lamports =
|
let bootstrap_leader_stake_lamports =
|
||||||
value_t_or_exit!(matches, "bootstrap_leader_stake_lamports", u64);
|
value_t_or_exit!(matches, "bootstrap_leader_stake_lamports", u64);
|
||||||
let storage_pool_lamports = value_t_or_exit!(matches, "storage_mining_pool_lamports", u64);
|
|
||||||
|
|
||||||
let bootstrap_leader_keypair = read_keypair(bootstrap_leader_keypair_file)?;
|
let bootstrap_leader_keypair = read_keypair(bootstrap_leader_keypair_file)?;
|
||||||
let bootstrap_vote_keypair = read_keypair(bootstrap_vote_keypair_file)?;
|
let bootstrap_vote_keypair = read_keypair(bootstrap_vote_keypair_file)?;
|
||||||
@ -306,12 +297,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
|||||||
1,
|
1,
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
(
|
|
||||||
"StorageMiningPoo111111111111111111111111111"
|
|
||||||
.parse()
|
|
||||||
.unwrap(),
|
|
||||||
storage_contract::create_mining_pool_account(storage_pool_lamports),
|
|
||||||
),
|
|
||||||
])
|
])
|
||||||
.native_instruction_processors(&[
|
.native_instruction_processors(&[
|
||||||
solana_bpf_loader_program!(),
|
solana_bpf_loader_program!(),
|
||||||
@ -370,6 +355,10 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
|||||||
builder = append_primordial_accounts(file, AccountFileFormat::Keypair, builder)?;
|
builder = append_primordial_accounts(file, AccountFileFormat::Keypair, builder)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// add the reward pools
|
||||||
|
builder = solana_storage_api::rewards_pools::genesis(builder);
|
||||||
|
builder = solana_stake_api::rewards_pools::genesis(builder);
|
||||||
|
|
||||||
create_new_ledger(ledger_path, &builder.build())?;
|
create_new_ledger(ledger_path, &builder.build())?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -524,6 +513,8 @@ mod tests {
|
|||||||
)
|
)
|
||||||
.expect("builder");
|
.expect("builder");
|
||||||
|
|
||||||
|
builder = solana_storage_api::rewards_pools::genesis(builder);
|
||||||
|
|
||||||
remove_file(path).unwrap();
|
remove_file(path).unwrap();
|
||||||
|
|
||||||
let genesis_block = builder.clone().build();
|
let genesis_block = builder.clone().build();
|
||||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
name = "solana-gossip"
|
name = "solana-gossip"
|
||||||
description = "Blockchain, Rebuilt for Scale"
|
description = "Blockchain, Rebuilt for Scale"
|
||||||
version = "0.16.0"
|
version = "0.16.5"
|
||||||
repository = "https://github.com/solana-labs/solana"
|
repository = "https://github.com/solana-labs/solana"
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
homepage = "https://solana.com/"
|
homepage = "https://solana.com/"
|
||||||
@ -11,10 +11,10 @@ homepage = "https://solana.com/"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
clap = "2.33.0"
|
clap = "2.33.0"
|
||||||
env_logger = "0.6.1"
|
env_logger = "0.6.1"
|
||||||
solana = { path = "../core", version = "0.16.0" }
|
solana = { path = "../core", version = "0.16.5" }
|
||||||
solana-client = { path = "../client", version = "0.16.0" }
|
solana-client = { path = "../client", version = "0.16.5" }
|
||||||
solana-netutil = { path = "../netutil", version = "0.16.0" }
|
solana-netutil = { path = "../netutil", version = "0.16.5" }
|
||||||
solana-sdk = { path = "../sdk", version = "0.16.0" }
|
solana-sdk = { path = "../sdk", version = "0.16.5" }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
cuda = []
|
cuda = []
|
||||||
|
@ -41,12 +41,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
|||||||
SubCommand::with_name("spy")
|
SubCommand::with_name("spy")
|
||||||
.about("Monitor the gossip entrypoint")
|
.about("Monitor the gossip entrypoint")
|
||||||
.setting(AppSettings::DisableVersion)
|
.setting(AppSettings::DisableVersion)
|
||||||
.arg(
|
|
||||||
clap::Arg::with_name("pull_only")
|
|
||||||
.long("pull-only")
|
|
||||||
.takes_value(false)
|
|
||||||
.help("Use a partial gossip node (Pulls only) to spy on the cluster. By default it will use a full fledged gossip node (Pushes and Pulls). Useful when behind a NAT"),
|
|
||||||
)
|
|
||||||
.arg(
|
.arg(
|
||||||
Arg::with_name("num_nodes")
|
Arg::with_name("num_nodes")
|
||||||
.short("N")
|
.short("N")
|
||||||
@ -120,9 +114,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
|||||||
.value_of("node_pubkey")
|
.value_of("node_pubkey")
|
||||||
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
|
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
|
||||||
|
|
||||||
let gossip_addr = if matches.is_present("pull_only") {
|
let gossip_addr = {
|
||||||
None
|
|
||||||
} else {
|
|
||||||
let mut addr = socketaddr_any!();
|
let mut addr = socketaddr_any!();
|
||||||
addr.set_ip(
|
addr.set_ip(
|
||||||
solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(|err| {
|
solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(|err| {
|
||||||
|
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
-version = "0.16.0"
+version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@@ -13,25 +13,28 @@ cuda = []

[dependencies]
+atty = "0.2.11"
bincode = "1.1.4"
bs58 = "0.2.0"
bzip2 = "0.3.3"
chrono = { version = "0.4.0", features = ["serde"] }
clap = { version = "2.33.0" }
-console = "0.7.5"
+console = "0.7.7"
+ctrlc = { version = "3.1.3", features = ["termination"] }
dirs = "2.0.1"
indicatif = "0.11.0"
lazy_static = "1.3.0"
log = "0.4.2"
+nix = "0.14.1"
reqwest = "0.9.18"
ring = "0.13.2"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_yaml = "0.8.9"
-solana-client = { path = "../client", version = "0.16.0" }
+solana-client = { path = "../client", version = "0.16.5" }
-solana-config-api = { path = "../programs/config_api", version = "0.16.0" }
+solana-config-api = { path = "../programs/config_api", version = "0.16.5" }
-solana-logger = { path = "../logger", version = "0.16.0" }
+solana-logger = { path = "../logger", version = "0.16.5" }
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }
tar = "0.4.26"
tempdir = "0.3.7"
url = "1.7.2"
@@ -1,11 +1,12 @@
use crate::config::Config;
+use crate::stop_process::stop_process;
use crate::update_manifest::{SignedUpdateManifest, UpdateManifest};
use chrono::{Local, TimeZone};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use ring::digest::{Context, Digest, SHA256};
use solana_client::rpc_client::RpcClient;
-use solana_config_api::config_instruction;
+use solana_config_api::config_instruction::{self, ConfigKeys};
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil, Signable};

@@ -13,7 +14,7 @@ use solana_sdk::transaction::Transaction;
use std::fs::{self, File};
use std::io::{self, BufReader, Read};
use std::path::{Path, PathBuf};
-use std::thread::sleep;
+use std::sync::mpsc;
use std::time::SystemTime;
use std::time::{Duration, Instant};
use tempdir::TempDir;
@@ -203,6 +204,7 @@ fn new_update_manifest(
        &from_keypair.pubkey(),
        &update_manifest_keypair.pubkey(),
        1, // lamports
+       vec![], // additional keys
    );
    let mut transaction = Transaction::new_unsigned_instructions(vec![new_account]);
    transaction.sign(&[from_keypair], recent_blockhash);

@@ -224,6 +226,8 @@ fn store_update_manifest(
    let signers = [from_keypair, update_manifest_keypair];
    let instruction = config_instruction::store::<SignedUpdateManifest>(
        &update_manifest_keypair.pubkey(),
+       true,   // update_manifest_keypair is signer
+       vec![], // additional keys
        update_manifest,
    );

@@ -238,9 +242,10 @@ fn get_update_manifest(
    rpc_client: &RpcClient,
    update_manifest_pubkey: &Pubkey,
) -> Result<UpdateManifest, String> {
-   let data = rpc_client
+   let mut data = rpc_client
        .get_account_data(update_manifest_pubkey)
        .map_err(|err| format!("Unable to fetch update manifest: {}", err))?;
+   let data = data.split_off(ConfigKeys::serialized_size(vec![]));

    let signed_update_manifest =
        SignedUpdateManifest::deserialize(update_manifest_pubkey, &data)
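The change above reflects that config-account data now carries a serialized key-list header ahead of the manifest payload, so readers strip that prefix before deserializing. A minimal standard-library sketch of the same prefix-stripping idea, where the hypothetical `header_len` stands in for `ConfigKeys::serialized_size(vec![])`:

    fn main() {
        // Pretend account data: a fixed-size header followed by the payload.
        let header_len = 8;
        let mut data: Vec<u8> = vec![0; header_len];
        data.extend_from_slice(&[42, 43, 44, 45]); // placeholder manifest bytes

        // `split_off(n)` keeps the first `n` bytes in `data` and returns the rest,
        // the same call used in `get_update_manifest` above.
        let payload = data.split_off(header_len);
        assert_eq!(data.len(), header_len);
        assert_eq!(payload, vec![42, 43, 44, 45]);
    }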
@@ -514,7 +519,7 @@ pub fn init(
        false
    };

-   if !path_modified {
+   if !path_modified && !no_modify_path {
        check_env_path_for_bin_dir(&config);
    }
    Ok(())

@@ -748,7 +753,11 @@ pub fn run(
) -> Result<(), String> {
    let config = Config::load(config_file)?;

-   let full_program_path = config.active_release_bin_dir().join(program_name);
+   let mut full_program_path = config.active_release_bin_dir().join(program_name);
+   if cfg!(windows) && full_program_path.extension().is_none() {
+       full_program_path.set_extension("exe");
+   }

    if !full_program_path.exists() {
        Err(format!(
            "{} does not exist",

@@ -758,6 +767,13 @@ pub fn run(

    let mut child_option: Option<std::process::Child> = None;
    let mut now = Instant::now();

+   let (signal_sender, signal_receiver) = mpsc::channel();
+   ctrlc::set_handler(move || {
+       let _ = signal_sender.send(());
+   })
+   .expect("Error setting Ctrl-C handler");

    loop {
        child_option = match child_option {
            Some(mut child) => match child.try_wait() {

@@ -793,7 +809,9 @@ pub fn run(
                Ok(true) => {
                    // Update successful, kill current process so it will be restart
                    if let Some(ref mut child) = child_option {
-                       println!("Killing program: {:?}", child.kill());
+                       stop_process(child).unwrap_or_else(|err| {
+                           eprintln!("Failed to stop child: {:?}", err);
+                       });
                    }
                }
                Ok(false) => {} // No update available

@@ -803,6 +821,15 @@ pub fn run(
            };
            now = Instant::now();
        }
-       sleep(Duration::from_secs(1));
+       if let Ok(()) = signal_receiver.recv_timeout(Duration::from_secs(1)) {
+           // Handle SIGTERM...
+           if let Some(ref mut child) = child_option {
+               stop_process(child).unwrap_or_else(|err| {
+                   eprintln!("Failed to stop child: {:?}", err);
+               });
+           }
+           std::process::exit(0);
+       }
    }
}
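The replacement of the unconditional `sleep` with `recv_timeout` lets the run loop keep its one-second cadence while still reacting promptly to Ctrl-C. A minimal sketch of that pattern, assuming the `ctrlc` crate added to the dependencies above (not the installer's full shutdown path):

    use std::sync::mpsc;
    use std::time::Duration;

    fn main() {
        // Forward Ctrl-C into a channel so the main loop owns the shutdown.
        let (signal_sender, signal_receiver) = mpsc::channel::<()>();
        ctrlc::set_handler(move || {
            let _ = signal_sender.send(());
        })
        .expect("Error setting Ctrl-C handler");

        loop {
            // ... periodic work would go here ...
            // Wait up to a second for a signal instead of sleeping unconditionally.
            if signal_receiver.recv_timeout(Duration::from_secs(1)).is_ok() {
                // A real implementation would stop any child process first.
                break;
            }
        }
    }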
@@ -8,6 +8,7 @@ mod build_env;
mod command;
mod config;
mod defaults;
+mod stop_process;
mod update_manifest;

// Return an error if a url cannot be parsed.
@@ -1,9 +1,10 @@
+use atty;
use std::process::exit;

-#[cfg(windows)]
fn press_enter() {
    // On windows, where installation happens in a console that may have opened just for this
    // purpose, give the user an opportunity to see the error before the window closes.
+   if cfg!(windows) && atty::is(atty::Stream::Stdin) {
        println!();
        println!("Press the Enter key to continue.");

@@ -13,9 +14,7 @@ fn press_enter() {
        let mut lines = stdin.lines();
        lines.next();
    }
+}
-#[cfg(not(windows))]
-fn press_enter() {}

fn main() {
    solana_install::main_init().unwrap_or_else(|err| {
67 install/src/stop_process.rs Normal file
@@ -0,0 +1,67 @@
use std::io;
use std::process::Child;

fn kill_process(process: &mut Child) -> Result<(), io::Error> {
    if let Ok(()) = process.kill() {
        process.wait()?;
    } else {
        println!("Process {} has already exited", process.id());
    }
    Ok(())
}

#[cfg(windows)]
pub fn stop_process(process: &mut Child) -> Result<(), io::Error> {
    kill_process(process)
}

#[cfg(not(windows))]
pub fn stop_process(process: &mut Child) -> Result<(), io::Error> {
    use nix::errno::Errno::{EINVAL, EPERM, ESRCH};
    use nix::sys::signal::{kill, Signal};
    use nix::unistd::Pid;
    use nix::Error::Sys;
    use std::io::ErrorKind;
    use std::thread;
    use std::time::{Duration, Instant};

    let nice_wait = Duration::from_secs(5);
    let pid = Pid::from_raw(process.id() as i32);
    match kill(pid, Signal::SIGINT) {
        Ok(()) => {
            let expire = Instant::now() + nice_wait;
            while let Ok(None) = process.try_wait() {
                if Instant::now() > expire {
                    break;
                }
                thread::sleep(nice_wait / 10);
            }
            if let Ok(None) = process.try_wait() {
                kill_process(process)?;
            }
        }
        Err(Sys(EINVAL)) => {
            println!("Invalid signal. Killing process {}", pid);
            kill_process(process)?;
        }
        Err(Sys(EPERM)) => {
            return Err(io::Error::new(
                ErrorKind::InvalidInput,
                format!("Insufficient permissions to signal process {}", pid),
            ));
        }
        Err(Sys(ESRCH)) => {
            return Err(io::Error::new(
                ErrorKind::InvalidInput,
                format!("Process {} does not exist", pid),
            ));
        }
        Err(e) => {
            return Err(io::Error::new(
                ErrorKind::InvalidInput,
                format!("Unexpected error {}", e),
            ));
        }
    };
    Ok(())
}
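The new module asks the child to exit with SIGINT, waits up to five seconds, and only then falls back to a hard kill. A self-contained, Unix-only sketch of the same "ask nicely, then kill" idea using only the standard library and the external `kill` utility (the real implementation above uses the `nix` crate to send SIGINT directly; the `sleep 60` child is just a stand-in):

    use std::process::{Child, Command};
    use std::thread::sleep;
    use std::time::{Duration, Instant};

    fn stop_gracefully(child: &mut Child) -> std::io::Result<()> {
        // Ask the child to stop with SIGINT (signal 2).
        Command::new("kill")
            .args(&["-2", &child.id().to_string()])
            .status()?;

        // Give it up to five seconds to exit on its own.
        let deadline = Instant::now() + Duration::from_secs(5);
        while child.try_wait()?.is_none() && Instant::now() < deadline {
            sleep(Duration::from_millis(500));
        }

        // Fall back to a hard kill if it is still running.
        if child.try_wait()?.is_none() {
            child.kill()?;
            child.wait()?;
        }
        Ok(())
    }

    fn main() -> std::io::Result<()> {
        let mut child = Command::new("sleep").arg("60").spawn()?;
        stop_gracefully(&mut child)
    }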
@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
-version = "0.16.0"
+version = "0.16.5"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@@ -15,7 +15,7 @@ cuda = []
[dependencies]
clap = "2.33"
dirs = "2.0.1"
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }

[[bin]]
name = "solana-keygen"
@@ -1,7 +1,7 @@
[package]
name = "solana-kvstore"
description = "Embedded Key-Value store for solana"
-version = "0.16.0"
+version = "0.16.5"
homepage = "https://solana.com/"
repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"]
@@ -3,18 +3,21 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
-version = "0.16.0"
+version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
-serde_json = "1.0.39"
-solana = { path = "../core", version = "0.16.0" }
-solana-logger = { path = "../logger", version = "0.16.0" }
-solana-runtime = { path = "../runtime", version = "0.16.0" }
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+serde = "1.0.94"
+serde_derive = "1.0.94"
+serde_json = "1.0.40"
+serde_yaml = "0.8.9"
+solana = { path = "../core", version = "0.16.5" }
+solana-logger = { path = "../logger", version = "0.16.5" }
+solana-runtime = { path = "../runtime", version = "0.16.5" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }

[dev-dependencies]
assert_cmd = "0.11"
@@ -1,13 +1,70 @@
-use clap::{crate_description, crate_name, crate_version, App, Arg, SubCommand};
+use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, SubCommand};
use solana::blocktree::Blocktree;
use solana::blocktree_processor::process_blocktree;
use solana_sdk::genesis_block::GenesisBlock;
+use std::collections::BTreeMap;
+use std::fs::File;
use std::io::{stdout, Write};
use std::process::exit;
+use std::str::FromStr;

+#[derive(PartialEq)]
+enum LedgerOutputMethod {
+    Print,
+    Json,
+}
+fn output_ledger(blocktree: Blocktree, starting_slot: u64, method: LedgerOutputMethod) {
+    let rooted_slot_iterator = blocktree
+        .rooted_slot_iterator(starting_slot)
+        .unwrap_or_else(|err| {
+            eprintln!(
+                "Failed to load entries starting from slot {}: {:?}",
+                starting_slot, err
+            );
+            exit(1);
+        });
+
+    if method == LedgerOutputMethod::Json {
+        stdout().write_all(b"{\"ledger\":[\n").expect("open array");
+    }
+
+    for (slot, slot_meta) in rooted_slot_iterator {
+        match method {
+            LedgerOutputMethod::Print => println!("Slot {}", slot),
+            LedgerOutputMethod::Json => {
+                serde_json::to_writer(stdout(), &slot_meta).expect("serialize slot_meta");
+                stdout().write_all(b",\n").expect("newline");
+            }
+        }
+
+        let entries = blocktree
+            .get_slot_entries(slot, 0, None)
+            .unwrap_or_else(|err| {
+                eprintln!("Failed to load entries for slot {}: {:?}", slot, err);
+                exit(1);
+            });
+
+        for entry in entries {
+            match method {
+                LedgerOutputMethod::Print => println!("{:?}", entry),
+                LedgerOutputMethod::Json => {
+                    serde_json::to_writer(stdout(), &entry).expect("serialize entry");
+                    stdout().write_all(b",\n").expect("newline");
+                }
+            }
+        }
+    }
+
+    if method == LedgerOutputMethod::Json {
+        stdout().write_all(b"\n]}\n").expect("close array");
+    }
+}

fn main() {
+   const DEFAULT_ROOT_COUNT: &str = "1";
    solana_logger::setup();
-   let matches = App::new(crate_name!()).about(crate_description!())
+   let matches = App::new(crate_name!())
+       .about(crate_description!())
        .version(crate_version!())
        .arg(
            Arg::with_name("ledger")
@@ -19,30 +76,46 @@ fn main() {
                .help("Use directory for ledger location"),
        )
        .arg(
-           Arg::with_name("head")
-               .short("n")
-               .long("head")
+           Arg::with_name("starting_slot")
+               .long("starting-slot")
                .value_name("NUM")
                .takes_value(true)
-               .help("Limit to at most the first NUM entries in ledger\n (only applies to print and json commands)"),
-       )
-       .arg(
-           Arg::with_name("min-hashes")
-               .short("h")
-               .long("min-hashes")
-               .value_name("NUM")
-               .takes_value(true)
-               .help("Skip entries with fewer than NUM hashes\n (only applies to print and json commands)"),
-       )
-       .arg(
-           Arg::with_name("continue")
-               .short("c")
-               .long("continue")
-               .help("Continue verify even if verification fails"),
+               .default_value("0")
+               .help("Start at this slot (only applies to print and json commands)"),
        )
        .subcommand(SubCommand::with_name("print").about("Print the ledger"))
        .subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format"))
        .subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
+       .subcommand(SubCommand::with_name("prune").about("Prune the ledger at the block height").arg(
+           Arg::with_name("slot_list")
+               .long("slot-list")
+               .value_name("FILENAME")
+               .takes_value(true)
+               .help("The location of the YAML file with a list of rollback slot heights and hashes"),
+       ))
+       .subcommand(SubCommand::with_name("list-roots").about("Output upto last <num-roots> root hashes and their heights starting at the given block height").arg(
+           Arg::with_name("max_height")
+               .long("max-height")
+               .value_name("NUM")
+               .takes_value(true)
+               .required(true)
+               .help("Maximum block height"),
+       ).arg(
+           Arg::with_name("slot_list")
+               .long("slot-list")
+               .value_name("FILENAME")
+               .required(false)
+               .takes_value(true)
+               .help("The location of the output YAML file. A list of rollback slot heights and hashes will be written to the file."),
+       ).arg(
+           Arg::with_name("num_roots")
+               .long("num-roots")
+               .value_name("NUM")
+               .takes_value(true)
+               .default_value(DEFAULT_ROOT_COUNT)
+               .required(false)
+               .help("Number of roots in the output"),
+       ))
        .get_matches();

    let ledger_path = matches.value_of("ledger").unwrap();
@@ -63,55 +136,18 @@ fn main() {
        }
    };

-   let entries = match blocktree.read_ledger() {
-       Ok(entries) => entries,
-       Err(err) => {
-           eprintln!("Failed to read ledger at {}: {}", ledger_path, err);
-           exit(1);
-       }
-   };
-
-   let head = match matches.value_of("head") {
-       Some(head) => head.parse().expect("please pass a number for --head"),
-       None => <usize>::max_value(),
-   };
-
-   let min_hashes = match matches.value_of("min-hashes") {
-       Some(hashes) => hashes
-           .parse()
-           .expect("please pass a number for --min-hashes"),
-       None => 0,
-   } as u64;
+   let starting_slot = value_t!(matches, "starting_slot", u64).unwrap_or_else(|e| e.exit());

    match matches.subcommand() {
        ("print", _) => {
-           for (i, entry) in entries.enumerate() {
-               if i >= head {
-                   break;
-               }
-               if entry.num_hashes < min_hashes {
-                   continue;
-               }
-               println!("{:?}", entry);
-           }
+           output_ledger(blocktree, starting_slot, LedgerOutputMethod::Print);
        }
        ("json", _) => {
-           stdout().write_all(b"{\"ledger\":[\n").expect("open array");
-           for (i, entry) in entries.enumerate() {
-               if i >= head {
-                   break;
-               }
-               if entry.num_hashes < min_hashes {
-                   continue;
-               }
-               serde_json::to_writer(stdout(), &entry).expect("serialize");
-               stdout().write_all(b",\n").expect("newline");
-           }
-           stdout().write_all(b"\n]}\n").expect("close array");
+           output_ledger(blocktree, starting_slot, LedgerOutputMethod::Json);
        }
-       ("verify", _) => match process_blocktree(&genesis_block, &blocktree, None) {
+       ("verify", _) => {
+           println!("Verifying ledger...");
+           match process_blocktree(&genesis_block, &blocktree, None, true) {
                Ok((_bank_forks, bank_forks_info, _)) => {
                    println!("{:?}", bank_forks_info);
                }
|
|||||||
eprintln!("Ledger verification failed: {:?}", err);
|
eprintln!("Ledger verification failed: {:?}", err);
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
},
|
}
|
||||||
|
}
|
||||||
|
("prune", Some(args_matches)) => {
|
||||||
|
if let Some(prune_file_path) = args_matches.value_of("slot_list") {
|
||||||
|
let prune_file = File::open(prune_file_path.to_string()).unwrap();
|
||||||
|
let slot_hashes: BTreeMap<u64, String> =
|
||||||
|
serde_yaml::from_reader(prune_file).unwrap();
|
||||||
|
|
||||||
|
let iter = blocktree
|
||||||
|
.rooted_slot_iterator(0)
|
||||||
|
.expect("Failed to get rooted slot");
|
||||||
|
|
||||||
|
let potential_hashes: Vec<_> = iter
|
||||||
|
.filter_map(|(slot, meta)| {
|
||||||
|
let blockhash = blocktree
|
||||||
|
.get_slot_entries(slot, meta.last_index, Some(1))
|
||||||
|
.unwrap()
|
||||||
|
.first()
|
||||||
|
.unwrap()
|
||||||
|
.hash
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
slot_hashes.get(&slot).and_then(|hash| {
|
||||||
|
if *hash == blockhash {
|
||||||
|
Some((slot, blockhash))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let (target_slot, target_hash) = potential_hashes
|
||||||
|
.last()
|
||||||
|
.expect("Failed to find a valid slot");
|
||||||
|
println!("Prune at slot {:?} hash {:?}", target_slot, target_hash);
|
||||||
|
// ToDo: Do the actual pruning of the database
|
||||||
|
}
|
||||||
|
}
|
||||||
|
("list-roots", Some(args_matches)) => {
|
||||||
|
let max_height = if let Some(height) = args_matches.value_of("max_height") {
|
||||||
|
usize::from_str(height).expect("Maximum height must be a number")
|
||||||
|
} else {
|
||||||
|
panic!("Maximum height must be provided");
|
||||||
|
};
|
||||||
|
let num_roots = if let Some(roots) = args_matches.value_of("num_roots") {
|
||||||
|
usize::from_str(roots).expect("Number of roots must be a number")
|
||||||
|
} else {
|
||||||
|
usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
|
||||||
|
};
|
||||||
|
|
||||||
|
let iter = blocktree
|
||||||
|
.rooted_slot_iterator(0)
|
||||||
|
.expect("Failed to get rooted slot");
|
||||||
|
|
||||||
|
let slot_hash: Vec<_> = iter
|
||||||
|
.filter_map(|(slot, meta)| {
|
||||||
|
if slot <= max_height as u64 {
|
||||||
|
let blockhash = blocktree
|
||||||
|
.get_slot_entries(slot, meta.last_index, Some(1))
|
||||||
|
.unwrap()
|
||||||
|
.first()
|
||||||
|
.unwrap()
|
||||||
|
.hash;
|
||||||
|
Some((slot, blockhash))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut output_file: Box<Write> = if let Some(path) = args_matches.value_of("slot_list")
|
||||||
|
{
|
||||||
|
match File::create(path) {
|
||||||
|
Ok(file) => Box::new(file),
|
||||||
|
_ => Box::new(stdout()),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Box::new(stdout())
|
||||||
|
};
|
||||||
|
|
||||||
|
slot_hash
|
||||||
|
.into_iter()
|
||||||
|
.rev()
|
||||||
|
.enumerate()
|
||||||
|
.for_each(|(i, (slot, hash))| {
|
||||||
|
if i < num_roots {
|
||||||
|
output_file
|
||||||
|
.write_all(format!("{:?}: {:?}\n", slot, hash).as_bytes())
|
||||||
|
.expect("failed to write");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
("", _) => {
|
("", _) => {
|
||||||
eprintln!("{}", matches.usage());
|
eprintln!("{}", matches.usage());
|
||||||
exit(1);
|
exit(1);
|
||||||
|
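The slot list exchanged between `list-roots` and `prune` is a YAML map of slot height to blockhash string, loaded into a `BTreeMap<u64, String>` as above. A minimal sketch of parsing such a file, assuming the `serde_yaml` dependency added to this package and using obviously placeholder hash strings:

    use std::collections::BTreeMap;

    fn main() {
        // Placeholder slot-list content; real files hold base58 blockhashes.
        let yaml = "1: hash-at-slot-1\n5: hash-at-slot-5\n";
        let slot_hashes: BTreeMap<u64, String> =
            serde_yaml::from_str(yaml).expect("valid slot-list YAML");
        for (slot, hash) in &slot_hashes {
            println!("slot {} -> {}", slot, hash);
        }
    }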
@@ -45,20 +45,5 @@ fn nominal() {
    // Print everything
    let output = run_ledger_tool(&["-l", &ledger_path, "print"]);
    assert!(output.status.success());
-   assert_eq!(count_newlines(&output.stdout), ticks);
-
-   // Only print the first 5 items
-   let output = run_ledger_tool(&["-l", &ledger_path, "-n", "5", "print"]);
-   assert!(output.status.success());
-   assert_eq!(count_newlines(&output.stdout), 5);
-
-   // Skip entries with no hashes
-   let output = run_ledger_tool(&["-l", &ledger_path, "-h", "1", "print"]);
-   assert!(output.status.success());
-   assert_eq!(count_newlines(&output.stdout), ticks);
-
-   // Skip entries with fewer than 2 hashes (skip everything)
-   let output = run_ledger_tool(&["-l", &ledger_path, "-h", "2", "print"]);
-   assert!(output.status.success());
-   assert_eq!(count_newlines(&output.stdout), 0);
+   assert_eq!(count_newlines(&output.stdout), ticks + 1);
}
@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
-version = "0.16.0"
+version = "0.16.5"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
-version = "0.16.0"
+version = "0.16.5"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"

[dependencies]
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }

[dev-dependencies]
hex = "0.3.2"
@@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
-version = "0.16.0"
+version = "0.16.5"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@@ -14,7 +14,7 @@ influx_db_client = "0.3.6"
lazy_static = "1.3.0"
log = "0.4.2"
reqwest = "0.9.18"
-solana-sdk = { path = "../sdk", version = "0.16.0" }
+solana-sdk = { path = "../sdk", version = "0.16.5" }
sys-info = "0.5.7"

[dev-dependencies]
@@ -11,7 +11,9 @@ set -e

for i in "$SOLANA_RSYNC_CONFIG_DIR" "$SOLANA_CONFIG_DIR"; do
  echo "Cleaning $i"
+ rm -rvf "${i:?}/" # <-- $i might be a symlink, rm the other side of it first
  rm -rvf "$i"
  mkdir -p "$i"
done

+setup_secondary_mount
@@ -72,6 +72,17 @@ SOLANA_RSYNC_CONFIG_DIR=$SOLANA_ROOT/config
# Configuration that remains local
SOLANA_CONFIG_DIR=$SOLANA_ROOT/config-local

+SECONDARY_DISK_MOUNT_POINT=/mnt/extra-disk
+setup_secondary_mount() {
+  # If there is a secondary disk, symlink the config-local dir there
+  if [[ -d $SECONDARY_DISK_MOUNT_POINT ]]; then
+    mkdir -p $SECONDARY_DISK_MOUNT_POINT/config-local
+    rm -rf "$SOLANA_CONFIG_DIR"
+    ln -sfT $SECONDARY_DISK_MOUNT_POINT/config-local "$SOLANA_CONFIG_DIR"
+  fi
+}
+setup_secondary_mount

default_arg() {
  declare name=$1
  declare value=$2

@@ -88,3 +99,18 @@ default_arg() {
    args+=("$name")
  fi
}

+replace_arg() {
+  declare name=$1
+  declare value=$2
+
+  default_arg "$name" "$value"
+
+  declare index=0
+  for arg in "${args[@]}"; do
+    index=$((index + 1))
+    if [[ $arg = "$name" ]]; then
+      args[$index]="$value"
+    fi
+  done
+}
@ -9,6 +9,7 @@ source "$here"/common.sh
|
|||||||
# shellcheck source=scripts/oom-score-adj.sh
|
# shellcheck source=scripts/oom-score-adj.sh
|
||||||
source "$here"/../scripts/oom-score-adj.sh
|
source "$here"/../scripts/oom-score-adj.sh
|
||||||
|
|
||||||
|
|
||||||
fullnode_usage() {
|
fullnode_usage() {
|
||||||
if [[ -n $1 ]]; then
|
if [[ -n $1 ]]; then
|
||||||
echo "$*"
|
echo "$*"
|
||||||
@ -76,24 +77,8 @@ rsync_url() { # adds the 'rsync://` prefix to URLs that need it
|
|||||||
|
|
||||||
setup_validator_accounts() {
|
setup_validator_accounts() {
|
||||||
declare entrypoint_ip=$1
|
declare entrypoint_ip=$1
|
||||||
declare node_keypair_path=$2
|
declare node_lamports=$2
|
||||||
declare vote_keypair_path=$3
|
declare stake_lamports=$3
|
||||||
declare stake_keypair_path=$4
|
|
||||||
declare storage_keypair_path=$5
|
|
||||||
declare node_lamports=$6
|
|
||||||
declare stake_lamports=$7
|
|
||||||
|
|
||||||
declare node_pubkey
|
|
||||||
node_pubkey=$($solana_keygen pubkey "$node_keypair_path")
|
|
||||||
|
|
||||||
declare vote_pubkey
|
|
||||||
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
|
|
||||||
|
|
||||||
declare stake_pubkey
|
|
||||||
stake_pubkey=$($solana_keygen pubkey "$stake_keypair_path")
|
|
||||||
|
|
||||||
declare storage_pubkey
|
|
||||||
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
|
|
||||||
|
|
||||||
if [[ -f $configured_flag ]]; then
|
if [[ -f $configured_flag ]]; then
|
||||||
echo "Vote and stake accounts have already been configured"
|
echo "Vote and stake accounts have already been configured"
|
||||||
@ -101,75 +86,68 @@ setup_validator_accounts() {
|
|||||||
if ((airdrops_enabled)); then
|
if ((airdrops_enabled)); then
|
||||||
# Fund the node with enough tokens to fund its Vote, Staking, and Storage accounts
|
# Fund the node with enough tokens to fund its Vote, Staking, and Storage accounts
|
||||||
declare fees=100 # TODO: No hardcoded transaction fees, fetch the current cluster fees
|
declare fees=100 # TODO: No hardcoded transaction fees, fetch the current cluster fees
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" airdrop $((node_lamports+stake_lamports+fees)) || return $?
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" airdrop $((node_lamports+stake_lamports+fees)) || return $?
|
||||||
else
|
else
|
||||||
echo "current account balance is "
|
echo "current account balance is "
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Fund the vote account from the node, with the node as the node_pubkey
|
# Fund the vote account from the node, with the node as the identity_pubkey
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
create-vote-account "$vote_pubkey" "$node_pubkey" 1 --commission 65535 || return $?
|
create-vote-account "$vote_pubkey" "$identity_pubkey" 1 --commission 127 || return $?
|
||||||
|
|
||||||
# Fund the stake account from the node, with the node as the node_pubkey
|
# Fund the stake account from the node, with the node as the identity_pubkey
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
create-stake-account "$stake_pubkey" "$stake_lamports" || return $?
|
create-stake-account "$stake_pubkey" "$stake_lamports" || return $?
|
||||||
|
|
||||||
# Delegate the stake. The transaction fee is paid by the node but the
|
# Delegate the stake. The transaction fee is paid by the node but the
|
||||||
# transaction must be signed by the stake_keypair
|
# transaction must be signed by the stake_keypair
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
delegate-stake "$stake_keypair_path" "$vote_pubkey" "$stake_lamports" || return $?
|
delegate-stake "$stake_keypair_path" "$vote_pubkey" "$stake_lamports" || return $?
|
||||||
|
|
||||||
# Setup validator storage account
|
# Setup validator storage account
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
create-validator-storage-account "$node_pubkey" "$storage_pubkey" || return $?
|
create-validator-storage-account "$identity_pubkey" "$storage_pubkey" || return $?
|
||||||
|
|
||||||
touch "$configured_flag"
|
touch "$configured_flag"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
|
||||||
show-vote-account "$vote_pubkey"
|
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
|
||||||
show-stake-account "$stake_pubkey"
|
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
|
||||||
show-storage-account "$storage_pubkey"
|
|
||||||
|
|
||||||
echo "Identity account balance:"
|
echo "Identity account balance:"
|
||||||
|
(
|
||||||
|
set -x
|
||||||
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance
|
||||||
echo "========================================================================"
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
|
show-vote-account "$vote_pubkey"
|
||||||
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
|
show-stake-account "$stake_pubkey"
|
||||||
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
|
show-storage-account "$storage_pubkey"
|
||||||
|
)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
setup_replicator_account() {
|
setup_replicator_account() {
|
||||||
declare entrypoint_ip=$1
|
declare entrypoint_ip=$1
|
||||||
declare node_keypair_path=$2
|
declare node_lamports=$2
|
||||||
declare storage_keypair_path=$3
|
|
||||||
declare node_lamports=$4
|
|
||||||
|
|
||||||
declare node_pubkey
|
|
||||||
node_pubkey=$($solana_keygen pubkey "$node_keypair_path")
|
|
||||||
|
|
||||||
declare storage_pubkey
|
|
||||||
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
|
|
||||||
|
|
||||||
if [[ -f $configured_flag ]]; then
|
if [[ -f $configured_flag ]]; then
|
||||||
echo "Replicator account has already been configured"
|
echo "Replicator account has already been configured"
|
||||||
else
|
else
|
||||||
if ((airdrops_enabled)); then
|
if ((airdrops_enabled)); then
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" airdrop "$node_lamports" || return $?
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" airdrop "$node_lamports" || return $?
|
||||||
else
|
else
|
||||||
echo "current account balance is "
|
echo "current account balance is "
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Setup replicator storage account
|
# Setup replicator storage account
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
create-replicator-storage-account "$node_pubkey" "$storage_pubkey" || return $?
|
create-replicator-storage-account "$identity_pubkey" "$storage_pubkey" || return $?
|
||||||
|
|
||||||
touch "$configured_flag"
|
touch "$configured_flag"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
|
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
|
||||||
show-storage-account "$storage_pubkey"
|
show-storage-account "$storage_pubkey"
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
@ -192,6 +170,7 @@ identity_keypair_path=
|
|||||||
no_restart=0
|
no_restart=0
|
||||||
airdrops_enabled=1
|
airdrops_enabled=1
|
||||||
generate_snapshots=0
|
generate_snapshots=0
|
||||||
|
boot_from_snapshot=1
|
||||||
|
|
||||||
positional_args=()
|
positional_args=()
|
||||||
while [[ -n $1 ]]; do
|
while [[ -n $1 ]]; do
|
||||||
@ -209,6 +188,9 @@ while [[ -n $1 ]]; do
|
|||||||
elif [[ $1 = --generate-snapshots ]]; then
|
elif [[ $1 = --generate-snapshots ]]; then
|
||||||
generate_snapshots=1
|
generate_snapshots=1
|
||||||
shift
|
shift
|
||||||
|
elif [[ $1 = --no-snapshot ]]; then
|
||||||
|
boot_from_snapshot=0
|
||||||
|
shift
|
||||||
elif [[ $1 = --replicator ]]; then
|
elif [[ $1 = --replicator ]]; then
|
||||||
node_type=replicator
|
node_type=replicator
|
||||||
shift
|
shift
|
||||||
@ -275,24 +257,13 @@ if [[ $node_type = replicator ]]; then
|
|||||||
shift "$shift"
|
shift "$shift"
|
||||||
|
|
||||||
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/replicator-keypair$label.json}"
|
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/replicator-keypair$label.json}"
|
||||||
|
mkdir -p "$SOLANA_CONFIG_DIR"
|
||||||
|
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
|
||||||
|
|
||||||
storage_keypair_path="$SOLANA_CONFIG_DIR"/replicator-storage-keypair$label.json
|
storage_keypair_path="$SOLANA_CONFIG_DIR"/replicator-storage-keypair$label.json
|
||||||
ledger_config_dir=$SOLANA_CONFIG_DIR/replicator-ledger$label
|
ledger_config_dir=$SOLANA_CONFIG_DIR/replicator-ledger$label
|
||||||
configured_flag=$SOLANA_CONFIG_DIR/replicator$label.configured
|
configured_flag=$SOLANA_CONFIG_DIR/replicator$label.configured
|
||||||
|
|
||||||
mkdir -p "$SOLANA_CONFIG_DIR"
|
|
||||||
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
|
|
||||||
[[ -r "$storage_keypair_path" ]] || $solana_keygen new -o "$storage_keypair_path"
|
|
||||||
|
|
||||||
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
|
|
||||||
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
|
|
||||||
|
|
||||||
cat <<EOF
|
|
||||||
======================[ $node_type configuration ]======================
|
|
||||||
replicator pubkey: $identity_pubkey
|
|
||||||
storage pubkey: $storage_pubkey
|
|
||||||
ledger: $ledger_config_dir
|
|
||||||
======================================================================
|
|
||||||
EOF
|
|
||||||
program=$solana_replicator
|
program=$solana_replicator
|
||||||
default_arg --entrypoint "$entrypoint_address"
|
default_arg --entrypoint "$entrypoint_address"
|
||||||
default_arg --identity "$identity_keypair_path"
|
default_arg --identity "$identity_keypair_path"
|
||||||
@ -300,6 +271,7 @@ EOF
|
|||||||
default_arg --ledger "$ledger_config_dir"
|
default_arg --ledger "$ledger_config_dir"
|
||||||
|
|
||||||
rsync_entrypoint_url=$(rsync_url "$entrypoint")
|
rsync_entrypoint_url=$(rsync_url "$entrypoint")
|
||||||
|
|
||||||
elif [[ $node_type = bootstrap_leader ]]; then
|
elif [[ $node_type = bootstrap_leader ]]; then
|
||||||
if [[ ${#positional_args[@]} -ne 0 ]]; then
|
if [[ ${#positional_args[@]} -ne 0 ]]; then
|
||||||
fullnode_usage "Unknown argument: ${positional_args[0]}"
|
fullnode_usage "Unknown argument: ${positional_args[0]}"
|
||||||
@ -311,9 +283,11 @@ elif [[ $node_type = bootstrap_leader ]]; then
|
|||||||
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify
|
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify
|
||||||
|
|
||||||
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/bootstrap-leader-keypair.json}"
|
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/bootstrap-leader-keypair.json}"
|
||||||
|
|
||||||
vote_keypair_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-vote-keypair.json
|
vote_keypair_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-vote-keypair.json
|
||||||
ledger_config_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger
|
ledger_config_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger
|
||||||
state_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-state
|
state_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-state
|
||||||
|
stake_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-stake-keypair.json
|
||||||
storage_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-storage-keypair.json
|
storage_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-storage-keypair.json
|
||||||
configured_flag=$SOLANA_CONFIG_DIR/bootstrap-leader.configured
|
configured_flag=$SOLANA_CONFIG_DIR/bootstrap-leader.configured
|
||||||
|
|
||||||
@ -332,19 +306,16 @@ elif [[ $node_type = validator ]]; then
|
|||||||
shift "$shift"
|
shift "$shift"
|
||||||
|
|
||||||
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/validator-keypair$label.json}"
|
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/validator-keypair$label.json}"
|
||||||
|
mkdir -p "$SOLANA_CONFIG_DIR"
|
||||||
|
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
|
||||||
|
|
||||||
vote_keypair_path=$SOLANA_CONFIG_DIR/validator-vote-keypair$label.json
|
vote_keypair_path=$SOLANA_CONFIG_DIR/validator-vote-keypair$label.json
|
||||||
ledger_config_dir=$SOLANA_CONFIG_DIR/validator-ledger$label
|
ledger_config_dir=$SOLANA_CONFIG_DIR/validator-ledger$label
|
||||||
state_dir="$SOLANA_CONFIG_DIR"/validator-state$label
|
state_dir="$SOLANA_CONFIG_DIR"/validator-state$label
|
||||||
storage_keypair_path=$SOLANA_CONFIG_DIR/validator-storage-keypair$label.json
|
|
||||||
stake_keypair_path=$SOLANA_CONFIG_DIR/validator-stake-keypair$label.json
|
stake_keypair_path=$SOLANA_CONFIG_DIR/validator-stake-keypair$label.json
|
||||||
|
storage_keypair_path=$SOLANA_CONFIG_DIR/validator-storage-keypair$label.json
|
||||||
configured_flag=$SOLANA_CONFIG_DIR/validator$label.configured
|
configured_flag=$SOLANA_CONFIG_DIR/validator$label.configured
|
||||||
|
|
||||||
mkdir -p "$SOLANA_CONFIG_DIR"
|
|
||||||
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
|
|
||||||
[[ -r "$vote_keypair_path" ]] || $solana_keygen new -o "$vote_keypair_path"
|
|
||||||
[[ -r "$stake_keypair_path" ]] || $solana_keygen new -o "$stake_keypair_path"
|
|
||||||
[[ -r "$storage_keypair_path" ]] || $solana_keygen new -o "$storage_keypair_path"
|
|
||||||
|
|
||||||
default_arg --entrypoint "$entrypoint_address"
|
default_arg --entrypoint "$entrypoint_address"
|
||||||
if ((airdrops_enabled)); then
|
if ((airdrops_enabled)); then
|
||||||
default_arg --rpc-drone-address "${entrypoint_address%:*}:9900"
|
default_arg --rpc-drone-address "${entrypoint_address%:*}:9900"
|
||||||
@ -356,29 +327,14 @@ else
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
|
||||||
|
|
||||||
if [[ $node_type != replicator ]]; then
|
if [[ $node_type != replicator ]]; then
|
||||||
accounts_config_dir="$state_dir"/accounts
|
accounts_config_dir="$state_dir"/accounts
|
||||||
snapshot_config_dir="$state_dir"/snapshots
|
snapshot_config_dir="$state_dir"/snapshots
|
||||||
|
|
||||||
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
|
|
||||||
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
|
|
||||||
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
|
|
||||||
|
|
||||||
cat <<EOF
|
|
||||||
======================[ $node_type configuration ]======================
|
|
||||||
identity pubkey: $identity_pubkey
|
|
||||||
vote pubkey: $vote_pubkey
|
|
||||||
storage pubkey: $storage_pubkey
|
|
||||||
ledger: $ledger_config_dir
|
|
||||||
accounts: $accounts_config_dir
|
|
||||||
snapshots: $snapshot_config_dir
|
|
||||||
========================================================================
|
|
||||||
EOF
|
|
||||||
|
|
||||||
default_arg --identity "$identity_keypair_path"
|
default_arg --identity "$identity_keypair_path"
|
||||||
default_arg --voting-keypair "$vote_keypair_path"
|
default_arg --voting-keypair "$vote_keypair_path"
|
||||||
default_arg --vote-account "$vote_pubkey"
|
|
||||||
default_arg --storage-keypair "$storage_keypair_path"
|
default_arg --storage-keypair "$storage_keypair_path"
|
||||||
default_arg --ledger "$ledger_config_dir"
|
default_arg --ledger "$ledger_config_dir"
|
||||||
default_arg --accounts "$accounts_config_dir"
|
default_arg --accounts "$accounts_config_dir"
|
||||||
@ -397,72 +353,137 @@ if [[ -z $CI ]]; then # Skip in CI
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
new_gensis_block() {
|
new_gensis_block() {
|
||||||
|
(
|
||||||
|
set -x
|
||||||
|
$rsync -r "${rsync_entrypoint_url:?}"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR"
|
||||||
|
) || (
|
||||||
|
echo "Error: failed to rsync genesis ledger"
|
||||||
|
)
|
||||||
|
|
||||||
! diff -q "$SOLANA_RSYNC_CONFIG_DIR"/ledger/genesis.bin "$ledger_config_dir"/genesis.bin >/dev/null 2>&1
|
! diff -q "$SOLANA_RSYNC_CONFIG_DIR"/ledger/genesis.bin "$ledger_config_dir"/genesis.bin >/dev/null 2>&1
|
||||||
}
|
}
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
PS4="$(basename "$0"): "
|
PS4="$(basename "$0"): "
|
||||||
|
|
||||||
pid=
|
pid=
|
||||||
trap '[[ -n $pid ]] && kill "$pid" >/dev/null 2>&1 && wait "$pid"' INT TERM ERR
|
kill_fullnode() {
|
||||||
|
if [[ -n $pid ]]; then
|
||||||
|
declare _pid=$pid
|
||||||
|
pid=
|
||||||
|
echo "killing pid $_pid"
|
||||||
|
kill "$_pid" || true
|
||||||
|
wait "$_pid" || true
|
||||||
|
echo "$_pid killed"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
trap 'kill_fullnode' INT TERM ERR
|
||||||
|
|
||||||
while true; do
|
while true; do
|
||||||
if new_gensis_block; then
|
if [[ $node_type != bootstrap_leader ]] && new_gensis_block; then
|
||||||
# If the genesis block has changed remove the now stale ledger and vote
|
# If the genesis block has changed remove the now stale ledger and
|
||||||
# keypair for the node and start all over again
|
# vote/stake/storage keypairs for the node and start all over again
|
||||||
(
|
(
|
||||||
set -x
|
set -x
|
||||||
rm -rf "$ledger_config_dir" "$state_dir" "$configured_flag"
|
rm -rf "$ledger_config_dir" "$state_dir" "$configured_flag"
|
||||||
)
|
)
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then
|
|
||||||
if [[ $node_type = bootstrap_leader ]]; then
|
|
||||||
ledger_not_setup "$SOLANA_RSYNC_CONFIG_DIR/ledger does not exist"
|
|
||||||
elif [[ $node_type = validator ]]; then
|
|
||||||
(
|
|
||||||
SECONDS=0
|
|
||||||
set -x
|
|
||||||
cd "$SOLANA_RSYNC_CONFIG_DIR"
|
|
||||||
$rsync -qPr "${rsync_entrypoint_url:?}"/config/{ledger,state.tgz} .
|
|
||||||
echo "Fetched snapshot in $SECONDS seconds"
|
|
||||||
) || true
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
(
|
|
||||||
set -x
|
|
||||||
if [[ $node_type = validator ]]; then
|
if [[ $node_type = validator ]]; then
|
||||||
if [[ -f "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz ]]; then
|
$solana_keygen new -f -o "$vote_keypair_path"
|
||||||
mkdir -p "$state_dir"
|
$solana_keygen new -f -o "$stake_keypair_path"
|
||||||
|
$solana_keygen new -f -o "$storage_keypair_path"
|
||||||
|
fi
|
||||||
|
if [[ $node_type = replicator ]]; then
|
||||||
|
$solana_keygen new -f -o "$storage_keypair_path"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ $node_type = replicator ]]; then
|
||||||
|
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
|
||||||
|
setup_replicator_account "${entrypoint_address%:*}" \
|
||||||
|
"$node_lamports"
|
||||||
|
|
||||||
|
cat <<EOF
|
||||||
|
======================[ $node_type configuration ]======================
|
||||||
|
replicator pubkey: $identity_pubkey
|
||||||
|
storage pubkey: $storage_pubkey
|
||||||
|
ledger: $ledger_config_dir
|
||||||
|
======================================================================
|
||||||
|
EOF
|
||||||
|
|
||||||
|
else
|
||||||
|
if [[ $node_type = bootstrap_leader && ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then
|
||||||
|
ledger_not_setup "$SOLANA_RSYNC_CONFIG_DIR/ledger does not exist"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ! -d "$ledger_config_dir" ]]; then
|
||||||
|
if [[ $node_type = validator ]]; then
|
||||||
|
(
|
||||||
|
cd "$SOLANA_RSYNC_CONFIG_DIR"
|
||||||
|
|
||||||
|
echo "Rsyncing genesis ledger from ${rsync_entrypoint_url:?}..."
|
||||||
SECONDS=
|
SECONDS=
|
||||||
|
while ! $rsync -Pr "${rsync_entrypoint_url:?}"/config/ledger .; do
|
||||||
|
echo "Genesis ledger rsync failed"
|
||||||
|
sleep 5
|
||||||
|
done
|
||||||
|
echo "Fetched genesis ledger in $SECONDS seconds"
|
||||||
|
|
||||||
|
if ((boot_from_snapshot)); then
|
||||||
|
SECONDS=
|
||||||
|
echo "Rsyncing state snapshot ${rsync_entrypoint_url:?}..."
|
||||||
|
if ! $rsync -P "${rsync_entrypoint_url:?}"/config/state.tgz .; then
|
||||||
|
echo "State snapshot rsync failed"
|
||||||
|
rm -f "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
|
||||||
|
exit
|
||||||
|
fi
|
||||||
|
echo "Fetched snapshot in $SECONDS seconds"
|
||||||
|
|
||||||
|
SECONDS=
|
||||||
|
mkdir -p "$state_dir"
|
||||||
|
(
|
||||||
|
set -x
|
||||||
tar -C "$state_dir" -zxf "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
|
tar -C "$state_dir" -zxf "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
|
||||||
|
)
|
||||||
echo "Extracted snapshot in $SECONDS seconds"
|
echo "Extracted snapshot in $SECONDS seconds"
|
||||||
fi
|
fi
|
||||||
fi
|
|
||||||
if [[ ! -d "$ledger_config_dir" ]]; then
|
|
||||||
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
|
|
||||||
fi
|
|
||||||
)
|
)
|
||||||
|
fi
|
||||||
|
|
||||||
if ((stake_lamports)); then
|
(
|
||||||
if [[ $node_type = validator ]]; then
|
set -x
|
||||||
|
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
|
||||||
|
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
|
||||||
|
stake_pubkey=$($solana_keygen pubkey "$stake_keypair_path")
|
||||||
|
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
|
||||||
|
replace_arg --vote-account "$vote_pubkey"
|
||||||
|
|
||||||
|
if [[ $node_type = validator ]] && ((stake_lamports)); then
|
||||||
setup_validator_accounts "${entrypoint_address%:*}" \
|
setup_validator_accounts "${entrypoint_address%:*}" \
|
||||||
"$identity_keypair_path" \
|
|
||||||
"$vote_keypair_path" \
|
|
||||||
"$stake_keypair_path" \
|
|
||||||
"$storage_keypair_path" \
|
|
||||||
"$node_lamports" \
|
"$node_lamports" \
|
||||||
"$stake_lamports"
|
"$stake_lamports"
|
||||||
elif [[ $node_type = replicator ]]; then
|
|
||||||
setup_replicator_account "${entrypoint_address%:*}" \
|
|
||||||
"$identity_keypair_path" \
|
|
||||||
"$storage_keypair_path" \
|
|
||||||
"$node_lamports"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
cat <<EOF
|
||||||
|
======================[ $node_type configuration ]======================
|
||||||
|
identity pubkey: $identity_pubkey
|
||||||
|
vote pubkey: $vote_pubkey
|
||||||
|
stake pubkey: $stake_pubkey
|
||||||
|
storage pubkey: $storage_pubkey
|
||||||
|
ledger: $ledger_config_dir
|
||||||
|
accounts: $accounts_config_dir
|
||||||
|
snapshots: $snapshot_config_dir
|
||||||
|
========================================================================
|
||||||
|
EOF
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "$PS4$program ${args[*]}"
|
echo "$PS4$program ${args[*]}"
|
||||||
|
|
||||||
$program "${args[@]}" &
|
$program "${args[@]}" &
|
||||||
pid=$!
|
pid=$!
|
||||||
|
echo "pid: $pid"
|
||||||
oom_score_adj "$pid" 1000
|
oom_score_adj "$pid" 1000
|
||||||
|
|
||||||
if ((no_restart)); then
|
if ((no_restart)); then
|
||||||
@@ -488,9 +509,15 @@ while true; do
 new_state_archive="$SOLANA_RSYNC_CONFIG_DIR"/new_state.tgz
 (
 rm -rf "$new_state_dir" "$new_state_archive"
-cp -a "$state_dir" "$new_state_dir"
+mkdir -p "$new_state_dir"
+# When saving the state, it's necessary to save the snapshots first,
+# followed by the accounts folder. This avoids the condition where incomplete
+# accounts get picked up while they are still being updated and are not frozen yet.
+cp -a "$state_dir"/snapshots "$new_state_dir"
+cp -a "$state_dir"/accounts "$new_state_dir"
 cd "$new_state_dir"
-tar zcf "$new_state_archive" ./*
+tar zcfS "$new_state_archive" ./*
 )
 ln -f "$new_state_archive" "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
 rm -rf "$new_state_dir" "$new_state_archive"
@@ -504,21 +531,16 @@ while true; do

 if ((poll_for_new_genesis_block && --secs_to_next_genesis_poll == 0)); then
 echo "Polling for new genesis block..."
-(
-set -x
-$rsync -r "${rsync_entrypoint_url:?}"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR"
-) || (
-echo "Error: failed to rsync ledger"
-)
-new_gensis_block && break
+if new_gensis_block; then
+echo "############## New genesis detected, restarting $node_type ##############"
+break
+fi
 secs_to_next_genesis_poll=60
 fi

 done

-echo "############## New genesis detected, restarting $node_type ##############"
-kill "$pid" || true
-wait "$pid" || true
+kill_fullnode
 # give the cluster time to come back up
 (
 set -x
@@ -23,7 +23,6 @@ default_arg --ledger "$SOLANA_RSYNC_CONFIG_DIR"/ledger
 default_arg --mint "$SOLANA_CONFIG_DIR"/mint-keypair.json
 default_arg --lamports 100000000000000
 default_arg --bootstrap-leader-lamports 424242
-default_arg --storage-mining-pool-lamports 100000000
 default_arg --target-lamports-per-signature 42
 default_arg --target-signatures-per-slot 42
 default_arg --hashes-per-tick auto
@@ -25,6 +25,7 @@ entrypointIp=
 publicNetwork=
 netBasename=
 sshPrivateKey=
+letsEncryptDomainName=
 externalNodeSshKey=
 sshOptions=()
 fullnodeIpList=()

net/gce.sh
@@ -63,10 +63,12 @@ blockstreamer=false
 fullNodeBootDiskSizeInGb=1000
 clientBootDiskSizeInGb=75
 replicatorBootDiskSizeInGb=1000
+fullNodeAdditionalDiskSizeInGb=
 externalNodes=false
 failOnValidatorBootupFailure=true

 publicNetwork=false
+letsEncryptDomainName=
 enableGpu=false
 customAddress=
 zones=()
@@ -122,7 +124,13 @@ Manage testnet instances
 * For EC2, [address] is the "allocation ID" of the desired
 Elastic IP.
 -d [disk-type] - Specify a boot disk type (default None) Use pd-ssd to get ssd on GCE.
+--letsencrypt [dns name] - Attempt to generate a TLS certificate using this
+DNS name (useful only when the -a and -P options
+are also provided)
+--fullnode-additional-disk-size-gb [number]
+- Add an additional [number] GB SSD to all fullnodes to store the config-local directory.
+If not set, config-local will be written to the boot disk by default.
+Only supported on GCE.
 config-specific options:
 -P - Use public network IP addresses (default: $publicNetwork)

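For orientation, a hypothetical `net/gce.sh create` invocation that exercises both new long options might look like the sketch below. The prefix, address, zone and DNS name are illustrative placeholders rather than values taken from this change, and `-p` is assumed to be the usual instance-name-prefix option:

    net/gce.sh create -p my-testnet -P -a 1.2.3.4 -z us-west1-b \
      --letsencrypt testnet.example.org \
      --fullnode-additional-disk-size-gb 2048

As the help text notes, `--letsencrypt` is only useful when `-a` and `-P` are also supplied, and the additional-disk option is honored only on GCE.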
@@ -136,14 +144,34 @@ EOF
 exit $exitcode
 }

 command=$1
 [[ -n $command ]] || usage
 shift
 [[ $command = create || $command = config || $command = info || $command = delete ]] ||
 usage "Invalid command: $command"

-while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt; do
+shortArgs=()
+while [[ -n $1 ]]; do
+if [[ ${1:0:2} = -- ]]; then
+if [[ $1 = --letsencrypt ]]; then
+letsEncryptDomainName="$2"
+shift 2
+elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
+fullNodeAdditionalDiskSizeInGb="$2"
+shift 2
+elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
+shortArgs+=("$1")
+shift
+else
+usage "Unknown long option: $1"
+fi
+else
+shortArgs+=("$1")
+shift
+fi
+done

+while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt "${shortArgs[@]}"; do
 case $opt in
 h | \?)
 usage
@@ -199,7 +227,6 @@ while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt; do
 ;;
 esac
 done
-shift $((OPTIND - 1))

 [[ ${#zones[@]} -gt 0 ]] || zones+=("$(cloud_DefaultZone)")

@@ -217,8 +244,14 @@ case $cloudProvider in
 gce)
 ;;
 ec2)
+if [[ -n $fullNodeAdditionalDiskSizeInGb ]] ; then
+usage "Error: --fullnode-additional-disk-size-gb currently only supported with cloud provider: gce"
+fi
 ;;
 azure)
+if [[ -n $fullNodeAdditionalDiskSizeInGb ]] ; then
+usage "Error: --fullnode-additional-disk-size-gb currently only supported with cloud provider: gce"
+fi
 ;;
 *)
 echo "Error: Unknown cloud provider: $cloudProvider"
@@ -328,6 +361,7 @@ prepareInstancesAndWriteConfigFile() {
 netBasename=$prefix
 publicNetwork=$publicNetwork
 sshPrivateKey=$sshPrivateKey
+letsEncryptDomainName=$letsEncryptDomainName
 EOF
 fi
 touch "$geoipConfigFile"
@@ -598,6 +632,7 @@ $(
 disable-background-upgrades.sh \
 create-solana-user.sh \
 add-solana-user-authorized_keys.sh \
+install-certbot.sh \
 install-earlyoom.sh \
 install-libssl-compatability.sh \
 install-nodejs.sh \
@@ -611,6 +646,10 @@ $(
 cat enable-nvidia-persistence-mode.sh
 fi

+if [[ -n $fullNodeAdditionalDiskSizeInGb ]]; then
+cat mount-additional-disk.sh
+fi

 )

 cat > /etc/motd <<EOM
@@ -637,7 +676,7 @@ EOF
 else
 cloud_CreateInstances "$prefix" "$prefix-bootstrap-leader" 1 \
 "$enableGpu" "$bootstrapLeaderMachineType" "${zones[0]}" "$fullNodeBootDiskSizeInGb" \
-"$startupScript" "$bootstrapLeaderAddress" "$bootDiskType"
+"$startupScript" "$bootstrapLeaderAddress" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb"
 fi

 if [[ $additionalFullNodeCount -gt 0 ]]; then
@@ -657,7 +696,7 @@ EOF
 fi
 cloud_CreateInstances "$prefix" "$prefix-$zone-fullnode" "$numNodesPerZone" \
 "$enableGpu" "$fullNodeMachineType" "$zone" "$fullNodeBootDiskSizeInGb" \
-"$startupScript" "" "$bootDiskType" &
+"$startupScript" "" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb" &
 done

 wait

net/net.sh
@@ -50,17 +50,18 @@ Operate a configured testnet
 -c bench-tps=2="--tx_count 25000"
 This will start 2 bench-tps clients, and supply "--tx_count 25000"
 to the bench-tps client.
+-n NUM_FULL_NODES - Number of fullnodes to apply command to.

 --hashes-per-tick NUM_HASHES|sleep|auto
 - Override the default --hashes-per-tick for the cluster
--n NUM_FULL_NODES - Number of fullnodes to apply command to.
--x Accounts and Stakes for external nodes
-- A YML file with a list of account pubkeys and corresponding stakes
-for external nodes
--s Num lamports per node in genesis block
-- Create account keypairs for internal nodes and assign these many lamports
+--lamports NUM_LAMPORTS_TO_MINT
+- Override the default 100000000000000 lamports minted in genesis
+--stake-internal-nodes NUM_LAMPORTS_PER_NODE
+- Amount to stake internal nodes in genesis block. If set, airdrops are disabled.
+--external-accounts-file FILE_PATH
+- A YML file with a list of account pubkeys and corresponding stakes for external nodes
+--no-snapshot
+- If set, disables booting validators from a snapshot
 sanity/start/update-specific options:
 -F - Discard validator nodes that didn't bootup successfully
 -o noLedgerVerify - Skip ledger verification
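To see how the new long options compose on a `net/net.sh` command line, here is a minimal sketch; the lamport amounts and the accounts-file path are illustrative placeholders, not values from this change:

    net/net.sh start -n 5 \
      --lamports 200000000000000 \
      --stake-internal-nodes 1000000000 \
      --external-accounts-file ~/external-accounts.yml \
      --no-snapshot

Per the descriptions above, supplying `--stake-internal-nodes` also disables airdrops, and `--no-snapshot` disables booting validators from a snapshot.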
@@ -96,7 +97,9 @@ failOnValidatorBootupFailure=true
 genesisOptions=
 numFullnodesRequested=
 externalPrimordialAccountsFile=
+remoteExternalPrimordialAccountsFile=
 stakeNodesInGenesisBlock=
+maybeNoSnapshot=""

 command=$1
 [[ -n $command ]] || usage
@@ -111,9 +114,22 @@ while [[ -n $1 ]]; do
 elif [[ $1 = --target-lamports-per-signature ]]; then
 genesisOptions="$genesisOptions $1 $2"
 shift 2
+elif [[ $1 = --lamports ]]; then
+genesisOptions="$genesisOptions $1 $2"
+shift 2
+elif [[ $1 = --no-snapshot ]]; then
+maybeNoSnapshot="$1"
+shift 1
 elif [[ $1 = --deploy-update ]]; then
 updatePlatforms="$updatePlatforms $2"
 shift 2
+elif [[ $1 = --stake-internal-nodes ]]; then
+stakeNodesInGenesisBlock="$2"
+shift 2
+elif [[ $1 = --external-accounts-file ]]; then
+externalPrimordialAccountsFile="$2"
+remoteExternalPrimordialAccountsFile=/tmp/external-primordial-accounts.yml
+shift 2
 else
 usage "Unknown long option: $1"
 fi
@@ -123,7 +139,7 @@ while [[ -n $1 ]]; do
 fi
 done

-while getopts "h?T:t:o:f:rD:c:Fn:i:x:s:" opt "${shortArgs[@]}"; do
+while getopts "h?T:t:o:f:rD:c:Fn:i:" opt "${shortArgs[@]}"; do
 case $opt in
 h | \?)
 usage
@@ -202,12 +218,6 @@ while getopts "h?T:t:o:f:rD:c:Fn:i:x:s:" opt "${shortArgs[@]}"; do
 F)
 failOnValidatorBootupFailure=false
 ;;
-x)
-externalPrimordialAccountsFile=$OPTARG
-;;
-s)
-stakeNodesInGenesisBlock=$OPTARG
-;;
 i)
 nodeAddress=$OPTARG
 ;;
@@ -321,7 +331,7 @@ startBootstrapLeader() {
 set -x
 startCommon "$ipAddress" || exit 1
 [[ -z "$externalPrimordialAccountsFile" ]] || rsync -vPrc -e "ssh ${sshOptions[*]}" "$externalPrimordialAccountsFile" \
-"$ipAddress:~/solana/config/external-primodial-accounts.yml"
+"$ipAddress:$remoteExternalPrimordialAccountsFile"
 case $deployMethod in
 tar)
 rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
@@ -343,12 +353,13 @@ startBootstrapLeader() {
 \"$RUST_LOG\" \
 $skipSetup \
 $failOnValidatorBootupFailure \
-\"$externalPrimordialAccountsFile\" \
+\"$remoteExternalPrimordialAccountsFile\" \
 \"$stakeNodesInGenesisBlock\" \
 $nodeIndex \
 $numBenchTpsClients \"$benchTpsExtraArgs\" \
 $numBenchExchangeClients \"$benchExchangeExtraArgs\" \
 \"$genesisOptions\" \
+$maybeNoSnapshot \
 "
 ) >> "$logFile" 2>&1 || {
 cat "$logFile"
@@ -368,6 +379,23 @@ startNode() {
 (
 set -x
 startCommon "$ipAddress"

+if [[ $nodeType = blockstreamer ]] && [[ -n $letsEncryptDomainName ]]; then
+#
+# Create/renew TLS certificate
+#
+declare localArchive=~/letsencrypt-"$letsEncryptDomainName".tgz
+if [[ -r "$localArchive" ]]; then
+timeout 30s scp "${sshOptions[@]}" "$localArchive" "$ipAddress:letsencrypt.tgz"
+fi
+ssh "${sshOptions[@]}" -n "$ipAddress" \
+"sudo -H /certbot-restore.sh $letsEncryptDomainName maintainers@solana.com"
+rm -f letsencrypt.tgz
+timeout 30s scp "${sshOptions[@]}" "$ipAddress:/letsencrypt.tgz" letsencrypt.tgz
+test -s letsencrypt.tgz # Ensure non-empty before overwriting $localArchive
+cp letsencrypt.tgz "$localArchive"
+fi

 ssh "${sshOptions[@]}" -n "$ipAddress" \
 "./solana/net/remote/remote-node.sh \
 $deployMethod \
@@ -377,10 +405,11 @@ startNode() {
 \"$RUST_LOG\" \
 $skipSetup \
 $failOnValidatorBootupFailure \
-\"$externalPrimordialAccountsFile\" \
+\"$remoteExternalPrimordialAccountsFile\" \
 \"$stakeNodesInGenesisBlock\" \
 $nodeIndex \
 \"$genesisOptions\" \
+$maybeNoSnapshot \
 "
 ) >> "$logFile" 2>&1 &
 declare pid=$!
@@ -477,7 +506,8 @@ start() {
 declare updateDownloadUrl=http://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
 (
 set -x
-curl -o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl"
+curl --retry 5 --retry-delay 2 --retry-connrefused \
+-o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl"
 )
 tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2
 else
@@ -34,4 +34,6 @@ loadConfigFile
 PATH="$HOME"/.cargo/bin:"$PATH"

 set -x
-scripts/solana-install-deploy.sh localhost "$releaseChannel" "$updatePlatform"
+scripts/solana-install-deploy.sh \
+--keypair config-local/mint-keypair.json \
+localhost "$releaseChannel" "$updatePlatform"
@@ -19,6 +19,7 @@ benchTpsExtraArgs="${12}"
 numBenchExchangeClients="${13}"
 benchExchangeExtraArgs="${14}"
 genesisOptions="${15}"
+noSnapshot="${16}"
 set +x
 export RUST_LOG

@@ -92,7 +93,7 @@ local|tar)
 SUDO_OK=1 source scripts/tune-system.sh

 (
-sudo scripts/oom-monitor.sh
+sudo SOLANA_METRICS_CONFIG="$SOLANA_METRICS_CONFIG" scripts/oom-monitor.sh
 ) > oom-monitor.log 2>&1 &
 echo $! > oom-monitor.pid
 scripts/net-stats.sh > net-stats.log 2>&1 &
@@ -170,6 +171,7 @@ local|tar)
 args+=(--no-airdrop)
 fi
 args+=(--init-complete-file "$initCompleteFile")
+args+=("$noSnapshot")
 nohup ./multinode-demo/validator.sh --bootstrap-leader "${args[@]}" > fullnode.log 2>&1 &
 waitForNodeToInit
 ;;
@@ -223,8 +225,15 @@ local|tar)
 if [[ -z $stakeNodesInGenesisBlock ]]; then
 ./multinode-demo/drone.sh > drone.log 2>&1 &
 fi

+# Grab the TLS cert generated by /certbot-restore.sh
+if [[ -f /.cert.pem ]]; then
+sudo install -o $UID -m 400 /.cert.pem /.key.pem .
+ls -l .cert.pem .key.pem
+fi

 export BLOCKEXPLORER_GEOIP_WHITELIST=$PWD/net/config/geoip.yml
-npm install @solana/blockexplorer@1
+npm install @solana/blockexplorer@1.21.0
 npx solana-blockexplorer > blockexplorer.log 2>&1 &

 # Confirm the blockexplorer is accessible
@@ -240,6 +249,7 @@ local|tar)
 fi

 args+=(--init-complete-file "$initCompleteFile")
+args+=("$noSnapshot")
 nohup ./multinode-demo/validator.sh "${args[@]}" > fullnode.log 2>&1 &
 waitForNodeToInit
 ;;
@@ -257,6 +267,7 @@ local|tar)
 if [[ $skipSetup != true ]]; then
 ./multinode-demo/clear-config.sh
 fi
+args+=("$noSnapshot")
 nohup ./multinode-demo/replicator.sh "${args[@]}" > fullnode.log 2>&1 &
 sleep 1
 ;;
@@ -183,13 +183,11 @@ if $installCheck && [[ -r update_manifest_keypair.json ]]; then

 (
 set -x
-update_manifest_pubkey=$($solana_keygen pubkey update_manifest_keypair.json)
 rm -rf install-data-dir
 $solana_install init \
 --no-modify-path \
 --data-dir install-data-dir \
 --url http://"$sanityTargetIp":8899 \
---pubkey "$update_manifest_pubkey"

 $solana_install info
 )
@@ -14,6 +14,8 @@ set -ex
 # 2. Inline ~/.ssh/id-solana-testnet.pub below
 cat > /solana-authorized_keys <<EOF
 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc=
+ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGqZAwAZeBl0buOMz4FpUYrtpwk1L5aGKlbd7lI8dpbSx5WVRPWCVKhWzsGMtDUIfmozdzJouk1LPyihghTDgsE=
+ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOk4jgcX/VWSk3j//wXeIynSQjsOt+AjYXM/XZUMa7R1Q8lfIJGK/qHLBP86CMXdpyEKJ5i37QLYOL+0VuRy0CI=
 EOF

 sudo -u solana bash -c "
@@ -309,3 +309,12 @@ cloud_FetchFile() {
 cloud_GetConfigValueFromInstanceName "$instanceName" osProfile.adminUsername
 scp "${config_value}@${publicIp}:${remoteFile}" "$localFile"
 }

+#
+# cloud_CreateAndAttachPersistentDisk
+#
+# Not yet implemented for this cloud provider
+cloud_CreateAndAttachPersistentDisk() {
+echo "ERROR: cloud_CreateAndAttachPersistentDisk is not yet implemented for azure"
+exit 1
+}
@@ -381,3 +381,12 @@ cloud_FetchFile() {
 "solana@$publicIp:$remoteFile" "$localFile"
 )
 }

+#
+# cloud_CreateAndAttachPersistentDisk
+#
+# Not yet implemented for this cloud provider
+cloud_CreateAndAttachPersistentDisk() {
+echo "ERROR: cloud_CreateAndAttachPersistentDisk is not yet implemented for ec2"
+exit 1
+}
@@ -81,7 +81,7 @@
 "FromPort": 3001,
 "IpRanges": [
 {
-"Description": "blockexplorer API port",
+"Description": "blockexplorer http API port",
 "CidrIp": "0.0.0.0/0"
 }
 ],
@@ -91,7 +91,26 @@
 "Ipv6Ranges": [
 {
 "CidrIpv6": "::/0",
-"Description": "blockexplorer API port"
+"Description": "blockexplorer http API port"
+}
+]
+},
+{
+"PrefixListIds": [],
+"FromPort": 3443,
+"IpRanges": [
+{
+"Description": "blockexplorer https API port",
+"CidrIp": "0.0.0.0/0"
+}
+],
+"ToPort": 3443,
+"IpProtocol": "tcp",
+"UserIdGroupPairs": [],
+"Ipv6Ranges": [
+{
+"CidrIpv6": "::/0",
+"Description": "blockexplorer https API port"
 }
 ]
 },
@@ -126,6 +126,7 @@ cloud_CreateInstances() {
 declare optionalStartupScript="$8"
 declare optionalAddress="$9"
 declare optionalBootDiskType="${10}"
+declare optionalAdditionalDiskSize="${11}"

 if $enableGpu; then
 # Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
@@ -198,6 +199,22 @@ cloud_CreateInstances() {
 set -x
 gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
 )

+if [[ -n $optionalAdditionalDiskSize ]]; then
+if [[ $numNodes = 1 ]]; then
+(
+set -x
+cloud_CreateAndAttachPersistentDisk "${namePrefix}" "$optionalAdditionalDiskSize" "pd-ssd" "$zone"
+)
+else
+for node in $(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes"); do
+(
+set -x
+cloud_CreateAndAttachPersistentDisk "${node}" "$optionalAdditionalDiskSize" "pd-ssd" "$zone"
+)
+done
+fi
+fi
 }

 #
@@ -256,3 +273,31 @@ cloud_FetchFile() {
 gcloud compute scp --zone "$zone" "$instanceName:$remoteFile" "$localFile"
 )
 }

+#
+# cloud_CreateAndAttachPersistentDisk [instanceName] [diskSize] [diskType]
+#
+# Create a persistent disk and attach it to a pre-existing VM instance.
+# Set disk to auto-delete upon instance deletion
+#
+cloud_CreateAndAttachPersistentDisk() {
+declare instanceName="$1"
+declare diskSize="$2"
+declare diskType="$3"
+declare zone="$4"
+diskName="${instanceName}-pd"

+gcloud beta compute disks create "$diskName" \
+--size "$diskSize" \
+--type "$diskType" \
+--zone "$zone"

+gcloud compute instances attach-disk "$instanceName" \
+--disk "$diskName" \
+--zone "$zone"

+gcloud compute instances set-disk-auto-delete "$instanceName" \
+--disk "$diskName" \
+--zone "$zone" \
+--auto-delete
+}

net/scripts/install-certbot.sh (new executable file)
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+set -ex

+[[ $(uname) = Linux ]] || exit 1
+[[ $USER = root ]] || exit 1

+apt-get update
+add-apt-repository --yes ppa:certbot/certbot
+apt-get --assume-yes install certbot

+cat > /certbot-restore.sh <<'EOF'
+#!/usr/bin/env bash
+set -e

+domain=$1
+email=$2

+if [[ $USER != root ]]; then
+echo "Run as root"
+exit 1
+fi

+if [[ -f /.cert.pem ]]; then
+echo "Certificate already initialized"
+exit 0
+fi

+set -x
+if [[ -r letsencrypt.tgz ]]; then
+tar -C / -zxf letsencrypt.tgz
+fi

+cd /
+rm -f letsencrypt.tgz

+maybeDryRun=
+# Uncomment during testing to avoid hitting LetsEncrypt API limits while iterating
+#maybeDryRun="--dry-run"

+certbot certonly --standalone -d "$domain" --email "$email" --agree-tos -n $maybeDryRun

+tar zcf letsencrypt.tgz /etc/letsencrypt
+ls -l letsencrypt.tgz

+# Copy certificates to / for easy access without knowing the value of "$domain"
+rm -f /.key.pem /.cert.pem
+cp /etc/letsencrypt/live/$domain/privkey.pem /.key.pem
+cp /etc/letsencrypt/live/$domain/cert.pem /.cert.pem

+EOF

+chmod +x /certbot-restore.sh

net/scripts/mount-additional-disk.sh (new executable file)
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+set -x

+mount_point=/mnt/extra-disk
+disk=sdb
+if ! lsblk | grep -q ${disk} ; then
+echo "${disk} does not exist"
+else
+if mount | grep -q ${disk} ; then
+echo "${disk} is already mounted"
+else
+sudo mkfs.ext4 -F /dev/"$disk"
+sudo mkdir -p "$mount_point"
+sudo mount /dev/"$disk" "$mount_point"
+sudo chmod a+w "$mount_point"
+if ! mount | grep -q ${mount_point} ; then
+echo "${disk} failed to mount!"
+exit 1
+fi
+fi
+fi
@@ -1,6 +1,6 @@
 [package]
 name = "solana-netutil"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana Network Utilities"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -15,7 +15,7 @@ log = "0.4.2"
 nix = "0.14.1"
 rand = "0.6.1"
 socket2 = "0.3.9"
-solana-logger = { path = "../logger", version = "0.16.0" }
+solana-logger = { path = "../logger", version = "0.16.5" }
 tokio = "0.1"

 [lib]
@@ -1,7 +1,7 @@
 [package]
 name = "solana-bpf-programs"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.16.0"
+version = "0.16.5"
 documentation = "https://docs.rs/solana"
 homepage = "https://solana.com/"
 readme = "README.md"
@@ -9,6 +9,7 @@ repository = "https://github.com/solana-labs/solana"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 license = "Apache-2.0"
 edition = "2018"
+publish = false

 [features]
 bpf_c = []
@@ -21,10 +22,10 @@ walkdir = "2"
 bincode = "1.1.4"
 byteorder = "1.3.2"
 elf = "0.0.10"
-solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.0" }
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-runtime = { path = "../../runtime", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.5" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-runtime = { path = "../../runtime", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
 solana_rbpf = "=0.1.13"

 [[bench]]
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-128bit"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF iter program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,8 +12,8 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
-solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
+solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-128bit-dep"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF many-args-dep program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-alloc"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF alloc program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-dep-crate"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF dep-crate program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ edition = "2018"

 [dependencies]
 byteorder = { version = "1", default-features = false }
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-iter"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF iter program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-many-args"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF many-args program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,8 +12,8 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
-solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
+solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-many-args-dep"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF many-args-dep program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-noop"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF noop program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-panic"
-version = "0.15.0"
+version = "0.16.5"
 description = "Solana BPF iter program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ homepage = "https://solana.com/"
 edition = "2018"

 [dependencies]
-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -3,7 +3,7 @@

 [package]
 name = "solana-bpf-rust-tick-height"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF noop program written in Rust"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -14,7 +14,7 @@ edition = "2018"
 [dependencies]
 byteorder = { version = "1", default-features = false }

-solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
+solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }

 [workspace]
 members = []
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-loader-api"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF Loader"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ byteorder = "1.3.2"
 libc = "0.2.58"
 log = "0.4.2"
 serde = "1.0.92"
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
 solana_rbpf = "=0.1.13"

 [lib]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-loader-program"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana BPF Loader"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ edition = "2018"

 [dependencies]
 log = "0.4.2"
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
-solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.0" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
+solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.5" }

 [lib]
 crate-type = ["lib", "cdylib"]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-budget-api"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana Budget program API"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -16,10 +16,10 @@ num-derive = "0.2"
 num-traits = "0.2"
 serde = "1.0.92"
 serde_derive = "1.0.92"
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }

 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.16.0" }
+solana-runtime = { path = "../../runtime", version = "0.16.5" }

 [lib]
 crate-type = ["lib"]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-budget-program"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana budget program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,9 +10,9 @@ edition = "2018"

 [dependencies]
 log = "0.4.2"
-solana-budget-api = { path = "../budget_api", version = "0.16.0" }
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-budget-api = { path = "../budget_api", version = "0.16.5" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }

 [lib]
 crate-type = ["lib", "cdylib"]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-config-api"
-version = "0.16.0"
+version = "0.16.5"
 description = "config program API"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,13 +13,12 @@ bincode = "1.1.4"
 log = "0.4.2"
 serde = "1.0.92"
 serde_derive = "1.0.92"
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }

 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.16.0" }
+solana-runtime = { path = "../../runtime", version = "0.16.5" }

 [lib]
 crate-type = ["lib"]
 name = "solana_config_api"

@@ -1,26 +1,59 @@
 use crate::id;
 use crate::ConfigState;
+use bincode::serialize;
+use serde_derive::{Deserialize, Serialize};
 use solana_sdk::instruction::{AccountMeta, Instruction};
 use solana_sdk::pubkey::Pubkey;
+use solana_sdk::short_vec;
 use solana_sdk::system_instruction;

+/// A collection of keys to be stored in Config account data.
+#[derive(Debug, Default, Deserialize, Serialize)]
+pub struct ConfigKeys {
+// Each key tuple comprises a unique `Pubkey` identifier,
+// and `bool` whether that key is a signer of the data
+#[serde(with = "short_vec")]
+pub keys: Vec<(Pubkey, bool)>,
+}

+impl ConfigKeys {
+pub fn serialized_size(keys: Vec<(Pubkey, bool)>) -> usize {
+serialize(&ConfigKeys { keys })
+.unwrap_or_else(|_| vec![])
+.len()
+}
+}

 /// Create a new, empty configuration account
 pub fn create_account<T: ConfigState>(
 from_account_pubkey: &Pubkey,
 config_account_pubkey: &Pubkey,
 lamports: u64,
+keys: Vec<(Pubkey, bool)>,
 ) -> Instruction {
+let space = T::max_space() + ConfigKeys::serialized_size(keys) as u64;
 system_instruction::create_account(
 from_account_pubkey,
 config_account_pubkey,
 lamports,
-T::max_space(),
+space,
 &id(),
 )
 }

 /// Store new data in a configuration account
-pub fn store<T: ConfigState>(config_account_pubkey: &Pubkey, data: &T) -> Instruction {
-let account_metas = vec![AccountMeta::new(*config_account_pubkey, true)];
-Instruction::new(id(), data, account_metas)
+pub fn store<T: ConfigState>(
+config_account_pubkey: &Pubkey,
+is_config_signer: bool,
+keys: Vec<(Pubkey, bool)>,
+data: &T,
+) -> Instruction {
+let mut account_metas = vec![AccountMeta::new(*config_account_pubkey, is_config_signer)];
+for (signer_pubkey, _) in keys.iter().filter(|(_, is_signer)| *is_signer) {
+if signer_pubkey != config_account_pubkey {
+account_metas.push(AccountMeta::new(*signer_pubkey, true));
+}
+}
+let account_data = (ConfigKeys { keys }, data);
+Instruction::new(id(), &account_data, account_metas)
 }
@ -1,5 +1,7 @@
|
|||||||
//! Config program
|
//! Config program
|
||||||
|
|
||||||
|
use crate::config_instruction::ConfigKeys;
|
||||||
|
use bincode::deserialize;
|
||||||
use log::*;
|
use log::*;
|
||||||
use solana_sdk::account::KeyedAccount;
|
use solana_sdk::account::KeyedAccount;
|
||||||
use solana_sdk::instruction::InstructionError;
|
use solana_sdk::instruction::InstructionError;
|
||||||
@ -10,17 +12,90 @@ pub fn process_instruction(
|
|||||||
keyed_accounts: &mut [KeyedAccount],
|
 keyed_accounts: &mut [KeyedAccount],
 data: &[u8],
 ) -> Result<(), InstructionError> {
+    let key_list: ConfigKeys = deserialize(data).map_err(|err| {
+        error!("Invalid ConfigKeys data: {:?} {:?}", data, err);
+        InstructionError::InvalidInstructionData
+    })?;
+
+    let current_data: ConfigKeys = deserialize(&keyed_accounts[0].account.data).map_err(|err| {
+        error!("Invalid data in account[0]: {:?} {:?}", data, err);
+        InstructionError::InvalidAccountData
+    })?;
+    let current_signer_keys: Vec<Pubkey> = current_data
+        .keys
+        .iter()
+        .filter(|(_, is_signer)| *is_signer)
+        .map(|(pubkey, _)| *pubkey)
+        .collect();
+
+    if current_signer_keys.is_empty() {
+        // Config account keypair must be a signer on account initialization,
+        // or when no signers specified in Config data
         if keyed_accounts[0].signer_key().is_none() {
             error!("account[0].signer_key().is_none()");
             Err(InstructionError::MissingRequiredSignature)?;
         }
+    }
+
+    let mut counter = 0;
+    for (i, (signer, _)) in key_list
+        .keys
+        .iter()
+        .filter(|(_, is_signer)| *is_signer)
+        .enumerate()
+    {
+        counter += 1;
+        if signer != keyed_accounts[0].unsigned_key() {
+            let account_index = i + 1;
+            let signer_account = keyed_accounts.get(account_index);
+            if signer_account.is_none() {
+                error!("account {:?} is not in account list", signer);
+                Err(InstructionError::MissingRequiredSignature)?;
+            }
+            let signer_key = signer_account.unwrap().signer_key();
+            if signer_key.is_none() {
+                error!("account {:?} signer_key().is_none()", signer);
+                Err(InstructionError::MissingRequiredSignature)?;
+            }
+            if signer_key.unwrap() != signer {
+                error!(
+                    "account[{:?}].signer_key() does not match Config data)",
+                    account_index
+                );
+                Err(InstructionError::MissingRequiredSignature)?;
+            }
+            // If Config account is already initialized, update signatures must match Config data
+            if !current_data.keys.is_empty()
+                && current_signer_keys
+                    .iter()
+                    .find(|&pubkey| pubkey == signer)
+                    .is_none()
+            {
+                error!("account {:?} is not in stored signer list", signer);
+                Err(InstructionError::MissingRequiredSignature)?;
+            }
+        } else if keyed_accounts[0].signer_key().is_none() {
+            error!("account[0].signer_key().is_none()");
+            Err(InstructionError::MissingRequiredSignature)?;
+        }
+    }
+
+    // Check for Config data signers not present in incoming account update
+    if current_signer_keys.len() > counter {
+        error!(
+            "too few signers: {:?}; expected: {:?}",
+            counter,
+            current_signer_keys.len()
+        );
+        Err(InstructionError::MissingRequiredSignature)?;
+    }
+
     if keyed_accounts[0].account.data.len() < data.len() {
         error!("instruction data too large");
         Err(InstructionError::InvalidInstructionData)?;
     }

-    keyed_accounts[0].account.data[0..data.len()].copy_from_slice(data);
+    keyed_accounts[0].account.data[0..data.len()].copy_from_slice(&data);
     Ok(())
 }
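The signer checks above are exercised end to end by the test changes that follow. As a rough client-side sketch of the intended flow (based on the tests below, with the keypair and BankClient setup assumed rather than shown), storing config data with additional signers looks roughly like:

    // Illustrative only; mirrors test_process_store_with_additional_signers below.
    let keys = vec![(signer0.pubkey(), true), (signer1.pubkey(), true)];
    let my_config = MyConfig::new(42);
    // The config account itself must sign on initialization, and every key marked
    // as a signer in `keys` must sign the transaction.
    let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
    let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
    bank_client
        .send_message(&[&mint_keypair, &config_keypair, &signer0, &signer1], message)
        .unwrap();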
@ -64,7 +139,11 @@ mod tests {
         (bank, mint_keypair)
     }
 
-    fn create_config_account(bank: Bank, mint_keypair: &Keypair) -> (BankClient, Keypair) {
+    fn create_config_account(
+        bank: Bank,
+        mint_keypair: &Keypair,
+        keys: Vec<(Pubkey, bool)>,
+    ) -> (BankClient, Keypair) {
         let config_keypair = Keypair::new();
         let config_pubkey = config_keypair.pubkey();
 
@ -76,6 +155,7 @@ mod tests {
                 &mint_keypair.pubkey(),
                 &config_pubkey,
                 1,
+                keys,
             ),
         )
         .expect("new_account");
@ -87,7 +167,7 @@ mod tests {
     fn test_process_create_ok() {
         solana_logger::setup();
         let (bank, mint_keypair) = create_bank(10_000);
-        let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
+        let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair, vec![]);
         let config_account_data = bank_client
             .get_account_data(&config_keypair.pubkey())
             .unwrap()
@ -102,13 +182,16 @@ mod tests {
     fn test_process_store_ok() {
         solana_logger::setup();
         let (bank, mint_keypair) = create_bank(10_000);
-        let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
+        let keys = vec![];
+        let (bank_client, config_keypair) =
+            create_config_account(bank, &mint_keypair, keys.clone());
         let config_pubkey = config_keypair.pubkey();
 
         let my_config = MyConfig::new(42);
 
-        let instruction = config_instruction::store(&config_pubkey, &my_config);
+        let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
         let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
 
         bank_client
             .send_message(&[&mint_keypair, &config_keypair], message)
             .unwrap();
@ -117,6 +200,8 @@ mod tests {
             .get_account_data(&config_pubkey)
             .unwrap()
             .unwrap();
+        let meta_length = ConfigKeys::serialized_size(keys);
+        let config_account_data = &config_account_data[meta_length..config_account_data.len()];
         assert_eq!(
             my_config,
             MyConfig::deserialize(&config_account_data).unwrap()
@ -127,12 +212,12 @@ mod tests {
     fn test_process_store_fail_instruction_data_too_large() {
         solana_logger::setup();
         let (bank, mint_keypair) = create_bank(10_000);
-        let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
+        let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair, vec![]);
         let config_pubkey = config_keypair.pubkey();
 
         let my_config = MyConfig::new(42);
 
-        let mut instruction = config_instruction::store(&config_pubkey, &my_config);
+        let mut instruction = config_instruction::store(&config_pubkey, true, vec![], &my_config);
         instruction.data = vec![0; 123]; // <-- Replace data with a vector that's too large
         let message = Message::new(vec![instruction]);
         bank_client
@ -148,13 +233,14 @@ mod tests {
         let system_pubkey = system_keypair.pubkey();
 
         bank.transfer(42, &mint_keypair, &system_pubkey).unwrap();
-        let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
+        let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair, vec![]);
         let config_pubkey = config_keypair.pubkey();
 
         let transfer_instruction =
             system_instruction::transfer(&system_pubkey, &Pubkey::new_rand(), 42);
         let my_config = MyConfig::new(42);
-        let mut store_instruction = config_instruction::store(&config_pubkey, &my_config);
+        let mut store_instruction =
+            config_instruction::store(&config_pubkey, true, vec![], &my_config);
         store_instruction.accounts[0].is_signer = false; // <----- not a signer
 
         let message = Message::new(vec![transfer_instruction, store_instruction]);
@ -162,4 +248,232 @@ mod tests {
             .send_message(&[&system_keypair], message)
             .unwrap_err();
     }
+
+    #[test]
+    fn test_process_store_with_additional_signers() {
+        solana_logger::setup();
+        let (bank, mint_keypair) = create_bank(10_000);
+        let pubkey = Pubkey::new_rand();
+        let signer0 = Keypair::new();
+        let signer1 = Keypair::new();
+        let keys = vec![
+            (pubkey, false),
+            (signer0.pubkey(), true),
+            (signer1.pubkey(), true),
+        ];
+        let (bank_client, config_keypair) =
+            create_config_account(bank, &mint_keypair, keys.clone());
+        let config_pubkey = config_keypair.pubkey();
+
+        let my_config = MyConfig::new(42);
+
+        let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+
+        bank_client
+            .send_message(
+                &[&mint_keypair, &config_keypair, &signer0, &signer1],
+                message,
+            )
+            .unwrap();
+
+        let config_account_data = bank_client
+            .get_account_data(&config_pubkey)
+            .unwrap()
+            .unwrap();
+        let meta_length = ConfigKeys::serialized_size(keys.clone());
+        let meta_data: ConfigKeys = deserialize(&config_account_data[0..meta_length]).unwrap();
+        assert_eq!(meta_data.keys, keys);
+        let config_account_data = &config_account_data[meta_length..config_account_data.len()];
+        assert_eq!(
+            my_config,
+            MyConfig::deserialize(&config_account_data).unwrap()
+        );
+    }
+
+    #[test]
+    fn test_process_store_without_config_signer() {
+        solana_logger::setup();
+        let (bank, mint_keypair) = create_bank(10_000);
+        let pubkey = Pubkey::new_rand();
+        let signer0 = Keypair::new();
+        let keys = vec![(pubkey, false), (signer0.pubkey(), true)];
+        let (bank_client, config_keypair) =
+            create_config_account(bank, &mint_keypair, keys.clone());
+        let config_pubkey = config_keypair.pubkey();
+
+        let my_config = MyConfig::new(42);
+
+        let instruction =
+            config_instruction::store(&config_pubkey, false, keys.clone(), &my_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+
+        bank_client
+            .send_message(&[&mint_keypair, &signer0], message)
+            .unwrap_err();
+    }
+
+    #[test]
+    fn test_process_store_with_bad_additional_signer() {
+        solana_logger::setup();
+        let (bank, mint_keypair) = create_bank(10_000);
+        let signer0 = Keypair::new();
+        let signer1 = Keypair::new();
+        let keys = vec![(signer0.pubkey(), true)];
+        let (bank_client, config_keypair) =
+            create_config_account(bank, &mint_keypair, keys.clone());
+        let config_pubkey = config_keypair.pubkey();
+
+        let my_config = MyConfig::new(42);
+
+        // Config-data pubkey doesn't match signer
+        let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
+        let mut message =
+            Message::new_with_payer(vec![instruction.clone()], Some(&mint_keypair.pubkey()));
+        message.account_keys[2] = signer1.pubkey();
+        bank_client
+            .send_message(&[&mint_keypair, &config_keypair, &signer1], message)
+            .unwrap_err();
+
+        // Config-data pubkey not a signer
+        let mut message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+        message.header.num_required_signatures = 2;
+        bank_client
+            .send_message(&[&mint_keypair, &config_keypair], message)
+            .unwrap_err();
+    }
+
+    #[test]
+    fn test_config_updates() {
+        solana_logger::setup();
+        let (bank, mint_keypair) = create_bank(10_000);
+        let pubkey = Pubkey::new_rand();
+        let signer0 = Keypair::new();
+        let signer1 = Keypair::new();
+        let keys = vec![
+            (pubkey, false),
+            (signer0.pubkey(), true),
+            (signer1.pubkey(), true),
+        ];
+        let (bank_client, config_keypair) =
+            create_config_account(bank, &mint_keypair, keys.clone());
+        let config_pubkey = config_keypair.pubkey();
+
+        let my_config = MyConfig::new(42);
+
+        let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+
+        bank_client
+            .send_message(
+                &[&mint_keypair, &config_keypair, &signer0, &signer1],
+                message,
+            )
+            .unwrap();
+
+        // Update with expected signatures
+        let new_config = MyConfig::new(84);
+        let instruction =
+            config_instruction::store(&config_pubkey, false, keys.clone(), &new_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+        bank_client
+            .send_message(&[&mint_keypair, &signer0, &signer1], message)
+            .unwrap();
+
+        let config_account_data = bank_client
+            .get_account_data(&config_pubkey)
+            .unwrap()
+            .unwrap();
+        let meta_length = ConfigKeys::serialized_size(keys.clone());
+        let meta_data: ConfigKeys = deserialize(&config_account_data[0..meta_length]).unwrap();
+        assert_eq!(meta_data.keys, keys);
+        let config_account_data = &config_account_data[meta_length..config_account_data.len()];
+        assert_eq!(
+            new_config,
+            MyConfig::deserialize(&config_account_data).unwrap()
+        );
+
+        // Attempt update with incomplete signatures
+        let keys = vec![(pubkey, false), (signer0.pubkey(), true)];
+        let instruction =
+            config_instruction::store(&config_pubkey, false, keys.clone(), &my_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+        bank_client
+            .send_message(&[&mint_keypair, &signer0], message)
+            .unwrap_err();
+
+        // Attempt update with incorrect signatures
+        let signer2 = Keypair::new();
+        let keys = vec![
+            (pubkey, false),
+            (signer0.pubkey(), true),
+            (signer2.pubkey(), true),
+        ];
+        let instruction =
+            config_instruction::store(&config_pubkey, false, keys.clone(), &my_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+        bank_client
+            .send_message(&[&mint_keypair, &signer0, &signer2], message)
+            .unwrap_err();
+    }
+
+    #[test]
+    fn test_config_updates_requiring_config() {
+        solana_logger::setup();
+        let (bank, mint_keypair) = create_bank(10_000);
+        let pubkey = Pubkey::new_rand();
+        let signer0 = Keypair::new();
+        let keys = vec![
+            (pubkey, false),
+            (signer0.pubkey(), true),
+            (signer0.pubkey(), true),
+        ]; // Dummy keys for account sizing
+        let (bank_client, config_keypair) =
+            create_config_account(bank, &mint_keypair, keys.clone());
+        let config_pubkey = config_keypair.pubkey();
+        let keys = vec![
+            (pubkey, false),
+            (signer0.pubkey(), true),
+            (config_keypair.pubkey(), true),
+        ];
+
+        let my_config = MyConfig::new(42);
+
+        let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+
+        bank_client
+            .send_message(&[&mint_keypair, &config_keypair, &signer0], message)
+            .unwrap();
+
+        // Update with expected signatures
+        let new_config = MyConfig::new(84);
+        let instruction =
+            config_instruction::store(&config_pubkey, true, keys.clone(), &new_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+        bank_client
+            .send_message(&[&mint_keypair, &config_keypair, &signer0], message)
+            .unwrap();
+
+        let config_account_data = bank_client
+            .get_account_data(&config_pubkey)
+            .unwrap()
+            .unwrap();
+        let meta_length = ConfigKeys::serialized_size(keys.clone());
+        let meta_data: ConfigKeys = deserialize(&config_account_data[0..meta_length]).unwrap();
+        assert_eq!(meta_data.keys, keys);
+        let config_account_data = &config_account_data[meta_length..config_account_data.len()];
+        assert_eq!(
+            new_config,
+            MyConfig::deserialize(&config_account_data).unwrap()
+        );
+
+        // Attempt update with incomplete signatures
+        let keys = vec![(pubkey, false), (config_keypair.pubkey(), true)];
+        let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
+        let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
+        bank_client
+            .send_message(&[&mint_keypair, &config_keypair], message)
+            .unwrap_err();
+    }
 }
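The tests above read stored data back by skipping the serialized ConfigKeys header before deserializing the payload. In isolation, that access pattern looks roughly like this (illustrative sketch; the client, keys, and MyConfig type are taken from the tests above):

    // Skip the ConfigKeys metadata that store() writes ahead of the user payload.
    let data = bank_client.get_account_data(&config_pubkey).unwrap().unwrap();
    let meta_length = ConfigKeys::serialized_size(keys.clone());
    let stored_keys: ConfigKeys = deserialize(&data[0..meta_length]).unwrap();
    let my_config = MyConfig::deserialize(&data[meta_length..]).unwrap();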
@ -1,6 +1,6 @@
 [package]
 name = "solana-config-program"
-version = "0.16.0"
+version = "0.16.5"
 description = "config program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@ -10,9 +10,9 @@ edition = "2018"
 
 [dependencies]
 log = "0.4.2"
-solana-config-api = { path = "../config_api", version = "0.16.0" }
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-config-api = { path = "../config_api", version = "0.16.5" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
 
 [lib]
 crate-type = ["lib", "cdylib"]
@ -1,6 +1,6 @@
 [package]
 name = "solana-exchange-api"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana Exchange program API"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@ -13,12 +13,12 @@ bincode = "1.1.4"
 log = "0.4.2"
 serde = "1.0.92"
 serde_derive = "1.0.92"
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-metrics = { path = "../../metrics", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-metrics = { path = "../../metrics", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
 
 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.16.0" }
+solana-runtime = { path = "../../runtime", version = "0.16.5" }
 
 [lib]
 crate-type = ["lib"]
@ -1,6 +1,6 @@
 [package]
 name = "solana-exchange-program"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana exchange program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@ -10,9 +10,9 @@ edition = "2018"
 
 [dependencies]
 log = "0.4.2"
-solana-exchange-api = { path = "../exchange_api", version = "0.16.0" }
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-exchange-api = { path = "../exchange_api", version = "0.16.5" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
 
 [lib]
 crate-type = ["lib", "cdylib"]
@ -1,6 +1,6 @@
 [package]
 name = "solana-failure-program"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana failure program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@ -10,10 +10,10 @@ edition = "2018"
 
 [dependencies]
 log = "0.4.2"
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
 
 [dev-dependencies]
-solana-runtime = { path = "../../runtime", version = "0.16.0" }
+solana-runtime = { path = "../../runtime", version = "0.16.5" }
 
 [lib]
 crate-type = ["cdylib"]
@ -1,24 +1,23 @@
 use solana_runtime::bank::Bank;
 use solana_runtime::bank_client::BankClient;
-use solana_runtime::loader_utils::{create_invoke_instruction, load_program};
+use solana_runtime::loader_utils::create_invoke_instruction;
 use solana_sdk::client::SyncClient;
 use solana_sdk::genesis_block::create_genesis_block;
 use solana_sdk::instruction::InstructionError;
-use solana_sdk::native_loader;
+use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::KeypairUtil;
 use solana_sdk::transaction::TransactionError;
 
 #[test]
 fn test_program_native_failure() {
     let (genesis_block, alice_keypair) = create_genesis_block(50);
+    let program_id = Pubkey::new_rand();
     let bank = Bank::new(&genesis_block);
-    let bank_client = BankClient::new(bank);
-
-    let program = "solana_failure_program".as_bytes().to_vec();
-    let program_id = load_program(&bank_client, &alice_keypair, &native_loader::id(), program);
+    bank.register_native_instruction_processor("solana_failure_program", &program_id);
 
     // Call user program
     let instruction = create_invoke_instruction(alice_keypair.pubkey(), program_id, &1u8);
+    let bank_client = BankClient::new(bank);
     assert_eq!(
         bank_client
             .send_instruction(&alice_keypair, instruction)
@ -1,6 +1,6 @@
 [package]
 name = "solana-noop-program"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana noop program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@ -10,8 +10,8 @@ edition = "2018"
 
 [dependencies]
 log = "0.4.2"
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
 
 [lib]
 crate-type = ["cdylib"]
@ -1,6 +1,6 @@
 [package]
 name = "solana-stake-api"
-version = "0.16.0"
+version = "0.16.5"
 description = "Solana Stake program API"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@ -14,10 +14,10 @@ log = "0.4.2"
 rand = "0.6.5"
 serde = "1.0.92"
 serde_derive = "1.0.92"
-solana-logger = { path = "../../logger", version = "0.16.0" }
-solana-metrics = { path = "../../metrics", version = "0.16.0" }
-solana-sdk = { path = "../../sdk", version = "0.16.0" }
-solana-vote-api = { path = "../vote_api", version = "0.16.0" }
+solana-logger = { path = "../../logger", version = "0.16.5" }
+solana-metrics = { path = "../../metrics", version = "0.16.5" }
+solana-sdk = { path = "../../sdk", version = "0.16.5" }
+solana-vote-api = { path = "../vote_api", version = "0.16.5" }
 
 [lib]
 crate-type = ["lib"]
@ -31,6 +31,24 @@ pub enum StakeInstruction {
     /// 2 - RewardsPool Stake Account from which to redeem credits
     /// 3 - Rewards syscall Account that carries points values
     RedeemVoteCredits,
+
+    /// Withdraw unstaked lamports from the stake account
+    ///
+    /// Expects 3 Accounts:
+    ///    0 - Delegate StakeAccount
+    ///    1 - System account to which the lamports will be transferred,
+    ///    2 - Syscall Account that carries epoch
+    ///
+    /// The u64 is the portion of the Stake account balance to be withdrawn,
+    ///    must be <= StakeAccount.lamports - staked lamports
+    Withdraw(u64),
+
+    /// Deactivates the stake in the account
+    ///
+    /// Expects 2 Accounts:
+    ///    0 - Delegate StakeAccount
+    ///    1 - Syscall Account that carries epoch
+    Deactivate,
 }
 
 pub fn create_stake_account(
@ -77,6 +95,23 @@ pub fn delegate_stake(stake_pubkey: &Pubkey, vote_pubkey: &Pubkey, stake: u64) -
     Instruction::new(id(), &StakeInstruction::DelegateStake(stake), account_metas)
 }
 
+pub fn withdraw(stake_pubkey: &Pubkey, to_pubkey: &Pubkey, lamports: u64) -> Instruction {
+    let account_metas = vec![
+        AccountMeta::new(*stake_pubkey, true),
+        AccountMeta::new(*to_pubkey, false),
+        AccountMeta::new(syscall::current::id(), false),
+    ];
+    Instruction::new(id(), &StakeInstruction::Withdraw(lamports), account_metas)
+}
+
+pub fn deactivate_stake(stake_pubkey: &Pubkey) -> Instruction {
+    let account_metas = vec![
+        AccountMeta::new(*stake_pubkey, true),
+        AccountMeta::new(syscall::current::id(), false),
+    ];
+    Instruction::new(id(), &StakeInstruction::Deactivate, account_metas)
+}
+
 pub fn process_instruction(
     _program_id: &Pubkey,
     keyed_accounts: &mut [KeyedAccount],
@ -123,6 +158,27 @@ pub fn process_instruction(
                 &syscall::rewards::from_keyed_account(&rest[0])?,
             )
         }
+        StakeInstruction::Withdraw(lamports) => {
+            if rest.len() != 2 {
+                Err(InstructionError::InvalidInstructionData)?;
+            }
+            let (to, syscall) = &mut rest.split_at_mut(1);
+            let mut to = &mut to[0];
+
+            me.withdraw(
+                lamports,
+                &mut to,
+                &syscall::current::from_keyed_account(&syscall[0])?,
+            )
+        }
+        StakeInstruction::Deactivate => {
+            if rest.len() != 1 {
+                Err(InstructionError::InvalidInstructionData)?;
+            }
+            let syscall = &rest[0];
+
+            me.deactivate_stake(&syscall::current::from_keyed_account(&syscall)?)
+        }
     }
 }
@ -168,6 +224,14 @@ mod tests {
             process_instruction(&delegate_stake(&Pubkey::default(), &Pubkey::default(), 0)),
             Err(InstructionError::InvalidAccountData),
         );
+        assert_eq!(
+            process_instruction(&withdraw(&Pubkey::default(), &Pubkey::new_rand(), 100)),
+            Err(InstructionError::InvalidAccountData),
+        );
+        assert_eq!(
+            process_instruction(&deactivate_stake(&Pubkey::default())),
+            Err(InstructionError::InvalidAccountData),
+        );
     }
 
     #[test]
@ -250,6 +314,76 @@ mod tests {
             ),
             Err(InstructionError::InvalidAccountData),
         );
+
+        // Tests 3rd keyed account is of correct type (Current instead of rewards) in withdraw
+        assert_eq!(
+            super::process_instruction(
+                &Pubkey::default(),
+                &mut [
+                    KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
+                    KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
+                    KeyedAccount::new(
+                        &syscall::rewards::id(),
+                        false,
+                        &mut syscall::rewards::create_account(1, 0.0, 0.0)
+                    ),
+                ],
+                &serialize(&StakeInstruction::Withdraw(42)).unwrap(),
+            ),
+            Err(InstructionError::InvalidArgument),
+        );
+
+        // Tests correct number of accounts are provided in withdraw
+        assert_eq!(
+            super::process_instruction(
+                &Pubkey::default(),
+                &mut [
+                    KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
+                    KeyedAccount::new(
+                        &syscall::current::id(),
+                        false,
+                        &mut syscall::rewards::create_account(1, 0.0, 0.0)
+                    ),
+                ],
+                &serialize(&StakeInstruction::Withdraw(42)).unwrap(),
+            ),
+            Err(InstructionError::InvalidInstructionData),
+        );
+
+        // Tests 2nd keyed account is of correct type (Current instead of rewards) in deactivate
+        assert_eq!(
+            super::process_instruction(
+                &Pubkey::default(),
+                &mut [
+                    KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
+                    KeyedAccount::new(
+                        &syscall::rewards::id(),
+                        false,
+                        &mut syscall::rewards::create_account(1, 0.0, 0.0)
+                    ),
+                ],
+                &serialize(&StakeInstruction::Deactivate).unwrap(),
+            ),
+            Err(InstructionError::InvalidArgument),
+        );
+
+        // Tests correct number of accounts are provided in deactivate
+        assert_eq!(
+            super::process_instruction(
+                &Pubkey::default(),
+                &mut [
+                    KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
+                    KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
+                    KeyedAccount::new(
+                        &syscall::current::id(),
+                        false,
+                        &mut syscall::rewards::create_account(1, 0.0, 0.0)
+                    ),
+                ],
+                &serialize(&StakeInstruction::Deactivate).unwrap(),
+            ),
+            Err(InstructionError::InvalidInstructionData),
+        );
     }
 
 }
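For context, the new stake instruction constructors added above compose from a client much like the config helpers; a rough sketch (not part of this diff, with the payer, stake keypair, recipient, and amount assumed for illustration):

    // Hypothetical usage of the withdraw() / deactivate_stake() constructors above.
    // The stake account is marked as a signer in the instruction's account metas,
    // so its keypair must sign alongside the fee payer.
    let withdraw_ix = withdraw(&stake_keypair.pubkey(), &recipient_pubkey, 42);
    let message = Message::new_with_payer(vec![withdraw_ix], Some(&payer.pubkey()));
    bank_client
        .send_message(&[&payer, &stake_keypair], message)
        .unwrap();
    // Deactivation is built the same way with deactivate_stake(&stake_keypair.pubkey()).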
Some files were not shown because too many files have changed in this diff.