Compare commits

...

85 Commits

Author SHA1 Message Date
f9d9c1fcbf Update book to SOLANA_RELEASE=v0.16.5 (#5154) 2019-07-17 14:20:49 -06:00
7c59c105cf Add weighted shuffle support for values up to u64::MAX (#5151) (#5152)
automerge

(cherry picked from commit 10d85f8366)
2019-07-17 13:12:05 -07:00
a8ea9f2738 Fix bench-tps funding math; make generate_keypairs() and fund_keys() algorithms consistent (#4841) (#5145)
* Fix funding math; make generate_keypairs and fund_keys consistent

* Add test, and fix inconsistencies it exposes

* De-pow math, and use assert_eq in tests for better failure msgs
2019-07-17 09:01:13 -07:00
651f87a937 Show stake pubkey 2019-07-16 20:10:58 -07:00
88f8e2f332 Check harder on crates.io for recently published crates (#5136) (#5138)
automerge
2019-07-16 19:54:17 -07:00
a2cb289503 clear-config.sh now works with a secondary disk (#5135) (#5137)
automerge
2019-07-16 19:47:40 -07:00
89bd9d5b72 Bump blockexplorer version 2019-07-16 19:44:42 -07:00
7edaaeb2a1 Improve validator-info CLI (#5121) (#5125)
automerge
2019-07-16 09:08:35 -07:00
1c3ade80c2 Add missing dash 2019-07-16 07:28:26 -07:00
3606d51507 Increment Cargo.toml and Cargo.lock to 0.16.5 (#5119) 2019-07-15 17:32:12 -06:00
281fd88ea7 Update testnet doc to use latest release (#5118) 2019-07-15 17:11:42 -06:00
ee6b625c13 fix transaction_count (bp #5110) (#5111)
automerge
2019-07-15 14:46:11 -07:00
4cc1b85376 Boot remote native loads, take 2 (#5106) (#5109)
automerge
2019-07-15 12:54:24 -07:00
f8312ce125 Keybase pubkey file instructions and verification for validators (#5090) (#5102)
automerge
2019-07-14 23:25:50 -07:00
6a4cd02f64 Add node zone and count to ENV (#5100) (#5101)
automerge
2019-07-14 22:13:50 -07:00
50f238d900 Pull testnet vars up to buildkite env (#5098) (#5099)
automerge
2019-07-14 19:56:59 -07:00
23e3f4e8a2 Plumb --no-snapshot in from CI (#5077) (#5095)
* Plumb --no-snapshot in from CI

(cherry picked from commit 440d006ec1)
2019-07-14 13:22:28 -06:00
27f70dfa49 Correctly decode update manifest (#5086) (#5087)
automerge
2019-07-12 23:25:15 -07:00
72d366a84e Stop trying to publish crates that are unpublishable 2019-07-12 21:53:33 -07:00
2da9de8861 Avoid trying to republish crates already on crates.io 2019-07-12 21:36:07 -07:00
f4288961d5 Add epoch voting history to show-vote-account (#5080) 2019-07-12 21:23:15 -07:00
143ad436cf Give publish-crate more time 2019-07-12 20:28:23 -07:00
0a9fbc3e4c Facility to generate a blocktree prune list using ledger tool (#5041) (#5081)
automerge
2019-07-12 17:49:29 -07:00
7aa091bf8c Add rewards to is_syscall_id() (#5035) 2019-07-12 16:10:28 -07:00
91d8bfa828 Increment cargo tomls to 0.16.4 (#5078) 2019-07-12 16:30:55 -06:00
c501c19750 Add a version field to blobs (bp #5057) (#5068)
automerge
2019-07-12 14:38:32 -07:00
acd55660da Add --no-snapshot to disable booting a validator from a snapshot (#5050) (#5073)
automerge
2019-07-12 15:35:42 -06:00
855bd7d3b8 apt-get update before installing certbot (#5054) (#5056)
* apt-get update before installing certbot

(cherry picked from commit f093377805)
2019-07-12 11:54:56 -06:00
a2e9d8e0bf Enable GPUs and secondary disks for TdS net, pull external account file (#5031) (#5053) 2019-07-12 10:17:46 -06:00
81dbe3c49b Add support for additional disks for config-local (#5030) (#5040)
* Add support for additional disks for config-local

(cherry picked from commit e4861f52e0)
2019-07-12 10:01:07 -06:00
086e20f6c7 Restore ledger-tool print and json commands (#5048) (#5049)
automerge
2019-07-11 21:14:37 -07:00
d08a810c08 v0.16: Expand Config program; implement Validator Info CLI (#5045)
* Update config program to accommodate multiple signers (#4946)

* Update config program to accommodate multiple signers

* Update install CLI

* Remove account_type u32; add handling for unsigned keys in list

* ConfigKeys doc

* Make config_api more robust (#4980)

* Make config_api more robust

* Add test and update store instruction

* Improve signature checks in config_api (#5001)

automerge

* Add validator-info CLI (#4970)

* Add validator-info CLI

* Add GetProgramAccounts method to solana-client

* Update validator-info args, and add get subcommand

* Update ValidatorInfo lengths

* Add account filter for get --all

* Update testnet participation doc to reflect validator-info

* Flesh out tests

* Review comments
2019-07-11 18:28:49 -06:00
400610bf6a v0.16: AccountsDB updates and getProgramAccounts RPC fix (#5044)
* reduce replicode in accounts, fix cast to i64 (#5025)

* add accounts_index_scan_accounts (#5020)

* Plumb scan_accounts into accounts_db, adding load from storage (#5029)

* Fix getProgramAccounts RPC (#5024)

* Use scan_accounts to load accounts by program_id

* Add bank test

* Use get_program_accounts in RPC

* Rebase for v0.16
2019-07-11 17:57:56 -06:00
f759ac3a8d add node_pubkey to vote warning (#5033) (#5034)
(cherry picked from commit a191f3fd90)
2019-07-11 13:58:49 -07:00
558411364e Pass SOLANA_METRICS_CONFIG along to oom-monitor.sh (#5021) (#5026)
(cherry picked from commit 8781aebe06)
2019-07-11 12:43:38 -07:00
d0b5be3051 Rename tds-testnet to tds (#5008) (#5009)
(cherry picked from commit e563a4dda3)
2019-07-10 11:39:57 -06:00
dc6da6fcca Bump @solana/blockexplorer to v1.17.2 2019-07-10 09:33:10 -07:00
8ae11a74fa Move letsencrypt arg to create_args 2019-07-09 21:26:56 -07:00
11f0333728 Include --letsencrypt ($1) 2019-07-09 20:55:41 -07:00
aac74d2357 Fund solana-install deployments from the mint keypair to avoid airdrops (#4997) (#5000)
automerge
2019-07-09 17:29:43 -07:00
508abcf4ed net/ plumbing to manage LetsEncrypt TLS certificates (#4985) (#4996)
automerge
2019-07-09 16:29:45 -07:00
6dbb6c7fe2 Fix always passing in remote filename, even if no accounts file (#4993) (#4994)
* Fix always passing in remote filename, even if no accounts file

* typo

(cherry picked from commit d111223085)
2019-07-09 15:44:04 -07:00
2f58658f61 Add testnet-tds support to testnet manager (#4762) (#4987)
automerge
2019-07-09 14:16:13 -07:00
0ec7ff5e2f Add pubkey (#4971) (#4977)
automerge
2019-07-09 01:28:48 -07:00
4d49820188 Handle replicator errors without panicking (#4957)
* Handle replicator errors without panicking

* Typo

* Handle error with match instead of if-let

* Discard error
2019-07-08 11:23:21 -07:00
6e51babff9 Reduce default commission from 100% to 50% (#4929) 2019-07-05 08:00:39 -07:00
872cf100d7 [Security] Bump smallvec from 0.6.9 to 0.6.10 (#4921)
Bumps [smallvec](https://github.com/servo/rust-smallvec) from 0.6.9 to 0.6.10. **This update includes security fixes.**
- [Release notes](https://github.com/servo/rust-smallvec/releases)
- [Commits](https://github.com/servo/rust-smallvec/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-03 18:12:11 -07:00
78cc4e644c Cargo.lock 2019-07-03 18:10:31 -07:00
81c0152187 install: more little window fixes (#4930)
* Only add .exe extension if no extension was given

* Switch to ctrlc crate for freebie Windows ^C handling
2019-07-03 18:00:27 -07:00
4779625f23 change vote commission to u8 (from u32) (#4887) (#4918)
automerge
2019-07-02 14:52:53 -07:00
3c0b03ba84 Update cargo.toml and cargo.lock files for 0.16.3 (#4917) 2019-07-02 14:33:04 -06:00
c53f163ef5 Add RPC api to return program accounts (#4876) (#4912)
automerge
2019-07-02 11:48:00 -07:00
ca35854417 Add .exe extension before checking for a program file on windows (#4902) (#4903)
automerge
2019-07-02 08:48:35 -07:00
ab1fda2a54 Add curl retries 2019-07-02 08:38:18 -07:00
a6ec77c230 Update Cargo.toml 2019-07-01 23:18:15 -07:00
1d7894f1be Avoid signal-hook crate on windows (#4901) 2019-07-01 22:52:49 -07:00
4866a1fc39 run command now kills child process on SIGTERM to cleanly exit (#4896) (#4899)
automerge
2019-07-01 19:28:12 -07:00
60c5e59a5e Always send pull responses to the origin addr (#4894) (#4897)
automerge
2019-07-01 17:34:51 -07:00
fd93bdadf6 Try to gracefully terminate child process before using SIGKILL (#4890) (#4892)
automerge
2019-07-01 14:43:15 -07:00
6089db2a07 Rework fullnode.sh to recover better from genesis block resets (#4884) (#4888)
automerge
2019-07-01 12:31:53 -07:00
462d0cfc6c Disable Enter prompt when stdin is not a tty (#4874) (#4877)
(cherry picked from commit 41bda18046)
2019-06-28 18:18:39 -07:00
e6d6fc4391 Don't prompt the user to update their PATH if --no-modify-path was supplied (#4872) (#4875)
automerge
2019-06-28 17:39:21 -07:00
092556ae5e Lower warn to info, fetch from validator root instead of root + 1 (#4870) (#4873)
automerge
2019-06-28 16:55:46 -07:00
ad9fa54a47 Ensure validator process is killed when fullnode.sh is killed (#4869) (#4871)
automerge
2019-06-28 15:03:46 -07:00
2d68170747 Update cargo.toml files to version 0.16.2 (#4854) 2019-06-27 11:08:02 -06:00
20f3d18458 Save snapshots followed by accounts to avoid stale account data (#4847) (#4849)
automerge
2019-06-26 23:53:14 -07:00
be79efe9b7 rsync of ledger/ and state.tgz now works on both macOS and Linux (#4845) (#4846)
automerge
2019-06-26 22:45:44 -07:00
5db377f743 Use default pubkey for solana-install sanity check 2019-06-26 21:51:10 -07:00
9c2f45a1e0 Fix possible subtract with overflow if mining pool is not setup (#4836)
automerge
2019-06-26 15:28:37 -07:00
8646918d00 Upload all artifacts 2019-06-26 14:38:55 -07:00
7c44fc3561 Set CI_REPO_SLUG correctly for the solana-secondary pipeline 2019-06-26 14:38:55 -07:00
686403eb1d Setup reward pools in genesis (#4831) (#4834)
automerge
2019-06-26 14:29:27 -07:00
6cf9b60a9c Airdrop more tokens in wallet sanity due to fee (#4830) (#4832)
automerge
2019-06-26 14:05:17 -07:00
aca142df16 Update cargo.toml files to 0.16.1 (#4829) 2019-06-26 13:25:13 -06:00
b2582196db Create snapshots sparsely (#4815) (#4816)
(cherry picked from commit c5e6ebb496)
2019-06-25 12:13:05 -07:00
85a77bec5f Set proper count value for account stores (#4797) (#4813)
* set count values for store accounts

* Use AppendVecId type

(cherry picked from commit 9e7f618cff)
2019-06-25 07:57:40 -07:00
e781cbf4ba Ignore flaky test_two_unbalanced_stakes (#4794) (#4796)
automerge
2019-06-23 21:30:02 -07:00
59956e4543 Remove --storage-mining-pool-lamports (#4792) (#4793)
automerge
2019-06-23 20:53:31 -07:00
303417f981 Lock blockexplorer version 2019-06-22 22:23:16 -07:00
fea03fdf33 Add storage reward pools (#4779) (#4789)
automerge
2019-06-22 21:26:11 -07:00
e8160efc46 Prevent Travis/Appveyor from trying to build mergify branches (#4786) (#4787)
(cherry picked from commit 23b6b85bf0)
2019-06-22 08:58:13 -07:00
e0ba0d581c Update authorized public key (#4783) 2019-06-22 08:44:00 -07:00
36eda29fc9 Add instructions and processor for stake deactivation (#4781)
automerge
2019-06-22 08:43:17 -07:00
2ec73db6bd Program instruction to withdraw un-staked lamports from stake account (#4780) (#4782) 2019-06-22 00:11:15 -07:00
ef6ce2765e Remove storage-mining-pool-keypair arg 2019-06-21 21:38:43 -07:00
148 changed files with 4263 additions and 1456 deletions

View File

@ -4,7 +4,7 @@ version: '{build}'
branches:
only:
- master
- /v[0-9.]+/
- /^v[0-9.]+/
cache:
- '%USERPROFILE%\.cargo'

View File

@ -17,7 +17,7 @@ script:
branches:
only:
- master
- /v.*/
- /^v\d+\.\d+(\.\d+)?(-\S*)?$/
notifications:
slack:
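
The two CI changes above tighten the branch filters: AppVeyor's pattern gains a `^` anchor, and Travis moves from the loose `/v.*/` to a full version-tag pattern. As a rough illustration of what the new Travis pattern accepts and rejects (a sketch only; it borrows the `regex` crate for testing, which is an assumption, since the CI services evaluate the pattern themselves):

```rust
use regex::Regex; // assumption: regex crate, used only to illustrate the pattern

fn main() {
    // The tightened Travis branch pattern from .travis.yml
    let tag = Regex::new(r"^v\d+\.\d+(\.\d+)?(-\S*)?$").unwrap();

    assert!(tag.is_match("v0.16.5"));      // release tag
    assert!(tag.is_match("v0.17.0-rc1"));  // pre-release tag
    assert!(tag.is_match("v0.16"));        // major.minor only
    assert!(!tag.is_match("rev2"));        // matched the old unanchored /v.*/, now rejected
    assert!(!tag.is_match("v0.16.5x"));    // trailing junk rejected by the $ anchor
    println!("branch patterns behave as expected");
}
```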

Cargo.lock (generated, 652 changes)

File diff suppressed because it is too large

View File

@ -41,6 +41,7 @@ members = [
"runtime",
"sdk",
"upload-perf",
"validator-info",
"vote-signer",
"wallet",
]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.16.0"
version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -24,16 +24,16 @@ serde_derive = "1.0.92"
serde_json = "1.0.39"
serde_yaml = "0.8.9"
# solana-runtime = { path = "../solana/runtime"}
solana = { path = "../core", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.0" }
solana-drone = { path = "../drone", version = "0.16.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-metrics = { path = "../metrics", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-runtime = { path = "../runtime", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana = { path = "../core", version = "0.16.5" }
solana-client = { path = "../client", version = "0.16.5" }
solana-drone = { path = "../drone", version = "0.16.5" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.5" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.5" }
solana-logger = { path = "../logger", version = "0.16.5" }
solana-metrics = { path = "../metrics", version = "0.16.5" }
solana-netutil = { path = "../netutil", version = "0.16.5" }
solana-runtime = { path = "../runtime", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
untrusted = "0.6.2"
ws = "0.8.1"

View File

@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.16.0"
version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana = { path = "../core", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana = { path = "../core", version = "0.16.5" }
solana-logger = { path = "../logger", version = "0.16.5" }
solana-netutil = { path = "../netutil", version = "0.16.5" }
[features]
cuda = ["solana/cuda"]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.16.0"
version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,14 +15,14 @@ serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
serde_yaml = "0.8.9"
solana = { path = "../core", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.0" }
solana-drone = { path = "../drone", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-metrics = { path = "../metrics", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-runtime = { path = "../runtime", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana = { path = "../core", version = "0.16.5" }
solana-client = { path = "../client", version = "0.16.5" }
solana-drone = { path = "../drone", version = "0.16.5" }
solana-logger = { path = "../logger", version = "0.16.5" }
solana-metrics = { path = "../metrics", version = "0.16.5" }
solana-netutil = { path = "../netutil", version = "0.16.5" }
solana-runtime = { path = "../runtime", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
[features]
cuda = ["solana/cuda"]

View File

@ -346,10 +346,12 @@ pub fn fund_keys<T: Client>(
source: &Keypair,
dests: &[Keypair],
total: u64,
lamports_per_signature: u64,
max_fee: u64,
mut extra: u64,
) {
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
println!("funding keys {}", dests.len());
while !notfunded.is_empty() {
@ -362,7 +364,8 @@ pub fn fund_keys<T: Client>(
break;
}
let start = notfunded.len() - max_units as usize;
let per_unit = (f.1 - max_units * lamports_per_signature) / max_units;
let fees = if extra > 0 { max_fee } else { 0 };
let per_unit = (f.1 - lamports_per_account - fees) / max_units;
let moves: Vec<_> = notfunded[start..]
.iter()
.map(|k| (k.pubkey(), per_unit))
@ -374,6 +377,7 @@ pub fn fund_keys<T: Client>(
if !moves.is_empty() {
to_fund.push((f.0, moves));
}
extra -= 1;
}
// try to transfer a "few" at a time with recent blockhash
@ -582,16 +586,20 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
}
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> Vec<Keypair> {
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
let mut rnd = GenKeys::new(seed);
let mut total_keys = 1;
let mut total_keys = 0;
let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded
let mut delta = 1;
while total_keys < count {
total_keys *= MAX_SPENDS_PER_TX;
extra += delta;
delta *= MAX_SPENDS_PER_TX;
total_keys += delta;
}
rnd.gen_n_keypairs(total_keys)
(rnd.gen_n_keypairs(total_keys), extra)
}
pub fn generate_and_fund_keypairs<T: Client>(
@ -602,8 +610,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
lamports_per_account: u64,
) -> Result<(Vec<Keypair>, u64)> {
info!("Creating {} keypairs...", tx_count * 2);
let mut keypairs = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
let (mut keypairs, extra) = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
info!("Get lamports...");
// Sample the first keypair, see if it has lamports, if so then resume.
@ -614,19 +621,21 @@ pub fn generate_and_fund_keypairs<T: Client>(
if lamports_per_account > last_keypair_balance {
let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
let extra =
let account_desired_balance =
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
let total = extra * (keypairs.len() as u64);
let extra_fees = extra * fee_calculator.max_lamports_per_signature;
let total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
}
info!("adding more lamports {}", extra);
info!("adding more lamports {}", account_desired_balance);
fund_keys(
client,
funding_pubkey,
&keypairs,
total,
fee_calculator.max_lamports_per_signature,
extra,
);
}
@ -647,6 +656,7 @@ mod tests {
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_block::create_genesis_block;
use std::sync::mpsc::channel;
@ -735,7 +745,33 @@ mod tests {
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
for kp in &keypairs {
assert!(client.get_balance(&kp.pubkey()).unwrap() >= lamports);
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
}
}
#[test]
fn test_bench_tps_fund_keys_with_fees() {
let (mut genesis_block, id) = create_genesis_block(10_000);
let fee_calculator = FeeCalculator::new(11);
genesis_block.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_block);
let client = BankClient::new(bank);
let tx_count = 10;
let lamports = 20;
let (keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
let max_fee = client
.get_recent_blockhash()
.unwrap()
.1
.max_lamports_per_signature;
for kp in &keypairs {
assert_eq!(
client.get_balance(&kp.pubkey()).unwrap(),
lamports + max_fee
);
}
}
}
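
The reworked `generate_keypairs` above grows the funding fan-out tree one level at a time: `delta` is the number of keypairs added at each level (multiplying by `MAX_SPENDS_PER_TX`), `total_keys` is the running total, and `extra` counts the intermediate keypairs whose onward transfers need additional signature fees funded. A standalone sketch of the same arithmetic, assuming `MAX_SPENDS_PER_TX = 4` (the constant is defined elsewhere in the bench-tps crate, so the value here is illustrative):

```rust
// Standalone sketch of the keypair-count math in generate_keypairs().
// MAX_SPENDS_PER_TX = 4 is an assumption for illustration.
const MAX_SPENDS_PER_TX: u64 = 4;

fn keypair_counts(count: u64) -> (u64, u64) {
    let mut total_keys = 0;
    let mut extra = 0; // keypairs that need extra transaction fees funded
    let mut delta = 1;
    while total_keys < count {
        extra += delta;
        delta *= MAX_SPENDS_PER_TX;
        total_keys += delta;
    }
    (total_keys, extra)
}

fn main() {
    // For tx_count = 10 the benchmark asks for 20 keypairs:
    // level 1 adds 4, level 2 adds 16 -> 20 total, and 1 + 4 = 5
    // intermediate keypairs need their fees covered.
    let (total, extra) = keypair_counts(20);
    assert_eq!((total, extra), (20, 5));

    // generate_and_fund_keypairs() then budgets the airdrop as the desired
    // per-account balance times (1 + number of keypairs), plus `extra`
    // signature fees on top.
    let account_desired_balance = 31; // e.g. 20 lamports + 11 max fee
    let max_fee = 11;
    let total_lamports = account_desired_balance * (1 + total) + extra * max_fee;
    assert_eq!(total_lamports, 31 * 21 + 5 * 11);
}
```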

View File

@ -40,7 +40,7 @@ fn main() {
} = cli_config;
if write_to_client_file {
let keypairs = generate_keypairs(&id, tx_count as u64 * 2);
let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2);
let num_accounts = keypairs.len() as u64;
let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)

View File

@ -25,6 +25,7 @@ Methods
* [getAccountInfo](#getaccountinfo)
* [getBalance](#getbalance)
* [getClusterNodes](#getclusternodes)
* [getProgramAccounts](#getprogramaccounts)
* [getRecentBlockhash](#getrecentblockhash)
* [getSignatureStatus](#getsignaturestatus)
* [getSlotLeader](#getslotleader)
@ -96,6 +97,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
{"jsonrpc":"2.0","result":true,"id":1}
```
---
### getAccountInfo
Returns all information associated with the account of provided Pubkey
##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
##### Results:
The result field will be a JSON object with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```
---
### getBalance
@ -142,28 +169,29 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
---
### getAccountInfo
Returns all information associated with the account of provided Pubkey
### getProgramAccounts
Returns all accounts owned by the provided program Pubkey
##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
* `string` - Pubkey of program, as base-58 encoded string
##### Results:
The result field will be a JSON object with the following sub fields:
The result field will be an array of arrays. Each sub array will contain:
* `string` - the account Pubkey, as base-58 encoded string
and a JSON object, with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all zeros
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]]]},"id":1}
```
---
@ -402,7 +430,7 @@ for a given account public key changes
##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
---

View File

@ -74,8 +74,8 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
software on Linux x86_64 and mac OS systems.
```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
$ export SOLANA_RELEASE=v0.16.5 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.5/install/solana-install-init.sh | sh -s
```
Alternatively build the `solana-install` program from source and run the
@ -240,3 +240,34 @@ A local InfluxDB and Grafana instance is now running on your machine. Define
`start.sh` output and restart your validator.
Metrics should now be streaming and visible from your local Grafana dashboard.
#### Publishing Validator Info
You can publish your validator information to the chain to be publicly visible
to other users.
Run the solana-validator-info CLI to populate a validator-info account:
```bash
$ solana-validator-info publish ~/validator-keypair.json <VALIDATOR_NAME> <VALIDATOR_INFO_ARGS>
```
Optional fields for VALIDATOR_INFO_ARGS:
* Website
* Keybase ID
* Details
##### Keybase
Including a Keybase ID allows client applications (like the Solana Network
Explorer) to automatically pull in your validator public profile, including
cryptographic proofs, brand identity, etc. To connect your validator pubkey with
Keybase:
1. Join https://keybase.io/ and complete the profile for your validator
2. Add your validator **identity pubkey** to Keybase:
* Create an empty file on your local computer called `solana_pubkey_<PUBKEY>`
* In Keybase, navigate to the Files section, and upload your pubkey file to
your public folder: `/keybase/public/<KEYBASE_ID>`
* To check your pubkey, ensure you can successfully browse to
`https://keybase.pub/<KEYBASE_ID>/solana_pubkey_<PUBKEY>`
3. Add or update your `solana-validator-info` with your Keybase ID. The CLI will
verify the `solana_pubkey_<PUBKEY>` file
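
Step 3 relies on the pubkey file being publicly reachable at the `keybase.pub` URL above. A rough sketch of the kind of reachability check a tool could perform (assumptions: the blocking `reqwest` 0.9 API already used by this repo's client crate, plus a placeholder Keybase username and pubkey; the actual verification logic lives in `solana-validator-info` and may differ):

```rust
// Hypothetical check that a validator's Keybase pubkey file is public.
// Assumes the blocking reqwest 0.9 API; username and pubkey are placeholders.
fn keybase_pubkey_is_published(keybase_id: &str, pubkey: &str) -> bool {
    let url = format!(
        "https://keybase.pub/{}/solana_pubkey_{}",
        keybase_id, pubkey
    );
    match reqwest::get(url.as_str()) {
        Ok(response) => response.status().is_success(),
        Err(_) => false,
    }
}

fn main() {
    // Both values below are placeholders for illustration.
    let ok = keybase_pubkey_is_published(
        "example_user",
        "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",
    );
    println!("keybase pubkey file reachable: {}", ok);
}
```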

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.16.0"
version = "0.16.5"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -3,7 +3,7 @@ steps:
timeout_in_minutes: 20
name: "publish docker"
- command: "ci/publish-crate.sh"
timeout_in_minutes: 40
timeout_in_minutes: 60
name: "publish crate"
branches: "!master"
- command: "ci/publish-bpf-sdk.sh"

View File

@ -33,9 +33,15 @@ if [[ -n $CI ]]; then
export CI_PULL_REQUEST=
fi
export CI_OS_NAME=linux
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG
else
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
fi
# TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into
# the solana-secondary builder
# the solana-secondary pipeline
if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
export CI_TAG=$TRIGGERED_BUILDKITE_TAG
else

View File

@ -30,12 +30,24 @@ cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
Cargo_tomls=$(ci/order-crates-for-publishing.py)
for Cargo_toml in $Cargo_tomls; do
echo "-- $Cargo_toml"
echo "--- $Cargo_toml"
grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || {
echo "Error: $Cargo_toml version is not $expectedCrateVersion"
exit 1
}
crate_name=$(grep -m 1 '^name = ' "$Cargo_toml" | cut -f 3 -d ' ' | tr -d \")
if grep -q "^publish = false" "$Cargo_toml"; then
echo "$crate_name is is marked as unpublishable"
continue
fi
if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
echo "${crate_name} version ${expectedCrateVersion} is already on crates.io"
continue
fi
(
set -x
crate=$(dirname "$Cargo_toml")
@ -45,15 +57,26 @@ for Cargo_toml in $Cargo_tomls; do
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues
# shellcheck disable=SC2086
crate_name=$(grep -m 1 '^name = ' $Cargo_toml | cut -f 3 -d ' ' | tr -d \")
numRetries=30
for ((i = 1 ; i <= numRetries ; i++)); do
echo "Attempt ${i} of ${numRetries}"
# shellcheck disable=SC2086
if [[ $(is_crate_version_uploaded $crate_name $expectedCrateVersion) = True ]] ; then
echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io"
break
if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io REST API"
really_uploaded=0
(
set -x
rm -rf crate-test
cargo init crate-test
cd crate-test/
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
echo "[workspace]" >> Cargo.toml
cargo check
) && really_uploaded=1
if ((really_uploaded)); then
break;
fi
echo "${crate_name} not yet available for download from crates.io"
fi
echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds."
sleep 2

View File

@ -120,16 +120,16 @@ if [[ "$CI_OS_NAME" = linux ]]; then
MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
fi
echo --- Saving build artifacts
source ci/upload-ci-artifact.sh
upload-ci-artifact solana-release-$TARGET.tar.bz2
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo Skipped due to DO_NOT_PUBLISH_TAR
exit 0
fi
for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
upload-ci-artifact "$file"
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
continue
fi
if [[ -n $BUILDKITE ]]; then
echo --- AWS S3 Store: "$file"
(

View File

@ -24,6 +24,12 @@ blockstreamer=false
deployUpdateManifest=true
fetchLogs=true
maybeHashesPerTick=
maybeStakeNodesInGenesisBlock=
maybeExternalPrimordialAccountsFile=
maybeLamports=
maybeLetsEncrypt=
maybeFullnodeAdditionalDiskSize=
maybeNoSnapshot=
usage() {
exitcode=0
@ -62,11 +68,24 @@ Deploys a CD testnet
-s - Skip start. Nodes will still be created or configured, but network software will not be started.
-S - Stop network software without tearing down nodes.
-f - Discard validator nodes that didn't bootup successfully
-w - Skip time-consuming "bells and whistles" that are
unnecessary for a high-node count demo testnet
--stake-internal-nodes NUM_LAMPORTS
- Amount to stake internal nodes. If set, airdrops are disabled.
--external-accounts-file FILE_PATH
- Path to external Primordial Accounts file, if it exists.
--hashes-per-tick NUM_HASHES|sleep|auto
- Override the default --hashes-per-tick for the cluster
--lamports NUM_LAMPORTS
- Specify the number of lamports to mint (default 100000000000000)
--skip-deploy-update
- If set, will skip software update deployment
--skip-remote-log-retrieval
- If set, will not fetch logs from remote nodes
--letsencrypt [dns name]
- Attempt to generate a TLS certificate using this DNS name
--fullnode-additional-disk-size-gb [number]
- Size of additional disk in GB for all fullnodes
--no-snapshot
- If set, disables booting validators from a snapshot
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics
@ -82,6 +101,33 @@ while [[ -n $1 ]]; do
if [[ $1 = --hashes-per-tick ]]; then
maybeHashesPerTick="$1 $2"
shift 2
elif [[ $1 = --lamports ]]; then
maybeLamports="$1 $2"
shift 2
elif [[ $1 = --stake-internal-nodes ]]; then
maybeStakeNodesInGenesisBlock="$1 $2"
shift 2
elif [[ $1 = --external-accounts-file ]]; then
maybeExternalPrimordialAccountsFile="$1 $2"
shift 2
elif [[ $1 = --skip-deploy-update ]]; then
deployUpdateManifest=false
shift 1
elif [[ $1 = --skip-remote-log-retrieval ]]; then
fetchLogs=false
shift 1
elif [[ $1 = --letsencrypt ]]; then
maybeLetsEncrypt="$1 $2"
shift 2
elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
maybeFullnodeAdditionalDiskSize="$1 $2"
shift 2
elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
shortArgs+=("$1")
shift
elif [[ $1 = --no-snapshot ]]; then
maybeNoSnapshot="$1"
shift 1
else
usage "Unknown long option: $1"
fi
@ -228,6 +274,11 @@ if ! $skipCreate; then
# shellcheck disable=SC2206
create_args+=(${zone_args[@]})
if [[ -n $maybeLetsEncrypt ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeLetsEncrypt
create_args+=($maybeLetsEncrypt)
fi
if $blockstreamer; then
create_args+=(-u)
fi
@ -256,6 +307,11 @@ if ! $skipCreate; then
create_args+=(-f)
fi
if [[ -n $maybeFullnodeAdditionalDiskSize ]]; then
# shellcheck disable=SC2206 # Do not want to quote
create_args+=($maybeFullnodeAdditionalDiskSize)
fi
time net/"$cloudProvider".sh create "${create_args[@]}"
else
echo "--- $cloudProvider.sh config"
@ -318,7 +374,6 @@ if ! $skipStart; then
# shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick
args+=($maybeHashesPerTick)
fi
if $reuseLedger; then
args+=(-r)
fi
@ -334,7 +389,24 @@ if ! $skipStart; then
args+=(--deploy-update windows)
fi
# shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables
if [[ -n $maybeStakeNodesInGenesisBlock ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeStakeNodesInGenesisBlock
args+=($maybeStakeNodesInGenesisBlock)
fi
if [[ -n $maybeExternalPrimordialAccountsFile ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeExternalPrimordialAccountsFile
args+=($maybeExternalPrimordialAccountsFile)
fi
if [[ -n $maybeLamports ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeLamports
args+=($maybeLamports)
fi
if [[ -n $maybeNoSnapshot ]]; then
# shellcheck disable=SC2206
args+=($maybeNoSnapshot)
fi
time net/net.sh "${args[@]}"
) || ok=false

View File

@ -44,6 +44,8 @@ steps:
value: "testnet-beta-perf"
- label: "testnet-demo"
value: "testnet-demo"
- label: "tds"
value: "tds"
- select: "Operation"
key: "testnet-operation"
default: "sanity-or-restart"
@ -153,6 +155,10 @@ testnet-demo)
: "${GCE_NODE_COUNT:=150}"
: "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
;;
tds)
CHANNEL_OR_TAG=beta
CHANNEL_BRANCH=$BETA_CHANNEL
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
@ -287,6 +293,14 @@ sanity() {
$ok
)
;;
tds)
(
set -x
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-sanity.sh tds-solana-com gce "${GCE_ZONES[0]}" -f
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
@ -321,7 +335,8 @@ deploy() {
(
set -x
ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-a eipalloc-0ccd4f2239886fa94 --letsencrypt edge.testnet.solana.com \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
@ -347,7 +362,8 @@ deploy() {
set -x
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-a eipalloc-0f286cf8a0771ce35 --letsencrypt beta.testnet.solana.com \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
@ -378,7 +394,8 @@ deploy() {
# shellcheck disable=SC2068
ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f -a eipalloc-0fa502bf95f6f18b2 \
-t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f \
-a eipalloc-0fa502bf95f6f18b2 --letsencrypt testnet.solana.com \
${skipCreate:+-e} \
${maybeSkipStart:+-s} \
${maybeStop:+-S} \
@ -424,7 +441,9 @@ deploy() {
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f -w \
-t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f \
--skip-deploy-update \
--skip-remote-log-retrieval \
-a demo-testnet-solana-com \
${skipCreate:+-e} \
${maybeSkipStart:+-s} \
@ -436,7 +455,9 @@ deploy() {
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x -w \
-t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x \
--skip-deploy-update \
--skip-remote-log-retrieval \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
@ -444,6 +465,106 @@ deploy() {
fi
)
;;
tds)
(
set -x
# Allow cluster configuration to be overridden from env vars
if [[ -z $TDS_ZONES ]]; then
TDS_ZONES="us-west1-a,us-central1-a,europe-west4-a"
fi
GCE_CLOUD_ZONES=(); while read -r -d, ; do GCE_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TDS_ZONES},"
if [[ -z $TDS_NODE_COUNT ]]; then
TDS_NODE_COUNT="3"
fi
if [[ -z $TDS_CLIENT_COUNT ]]; then
TDS_CLIENT_COUNT="1"
fi
if [[ -z $ENABLE_GPU ]]; then
maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
elif [[ $ENABLE_GPU == skip ]]; then
maybeGpu=()
else
maybeGpu=(-G "${ENABLE_GPU}")
fi
if [[ -z $HASHES_PER_TICK ]]; then
maybeHashesPerTick="--hashes-per-tick auto"
elif [[ $HASHES_PER_TICK == skip ]]; then
maybeHashesPerTick=""
else
maybeHashesPerTick="--hashes-per-tick ${HASHES_PER_TICK}"
fi
if [[ -z $STAKE_INTERNAL_NODES ]]; then
maybeStakeInternalNodes="--stake-internal-nodes 1000000000000"
elif [[ $STAKE_INTERNAL_NODES == skip ]]; then
maybeStakeInternalNodes=""
else
maybeStakeInternalNodes="--stake-internal-nodes ${STAKE_INTERNAL_NODES}"
fi
EXTERNAL_ACCOUNTS_FILE=/tmp/validator.yml
if [[ -z $EXTERNAL_ACCOUNTS_FILE_URL ]]; then
EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/stage1/validator.yml
wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
elif [[ $EXTERNAL_ACCOUNTS_FILE_URL == skip ]]; then
maybeExternalAccountsFile=""
else
EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/stage1/validator.yml
wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
fi
if [[ -z $LAMPORTS ]]; then
maybeLamports="--lamports 8589934592000000000"
elif [[ $LAMPORTS == skip ]]; then
maybeLamports=""
else
maybeLamports="--lamports ${LAMPORTS}"
fi
if [[ -z $ADDITIONAL_DISK_SIZE_GB ]]; then
maybeAdditionalDisk="--fullnode-additional-disk-size-gb 32000"
elif [[ $ADDITIONAL_DISK_SIZE_GB == skip ]]; then
maybeAdditionalDisk=""
else
maybeAdditionalDisk="--fullnode-additional-disk-size-gb ${ADDITIONAL_DISK_SIZE_GB}"
fi
# Multiple V100 GPUs are available in us-west1, us-central1 and europe-west4
# shellcheck disable=SC2068
# shellcheck disable=SC2086
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p tds-solana-com -C gce \
"${maybeGpu[@]}" \
-d pd-ssd \
${GCE_CLOUD_ZONES[@]/#/-z } \
-t "$CHANNEL_OR_TAG" \
-n ${TDS_NODE_COUNT} \
-c ${TDS_CLIENT_COUNT} \
-P -u \
-a tds-solana-com --letsencrypt tds.solana.com \
${maybeHashesPerTick} \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
${maybeDelete:+-D} \
${maybeStakeInternalNodes} \
${maybeExternalAccountsFile} \
${maybeLamports} \
${maybeAdditionalDisk} \
--skip-deploy-update \
--no-snapshot
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1

View File

@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.16.0"
version = "0.16.5"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ reqwest = "0.9.18"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
[dev-dependencies]
jsonrpc-core = "12.0.0"
jsonrpc-http-server = "12.0.0"
solana-logger = { path = "../logger", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.5" }

View File

@ -274,6 +274,39 @@ impl RpcClient {
self.get_account(pubkey).map(|account| account.lamports)
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> io::Result<Vec<(Pubkey, Account)>> {
let params = json!([format!("{}", pubkey)]);
let response = self
.client
.send(&RpcRequest::GetProgramAccounts, Some(params), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("AccountNotFound: pubkey={}: {}", pubkey, err),
)
})?;
let accounts: Vec<(String, Account)> =
serde_json::from_value::<Vec<(String, Account)>>(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
)
})?;
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for (string, account) in accounts.into_iter() {
let pubkey = string.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
)
})?;
pubkey_accounts.push((pubkey, account));
}
Ok(pubkey_accounts)
}
/// Request the transaction count. If the response packet is dropped by the network,
/// this method will try again 5 times.
pub fn get_transaction_count(&self) -> io::Result<u64> {
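
A minimal sketch of how a caller might use the new `get_program_accounts` method (the endpoint is a placeholder, the program id is the sample pubkey from the JSON RPC docs above, and the `RpcClient::new(String)` constructor is assumed from this release's client API):

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;

fn main() {
    // Placeholder endpoint; program id is the docs' sample pubkey.
    let client = RpcClient::new("http://localhost:8899".to_string());
    let program_id: Pubkey = "8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"
        .parse()
        .expect("valid base-58 pubkey");

    match client.get_program_accounts(&program_id) {
        Ok(accounts) => {
            for (pubkey, account) in accounts {
                println!(
                    "{}: {} lamports, {} bytes of data",
                    pubkey,
                    account.lamports,
                    account.data.len()
                );
            }
        }
        Err(err) => eprintln!("getProgramAccounts failed: {}", err),
    }
}
```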

View File

@ -10,6 +10,7 @@ pub enum RpcRequest {
GetBalance,
GetClusterNodes,
GetNumBlocksSinceSignatureConfirmation,
GetProgramAccounts,
GetRecentBlockhash,
GetSignatureStatus,
GetSlot,
@ -38,6 +39,7 @@ impl RpcRequest {
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation"
}
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatus => "getSignatureStatus",
RpcRequest::GetSlot => "getSlot",

View File

@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.16.0"
version = "0.16.5"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -45,27 +45,27 @@ rocksdb = "0.11.0"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.16.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.0" }
solana-config-program = { path = "../programs/config_program", version = "0.16.0" }
solana-drone = { path = "../drone", version = "0.16.0" }
solana-budget-api = { path = "../programs/budget_api", version = "0.16.5" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.5" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.16.5" }
solana-client = { path = "../client", version = "0.16.5" }
solana-config-program = { path = "../programs/config_program", version = "0.16.5" }
solana-drone = { path = "../drone", version = "0.16.5" }
solana-ed25519-dalek = "0.2.0"
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
solana-kvstore = { path = "../kvstore", version = "0.16.0", optional = true }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-metrics = { path = "../metrics", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-runtime = { path = "../runtime", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.16.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.5" }
solana-kvstore = { path = "../kvstore", version = "0.16.5", optional = true }
solana-logger = { path = "../logger", version = "0.16.5" }
solana-metrics = { path = "../metrics", version = "0.16.5" }
solana-netutil = { path = "../netutil", version = "0.16.5" }
solana-runtime = { path = "../runtime", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.5" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.5" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.5" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.5" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.5" }
solana-vote-signer = { path = "../vote-signer", version = "0.16.5" }
sys-info = "0.5.7"
tokio = "0.1"
tokio-codec = "0.1"

View File

@ -329,8 +329,9 @@ impl BankForks {
names.sort();
let mut bank_maps = vec![];
let status_cache_rc = StatusCacheRc::default();
let id = (names[names.len() - 1] + 1) as usize;
let mut bank0 =
Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc);
Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc, id);
bank0.freeze();
let bank_root = BankForks::load_snapshots(
&names,

View File

@ -418,7 +418,7 @@ impl BankingStage {
// the likelihood of any single thread getting starved and processing old ids.
// TODO: Banking stage threads should be prioritized to complete faster then this queue
// expires.
let (loaded_accounts, results) =
let (loaded_accounts, results, tx_count, signature_count) =
bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2);
let load_execute_time = now.elapsed();
@ -432,7 +432,7 @@ impl BankingStage {
let commit_time = {
let now = Instant::now();
bank.commit_transactions(txs, &loaded_accounts, &results);
bank.commit_transactions(txs, &loaded_accounts, &results, tx_count, signature_count);
now.elapsed()
};

View File

@ -722,70 +722,6 @@ impl Blocktree {
iter.map(|(_, blob_data)| Blob::new(&blob_data))
}
/// Return an iterator for all the entries in the given file.
pub fn read_ledger(&self) -> Result<impl Iterator<Item = Entry>> {
use crate::entry::EntrySlice;
use std::collections::VecDeque;
struct EntryIterator {
db_iterator: Cursor<cf::Data>,
// TODO: remove me when replay_stage is iterating by block (Blocktree)
// this verification is duplicating that of replay_stage, which
// can do this in parallel
blockhash: Option<Hash>,
// https://github.com/rust-rocksdb/rust-rocksdb/issues/234
// rocksdb issue: the _blocktree member must be lower in the struct to prevent a crash
// when the db_iterator member above is dropped.
// _blocktree is unused, but dropping _blocktree results in a broken db_iterator
// you have to hold the database open in order to iterate over it, and in order
// for db_iterator to be able to run Drop
// _blocktree: Blocktree,
entries: VecDeque<Entry>,
}
impl Iterator for EntryIterator {
type Item = Entry;
fn next(&mut self) -> Option<Entry> {
if !self.entries.is_empty() {
return Some(self.entries.pop_front().unwrap());
}
if self.db_iterator.valid() {
if let Some(value) = self.db_iterator.value_bytes() {
if let Ok(next_entries) =
deserialize::<Vec<Entry>>(&value[BLOB_HEADER_SIZE..])
{
if let Some(blockhash) = self.blockhash {
if !next_entries.verify(&blockhash) {
return None;
}
}
self.db_iterator.next();
if next_entries.is_empty() {
return None;
}
self.entries = VecDeque::from(next_entries);
let entry = self.entries.pop_front().unwrap();
self.blockhash = Some(entry.hash);
return Some(entry);
}
}
}
None
}
}
let mut db_iterator = self.db.cursor::<cf::Data>()?;
db_iterator.seek_to_first();
Ok(EntryIterator {
entries: VecDeque::new(),
db_iterator,
blockhash: None,
})
}
pub fn get_slot_entries_with_blob_count(
&self,
slot: u64,
@ -1662,9 +1598,7 @@ pub fn tmp_copy_blocktree(from: &str, name: &str) -> String {
#[cfg(test)]
pub mod tests {
use super::*;
use crate::entry::{
create_ticks, make_tiny_test_entries, make_tiny_test_entries_from_hash, Entry, EntrySlice,
};
use crate::entry::{create_ticks, make_tiny_test_entries, Entry, EntrySlice};
use crate::erasure::{CodingGenerator, NUM_CODING, NUM_DATA};
use crate::packet;
use rand::seq::SliceRandom;
@ -2192,59 +2126,6 @@ pub mod tests {
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_genesis_and_entry_iterator() {
let entries = make_tiny_test_entries_from_hash(&Hash::default(), 10);
let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
{
genesis(&ledger_path, &Keypair::new(), &entries).unwrap();
let ledger = Blocktree::open(&ledger_path).expect("open failed");
let read_entries: Vec<Entry> =
ledger.read_ledger().expect("read_ledger failed").collect();
assert!(read_entries.verify(&Hash::default()));
assert_eq!(entries, read_entries);
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_entry_iterator_up_to_consumed() {
let entries = make_tiny_test_entries_from_hash(&Hash::default(), 3);
let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
{
// put entries except last 2 into ledger
genesis(&ledger_path, &Keypair::new(), &entries[..entries.len() - 2]).unwrap();
let ledger = Blocktree::open(&ledger_path).expect("open failed");
// now write the last entry, ledger has a hole in it one before the end
// +-+-+-+-+-+-+-+ +-+
// | | | | | | | | | |
// +-+-+-+-+-+-+-+ +-+
ledger
.write_entries(
0u64,
0,
(entries.len() - 1) as u64,
16,
&entries[entries.len() - 1..],
)
.unwrap();
let read_entries: Vec<Entry> =
ledger.read_ledger().expect("read_ledger failed").collect();
assert!(read_entries.verify(&Hash::default()));
// enumeration should stop at the hole
assert_eq!(entries[..entries.len() - 2].to_vec(), read_entries);
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_new_blobs_signal() {
// Initialize ledger

View File

@ -142,6 +142,7 @@ pub fn process_blocktree(
genesis_block: &GenesisBlock,
blocktree: &Blocktree,
account_paths: Option<String>,
verify_ledger: bool,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
let now = Instant::now();
info!("processing ledger...");
@ -204,7 +205,7 @@ pub fn process_blocktree(
}
if !entries.is_empty() {
if !entries.verify(&last_entry_hash) {
if verify_ledger && !entries.verify(&last_entry_hash) {
warn!(
"Ledger proof of history failed at slot: {}, entry: {}",
slot, entry_height
@ -373,7 +374,7 @@ pub mod tests {
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);
let (mut _bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(
@ -432,7 +433,7 @@ pub mod tests {
blocktree.set_roots(&[4, 1, 0]).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root
@ -506,7 +507,7 @@ pub mod tests {
blocktree.set_roots(&[0, 1]).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 2); // There are two forks
assert_eq!(
@ -587,7 +588,7 @@ pub mod tests {
// Check that we can properly restart the ledger / leader scheduler doesn't fail
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); // There is one fork
assert_eq!(
@ -723,7 +724,7 @@ pub mod tests {
.unwrap();
let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(bank_forks.root(), 0);
@ -754,7 +755,7 @@ pub mod tests {
let blocktree = Blocktree::open(&ledger_path).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(

View File

@ -133,7 +133,7 @@ mod tests {
hasher.hash(&buf[..size]);
// golden needs to be updated if blob stuff changes....
let golden: Hash = "E2HZjSC6VgH4nmEiTbMDATTeBcFjwSYz7QYvU7doGNhD"
let golden: Hash = "37YzrTgiFRGQG1EoMZVecnGqxEK7UGxEQeBSdGMJcKqp"
.parse()
.unwrap();

View File

@ -748,7 +748,7 @@ impl ClusterInfo {
/// retransmit messages to a list of nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
/// We need to avoid having obj locked while doing a io, such as the `send_to`
pub fn retransmit_to(
obj: &Arc<RwLock<Self>>,
peers: &[ContactInfo],
@ -1092,7 +1092,7 @@ impl ClusterInfo {
if caller.contact_info().is_none() {
return vec![];
}
let mut from = caller.contact_info().cloned().unwrap();
let from = caller.contact_info().unwrap();
if from.id == self_id {
warn!(
"PullRequest ignored, I'm talking to myself: me={} remoteme={}",
@ -1110,15 +1110,10 @@ impl ClusterInfo {
let len = data.len();
trace!("get updates since response {}", len);
let rsp = Protocol::PullResponse(self_id, data);
// The remote node may not know its public IP:PORT. Record what it looks like to us.
// This may or may not be correct for everybody, but it's better than leaving the remote with
// an unspecified address in our table
if from.gossip.ip().is_unspecified() {
inc_new_counter_debug!("cluster_info-window-request-updates-unspec-gossip", 1);
from.gossip = *from_addr;
}
// The remote node may not know its public IP:PORT. Instead of responding to the caller's
// gossip addr, respond to the origin addr.
inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
to_shared_blob(rsp, from.gossip).ok().into_iter().collect()
to_shared_blob(rsp, *from_addr).ok().into_iter().collect()
}
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
let len = data.len();

View File

@ -260,14 +260,16 @@ impl ClusterInfoRepairListener {
num_slots_to_repair: usize,
epoch_schedule: &EpochSchedule,
) -> Result<()> {
let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root + 1);
let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root);
if slot_iter.is_err() {
warn!("Root for repairee is on different fork OR replay_stage hasn't marked this slot as root yet");
info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {}",
my_root, repairee_epoch_slots.root
);
return Ok(());
}
let slot_iter = slot_iter?;
let mut slot_iter = slot_iter?;
let mut total_data_blobs_sent = 0;
let mut total_coding_blobs_sent = 0;
@ -276,6 +278,10 @@ impl ClusterInfoRepairListener {
epoch_schedule.get_stakers_epoch(repairee_epoch_slots.root);
let max_confirmed_repairee_slot =
epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch);
// Skip the first slot in the iterator because we know it's the root slot which the repairee
// already has
slot_iter.next();
for (slot, slot_meta) in slot_iter {
if slot > my_root
|| num_slots_repaired >= num_slots_to_repair

View File
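A tiny sketch of the iterator pattern in the repair-listener hunk above, with a plain numeric range standing in for rooted_slot_iterator: start the iteration at the repairee's root, then drop the first item because the repairee already has that slot.

fn main() {
    let repairee_root = 10u64;
    // Stand-in for blocktree.rooted_slot_iterator(repairee_root).
    let mut slot_iter = repairee_root..15;
    // Skip the first slot: it is the root itself, which the repairee already has.
    slot_iter.next();
    for slot in slot_iter {
        println!("candidate slot to repair: {}", slot);
    }
}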

@ -226,11 +226,13 @@ impl CodingGenerator {
let index = data_blob.index();
let slot = data_blob.slot();
let id = data_blob.id();
let version = data_blob.version();
let mut coding_blob = Blob::default();
coding_blob.set_index(index);
coding_blob.set_slot(slot);
coding_blob.set_id(&id);
coding_blob.set_version(version);
coding_blob.set_size(max_data_size);
coding_blob.set_coding();

View File

@ -176,6 +176,7 @@ impl LocalCluster {
&leader_voting_keypair,
&leader_storage_keypair,
None,
true,
&config.validator_configs[0],
);
@ -308,6 +309,7 @@ impl LocalCluster {
&voting_keypair,
&storage_keypair,
Some(&self.entry_point_info),
true,
&validator_config,
);
@ -561,6 +563,7 @@ impl Cluster for LocalCluster {
&fullnode_info.voting_keypair,
&fullnode_info.storage_keypair,
None,
true,
config,
);

View File

@ -341,7 +341,8 @@ macro_rules! range {
const SIGNATURE_RANGE: std::ops::Range<usize> = range!(0, Signature);
const FORWARDED_RANGE: std::ops::Range<usize> = range!(SIGNATURE_RANGE.end, bool);
const PARENT_RANGE: std::ops::Range<usize> = range!(FORWARDED_RANGE.end, u64);
const SLOT_RANGE: std::ops::Range<usize> = range!(PARENT_RANGE.end, u64);
const VERSION_RANGE: std::ops::Range<usize> = range!(PARENT_RANGE.end, u64);
const SLOT_RANGE: std::ops::Range<usize> = range!(VERSION_RANGE.end, u64);
const INDEX_RANGE: std::ops::Range<usize> = range!(SLOT_RANGE.end, u64);
const ID_RANGE: std::ops::Range<usize> = range!(INDEX_RANGE.end, Pubkey);
const FLAGS_RANGE: std::ops::Range<usize> = range!(ID_RANGE.end, u32);
@ -391,6 +392,12 @@ impl Blob {
pub fn set_parent(&mut self, ix: u64) {
LittleEndian::write_u64(&mut self.data[PARENT_RANGE], ix);
}
pub fn version(&self) -> u64 {
LittleEndian::read_u64(&self.data[VERSION_RANGE])
}
pub fn set_version(&mut self, version: u64) {
LittleEndian::write_u64(&mut self.data[VERSION_RANGE], version);
}
pub fn slot(&self) -> u64 {
LittleEndian::read_u64(&self.data[SLOT_RANGE])
}
@ -862,4 +869,12 @@ mod tests {
b.sign(&k);
assert!(b.verify());
}
#[test]
fn test_version() {
let mut b = Blob::default();
assert_eq!(b.version(), 0);
b.set_version(1);
assert_eq!(b.version(), 1);
}
}
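The version accessors added above follow the same pattern as the other Blob header fields: fixed byte ranges read and written as little-endian u64s. A self-contained sketch of that layout idea using only std, with a hypothetical three-field header rather than the real Blob offsets:

// Hypothetical header layout: [parent: u64][version: u64][slot: u64], little-endian.
const PARENT: std::ops::Range<usize> = 0..8;
const VERSION: std::ops::Range<usize> = 8..16;
const SLOT: std::ops::Range<usize> = 16..24;

fn write_u64(buf: &mut [u8], range: std::ops::Range<usize>, value: u64) {
    buf[range].copy_from_slice(&value.to_le_bytes());
}

fn read_u64(buf: &[u8], range: std::ops::Range<usize>) -> u64 {
    let mut bytes = [0u8; 8];
    bytes.copy_from_slice(&buf[range]);
    u64::from_le_bytes(bytes)
}

fn main() {
    let mut header = [0u8; 24];
    write_u64(&mut header, VERSION, 1);
    assert_eq!(read_u64(&header, VERSION), 1);
    // Untouched fields stay zero, mirroring Blob::default() in the test above.
    assert_eq!(read_u64(&header, PARENT), 0);
    assert_eq!(read_u64(&header, SLOT), 0);
}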

View File

@ -23,7 +23,6 @@ use solana_sdk::account_utils::State;
use solana_sdk::client::{AsyncClient, SyncClient};
use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::timing::timestamp;
use solana_sdk::transaction::Transaction;
@ -303,7 +302,7 @@ impl Replicator {
})
}
pub fn run(&mut self, mining_pool_pubkey: Pubkey) {
pub fn run(&mut self) {
info!("waiting for ledger download");
self.thread_handles.pop().unwrap().join().unwrap();
self.encrypt_ledger()
@ -330,11 +329,11 @@ impl Replicator {
}
};
self.blockhash = storage_blockhash;
self.redeem_rewards(&mining_pool_pubkey);
self.redeem_rewards();
}
}
fn redeem_rewards(&self, mining_pool_pubkey: &Pubkey) {
fn redeem_rewards(&self) {
let nodes = self.cluster_info.read().unwrap().tvu_peers();
let client = crate::gossip_service::get_client(&nodes);
@ -347,7 +346,6 @@ impl Replicator {
let ix = storage_instruction::claim_reward(
&self.keypair.pubkey(),
&self.storage_keypair.pubkey(),
mining_pool_pubkey,
);
let message = Message::new_with_payer(vec![ix], Some(&self.keypair.pubkey()));
if let Err(e) = client.send_message(&[&self.keypair], message) {
@ -468,7 +466,15 @@ impl Replicator {
// check if the storage account exists
let balance = client.poll_get_balance(&storage_keypair.pubkey());
if balance.is_err() || balance.unwrap() == 0 {
let (blockhash, _fee_calculator) = client.get_recent_blockhash().expect("blockhash");
let blockhash = match client.get_recent_blockhash() {
Ok((blockhash, _)) => blockhash,
Err(_) => {
return Err(Error::IO(<io::Error>::new(
io::ErrorKind::Other,
"unable to get recent blockhash, can't submit proof",
)))
}
};
let ix = storage_instruction::create_replicator_storage_account(
&keypair.pubkey(),
@ -495,16 +501,25 @@ impl Replicator {
// No point if we've got no storage account...
let nodes = self.cluster_info.read().unwrap().tvu_peers();
let client = crate::gossip_service::get_client(&nodes);
assert!(
client
.poll_get_balance(&self.storage_keypair.pubkey())
.unwrap()
> 0
);
let storage_balance = client.poll_get_balance(&self.storage_keypair.pubkey());
if storage_balance.is_err() || storage_balance.unwrap() == 0 {
error!("Unable to submit mining proof, no storage account");
return;
}
// ...or no lamports for fees
assert!(client.poll_get_balance(&self.keypair.pubkey()).unwrap() > 0);
let balance = client.poll_get_balance(&self.keypair.pubkey());
if balance.is_err() || balance.unwrap() == 0 {
error!("Unable to submit mining proof, insufficient Replicator Account balance");
return;
}
let (blockhash, _) = client.get_recent_blockhash().expect("No recent blockhash");
let blockhash = match client.get_recent_blockhash() {
Ok((blockhash, _)) => blockhash,
Err(_) => {
error!("unable to get recent blockhash, can't submit proof");
return;
}
};
let instruction = storage_instruction::mining_proof(
&self.storage_keypair.pubkey(),
self.sha_state,
@ -518,14 +533,14 @@ impl Replicator {
message,
blockhash,
);
client
.send_and_confirm_transaction(
&[&self.keypair, &self.storage_keypair],
&mut transaction,
10,
0,
)
.expect("transfer didn't work!");
if let Err(err) = client.send_and_confirm_transaction(
&[&self.keypair, &self.storage_keypair],
&mut transaction,
10,
0,
) {
error!("Error: {:?}; while sending mining proof", err);
}
}
pub fn close(self) {

View File

@ -70,6 +70,15 @@ impl JsonRpcRequestProcessor {
.ok_or_else(Error::invalid_request)
}
pub fn get_program_accounts(&self, program_id: &Pubkey) -> Result<Vec<(String, Account)>> {
Ok(self
.bank()
.get_program_accounts(&program_id)
.into_iter()
.map(|(pubkey, account)| (pubkey.to_string(), account))
.collect())
}
pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
self.bank().get_balance(&pubkey)
}
@ -196,8 +205,8 @@ pub struct RpcVoteAccountInfo {
/// The current stake, in lamports, delegated to this vote account
pub stake: u64,
/// A 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout
pub commission: u32,
/// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
pub commission: u8,
}
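The updated doc comment defines commission as a fraction of u8::MAX rather than a percentage. A worked example of the conversion, assuming a plain ratio with no rounding rules implied by the source:

fn commission_percent(commission: u8) -> f64 {
    // commission / MAX_U8, expressed as a percentage.
    f64::from(commission) / f64::from(u8::MAX) * 100.0
}

fn main() {
    // 127, the value used by the fullnode.sh change later in this diff, is roughly half.
    assert!((commission_percent(127) - 49.8).abs() < 0.1);
    assert_eq!(commission_percent(u8::MAX), 100.0);
}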
#[rpc(server)]
@ -210,6 +219,9 @@ pub trait RpcSol {
#[rpc(meta, name = "getAccountInfo")]
fn get_account_info(&self, _: Self::Metadata, _: String) -> Result<Account>;
#[rpc(meta, name = "getProgramAccounts")]
fn get_program_accounts(&self, _: Self::Metadata, _: String) -> Result<Vec<(String, Account)>>;
#[rpc(meta, name = "getBalance")]
fn get_balance(&self, _: Self::Metadata, _: String) -> Result<u64>;
@ -297,6 +309,19 @@ impl RpcSol for RpcSolImpl {
.get_account_info(&pubkey)
}
fn get_program_accounts(
&self,
meta: Self::Metadata,
id: String,
) -> Result<Vec<(String, Account)>> {
debug!("get_program_accounts rpc request received: {:?}", id);
let program_id = verify_pubkey(id)?;
meta.request_processor
.read()
.unwrap()
.get_program_accounts(&program_id)
}
fn get_balance(&self, meta: Self::Metadata, id: String) -> Result<u64> {
debug!("get_balance rpc request received: {:?}", id);
let pubkey = verify_pubkey(id)?;
@ -535,7 +560,7 @@ mod tests {
fn start_rpc_handler_with_tx(
pubkey: &Pubkey,
) -> (MetaIoHandler<Meta>, Meta, Hash, Keypair, Pubkey) {
) -> (MetaIoHandler<Meta>, Meta, Arc<Bank>, Hash, Keypair, Pubkey) {
let (bank_forks, alice) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
let exit = Arc::new(AtomicBool::new(false));
@ -567,7 +592,7 @@ mod tests {
request_processor,
cluster_info,
};
(io, meta, blockhash, alice, leader.id)
(io, meta, bank, blockhash, alice, leader.id)
}
#[test]
@ -595,7 +620,8 @@ mod tests {
#[test]
fn test_rpc_get_balance() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#,
@ -613,7 +639,8 @@ mod tests {
#[test]
fn test_rpc_get_cluster_nodes() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, _blockhash, _alice, leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}}"#);
let res = io.handle_request_sync(&req, meta);
@ -633,7 +660,8 @@ mod tests {
#[test]
fn test_rpc_get_slot_leader() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}}"#);
let res = io.handle_request_sync(&req, meta);
@ -649,7 +677,8 @@ mod tests {
#[test]
fn test_rpc_get_tx_count() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#);
let res = io.handle_request_sync(&req, meta);
@ -664,7 +693,8 @@ mod tests {
#[test]
fn test_rpc_get_total_supply() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTotalSupply"}}"#);
let rep = io.handle_request_sync(&req, meta);
@ -689,7 +719,8 @@ mod tests {
#[test]
fn test_rpc_get_account_info() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#,
@ -713,10 +744,46 @@ mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_get_program_accounts() {
let bob = Keypair::new();
let (io, meta, bank, blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob.pubkey());
let new_program_id = Pubkey::new_rand();
let tx = system_transaction::assign(&bob, blockhash, &new_program_id);
bank.process_transaction(&tx).unwrap();
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getProgramAccounts","params":["{}"]}}"#,
new_program_id
);
let res = io.handle_request_sync(&req, meta);
let expected = format!(
r#"{{
"jsonrpc":"2.0",
"result":[["{}", {{
"owner": {:?},
"lamports": 20,
"data": [],
"executable": false
}}]],
"id":1}}
"#,
bob.pubkey(),
new_program_id.as_ref()
);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
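Outside the test harness, the same getProgramAccounts request body can be built programmatically. A minimal sketch with serde_json (already used by this crate); the program id string here is only a placeholder:

use serde_json::json;

fn main() {
    let program_id = "BPFLoader1111111111111111111111111111111111"; // placeholder pubkey string
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "getProgramAccounts",
        "params": [program_id]
    });
    // POST this body to the node's RPC port to receive
    // [[pubkey, {lamports, data, owner, executable}], ...] as shown in the test above.
    println!("{}", request);
}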
#[test]
fn test_rpc_confirm_tx() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, blockhash, alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, blockhash, alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
let req = format!(
@ -735,7 +802,8 @@ mod tests {
#[test]
fn test_rpc_get_signature_status() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, blockhash, alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, blockhash, alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
let req = format!(
@ -799,7 +867,8 @@ mod tests {
#[test]
fn test_rpc_get_recent_blockhash() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}}"#);
let res = io.handle_request_sync(&req, meta);
@ -824,7 +893,8 @@ mod tests {
#[test]
fn test_rpc_fail_request_airdrop() {
let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey);
let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
// Expect internal error because no drone is available
let req = format!(

View File

@ -84,6 +84,7 @@ impl Validator {
voting_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
entrypoint_info_option: Option<&ContactInfo>,
verify_ledger: bool,
config: &ValidatorConfig,
) -> Self {
warn!("CUDA is {}abled", if cfg!(cuda) { "en" } else { "dis" });
@ -104,6 +105,7 @@ impl Validator {
ledger_path,
config.account_paths.clone(),
config.snapshot_path.clone(),
verify_ledger,
);
let leader_schedule_cache = Arc::new(leader_schedule_cache);
@ -301,6 +303,7 @@ fn get_bank_forks(
blocktree: &Blocktree,
account_paths: Option<String>,
snapshot_path: Option<String>,
verify_ledger: bool,
) -> (BankForks, Vec<BankForksInfo>, LeaderScheduleCache) {
if snapshot_path.is_some() {
let bank_forks =
@ -318,8 +321,13 @@ fn get_bank_forks(
}
}
let (mut bank_forks, bank_forks_info, leader_schedule_cache) =
blocktree_processor::process_blocktree(&genesis_block, &blocktree, account_paths)
.expect("process_blocktree failed");
blocktree_processor::process_blocktree(
&genesis_block,
&blocktree,
account_paths,
verify_ledger,
)
.expect("process_blocktree failed");
if snapshot_path.is_some() {
bank_forks.set_snapshot_config(snapshot_path);
let _ = bank_forks.add_snapshot(0, 0);
@ -331,6 +339,7 @@ pub fn new_banks_from_blocktree(
blocktree_path: &str,
account_paths: Option<String>,
snapshot_path: Option<String>,
verify_ledger: bool,
) -> (
BankForks,
Vec<BankForksInfo>,
@ -347,8 +356,13 @@ pub fn new_banks_from_blocktree(
Blocktree::open_with_signal(blocktree_path)
.expect("Expected to successfully open database ledger");
let (bank_forks, bank_forks_info, leader_schedule_cache) =
get_bank_forks(&genesis_block, &blocktree, account_paths, snapshot_path);
let (bank_forks, bank_forks_info, leader_schedule_cache) = get_bank_forks(
&genesis_block,
&blocktree,
account_paths,
snapshot_path,
verify_ledger,
);
(
bank_forks,
@ -412,6 +426,7 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, String) {
&voting_keypair,
&storage_keypair,
None,
true,
&ValidatorConfig::default(),
);
discover_cluster(&contact_info.gossip, 1).expect("Node startup failed");
@ -447,6 +462,7 @@ mod tests {
&voting_keypair,
&storage_keypair,
Some(&leader_node.info),
true,
&ValidatorConfig::default(),
);
validator.close().unwrap();
@ -478,6 +494,7 @@ mod tests {
&voting_keypair,
&storage_keypair,
Some(&leader_node.info),
true,
&ValidatorConfig::default(),
)
})

View File

@ -7,6 +7,8 @@ use rand_chacha::ChaChaRng;
use std::iter;
use std::ops::Div;
/// Returns a list of indexes shuffled based on the input weights
/// Note - The sum of all weights must not exceed `u64::MAX`
pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize>
where
T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
@ -17,10 +19,13 @@ where
.into_iter()
.enumerate()
.map(|(i, v)| {
let x = (total_weight / v).to_u32().unwrap();
let x = (total_weight / v)
.to_u64()
.expect("values > u64::max are not supported");
(
i,
(&mut rng).gen_range(1, u64::from(std::u16::MAX)) * u64::from(x),
// capture the u64 into u128s to prevent overflow
(&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
)
})
.sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val))
@ -73,4 +78,18 @@ mod tests {
assert_eq!(x, y);
});
}
#[test]
fn test_weighted_shuffle_imbalanced() {
let mut weights = vec![std::u32::MAX as u64; 3];
weights.push(1);
let shuffle = weighted_shuffle(weights.clone(), ChaChaRng::from_seed([0x5a; 32]));
shuffle.into_iter().for_each(|x| {
if x == weights.len() - 1 {
assert_eq!(weights[x], 1);
} else {
assert_eq!(weights[x], std::u32::MAX as u64);
}
});
}
}
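The widening to u128 above exists because the per-item factor (total_weight / v) is multiplied by a random u16-scaled value, and the product of two large u64 factors can overflow. A self-contained sketch of the failure mode and the fix, with arbitrary numbers rather than the shuffle itself:

fn main() {
    // A large per-item factor, as (total_weight / v) can be when one weight is tiny.
    let x: u64 = u64::MAX / 2;
    // A random factor in 1..u16::MAX, as drawn by gen_range in the shuffle.
    let r: u64 = 60_000;

    // In u64 the product overflows; checked_mul reports it.
    assert!(x.checked_mul(r).is_none());

    // Widening both sides to u128 keeps the full product, which is all the sort key needs.
    let product = u128::from(x) * u128::from(r);
    assert!(product > u128::from(u64::MAX));
}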

View File

@ -118,6 +118,7 @@ fn test_leader_failure_4() {
);
}
#[test]
#[ignore]
fn test_two_unbalanced_stakes() {
solana_logger::setup();
let mut validator_config = ValidatorConfig::default();

View File

@ -98,7 +98,7 @@ fn test_replay() {
completed_slots_receiver,
leader_schedule_cache,
_,
) = validator::new_banks_from_blocktree(&blocktree_path, None, None);
) = validator::new_banks_from_blocktree(&blocktree_path, None, None, true);
let working_bank = bank_forks.working_bank();
assert_eq!(
working_bank.get_balance(&mint_keypair.pubkey()),

View File

@ -1,6 +1,6 @@
[package]
name = "solana-drone"
version = "0.16.0"
version = "0.16.5"
description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -20,9 +20,9 @@ clap = "2.33"
log = "0.4.2"
serde = "1.0.92"
serde_derive = "1.0.92"
solana-logger = { path = "../logger", version = "0.16.0" }
solana-metrics = { path = "../metrics", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.5" }
solana-metrics = { path = "../metrics", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "0.16.0"
version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,24 +15,24 @@ serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
serde_yaml = "0.8.9"
solana = { path = "../core", version = "0.16.0" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.16.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.16.0" }
solana-budget-api = { path = "../programs/budget_api", version = "0.16.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.0" }
solana-config-api = { path = "../programs/config_api", version = "0.16.0" }
solana-config-program = { path = "../programs/config_program", version = "0.16.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.0" }
solana-token-api = { path = "../programs/token_api", version = "0.16.0" }
solana-token-program = { path = "../programs/token_program", version = "0.16.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.0" }
solana = { path = "../core", version = "0.16.5" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.16.5" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.16.5" }
solana-budget-api = { path = "../programs/budget_api", version = "0.16.5" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.5" }
solana-config-api = { path = "../programs/config_api", version = "0.16.5" }
solana-config-program = { path = "../programs/config_program", version = "0.16.5" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.5" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.5" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.5" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.5" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.5" }
solana-token-api = { path = "../programs/token_api", version = "0.16.5" }
solana-token-program = { path = "../programs/token_program", version = "0.16.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.5" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.5" }
[dev-dependencies]
hashbrown = "0.3.0"

View File

@ -147,14 +147,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.required(true)
.help("Path to file containing the bootstrap leader's storage keypair"),
)
.arg(
Arg::with_name("storage_mining_pool_lamports")
.long("storage-mining-pool-lamports")
.value_name("LAMPORTS")
.takes_value(true)
.required(true)
.help("Number of lamports to assign to the storage mining pool"),
)
.arg(
Arg::with_name("bootstrap_leader_lamports")
.long("bootstrap-leader-lamports")
@ -261,7 +253,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64);
let bootstrap_leader_stake_lamports =
value_t_or_exit!(matches, "bootstrap_leader_stake_lamports", u64);
let storage_pool_lamports = value_t_or_exit!(matches, "storage_mining_pool_lamports", u64);
let bootstrap_leader_keypair = read_keypair(bootstrap_leader_keypair_file)?;
let bootstrap_vote_keypair = read_keypair(bootstrap_vote_keypair_file)?;
@ -306,12 +297,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
1,
),
),
(
"StorageMiningPoo111111111111111111111111111"
.parse()
.unwrap(),
storage_contract::create_mining_pool_account(storage_pool_lamports),
),
])
.native_instruction_processors(&[
solana_bpf_loader_program!(),
@ -370,6 +355,10 @@ fn main() -> Result<(), Box<dyn error::Error>> {
builder = append_primordial_accounts(file, AccountFileFormat::Keypair, builder)?;
}
// add the reward pools
builder = solana_storage_api::rewards_pools::genesis(builder);
builder = solana_stake_api::rewards_pools::genesis(builder);
create_new_ledger(ledger_path, &builder.build())?;
Ok(())
}
@ -524,6 +513,8 @@ mod tests {
)
.expect("builder");
builder = solana_storage_api::rewards_pools::genesis(builder);
remove_file(path).unwrap();
let genesis_block = builder.clone().build();

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "0.16.0"
version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -11,10 +11,10 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
env_logger = "0.6.1"
solana = { path = "../core", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana = { path = "../core", version = "0.16.5" }
solana-client = { path = "../client", version = "0.16.5" }
solana-netutil = { path = "../netutil", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
[features]
cuda = []

View File

@ -41,12 +41,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
SubCommand::with_name("spy")
.about("Monitor the gossip entrypoint")
.setting(AppSettings::DisableVersion)
.arg(
clap::Arg::with_name("pull_only")
.long("pull-only")
.takes_value(false)
.help("Use a partial gossip node (Pulls only) to spy on the cluster. By default it will use a full fledged gossip node (Pushes and Pulls). Useful when behind a NAT"),
)
.arg(
Arg::with_name("num_nodes")
.short("N")
@ -120,9 +114,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.value_of("node_pubkey")
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
let gossip_addr = if matches.is_present("pull_only") {
None
} else {
let gossip_addr = {
let mut addr = socketaddr_any!();
addr.set_ip(
solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(|err| {

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "0.16.0"
version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -13,25 +13,28 @@ cuda = []
[dependencies]
atty = "0.2.11"
bincode = "1.1.4"
bs58 = "0.2.0"
bzip2 = "0.3.3"
chrono = { version = "0.4.0", features = ["serde"] }
clap = { version = "2.33.0" }
console = "0.7.5"
console = "0.7.7"
ctrlc = { version = "3.1.3", features = ["termination"] }
dirs = "2.0.1"
indicatif = "0.11.0"
lazy_static = "1.3.0"
log = "0.4.2"
nix = "0.14.1"
reqwest = "0.9.18"
ring = "0.13.2"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_yaml = "0.8.9"
solana-client = { path = "../client", version = "0.16.0" }
solana-config-api = { path = "../programs/config_api", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.5" }
solana-config-api = { path = "../programs/config_api", version = "0.16.5" }
solana-logger = { path = "../logger", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
tar = "0.4.26"
tempdir = "0.3.7"
url = "1.7.2"

View File

@ -1,11 +1,12 @@
use crate::config::Config;
use crate::stop_process::stop_process;
use crate::update_manifest::{SignedUpdateManifest, UpdateManifest};
use chrono::{Local, TimeZone};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use ring::digest::{Context, Digest, SHA256};
use solana_client::rpc_client::RpcClient;
use solana_config_api::config_instruction;
use solana_config_api::config_instruction::{self, ConfigKeys};
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil, Signable};
@ -13,7 +14,7 @@ use solana_sdk::transaction::Transaction;
use std::fs::{self, File};
use std::io::{self, BufReader, Read};
use std::path::{Path, PathBuf};
use std::thread::sleep;
use std::sync::mpsc;
use std::time::SystemTime;
use std::time::{Duration, Instant};
use tempdir::TempDir;
@ -202,7 +203,8 @@ fn new_update_manifest(
let new_account = config_instruction::create_account::<SignedUpdateManifest>(
&from_keypair.pubkey(),
&update_manifest_keypair.pubkey(),
1, // lamports
1, // lamports
vec![], // additional keys
);
let mut transaction = Transaction::new_unsigned_instructions(vec![new_account]);
transaction.sign(&[from_keypair], recent_blockhash);
@ -224,6 +226,8 @@ fn store_update_manifest(
let signers = [from_keypair, update_manifest_keypair];
let instruction = config_instruction::store::<SignedUpdateManifest>(
&update_manifest_keypair.pubkey(),
true, // update_manifest_keypair is signer
vec![], // additional keys
update_manifest,
);
@ -238,9 +242,10 @@ fn get_update_manifest(
rpc_client: &RpcClient,
update_manifest_pubkey: &Pubkey,
) -> Result<UpdateManifest, String> {
let data = rpc_client
let mut data = rpc_client
.get_account_data(update_manifest_pubkey)
.map_err(|err| format!("Unable to fetch update manifest: {}", err))?;
let data = data.split_off(ConfigKeys::serialized_size(vec![]));
let signed_update_manifest =
SignedUpdateManifest::deserialize(update_manifest_pubkey, &data)
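The split_off call above drops the serialized ConfigKeys header that now prefixes config account data before the manifest is deserialized. A minimal sketch of Vec::split_off semantics with a made-up 8-byte header length (the real length comes from ConfigKeys::serialized_size):

fn main() {
    // Pretend account data: an 8-byte header followed by the payload.
    let mut data: Vec<u8> = Vec::new();
    data.extend_from_slice(&[0u8; 8]); // hypothetical header
    data.extend_from_slice(b"manifest-bytes"); // hypothetical payload

    let header_len = 8;
    // split_off leaves the header in `data` and returns everything after it.
    let payload = data.split_off(header_len);

    assert_eq!(data.len(), 8);
    assert_eq!(&payload[..], &b"manifest-bytes"[..]);
}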
@ -514,7 +519,7 @@ pub fn init(
false
};
if !path_modified {
if !path_modified && !no_modify_path {
check_env_path_for_bin_dir(&config);
}
Ok(())
@ -748,7 +753,11 @@ pub fn run(
) -> Result<(), String> {
let config = Config::load(config_file)?;
let full_program_path = config.active_release_bin_dir().join(program_name);
let mut full_program_path = config.active_release_bin_dir().join(program_name);
if cfg!(windows) && full_program_path.extension().is_none() {
full_program_path.set_extension("exe");
}
if !full_program_path.exists() {
Err(format!(
"{} does not exist",
@ -758,6 +767,13 @@ pub fn run(
let mut child_option: Option<std::process::Child> = None;
let mut now = Instant::now();
let (signal_sender, signal_receiver) = mpsc::channel();
ctrlc::set_handler(move || {
let _ = signal_sender.send(());
})
.expect("Error setting Ctrl-C handler");
loop {
child_option = match child_option {
Some(mut child) => match child.try_wait() {
@ -793,7 +809,9 @@ pub fn run(
Ok(true) => {
// Update successful, kill current process so it will be restart
if let Some(ref mut child) = child_option {
println!("Killing program: {:?}", child.kill());
stop_process(child).unwrap_or_else(|err| {
eprintln!("Failed to stop child: {:?}", err);
});
}
}
Ok(false) => {} // No update available
@ -803,6 +821,15 @@ pub fn run(
};
now = Instant::now();
}
sleep(Duration::from_secs(1));
if let Ok(()) = signal_receiver.recv_timeout(Duration::from_secs(1)) {
// Handle SIGTERM...
if let Some(ref mut child) = child_option {
stop_process(child).unwrap_or_else(|err| {
eprintln!("Failed to stop child: {:?}", err);
});
}
std::process::exit(0);
}
}
}
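The loop above replaces an unconditional sleep with a one-second recv_timeout on a channel fed by the Ctrl-C handler, so a signal wakes the loop immediately instead of waiting out the sleep. A std-only sketch of that polling pattern, with a plain thread standing in for the ctrlc handler:

use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (signal_sender, signal_receiver) = mpsc::channel();

    // Stand-in for ctrlc::set_handler: something that eventually signals shutdown.
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(2500));
        let _ = signal_sender.send(());
    });

    loop {
        // ... periodic work (checking the child, polling for updates) goes here ...
        if signal_receiver.recv_timeout(Duration::from_secs(1)).is_ok() {
            println!("shutdown signal received; stopping child and exiting");
            break;
        }
        // recv_timeout returning Err(Timeout) is the normal "no signal yet" case.
    }
}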

View File

@ -8,6 +8,7 @@ mod build_env;
mod command;
mod config;
mod defaults;
mod stop_process;
mod update_manifest;
// Return an error if a url cannot be parsed.

View File

@ -1,22 +1,21 @@
use atty;
use std::process::exit;
#[cfg(windows)]
fn press_enter() {
// On windows, where installation happens in a console that may have opened just for this
// purpose, give the user an opportunity to see the error before the window closes.
println!();
println!("Press the Enter key to continue.");
if cfg!(windows) && atty::is(atty::Stream::Stdin) {
println!();
println!("Press the Enter key to continue.");
use std::io::BufRead;
let stdin = std::io::stdin();
let stdin = stdin.lock();
let mut lines = stdin.lines();
lines.next();
use std::io::BufRead;
let stdin = std::io::stdin();
let stdin = stdin.lock();
let mut lines = stdin.lines();
lines.next();
}
}
#[cfg(not(windows))]
fn press_enter() {}
fn main() {
solana_install::main_init().unwrap_or_else(|err| {
println!("Error: {}", err);

View File

@ -0,0 +1,67 @@
use std::io;
use std::process::Child;
fn kill_process(process: &mut Child) -> Result<(), io::Error> {
if let Ok(()) = process.kill() {
process.wait()?;
} else {
println!("Process {} has already exited", process.id());
}
Ok(())
}
#[cfg(windows)]
pub fn stop_process(process: &mut Child) -> Result<(), io::Error> {
kill_process(process)
}
#[cfg(not(windows))]
pub fn stop_process(process: &mut Child) -> Result<(), io::Error> {
use nix::errno::Errno::{EINVAL, EPERM, ESRCH};
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
use nix::Error::Sys;
use std::io::ErrorKind;
use std::thread;
use std::time::{Duration, Instant};
let nice_wait = Duration::from_secs(5);
let pid = Pid::from_raw(process.id() as i32);
match kill(pid, Signal::SIGINT) {
Ok(()) => {
let expire = Instant::now() + nice_wait;
while let Ok(None) = process.try_wait() {
if Instant::now() > expire {
break;
}
thread::sleep(nice_wait / 10);
}
if let Ok(None) = process.try_wait() {
kill_process(process)?;
}
}
Err(Sys(EINVAL)) => {
println!("Invalid signal. Killing process {}", pid);
kill_process(process)?;
}
Err(Sys(EPERM)) => {
return Err(io::Error::new(
ErrorKind::InvalidInput,
format!("Insufficient permissions to signal process {}", pid),
));
}
Err(Sys(ESRCH)) => {
return Err(io::Error::new(
ErrorKind::InvalidInput,
format!("Process {} does not exist", pid),
));
}
Err(e) => {
return Err(io::Error::new(
ErrorKind::InvalidInput,
format!("Unexpected error {}", e),
));
}
};
Ok(())
}
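A short usage sketch for the module above, assuming stop_process is in scope (for example via use crate::stop_process::stop_process) and that a sleep binary exists on the host:

use std::process::Command;

fn main() -> std::io::Result<()> {
    // Spawn a long-running child to stand in for the managed validator process.
    let mut child = Command::new("sleep").arg("60").spawn()?;

    // Ask it to stop: SIGINT first, then a hard kill if it ignores the 5s grace period.
    stop_process(&mut child)?;
    println!("child {} stopped", child.id());
    Ok(())
}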

View File

@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "0.16.0"
version = "0.16.5"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -15,7 +15,7 @@ cuda = []
[dependencies]
clap = "2.33"
dirs = "2.0.1"
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
[[bin]]
name = "solana-keygen"

View File

@ -1,7 +1,7 @@
[package]
name = "solana-kvstore"
description = "Embedded Key-Value store for solana"
version = "0.16.0"
version = "0.16.5"
homepage = "https://solana.com/"
repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"]

View File

@ -3,18 +3,21 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "0.16.0"
version = "0.16.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
serde_json = "1.0.39"
solana = { path = "../core", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-runtime = { path = "../runtime", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
serde = "1.0.94"
serde_derive = "1.0.94"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
solana = { path = "../core", version = "0.16.5" }
solana-logger = { path = "../logger", version = "0.16.5" }
solana-runtime = { path = "../runtime", version = "0.16.5" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
[dev-dependencies]
assert_cmd = "0.11"

View File

@ -1,13 +1,70 @@
use clap::{crate_description, crate_name, crate_version, App, Arg, SubCommand};
use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, SubCommand};
use solana::blocktree::Blocktree;
use solana::blocktree_processor::process_blocktree;
use solana_sdk::genesis_block::GenesisBlock;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::{stdout, Write};
use std::process::exit;
use std::str::FromStr;
#[derive(PartialEq)]
enum LedgerOutputMethod {
Print,
Json,
}
fn output_ledger(blocktree: Blocktree, starting_slot: u64, method: LedgerOutputMethod) {
let rooted_slot_iterator = blocktree
.rooted_slot_iterator(starting_slot)
.unwrap_or_else(|err| {
eprintln!(
"Failed to load entries starting from slot {}: {:?}",
starting_slot, err
);
exit(1);
});
if method == LedgerOutputMethod::Json {
stdout().write_all(b"{\"ledger\":[\n").expect("open array");
}
for (slot, slot_meta) in rooted_slot_iterator {
match method {
LedgerOutputMethod::Print => println!("Slot {}", slot),
LedgerOutputMethod::Json => {
serde_json::to_writer(stdout(), &slot_meta).expect("serialize slot_meta");
stdout().write_all(b",\n").expect("newline");
}
}
let entries = blocktree
.get_slot_entries(slot, 0, None)
.unwrap_or_else(|err| {
eprintln!("Failed to load entries for slot {}: {:?}", slot, err);
exit(1);
});
for entry in entries {
match method {
LedgerOutputMethod::Print => println!("{:?}", entry),
LedgerOutputMethod::Json => {
serde_json::to_writer(stdout(), &entry).expect("serialize entry");
stdout().write_all(b",\n").expect("newline");
}
}
}
}
if method == LedgerOutputMethod::Json {
stdout().write_all(b"\n]}\n").expect("close array");
}
}
fn main() {
const DEFAULT_ROOT_COUNT: &str = "1";
solana_logger::setup();
let matches = App::new(crate_name!()).about(crate_description!())
let matches = App::new(crate_name!())
.about(crate_description!())
.version(crate_version!())
.arg(
Arg::with_name("ledger")
@ -19,30 +76,46 @@ fn main() {
.help("Use directory for ledger location"),
)
.arg(
Arg::with_name("head")
.short("n")
.long("head")
Arg::with_name("starting_slot")
.long("starting-slot")
.value_name("NUM")
.takes_value(true)
.help("Limit to at most the first NUM entries in ledger\n (only applies to print and json commands)"),
)
.arg(
Arg::with_name("min-hashes")
.short("h")
.long("min-hashes")
.value_name("NUM")
.takes_value(true)
.help("Skip entries with fewer than NUM hashes\n (only applies to print and json commands)"),
)
.arg(
Arg::with_name("continue")
.short("c")
.long("continue")
.help("Continue verify even if verification fails"),
.default_value("0")
.help("Start at this slot (only applies to print and json commands)"),
)
.subcommand(SubCommand::with_name("print").about("Print the ledger"))
.subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format"))
.subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
.subcommand(SubCommand::with_name("prune").about("Prune the ledger at the block height").arg(
Arg::with_name("slot_list")
.long("slot-list")
.value_name("FILENAME")
.takes_value(true)
.help("The location of the YAML file with a list of rollback slot heights and hashes"),
))
.subcommand(SubCommand::with_name("list-roots").about("Output up to the last <num-roots> root hashes and their heights starting at the given block height").arg(
Arg::with_name("max_height")
.long("max-height")
.value_name("NUM")
.takes_value(true)
.required(true)
.help("Maximum block height"),
).arg(
Arg::with_name("slot_list")
.long("slot-list")
.value_name("FILENAME")
.required(false)
.takes_value(true)
.help("The location of the output YAML file. A list of rollback slot heights and hashes will be written to the file."),
).arg(
Arg::with_name("num_roots")
.long("num-roots")
.value_name("NUM")
.takes_value(true)
.default_value(DEFAULT_ROOT_COUNT)
.required(false)
.help("Number of roots in the output"),
))
.get_matches();
let ledger_path = matches.value_of("ledger").unwrap();
@ -63,63 +136,118 @@ fn main() {
}
};
let entries = match blocktree.read_ledger() {
Ok(entries) => entries,
Err(err) => {
eprintln!("Failed to read ledger at {}: {}", ledger_path, err);
exit(1);
}
};
let head = match matches.value_of("head") {
Some(head) => head.parse().expect("please pass a number for --head"),
None => <usize>::max_value(),
};
let min_hashes = match matches.value_of("min-hashes") {
Some(hashes) => hashes
.parse()
.expect("please pass a number for --min-hashes"),
None => 0,
} as u64;
let starting_slot = value_t!(matches, "starting_slot", u64).unwrap_or_else(|e| e.exit());
match matches.subcommand() {
("print", _) => {
for (i, entry) in entries.enumerate() {
if i >= head {
break;
}
if entry.num_hashes < min_hashes {
continue;
}
println!("{:?}", entry);
}
output_ledger(blocktree, starting_slot, LedgerOutputMethod::Print);
}
("json", _) => {
stdout().write_all(b"{\"ledger\":[\n").expect("open array");
for (i, entry) in entries.enumerate() {
if i >= head {
break;
}
if entry.num_hashes < min_hashes {
continue;
}
serde_json::to_writer(stdout(), &entry).expect("serialize");
stdout().write_all(b",\n").expect("newline");
}
stdout().write_all(b"\n]}\n").expect("close array");
output_ledger(blocktree, starting_slot, LedgerOutputMethod::Json);
}
("verify", _) => match process_blocktree(&genesis_block, &blocktree, None) {
Ok((_bank_forks, bank_forks_info, _)) => {
println!("{:?}", bank_forks_info);
("verify", _) => {
println!("Verifying ledger...");
match process_blocktree(&genesis_block, &blocktree, None, true) {
Ok((_bank_forks, bank_forks_info, _)) => {
println!("{:?}", bank_forks_info);
}
Err(err) => {
eprintln!("Ledger verification failed: {:?}", err);
exit(1);
}
}
Err(err) => {
eprintln!("Ledger verification failed: {:?}", err);
exit(1);
}
("prune", Some(args_matches)) => {
if let Some(prune_file_path) = args_matches.value_of("slot_list") {
let prune_file = File::open(prune_file_path.to_string()).unwrap();
let slot_hashes: BTreeMap<u64, String> =
serde_yaml::from_reader(prune_file).unwrap();
let iter = blocktree
.rooted_slot_iterator(0)
.expect("Failed to get rooted slot");
let potential_hashes: Vec<_> = iter
.filter_map(|(slot, meta)| {
let blockhash = blocktree
.get_slot_entries(slot, meta.last_index, Some(1))
.unwrap()
.first()
.unwrap()
.hash
.to_string();
slot_hashes.get(&slot).and_then(|hash| {
if *hash == blockhash {
Some((slot, blockhash))
} else {
None
}
})
})
.collect();
let (target_slot, target_hash) = potential_hashes
.last()
.expect("Failed to find a valid slot");
println!("Prune at slot {:?} hash {:?}", target_slot, target_hash);
// ToDo: Do the actual pruning of the database
}
},
}
("list-roots", Some(args_matches)) => {
let max_height = if let Some(height) = args_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number")
} else {
panic!("Maximum height must be provided");
};
let num_roots = if let Some(roots) = args_matches.value_of("num_roots") {
usize::from_str(roots).expect("Number of roots must be a number")
} else {
usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
};
let iter = blocktree
.rooted_slot_iterator(0)
.expect("Failed to get rooted slot");
let slot_hash: Vec<_> = iter
.filter_map(|(slot, meta)| {
if slot <= max_height as u64 {
let blockhash = blocktree
.get_slot_entries(slot, meta.last_index, Some(1))
.unwrap()
.first()
.unwrap()
.hash;
Some((slot, blockhash))
} else {
None
}
})
.collect();
let mut output_file: Box<Write> = if let Some(path) = args_matches.value_of("slot_list")
{
match File::create(path) {
Ok(file) => Box::new(file),
_ => Box::new(stdout()),
}
} else {
Box::new(stdout())
};
slot_hash
.into_iter()
.rev()
.enumerate()
.for_each(|(i, (slot, hash))| {
if i < num_roots {
output_file
.write_all(format!("{:?}: {:?}\n", slot, hash).as_bytes())
.expect("failed to write");
}
});
}
("", _) => {
eprintln!("{}", matches.usage());
exit(1);

View File
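The prune and list-roots subcommands above exchange a YAML map of slot height to bank hash. A sketch of producing such a file with serde_yaml, which is already in the ledger-tool dependencies; the slots and hash strings below are made up:

use std::collections::BTreeMap;

fn main() {
    let mut slot_hashes: BTreeMap<u64, String> = BTreeMap::new();
    // Hypothetical slot -> blockhash entries.
    slot_hashes.insert(0, "4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM".to_string());
    slot_hashes.insert(8, "8yQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJzioAA".to_string());

    // Serializes to a YAML mapping of slot -> hash, the shape `prune --slot-list` expects.
    let yaml = serde_yaml::to_string(&slot_hashes).expect("serialize slot list");
    print!("{}", yaml);
}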

@ -45,20 +45,5 @@ fn nominal() {
// Print everything
let output = run_ledger_tool(&["-l", &ledger_path, "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), ticks);
// Only print the first 5 items
let output = run_ledger_tool(&["-l", &ledger_path, "-n", "5", "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), 5);
// Skip entries with no hashes
let output = run_ledger_tool(&["-l", &ledger_path, "-h", "1", "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), ticks);
// Skip entries with fewer than 2 hashes (skip everything)
let output = run_ledger_tool(&["-l", &ledger_path, "-h", "2", "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), 0);
assert_eq!(count_newlines(&output.stdout), ticks + 1);
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "0.16.0"
version = "0.16.5"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "0.16.0"
version = "0.16.5"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
[dev-dependencies]
hex = "0.3.2"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "0.16.0"
version = "0.16.5"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -14,7 +14,7 @@ influx_db_client = "0.3.6"
lazy_static = "1.3.0"
log = "0.4.2"
reqwest = "0.9.18"
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.5" }
sys-info = "0.5.7"
[dev-dependencies]

View File

@ -11,7 +11,9 @@ set -e
for i in "$SOLANA_RSYNC_CONFIG_DIR" "$SOLANA_CONFIG_DIR"; do
echo "Cleaning $i"
rm -rvf "${i:?}/" # <-- $i might be a symlink, rm the other side of it first
rm -rvf "$i"
mkdir -p "$i"
done
setup_secondary_mount

View File

@ -72,6 +72,17 @@ SOLANA_RSYNC_CONFIG_DIR=$SOLANA_ROOT/config
# Configuration that remains local
SOLANA_CONFIG_DIR=$SOLANA_ROOT/config-local
SECONDARY_DISK_MOUNT_POINT=/mnt/extra-disk
setup_secondary_mount() {
# If there is a secondary disk, symlink the config-local dir there
if [[ -d $SECONDARY_DISK_MOUNT_POINT ]]; then
mkdir -p $SECONDARY_DISK_MOUNT_POINT/config-local
rm -rf "$SOLANA_CONFIG_DIR"
ln -sfT $SECONDARY_DISK_MOUNT_POINT/config-local "$SOLANA_CONFIG_DIR"
fi
}
setup_secondary_mount
default_arg() {
declare name=$1
declare value=$2
@ -88,3 +99,18 @@ default_arg() {
args+=("$name")
fi
}
replace_arg() {
declare name=$1
declare value=$2
default_arg "$name" "$value"
declare index=0
for arg in "${args[@]}"; do
index=$((index + 1))
if [[ $arg = "$name" ]]; then
args[$index]="$value"
fi
done
}

View File

@ -9,6 +9,7 @@ source "$here"/common.sh
# shellcheck source=scripts/oom-score-adj.sh
source "$here"/../scripts/oom-score-adj.sh
fullnode_usage() {
if [[ -n $1 ]]; then
echo "$*"
@ -76,24 +77,8 @@ rsync_url() { # adds the 'rsync://` prefix to URLs that need it
setup_validator_accounts() {
declare entrypoint_ip=$1
declare node_keypair_path=$2
declare vote_keypair_path=$3
declare stake_keypair_path=$4
declare storage_keypair_path=$5
declare node_lamports=$6
declare stake_lamports=$7
declare node_pubkey
node_pubkey=$($solana_keygen pubkey "$node_keypair_path")
declare vote_pubkey
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
declare stake_pubkey
stake_pubkey=$($solana_keygen pubkey "$stake_keypair_path")
declare storage_pubkey
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
declare node_lamports=$2
declare stake_lamports=$3
if [[ -f $configured_flag ]]; then
echo "Vote and stake accounts have already been configured"
@ -101,75 +86,68 @@ setup_validator_accounts() {
if ((airdrops_enabled)); then
# Fund the node with enough tokens to fund its Vote, Staking, and Storage accounts
declare fees=100 # TODO: No hardcoded transaction fees, fetch the current cluster fees
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" airdrop $((node_lamports+stake_lamports+fees)) || return $?
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" airdrop $((node_lamports+stake_lamports+fees)) || return $?
else
echo "current account balance is "
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
fi
# Fund the vote account from the node, with the node as the node_pubkey
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
create-vote-account "$vote_pubkey" "$node_pubkey" 1 --commission 65535 || return $?
# Fund the vote account from the node, with the node as the identity_pubkey
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-vote-account "$vote_pubkey" "$identity_pubkey" 1 --commission 127 || return $?
# Fund the stake account from the node, with the node as the node_pubkey
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
# Fund the stake account from the node, with the node as the identity_pubkey
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-stake-account "$stake_pubkey" "$stake_lamports" || return $?
# Delegate the stake. The transaction fee is paid by the node but the
# transaction must be signed by the stake_keypair
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
delegate-stake "$stake_keypair_path" "$vote_pubkey" "$stake_lamports" || return $?
# Setup validator storage account
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
create-validator-storage-account "$node_pubkey" "$storage_pubkey" || return $?
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-validator-storage-account "$identity_pubkey" "$storage_pubkey" || return $?
touch "$configured_flag"
fi
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
show-vote-account "$vote_pubkey"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
show-stake-account "$stake_pubkey"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
show-storage-account "$storage_pubkey"
echo "Identity account balance:"
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance
echo "========================================================================"
(
set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-vote-account "$vote_pubkey"
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-stake-account "$stake_pubkey"
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-storage-account "$storage_pubkey"
)
return 0
}
setup_replicator_account() {
declare entrypoint_ip=$1
declare node_keypair_path=$2
declare storage_keypair_path=$3
declare node_lamports=$4
declare node_pubkey
node_pubkey=$($solana_keygen pubkey "$node_keypair_path")
declare storage_pubkey
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
declare node_lamports=$2
if [[ -f $configured_flag ]]; then
echo "Replicator account has already been configured"
else
if ((airdrops_enabled)); then
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" airdrop "$node_lamports" || return $?
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" airdrop "$node_lamports" || return $?
else
echo "current account balance is "
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
fi
# Setup replicator storage account
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
create-replicator-storage-account "$node_pubkey" "$storage_pubkey" || return $?
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-replicator-storage-account "$identity_pubkey" "$storage_pubkey" || return $?
touch "$configured_flag"
fi
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-storage-account "$storage_pubkey"
return 0
@ -192,6 +170,7 @@ identity_keypair_path=
no_restart=0
airdrops_enabled=1
generate_snapshots=0
boot_from_snapshot=1
positional_args=()
while [[ -n $1 ]]; do
@ -209,6 +188,9 @@ while [[ -n $1 ]]; do
elif [[ $1 = --generate-snapshots ]]; then
generate_snapshots=1
shift
elif [[ $1 = --no-snapshot ]]; then
boot_from_snapshot=0
shift
elif [[ $1 = --replicator ]]; then
node_type=replicator
shift
@ -275,24 +257,13 @@ if [[ $node_type = replicator ]]; then
shift "$shift"
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/replicator-keypair$label.json}"
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
storage_keypair_path="$SOLANA_CONFIG_DIR"/replicator-storage-keypair$label.json
ledger_config_dir=$SOLANA_CONFIG_DIR/replicator-ledger$label
configured_flag=$SOLANA_CONFIG_DIR/replicator$label.configured
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
[[ -r "$storage_keypair_path" ]] || $solana_keygen new -o "$storage_keypair_path"
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
cat <<EOF
======================[ $node_type configuration ]======================
replicator pubkey: $identity_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
======================================================================
EOF
program=$solana_replicator
default_arg --entrypoint "$entrypoint_address"
default_arg --identity "$identity_keypair_path"
@ -300,6 +271,7 @@ EOF
default_arg --ledger "$ledger_config_dir"
rsync_entrypoint_url=$(rsync_url "$entrypoint")
elif [[ $node_type = bootstrap_leader ]]; then
if [[ ${#positional_args[@]} -ne 0 ]]; then
fullnode_usage "Unknown argument: ${positional_args[0]}"
@ -311,9 +283,11 @@ elif [[ $node_type = bootstrap_leader ]]; then
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/bootstrap-leader-keypair.json}"
vote_keypair_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-vote-keypair.json
ledger_config_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger
state_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-state
stake_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-stake-keypair.json
storage_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-storage-keypair.json
configured_flag=$SOLANA_CONFIG_DIR/bootstrap-leader.configured
@ -332,19 +306,16 @@ elif [[ $node_type = validator ]]; then
shift "$shift"
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/validator-keypair$label.json}"
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
vote_keypair_path=$SOLANA_CONFIG_DIR/validator-vote-keypair$label.json
ledger_config_dir=$SOLANA_CONFIG_DIR/validator-ledger$label
state_dir="$SOLANA_CONFIG_DIR"/validator-state$label
storage_keypair_path=$SOLANA_CONFIG_DIR/validator-storage-keypair$label.json
stake_keypair_path=$SOLANA_CONFIG_DIR/validator-stake-keypair$label.json
storage_keypair_path=$SOLANA_CONFIG_DIR/validator-storage-keypair$label.json
configured_flag=$SOLANA_CONFIG_DIR/validator$label.configured
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
[[ -r "$vote_keypair_path" ]] || $solana_keygen new -o "$vote_keypair_path"
[[ -r "$stake_keypair_path" ]] || $solana_keygen new -o "$stake_keypair_path"
[[ -r "$storage_keypair_path" ]] || $solana_keygen new -o "$storage_keypair_path"
default_arg --entrypoint "$entrypoint_address"
if ((airdrops_enabled)); then
default_arg --rpc-drone-address "${entrypoint_address%:*}:9900"
@ -356,29 +327,14 @@ else
exit 1
fi
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
if [[ $node_type != replicator ]]; then
accounts_config_dir="$state_dir"/accounts
snapshot_config_dir="$state_dir"/snapshots
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
cat <<EOF
======================[ $node_type configuration ]======================
identity pubkey: $identity_pubkey
vote pubkey: $vote_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
accounts: $accounts_config_dir
snapshots: $snapshot_config_dir
========================================================================
EOF
default_arg --identity "$identity_keypair_path"
default_arg --voting-keypair "$vote_keypair_path"
default_arg --vote-account "$vote_pubkey"
default_arg --storage-keypair "$storage_keypair_path"
default_arg --ledger "$ledger_config_dir"
default_arg --accounts "$accounts_config_dir"
@ -397,72 +353,137 @@ if [[ -z $CI ]]; then # Skip in CI
fi
new_gensis_block() {
(
set -x
$rsync -r "${rsync_entrypoint_url:?}"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR"
) || (
echo "Error: failed to rsync genesis ledger"
)
! diff -q "$SOLANA_RSYNC_CONFIG_DIR"/ledger/genesis.bin "$ledger_config_dir"/genesis.bin >/dev/null 2>&1
}
set -e
PS4="$(basename "$0"): "
pid=
trap '[[ -n $pid ]] && kill "$pid" >/dev/null 2>&1 && wait "$pid"' INT TERM ERR
kill_fullnode() {
if [[ -n $pid ]]; then
declare _pid=$pid
pid=
echo "killing pid $_pid"
kill "$_pid" || true
wait "$_pid" || true
echo "$_pid killed"
fi
}
trap 'kill_fullnode' INT TERM ERR
while true; do
if new_gensis_block; then
# If the genesis block has changed remove the now stale ledger and vote
# keypair for the node and start all over again
if [[ $node_type != bootstrap_leader ]] && new_gensis_block; then
# If the genesis block has changed remove the now stale ledger and
# vote/stake/storage keypairs for the node and start all over again
(
set -x
rm -rf "$ledger_config_dir" "$state_dir" "$configured_flag"
)
if [[ $node_type = validator ]]; then
$solana_keygen new -f -o "$vote_keypair_path"
$solana_keygen new -f -o "$stake_keypair_path"
$solana_keygen new -f -o "$storage_keypair_path"
fi
if [[ $node_type = replicator ]]; then
$solana_keygen new -f -o "$storage_keypair_path"
fi
fi
if [[ ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then
if [[ $node_type = bootstrap_leader ]]; then
if [[ $node_type = replicator ]]; then
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
setup_replicator_account "${entrypoint_address%:*}" \
"$node_lamports"
cat <<EOF
======================[ $node_type configuration ]======================
replicator pubkey: $identity_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
======================================================================
EOF
else
if [[ $node_type = bootstrap_leader && ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then
ledger_not_setup "$SOLANA_RSYNC_CONFIG_DIR/ledger does not exist"
elif [[ $node_type = validator ]]; then
(
SECONDS=0
set -x
cd "$SOLANA_RSYNC_CONFIG_DIR"
$rsync -qPr "${rsync_entrypoint_url:?}"/config/{ledger,state.tgz} .
echo "Fetched snapshot in $SECONDS seconds"
) || true
fi
fi
(
set -x
if [[ $node_type = validator ]]; then
if [[ -f "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz ]]; then
mkdir -p "$state_dir"
SECONDS=
tar -C "$state_dir" -zxf "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
echo "Extracted snapshot in $SECONDS seconds"
fi
fi
if [[ ! -d "$ledger_config_dir" ]]; then
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
fi
)
if [[ $node_type = validator ]]; then
(
cd "$SOLANA_RSYNC_CONFIG_DIR"
if ((stake_lamports)); then
if [[ $node_type = validator ]]; then
echo "Rsyncing genesis ledger from ${rsync_entrypoint_url:?}..."
SECONDS=
while ! $rsync -Pr "${rsync_entrypoint_url:?}"/config/ledger .; do
echo "Genesis ledger rsync failed"
sleep 5
done
echo "Fetched genesis ledger in $SECONDS seconds"
if ((boot_from_snapshot)); then
SECONDS=
echo "Rsyncing state snapshot ${rsync_entrypoint_url:?}..."
if ! $rsync -P "${rsync_entrypoint_url:?}"/config/state.tgz .; then
echo "State snapshot rsync failed"
rm -f "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
exit
fi
echo "Fetched snapshot in $SECONDS seconds"
SECONDS=
mkdir -p "$state_dir"
(
set -x
tar -C "$state_dir" -zxf "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
)
echo "Extracted snapshot in $SECONDS seconds"
fi
)
fi
(
set -x
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
)
fi
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
stake_pubkey=$($solana_keygen pubkey "$stake_keypair_path")
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
replace_arg --vote-account "$vote_pubkey"
if [[ $node_type = validator ]] && ((stake_lamports)); then
setup_validator_accounts "${entrypoint_address%:*}" \
"$identity_keypair_path" \
"$vote_keypair_path" \
"$stake_keypair_path" \
"$storage_keypair_path" \
"$node_lamports" \
"$stake_lamports"
elif [[ $node_type = replicator ]]; then
setup_replicator_account "${entrypoint_address%:*}" \
"$identity_keypair_path" \
"$storage_keypair_path" \
"$node_lamports"
fi
cat <<EOF
======================[ $node_type configuration ]======================
identity pubkey: $identity_pubkey
vote pubkey: $vote_pubkey
stake pubkey: $stake_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
accounts: $accounts_config_dir
snapshots: $snapshot_config_dir
========================================================================
EOF
fi
echo "$PS4$program ${args[*]}"
$program "${args[@]}" &
pid=$!
echo "pid: $pid"
oom_score_adj "$pid" 1000
if ((no_restart)); then
@ -488,9 +509,15 @@ while true; do
new_state_archive="$SOLANA_RSYNC_CONFIG_DIR"/new_state.tgz
(
rm -rf "$new_state_dir" "$new_state_archive"
cp -a "$state_dir" "$new_state_dir"
mkdir -p "$new_state_dir"
# When saving the state, it's necessary to save the snapshots first,
# followed by the accounts folder. This avoids picking up accounts that
# are incomplete because they are still being updated and not yet frozen.
cp -a "$state_dir"/snapshots "$new_state_dir"
cp -a "$state_dir"/accounts "$new_state_dir"
cd "$new_state_dir"
tar zcf "$new_state_archive" ./*
tar zcfS "$new_state_archive" ./*
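# -S (--sparse) lets tar store sparse files efficiently; presumably this keeps the archived account files compact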
)
ln -f "$new_state_archive" "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
rm -rf "$new_state_dir" "$new_state_archive"
@ -504,21 +531,16 @@ while true; do
if ((poll_for_new_genesis_block && --secs_to_next_genesis_poll == 0)); then
echo "Polling for new genesis block..."
(
set -x
$rsync -r "${rsync_entrypoint_url:?}"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR"
) || (
echo "Error: failed to rsync ledger"
)
new_gensis_block && break
if new_gensis_block; then
echo "############## New genesis detected, restarting $node_type ##############"
break
fi
secs_to_next_genesis_poll=60
fi
done
echo "############## New genesis detected, restarting $node_type ##############"
kill "$pid" || true
wait "$pid" || true
kill_fullnode
# give the cluster time to come back up
(
set -x


@ -23,7 +23,6 @@ default_arg --ledger "$SOLANA_RSYNC_CONFIG_DIR"/ledger
default_arg --mint "$SOLANA_CONFIG_DIR"/mint-keypair.json
default_arg --lamports 100000000000000
default_arg --bootstrap-leader-lamports 424242
default_arg --storage-mining-pool-lamports 100000000
default_arg --target-lamports-per-signature 42
default_arg --target-signatures-per-slot 42
default_arg --hashes-per-tick auto


@ -25,6 +25,7 @@ entrypointIp=
publicNetwork=
netBasename=
sshPrivateKey=
letsEncryptDomainName=
externalNodeSshKey=
sshOptions=()
fullnodeIpList=()


@ -63,10 +63,12 @@ blockstreamer=false
fullNodeBootDiskSizeInGb=1000
clientBootDiskSizeInGb=75
replicatorBootDiskSizeInGb=1000
fullNodeAdditionalDiskSizeInGb=
externalNodes=false
failOnValidatorBootupFailure=true
publicNetwork=false
letsEncryptDomainName=
enableGpu=false
customAddress=
zones=()
@ -122,7 +124,13 @@ Manage testnet instances
* For EC2, [address] is the "allocation ID" of the desired
Elastic IP.
-d [disk-type] - Specify a boot disk type (default None). Use pd-ssd to get an SSD on GCE.
--letsencrypt [dns name] - Attempt to generate a TLS certificate using this
DNS name (useful only when the -a and -P options
are also provided)
--fullnode-additional-disk-size-gb [number]
- Add an additional [number] GB SSD to all fullnodes to store the config-local directory.
If not set, config-local will be written to the boot disk by default.
Only supported on GCE.
config-specific options:
-P - Use public network IP addresses (default: $publicNetwork)
@ -136,14 +144,34 @@ EOF
exit $exitcode
}
command=$1
[[ -n $command ]] || usage
shift
[[ $command = create || $command = config || $command = info || $command = delete ]] ||
usage "Invalid command: $command"
while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt; do
shortArgs=()
while [[ -n $1 ]]; do
if [[ ${1:0:2} = -- ]]; then
if [[ $1 = --letsencrypt ]]; then
letsEncryptDomainName="$2"
shift 2
elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
fullNodeAdditionalDiskSizeInGb="$2"
shift 2
elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
shortArgs+=("$1")
shift
else
usage "Unknown long option: $1"
fi
else
shortArgs+=("$1")
shift
fi
done
while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt "${shortArgs[@]}"; do
case $opt in
h | \?)
usage
@ -199,7 +227,6 @@ while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt; do
;;
esac
done
shift $((OPTIND - 1))
[[ ${#zones[@]} -gt 0 ]] || zones+=("$(cloud_DefaultZone)")
@ -217,8 +244,14 @@ case $cloudProvider in
gce)
;;
ec2)
if [[ -n $fullNodeAdditionalDiskSizeInGb ]] ; then
usage "Error: --fullnode-additional-disk-size-gb currently only supported with cloud provider: gce"
fi
;;
azure)
if [[ -n $fullNodeAdditionalDiskSizeInGb ]] ; then
usage "Error: --fullnode-additional-disk-size-gb currently only supported with cloud provider: gce"
fi
;;
*)
echo "Error: Unknown cloud provider: $cloudProvider"
@ -328,6 +361,7 @@ prepareInstancesAndWriteConfigFile() {
netBasename=$prefix
publicNetwork=$publicNetwork
sshPrivateKey=$sshPrivateKey
letsEncryptDomainName=$letsEncryptDomainName
EOF
fi
touch "$geoipConfigFile"
@ -598,6 +632,7 @@ $(
disable-background-upgrades.sh \
create-solana-user.sh \
add-solana-user-authorized_keys.sh \
install-certbot.sh \
install-earlyoom.sh \
install-libssl-compatability.sh \
install-nodejs.sh \
@ -611,6 +646,10 @@ $(
cat enable-nvidia-persistence-mode.sh
fi
if [[ -n $fullNodeAdditionalDiskSizeInGb ]]; then
cat mount-additional-disk.sh
fi
)
cat > /etc/motd <<EOM
@ -637,7 +676,7 @@ EOF
else
cloud_CreateInstances "$prefix" "$prefix-bootstrap-leader" 1 \
"$enableGpu" "$bootstrapLeaderMachineType" "${zones[0]}" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "$bootstrapLeaderAddress" "$bootDiskType"
"$startupScript" "$bootstrapLeaderAddress" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb"
fi
if [[ $additionalFullNodeCount -gt 0 ]]; then
@ -657,7 +696,7 @@ EOF
fi
cloud_CreateInstances "$prefix" "$prefix-$zone-fullnode" "$numNodesPerZone" \
"$enableGpu" "$fullNodeMachineType" "$zone" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "" "$bootDiskType" &
"$startupScript" "" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb" &
done
wait


@ -50,17 +50,18 @@ Operate a configured testnet
-c bench-tps=2="--tx_count 25000"
This will start 2 bench-tps clients, and supply "--tx_count 25000"
to the bench-tps client.
-n NUM_FULL_NODES - Number of fullnodes to apply command to.
--hashes-per-tick NUM_HASHES|sleep|auto
- Override the default --hashes-per-tick for the cluster
-n NUM_FULL_NODES - Number of fullnodes to apply command to.
-x Accounts and Stakes for external nodes
- A YML file with a list of account pubkeys and corresponding stakes
for external nodes
-s Num lamports per node in genesis block
- Create account keypairs for internal nodes and assign these many lamports
--lamports NUM_LAMPORTS_TO_MINT
- Override the default 100000000000000 lamports minted in genesis
--stake-internal-nodes NUM_LAMPORTS_PER_NODE
- Amount to stake internal nodes in genesis block. If set, airdrops are disabled.
--external-accounts-file FILE_PATH
- A YML file with a list of account pubkeys and corresponding stakes for external nodes
--no-snapshot
- If set, disables booting validators from a snapshot
sanity/start/update-specific options:
-F - Discard validator nodes that didn't bootup successfully
-o noLedgerVerify - Skip ledger verification
@ -96,7 +97,9 @@ failOnValidatorBootupFailure=true
genesisOptions=
numFullnodesRequested=
externalPrimordialAccountsFile=
remoteExternalPrimordialAccountsFile=
stakeNodesInGenesisBlock=
maybeNoSnapshot=""
command=$1
[[ -n $command ]] || usage
@ -111,9 +114,22 @@ while [[ -n $1 ]]; do
elif [[ $1 = --target-lamports-per-signature ]]; then
genesisOptions="$genesisOptions $1 $2"
shift 2
elif [[ $1 = --lamports ]]; then
genesisOptions="$genesisOptions $1 $2"
shift 2
elif [[ $1 = --no-snapshot ]]; then
maybeNoSnapshot="$1"
shift 1
elif [[ $1 = --deploy-update ]]; then
updatePlatforms="$updatePlatforms $2"
shift 2
elif [[ $1 = --stake-internal-nodes ]]; then
stakeNodesInGenesisBlock="$2"
shift 2
elif [[ $1 = --external-accounts-file ]]; then
externalPrimordialAccountsFile="$2"
remoteExternalPrimordialAccountsFile=/tmp/external-primordial-accounts.yml
shift 2
else
usage "Unknown long option: $1"
fi
@ -123,7 +139,7 @@ while [[ -n $1 ]]; do
fi
done
while getopts "h?T:t:o:f:rD:c:Fn:i:x:s:" opt "${shortArgs[@]}"; do
while getopts "h?T:t:o:f:rD:c:Fn:i:" opt "${shortArgs[@]}"; do
case $opt in
h | \?)
usage
@ -202,12 +218,6 @@ while getopts "h?T:t:o:f:rD:c:Fn:i:x:s:" opt "${shortArgs[@]}"; do
F)
failOnValidatorBootupFailure=false
;;
x)
externalPrimordialAccountsFile=$OPTARG
;;
s)
stakeNodesInGenesisBlock=$OPTARG
;;
i)
nodeAddress=$OPTARG
;;
@ -321,7 +331,7 @@ startBootstrapLeader() {
set -x
startCommon "$ipAddress" || exit 1
[[ -z "$externalPrimordialAccountsFile" ]] || rsync -vPrc -e "ssh ${sshOptions[*]}" "$externalPrimordialAccountsFile" \
"$ipAddress:~/solana/config/external-primodial-accounts.yml"
"$ipAddress:$remoteExternalPrimordialAccountsFile"
case $deployMethod in
tar)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
@ -343,12 +353,13 @@ startBootstrapLeader() {
\"$RUST_LOG\" \
$skipSetup \
$failOnValidatorBootupFailure \
\"$externalPrimordialAccountsFile\" \
\"$remoteExternalPrimordialAccountsFile\" \
\"$stakeNodesInGenesisBlock\" \
$nodeIndex \
$numBenchTpsClients \"$benchTpsExtraArgs\" \
$numBenchExchangeClients \"$benchExchangeExtraArgs\" \
\"$genesisOptions\" \
$maybeNoSnapshot \
"
) >> "$logFile" 2>&1 || {
cat "$logFile"
@ -368,6 +379,23 @@ startNode() {
(
set -x
startCommon "$ipAddress"
if [[ $nodeType = blockstreamer ]] && [[ -n $letsEncryptDomainName ]]; then
#
# Create/renew TLS certificate
#
declare localArchive=~/letsencrypt-"$letsEncryptDomainName".tgz
if [[ -r "$localArchive" ]]; then
timeout 30s scp "${sshOptions[@]}" "$localArchive" "$ipAddress:letsencrypt.tgz"
fi
ssh "${sshOptions[@]}" -n "$ipAddress" \
"sudo -H /certbot-restore.sh $letsEncryptDomainName maintainers@solana.com"
rm -f letsencrypt.tgz
timeout 30s scp "${sshOptions[@]}" "$ipAddress:/letsencrypt.tgz" letsencrypt.tgz
test -s letsencrypt.tgz # Ensure non-empty before overwriting $localArchive
cp letsencrypt.tgz "$localArchive"
fi
ssh "${sshOptions[@]}" -n "$ipAddress" \
"./solana/net/remote/remote-node.sh \
$deployMethod \
@ -377,10 +405,11 @@ startNode() {
\"$RUST_LOG\" \
$skipSetup \
$failOnValidatorBootupFailure \
\"$externalPrimordialAccountsFile\" \
\"$remoteExternalPrimordialAccountsFile\" \
\"$stakeNodesInGenesisBlock\" \
$nodeIndex \
\"$genesisOptions\" \
$maybeNoSnapshot \
"
) >> "$logFile" 2>&1 &
declare pid=$!
@ -477,7 +506,8 @@ start() {
declare updateDownloadUrl=http://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
(
set -x
curl -o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl"
curl --retry 5 --retry-delay 2 --retry-connrefused \
-o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl"
)
tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2
else


@ -34,4 +34,6 @@ loadConfigFile
PATH="$HOME"/.cargo/bin:"$PATH"
set -x
scripts/solana-install-deploy.sh localhost "$releaseChannel" "$updatePlatform"
scripts/solana-install-deploy.sh \
--keypair config-local/mint-keypair.json \
localhost "$releaseChannel" "$updatePlatform"


@ -19,6 +19,7 @@ benchTpsExtraArgs="${12}"
numBenchExchangeClients="${13}"
benchExchangeExtraArgs="${14}"
genesisOptions="${15}"
noSnapshot="${16}"
set +x
export RUST_LOG
@ -92,7 +93,7 @@ local|tar)
SUDO_OK=1 source scripts/tune-system.sh
(
sudo scripts/oom-monitor.sh
sudo SOLANA_METRICS_CONFIG="$SOLANA_METRICS_CONFIG" scripts/oom-monitor.sh
) > oom-monitor.log 2>&1 &
echo $! > oom-monitor.pid
scripts/net-stats.sh > net-stats.log 2>&1 &
@ -170,6 +171,7 @@ local|tar)
args+=(--no-airdrop)
fi
args+=(--init-complete-file "$initCompleteFile")
args+=("$noSnapshot")
nohup ./multinode-demo/validator.sh --bootstrap-leader "${args[@]}" > fullnode.log 2>&1 &
waitForNodeToInit
;;
@ -223,8 +225,15 @@ local|tar)
if [[ -z $stakeNodesInGenesisBlock ]]; then
./multinode-demo/drone.sh > drone.log 2>&1 &
fi
# Grab the TLS cert generated by /certbot-restore.sh
if [[ -f /.cert.pem ]]; then
sudo install -o $UID -m 400 /.cert.pem /.key.pem .
ls -l .cert.pem .key.pem
fi
export BLOCKEXPLORER_GEOIP_WHITELIST=$PWD/net/config/geoip.yml
npm install @solana/blockexplorer@1
npm install @solana/blockexplorer@1.21.0
npx solana-blockexplorer > blockexplorer.log 2>&1 &
# Confirm the blockexplorer is accessible
@ -240,6 +249,7 @@ local|tar)
fi
args+=(--init-complete-file "$initCompleteFile")
args+=("$noSnapshot")
nohup ./multinode-demo/validator.sh "${args[@]}" > fullnode.log 2>&1 &
waitForNodeToInit
;;
@ -257,6 +267,7 @@ local|tar)
if [[ $skipSetup != true ]]; then
./multinode-demo/clear-config.sh
fi
args+=("$noSnapshot")
nohup ./multinode-demo/replicator.sh "${args[@]}" > fullnode.log 2>&1 &
sleep 1
;;


@ -183,13 +183,11 @@ if $installCheck && [[ -r update_manifest_keypair.json ]]; then
(
set -x
update_manifest_pubkey=$($solana_keygen pubkey update_manifest_keypair.json)
rm -rf install-data-dir
$solana_install init \
--no-modify-path \
--data-dir install-data-dir \
--url http://"$sanityTargetIp":8899 \
--pubkey "$update_manifest_pubkey"
$solana_install info
)


@ -14,6 +14,8 @@ set -ex
# 2. Inline ~/.ssh/id-solana-testnet.pub below
cat > /solana-authorized_keys <<EOF
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc=
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGqZAwAZeBl0buOMz4FpUYrtpwk1L5aGKlbd7lI8dpbSx5WVRPWCVKhWzsGMtDUIfmozdzJouk1LPyihghTDgsE=
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOk4jgcX/VWSk3j//wXeIynSQjsOt+AjYXM/XZUMa7R1Q8lfIJGK/qHLBP86CMXdpyEKJ5i37QLYOL+0VuRy0CI=
EOF
sudo -u solana bash -c "


@ -309,3 +309,12 @@ cloud_FetchFile() {
cloud_GetConfigValueFromInstanceName "$instanceName" osProfile.adminUsername
scp "${config_value}@${publicIp}:${remoteFile}" "$localFile"
}
#
# cloud_CreateAndAttachPersistentDisk
#
# Not yet implemented for this cloud provider
cloud_CreateAndAttachPersistentDisk() {
echo "ERROR: cloud_CreateAndAttachPersistentDisk is not yet implemented for azure"
exit 1
}


@ -381,3 +381,12 @@ cloud_FetchFile() {
"solana@$publicIp:$remoteFile" "$localFile"
)
}
#
# cloud_CreateAndAttachPersistentDisk
#
# Not yet implemented for this cloud provider
cloud_CreateAndAttachPersistentDisk() {
echo "ERROR: cloud_CreateAndAttachPersistentDisk is not yet implemented for ec2"
exit 1
}


@ -81,7 +81,7 @@
"FromPort": 3001,
"IpRanges": [
{
"Description": "blockexplorer API port",
"Description": "blockexplorer http API port",
"CidrIp": "0.0.0.0/0"
}
],
@ -91,7 +91,26 @@
"Ipv6Ranges": [
{
"CidrIpv6": "::/0",
"Description": "blockexplorer API port"
"Description": "blockexplorer http API port"
}
]
},
{
"PrefixListIds": [],
"FromPort": 3443,
"IpRanges": [
{
"Description": "blockexplorer https API port",
"CidrIp": "0.0.0.0/0"
}
],
"ToPort": 3443,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
"Ipv6Ranges": [
{
"CidrIpv6": "::/0",
"Description": "blockexplorer https API port"
}
]
},


@ -126,6 +126,7 @@ cloud_CreateInstances() {
declare optionalStartupScript="$8"
declare optionalAddress="$9"
declare optionalBootDiskType="${10}"
declare optionalAdditionalDiskSize="${11}"
if $enableGpu; then
# Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
@ -198,6 +199,22 @@ cloud_CreateInstances() {
set -x
gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
)
if [[ -n $optionalAdditionalDiskSize ]]; then
if [[ $numNodes = 1 ]]; then
(
set -x
cloud_CreateAndAttachPersistentDisk "${namePrefix}" "$optionalAdditionalDiskSize" "pd-ssd" "$zone"
)
else
for node in $(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes"); do
(
set -x
cloud_CreateAndAttachPersistentDisk "${node}" "$optionalAdditionalDiskSize" "pd-ssd" "$zone"
)
done
fi
fi
}
#
@ -256,3 +273,31 @@ cloud_FetchFile() {
gcloud compute scp --zone "$zone" "$instanceName:$remoteFile" "$localFile"
)
}
#
# cloud_CreateAndAttachPersistentDisk [instanceName] [diskSize] [diskType] [zone]
#
# Create a persistent disk and attach it to a pre-existing VM instance.
# Set disk to auto-delete upon instance deletion
#
cloud_CreateAndAttachPersistentDisk() {
declare instanceName="$1"
declare diskSize="$2"
declare diskType="$3"
declare zone="$4"
diskName="${instanceName}-pd"
gcloud beta compute disks create "$diskName" \
--size "$diskSize" \
--type "$diskType" \
--zone "$zone"
gcloud compute instances attach-disk "$instanceName" \
--disk "$diskName" \
--zone "$zone"
gcloud compute instances set-disk-auto-delete "$instanceName" \
--disk "$diskName" \
--zone "$zone" \
--auto-delete
}
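For reference, a minimal invocation sketch of the new helper (the instance name, disk size, and zone below are illustrative, not taken from this diff):

# Create a 500 GB pd-ssd, attach it to an existing instance, and mark it
# for auto-delete, mirroring the per-node calls in cloud_CreateInstances above.
cloud_CreateAndAttachPersistentDisk "testnet-solana-fullnode1" 500 pd-ssd us-west1-b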

net/scripts/install-certbot.sh Executable file

@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -ex
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
apt-get update
add-apt-repository --yes ppa:certbot/certbot
apt-get --assume-yes install certbot
cat > /certbot-restore.sh <<'EOF'
#!/usr/bin/env bash
set -e
domain=$1
email=$2
if [[ $USER != root ]]; then
echo "Run as root"
exit 1
fi
if [[ -f /.cert.pem ]]; then
echo "Certificate already initialized"
exit 0
fi
set -x
if [[ -r letsencrypt.tgz ]]; then
tar -C / -zxf letsencrypt.tgz
fi
cd /
rm -f letsencrypt.tgz
maybeDryRun=
# Uncomment during testing to avoid hitting LetsEncrypt API limits while iterating
#maybeDryRun="--dry-run"
certbot certonly --standalone -d "$domain" --email "$email" --agree-tos -n $maybeDryRun
tar zcf letsencrypt.tgz /etc/letsencrypt
ls -l letsencrypt.tgz
# Copy certificates to / for easy access without knowing the value of "$domain"
rm -f /.key.pem /.cert.pem
cp /etc/letsencrypt/live/$domain/privkey.pem /.key.pem
cp /etc/letsencrypt/live/$domain/cert.pem /.cert.pem
EOF
chmod +x /certbot-restore.sh
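For context, a sketch of how this helper is exercised by the net.sh and remote-node.sh changes earlier in this diff:

# On the blockstreamer node, net.sh runs roughly:
#   sudo -H /certbot-restore.sh "$letsEncryptDomainName" maintainers@solana.com
# leaving /.cert.pem, /.key.pem and /letsencrypt.tgz behind; net.sh then copies
# letsencrypt.tgz back to the local machine so the certificate can be restored
# on the next deploy instead of being re-issued.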


@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -x
mount_point=/mnt/extra-disk
disk=sdb
if ! lsblk | grep -q ${disk} ; then
echo "${disk} does not exist"
else
if mount | grep -q ${disk} ; then
echo "${disk} is already mounted"
else
sudo mkfs.ext4 -F /dev/"$disk"
sudo mkdir -p "$mount_point"
sudo mount /dev/"$disk" "$mount_point"
sudo chmod a+w "$mount_point"
if ! mount | grep -q ${mount_point} ; then
echo "${disk} failed to mount!"
exit 1
fi
fi
fi
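A quick manual check after boot (illustrative, not part of the script):

df -h /mnt/extra-disk   # confirms /dev/sdb ended up mounted at the expected path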


@ -1,6 +1,6 @@
[package]
name = "solana-netutil"
version = "0.16.0"
version = "0.16.5"
description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -15,7 +15,7 @@ log = "0.4.2"
nix = "0.14.1"
rand = "0.6.1"
socket2 = "0.3.9"
solana-logger = { path = "../logger", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.5" }
tokio = "0.1"
[lib]


@ -1,7 +1,7 @@
[package]
name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale"
version = "0.16.0"
version = "0.16.5"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@ -9,6 +9,7 @@ repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"
edition = "2018"
publish = false
[features]
bpf_c = []
@ -21,10 +22,10 @@ walkdir = "2"
bincode = "1.1.4"
byteorder = "1.3.2"
elf = "0.0.10"
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-runtime = { path = "../../runtime", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.5" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-runtime = { path = "../../runtime", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
solana_rbpf = "=0.1.13"
[[bench]]


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,8 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit-dep"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF many-args-dep program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-alloc"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF alloc program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-dep-crate"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF dep-crate program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,7 +13,7 @@ edition = "2018"
[dependencies]
byteorder = { version = "1", default-features = false }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-iter"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF many-args program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,8 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args-dep"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF many-args-dep program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-noop"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF noop program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-panic"
version = "0.15.0"
version = "0.16.5"
description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-tick-height"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF noop program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -14,7 +14,7 @@ edition = "2018"
[dependencies]
byteorder = { version = "1", default-features = false }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.5" }
[workspace]
members = []


@ -1,6 +1,6 @@
[package]
name = "solana-bpf-loader-api"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -14,8 +14,8 @@ byteorder = "1.3.2"
libc = "0.2.58"
log = "0.4.2"
serde = "1.0.92"
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
solana_rbpf = "=0.1.13"
[lib]


@ -1,6 +1,6 @@
[package]
name = "solana-bpf-loader-program"
version = "0.16.0"
version = "0.16.5"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,9 +10,9 @@ edition = "2018"
[dependencies]
log = "0.4.2"
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.5" }
[lib]
crate-type = ["lib", "cdylib"]


@ -1,6 +1,6 @@
[package]
name = "solana-budget-api"
version = "0.16.0"
version = "0.16.5"
description = "Solana Budget program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -16,10 +16,10 @@ num-derive = "0.2"
num-traits = "0.2"
serde = "1.0.92"
serde_derive = "1.0.92"
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.16.0" }
solana-runtime = { path = "../../runtime", version = "0.16.5" }
[lib]
crate-type = ["lib"]


@ -1,6 +1,6 @@
[package]
name = "solana-budget-program"
version = "0.16.0"
version = "0.16.5"
description = "Solana budget program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,9 +10,9 @@ edition = "2018"
[dependencies]
log = "0.4.2"
solana-budget-api = { path = "../budget_api", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-budget-api = { path = "../budget_api", version = "0.16.5" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[lib]
crate-type = ["lib", "cdylib"]


@ -1,6 +1,6 @@
[package]
name = "solana-config-api"
version = "0.16.0"
version = "0.16.5"
description = "config program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,13 +13,12 @@ bincode = "1.1.4"
log = "0.4.2"
serde = "1.0.92"
serde_derive = "1.0.92"
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.16.0" }
solana-runtime = { path = "../../runtime", version = "0.16.5" }
[lib]
crate-type = ["lib"]
name = "solana_config_api"


@ -1,26 +1,59 @@
use crate::id;
use crate::ConfigState;
use bincode::serialize;
use serde_derive::{Deserialize, Serialize};
use solana_sdk::instruction::{AccountMeta, Instruction};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::short_vec;
use solana_sdk::system_instruction;
/// A collection of keys to be stored in Config account data.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct ConfigKeys {
// Each key tuple comprises a unique `Pubkey` identifier,
// and a `bool` indicating whether that key is a signer of the data
#[serde(with = "short_vec")]
pub keys: Vec<(Pubkey, bool)>,
}
impl ConfigKeys {
pub fn serialized_size(keys: Vec<(Pubkey, bool)>) -> usize {
serialize(&ConfigKeys { keys })
.unwrap_or_else(|_| vec![])
.len()
}
}
/// Create a new, empty configuration account
pub fn create_account<T: ConfigState>(
from_account_pubkey: &Pubkey,
config_account_pubkey: &Pubkey,
lamports: u64,
keys: Vec<(Pubkey, bool)>,
) -> Instruction {
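// The account data holds a serialized ConfigKeys header followed by the
// ConfigState payload, so allocate space for both.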
let space = T::max_space() + ConfigKeys::serialized_size(keys) as u64;
system_instruction::create_account(
from_account_pubkey,
config_account_pubkey,
lamports,
T::max_space(),
space,
&id(),
)
}
/// Store new data in a configuration account
pub fn store<T: ConfigState>(config_account_pubkey: &Pubkey, data: &T) -> Instruction {
let account_metas = vec![AccountMeta::new(*config_account_pubkey, true)];
Instruction::new(id(), data, account_metas)
pub fn store<T: ConfigState>(
config_account_pubkey: &Pubkey,
is_config_signer: bool,
keys: Vec<(Pubkey, bool)>,
data: &T,
) -> Instruction {
let mut account_metas = vec![AccountMeta::new(*config_account_pubkey, is_config_signer)];
for (signer_pubkey, _) in keys.iter().filter(|(_, is_signer)| *is_signer) {
if signer_pubkey != config_account_pubkey {
account_metas.push(AccountMeta::new(*signer_pubkey, true));
}
}
let account_data = (ConfigKeys { keys }, data);
Instruction::new(id(), &account_data, account_metas)
}


@ -1,5 +1,7 @@
//! Config program
use crate::config_instruction::ConfigKeys;
use bincode::deserialize;
use log::*;
use solana_sdk::account::KeyedAccount;
use solana_sdk::instruction::InstructionError;
@ -10,8 +12,81 @@ pub fn process_instruction(
keyed_accounts: &mut [KeyedAccount],
data: &[u8],
) -> Result<(), InstructionError> {
if keyed_accounts[0].signer_key().is_none() {
error!("account[0].signer_key().is_none()");
let key_list: ConfigKeys = deserialize(data).map_err(|err| {
error!("Invalid ConfigKeys data: {:?} {:?}", data, err);
InstructionError::InvalidInstructionData
})?;
let current_data: ConfigKeys = deserialize(&keyed_accounts[0].account.data).map_err(|err| {
error!("Invalid data in account[0]: {:?} {:?}", data, err);
InstructionError::InvalidAccountData
})?;
let current_signer_keys: Vec<Pubkey> = current_data
.keys
.iter()
.filter(|(_, is_signer)| *is_signer)
.map(|(pubkey, _)| *pubkey)
.collect();
if current_signer_keys.is_empty() {
// The config account keypair must be a signer on account initialization,
// or when no signers are specified in the Config data
if keyed_accounts[0].signer_key().is_none() {
error!("account[0].signer_key().is_none()");
Err(InstructionError::MissingRequiredSignature)?;
}
}
let mut counter = 0;
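// Every signer listed in the incoming ConfigKeys must have signed this
// instruction; the config account itself is handled by the else branch below.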
for (i, (signer, _)) in key_list
.keys
.iter()
.filter(|(_, is_signer)| *is_signer)
.enumerate()
{
counter += 1;
if signer != keyed_accounts[0].unsigned_key() {
let account_index = i + 1;
let signer_account = keyed_accounts.get(account_index);
if signer_account.is_none() {
error!("account {:?} is not in account list", signer);
Err(InstructionError::MissingRequiredSignature)?;
}
let signer_key = signer_account.unwrap().signer_key();
if signer_key.is_none() {
error!("account {:?} signer_key().is_none()", signer);
Err(InstructionError::MissingRequiredSignature)?;
}
if signer_key.unwrap() != signer {
error!(
"account[{:?}].signer_key() does not match Config data)",
account_index
);
Err(InstructionError::MissingRequiredSignature)?;
}
// If Config account is already initialized, update signatures must match Config data
if !current_data.keys.is_empty()
&& current_signer_keys
.iter()
.find(|&pubkey| pubkey == signer)
.is_none()
{
error!("account {:?} is not in stored signer list", signer);
Err(InstructionError::MissingRequiredSignature)?;
}
} else if keyed_accounts[0].signer_key().is_none() {
error!("account[0].signer_key().is_none()");
Err(InstructionError::MissingRequiredSignature)?;
}
}
// Check for Config data signers not present in incoming account update
if current_signer_keys.len() > counter {
error!(
"too few signers: {:?}; expected: {:?}",
counter,
current_signer_keys.len()
);
Err(InstructionError::MissingRequiredSignature)?;
}
@ -20,7 +95,7 @@ pub fn process_instruction(
Err(InstructionError::InvalidInstructionData)?;
}
keyed_accounts[0].account.data[0..data.len()].copy_from_slice(data);
keyed_accounts[0].account.data[0..data.len()].copy_from_slice(&data);
Ok(())
}
@ -64,7 +139,11 @@ mod tests {
(bank, mint_keypair)
}
fn create_config_account(bank: Bank, mint_keypair: &Keypair) -> (BankClient, Keypair) {
fn create_config_account(
bank: Bank,
mint_keypair: &Keypair,
keys: Vec<(Pubkey, bool)>,
) -> (BankClient, Keypair) {
let config_keypair = Keypair::new();
let config_pubkey = config_keypair.pubkey();
@ -76,6 +155,7 @@ mod tests {
&mint_keypair.pubkey(),
&config_pubkey,
1,
keys,
),
)
.expect("new_account");
@ -87,7 +167,7 @@ mod tests {
fn test_process_create_ok() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair, vec![]);
let config_account_data = bank_client
.get_account_data(&config_keypair.pubkey())
.unwrap()
@ -102,13 +182,16 @@ mod tests {
fn test_process_store_ok() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
let keys = vec![];
let (bank_client, config_keypair) =
create_config_account(bank, &mint_keypair, keys.clone());
let config_pubkey = config_keypair.pubkey();
let my_config = MyConfig::new(42);
let instruction = config_instruction::store(&config_pubkey, &my_config);
let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &config_keypair], message)
.unwrap();
@ -117,6 +200,8 @@ mod tests {
.get_account_data(&config_pubkey)
.unwrap()
.unwrap();
let meta_length = ConfigKeys::serialized_size(keys);
let config_account_data = &config_account_data[meta_length..config_account_data.len()];
assert_eq!(
my_config,
MyConfig::deserialize(&config_account_data).unwrap()
@ -127,12 +212,12 @@ mod tests {
fn test_process_store_fail_instruction_data_too_large() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair, vec![]);
let config_pubkey = config_keypair.pubkey();
let my_config = MyConfig::new(42);
let mut instruction = config_instruction::store(&config_pubkey, &my_config);
let mut instruction = config_instruction::store(&config_pubkey, true, vec![], &my_config);
instruction.data = vec![0; 123]; // <-- Replace data with a vector that's too large
let message = Message::new(vec![instruction]);
bank_client
@ -148,13 +233,14 @@ mod tests {
let system_pubkey = system_keypair.pubkey();
bank.transfer(42, &mint_keypair, &system_pubkey).unwrap();
let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair);
let (bank_client, config_keypair) = create_config_account(bank, &mint_keypair, vec![]);
let config_pubkey = config_keypair.pubkey();
let transfer_instruction =
system_instruction::transfer(&system_pubkey, &Pubkey::new_rand(), 42);
let my_config = MyConfig::new(42);
let mut store_instruction = config_instruction::store(&config_pubkey, &my_config);
let mut store_instruction =
config_instruction::store(&config_pubkey, true, vec![], &my_config);
store_instruction.accounts[0].is_signer = false; // <----- not a signer
let message = Message::new(vec![transfer_instruction, store_instruction]);
@ -162,4 +248,232 @@ mod tests {
.send_message(&[&system_keypair], message)
.unwrap_err();
}
#[test]
fn test_process_store_with_additional_signers() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let pubkey = Pubkey::new_rand();
let signer0 = Keypair::new();
let signer1 = Keypair::new();
let keys = vec![
(pubkey, false),
(signer0.pubkey(), true),
(signer1.pubkey(), true),
];
let (bank_client, config_keypair) =
create_config_account(bank, &mint_keypair, keys.clone());
let config_pubkey = config_keypair.pubkey();
let my_config = MyConfig::new(42);
let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(
&[&mint_keypair, &config_keypair, &signer0, &signer1],
message,
)
.unwrap();
let config_account_data = bank_client
.get_account_data(&config_pubkey)
.unwrap()
.unwrap();
let meta_length = ConfigKeys::serialized_size(keys.clone());
let meta_data: ConfigKeys = deserialize(&config_account_data[0..meta_length]).unwrap();
assert_eq!(meta_data.keys, keys);
let config_account_data = &config_account_data[meta_length..config_account_data.len()];
assert_eq!(
my_config,
MyConfig::deserialize(&config_account_data).unwrap()
);
}
#[test]
fn test_process_store_without_config_signer() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let pubkey = Pubkey::new_rand();
let signer0 = Keypair::new();
let keys = vec![(pubkey, false), (signer0.pubkey(), true)];
let (bank_client, config_keypair) =
create_config_account(bank, &mint_keypair, keys.clone());
let config_pubkey = config_keypair.pubkey();
let my_config = MyConfig::new(42);
let instruction =
config_instruction::store(&config_pubkey, false, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &signer0], message)
.unwrap_err();
}
#[test]
fn test_process_store_with_bad_additional_signer() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let signer0 = Keypair::new();
let signer1 = Keypair::new();
let keys = vec![(signer0.pubkey(), true)];
let (bank_client, config_keypair) =
create_config_account(bank, &mint_keypair, keys.clone());
let config_pubkey = config_keypair.pubkey();
let my_config = MyConfig::new(42);
// Config-data pubkey doesn't match signer
let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
let mut message =
Message::new_with_payer(vec![instruction.clone()], Some(&mint_keypair.pubkey()));
message.account_keys[2] = signer1.pubkey();
bank_client
.send_message(&[&mint_keypair, &config_keypair, &signer1], message)
.unwrap_err();
// Config-data pubkey not a signer
let mut message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
message.header.num_required_signatures = 2;
bank_client
.send_message(&[&mint_keypair, &config_keypair], message)
.unwrap_err();
}
#[test]
fn test_config_updates() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let pubkey = Pubkey::new_rand();
let signer0 = Keypair::new();
let signer1 = Keypair::new();
let keys = vec![
(pubkey, false),
(signer0.pubkey(), true),
(signer1.pubkey(), true),
];
let (bank_client, config_keypair) =
create_config_account(bank, &mint_keypair, keys.clone());
let config_pubkey = config_keypair.pubkey();
let my_config = MyConfig::new(42);
let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(
&[&mint_keypair, &config_keypair, &signer0, &signer1],
message,
)
.unwrap();
// Update with expected signatures
let new_config = MyConfig::new(84);
let instruction =
config_instruction::store(&config_pubkey, false, keys.clone(), &new_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &signer0, &signer1], message)
.unwrap();
let config_account_data = bank_client
.get_account_data(&config_pubkey)
.unwrap()
.unwrap();
let meta_length = ConfigKeys::serialized_size(keys.clone());
let meta_data: ConfigKeys = deserialize(&config_account_data[0..meta_length]).unwrap();
assert_eq!(meta_data.keys, keys);
let config_account_data = &config_account_data[meta_length..config_account_data.len()];
assert_eq!(
new_config,
MyConfig::deserialize(&config_account_data).unwrap()
);
// Attempt update with incomplete signatures
let keys = vec![(pubkey, false), (signer0.pubkey(), true)];
let instruction =
config_instruction::store(&config_pubkey, false, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &signer0], message)
.unwrap_err();
// Attempt update with incorrect signatures
let signer2 = Keypair::new();
let keys = vec![
(pubkey, false),
(signer0.pubkey(), true),
(signer2.pubkey(), true),
];
let instruction =
config_instruction::store(&config_pubkey, false, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &signer0, &signer2], message)
.unwrap_err();
}
#[test]
fn test_config_updates_requiring_config() {
solana_logger::setup();
let (bank, mint_keypair) = create_bank(10_000);
let pubkey = Pubkey::new_rand();
let signer0 = Keypair::new();
let keys = vec![
(pubkey, false),
(signer0.pubkey(), true),
(signer0.pubkey(), true),
]; // Dummy keys for account sizing
let (bank_client, config_keypair) =
create_config_account(bank, &mint_keypair, keys.clone());
let config_pubkey = config_keypair.pubkey();
let keys = vec![
(pubkey, false),
(signer0.pubkey(), true),
(config_keypair.pubkey(), true),
];
let my_config = MyConfig::new(42);
let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &config_keypair, &signer0], message)
.unwrap();
// Update with expected signatures
let new_config = MyConfig::new(84);
let instruction =
config_instruction::store(&config_pubkey, true, keys.clone(), &new_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &config_keypair, &signer0], message)
.unwrap();
let config_account_data = bank_client
.get_account_data(&config_pubkey)
.unwrap()
.unwrap();
let meta_length = ConfigKeys::serialized_size(keys.clone());
let meta_data: ConfigKeys = deserialize(&config_account_data[0..meta_length]).unwrap();
assert_eq!(meta_data.keys, keys);
let config_account_data = &config_account_data[meta_length..config_account_data.len()];
assert_eq!(
new_config,
MyConfig::deserialize(&config_account_data).unwrap()
);
// Attempt update with incomplete signatures
let keys = vec![(pubkey, false), (config_keypair.pubkey(), true)];
let instruction = config_instruction::store(&config_pubkey, true, keys.clone(), &my_config);
let message = Message::new_with_payer(vec![instruction], Some(&mint_keypair.pubkey()));
bank_client
.send_message(&[&mint_keypair, &config_keypair], message)
.unwrap_err();
}
}


@ -1,6 +1,6 @@
[package]
name = "solana-config-program"
version = "0.16.0"
version = "0.16.5"
description = "config program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,9 +10,9 @@ edition = "2018"
[dependencies]
log = "0.4.2"
solana-config-api = { path = "../config_api", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-config-api = { path = "../config_api", version = "0.16.5" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[lib]
crate-type = ["lib", "cdylib"]


@ -1,6 +1,6 @@
[package]
name = "solana-exchange-api"
version = "0.16.0"
version = "0.16.5"
description = "Solana Exchange program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,12 +13,12 @@ bincode = "1.1.4"
log = "0.4.2"
serde = "1.0.92"
serde_derive = "1.0.92"
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-metrics = { path = "../../metrics", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-metrics = { path = "../../metrics", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.16.0" }
solana-runtime = { path = "../../runtime", version = "0.16.5" }
[lib]
crate-type = ["lib"]


@ -1,6 +1,6 @@
[package]
name = "solana-exchange-program"
version = "0.16.0"
version = "0.16.5"
description = "Solana exchange program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,9 +10,9 @@ edition = "2018"
[dependencies]
log = "0.4.2"
solana-exchange-api = { path = "../exchange_api", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-exchange-api = { path = "../exchange_api", version = "0.16.5" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[lib]
crate-type = ["lib", "cdylib"]


@ -1,6 +1,6 @@
[package]
name = "solana-failure-program"
version = "0.16.0"
version = "0.16.5"
description = "Solana failure program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,10 +10,10 @@ edition = "2018"
[dependencies]
log = "0.4.2"
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[dev-dependencies]
solana-runtime = { path = "../../runtime", version = "0.16.0" }
solana-runtime = { path = "../../runtime", version = "0.16.5" }
[lib]
crate-type = ["cdylib"]


@ -1,24 +1,23 @@
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_runtime::loader_utils::{create_invoke_instruction, load_program};
use solana_runtime::loader_utils::create_invoke_instruction;
use solana_sdk::client::SyncClient;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::instruction::InstructionError;
use solana_sdk::native_loader;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::KeypairUtil;
use solana_sdk::transaction::TransactionError;
#[test]
fn test_program_native_failure() {
let (genesis_block, alice_keypair) = create_genesis_block(50);
let program_id = Pubkey::new_rand();
let bank = Bank::new(&genesis_block);
let bank_client = BankClient::new(bank);
let program = "solana_failure_program".as_bytes().to_vec();
let program_id = load_program(&bank_client, &alice_keypair, &native_loader::id(), program);
bank.register_native_instruction_processor("solana_failure_program", &program_id);
// Call user program
let instruction = create_invoke_instruction(alice_keypair.pubkey(), program_id, &1u8);
let bank_client = BankClient::new(bank);
assert_eq!(
bank_client
.send_instruction(&alice_keypair, instruction)


@ -1,6 +1,6 @@
[package]
name = "solana-noop-program"
version = "0.16.0"
version = "0.16.5"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,8 +10,8 @@ edition = "2018"
[dependencies]
log = "0.4.2"
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
[lib]
crate-type = ["cdylib"]


@ -1,6 +1,6 @@
[package]
name = "solana-stake-api"
version = "0.16.0"
version = "0.16.5"
description = "Solana Stake program API"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -14,10 +14,10 @@ log = "0.4.2"
rand = "0.6.5"
serde = "1.0.92"
serde_derive = "1.0.92"
solana-logger = { path = "../../logger", version = "0.16.0" }
solana-metrics = { path = "../../metrics", version = "0.16.0" }
solana-sdk = { path = "../../sdk", version = "0.16.0" }
solana-vote-api = { path = "../vote_api", version = "0.16.0" }
solana-logger = { path = "../../logger", version = "0.16.5" }
solana-metrics = { path = "../../metrics", version = "0.16.5" }
solana-sdk = { path = "../../sdk", version = "0.16.5" }
solana-vote-api = { path = "../vote_api", version = "0.16.5" }
[lib]
crate-type = ["lib"]


@ -31,6 +31,24 @@ pub enum StakeInstruction {
/// 2 - RewardsPool Stake Account from which to redeem credits
/// 3 - Rewards syscall Account that carries points values
RedeemVoteCredits,
/// Withdraw unstaked lamports from the stake account
///
/// Expects 3 Accounts:
/// 0 - Delegate StakeAccount
/// 1 - System account to which the lamports will be transferred,
/// 2 - Syscall Account that carries epoch
///
/// The u64 is the portion of the Stake account balance to be withdrawn;
/// it must be <= StakeAccount.lamports - staked lamports
Withdraw(u64),
/// Deactivates the stake in the account
///
/// Expects 2 Accounts:
/// 0 - Delegate StakeAccount
/// 1 - Syscall Account that carries epoch
Deactivate,
}
pub fn create_stake_account(
@ -77,6 +95,23 @@ pub fn delegate_stake(stake_pubkey: &Pubkey, vote_pubkey: &Pubkey, stake: u64) -
Instruction::new(id(), &StakeInstruction::DelegateStake(stake), account_metas)
}
pub fn withdraw(stake_pubkey: &Pubkey, to_pubkey: &Pubkey, lamports: u64) -> Instruction {
let account_metas = vec![
AccountMeta::new(*stake_pubkey, true),
AccountMeta::new(*to_pubkey, false),
AccountMeta::new(syscall::current::id(), false),
];
Instruction::new(id(), &StakeInstruction::Withdraw(lamports), account_metas)
}
pub fn deactivate_stake(stake_pubkey: &Pubkey) -> Instruction {
let account_metas = vec![
AccountMeta::new(*stake_pubkey, true),
AccountMeta::new(syscall::current::id(), false),
];
Instruction::new(id(), &StakeInstruction::Deactivate, account_metas)
}
pub fn process_instruction(
_program_id: &Pubkey,
keyed_accounts: &mut [KeyedAccount],
@ -123,6 +158,27 @@ pub fn process_instruction(
&syscall::rewards::from_keyed_account(&rest[0])?,
)
}
StakeInstruction::Withdraw(lamports) => {
if rest.len() != 2 {
Err(InstructionError::InvalidInstructionData)?;
}
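// rest[0] is the destination system account, rest[1] the Current syscall account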
let (to, syscall) = &mut rest.split_at_mut(1);
let mut to = &mut to[0];
me.withdraw(
lamports,
&mut to,
&syscall::current::from_keyed_account(&syscall[0])?,
)
}
StakeInstruction::Deactivate => {
if rest.len() != 1 {
Err(InstructionError::InvalidInstructionData)?;
}
let syscall = &rest[0];
me.deactivate_stake(&syscall::current::from_keyed_account(&syscall)?)
}
}
}
@ -168,6 +224,14 @@ mod tests {
process_instruction(&delegate_stake(&Pubkey::default(), &Pubkey::default(), 0)),
Err(InstructionError::InvalidAccountData),
);
assert_eq!(
process_instruction(&withdraw(&Pubkey::default(), &Pubkey::new_rand(), 100)),
Err(InstructionError::InvalidAccountData),
);
assert_eq!(
process_instruction(&deactivate_stake(&Pubkey::default())),
Err(InstructionError::InvalidAccountData),
);
}
#[test]
@ -250,6 +314,76 @@ mod tests {
),
Err(InstructionError::InvalidAccountData),
);
// Tests 3rd keyed account is of correct type (Current instead of rewards) in withdraw
assert_eq!(
super::process_instruction(
&Pubkey::default(),
&mut [
KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
KeyedAccount::new(
&syscall::rewards::id(),
false,
&mut syscall::rewards::create_account(1, 0.0, 0.0)
),
],
&serialize(&StakeInstruction::Withdraw(42)).unwrap(),
),
Err(InstructionError::InvalidArgument),
);
// Tests correct number of accounts are provided in withdraw
assert_eq!(
super::process_instruction(
&Pubkey::default(),
&mut [
KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
KeyedAccount::new(
&syscall::current::id(),
false,
&mut syscall::rewards::create_account(1, 0.0, 0.0)
),
],
&serialize(&StakeInstruction::Withdraw(42)).unwrap(),
),
Err(InstructionError::InvalidInstructionData),
);
// Tests 2nd keyed account is of correct type (Current instead of rewards) in deactivate
assert_eq!(
super::process_instruction(
&Pubkey::default(),
&mut [
KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
KeyedAccount::new(
&syscall::rewards::id(),
false,
&mut syscall::rewards::create_account(1, 0.0, 0.0)
),
],
&serialize(&StakeInstruction::Deactivate).unwrap(),
),
Err(InstructionError::InvalidArgument),
);
// Tests correct number of accounts are provided in deactivate
assert_eq!(
super::process_instruction(
&Pubkey::default(),
&mut [
KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
KeyedAccount::new(&Pubkey::default(), false, &mut Account::default()),
KeyedAccount::new(
&syscall::current::id(),
false,
&mut syscall::rewards::create_account(1, 0.0, 0.0)
),
],
&serialize(&StakeInstruction::Deactivate).unwrap(),
),
Err(InstructionError::InvalidInstructionData),
);
}
}
Some files were not shown because too many files have changed in this diff.