Compare commits

...

125 Commits

Author SHA1 Message Date
b5ca6e8e5a Update testnet docs to 0.16.6 release (#5257) 2019-07-23 21:29:43 -06:00
16f50729e9 Add logs to indicate when the leader changes (bp #5253) (#5256)
automerge
2019-07-23 20:19:33 -07:00
c119fdf711 Sort bench-tps keypairs (#5254) (#5255)
automerge
2019-07-23 19:04:08 -07:00
b2a467fa7d Bump blockexplorer version to 1.27.0 2019-07-23 17:13:45 -07:00
cb6f14004d Disable snapshot generation on cluster entrypoint 2019-07-23 14:04:44 -07:00
952e28343e Add support to install a specific Solana version directly from the Github releases (#5249)
automerge
2019-07-23 13:26:39 -07:00
cdfeee1854 Introduce --config-dir to avoid shipping clear-config.sh (#5241) 2019-07-22 23:08:07 -07:00
5eb5589bb3 validator.sh: Add --reset-ledger option (#5235) (#5240)
automerge
2019-07-22 23:04:41 -07:00
c1d78ddbb5 Call book/build.sh from docker (#5237) (#5238)
automerge
2019-07-22 22:22:06 -07:00
cf7c5cdb03 Add --no-deploy option to allow restarting nodes without a software update (#5182) (#5239)
automerge
2019-07-22 22:20:39 -07:00
8c23508cd5 Add manual publish for book and create book-beta (#5112) (#5236)
(cherry picked from commit 735c7c9841)
2019-07-22 18:14:42 -06:00
3ca91c316a Surface validator pubkey in metrics (#5227) (#5234)
(cherry picked from commit 3a69459645)
2019-07-22 17:05:58 -07:00
be3a025491 Do not attempt to create solana user multiple times (#5228) (#5233)
* Do not attempt to create solana user multiple times

(cherry picked from commit 21cef2fe21)
2019-07-22 16:32:25 -07:00
94c757013d Log the repairee pubkey when unable to serve them (#5222) (#5226)
automerge
2019-07-22 15:09:33 -07:00
d6372a930c disable audit until crossbeam epoch release (#5208) 2019-07-22 09:43:03 -07:00
4746902074 add root to terminology (#5209) (#5217)
* add root to terminology

* review feedback

(cherry picked from commit 2d42c1e33e)
2019-07-22 09:39:55 -07:00
c6b95a8f65 Request a uniform timezone 2019-07-22 09:25:15 -07:00
fdfdf2eb39 Encourage setting an RPC port 2019-07-22 08:25:07 -07:00
3448842c0c Add time units 2019-07-22 08:25:07 -07:00
f5f1efe94a Update location of TdS external accounts file url 2019-07-21 17:37:58 -07:00
50e0c806da Update incoming_webhook 2019-07-21 11:27:31 -07:00
e613a0aa7e Update S3 key 2019-07-21 11:17:54 -07:00
2c54cdd07e Ensure CI_OS_NAME is set for appveyor server 2019-07-21 09:26:10 -07:00
cbb0ed7c56 Adjustments for appveyor server 2019-07-21 09:25:22 -07:00
50aa099400 Show wallet commands for better log debugging 2019-07-19 20:20:38 -07:00
53c901471c Fix internal node lamport funding and staking allocation logic (#5192) (#5200)
automerge
2019-07-19 12:28:27 -07:00
4badf63cfd Update struct order arbitrarily to match rpc output (#5197) (#5199)
automerge
2019-07-19 11:23:47 -07:00
d04fd3c3ab Fix up signal handling 2019-07-19 08:34:56 -07:00
a8a9907ace Disable restart 2019-07-19 08:34:56 -07:00
a7644630cc Document fetch-perf-libs.sh when building with CUDA 2019-07-19 08:34:56 -07:00
58666543e2 Add getEpochInfo() and getLeaderSchedule() RPC methods (#5189) (#5195)
automerge
2019-07-19 08:27:44 -07:00
a8a5d16278 Select stable rust version (#5180) (#5191)
automerge
2019-07-18 22:28:11 -07:00
01ebbe367a Ensure validator process is kill when stdout/stderr are redirected (#5179) (#5190)
automerge
2019-07-18 22:01:05 -07:00
2cada71151 fix book typos (#5185) (#5186)
automerge
2019-07-18 17:56:44 -07:00
d6ce97bcbd Fix misleading variable name (bp #5176) (#5183)
automerge
2019-07-18 16:58:53 -07:00
3ddc92ab86 Skip sleeping in replay stage if a bank was recently processed (bp #5161) (#5178)
automerge
2019-07-18 15:47:14 -07:00
5a99e86a60 Keybase: s/id/username (bp #5165) (#5173)
automerge
2019-07-18 12:00:26 -07:00
d9e1a8b492 net/: startnode/stopnode now works for blockstreamer/replicator nodes (#5146) (#5162)
automerge
2019-07-17 20:03:16 -07:00
4244a0f716 Add ability to prune ledger (bp #5128) (#5158)
automerge
2019-07-17 17:14:38 -07:00
5f1d86c040 Bump cargo.toml files to 0.16.6 (#5155) 2019-07-17 14:29:33 -06:00
f9d9c1fcbf Update book to SOLANA_RELEASE=v0.16.5 (#5154) 2019-07-17 14:20:49 -06:00
7c59c105cf Add weighted shuffle support for values upto u64::MAX (#5151) (#5152)
automerge

(cherry picked from commit 10d85f8366)
2019-07-17 13:12:05 -07:00
a8ea9f2738 Fix bench-tps funding math; make generate_keypairs() and fund_keys() algorithms consistent (#4841) (#5145)
* Fix funding math; make generate_keypairs and fund_keys consistent

* Add test, and fix inconsistencies it exposes

* De-pow math, and use assert_eq in tests for better failure msgs
2019-07-17 09:01:13 -07:00
651f87a937 Show stake pubkey 2019-07-16 20:10:58 -07:00
88f8e2f332 Check harder on crates.io for recently published crates (#5136) (#5138)
automerge
2019-07-16 19:54:17 -07:00
a2cb289503 clear-config.sh now works with a secondary disk (#5135) (#5137)
automerge
2019-07-16 19:47:40 -07:00
89bd9d5b72 Bump blockexplorer version 2019-07-16 19:44:42 -07:00
7edaaeb2a1 Improve validator-info CLI (#5121) (#5125)
automerge
2019-07-16 09:08:35 -07:00
1c3ade80c2 Add missing dash 2019-07-16 07:28:26 -07:00
3606d51507 Increment toml and cargo.ock to 0.16.5 (#5119) 2019-07-15 17:32:12 -06:00
281fd88ea7 Update testnet doc to use latest release (#5118) 2019-07-15 17:11:42 -06:00
ee6b625c13 fix transaction_count (bp #5110) (#5111)
automerge
2019-07-15 14:46:11 -07:00
4cc1b85376 Boot remote native loads, take 2 (#5106) (#5109)
automerge
2019-07-15 12:54:24 -07:00
f8312ce125 Keybase pubkey file instructions and verification for validators (#5090) (#5102)
automerge
2019-07-14 23:25:50 -07:00
6a4cd02f64 Add node zone and count to ENV (#5100) (#5101)
automerge
2019-07-14 22:13:50 -07:00
50f238d900 Pull testnet vars up to buildkite env (#5098) (#5099)
automerge
2019-07-14 19:56:59 -07:00
23e3f4e8a2 Plumb --no-snapshot in from CI (#5077) (#5095)
* Plumb --no-snapshot in from CI

(cherry picked from commit 440d006ec1)
2019-07-14 13:22:28 -06:00
27f70dfa49 Correctly decode update manifest (#5086) (#5087)
automerge
2019-07-12 23:25:15 -07:00
72d366a84e Stop trying to publish crates that are unpublishable 2019-07-12 21:53:33 -07:00
2da9de8861 Avoid trying to republish crates already on crates.io 2019-07-12 21:36:07 -07:00
f4288961d5 Add epoch voting history to show-vote-account (#5080) 2019-07-12 21:23:15 -07:00
143ad436cf Give publish-crate more time 2019-07-12 20:28:23 -07:00
0a9fbc3e4c Facility to generate a blocktree prune list using ledger tool (#5041) (#5081)
automerge
2019-07-12 17:49:29 -07:00
7aa091bf8c Add rewards to is_syscall_id() (#5035) 2019-07-12 16:10:28 -07:00
91d8bfa828 Increment cargo tomls to 0.16.4 (#5078) 2019-07-12 16:30:55 -06:00
c501c19750 Add a version field to blobs (bp #5057) (#5068)
automerge
2019-07-12 14:38:32 -07:00
acd55660da Add --no-snapshot to disable booting a validator from a snapshot (#5050) (#5073)
automerge
2019-07-12 15:35:42 -06:00
855bd7d3b8 apt-get update before installing certbot (#5054) (#5056)
* apt-get update before installing certbot

(cherry picked from commit f093377805)
2019-07-12 11:54:56 -06:00
a2e9d8e0bf Enable GPUs and secondary disks for TdS net, pull external account file (#5031) (#5053) 2019-07-12 10:17:46 -06:00
81dbe3c49b Add support for additional disks for config-local (#5030) (#5040)
* Add support for additional disks for config-local

(cherry picked from commit e4861f52e0)
2019-07-12 10:01:07 -06:00
086e20f6c7 Restore ledger-tool print and json commands (#5048) (#5049)
automerge
2019-07-11 21:14:37 -07:00
d08a810c08 v0.16: Expand Config program; implement Validator Info CLI (#5045)
* Update config program to accommodate multiple signers (#4946)

* Update config program to accommodate multiple signers

* Update install CLI

* Remove account_type u32; add handling for unsigned keys in list

* ConfigKeys doc

* Make config_api more robust (#4980)

* Make config_api more robust

* Add test and update store instruction

* Improve signature checks in config_api (#5001)

automerge

* Add validator-info CLI (#4970)

* Add validator-info CLI

* Add GetProgramAccounts method to solana-client

* Update validator-info args, and add get subcommand

* Update ValidatorInfo lengths

* Add account filter for get --all

* Update testnet participation doc to reflect validator-info

* Flesh out tests

* Review comments
2019-07-11 18:28:49 -06:00
400610bf6a v0.16: AccountsDB updates and getProgramAccounts RPC fix (#5044)
* reduce replicode in accounts, fix cast to i64 (#5025)

* add accounts_index_scan_accounts (#5020)

* Plumb scan_accounts into accounts_db, adding load from storage (#5029)

* Fix getProgramAccounts RPC (#5024)

* Use scan_accounts to load accounts by program_id

* Add bank test

* Use get_program_accounts in RPC

* Rebase for v0.16
2019-07-11 17:57:56 -06:00
f759ac3a8d add node_pubkey to vote warning (#5033) (#5034)
(cherry picked from commit a191f3fd90)
2019-07-11 13:58:49 -07:00
558411364e Pass SOLANA_METRICS_CONFIG along to oom-monitor.sh (#5021) (#5026)
(cherry picked from commit 8781aebe06)
2019-07-11 12:43:38 -07:00
d0b5be3051 Rename tds-testnet to tds (#5008) (#5009)
(cherry picked from commit e563a4dda3)
2019-07-10 11:39:57 -06:00
dc6da6fcca Bump @solana/blockexplorer to v1.17.2 2019-07-10 09:33:10 -07:00
8ae11a74fa Move letsencrypt arg to create_args 2019-07-09 21:26:56 -07:00
11f0333728 Include --letsencrypt ($1) 2019-07-09 20:55:41 -07:00
aac74d2357 Fund solana-install deployments from the mint keypair to avoid airdrops (#4997) (#5000)
automerge
2019-07-09 17:29:43 -07:00
508abcf4ed net/ plumbing to manage LetsEncrypt TLS certificates (#4985) (#4996)
automerge
2019-07-09 16:29:45 -07:00
6dbb6c7fe2 Fix always passing in remote filename, even if no accounts file (#4993) (#4994)
* Fix always passing in remote filename, even if no accounts file

* typo

(cherry picked from commit d111223085)
2019-07-09 15:44:04 -07:00
2f58658f61 Add testnet-tds support to testnet manager (#4762) (#4987)
automerge
2019-07-09 14:16:13 -07:00
0ec7ff5e2f Add pubkey (#4971) (#4977)
automerge
2019-07-09 01:28:48 -07:00
4d49820188 Handle replicator errors without panicking (#4957)
* Handle replicator errors without panicking

* Typo

* Handle error with match instead of if-let

* Discard error
2019-07-08 11:23:21 -07:00
6e51babff9 Reduce default commission from 100% to 50% (#4929) 2019-07-05 08:00:39 -07:00
872cf100d7 [Security] Bump smallvec from 0.6.9 to 0.6.10 (#4921)
Bumps [smallvec](https://github.com/servo/rust-smallvec) from 0.6.9 to 0.6.10. **This update includes security fixes.**
- [Release notes](https://github.com/servo/rust-smallvec/releases)
- [Commits](https://github.com/servo/rust-smallvec/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-03 18:12:11 -07:00
78cc4e644c Cargo.lock 2019-07-03 18:10:31 -07:00
81c0152187 install: more little window fixes (#4930)
* Only add .exe extension if no extension was given

* Switch to ctrlc crate for freebie Windows ^C handling
2019-07-03 18:00:27 -07:00
4779625f23 change vote commission to u8 (from u32) (#4887) (#4918)
automerge
2019-07-02 14:52:53 -07:00
3c0b03ba84 Update cargo.toml and cargo.lock files for 0.16.3 (#4917) 2019-07-02 14:33:04 -06:00
c53f163ef5 Add RPC api to return program accounts (#4876) (#4912)
automerge
2019-07-02 11:48:00 -07:00
ca35854417 Add .exe extension before checking for a program file on windows (#4902) (#4903)
automerge
2019-07-02 08:48:35 -07:00
ab1fda2a54 Add curl retries 2019-07-02 08:38:18 -07:00
a6ec77c230 Update Cargo.toml 2019-07-01 23:18:15 -07:00
1d7894f1be Avoid signal-hook crate on windows (#4901) 2019-07-01 22:52:49 -07:00
4866a1fc39 run command now kills child process on SIGTERM to cleanly exit (#4896) (#4899)
automerge
2019-07-01 19:28:12 -07:00
60c5e59a5e Always send pull responses to the origin addr (#4894) (#4897)
automerge
2019-07-01 17:34:51 -07:00
fd93bdadf6 Try to gracefully terminal child process before using SIGKILL (#4890) (#4892)
automerge
2019-07-01 14:43:15 -07:00
6089db2a07 Rework fullnode.sh to recover better from genesis block resets (#4884) (#4888)
automerge
2019-07-01 12:31:53 -07:00
462d0cfc6c Disable Enter prompt when stdin is not a tty (#4874) (#4877)
(cherry picked from commit 41bda18046)
2019-06-28 18:18:39 -07:00
e6d6fc4391 Don't prompt the user to update their PATH if --no-modify-path was supplied (#4872) (#4875)
automerge
2019-06-28 17:39:21 -07:00
092556ae5e Lower warn to info, fetch from validator root instead of root + 1 (#4870) (#4873)
automerge
2019-06-28 16:55:46 -07:00
ad9fa54a47 Ensure validator process is killed when fullnode.sh is killed (#4869) (#4871)
automerge
2019-06-28 15:03:46 -07:00
2d68170747 Update cargo.toml files to version 0.16.2 (#4854) 2019-06-27 11:08:02 -06:00
20f3d18458 Save snapshots followed by accounts to avoid stale account data (#4847) (#4849)
automerge
2019-06-26 23:53:14 -07:00
be79efe9b7 rsync of ledger/ and state.tgz now works on both macOS and Linux (#4845) (#4846)
automerge
2019-06-26 22:45:44 -07:00
5db377f743 Use default pubkey for solana-install sanity check 2019-06-26 21:51:10 -07:00
9c2f45a1e0 Fix possible subtract with overflow if mining pool is not setup (#4836)
automerge
2019-06-26 15:28:37 -07:00
8646918d00 Upload all artifacts 2019-06-26 14:38:55 -07:00
7c44fc3561 Set CI_REPO_SLUG correctly for the solana-secondary pipeline 2019-06-26 14:38:55 -07:00
686403eb1d Setup reward pools in genesis (#4831) (#4834)
automerge
2019-06-26 14:29:27 -07:00
6cf9b60a9c Airdrop more token in wallet sanity due to fee (#4830) (#4832)
automerge
2019-06-26 14:05:17 -07:00
aca142df16 Update cargo.toml files to 0.16.1 (#4829) 2019-06-26 13:25:13 -06:00
b2582196db Create snapshots sparsely (#4815) (#4816)
(cherry picked from commit c5e6ebb496)
2019-06-25 12:13:05 -07:00
85a77bec5f Set proper count value for account stores (#4797) (#4813)
* set count values for store accounts

* Use AppendVecId type

(cherry picked from commit 9e7f618cff)
2019-06-25 07:57:40 -07:00
e781cbf4ba Ignore flaky test_two_unbalanced_stakes (#4794) (#4796)
automerge
2019-06-23 21:30:02 -07:00
59956e4543 Remove --storage-mining-pool-lamports (#4792) (#4793)
automerge
2019-06-23 20:53:31 -07:00
303417f981 Lock blockexplorer version 2019-06-22 22:23:16 -07:00
fea03fdf33 Add storage reward pools (#4779) (#4789)
automerge
2019-06-22 21:26:11 -07:00
e8160efc46 Prevent Travis/Appveyor from trying to build mergify branches (#4786) (#4787)
(cherry picked from commit 23b6b85bf0)
2019-06-22 08:58:13 -07:00
e0ba0d581c Update authorized public key (#4783) 2019-06-22 08:44:00 -07:00
36eda29fc9 Add instructions and processor for stake deactivation (#4781)
automerge
2019-06-22 08:43:17 -07:00
2ec73db6bd Program instruction to withdraw un-staked lamports from stake account (#4780) (#4782) 2019-06-22 00:11:15 -07:00
ef6ce2765e Remove storage-mining-pool-keypair arg 2019-06-21 21:38:43 -07:00
164 changed files with 5242 additions and 1724 deletions

View File

@ -4,7 +4,7 @@ version: '{build}'
branches: branches:
only: only:
- master - master
- /v[0-9.]+/ - /^v[0-9.]+/
cache: cache:
- '%USERPROFILE%\.cargo' - '%USERPROFILE%\.cargo'
@ -16,7 +16,7 @@ build_script:
notifications: notifications:
- provider: Slack - provider: Slack
incoming_webhook: incoming_webhook:
secure: 6HTXVh+FBz29LGJb+taFOo9dqoADfo9xyAszeyXZF5Ub9t5NERytKAR35B2wb+uIOOCBF8+JhmH4437Cgf/ti4IqvURzW1QReXK7eQhn1EI= secure: 6HnLbeS6/Iv7JSMrrHQ7V9OSIjH/3KFzvZiinNWgQqEN0e9A6zaE4MwEXUYDWbcvVJiQneWit6dswY8Scoms2rS1PWEN5N6sjgLgyzroptc=
channel: ci-status channel: ci-status
on_build_success: false on_build_success: false
on_build_failure: true on_build_failure: true
@ -25,9 +25,9 @@ notifications:
deploy: deploy:
- provider: S3 - provider: S3
access_key_id: access_key_id:
secure: ptvqM/yvgeTeA12XOzybH1KYNh95AdfEvqoH9mvP2ic= secure: G6uzyGqbkMCXS2+sCeBCT/+s/11AHLWXCuGayfKcMEE=
secret_access_key: secret_access_key:
secure: IkrgBlz5hdxvwcJdMXyyHUrpWhKa6fXLOD/8rm/rjKqYCdrba9B8V1nLZVrzXGGy secure: Lc+aVrbcPSXoDV7h2J7gqKT+HX0n3eEzp3JIrSP2pcKxbAikGnCtOogCiHO9/er2
bucket: release.solana.com bucket: release.solana.com
region: us-west-1 region: us-west-1
set_public: true set_public: true

1
.gitignore vendored
View File

@ -23,3 +23,4 @@ log-*.txt
# intellij files # intellij files
/.idea/ /.idea/
/solana.iml /solana.iml
/.vscode/

View File

@ -17,7 +17,7 @@ script:
branches: branches:
only: only:
- master - master
- /v.*/ - /^v\d+\.\d+(\.\d+)?(-\S*)?$/
notifications: notifications:
slack: slack:

663
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -41,6 +41,7 @@ members = [
"runtime", "runtime",
"sdk", "sdk",
"upload-perf", "upload-perf",
"validator-info",
"vote-signer", "vote-signer",
"wallet", "wallet",
] ]

View File

@ -61,7 +61,7 @@ There are three release channels that map to branches as follows:
## Release Steps ## Release Steps
### Advance the Channels ### Creating a new branch from master
#### Create the new branch #### Create the new branch
1. Pick your branch point for release on master. 1. Pick your branch point for release on master.
@ -84,6 +84,12 @@ There are three release channels that map to branches as follows:
At this point, `ci/channel-info.sh` should show your freshly cut release branch as At this point, `ci/channel-info.sh` should show your freshly cut release branch as
"BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL". "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
### Update documentation
Document the new recommended version by updating
```export SOLANA_RELEASE=[new scheduled TESTNET_TAG value]```
in book/src/testnet-participation.md on the release (beta) branch.
### Make the Release ### Make the Release
We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release. We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.
@ -106,6 +112,25 @@ We use [github's Releases UI](https://github.com/solana-labs/solana/releases) fo
1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the 1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the
release branch. release branch.
### Publish updated Book
We maintain three copies of the "book" as official documentation:
1) "Book" is the documentation for the latest official release. This should get manually updated whenever a new release is made. It is published here:
https://solana-labs.github.io/book/
2) "Book-edge" tracks the tip of the master branch and updates automatically.
https://solana-labs.github.io/book-edge/
3) "Book-beta" tracks the tip of the beta branch and updates automatically.
https://solana-labs.github.io/book-beta/
To manually trigger an update of the "Book", create a new job of the manual-update-book pipeline.
Set the tag of the latest release as the PUBLISH_BOOK_TAG environment variable.
```bash
PUBLISH_BOOK_TAG=v0.16.6
```
https://buildkite.com/solana-labs/manual-update-book
### Update software on testnet.solana.com ### Update software on testnet.solana.com
The testnet running on testnet.solana.com is set to use a fixed release tag The testnet running on testnet.solana.com is set to use a fixed release tag
@ -145,12 +170,6 @@ TESTNET_TAG=[same value as used in TESTNET_TAG in the schedules]
TESTNET_OP=create-and-start TESTNET_OP=create-and-start
``` ```
#### Update documentation
Document the new recommended version by updating
```export SOLANA_RELEASE=[new scheduled TESTNET_TAG value]```
in book/src/testnet-participation.md for both edge and beta channel branches.
### Alert the community ### Alert the community
Notify Discord users on #validator-support that a new release for Notify Discord users on #validator-support that a new release for

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-exchange" name = "solana-bench-exchange"
version = "0.16.0" version = "0.16.6"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -24,16 +24,16 @@ serde_derive = "1.0.92"
serde_json = "1.0.39" serde_json = "1.0.39"
serde_yaml = "0.8.9" serde_yaml = "0.8.9"
# solana-runtime = { path = "../solana/runtime"} # solana-runtime = { path = "../solana/runtime"}
solana = { path = "../core", version = "0.16.0" } solana = { path = "../core", version = "0.16.6" }
solana-client = { path = "../client", version = "0.16.0" } solana-client = { path = "../client", version = "0.16.6" }
solana-drone = { path = "../drone", version = "0.16.0" } solana-drone = { path = "../drone", version = "0.16.6" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.0" } solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.6" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" } solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.6" }
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }
solana-metrics = { path = "../metrics", version = "0.16.0" } solana-metrics = { path = "../metrics", version = "0.16.6" }
solana-netutil = { path = "../netutil", version = "0.16.0" } solana-netutil = { path = "../netutil", version = "0.16.6" }
solana-runtime = { path = "../runtime", version = "0.16.0" } solana-runtime = { path = "../runtime", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
untrusted = "0.6.2" untrusted = "0.6.2"
ws = "0.8.1" ws = "0.8.1"

View File

@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-streamer" name = "solana-bench-streamer"
version = "0.16.0" version = "0.16.6"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.33.0" clap = "2.33.0"
solana = { path = "../core", version = "0.16.0" } solana = { path = "../core", version = "0.16.6" }
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }
solana-netutil = { path = "../netutil", version = "0.16.0" } solana-netutil = { path = "../netutil", version = "0.16.6" }
[features] [features]
cuda = ["solana/cuda"] cuda = ["solana/cuda"]

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-tps" name = "solana-bench-tps"
version = "0.16.0" version = "0.16.6"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -15,14 +15,14 @@ serde = "1.0.92"
serde_derive = "1.0.92" serde_derive = "1.0.92"
serde_json = "1.0.39" serde_json = "1.0.39"
serde_yaml = "0.8.9" serde_yaml = "0.8.9"
solana = { path = "../core", version = "0.16.0" } solana = { path = "../core", version = "0.16.6" }
solana-client = { path = "../client", version = "0.16.0" } solana-client = { path = "../client", version = "0.16.6" }
solana-drone = { path = "../drone", version = "0.16.0" } solana-drone = { path = "../drone", version = "0.16.6" }
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }
solana-metrics = { path = "../metrics", version = "0.16.0" } solana-metrics = { path = "../metrics", version = "0.16.6" }
solana-netutil = { path = "../netutil", version = "0.16.0" } solana-netutil = { path = "../netutil", version = "0.16.6" }
solana-runtime = { path = "../runtime", version = "0.16.0" } solana-runtime = { path = "../runtime", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
[features] [features]
cuda = ["solana/cuda"] cuda = ["solana/cuda"]

View File

@ -346,10 +346,12 @@ pub fn fund_keys<T: Client>(
source: &Keypair, source: &Keypair,
dests: &[Keypair], dests: &[Keypair],
total: u64, total: u64,
lamports_per_signature: u64, max_fee: u64,
mut extra: u64,
) { ) {
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)]; let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect(); let mut notfunded: Vec<&Keypair> = dests.iter().collect();
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
println!("funding keys {}", dests.len()); println!("funding keys {}", dests.len());
while !notfunded.is_empty() { while !notfunded.is_empty() {
@ -362,7 +364,8 @@ pub fn fund_keys<T: Client>(
break; break;
} }
let start = notfunded.len() - max_units as usize; let start = notfunded.len() - max_units as usize;
let per_unit = (f.1 - max_units * lamports_per_signature) / max_units; let fees = if extra > 0 { max_fee } else { 0 };
let per_unit = (f.1 - lamports_per_account - fees) / max_units;
let moves: Vec<_> = notfunded[start..] let moves: Vec<_> = notfunded[start..]
.iter() .iter()
.map(|k| (k.pubkey(), per_unit)) .map(|k| (k.pubkey(), per_unit))
@ -374,6 +377,7 @@ pub fn fund_keys<T: Client>(
if !moves.is_empty() { if !moves.is_empty() {
to_fund.push((f.0, moves)); to_fund.push((f.0, moves));
} }
extra -= 1;
} }
// try to transfer a "few" at a time with recent blockhash // try to transfer a "few" at a time with recent blockhash
@ -582,16 +586,20 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4) i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
} }
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> Vec<Keypair> { pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
let mut seed = [0u8; 32]; let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]); seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
let mut rnd = GenKeys::new(seed); let mut rnd = GenKeys::new(seed);
let mut total_keys = 1; let mut total_keys = 0;
let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded
let mut delta = 1;
while total_keys < count { while total_keys < count {
total_keys *= MAX_SPENDS_PER_TX; extra += delta;
delta *= MAX_SPENDS_PER_TX;
total_keys += delta;
} }
rnd.gen_n_keypairs(total_keys) (rnd.gen_n_keypairs(total_keys), extra)
} }
pub fn generate_and_fund_keypairs<T: Client>( pub fn generate_and_fund_keypairs<T: Client>(
@ -602,8 +610,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
lamports_per_account: u64, lamports_per_account: u64,
) -> Result<(Vec<Keypair>, u64)> { ) -> Result<(Vec<Keypair>, u64)> {
info!("Creating {} keypairs...", tx_count * 2); info!("Creating {} keypairs...", tx_count * 2);
let mut keypairs = generate_keypairs(funding_pubkey, tx_count as u64 * 2); let (mut keypairs, extra) = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
info!("Get lamports..."); info!("Get lamports...");
// Sample the first keypair, see if it has lamports, if so then resume. // Sample the first keypair, see if it has lamports, if so then resume.
@ -614,19 +621,21 @@ pub fn generate_and_fund_keypairs<T: Client>(
if lamports_per_account > last_keypair_balance { if lamports_per_account > last_keypair_balance {
let (_, fee_calculator) = client.get_recent_blockhash().unwrap(); let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
let extra = let account_desired_balance =
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature; lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
let total = extra * (keypairs.len() as u64); let extra_fees = extra * fee_calculator.max_lamports_per_signature;
let total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total { if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?; airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
} }
info!("adding more lamports {}", extra); info!("adding more lamports {}", account_desired_balance);
fund_keys( fund_keys(
client, client,
funding_pubkey, funding_pubkey,
&keypairs, &keypairs,
total, total,
fee_calculator.max_lamports_per_signature, fee_calculator.max_lamports_per_signature,
extra,
); );
} }
@ -647,6 +656,7 @@ mod tests {
use solana_runtime::bank::Bank; use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient; use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient; use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_block::create_genesis_block; use solana_sdk::genesis_block::create_genesis_block;
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
@ -735,7 +745,33 @@ mod tests {
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap(); generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
for kp in &keypairs { for kp in &keypairs {
assert!(client.get_balance(&kp.pubkey()).unwrap() >= lamports); assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
}
}
#[test]
fn test_bench_tps_fund_keys_with_fees() {
let (mut genesis_block, id) = create_genesis_block(10_000);
let fee_calculator = FeeCalculator::new(11);
genesis_block.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_block);
let client = BankClient::new(bank);
let tx_count = 10;
let lamports = 20;
let (keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
let max_fee = client
.get_recent_blockhash()
.unwrap()
.1
.max_lamports_per_signature;
for kp in &keypairs {
assert_eq!(
client.get_balance(&kp.pubkey()).unwrap(),
lamports + max_fee
);
} }
} }
} }

View File

@ -6,7 +6,7 @@ use crate::bench::{
}; };
use solana::gossip_service::{discover_cluster, get_multi_client}; use solana::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::fee_calculator::FeeCalculator; use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::Keypair; use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::HashMap; use std::collections::HashMap;
use std::fs::File; use std::fs::File;
use std::io::prelude::*; use std::io::prelude::*;
@ -40,7 +40,7 @@ fn main() {
} = cli_config; } = cli_config;
if write_to_client_file { if write_to_client_file {
let keypairs = generate_keypairs(&id, tx_count as u64 * 2); let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2);
let num_accounts = keypairs.len() as u64; let num_accounts = keypairs.len() as u64;
let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature; let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee) let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
@ -91,6 +91,10 @@ fn main() {
keypairs.push(Keypair::from_bytes(&bytes).unwrap()); keypairs.push(Keypair::from_bytes(&bytes).unwrap());
last_balance = balance; last_balance = balance;
}); });
// Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
// This prevents the amount of storage needed for bench-tps accounts from creeping up
// across multiple runs.
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
(keypairs, last_balance) (keypairs, last_balance)
} else { } else {
generate_and_fund_keypairs( generate_and_fund_keypairs(

View File

@ -22,7 +22,7 @@ gossip endpoint (a socket address).
Records shared over gossip are arbitrary, but signed and versioned (with a Records shared over gossip are arbitrary, but signed and versioned (with a
timestamp) as needed to make sense to the node receiving them. If a node timestamp) as needed to make sense to the node receiving them. If a node
recieves two records from the same source, it it updates its own copy with the receives two records from the same source, it updates its own copy with the
record with the most recent timestamp. record with the most recent timestamp.
## Gossip Service Interface ## Gossip Service Interface

View File

@ -25,6 +25,9 @@ Methods
* [getAccountInfo](#getaccountinfo) * [getAccountInfo](#getaccountinfo)
* [getBalance](#getbalance) * [getBalance](#getbalance)
* [getClusterNodes](#getclusternodes) * [getClusterNodes](#getclusternodes)
* [getEpochInfo](#getepochinfo)
* [getLeaderSchedule](#getleaderschedule)
* [getProgramAccounts](#getprogramaccounts)
* [getRecentBlockhash](#getrecentblockhash) * [getRecentBlockhash](#getrecentblockhash)
* [getSignatureStatus](#getsignaturestatus) * [getSignatureStatus](#getsignaturestatus)
* [getSlotLeader](#getslotleader) * [getSlotLeader](#getslotleader)
@ -96,6 +99,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
{"jsonrpc":"2.0","result":true,"id":1} {"jsonrpc":"2.0","result":true,"id":1}
``` ```
---
### getAccountInfo
Returns all information associated with the account of provided Pubkey
##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
##### Results:
The result field will be a JSON object with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```
--- ---
### getBalance ### getBalance
@ -142,28 +171,73 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
--- ---
### getAccountInfo ### getEpochInfo
Returns all information associated with the account of provided Pubkey Returns information about the current epoch
##### Parameters: ##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string None
##### Results: ##### Results:
The result field will be a JSON object with the following sub fields: The result field will be an object with the following fields:
* `epoch`, the current epoch
* `slotIndex`, the current slot relative to the start of the current epoch
* `slotsInEpoch`, the number of slots in this epoch
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1}
```
---
### getLeaderSchedule
Returns the leader schedule for the current epoch
##### Parameters:
None
##### Results:
The result field will be an array of leader public keys (as base-58 encoded
strings) for each slot in the current epoch
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":[...],"id":1}
```
---
### getProgramAccounts
Returns all accounts owned by the provided program Pubkey
##### Parameters:
* `string` - Pubkey of program, as base-58 encoded string
##### Results:
The result field will be an array of arrays. Each sub array will contain:
* `string` - the account Pubkey as base-58 encoded string
and a JSON object, with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer * `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to * `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account * `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only) * `executable`, boolean indicating if the account contains a program (and is strictly read-only)
* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all
##### Example: ##### Example:
```bash ```bash
// Request // Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1} {"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]}], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]}]]},"id":1}
``` ```
--- ---
@ -402,7 +476,7 @@ for a given account public key changes
##### Notification Format: ##### Notification Format:
```bash ```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}} {"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
``` ```
--- ---

View File

@ -11,7 +11,7 @@ of getting its stake slashed. The economics are covered in [staking
rewards](staking-rewards.md). This chapter, on the other hand, describes the rewards](staking-rewards.md). This chapter, on the other hand, describes the
underlying mechanics of its implementation. underlying mechanics of its implementation.
## Basic Besign ## Basic Design
The general idea is that the validator owns a Vote account. The Vote account The general idea is that the validator owns a Vote account. The Vote account
tracks validator votes, counts validator generated credits, and provides any tracks validator votes, counts validator generated credits, and provides any
@ -114,7 +114,13 @@ tokens stored as `Account::lamports`.
The stakes and the MiningPool are accounts that are owned by the same `Stake` The stakes and the MiningPool are accounts that are owned by the same `Stake`
program. program.
### StakeInstruction::Initialize ### StakeInstruction::Initialize
The Stake account is moved from Uninitialized to StakeState::Stake form. This is
how stakers choose their initial delegate validator node and activate their
stake account lamports.
* `account[0]` - RW - The StakeState::Delegate instance. * `account[0]` - RW - The StakeState::Delegate instance.
`StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`. `StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.

View File

@ -91,6 +91,10 @@ History](#proof-of-history).
The time, i.e. number of [slots](#slot), for which a [leader The time, i.e. number of [slots](#slot), for which a [leader
schedule](#leader-schedule) is valid. schedule](#leader-schedule) is valid.
#### finality
When nodes representing 2/3rd of the stake have a common [root](#root).
#### fork #### fork
A [ledger](#ledger) derived from common entries but then diverged. A [ledger](#ledger) derived from common entries but then diverged.
@ -213,6 +217,15 @@ The public key of a [keypair](#keypair).
Storage mining client, stores some part of the ledger enumerated in blocks and Storage mining client, stores some part of the ledger enumerated in blocks and
submits storage proofs to the chain. Not a full-node. submits storage proofs to the chain. Not a full-node.
#### root
A [block](#block) or [slot](#slot) that has reached maximum [lockout](#lockout)
on a validator. The root is the highest block that is an ancestor of all active
forks on a validator. All ancestor blocks of a root are also transitively a
root. Blocks that are not an ancestor and not a descendant of the root are
excluded from consideration for consensus and can be discarded.
#### runtime #### runtime
The component of a [fullnode](#fullnode) responsible for [program](#program) The component of a [fullnode](#fullnode) responsible for [program](#program)

View File

@ -74,8 +74,7 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
software on Linux x86_64 and mac OS systems. software on Linux x86_64 and mac OS systems.
```bash ```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.6/install/solana-install-init.sh | sh -s
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
``` ```
Alternatively build the `solana-install` program from source and run the Alternatively build the `solana-install` program from source and run the
@ -122,8 +121,11 @@ $ ./scripts/cargo-install-all.sh .
$ export PATH=$PWD/bin:$PATH $ export PATH=$PWD/bin:$PATH
``` ```
If building for CUDA, include the `cuda` feature flag as well: If building for CUDA (Linux only), fetch the perf-libs first then include the
`cuda` feature flag when building:
```bash ```bash
$ ./fetch-perf-libs.sh
$ source ./target/perf-libs/env.sh
$ ./scripts/cargo-install-all.sh . cuda $ ./scripts/cargo-install-all.sh . cuda
$ export PATH=$PWD/bin:$PATH $ export PATH=$PWD/bin:$PATH
``` ```
@ -152,21 +154,18 @@ choice, to start the node:
If this is a `solana-install`-installation: If this is a `solana-install`-installation:
```bash ```bash
$ clear-config.sh $ validator.sh --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
$ validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
``` ```
Alternatively, the `solana-install run` command can be used to run the validator Alternatively, the `solana-install run` command can be used to run the validator
node while periodically checking for and applying software updates: node while periodically checking for and applying software updates:
```bash ```bash
$ clear-config.sh $ solana-install run validator.sh -- --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
$ solana-install run validator.sh -- --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
``` ```
If you built from source: If you built from source:
```bash ```bash
$ USE_INSTALL=1 ./multinode-demo/clear-config.sh $ USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
$ USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
``` ```
#### Enabling CUDA #### Enabling CUDA
@ -240,3 +239,46 @@ A local InfluxDB and Grafana instance is now running on your machine. Define
`start.sh` output and restart your validator. `start.sh` output and restart your validator.
Metrics should now be streaming and visible from your local Grafana dashboard. Metrics should now be streaming and visible from your local Grafana dashboard.
#### Timezone For Log Messages
Log messages emitted by your validator include a timestamp. When sharing logs
with others to help triage issues, that timestamp can cause confusion as it does
not contain timezone information.
To make it easier to compare logs between different sources we request that
everybody use Pacific Time on their validator nodes. In Linux this can be
accomplished by running:
```bash
$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
```
#### Publishing Validator Info
You can publish your validator information to the chain to be publicly visible
to other users.
Run the solana-validator-info CLI to populate a validator-info account:
```bash
$ solana-validator-info publish ~/validator-keypair.json <VALIDATOR_NAME> <VALIDATOR_INFO_ARGS>
```
Optional fields for VALIDATOR_INFO_ARGS:
* Website
* Keybase Username
* Details
##### Keybase
Including a Keybase username allows client applications (like the Solana Network
Explorer) to automatically pull in your validator public profile, including
cryptographic proofs, brand identity, etc. To connect your validator pubkey with
Keybase:
1. Join https://keybase.io/ and complete the profile for your validator
2. Add your validator **identity pubkey** to Keybase:
* Create an empty file on your local computer called `validator-<PUBKEY>`
* In Keybase, navigate to the Files section, and upload your pubkey file to
a `solana` subdirectory in your public folder: `/keybase/public/<KEYBASE_USERNAME>/solana`
* To check your pubkey, ensure you can successfully browse to
`https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<PUBKEY>`
3. Add or update your `solana-validator-info` with your Keybase username. The
CLI will verify the `validator-<PUBKEY>` file

View File

@ -53,8 +53,8 @@ software.
##### Linux and mac OS ##### Linux and mac OS
```bash ```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release $ export SOLANA_RELEASE=v0.16.6 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.6/install/solana-install-init.sh | sh -s
``` ```
Alternatively build the `solana-install` program from source and run the Alternatively build the `solana-install` program from source and run the

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-chacha-sys" name = "solana-chacha-sys"
version = "0.16.0" version = "0.16.6"
description = "Solana chacha-sys" description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"

View File

@ -3,7 +3,7 @@ steps:
timeout_in_minutes: 20 timeout_in_minutes: 20
name: "publish docker" name: "publish docker"
- command: "ci/publish-crate.sh" - command: "ci/publish-crate.sh"
timeout_in_minutes: 40 timeout_in_minutes: 60
name: "publish crate" name: "publish crate"
branches: "!master" branches: "!master"
- command: "ci/publish-bpf-sdk.sh" - command: "ci/publish-bpf-sdk.sh"

View File

@ -33,9 +33,15 @@ if [[ -n $CI ]]; then
export CI_PULL_REQUEST= export CI_PULL_REQUEST=
fi fi
export CI_OS_NAME=linux export CI_OS_NAME=linux
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG
else
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
fi
# TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into # TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into
# the solana-secondary builder # the solana-secondary pipeline
if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
export CI_TAG=$TRIGGERED_BUILDKITE_TAG export CI_TAG=$TRIGGERED_BUILDKITE_TAG
else else
@ -53,7 +59,7 @@ if [[ -n $CI ]]; then
fi fi
if [[ $CI_LINUX = True ]]; then if [[ $CI_LINUX = True ]]; then
export CI_OS_NAME=linux export CI_OS_NAME=linux
elif [[ $CI_WINDOWS = True ]]; then else
export CI_OS_NAME=windows export CI_OS_NAME=windows
fi fi
export CI_REPO_SLUG=$APPVEYOR_REPO_NAME export CI_REPO_SLUG=$APPVEYOR_REPO_NAME

View File

@ -2,8 +2,50 @@
set -e set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
BOOK="book"
book/build.sh source ci/rust-version.sh stable
eval "$(ci/channel-info.sh)"
if [[ -n $PUBLISH_BOOK_TAG ]]; then
CURRENT_TAG="$(git describe --tags)"
COMMIT_TO_PUBLISH="$(git rev-list -n 1 "${PUBLISH_BOOK_TAG}")"
# book is manually published at a specified release tag
if [[ $PUBLISH_BOOK_TAG != "$CURRENT_TAG" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
build:
message: "$BUILDKITE_MESSAGE"
commit: "$COMMIT_TO_PUBLISH"
env:
PUBLISH_BOOK_TAG: "$PUBLISH_BOOK_TAG"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
repo=git@github.com:solana-labs/book.git
else
# book-edge and book-beta are published automatically on the tip of the branch
case $CHANNEL in
edge)
repo=git@github.com:solana-labs/book-edge.git
;;
beta)
repo=git@github.com:solana-labs/book-beta.git
;;
*)
echo "--- publish skipped"
exit 0
;;
esac
BOOK=$CHANNEL
fi
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "book/build.sh"
echo --- create book repo echo --- create book repo
( (
@ -16,22 +58,7 @@ echo --- create book repo
git commit -m "${CI_COMMIT:-local}" git commit -m "${CI_COMMIT:-local}"
) )
eval "$(ci/channel-info.sh)" echo "--- publish $BOOK"
# Only publish the book from the edge and beta channels for now.
case $CHANNEL in
edge)
repo=git@github.com:solana-labs/book-edge.git
;;
beta)
repo=git@github.com:solana-labs/book.git
;;
*)
echo "--- publish skipped"
exit 0
;;
esac
echo "--- publish $CHANNEL"
cd book/html/ cd book/html/
git remote add origin $repo git remote add origin $repo
git fetch origin master git fetch origin master

View File

@ -2,6 +2,7 @@
set -e set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
source ci/semver_bash/semver.sh source ci/semver_bash/semver.sh
source ci/rust-version.sh stable
# shellcheck disable=SC2086 # shellcheck disable=SC2086
is_crate_version_uploaded() { is_crate_version_uploaded() {
@ -25,35 +26,56 @@ expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
exit 1 exit 1
} }
cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
Cargo_tomls=$(ci/order-crates-for-publishing.py) Cargo_tomls=$(ci/order-crates-for-publishing.py)
for Cargo_toml in $Cargo_tomls; do for Cargo_toml in $Cargo_tomls; do
echo "-- $Cargo_toml" echo "--- $Cargo_toml"
grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || { grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || {
echo "Error: $Cargo_toml version is not $expectedCrateVersion" echo "Error: $Cargo_toml version is not $expectedCrateVersion"
exit 1 exit 1
} }
crate_name=$(grep -m 1 '^name = ' "$Cargo_toml" | cut -f 3 -d ' ' | tr -d \")
if grep -q "^publish = false" "$Cargo_toml"; then
echo "$crate_name is marked as unpublishable"
continue
fi
if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
echo "${crate_name} version ${expectedCrateVersion} is already on crates.io"
continue
fi
( (
set -x set -x
crate=$(dirname "$Cargo_toml") crate=$(dirname "$Cargo_toml")
# TODO: the rocksdb package does not build with the stock rust docker image, # TODO: the rocksdb package does not build with the stock rust docker image,
# so use the solana rust docker image until this is resolved upstream # so use the solana rust docker image until this is resolved upstream
source ci/rust-version.sh cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand" ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues ) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues
# shellcheck disable=SC2086
crate_name=$(grep -m 1 '^name = ' $Cargo_toml | cut -f 3 -d ' ' | tr -d \")
numRetries=30 numRetries=30
for ((i = 1 ; i <= numRetries ; i++)); do for ((i = 1 ; i <= numRetries ; i++)); do
echo "Attempt ${i} of ${numRetries}" echo "Attempt ${i} of ${numRetries}"
# shellcheck disable=SC2086 if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
if [[ $(is_crate_version_uploaded $crate_name $expectedCrateVersion) = True ]] ; then echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io REST API"
echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io"
break really_uploaded=0
(
set -x
rm -rf crate-test
cargo +"$rust_stable" init crate-test
cd crate-test/
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
echo "[workspace]" >> Cargo.toml
cargo +"$rust_stable" check
) && really_uploaded=1
if ((really_uploaded)); then
break;
fi
echo "${crate_name} not yet available for download from crates.io"
fi fi
echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds." echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds."
sleep 2 sleep 2

View File

@ -49,7 +49,8 @@ windows)
TARGET=x86_64-pc-windows-msvc TARGET=x86_64-pc-windows-msvc
;; ;;
*) *)
TARGET=unknown-unknown-unknown echo CI_OS_NAME unset
exit 1
;; ;;
esac esac
@ -70,6 +71,12 @@ echo --- Creating tarball
source ci/rust-version.sh stable source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" solana-release scripts/cargo-install-all.sh +"$rust_stable" solana-release
# Reduce the archive size until
# https://github.com/appveyor/ci/issues/2997 is fixed
if [[ -n $APPVEYOR ]]; then
rm -f solana-release/bin/solana-validator.exe solana-release/bin/solana-bench-exchange.exe
fi
if $PERF_LIBS; then if $PERF_LIBS; then
rm -rf target/perf-libs rm -rf target/perf-libs
./fetch-perf-libs.sh ./fetch-perf-libs.sh
@ -94,22 +101,13 @@ echo --- Creating tarball
set -e set -e
cd "$(dirname "$0")"/.. cd "$(dirname "$0")"/..
export USE_INSTALL=1 export USE_INSTALL=1
export REQUIRE_CONFIG_DIR=1
exec multinode-demo/validator.sh "$@" exec multinode-demo/validator.sh "$@"
EOF EOF
chmod +x solana-release/bin/validator.sh chmod +x solana-release/bin/validator.sh
# Add a wrapper script for clear-config.sh tar cvf solana-release-$TARGET.tar solana-release
# TODO: Remove multinode/... from tarball bzip2 solana-release-$TARGET.tar
cat > solana-release/bin/clear-config.sh <<'EOF'
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"/..
export USE_INSTALL=1
exec multinode-demo/clear-config.sh "$@"
EOF
chmod +x solana-release/bin/clear-config.sh
tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
cp solana-release/bin/solana-install-init solana-install-init-$TARGET cp solana-release/bin/solana-install-init solana-install-init-$TARGET
) )
@ -120,16 +118,16 @@ if [[ "$CI_OS_NAME" = linux ]]; then
MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2 MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
fi fi
echo --- Saving build artifacts
source ci/upload-ci-artifact.sh source ci/upload-ci-artifact.sh
upload-ci-artifact solana-release-$TARGET.tar.bz2
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo Skipped due to DO_NOT_PUBLISH_TAR
exit 0
fi
for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
upload-ci-artifact "$file"
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
continue
fi
if [[ -n $BUILDKITE ]]; then if [[ -n $BUILDKITE ]]; then
echo --- AWS S3 Store: "$file" echo --- AWS S3 Store: "$file"
( (

View File

@ -14,7 +14,7 @@ do_bpf_check() {
_ cargo +"$rust_stable" fmt --all -- --check _ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_nightly" clippy --all -- --version _ cargo +"$rust_nightly" clippy --all -- --version
_ cargo +"$rust_nightly" clippy --all -- --deny=warnings _ cargo +"$rust_nightly" clippy --all -- --deny=warnings
_ cargo +"$rust_stable" audit # _ cargo +"$rust_stable" audit
} }
( (
@ -33,7 +33,7 @@ do_bpf_check() {
_ cargo +"$rust_stable" fmt --all -- --check _ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_stable" clippy --all -- --version _ cargo +"$rust_stable" clippy --all -- --version
_ cargo +"$rust_stable" clippy --all -- --deny=warnings _ cargo +"$rust_stable" clippy --all -- --deny=warnings
_ cargo +"$rust_stable" audit #_ cargo +"$rust_stable" audit
_ ci/nits.sh _ ci/nits.sh
_ ci/order-crates-for-publishing.py _ ci/order-crates-for-publishing.py
_ book/build.sh _ book/build.sh

View File

@ -24,6 +24,14 @@ blockstreamer=false
deployUpdateManifest=true deployUpdateManifest=true
fetchLogs=true fetchLogs=true
maybeHashesPerTick= maybeHashesPerTick=
maybeDisableAirdrops=
maybeInternalNodesStakeLamports=
maybeInternalNodesLamports=
maybeExternalPrimordialAccountsFile=
maybeLamports=
maybeLetsEncrypt=
maybeFullnodeAdditionalDiskSize=
maybeNoSnapshot=
usage() { usage() {
exitcode=0 exitcode=0
@ -62,11 +70,28 @@ Deploys a CD testnet
-s - Skip start. Nodes will still be created or configured, but network software will not be started. -s - Skip start. Nodes will still be created or configured, but network software will not be started.
-S - Stop network software without tearing down nodes. -S - Stop network software without tearing down nodes.
-f - Discard validator nodes that didn't bootup successfully -f - Discard validator nodes that didn't bootup successfully
-w - Skip time-consuming "bells and whistles" that are --no-airdrop
unnecessary for a high-node count demo testnet - If set, disables airdrops. Nodes must be funded in genesis block when airdrops are disabled.
--internal-nodes-stake-lamports NUM_LAMPORTS
- Amount to stake internal nodes.
--internal-nodes-lamports NUM_LAMPORTS
- Amount to fund internal nodes in genesis block
--external-accounts-file FILE_PATH
- Path to external Primordial Accounts file, if it exists.
--hashes-per-tick NUM_HASHES|sleep|auto --hashes-per-tick NUM_HASHES|sleep|auto
- Override the default --hashes-per-tick for the cluster - Override the default --hashes-per-tick for the cluster
--lamports NUM_LAMPORTS
- Specify the number of lamports to mint (default 100000000000000)
--skip-deploy-update
- If set, will skip software update deployment
--skip-remote-log-retrieval
- If set, will not fetch logs from remote nodes
--letsencrypt [dns name]
- Attempt to generate a TLS certificate using this DNS name
--fullnode-additional-disk-size-gb [number]
- Size of additional disk in GB for all fullnodes
--no-snapshot
- If set, disables booting validators from a snapshot
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics metrics
@ -82,6 +107,39 @@ while [[ -n $1 ]]; do
if [[ $1 = --hashes-per-tick ]]; then if [[ $1 = --hashes-per-tick ]]; then
maybeHashesPerTick="$1 $2" maybeHashesPerTick="$1 $2"
shift 2 shift 2
elif [[ $1 = --lamports ]]; then
maybeLamports="$1 $2"
shift 2
elif [[ $1 = --no-airdrop ]]; then
maybeDisableAirdrops="$1"
shift 1
elif [[ $1 = --internal-nodes-stake-lamports ]]; then
maybeInternalNodesStakeLamports="$1 $2"
shift 2
elif [[ $1 = --internal-nodes-lamports ]]; then
maybeInternalNodesLamports="$1 $2"
shift 2
elif [[ $1 = --external-accounts-file ]]; then
maybeExternalPrimordialAccountsFile="$1 $2"
shift 2
elif [[ $1 = --skip-deploy-update ]]; then
deployUpdateManifest=false
shift 1
elif [[ $1 = --skip-remote-log-retrieval ]]; then
fetchLogs=false
shift 1
elif [[ $1 = --letsencrypt ]]; then
maybeLetsEncrypt="$1 $2"
shift 2
elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
maybeFullnodeAdditionalDiskSize="$1 $2"
shift 2
elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
shortArgs+=("$1")
shift
elif [[ $1 = --no-snapshot ]]; then
maybeNoSnapshot="$1"
shift 1
else else
usage "Unknown long option: $1" usage "Unknown long option: $1"
fi fi
@ -228,6 +286,11 @@ if ! $skipCreate; then
# shellcheck disable=SC2206 # shellcheck disable=SC2206
create_args+=(${zone_args[@]}) create_args+=(${zone_args[@]})
if [[ -n $maybeLetsEncrypt ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeLetsEncrypt
create_args+=($maybeLetsEncrypt)
fi
if $blockstreamer; then if $blockstreamer; then
create_args+=(-u) create_args+=(-u)
fi fi
@ -256,6 +319,11 @@ if ! $skipCreate; then
create_args+=(-f) create_args+=(-f)
fi fi
if [[ -n $maybeFullnodeAdditionalDiskSize ]]; then
# shellcheck disable=SC2206 # Do not want to quote
create_args+=($maybeFullnodeAdditionalDiskSize)
fi
time net/"$cloudProvider".sh create "${create_args[@]}" time net/"$cloudProvider".sh create "${create_args[@]}"
else else
echo "--- $cloudProvider.sh config" echo "--- $cloudProvider.sh config"
@ -318,7 +386,6 @@ if ! $skipStart; then
# shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick # shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick
args+=($maybeHashesPerTick) args+=($maybeHashesPerTick)
fi fi
if $reuseLedger; then if $reuseLedger; then
args+=(-r) args+=(-r)
fi fi
@ -334,7 +401,32 @@ if ! $skipStart; then
args+=(--deploy-update windows) args+=(--deploy-update windows)
fi fi
# shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables if [[ -n $maybeDisableAirdrops ]]; then
# shellcheck disable=SC2206
args+=($maybeDisableAirdrops)
fi
if [[ -n $maybeInternalNodesStakeLamports ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeInternalNodesStakeLamports
args+=($maybeInternalNodesStakeLamports)
fi
if [[ -n $maybeInternalNodesLamports ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeInternalNodesLamports
args+=($maybeInternalNodesLamports)
fi
if [[ -n $maybeExternalPrimordialAccountsFile ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeExternalPrimordialAccountsFile
args+=($maybeExternalPrimordialAccountsFile)
fi
if [[ -n $maybeLamports ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeLamports
args+=($maybeLamports)
fi
if [[ -n $maybeNoSnapshot ]]; then
# shellcheck disable=SC2206
args+=($maybeNoSnapshot)
fi
time net/net.sh "${args[@]}" time net/net.sh "${args[@]}"
) || ok=false ) || ok=false

View File

@ -44,6 +44,8 @@ steps:
value: "testnet-beta-perf" value: "testnet-beta-perf"
- label: "testnet-demo" - label: "testnet-demo"
value: "testnet-demo" value: "testnet-demo"
- label: "tds"
value: "tds"
- select: "Operation" - select: "Operation"
key: "testnet-operation" key: "testnet-operation"
default: "sanity-or-restart" default: "sanity-or-restart"
@ -153,6 +155,10 @@ testnet-demo)
: "${GCE_NODE_COUNT:=150}" : "${GCE_NODE_COUNT:=150}"
: "${GCE_LOW_QUOTA_NODE_COUNT:=70}" : "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
;; ;;
tds)
CHANNEL_OR_TAG=beta
CHANNEL_BRANCH=$BETA_CHANNEL
;;
*) *)
echo "Error: Invalid TESTNET=$TESTNET" echo "Error: Invalid TESTNET=$TESTNET"
exit 1 exit 1
@ -287,6 +293,14 @@ sanity() {
$ok $ok
) )
;; ;;
tds)
(
set -x
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-sanity.sh tds-solana-com gce "${GCE_ZONES[0]}" -f
)
;;
*) *)
echo "Error: Invalid TESTNET=$TESTNET" echo "Error: Invalid TESTNET=$TESTNET"
exit 1 exit 1
@ -321,7 +335,8 @@ deploy() {
( (
set -x set -x
ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \ ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \ -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-a eipalloc-0ccd4f2239886fa94 --letsencrypt edge.testnet.solana.com \
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
@ -347,7 +362,8 @@ deploy() {
set -x set -x
NO_VALIDATOR_SANITY=1 \ NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \ ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \ -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-a eipalloc-0f286cf8a0771ce35 --letsencrypt beta.testnet.solana.com \
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
@ -378,7 +394,8 @@ deploy() {
# shellcheck disable=SC2068 # shellcheck disable=SC2068
ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \ ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f -a eipalloc-0fa502bf95f6f18b2 \ -t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f \
-a eipalloc-0fa502bf95f6f18b2 --letsencrypt testnet.solana.com \
${skipCreate:+-e} \ ${skipCreate:+-e} \
${maybeSkipStart:+-s} \ ${maybeSkipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
@ -424,7 +441,9 @@ deploy() {
NO_LEDGER_VERIFY=1 \ NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \ NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \ ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f -w \ -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f \
--skip-deploy-update \
--skip-remote-log-retrieval \
-a demo-testnet-solana-com \ -a demo-testnet-solana-com \
${skipCreate:+-e} \ ${skipCreate:+-e} \
${maybeSkipStart:+-s} \ ${maybeSkipStart:+-s} \
@ -436,7 +455,9 @@ deploy() {
NO_LEDGER_VERIFY=1 \ NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \ NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \ ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x -w \ -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x \
--skip-deploy-update \
--skip-remote-log-retrieval \
${skipCreate:+-e} \ ${skipCreate:+-e} \
${skipStart:+-s} \ ${skipStart:+-s} \
${maybeStop:+-S} \ ${maybeStop:+-S} \
@ -444,6 +465,125 @@ deploy() {
fi fi
) )
;; ;;
tds)
(
set -x
# Allow cluster configuration to be overridden from env vars
if [[ -z $TDS_ZONES ]]; then
TDS_ZONES="us-west1-a,us-central1-a,europe-west4-a"
fi
GCE_CLOUD_ZONES=(); while read -r -d, ; do GCE_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TDS_ZONES},"
if [[ -z $TDS_NODE_COUNT ]]; then
TDS_NODE_COUNT="3"
fi
if [[ -z $TDS_CLIENT_COUNT ]]; then
TDS_CLIENT_COUNT="1"
fi
if [[ -z $ENABLE_GPU ]]; then
maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
elif [[ $ENABLE_GPU == skip ]]; then
maybeGpu=()
else
maybeGpu=(-G "${ENABLE_GPU}")
fi
if [[ -z $HASHES_PER_TICK ]]; then
maybeHashesPerTick="--hashes-per-tick auto"
elif [[ $HASHES_PER_TICK == skip ]]; then
maybeHashesPerTick=""
else
maybeHashesPerTick="--hashes-per-tick ${HASHES_PER_TICK}"
fi
if [[ -z $DISABLE_AIRDROPS ]]; then
DISABLE_AIRDROPS="true"
fi
if [[ $DISABLE_AIRDROPS == true ]] ; then
maybeDisableAirdrops="--no-airdrop"
else
maybeDisableAirdrops=""
fi
if [[ -z $INTERNAL_NODES_STAKE_LAMPORTS ]]; then
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports 1000000000000"
elif [[ $INTERNAL_NODES_STAKE_LAMPORTS == skip ]]; then
maybeInternalNodesStakeLamports=""
else
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports ${INTERNAL_NODES_STAKE_LAMPORTS}"
fi
if [[ -z $INTERNAL_NODES_LAMPORTS ]]; then
maybeInternalNodesLamports="--internal-nodes-lamports 2000000000000"
elif [[ $INTERNAL_NODES_LAMPORTS == skip ]]; then
maybeInternalNodesLamports=""
else
maybeInternalNodesLamports="--internal-nodes-lamports ${INTERNAL_NODES_LAMPORTS}"
fi
EXTERNAL_ACCOUNTS_FILE=/tmp/validator.yml
if [[ -z $EXTERNAL_ACCOUNTS_FILE_URL ]]; then
EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/validators/all.yml
wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
elif [[ $EXTERNAL_ACCOUNTS_FILE_URL == skip ]]; then
maybeExternalAccountsFile=""
else
wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
fi
if [[ -z $LAMPORTS ]]; then
maybeLamports="--lamports 8589934592000000000"
elif [[ $LAMPORTS == skip ]]; then
maybeLamports=""
else
maybeLamports="--lamports ${LAMPORTS}"
fi
if [[ -z $ADDITIONAL_DISK_SIZE_GB ]]; then
maybeAdditionalDisk="--fullnode-additional-disk-size-gb 32000"
elif [[ $ADDITIONAL_DISK_SIZE_GB == skip ]]; then
maybeAdditionalDisk=""
else
maybeAdditionalDisk="--fullnode-additional-disk-size-gb ${ADDITIONAL_DISK_SIZE_GB}"
fi
# Multiple V100 GPUs are available in us-west1, us-central1 and europe-west4
# shellcheck disable=SC2068
# shellcheck disable=SC2086
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p tds-solana-com -C gce \
"${maybeGpu[@]}" \
-d pd-ssd \
${GCE_CLOUD_ZONES[@]/#/-z } \
-t "$CHANNEL_OR_TAG" \
-n ${TDS_NODE_COUNT} \
-c ${TDS_CLIENT_COUNT} \
-P -u \
-a tds-solana-com --letsencrypt tds.solana.com \
${maybeHashesPerTick} \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
${maybeDelete:+-D} \
${maybeDisableAirdrops} \
${maybeInternalNodesStakeLamports} \
${maybeInternalNodesLamports} \
${maybeExternalAccountsFile} \
${maybeLamports} \
${maybeAdditionalDisk} \
--skip-deploy-update \
--no-snapshot
)
;;
*) *)
echo "Error: Invalid TESTNET=$TESTNET" echo "Error: Invalid TESTNET=$TESTNET"
exit 1 exit 1

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-client" name = "solana-client"
version = "0.16.0" version = "0.16.6"
description = "Solana Client" description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ reqwest = "0.9.18"
serde = "1.0.92" serde = "1.0.92"
serde_derive = "1.0.92" serde_derive = "1.0.92"
serde_json = "1.0.39" serde_json = "1.0.39"
solana-netutil = { path = "../netutil", version = "0.16.0" } solana-netutil = { path = "../netutil", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
[dev-dependencies] [dev-dependencies]
jsonrpc-core = "12.0.0" jsonrpc-core = "12.0.0"
jsonrpc-http-server = "12.0.0" jsonrpc-http-server = "12.0.0"
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }

View File

@ -274,6 +274,39 @@ impl RpcClient {
self.get_account(pubkey).map(|account| account.lamports) self.get_account(pubkey).map(|account| account.lamports)
} }
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> io::Result<Vec<(Pubkey, Account)>> {
let params = json!([format!("{}", pubkey)]);
let response = self
.client
.send(&RpcRequest::GetProgramAccounts, Some(params), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("AccountNotFound: pubkey={}: {}", pubkey, err),
)
})?;
let accounts: Vec<(String, Account)> =
serde_json::from_value::<Vec<(String, Account)>>(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
)
})?;
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for (string, account) in accounts.into_iter() {
let pubkey = string.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
)
})?;
pubkey_accounts.push((pubkey, account));
}
Ok(pubkey_accounts)
}
/// Request the transaction count. If the response packet is dropped by the network, /// Request the transaction count. If the response packet is dropped by the network,
/// this method will try again 5 times. /// this method will try again 5 times.
pub fn get_transaction_count(&self) -> io::Result<u64> { pub fn get_transaction_count(&self) -> io::Result<u64> {

View File

@ -10,6 +10,7 @@ pub enum RpcRequest {
GetBalance, GetBalance,
GetClusterNodes, GetClusterNodes,
GetNumBlocksSinceSignatureConfirmation, GetNumBlocksSinceSignatureConfirmation,
GetProgramAccounts,
GetRecentBlockhash, GetRecentBlockhash,
GetSignatureStatus, GetSignatureStatus,
GetSlot, GetSlot,
@ -38,6 +39,7 @@ impl RpcRequest {
RpcRequest::GetNumBlocksSinceSignatureConfirmation => { RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation" "getNumBlocksSinceSignatureConfirmation"
} }
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash", RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatus => "getSignatureStatus", RpcRequest::GetSignatureStatus => "getSignatureStatus",
RpcRequest::GetSlot => "getSlot", RpcRequest::GetSlot => "getSlot",

View File

@ -1,7 +1,7 @@
[package] [package]
name = "solana" name = "solana"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.16.0" version = "0.16.6"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "../README.md" readme = "../README.md"
@ -45,27 +45,27 @@ rocksdb = "0.11.0"
serde = "1.0.92" serde = "1.0.92"
serde_derive = "1.0.92" serde_derive = "1.0.92"
serde_json = "1.0.39" serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.16.0" } solana-budget-api = { path = "../programs/budget_api", version = "0.16.6" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.0" } solana-budget-program = { path = "../programs/budget_program", version = "0.16.6" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.16.0" } solana-chacha-sys = { path = "../chacha-sys", version = "0.16.6" }
solana-client = { path = "../client", version = "0.16.0" } solana-client = { path = "../client", version = "0.16.6" }
solana-config-program = { path = "../programs/config_program", version = "0.16.0" } solana-config-program = { path = "../programs/config_program", version = "0.16.6" }
solana-drone = { path = "../drone", version = "0.16.0" } solana-drone = { path = "../drone", version = "0.16.6" }
solana-ed25519-dalek = "0.2.0" solana-ed25519-dalek = "0.2.0"
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" } solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.6" }
solana-kvstore = { path = "../kvstore", version = "0.16.0", optional = true } solana-kvstore = { path = "../kvstore", version = "0.16.6", optional = true }
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }
solana-metrics = { path = "../metrics", version = "0.16.0" } solana-metrics = { path = "../metrics", version = "0.16.6" }
solana-netutil = { path = "../netutil", version = "0.16.0" } solana-netutil = { path = "../netutil", version = "0.16.6" }
solana-runtime = { path = "../runtime", version = "0.16.0" } solana-runtime = { path = "../runtime", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.0" } solana-stake-api = { path = "../programs/stake_api", version = "0.16.6" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.0" } solana-stake-program = { path = "../programs/stake_program", version = "0.16.6" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.0" } solana-storage-api = { path = "../programs/storage_api", version = "0.16.6" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.0" } solana-storage-program = { path = "../programs/storage_program", version = "0.16.6" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.16.6" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.0" } solana-vote-program = { path = "../programs/vote_program", version = "0.16.6" }
solana-vote-signer = { path = "../vote-signer", version = "0.16.0" } solana-vote-signer = { path = "../vote-signer", version = "0.16.6" }
sys-info = "0.5.7" sys-info = "0.5.7"
tokio = "0.1" tokio = "0.1"
tokio-codec = "0.1" tokio-codec = "0.1"

View File

@ -329,8 +329,9 @@ impl BankForks {
names.sort(); names.sort();
let mut bank_maps = vec![]; let mut bank_maps = vec![];
let status_cache_rc = StatusCacheRc::default(); let status_cache_rc = StatusCacheRc::default();
let id = (names[names.len() - 1] + 1) as usize;
let mut bank0 = let mut bank0 =
Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc); Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc, id);
bank0.freeze(); bank0.freeze();
let bank_root = BankForks::load_snapshots( let bank_root = BankForks::load_snapshots(
&names, &names,

View File

@ -418,7 +418,7 @@ impl BankingStage {
// the likelihood of any single thread getting starved and processing old ids. // the likelihood of any single thread getting starved and processing old ids.
// TODO: Banking stage threads should be prioritized to complete faster then this queue // TODO: Banking stage threads should be prioritized to complete faster then this queue
// expires. // expires.
let (loaded_accounts, results) = let (loaded_accounts, results, tx_count, signature_count) =
bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2); bank.load_and_execute_transactions(txs, lock_results, MAX_RECENT_BLOCKHASHES / 2);
let load_execute_time = now.elapsed(); let load_execute_time = now.elapsed();
@ -432,7 +432,7 @@ impl BankingStage {
let commit_time = { let commit_time = {
let now = Instant::now(); let now = Instant::now();
bank.commit_transactions(txs, &loaded_accounts, &results); bank.commit_transactions(txs, &loaded_accounts, &results, tx_count, signature_count);
now.elapsed() now.elapsed()
}; };

View File

@ -193,6 +193,63 @@ impl Blocktree {
false false
} }
// silently deletes all blocktree column families starting at the given slot
fn delete_all_columns(&self, starting_slot: u64) {
match self.meta_cf.force_delete_all(Some(starting_slot)) {
Ok(_) => (),
Err(e) => error!(
"Error: {:?} while deleting meta_cf for slot {:?}",
e, starting_slot
),
}
match self.data_cf.force_delete_all(Some((starting_slot, 0))) {
Ok(_) => (),
Err(e) => error!(
"Error: {:?} while deleting data_cf for slot {:?}",
e, starting_slot
),
}
match self
.erasure_meta_cf
.force_delete_all(Some((starting_slot, 0)))
{
Ok(_) => (),
Err(e) => error!(
"Error: {:?} while deleting erasure_meta_cf for slot {:?}",
e, starting_slot
),
}
match self.erasure_cf.force_delete_all(Some((starting_slot, 0))) {
Ok(_) => (),
Err(e) => error!(
"Error: {:?} while deleting erasure_cf for slot {:?}",
e, starting_slot
),
}
match self.orphans_cf.force_delete_all(Some(starting_slot)) {
Ok(_) => (),
Err(e) => error!(
"Error: {:?} while deleting orphans_cf for slot {:?}",
e, starting_slot
),
}
match self.dead_slots_cf.force_delete_all(Some(starting_slot)) {
Ok(_) => (),
Err(e) => error!(
"Error: {:?} while deleting dead_slots_cf for slot {:?}",
e, starting_slot
),
}
let roots_cf = self.db.column::<cf::Root>();
match roots_cf.force_delete_all(Some(starting_slot)) {
Ok(_) => (),
Err(e) => error!(
"Error: {:?} while deleting roots_cf for slot {:?}",
e, starting_slot
),
}
}
pub fn erasure_meta(&self, slot: u64, set_index: u64) -> Result<Option<ErasureMeta>> { pub fn erasure_meta(&self, slot: u64, set_index: u64) -> Result<Option<ErasureMeta>> {
self.erasure_meta_cf.get((slot, set_index)) self.erasure_meta_cf.get((slot, set_index))
} }
@ -201,7 +258,7 @@ impl Blocktree {
self.orphans_cf.get(slot) self.orphans_cf.get(slot)
} }
pub fn rooted_slot_iterator<'a>(&'a self, slot: u64) -> Result<RootedSlotIterator<'a>> { pub fn rooted_slot_iterator(&self, slot: u64) -> Result<RootedSlotIterator> {
RootedSlotIterator::new(slot, self) RootedSlotIterator::new(slot, self)
} }
@ -527,6 +584,13 @@ impl Blocktree {
self.data_cf.get_bytes((slot, index)) self.data_cf.get_bytes((slot, index))
} }
/// Manually update the meta for a slot.
/// Can interfere with automatic meta update and potentially break chaining.
/// Dangerous. Use with care.
pub fn put_meta_bytes(&self, slot: u64, bytes: &[u8]) -> Result<()> {
self.meta_cf.put_bytes(slot, bytes)
}
/// For benchmarks, testing, and setup. /// For benchmarks, testing, and setup.
/// Does no metadata tracking. Use with care. /// Does no metadata tracking. Use with care.
pub fn put_data_blob_bytes(&self, slot: u64, index: u64, bytes: &[u8]) -> Result<()> { pub fn put_data_blob_bytes(&self, slot: u64, index: u64, bytes: &[u8]) -> Result<()> {
@ -722,70 +786,6 @@ impl Blocktree {
iter.map(|(_, blob_data)| Blob::new(&blob_data)) iter.map(|(_, blob_data)| Blob::new(&blob_data))
} }
/// Return an iterator for all the entries in the given file.
pub fn read_ledger(&self) -> Result<impl Iterator<Item = Entry>> {
use crate::entry::EntrySlice;
use std::collections::VecDeque;
struct EntryIterator {
db_iterator: Cursor<cf::Data>,
// TODO: remove me when replay_stage is iterating by block (Blocktree)
// this verification is duplicating that of replay_stage, which
// can do this in parallel
blockhash: Option<Hash>,
// https://github.com/rust-rocksdb/rust-rocksdb/issues/234
// rocksdb issue: the _blocktree member must be lower in the struct to prevent a crash
// when the db_iterator member above is dropped.
// _blocktree is unused, but dropping _blocktree results in a broken db_iterator
// you have to hold the database open in order to iterate over it, and in order
// for db_iterator to be able to run Drop
// _blocktree: Blocktree,
entries: VecDeque<Entry>,
}
impl Iterator for EntryIterator {
type Item = Entry;
fn next(&mut self) -> Option<Entry> {
if !self.entries.is_empty() {
return Some(self.entries.pop_front().unwrap());
}
if self.db_iterator.valid() {
if let Some(value) = self.db_iterator.value_bytes() {
if let Ok(next_entries) =
deserialize::<Vec<Entry>>(&value[BLOB_HEADER_SIZE..])
{
if let Some(blockhash) = self.blockhash {
if !next_entries.verify(&blockhash) {
return None;
}
}
self.db_iterator.next();
if next_entries.is_empty() {
return None;
}
self.entries = VecDeque::from(next_entries);
let entry = self.entries.pop_front().unwrap();
self.blockhash = Some(entry.hash);
return Some(entry);
}
}
}
None
}
}
let mut db_iterator = self.db.cursor::<cf::Data>()?;
db_iterator.seek_to_first();
Ok(EntryIterator {
entries: VecDeque::new(),
db_iterator,
blockhash: None,
})
}
pub fn get_slot_entries_with_blob_count( pub fn get_slot_entries_with_blob_count(
&self, &self,
slot: u64, slot: u64,
@ -912,6 +912,39 @@ impl Blocktree {
batch_processor.write(batch)?; batch_processor.write(batch)?;
Ok(()) Ok(())
} }
/// Prune blocktree such that slots higher than `target_slot` are deleted and all references to
/// higher slots are removed
pub fn prune(&self, target_slot: u64) {
let mut meta = self
.meta(target_slot)
.expect("couldn't read slot meta")
.expect("no meta for target slot");
meta.next_slots.clear();
self.put_meta_bytes(
target_slot,
&bincode::serialize(&meta).expect("couldn't get meta bytes"),
)
.expect("unable to update meta for target slot");
self.delete_all_columns(target_slot + 1);
// fixup anything that refers to non-root slots and delete the rest
for (slot, mut meta) in self
.slot_meta_iterator(0)
.expect("unable to iterate over meta")
{
if slot > target_slot {
break;
}
meta.next_slots.retain(|slot| *slot <= target_slot);
self.put_meta_bytes(
slot,
&bincode::serialize(&meta).expect("couldn't update meta"),
)
.expect("couldn't update meta");
}
}
} }
fn insert_data_blob_batch<'a, I>( fn insert_data_blob_batch<'a, I>(
@ -1662,9 +1695,7 @@ pub fn tmp_copy_blocktree(from: &str, name: &str) -> String {
#[cfg(test)] #[cfg(test)]
pub mod tests { pub mod tests {
use super::*; use super::*;
use crate::entry::{ use crate::entry::{create_ticks, make_tiny_test_entries, Entry, EntrySlice};
create_ticks, make_tiny_test_entries, make_tiny_test_entries_from_hash, Entry, EntrySlice,
};
use crate::erasure::{CodingGenerator, NUM_CODING, NUM_DATA}; use crate::erasure::{CodingGenerator, NUM_CODING, NUM_DATA};
use crate::packet; use crate::packet;
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
@ -2192,59 +2223,6 @@ pub mod tests {
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
} }
#[test]
pub fn test_genesis_and_entry_iterator() {
let entries = make_tiny_test_entries_from_hash(&Hash::default(), 10);
let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
{
genesis(&ledger_path, &Keypair::new(), &entries).unwrap();
let ledger = Blocktree::open(&ledger_path).expect("open failed");
let read_entries: Vec<Entry> =
ledger.read_ledger().expect("read_ledger failed").collect();
assert!(read_entries.verify(&Hash::default()));
assert_eq!(entries, read_entries);
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
pub fn test_entry_iterator_up_to_consumed() {
let entries = make_tiny_test_entries_from_hash(&Hash::default(), 3);
let ledger_path = get_tmp_ledger_path("test_genesis_and_entry_iterator");
{
// put entries except last 2 into ledger
genesis(&ledger_path, &Keypair::new(), &entries[..entries.len() - 2]).unwrap();
let ledger = Blocktree::open(&ledger_path).expect("open failed");
// now write the last entry, ledger has a hole in it one before the end
// +-+-+-+-+-+-+-+ +-+
// | | | | | | | | | |
// +-+-+-+-+-+-+-+ +-+
ledger
.write_entries(
0u64,
0,
(entries.len() - 1) as u64,
16,
&entries[entries.len() - 1..],
)
.unwrap();
let read_entries: Vec<Entry> =
ledger.read_ledger().expect("read_ledger failed").collect();
assert!(read_entries.verify(&Hash::default()));
// enumeration should stop at the hole
assert_eq!(entries[..entries.len() - 2].to_vec(), read_entries);
}
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test] #[test]
pub fn test_new_blobs_signal() { pub fn test_new_blobs_signal() {
// Initialize ledger // Initialize ledger
@ -3179,6 +3157,66 @@ pub mod tests {
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction"); Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
} }
#[test]
fn test_prune() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let (blobs, _) = make_many_slot_entries(0, 50, 6);
blocktree.write_blobs(blobs).unwrap();
blocktree
.slot_meta_iterator(0)
.unwrap()
.for_each(|(_, meta)| assert_eq!(meta.last_index, 5));
blocktree.prune(5);
blocktree
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, meta)| {
assert!(slot <= 5);
assert_eq!(meta.last_index, 5)
});
let data_iter = blocktree.data_cf.iter(Some((0, 0))).unwrap();
for ((slot, _), _) in data_iter {
if slot > 5 {
assert!(false);
}
}
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[should_panic]
#[test]
fn test_prune_out_of_bounds() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// slot 5 does not exist, prune should panic
blocktree.prune(5);
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
fn test_iter_bounds() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// slot 5 does not exist, iter should be ok and should be a noop
blocktree
.slot_meta_iterator(5)
.unwrap()
.for_each(|_| assert!(false));
drop(blocktree);
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
mod erasure { mod erasure {
use super::*; use super::*;
use crate::blocktree::meta::ErasureMetaStatus; use crate::blocktree::meta::ErasureMetaStatus;

View File

@ -405,6 +405,16 @@ where
Ok(iter.map(|(key, value)| (C::index(&key), value))) Ok(iter.map(|(key, value)| (C::index(&key), value)))
} }
//TODO add a delete_until that goes the other way
pub fn force_delete_all(&self, start_from: Option<C::Index>) -> Result<()> {
let iter = self.iter(start_from)?;
iter.for_each(|(index, _)| match self.delete(index) {
Ok(_) => (),
Err(e) => error!("Error: {:?} while deleting {:?}", e, C::NAME),
});
Ok(())
}
#[inline] #[inline]
pub fn handle(&self) -> B::ColumnFamily { pub fn handle(&self) -> B::ColumnFamily {
self.backend.cf_handle(C::NAME).clone() self.backend.cf_handle(C::NAME).clone()

View File

@ -142,6 +142,7 @@ pub fn process_blocktree(
genesis_block: &GenesisBlock, genesis_block: &GenesisBlock,
blocktree: &Blocktree, blocktree: &Blocktree,
account_paths: Option<String>, account_paths: Option<String>,
verify_ledger: bool,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> { ) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
let now = Instant::now(); let now = Instant::now();
info!("processing ledger..."); info!("processing ledger...");
@ -204,7 +205,7 @@ pub fn process_blocktree(
} }
if !entries.is_empty() { if !entries.is_empty() {
if !entries.verify(&last_entry_hash) { if verify_ledger && !entries.verify(&last_entry_hash) {
warn!( warn!(
"Ledger proof of history failed at slot: {}, entry: {}", "Ledger proof of history failed at slot: {}, entry: {}",
slot, entry_height slot, entry_height
@ -373,7 +374,7 @@ pub mod tests {
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash); fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);
let (mut _bank_forks, bank_forks_info, _) = let (mut _bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info.len(), 1);
assert_eq!( assert_eq!(
@ -432,7 +433,7 @@ pub mod tests {
blocktree.set_roots(&[4, 1, 0]).unwrap(); blocktree.set_roots(&[4, 1, 0]).unwrap();
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root
@ -506,7 +507,7 @@ pub mod tests {
blocktree.set_roots(&[0, 1]).unwrap(); blocktree.set_roots(&[0, 1]).unwrap();
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 2); // There are two forks assert_eq!(bank_forks_info.len(), 2); // There are two forks
assert_eq!( assert_eq!(
@ -587,7 +588,7 @@ pub mod tests {
// Check that we can properly restart the ledger / leader scheduler doesn't fail // Check that we can properly restart the ledger / leader scheduler doesn't fail
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); // There is one fork assert_eq!(bank_forks_info.len(), 1); // There is one fork
assert_eq!( assert_eq!(
@ -723,7 +724,7 @@ pub mod tests {
.unwrap(); .unwrap();
let entry_height = genesis_block.ticks_per_slot + entries.len() as u64; let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info.len(), 1);
assert_eq!(bank_forks.root(), 0); assert_eq!(bank_forks.root(), 0);
@ -754,7 +755,7 @@ pub mod tests {
let blocktree = Blocktree::open(&ledger_path).unwrap(); let blocktree = Blocktree::open(&ledger_path).unwrap();
let (bank_forks, bank_forks_info, _) = let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap(); process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); assert_eq!(bank_forks_info.len(), 1);
assert_eq!( assert_eq!(

View File

@ -133,7 +133,7 @@ mod tests {
hasher.hash(&buf[..size]); hasher.hash(&buf[..size]);
// golden needs to be updated if blob stuff changes.... // golden needs to be updated if blob stuff changes....
let golden: Hash = "E2HZjSC6VgH4nmEiTbMDATTeBcFjwSYz7QYvU7doGNhD" let golden: Hash = "37YzrTgiFRGQG1EoMZVecnGqxEK7UGxEQeBSdGMJcKqp"
.parse() .parse()
.unwrap(); .unwrap();

View File

@ -748,7 +748,7 @@ impl ClusterInfo {
/// retransmit messages to a list of nodes /// retransmit messages to a list of nodes
/// # Remarks /// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to` /// We need to avoid having obj locked while doing a io, such as the `send_to`
pub fn retransmit_to( pub fn retransmit_to(
obj: &Arc<RwLock<Self>>, obj: &Arc<RwLock<Self>>,
peers: &[ContactInfo], peers: &[ContactInfo],
@ -1092,7 +1092,7 @@ impl ClusterInfo {
if caller.contact_info().is_none() { if caller.contact_info().is_none() {
return vec![]; return vec![];
} }
let mut from = caller.contact_info().cloned().unwrap(); let from = caller.contact_info().unwrap();
if from.id == self_id { if from.id == self_id {
warn!( warn!(
"PullRequest ignored, I'm talking to myself: me={} remoteme={}", "PullRequest ignored, I'm talking to myself: me={} remoteme={}",
@ -1110,15 +1110,10 @@ impl ClusterInfo {
let len = data.len(); let len = data.len();
trace!("get updates since response {}", len); trace!("get updates since response {}", len);
let rsp = Protocol::PullResponse(self_id, data); let rsp = Protocol::PullResponse(self_id, data);
// The remote node may not know its public IP:PORT. Record what it looks like to us. // The remote node may not know its public IP:PORT. Instead of responding to the caller's
// This may or may not be correct for everybody, but it's better than leaving the remote with // gossip addr, respond to the origin addr.
// an unspecified address in our table
if from.gossip.ip().is_unspecified() {
inc_new_counter_debug!("cluster_info-window-request-updates-unspec-gossip", 1);
from.gossip = *from_addr;
}
inc_new_counter_debug!("cluster_info-pull_request-rsp", len); inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
to_shared_blob(rsp, from.gossip).ok().into_iter().collect() to_shared_blob(rsp, *from_addr).ok().into_iter().collect()
} }
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) { fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
let len = data.len(); let len = data.len();

View File

@ -234,6 +234,7 @@ impl ClusterInfoRepairListener {
let _ = Self::serve_repairs_to_repairee( let _ = Self::serve_repairs_to_repairee(
my_pubkey, my_pubkey,
repairee_pubkey,
my_root, my_root,
blocktree, blocktree,
&repairee_epoch_slots, &repairee_epoch_slots,
@ -249,8 +250,10 @@ impl ClusterInfoRepairListener {
Ok(()) Ok(())
} }
#[allow(clippy::too_many_arguments)]
fn serve_repairs_to_repairee( fn serve_repairs_to_repairee(
my_pubkey: &Pubkey, my_pubkey: &Pubkey,
repairee_pubkey: &Pubkey,
my_root: u64, my_root: u64,
blocktree: &Blocktree, blocktree: &Blocktree,
repairee_epoch_slots: &EpochSlots, repairee_epoch_slots: &EpochSlots,
@ -260,14 +263,16 @@ impl ClusterInfoRepairListener {
num_slots_to_repair: usize, num_slots_to_repair: usize,
epoch_schedule: &EpochSchedule, epoch_schedule: &EpochSchedule,
) -> Result<()> { ) -> Result<()> {
let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root + 1); let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root);
if slot_iter.is_err() { if slot_iter.is_err() {
warn!("Root for repairee is on different fork OR replay_stage hasn't marked this slot as root yet"); info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
my_root, repairee_epoch_slots.root, repairee_pubkey,
);
return Ok(()); return Ok(());
} }
let slot_iter = slot_iter?; let mut slot_iter = slot_iter?;
let mut total_data_blobs_sent = 0; let mut total_data_blobs_sent = 0;
let mut total_coding_blobs_sent = 0; let mut total_coding_blobs_sent = 0;
@ -276,6 +281,10 @@ impl ClusterInfoRepairListener {
epoch_schedule.get_stakers_epoch(repairee_epoch_slots.root); epoch_schedule.get_stakers_epoch(repairee_epoch_slots.root);
let max_confirmed_repairee_slot = let max_confirmed_repairee_slot =
epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch); epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch);
// Skip the first slot in the iterator because we know it's the root slot which the repairee
// already has
slot_iter.next();
for (slot, slot_meta) in slot_iter { for (slot, slot_meta) in slot_iter {
if slot > my_root if slot > my_root
|| num_slots_repaired >= num_slots_to_repair || num_slots_repaired >= num_slots_to_repair
@ -650,6 +659,7 @@ mod tests {
for repairman_pubkey in &eligible_repairmen { for repairman_pubkey in &eligible_repairmen {
ClusterInfoRepairListener::serve_repairs_to_repairee( ClusterInfoRepairListener::serve_repairs_to_repairee(
&repairman_pubkey, &repairman_pubkey,
&mock_repairee.id,
num_slots - 1, num_slots - 1,
&blocktree, &blocktree,
&repairee_epoch_slots, &repairee_epoch_slots,
@ -719,6 +729,7 @@ mod tests {
ClusterInfoRepairListener::serve_repairs_to_repairee( ClusterInfoRepairListener::serve_repairs_to_repairee(
&my_pubkey, &my_pubkey,
&mock_repairee.id,
total_slots - 1, total_slots - 1,
&blocktree, &blocktree,
&repairee_epoch_slots, &repairee_epoch_slots,
@ -740,6 +751,7 @@ mod tests {
EpochSlots::new(mock_repairee.id, stakers_slot_offset, repairee_slots, 1); EpochSlots::new(mock_repairee.id, stakers_slot_offset, repairee_slots, 1);
ClusterInfoRepairListener::serve_repairs_to_repairee( ClusterInfoRepairListener::serve_repairs_to_repairee(
&my_pubkey, &my_pubkey,
&mock_repairee.id,
total_slots - 1, total_slots - 1,
&blocktree, &blocktree,
&repairee_epoch_slots, &repairee_epoch_slots,

View File

@ -226,11 +226,13 @@ impl CodingGenerator {
let index = data_blob.index(); let index = data_blob.index();
let slot = data_blob.slot(); let slot = data_blob.slot();
let id = data_blob.id(); let id = data_blob.id();
let version = data_blob.version();
let mut coding_blob = Blob::default(); let mut coding_blob = Blob::default();
coding_blob.set_index(index); coding_blob.set_index(index);
coding_blob.set_slot(slot); coding_blob.set_slot(slot);
coding_blob.set_id(&id); coding_blob.set_id(&id);
coding_blob.set_version(version);
coding_blob.set_size(max_data_size); coding_blob.set_size(max_data_size);
coding_blob.set_coding(); coding_blob.set_coding();

View File

@ -29,6 +29,10 @@ impl LeaderSchedule {
.collect(); .collect();
Self { slot_leaders } Self { slot_leaders }
} }
pub(crate) fn get_slot_leaders(&self) -> &[Pubkey] {
&self.slot_leaders
}
} }
impl Index<u64> for LeaderSchedule { impl Index<u64> for LeaderSchedule {

View File

@ -5,16 +5,16 @@ use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS; use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS;
/// Return the leader schedule for the given epoch. /// Return the leader schedule for the given epoch.
pub fn leader_schedule(epoch_height: u64, bank: &Bank) -> Option<LeaderSchedule> { pub fn leader_schedule(epoch: u64, bank: &Bank) -> Option<LeaderSchedule> {
staking_utils::staked_nodes_at_epoch(bank, epoch_height).map(|stakes| { staking_utils::staked_nodes_at_epoch(bank, epoch).map(|stakes| {
let mut seed = [0u8; 32]; let mut seed = [0u8; 32];
seed[0..8].copy_from_slice(&epoch_height.to_le_bytes()); seed[0..8].copy_from_slice(&epoch.to_le_bytes());
let mut stakes: Vec<_> = stakes.into_iter().collect(); let mut stakes: Vec<_> = stakes.into_iter().collect();
sort_stakes(&mut stakes); sort_stakes(&mut stakes);
LeaderSchedule::new( LeaderSchedule::new(
&stakes, &stakes,
seed, seed,
bank.get_slots_in_epoch(epoch_height), bank.get_slots_in_epoch(epoch),
NUM_CONSECUTIVE_LEADER_SLOTS, NUM_CONSECUTIVE_LEADER_SLOTS,
) )
}) })

View File

@ -176,6 +176,7 @@ impl LocalCluster {
&leader_voting_keypair, &leader_voting_keypair,
&leader_storage_keypair, &leader_storage_keypair,
None, None,
true,
&config.validator_configs[0], &config.validator_configs[0],
); );
@ -308,6 +309,7 @@ impl LocalCluster {
&voting_keypair, &voting_keypair,
&storage_keypair, &storage_keypair,
Some(&self.entry_point_info), Some(&self.entry_point_info),
true,
&validator_config, &validator_config,
); );
@ -561,6 +563,7 @@ impl Cluster for LocalCluster {
&fullnode_info.voting_keypair, &fullnode_info.voting_keypair,
&fullnode_info.storage_keypair, &fullnode_info.storage_keypair,
None, None,
true,
config, config,
); );

View File

@ -341,7 +341,8 @@ macro_rules! range {
const SIGNATURE_RANGE: std::ops::Range<usize> = range!(0, Signature); const SIGNATURE_RANGE: std::ops::Range<usize> = range!(0, Signature);
const FORWARDED_RANGE: std::ops::Range<usize> = range!(SIGNATURE_RANGE.end, bool); const FORWARDED_RANGE: std::ops::Range<usize> = range!(SIGNATURE_RANGE.end, bool);
const PARENT_RANGE: std::ops::Range<usize> = range!(FORWARDED_RANGE.end, u64); const PARENT_RANGE: std::ops::Range<usize> = range!(FORWARDED_RANGE.end, u64);
const SLOT_RANGE: std::ops::Range<usize> = range!(PARENT_RANGE.end, u64); const VERSION_RANGE: std::ops::Range<usize> = range!(PARENT_RANGE.end, u64);
const SLOT_RANGE: std::ops::Range<usize> = range!(VERSION_RANGE.end, u64);
const INDEX_RANGE: std::ops::Range<usize> = range!(SLOT_RANGE.end, u64); const INDEX_RANGE: std::ops::Range<usize> = range!(SLOT_RANGE.end, u64);
const ID_RANGE: std::ops::Range<usize> = range!(INDEX_RANGE.end, Pubkey); const ID_RANGE: std::ops::Range<usize> = range!(INDEX_RANGE.end, Pubkey);
const FLAGS_RANGE: std::ops::Range<usize> = range!(ID_RANGE.end, u32); const FLAGS_RANGE: std::ops::Range<usize> = range!(ID_RANGE.end, u32);
@ -391,6 +392,12 @@ impl Blob {
pub fn set_parent(&mut self, ix: u64) { pub fn set_parent(&mut self, ix: u64) {
LittleEndian::write_u64(&mut self.data[PARENT_RANGE], ix); LittleEndian::write_u64(&mut self.data[PARENT_RANGE], ix);
} }
pub fn version(&self) -> u64 {
LittleEndian::read_u64(&self.data[VERSION_RANGE])
}
pub fn set_version(&mut self, version: u64) {
LittleEndian::write_u64(&mut self.data[VERSION_RANGE], version);
}
pub fn slot(&self) -> u64 { pub fn slot(&self) -> u64 {
LittleEndian::read_u64(&self.data[SLOT_RANGE]) LittleEndian::read_u64(&self.data[SLOT_RANGE])
} }
@ -862,4 +869,12 @@ mod tests {
b.sign(&k); b.sign(&k);
assert!(b.verify()); assert!(b.verify());
} }
#[test]
fn test_version() {
let mut b = Blob::default();
assert_eq!(b.version(), 0);
b.set_version(1);
assert_eq!(b.version(), 1);
}
} }

View File

@ -112,6 +112,8 @@ impl ReplayStage {
.spawn(move || { .spawn(move || {
let _exit = Finalizer::new(exit_.clone()); let _exit = Finalizer::new(exit_.clone());
let mut progress = HashMap::new(); let mut progress = HashMap::new();
let mut current_leader = None;
loop { loop {
let now = Instant::now(); let now = Instant::now();
// Stop getting entries if we get exit signal // Stop getting entries if we get exit signal
@ -124,17 +126,15 @@ impl ReplayStage {
&mut bank_forks.write().unwrap(), &mut bank_forks.write().unwrap(),
&leader_schedule_cache, &leader_schedule_cache,
); );
let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some(); let mut is_tpu_bank_active = poh_recorder.lock().unwrap().bank().is_some();
let did_complete_bank = Self::replay_active_banks(
Self::replay_active_banks(
&blocktree, &blocktree,
&bank_forks, &bank_forks,
&my_pubkey, &my_pubkey,
&mut ticks_per_slot, &mut ticks_per_slot,
&mut progress, &mut progress,
&slot_full_sender, &slot_full_sender,
)?; );
if ticks_per_slot == 0 { if ticks_per_slot == 0 {
let frozen_banks = bank_forks.read().unwrap().frozen_banks(); let frozen_banks = bank_forks.read().unwrap().frozen_banks();
@ -148,6 +148,17 @@ impl ReplayStage {
if let Some((_, bank)) = votable.last() { if let Some((_, bank)) = votable.last() {
subscriptions.notify_subscribers(bank.slot(), &bank_forks); subscriptions.notify_subscribers(bank.slot(), &bank_forks);
if let Some(new_leader) =
leader_schedule_cache.slot_leader_at(bank.slot(), Some(&bank))
{
Self::log_leader_change(
&my_pubkey,
bank.slot(),
&mut current_leader,
&new_leader,
);
}
Self::handle_votable_bank( Self::handle_votable_bank(
&bank, &bank,
&bank_forks, &bank_forks,
@ -197,12 +208,25 @@ impl ReplayStage {
grace_ticks, grace_ticks,
&leader_schedule_cache, &leader_schedule_cache,
); );
if let Some(bank) = poh_recorder.lock().unwrap().bank() {
Self::log_leader_change(
&my_pubkey,
bank.slot(),
&mut current_leader,
&my_pubkey,
);
}
} }
inc_new_counter_info!( inc_new_counter_info!(
"replicate_stage-duration", "replicate_stage-duration",
duration_as_ms(&now.elapsed()) as usize duration_as_ms(&now.elapsed()) as usize
); );
if did_complete_bank {
//just processed a bank, skip the signal; maybe there's more slots available
continue;
}
let timer = Duration::from_millis(100); let timer = Duration::from_millis(100);
let result = ledger_signal_receiver.recv_timeout(timer); let result = ledger_signal_receiver.recv_timeout(timer);
match result { match result {
@ -216,6 +240,31 @@ impl ReplayStage {
.unwrap(); .unwrap();
(Self { t_replay }, slot_full_receiver, root_bank_receiver) (Self { t_replay }, slot_full_receiver, root_bank_receiver)
} }
fn log_leader_change(
my_pubkey: &Pubkey,
bank_slot: u64,
current_leader: &mut Option<Pubkey>,
new_leader: &Pubkey,
) {
if let Some(ref current_leader) = current_leader {
if current_leader != new_leader {
let msg = if current_leader == my_pubkey {
"I am no longer the leader"
} else if new_leader == my_pubkey {
"I am the new leader"
} else {
""
};
info!(
"LEADER CHANGE at slot: {} leader: {}. {}",
bank_slot, new_leader, msg
);
}
}
current_leader.replace(new_leader.to_owned());
}
pub fn start_leader( pub fn start_leader(
my_pubkey: &Pubkey, my_pubkey: &Pubkey,
bank_forks: &Arc<RwLock<BankForks>>, bank_forks: &Arc<RwLock<BankForks>>,
@ -237,7 +286,8 @@ impl ReplayStage {
}; };
assert!(parent.is_frozen()); assert!(parent.is_frozen());
leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) leader_schedule_cache
.slot_leader_at(poh_slot, Some(&parent))
.map(|next_leader| { .map(|next_leader| {
debug!( debug!(
"me: {} leader {} at poh slot {}", "me: {} leader {} at poh slot {}",
@ -249,7 +299,8 @@ impl ReplayStage {
datapoint_warn!( datapoint_warn!(
"replay_stage-new_leader", "replay_stage-new_leader",
("count", poh_slot, i64), ("count", poh_slot, i64),
("grace", grace_ticks, i64)); ("grace", grace_ticks, i64)
);
let tpu_bank = Bank::new_from_parent(&parent, my_pubkey, poh_slot); let tpu_bank = Bank::new_from_parent(&parent, my_pubkey, poh_slot);
bank_forks.write().unwrap().insert(tpu_bank); bank_forks.write().unwrap().insert(tpu_bank);
if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() { if let Some(tpu_bank) = bank_forks.read().unwrap().get(poh_slot).cloned() {
@ -257,12 +308,6 @@ impl ReplayStage {
bank_forks.read().unwrap().working_bank().slot(), bank_forks.read().unwrap().working_bank().slot(),
tpu_bank.slot() tpu_bank.slot()
); );
debug!(
"poh_recorder new working bank: me: {} next_slot: {} next_leader: {}",
my_pubkey,
tpu_bank.slot(),
next_leader
);
poh_recorder.lock().unwrap().set_bank(&tpu_bank); poh_recorder.lock().unwrap().set_bank(&tpu_bank);
} }
} }
@ -391,11 +436,18 @@ impl ReplayStage {
next_leader_slot, next_leader_slot,
ticks_per_slot, ticks_per_slot,
); );
debug!(
"{:?} voted and reset poh at {}. next leader slot {:?}", let next_leader_msg = if let Some(next_leader_slot) = next_leader_slot {
format!("My next leader slot is #{}", next_leader_slot)
} else {
"I am not in the upcoming leader schedule yet".to_owned()
};
info!(
"{} voted and reset poh at {}. {}",
my_pubkey, my_pubkey,
bank.tick_height(), bank.tick_height(),
next_leader_slot next_leader_msg,
); );
} }
@ -406,7 +458,8 @@ impl ReplayStage {
ticks_per_slot: &mut u64, ticks_per_slot: &mut u64,
progress: &mut HashMap<u64, ForkProgress>, progress: &mut HashMap<u64, ForkProgress>,
slot_full_sender: &Sender<(u64, Pubkey)>, slot_full_sender: &Sender<(u64, Pubkey)>,
) -> Result<()> { ) -> bool {
let mut did_complete_bank = false;
let active_banks = bank_forks.read().unwrap().active_banks(); let active_banks = bank_forks.read().unwrap().active_banks();
trace!("active banks {:?}", active_banks); trace!("active banks {:?}", active_banks);
@ -429,11 +482,11 @@ impl ReplayStage {
} }
let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1; let max_tick_height = (*bank_slot + 1) * bank.ticks_per_slot() - 1;
if bank.tick_height() == max_tick_height { if bank.tick_height() == max_tick_height {
did_complete_bank = true;
Self::process_completed_bank(my_pubkey, bank, slot_full_sender); Self::process_completed_bank(my_pubkey, bank, slot_full_sender);
} }
} }
did_complete_bank
Ok(())
} }
fn generate_votable_banks( fn generate_votable_banks(
@ -525,12 +578,12 @@ impl ReplayStage {
.map(|s| s.is_frozen()) .map(|s| s.is_frozen())
.unwrap_or(true) .unwrap_or(true)
{ {
info!("validator fork confirmed {} {}", *slot, duration); info!("validator fork confirmed {} {}ms", *slot, duration);
datapoint_warn!("validator-confirmation", ("duration_ms", duration, i64)); datapoint_warn!("validator-confirmation", ("duration_ms", duration, i64));
false false
} else { } else {
debug!( debug!(
"validator fork not confirmed {} {} {:?}", "validator fork not confirmed {} {}ms {:?}",
*slot, *slot,
duration, duration,
stake_lockouts.get(slot) stake_lockouts.get(slot)

View File

@ -23,7 +23,6 @@ use solana_sdk::account_utils::State;
use solana_sdk::client::{AsyncClient, SyncClient}; use solana_sdk::client::{AsyncClient, SyncClient};
use solana_sdk::hash::{Hash, Hasher}; use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::message::Message; use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature}; use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::timing::timestamp; use solana_sdk::timing::timestamp;
use solana_sdk::transaction::Transaction; use solana_sdk::transaction::Transaction;
@ -303,7 +302,7 @@ impl Replicator {
}) })
} }
pub fn run(&mut self, mining_pool_pubkey: Pubkey) { pub fn run(&mut self) {
info!("waiting for ledger download"); info!("waiting for ledger download");
self.thread_handles.pop().unwrap().join().unwrap(); self.thread_handles.pop().unwrap().join().unwrap();
self.encrypt_ledger() self.encrypt_ledger()
@ -330,11 +329,11 @@ impl Replicator {
} }
}; };
self.blockhash = storage_blockhash; self.blockhash = storage_blockhash;
self.redeem_rewards(&mining_pool_pubkey); self.redeem_rewards();
} }
} }
fn redeem_rewards(&self, mining_pool_pubkey: &Pubkey) { fn redeem_rewards(&self) {
let nodes = self.cluster_info.read().unwrap().tvu_peers(); let nodes = self.cluster_info.read().unwrap().tvu_peers();
let client = crate::gossip_service::get_client(&nodes); let client = crate::gossip_service::get_client(&nodes);
@ -347,7 +346,6 @@ impl Replicator {
let ix = storage_instruction::claim_reward( let ix = storage_instruction::claim_reward(
&self.keypair.pubkey(), &self.keypair.pubkey(),
&self.storage_keypair.pubkey(), &self.storage_keypair.pubkey(),
mining_pool_pubkey,
); );
let message = Message::new_with_payer(vec![ix], Some(&self.keypair.pubkey())); let message = Message::new_with_payer(vec![ix], Some(&self.keypair.pubkey()));
if let Err(e) = client.send_message(&[&self.keypair], message) { if let Err(e) = client.send_message(&[&self.keypair], message) {
@ -468,7 +466,15 @@ impl Replicator {
// check if the storage account exists // check if the storage account exists
let balance = client.poll_get_balance(&storage_keypair.pubkey()); let balance = client.poll_get_balance(&storage_keypair.pubkey());
if balance.is_err() || balance.unwrap() == 0 { if balance.is_err() || balance.unwrap() == 0 {
let (blockhash, _fee_calculator) = client.get_recent_blockhash().expect("blockhash"); let blockhash = match client.get_recent_blockhash() {
Ok((blockhash, _)) => blockhash,
Err(_) => {
return Err(Error::IO(<io::Error>::new(
io::ErrorKind::Other,
"unable to get recent blockhash, can't submit proof",
)))
}
};
let ix = storage_instruction::create_replicator_storage_account( let ix = storage_instruction::create_replicator_storage_account(
&keypair.pubkey(), &keypair.pubkey(),
@ -495,16 +501,25 @@ impl Replicator {
// No point if we've got no storage account... // No point if we've got no storage account...
let nodes = self.cluster_info.read().unwrap().tvu_peers(); let nodes = self.cluster_info.read().unwrap().tvu_peers();
let client = crate::gossip_service::get_client(&nodes); let client = crate::gossip_service::get_client(&nodes);
assert!( let storage_balance = client.poll_get_balance(&self.storage_keypair.pubkey());
client if storage_balance.is_err() || storage_balance.unwrap() == 0 {
.poll_get_balance(&self.storage_keypair.pubkey()) error!("Unable to submit mining proof, no storage account");
.unwrap() return;
> 0 }
);
// ...or no lamports for fees // ...or no lamports for fees
assert!(client.poll_get_balance(&self.keypair.pubkey()).unwrap() > 0); let balance = client.poll_get_balance(&self.keypair.pubkey());
if balance.is_err() || balance.unwrap() == 0 {
error!("Unable to submit mining proof, insufficient Replicator Account balance");
return;
}
let (blockhash, _) = client.get_recent_blockhash().expect("No recent blockhash"); let blockhash = match client.get_recent_blockhash() {
Ok((blockhash, _)) => blockhash,
Err(_) => {
error!("unable to get recent blockhash, can't submit proof");
return;
}
};
let instruction = storage_instruction::mining_proof( let instruction = storage_instruction::mining_proof(
&self.storage_keypair.pubkey(), &self.storage_keypair.pubkey(),
self.sha_state, self.sha_state,
@ -518,14 +533,14 @@ impl Replicator {
message, message,
blockhash, blockhash,
); );
client if let Err(err) = client.send_and_confirm_transaction(
.send_and_confirm_transaction( &[&self.keypair, &self.storage_keypair],
&[&self.keypair, &self.storage_keypair], &mut transaction,
&mut transaction, 10,
10, 0,
0, ) {
) error!("Error: {:?}; while sending mining proof", err);
.expect("transfer didn't work!"); }
} }
pub fn close(self) { pub fn close(self) {

View File

@ -70,6 +70,15 @@ impl JsonRpcRequestProcessor {
.ok_or_else(Error::invalid_request) .ok_or_else(Error::invalid_request)
} }
pub fn get_program_accounts(&self, program_id: &Pubkey) -> Result<Vec<(String, Account)>> {
Ok(self
.bank()
.get_program_accounts(&program_id)
.into_iter()
.map(|(pubkey, account)| (pubkey.to_string(), account))
.collect())
}
pub fn get_balance(&self, pubkey: &Pubkey) -> u64 { pub fn get_balance(&self, pubkey: &Pubkey) -> u64 {
self.bank().get_balance(&pubkey) self.bank().get_balance(&pubkey)
} }
@ -196,8 +205,21 @@ pub struct RpcVoteAccountInfo {
/// The current stake, in lamports, delegated to this vote account /// The current stake, in lamports, delegated to this vote account
pub stake: u64, pub stake: u64,
/// A 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout /// An 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout
pub commission: u32, pub commission: u8,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcEpochInfo {
/// The current epoch
pub epoch: u64,
/// The current slot, relative to the start of the current epoch
pub slot_index: u64,
/// The number of slots in this epoch
pub slots_in_epoch: u64,
} }
#[rpc(server)] #[rpc(server)]
@ -210,12 +232,21 @@ pub trait RpcSol {
#[rpc(meta, name = "getAccountInfo")] #[rpc(meta, name = "getAccountInfo")]
fn get_account_info(&self, _: Self::Metadata, _: String) -> Result<Account>; fn get_account_info(&self, _: Self::Metadata, _: String) -> Result<Account>;
#[rpc(meta, name = "getProgramAccounts")]
fn get_program_accounts(&self, _: Self::Metadata, _: String) -> Result<Vec<(String, Account)>>;
#[rpc(meta, name = "getBalance")] #[rpc(meta, name = "getBalance")]
fn get_balance(&self, _: Self::Metadata, _: String) -> Result<u64>; fn get_balance(&self, _: Self::Metadata, _: String) -> Result<u64>;
#[rpc(meta, name = "getClusterNodes")] #[rpc(meta, name = "getClusterNodes")]
fn get_cluster_nodes(&self, _: Self::Metadata) -> Result<Vec<RpcContactInfo>>; fn get_cluster_nodes(&self, _: Self::Metadata) -> Result<Vec<RpcContactInfo>>;
#[rpc(meta, name = "getEpochInfo")]
fn get_epoch_info(&self, _: Self::Metadata) -> Result<RpcEpochInfo>;
#[rpc(meta, name = "getLeaderSchedule")]
fn get_leader_schedule(&self, _: Self::Metadata) -> Result<Option<Vec<String>>>;
#[rpc(meta, name = "getRecentBlockhash")] #[rpc(meta, name = "getRecentBlockhash")]
fn get_recent_blockhash(&self, _: Self::Metadata) -> Result<(String, FeeCalculator)>; fn get_recent_blockhash(&self, _: Self::Metadata) -> Result<(String, FeeCalculator)>;
@ -297,6 +328,19 @@ impl RpcSol for RpcSolImpl {
.get_account_info(&pubkey) .get_account_info(&pubkey)
} }
fn get_program_accounts(
&self,
meta: Self::Metadata,
id: String,
) -> Result<Vec<(String, Account)>> {
debug!("get_program_accounts rpc request received: {:?}", id);
let program_id = verify_pubkey(id)?;
meta.request_processor
.read()
.unwrap()
.get_program_accounts(&program_id)
}
fn get_balance(&self, meta: Self::Metadata, id: String) -> Result<u64> { fn get_balance(&self, meta: Self::Metadata, id: String) -> Result<u64> {
debug!("get_balance rpc request received: {:?}", id); debug!("get_balance rpc request received: {:?}", id);
let pubkey = verify_pubkey(id)?; let pubkey = verify_pubkey(id)?;
@ -330,6 +374,32 @@ impl RpcSol for RpcSolImpl {
.collect()) .collect())
} }
fn get_epoch_info(&self, meta: Self::Metadata) -> Result<RpcEpochInfo> {
let bank = meta.request_processor.read().unwrap().bank();
let epoch_schedule = bank.epoch_schedule();
let (epoch, slot_index) = epoch_schedule.get_epoch_and_slot_index(bank.slot());
Ok(RpcEpochInfo {
epoch,
slot_index,
slots_in_epoch: epoch_schedule.get_slots_in_epoch(epoch),
})
}
fn get_leader_schedule(&self, meta: Self::Metadata) -> Result<Option<Vec<String>>> {
let bank = meta.request_processor.read().unwrap().bank();
Ok(
crate::leader_schedule_utils::leader_schedule(bank.epoch(), &bank).map(
|leader_schedule| {
leader_schedule
.get_slot_leaders()
.iter()
.map(|pubkey| pubkey.to_string())
.collect()
},
),
)
}
fn get_recent_blockhash(&self, meta: Self::Metadata) -> Result<(String, FeeCalculator)> { fn get_recent_blockhash(&self, meta: Self::Metadata) -> Result<(String, FeeCalculator)> {
debug!("get_recent_blockhash rpc request received"); debug!("get_recent_blockhash rpc request received");
Ok(meta Ok(meta
@ -535,7 +605,7 @@ mod tests {
fn start_rpc_handler_with_tx( fn start_rpc_handler_with_tx(
pubkey: &Pubkey, pubkey: &Pubkey,
) -> (MetaIoHandler<Meta>, Meta, Hash, Keypair, Pubkey) { ) -> (MetaIoHandler<Meta>, Meta, Arc<Bank>, Hash, Keypair, Pubkey) {
let (bank_forks, alice) = new_bank_forks(); let (bank_forks, alice) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank(); let bank = bank_forks.read().unwrap().working_bank();
let exit = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false));
@ -567,7 +637,7 @@ mod tests {
request_processor, request_processor,
cluster_info, cluster_info,
}; };
(io, meta, blockhash, alice, leader.id) (io, meta, bank, blockhash, alice, leader.id)
} }
#[test] #[test]
@ -595,7 +665,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_balance() { fn test_rpc_get_balance() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!( let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#,
@ -613,7 +684,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_cluster_nodes() { fn test_rpc_get_cluster_nodes() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, _blockhash, _alice, leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}}"#); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}}"#);
let res = io.handle_request_sync(&req, meta); let res = io.handle_request_sync(&req, meta);
@ -633,7 +705,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_slot_leader() { fn test_rpc_get_slot_leader() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}}"#); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}}"#);
let res = io.handle_request_sync(&req, meta); let res = io.handle_request_sync(&req, meta);
@ -649,7 +722,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_tx_count() { fn test_rpc_get_tx_count() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}}"#);
let res = io.handle_request_sync(&req, meta); let res = io.handle_request_sync(&req, meta);
@ -664,7 +738,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_total_supply() { fn test_rpc_get_total_supply() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTotalSupply"}}"#); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getTotalSupply"}}"#);
let rep = io.handle_request_sync(&req, meta); let rep = io.handle_request_sync(&req, meta);
@ -689,7 +764,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_account_info() { fn test_rpc_get_account_info() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!( let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#, r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#,
@ -713,10 +789,46 @@ mod tests {
assert_eq!(expected, result); assert_eq!(expected, result);
} }
#[test]
fn test_rpc_get_program_accounts() {
let bob = Keypair::new();
let (io, meta, bank, blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob.pubkey());
let new_program_id = Pubkey::new_rand();
let tx = system_transaction::assign(&bob, blockhash, &new_program_id);
bank.process_transaction(&tx).unwrap();
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getProgramAccounts","params":["{}"]}}"#,
new_program_id
);
let res = io.handle_request_sync(&req, meta);
let expected = format!(
r#"{{
"jsonrpc":"2.0",
"result":[["{}", {{
"owner": {:?},
"lamports": 20,
"data": [],
"executable": false
}}]],
"id":1}}
"#,
bob.pubkey(),
new_program_id.as_ref()
);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test] #[test]
fn test_rpc_confirm_tx() { fn test_rpc_confirm_tx() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, blockhash, alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, blockhash, alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash); let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
let req = format!( let req = format!(
@ -735,7 +847,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_signature_status() { fn test_rpc_get_signature_status() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, blockhash, alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, blockhash, alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash); let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
let req = format!( let req = format!(
@ -799,7 +912,8 @@ mod tests {
#[test] #[test]
fn test_rpc_get_recent_blockhash() { fn test_rpc_get_recent_blockhash() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}}"#); let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}}"#);
let res = io.handle_request_sync(&req, meta); let res = io.handle_request_sync(&req, meta);
@ -824,7 +938,8 @@ mod tests {
#[test] #[test]
fn test_rpc_fail_request_airdrop() { fn test_rpc_fail_request_airdrop() {
let bob_pubkey = Pubkey::new_rand(); let bob_pubkey = Pubkey::new_rand();
let (io, meta, _blockhash, _alice, _leader_pubkey) = start_rpc_handler_with_tx(&bob_pubkey); let (io, meta, _bank, _blockhash, _alice, _leader_pubkey) =
start_rpc_handler_with_tx(&bob_pubkey);
// Expect internal error because no drone is available // Expect internal error because no drone is available
let req = format!( let req = format!(

View File

@ -84,6 +84,7 @@ impl Validator {
voting_keypair: &Arc<Keypair>, voting_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>, storage_keypair: &Arc<Keypair>,
entrypoint_info_option: Option<&ContactInfo>, entrypoint_info_option: Option<&ContactInfo>,
verify_ledger: bool,
config: &ValidatorConfig, config: &ValidatorConfig,
) -> Self { ) -> Self {
warn!("CUDA is {}abled", if cfg!(cuda) { "en" } else { "dis" }); warn!("CUDA is {}abled", if cfg!(cuda) { "en" } else { "dis" });
@ -104,6 +105,7 @@ impl Validator {
ledger_path, ledger_path,
config.account_paths.clone(), config.account_paths.clone(),
config.snapshot_path.clone(), config.snapshot_path.clone(),
verify_ledger,
); );
let leader_schedule_cache = Arc::new(leader_schedule_cache); let leader_schedule_cache = Arc::new(leader_schedule_cache);
@ -301,6 +303,7 @@ fn get_bank_forks(
blocktree: &Blocktree, blocktree: &Blocktree,
account_paths: Option<String>, account_paths: Option<String>,
snapshot_path: Option<String>, snapshot_path: Option<String>,
verify_ledger: bool,
) -> (BankForks, Vec<BankForksInfo>, LeaderScheduleCache) { ) -> (BankForks, Vec<BankForksInfo>, LeaderScheduleCache) {
if snapshot_path.is_some() { if snapshot_path.is_some() {
let bank_forks = let bank_forks =
@ -318,8 +321,13 @@ fn get_bank_forks(
} }
} }
let (mut bank_forks, bank_forks_info, leader_schedule_cache) = let (mut bank_forks, bank_forks_info, leader_schedule_cache) =
blocktree_processor::process_blocktree(&genesis_block, &blocktree, account_paths) blocktree_processor::process_blocktree(
.expect("process_blocktree failed"); &genesis_block,
&blocktree,
account_paths,
verify_ledger,
)
.expect("process_blocktree failed");
if snapshot_path.is_some() { if snapshot_path.is_some() {
bank_forks.set_snapshot_config(snapshot_path); bank_forks.set_snapshot_config(snapshot_path);
let _ = bank_forks.add_snapshot(0, 0); let _ = bank_forks.add_snapshot(0, 0);
@ -331,6 +339,7 @@ pub fn new_banks_from_blocktree(
blocktree_path: &str, blocktree_path: &str,
account_paths: Option<String>, account_paths: Option<String>,
snapshot_path: Option<String>, snapshot_path: Option<String>,
verify_ledger: bool,
) -> ( ) -> (
BankForks, BankForks,
Vec<BankForksInfo>, Vec<BankForksInfo>,
@ -347,8 +356,13 @@ pub fn new_banks_from_blocktree(
Blocktree::open_with_signal(blocktree_path) Blocktree::open_with_signal(blocktree_path)
.expect("Expected to successfully open database ledger"); .expect("Expected to successfully open database ledger");
let (bank_forks, bank_forks_info, leader_schedule_cache) = let (bank_forks, bank_forks_info, leader_schedule_cache) = get_bank_forks(
get_bank_forks(&genesis_block, &blocktree, account_paths, snapshot_path); &genesis_block,
&blocktree,
account_paths,
snapshot_path,
verify_ledger,
);
( (
bank_forks, bank_forks,
@ -412,6 +426,7 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, String) {
&voting_keypair, &voting_keypair,
&storage_keypair, &storage_keypair,
None, None,
true,
&ValidatorConfig::default(), &ValidatorConfig::default(),
); );
discover_cluster(&contact_info.gossip, 1).expect("Node startup failed"); discover_cluster(&contact_info.gossip, 1).expect("Node startup failed");
@ -447,6 +462,7 @@ mod tests {
&voting_keypair, &voting_keypair,
&storage_keypair, &storage_keypair,
Some(&leader_node.info), Some(&leader_node.info),
true,
&ValidatorConfig::default(), &ValidatorConfig::default(),
); );
validator.close().unwrap(); validator.close().unwrap();
@ -478,6 +494,7 @@ mod tests {
&voting_keypair, &voting_keypair,
&storage_keypair, &storage_keypair,
Some(&leader_node.info), Some(&leader_node.info),
true,
&ValidatorConfig::default(), &ValidatorConfig::default(),
) )
}) })

View File

@ -7,6 +7,8 @@ use rand_chacha::ChaChaRng;
use std::iter; use std::iter;
use std::ops::Div; use std::ops::Div;
/// Returns a list of indexes shuffled based on the input weights
/// Note - The sum of all weights must not exceed `u64::MAX`
pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize> pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize>
where where
T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive, T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
@ -17,10 +19,13 @@ where
.into_iter() .into_iter()
.enumerate() .enumerate()
.map(|(i, v)| { .map(|(i, v)| {
let x = (total_weight / v).to_u32().unwrap(); let x = (total_weight / v)
.to_u64()
.expect("values > u64::max are not supported");
( (
i, i,
(&mut rng).gen_range(1, u64::from(std::u16::MAX)) * u64::from(x), // capture the u64 into u128s to prevent overflow
(&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
) )
}) })
.sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val)) .sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val))
@ -73,4 +78,18 @@ mod tests {
assert_eq!(x, y); assert_eq!(x, y);
}); });
} }
#[test]
fn test_weighted_shuffle_imbalanced() {
let mut weights = vec![std::u32::MAX as u64; 3];
weights.push(1);
let shuffle = weighted_shuffle(weights.clone(), ChaChaRng::from_seed([0x5a; 32]));
shuffle.into_iter().for_each(|x| {
if x == weights.len() - 1 {
assert_eq!(weights[x], 1);
} else {
assert_eq!(weights[x], std::u32::MAX as u64);
}
});
}
} }

View File

@ -118,6 +118,7 @@ fn test_leader_failure_4() {
); );
} }
#[test] #[test]
#[ignore]
fn test_two_unbalanced_stakes() { fn test_two_unbalanced_stakes() {
solana_logger::setup(); solana_logger::setup();
let mut validator_config = ValidatorConfig::default(); let mut validator_config = ValidatorConfig::default();

View File

@ -98,7 +98,7 @@ fn test_replay() {
completed_slots_receiver, completed_slots_receiver,
leader_schedule_cache, leader_schedule_cache,
_, _,
) = validator::new_banks_from_blocktree(&blocktree_path, None, None); ) = validator::new_banks_from_blocktree(&blocktree_path, None, None, true);
let working_bank = bank_forks.working_bank(); let working_bank = bank_forks.working_bank();
assert_eq!( assert_eq!(
working_bank.get_balance(&mint_keypair.pubkey()), working_bank.get_balance(&mint_keypair.pubkey()),

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-drone" name = "solana-drone"
version = "0.16.0" version = "0.16.6"
description = "Solana Drone" description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -20,9 +20,9 @@ clap = "2.33"
log = "0.4.2" log = "0.4.2"
serde = "1.0.92" serde = "1.0.92"
serde_derive = "1.0.92" serde_derive = "1.0.92"
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }
solana-metrics = { path = "../metrics", version = "0.16.0" } solana-metrics = { path = "../metrics", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
tokio = "0.1" tokio = "0.1"
tokio-codec = "0.1" tokio-codec = "0.1"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-genesis" name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.16.0" version = "0.16.6"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -15,24 +15,24 @@ serde = "1.0.92"
serde_derive = "1.0.92" serde_derive = "1.0.92"
serde_json = "1.0.39" serde_json = "1.0.39"
serde_yaml = "0.8.9" serde_yaml = "0.8.9"
solana = { path = "../core", version = "0.16.0" } solana = { path = "../core", version = "0.16.6" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.16.0" } solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.16.6" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.16.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.16.6" }
solana-budget-api = { path = "../programs/budget_api", version = "0.16.0" } solana-budget-api = { path = "../programs/budget_api", version = "0.16.6" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.0" } solana-budget-program = { path = "../programs/budget_program", version = "0.16.6" }
solana-config-api = { path = "../programs/config_api", version = "0.16.0" } solana-config-api = { path = "../programs/config_api", version = "0.16.6" }
solana-config-program = { path = "../programs/config_program", version = "0.16.0" } solana-config-program = { path = "../programs/config_program", version = "0.16.6" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.0" } solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.6" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" } solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.0" } solana-stake-api = { path = "../programs/stake_api", version = "0.16.6" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.0" } solana-stake-program = { path = "../programs/stake_program", version = "0.16.6" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.0" } solana-storage-api = { path = "../programs/storage_api", version = "0.16.6" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.0" } solana-storage-program = { path = "../programs/storage_program", version = "0.16.6" }
solana-token-api = { path = "../programs/token_api", version = "0.16.0" } solana-token-api = { path = "../programs/token_api", version = "0.16.6" }
solana-token-program = { path = "../programs/token_program", version = "0.16.0" } solana-token-program = { path = "../programs/token_program", version = "0.16.6" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.0" } solana-vote-api = { path = "../programs/vote_api", version = "0.16.6" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.0" } solana-vote-program = { path = "../programs/vote_program", version = "0.16.6" }
[dev-dependencies] [dev-dependencies]
hashbrown = "0.3.0" hashbrown = "0.3.0"

View File

@ -147,14 +147,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.required(true) .required(true)
.help("Path to file containing the bootstrap leader's storage keypair"), .help("Path to file containing the bootstrap leader's storage keypair"),
) )
.arg(
Arg::with_name("storage_mining_pool_lamports")
.long("storage-mining-pool-lamports")
.value_name("LAMPORTS")
.takes_value(true)
.required(true)
.help("Number of lamports to assign to the storage mining pool"),
)
.arg( .arg(
Arg::with_name("bootstrap_leader_lamports") Arg::with_name("bootstrap_leader_lamports")
.long("bootstrap-leader-lamports") .long("bootstrap-leader-lamports")
@ -261,7 +253,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64); let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64);
let bootstrap_leader_stake_lamports = let bootstrap_leader_stake_lamports =
value_t_or_exit!(matches, "bootstrap_leader_stake_lamports", u64); value_t_or_exit!(matches, "bootstrap_leader_stake_lamports", u64);
let storage_pool_lamports = value_t_or_exit!(matches, "storage_mining_pool_lamports", u64);
let bootstrap_leader_keypair = read_keypair(bootstrap_leader_keypair_file)?; let bootstrap_leader_keypair = read_keypair(bootstrap_leader_keypair_file)?;
let bootstrap_vote_keypair = read_keypair(bootstrap_vote_keypair_file)?; let bootstrap_vote_keypair = read_keypair(bootstrap_vote_keypair_file)?;
@ -306,12 +297,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
1, 1,
), ),
), ),
(
"StorageMiningPoo111111111111111111111111111"
.parse()
.unwrap(),
storage_contract::create_mining_pool_account(storage_pool_lamports),
),
]) ])
.native_instruction_processors(&[ .native_instruction_processors(&[
solana_bpf_loader_program!(), solana_bpf_loader_program!(),
@ -370,6 +355,10 @@ fn main() -> Result<(), Box<dyn error::Error>> {
builder = append_primordial_accounts(file, AccountFileFormat::Keypair, builder)?; builder = append_primordial_accounts(file, AccountFileFormat::Keypair, builder)?;
} }
// add the reward pools
builder = solana_storage_api::rewards_pools::genesis(builder);
builder = solana_stake_api::rewards_pools::genesis(builder);
create_new_ledger(ledger_path, &builder.build())?; create_new_ledger(ledger_path, &builder.build())?;
Ok(()) Ok(())
} }
@ -524,6 +513,8 @@ mod tests {
) )
.expect("builder"); .expect("builder");
builder = solana_storage_api::rewards_pools::genesis(builder);
remove_file(path).unwrap(); remove_file(path).unwrap();
let genesis_block = builder.clone().build(); let genesis_block = builder.clone().build();

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-gossip" name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.16.0" version = "0.16.6"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -11,10 +11,10 @@ homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.33.0" clap = "2.33.0"
env_logger = "0.6.1" env_logger = "0.6.1"
solana = { path = "../core", version = "0.16.0" } solana = { path = "../core", version = "0.16.6" }
solana-client = { path = "../client", version = "0.16.0" } solana-client = { path = "../client", version = "0.16.6" }
solana-netutil = { path = "../netutil", version = "0.16.0" } solana-netutil = { path = "../netutil", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
[features] [features]
cuda = [] cuda = []

View File

@ -41,12 +41,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
SubCommand::with_name("spy") SubCommand::with_name("spy")
.about("Monitor the gossip entrypoint") .about("Monitor the gossip entrypoint")
.setting(AppSettings::DisableVersion) .setting(AppSettings::DisableVersion)
.arg(
clap::Arg::with_name("pull_only")
.long("pull-only")
.takes_value(false)
.help("Use a partial gossip node (Pulls only) to spy on the cluster. By default it will use a full fledged gossip node (Pushes and Pulls). Useful when behind a NAT"),
)
.arg( .arg(
Arg::with_name("num_nodes") Arg::with_name("num_nodes")
.short("N") .short("N")
@ -120,9 +114,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.value_of("node_pubkey") .value_of("node_pubkey")
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap()); .map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
let gossip_addr = if matches.is_present("pull_only") { let gossip_addr = {
None
} else {
let mut addr = socketaddr_any!(); let mut addr = socketaddr_any!();
addr.set_ip( addr.set_ip(
solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(|err| { solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(|err| {

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-install" name = "solana-install"
description = "The solana cluster software installer" description = "The solana cluster software installer"
version = "0.16.0" version = "0.16.6"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@ -13,25 +13,29 @@ cuda = []
[dependencies] [dependencies]
atty = "0.2.11"
bincode = "1.1.4" bincode = "1.1.4"
bs58 = "0.2.0" bs58 = "0.2.0"
bzip2 = "0.3.3" bzip2 = "0.3.3"
chrono = { version = "0.4.0", features = ["serde"] } chrono = { version = "0.4.0", features = ["serde"] }
clap = { version = "2.33.0" } clap = { version = "2.33.0" }
console = "0.7.5" console = "0.7.7"
ctrlc = { version = "3.1.3", features = ["termination"] }
dirs = "2.0.1" dirs = "2.0.1"
indicatif = "0.11.0" indicatif = "0.11.0"
lazy_static = "1.3.0" lazy_static = "1.3.0"
log = "0.4.2" log = "0.4.2"
nix = "0.14.1"
reqwest = "0.9.18" reqwest = "0.9.18"
ring = "0.13.2" ring = "0.13.2"
semver = "0.7.0"
serde = "1.0.92" serde = "1.0.92"
serde_derive = "1.0.92" serde_derive = "1.0.92"
serde_yaml = "0.8.9" serde_yaml = "0.8.9"
solana-client = { path = "../client", version = "0.16.0" } solana-client = { path = "../client", version = "0.16.6" }
solana-config-api = { path = "../programs/config_api", version = "0.16.0" } solana-config-api = { path = "../programs/config_api", version = "0.16.6" }
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
tar = "0.4.26" tar = "0.4.26"
tempdir = "0.3.7" tempdir = "0.3.7"
url = "1.7.2" url = "1.7.2"

View File

@ -1,11 +1,12 @@
use crate::config::Config; use crate::config::Config;
use crate::stop_process::stop_process;
use crate::update_manifest::{SignedUpdateManifest, UpdateManifest}; use crate::update_manifest::{SignedUpdateManifest, UpdateManifest};
use chrono::{Local, TimeZone}; use chrono::{Local, TimeZone};
use console::{style, Emoji}; use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle}; use indicatif::{ProgressBar, ProgressStyle};
use ring::digest::{Context, Digest, SHA256}; use ring::digest::{Context, Digest, SHA256};
use solana_client::rpc_client::RpcClient; use solana_client::rpc_client::RpcClient;
use solana_config_api::config_instruction; use solana_config_api::config_instruction::{self, ConfigKeys};
use solana_sdk::message::Message; use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil, Signable}; use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil, Signable};
@ -13,7 +14,7 @@ use solana_sdk::transaction::Transaction;
use std::fs::{self, File}; use std::fs::{self, File};
use std::io::{self, BufReader, Read}; use std::io::{self, BufReader, Read};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::thread::sleep; use std::sync::mpsc;
use std::time::SystemTime; use std::time::SystemTime;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use tempdir::TempDir; use tempdir::TempDir;
@ -202,7 +203,8 @@ fn new_update_manifest(
let new_account = config_instruction::create_account::<SignedUpdateManifest>( let new_account = config_instruction::create_account::<SignedUpdateManifest>(
&from_keypair.pubkey(), &from_keypair.pubkey(),
&update_manifest_keypair.pubkey(), &update_manifest_keypair.pubkey(),
1, // lamports 1, // lamports
vec![], // additional keys
); );
let mut transaction = Transaction::new_unsigned_instructions(vec![new_account]); let mut transaction = Transaction::new_unsigned_instructions(vec![new_account]);
transaction.sign(&[from_keypair], recent_blockhash); transaction.sign(&[from_keypair], recent_blockhash);
@ -224,6 +226,8 @@ fn store_update_manifest(
let signers = [from_keypair, update_manifest_keypair]; let signers = [from_keypair, update_manifest_keypair];
let instruction = config_instruction::store::<SignedUpdateManifest>( let instruction = config_instruction::store::<SignedUpdateManifest>(
&update_manifest_keypair.pubkey(), &update_manifest_keypair.pubkey(),
true, // update_manifest_keypair is signer
vec![], // additional keys
update_manifest, update_manifest,
); );
@ -238,9 +242,10 @@ fn get_update_manifest(
rpc_client: &RpcClient, rpc_client: &RpcClient,
update_manifest_pubkey: &Pubkey, update_manifest_pubkey: &Pubkey,
) -> Result<UpdateManifest, String> { ) -> Result<UpdateManifest, String> {
let data = rpc_client let mut data = rpc_client
.get_account_data(update_manifest_pubkey) .get_account_data(update_manifest_pubkey)
.map_err(|err| format!("Unable to fetch update manifest: {}", err))?; .map_err(|err| format!("Unable to fetch update manifest: {}", err))?;
let data = data.split_off(ConfigKeys::serialized_size(vec![]));
let signed_update_manifest = let signed_update_manifest =
SignedUpdateManifest::deserialize(update_manifest_pubkey, &data) SignedUpdateManifest::deserialize(update_manifest_pubkey, &data)
@ -493,13 +498,19 @@ pub fn init(
json_rpc_url: &str, json_rpc_url: &str,
update_manifest_pubkey: &Pubkey, update_manifest_pubkey: &Pubkey,
no_modify_path: bool, no_modify_path: bool,
release_semver: Option<&str>,
) -> Result<(), String> { ) -> Result<(), String> {
let config = { let config = {
// Write new config file only if different, so that running |solana-install init| // Write new config file only if different, so that running |solana-install init|
// repeatedly doesn't unnecessarily re-download // repeatedly doesn't unnecessarily re-download
let mut current_config = Config::load(config_file).unwrap_or_default(); let mut current_config = Config::load(config_file).unwrap_or_default();
current_config.current_update_manifest = None; current_config.current_update_manifest = None;
let config = Config::new(data_dir, json_rpc_url, update_manifest_pubkey); let config = Config::new(
data_dir,
json_rpc_url,
update_manifest_pubkey,
release_semver,
);
if current_config != config { if current_config != config {
config.save(config_file)?; config.save(config_file)?;
} }
@ -514,30 +525,48 @@ pub fn init(
false false
}; };
if !path_modified { if !path_modified && !no_modify_path {
check_env_path_for_bin_dir(&config); check_env_path_for_bin_dir(&config);
} }
Ok(()) Ok(())
} }
fn github_download_url(release_semver: &str) -> String {
format!(
"https://github.com/solana-labs/solana/releases/download/v{}/solana-release-{}.tar.bz2",
release_semver,
crate::build_env::TARGET
)
}
pub fn info(config_file: &str, local_info_only: bool) -> Result<Option<UpdateManifest>, String> { pub fn info(config_file: &str, local_info_only: bool) -> Result<Option<UpdateManifest>, String> {
let config = Config::load(config_file)?; let config = Config::load(config_file)?;
println_name_value("JSON RPC URL:", &config.json_rpc_url);
println_name_value(
"Update manifest pubkey:",
&config.update_manifest_pubkey.to_string(),
);
println_name_value("Configuration:", &config_file); println_name_value("Configuration:", &config_file);
println_name_value( println_name_value(
"Active release directory:", "Active release directory:",
&config.active_release_dir().to_str().unwrap_or("?"), &config.active_release_dir().to_str().unwrap_or("?"),
); );
if let Some(release_semver) = &config.release_semver {
println_name_value(&format!("{}Release version:", BULLET), &release_semver);
println_name_value(
&format!("{}Release URL:", BULLET),
&github_download_url(release_semver),
);
return Ok(None);
}
println_name_value("JSON RPC URL:", &config.json_rpc_url);
println_name_value(
"Update manifest pubkey:",
&config.update_manifest_pubkey.to_string(),
);
fn print_update_manifest(update_manifest: &UpdateManifest) { fn print_update_manifest(update_manifest: &UpdateManifest) {
let when = Local.timestamp(update_manifest.timestamp_secs as i64, 0); let when = Local.timestamp(update_manifest.timestamp_secs as i64, 0);
println_name_value(&format!("{}release date", BULLET), &when.to_string()); println_name_value(&format!("{}release date:", BULLET), &when.to_string());
println_name_value( println_name_value(
&format!("{}download URL", BULLET), &format!("{}download URL:", BULLET),
&update_manifest.download_url, &update_manifest.download_url,
); );
} }
@ -670,44 +699,66 @@ fn symlink_dir<P: AsRef<Path>, Q: AsRef<Path>>(src: P, dst: Q) -> std::io::Resul
} }
pub fn update(config_file: &str) -> Result<bool, String> { pub fn update(config_file: &str) -> Result<bool, String> {
let update_manifest = info(config_file, false)?;
if update_manifest.is_none() {
return Ok(false);
}
let update_manifest = update_manifest.unwrap();
if timestamp_secs()
< u64::from_str_radix(crate::build_env::BUILD_SECONDS_SINCE_UNIX_EPOCH, 10).unwrap()
{
Err("Unable to update as system time seems unreliable".to_string())?
}
let mut config = Config::load(config_file)?; let mut config = Config::load(config_file)?;
if let Some(ref current_update_manifest) = config.current_update_manifest { let update_manifest = info(config_file, false)?;
if update_manifest.timestamp_secs < current_update_manifest.timestamp_secs {
Err("Unable to update to an older version".to_string())? let release_dir = if let Some(release_semver) = &config.release_semver {
let download_url = github_download_url(release_semver);
let release_dir = config.release_dir(&release_semver);
let ok_dir = release_dir.join(".ok");
if ok_dir.exists() {
return Ok(false);
} }
} let (_temp_dir, temp_archive, _temp_archive_sha256) =
download_to_temp_archive(&download_url, None)
.map_err(|err| format!("Unable to download {}: {}", download_url, err))?;
extract_release_archive(&temp_archive, &release_dir).map_err(|err| {
format!(
"Unable to extract {:?} to {:?}: {}",
temp_archive, release_dir, err
)
})?;
let _ = fs::create_dir_all(ok_dir);
let (_temp_dir, temp_archive, _temp_archive_sha256) = download_to_temp_archive( release_dir
&update_manifest.download_url, } else {
Some(&update_manifest.download_sha256), if update_manifest.is_none() {
) return Ok(false);
.map_err(|err| { }
format!( let update_manifest = update_manifest.unwrap();
"Unable to download {}: {}",
update_manifest.download_url, err if timestamp_secs()
< u64::from_str_radix(crate::build_env::BUILD_SECONDS_SINCE_UNIX_EPOCH, 10).unwrap()
{
Err("Unable to update as system time seems unreliable".to_string())?
}
if let Some(ref current_update_manifest) = config.current_update_manifest {
if update_manifest.timestamp_secs < current_update_manifest.timestamp_secs {
Err("Unable to update to an older version".to_string())?
}
}
let release_dir = config.release_dir(&update_manifest.download_sha256);
let (_temp_dir, temp_archive, _temp_archive_sha256) = download_to_temp_archive(
&update_manifest.download_url,
Some(&update_manifest.download_sha256),
) )
})?; .map_err(|err| {
format!(
"Unable to download {}: {}",
update_manifest.download_url, err
)
})?;
extract_release_archive(&temp_archive, &release_dir).map_err(|err| {
format!(
"Unable to extract {:?} to {:?}: {}",
temp_archive, release_dir, err
)
})?;
let release_dir = config.release_dir(&update_manifest.download_sha256); config.current_update_manifest = Some(update_manifest);
release_dir
extract_release_archive(&temp_archive, &release_dir).map_err(|err| { };
format!(
"Unable to extract {:?} to {:?}: {}",
temp_archive, release_dir, err
)
})?;
let release_target = load_release_target(&release_dir).map_err(|err| { let release_target = load_release_target(&release_dir).map_err(|err| {
format!( format!(
@ -734,7 +785,6 @@ pub fn update(config_file: &str) -> Result<bool, String> {
) )
})?; })?;
config.current_update_manifest = Some(update_manifest);
config.save(config_file)?; config.save(config_file)?;
println!(" {}{}", SPARKLE, style("Update successful").bold()); println!(" {}{}", SPARKLE, style("Update successful").bold());
@ -748,7 +798,11 @@ pub fn run(
) -> Result<(), String> { ) -> Result<(), String> {
let config = Config::load(config_file)?; let config = Config::load(config_file)?;
let full_program_path = config.active_release_bin_dir().join(program_name); let mut full_program_path = config.active_release_bin_dir().join(program_name);
if cfg!(windows) && full_program_path.extension().is_none() {
full_program_path.set_extension("exe");
}
if !full_program_path.exists() { if !full_program_path.exists() {
Err(format!( Err(format!(
"{} does not exist", "{} does not exist",
@ -758,6 +812,13 @@ pub fn run(
let mut child_option: Option<std::process::Child> = None; let mut child_option: Option<std::process::Child> = None;
let mut now = Instant::now(); let mut now = Instant::now();
let (signal_sender, signal_receiver) = mpsc::channel();
ctrlc::set_handler(move || {
let _ = signal_sender.send(());
})
.expect("Error setting Ctrl-C handler");
loop { loop {
child_option = match child_option { child_option = match child_option {
Some(mut child) => match child.try_wait() { Some(mut child) => match child.try_wait() {
@ -793,7 +854,9 @@ pub fn run(
Ok(true) => { Ok(true) => {
// Update successful, kill current process so it will be restart // Update successful, kill current process so it will be restart
if let Some(ref mut child) = child_option { if let Some(ref mut child) = child_option {
println!("Killing program: {:?}", child.kill()); stop_process(child).unwrap_or_else(|err| {
eprintln!("Failed to stop child: {:?}", err);
});
} }
} }
Ok(false) => {} // No update available Ok(false) => {} // No update available
@ -803,6 +866,15 @@ pub fn run(
}; };
now = Instant::now(); now = Instant::now();
} }
sleep(Duration::from_secs(1));
if let Ok(()) = signal_receiver.recv_timeout(Duration::from_secs(1)) {
// Handle SIGTERM...
if let Some(ref mut child) = child_option {
stop_process(child).unwrap_or_else(|err| {
eprintln!("Failed to stop child: {:?}", err);
});
}
std::process::exit(0);
}
} }
} }

View File

@ -11,17 +11,24 @@ pub struct Config {
pub update_manifest_pubkey: Pubkey, pub update_manifest_pubkey: Pubkey,
pub current_update_manifest: Option<UpdateManifest>, pub current_update_manifest: Option<UpdateManifest>,
pub update_poll_secs: u64, pub update_poll_secs: u64,
pub release_semver: Option<String>,
releases_dir: PathBuf, releases_dir: PathBuf,
active_release_dir: PathBuf, active_release_dir: PathBuf,
} }
impl Config { impl Config {
pub fn new(data_dir: &str, json_rpc_url: &str, update_manifest_pubkey: &Pubkey) -> Self { pub fn new(
data_dir: &str,
json_rpc_url: &str,
update_manifest_pubkey: &Pubkey,
release_semver: Option<&str>,
) -> Self {
Self { Self {
json_rpc_url: json_rpc_url.to_string(), json_rpc_url: json_rpc_url.to_string(),
update_manifest_pubkey: *update_manifest_pubkey, update_manifest_pubkey: *update_manifest_pubkey,
current_update_manifest: None, current_update_manifest: None,
update_poll_secs: 60, // check for updates once a minute update_poll_secs: 60, // check for updates once a minute
release_semver: release_semver.map(|s| s.to_string()),
releases_dir: PathBuf::from(data_dir).join("releases"), releases_dir: PathBuf::from(data_dir).join("releases"),
active_release_dir: PathBuf::from(data_dir).join("active_release"), active_release_dir: PathBuf::from(data_dir).join("active_release"),
} }
@ -64,7 +71,7 @@ impl Config {
self.active_release_dir.join("bin") self.active_release_dir.join("bin")
} }
pub fn release_dir(&self, release_sha256: &str) -> PathBuf { pub fn release_dir(&self, release_id: &str) -> PathBuf {
self.releases_dir.join(release_sha256) self.releases_dir.join(release_id)
} }
} }

View File

@ -8,6 +8,7 @@ mod build_env;
mod command; mod command;
mod config; mod config;
mod defaults; mod defaults;
mod stop_process;
mod update_manifest; mod update_manifest;
// Return an error if a url cannot be parsed. // Return an error if a url cannot be parsed.
@ -32,6 +33,13 @@ fn is_pubkey(string: String) -> Result<(), String> {
} }
} }
fn is_semver(string: String) -> Result<(), String> {
match semver::Version::parse(&string) {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
}
}
pub fn main() -> Result<(), String> { pub fn main() -> Result<(), String> {
solana_logger::setup(); solana_logger::setup();
@ -97,7 +105,15 @@ pub fn main() -> Result<(), String> {
Some(default_value) => arg.default_value(default_value), Some(default_value) => arg.default_value(default_value),
None => arg, None => arg,
} }
}), })
.arg(
Arg::with_name("release_semver")
.value_name("release-semver")
.index(1)
.conflicts_with_all(&["json_rpc_url", "update_manifest_pubkey"])
.validator(is_semver)
.help("The exact version to install. Updates will not be available if this argument is used"),
),
) )
.subcommand( .subcommand(
SubCommand::with_name("info") SubCommand::with_name("info")
@ -190,6 +206,7 @@ pub fn main() -> Result<(), String> {
.unwrap(); .unwrap();
let data_dir = matches.value_of("data_dir").unwrap(); let data_dir = matches.value_of("data_dir").unwrap();
let no_modify_path = matches.is_present("no_modify_path"); let no_modify_path = matches.is_present("no_modify_path");
let release_semver = matches.value_of("release_semver");
command::init( command::init(
config_file, config_file,
@ -197,6 +214,7 @@ pub fn main() -> Result<(), String> {
json_rpc_url, json_rpc_url,
&update_manifest_pubkey, &update_manifest_pubkey,
no_modify_path, no_modify_path,
release_semver,
) )
} }
("info", Some(matches)) => { ("info", Some(matches)) => {
@ -291,6 +309,14 @@ pub fn main_init() -> Result<(), String> {
None => arg, None => arg,
} }
}) })
.arg(
Arg::with_name("release_semver")
.value_name("release-semver")
.index(1)
.conflicts_with_all(&["json_rpc_url", "update_manifest_pubkey"])
.validator(is_semver)
.help("The exact version to install. Updates will not be available if this argument is used"),
)
.get_matches(); .get_matches();
let config_file = matches.value_of("config_file").unwrap(); let config_file = matches.value_of("config_file").unwrap();
@ -303,6 +329,7 @@ pub fn main_init() -> Result<(), String> {
.unwrap(); .unwrap();
let data_dir = matches.value_of("data_dir").unwrap(); let data_dir = matches.value_of("data_dir").unwrap();
let no_modify_path = matches.is_present("no_modify_path"); let no_modify_path = matches.is_present("no_modify_path");
let release_semver = matches.value_of("release_semver");
command::init( command::init(
config_file, config_file,
@ -310,5 +337,6 @@ pub fn main_init() -> Result<(), String> {
json_rpc_url, json_rpc_url,
&update_manifest_pubkey, &update_manifest_pubkey,
no_modify_path, no_modify_path,
release_semver,
) )
} }

View File

@ -1,22 +1,21 @@
use atty;
use std::process::exit; use std::process::exit;
#[cfg(windows)]
fn press_enter() { fn press_enter() {
// On windows, where installation happens in a console that may have opened just for this // On windows, where installation happens in a console that may have opened just for this
// purpose, give the user an opportunity to see the error before the window closes. // purpose, give the user an opportunity to see the error before the window closes.
println!(); if cfg!(windows) && atty::is(atty::Stream::Stdin) {
println!("Press the Enter key to continue."); println!();
println!("Press the Enter key to continue.");
use std::io::BufRead; use std::io::BufRead;
let stdin = std::io::stdin(); let stdin = std::io::stdin();
let stdin = stdin.lock(); let stdin = stdin.lock();
let mut lines = stdin.lines(); let mut lines = stdin.lines();
lines.next(); lines.next();
}
} }
#[cfg(not(windows))]
fn press_enter() {}
fn main() { fn main() {
solana_install::main_init().unwrap_or_else(|err| { solana_install::main_init().unwrap_or_else(|err| {
println!("Error: {}", err); println!("Error: {}", err);

View File

@ -0,0 +1,67 @@
use std::io;
use std::process::Child;
fn kill_process(process: &mut Child) -> Result<(), io::Error> {
if let Ok(()) = process.kill() {
process.wait()?;
} else {
println!("Process {} has already exited", process.id());
}
Ok(())
}
#[cfg(windows)]
pub fn stop_process(process: &mut Child) -> Result<(), io::Error> {
kill_process(process)
}
#[cfg(not(windows))]
pub fn stop_process(process: &mut Child) -> Result<(), io::Error> {
use nix::errno::Errno::{EINVAL, EPERM, ESRCH};
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
use nix::Error::Sys;
use std::io::ErrorKind;
use std::thread;
use std::time::{Duration, Instant};
let nice_wait = Duration::from_secs(5);
let pid = Pid::from_raw(process.id() as i32);
match kill(pid, Signal::SIGINT) {
Ok(()) => {
let expire = Instant::now() + nice_wait;
while let Ok(None) = process.try_wait() {
if Instant::now() > expire {
break;
}
thread::sleep(nice_wait / 10);
}
if let Ok(None) = process.try_wait() {
kill_process(process)?;
}
}
Err(Sys(EINVAL)) => {
println!("Invalid signal. Killing process {}", pid);
kill_process(process)?;
}
Err(Sys(EPERM)) => {
return Err(io::Error::new(
ErrorKind::InvalidInput,
format!("Insufficient permissions to signal process {}", pid),
));
}
Err(Sys(ESRCH)) => {
return Err(io::Error::new(
ErrorKind::InvalidInput,
format!("Process {} does not exist", pid),
));
}
Err(e) => {
return Err(io::Error::new(
ErrorKind::InvalidInput,
format!("Unexpected error {}", e),
));
}
};
Ok(())
}

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-keygen" name = "solana-keygen"
version = "0.16.0" version = "0.16.6"
description = "Solana key generation utility" description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -15,7 +15,7 @@ cuda = []
[dependencies] [dependencies]
clap = "2.33" clap = "2.33"
dirs = "2.0.1" dirs = "2.0.1"
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
[[bin]] [[bin]]
name = "solana-keygen" name = "solana-keygen"

View File

@ -1,7 +1,7 @@
[package] [package]
name = "solana-kvstore" name = "solana-kvstore"
description = "Embedded Key-Value store for solana" description = "Embedded Key-Value store for solana"
version = "0.16.0" version = "0.16.6"
homepage = "https://solana.com/" homepage = "https://solana.com/"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]

View File

@ -3,18 +3,22 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-ledger-tool" name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.16.0" version = "0.16.6"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
[dependencies] [dependencies]
bincode = "1.1.4"
clap = "2.33.0" clap = "2.33.0"
serde_json = "1.0.39" serde = "1.0.94"
solana = { path = "../core", version = "0.16.0" } serde_derive = "1.0.94"
solana-logger = { path = "../logger", version = "0.16.0" } serde_json = "1.0.40"
solana-runtime = { path = "../runtime", version = "0.16.0" } serde_yaml = "0.8.9"
solana-sdk = { path = "../sdk", version = "0.16.0" } solana = { path = "../core", version = "0.16.6" }
solana-logger = { path = "../logger", version = "0.16.6" }
solana-runtime = { path = "../runtime", version = "0.16.6" }
solana-sdk = { path = "../sdk", version = "0.16.6" }
[dev-dependencies] [dev-dependencies]
assert_cmd = "0.11" assert_cmd = "0.11"

View File

@ -1,13 +1,70 @@
use clap::{crate_description, crate_name, crate_version, App, Arg, SubCommand}; use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, SubCommand};
use solana::blocktree::Blocktree; use solana::blocktree::Blocktree;
use solana::blocktree_processor::process_blocktree; use solana::blocktree_processor::process_blocktree;
use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::genesis_block::GenesisBlock;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::{stdout, Write}; use std::io::{stdout, Write};
use std::process::exit; use std::process::exit;
use std::str::FromStr;
#[derive(PartialEq)]
enum LedgerOutputMethod {
Print,
Json,
}
fn output_ledger(blocktree: Blocktree, starting_slot: u64, method: LedgerOutputMethod) {
let rooted_slot_iterator = blocktree
.rooted_slot_iterator(starting_slot)
.unwrap_or_else(|err| {
eprintln!(
"Failed to load entries starting from slot {}: {:?}",
starting_slot, err
);
exit(1);
});
if method == LedgerOutputMethod::Json {
stdout().write_all(b"{\"ledger\":[\n").expect("open array");
}
for (slot, slot_meta) in rooted_slot_iterator {
match method {
LedgerOutputMethod::Print => println!("Slot {}", slot),
LedgerOutputMethod::Json => {
serde_json::to_writer(stdout(), &slot_meta).expect("serialize slot_meta");
stdout().write_all(b",\n").expect("newline");
}
}
let entries = blocktree
.get_slot_entries(slot, 0, None)
.unwrap_or_else(|err| {
eprintln!("Failed to load entries for slot {}: {:?}", slot, err);
exit(1);
});
for entry in entries {
match method {
LedgerOutputMethod::Print => println!("{:?}", entry),
LedgerOutputMethod::Json => {
serde_json::to_writer(stdout(), &entry).expect("serialize entry");
stdout().write_all(b",\n").expect("newline");
}
}
}
}
if method == LedgerOutputMethod::Json {
stdout().write_all(b"\n]}\n").expect("close array");
}
}
fn main() { fn main() {
const DEFAULT_ROOT_COUNT: &str = "1";
solana_logger::setup(); solana_logger::setup();
let matches = App::new(crate_name!()).about(crate_description!()) let matches = App::new(crate_name!())
.about(crate_description!())
.version(crate_version!()) .version(crate_version!())
.arg( .arg(
Arg::with_name("ledger") Arg::with_name("ledger")
@ -19,30 +76,47 @@ fn main() {
.help("Use directory for ledger location"), .help("Use directory for ledger location"),
) )
.arg( .arg(
Arg::with_name("head") Arg::with_name("starting_slot")
.short("n") .long("starting-slot")
.long("head")
.value_name("NUM") .value_name("NUM")
.takes_value(true) .takes_value(true)
.help("Limit to at most the first NUM entries in ledger\n (only applies to print and json commands)"), .default_value("0")
) .help("Start at this slot (only applies to print and json commands)"),
.arg(
Arg::with_name("min-hashes")
.short("h")
.long("min-hashes")
.value_name("NUM")
.takes_value(true)
.help("Skip entries with fewer than NUM hashes\n (only applies to print and json commands)"),
)
.arg(
Arg::with_name("continue")
.short("c")
.long("continue")
.help("Continue verify even if verification fails"),
) )
.subcommand(SubCommand::with_name("print").about("Print the ledger")) .subcommand(SubCommand::with_name("print").about("Print the ledger"))
.subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format")) .subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format"))
.subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH")) .subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
.subcommand(SubCommand::with_name("prune").about("Prune the ledger at the block height").arg(
Arg::with_name("slot_list")
.long("slot-list")
.value_name("FILENAME")
.takes_value(true)
.required(true)
.help("The location of the YAML file with a list of rollback slot heights and hashes"),
))
.subcommand(SubCommand::with_name("list-roots").about("Output upto last <num-roots> root hashes and their heights starting at the given block height").arg(
Arg::with_name("max_height")
.long("max-height")
.value_name("NUM")
.takes_value(true)
.required(true)
.help("Maximum block height"),
).arg(
Arg::with_name("slot_list")
.long("slot-list")
.value_name("FILENAME")
.required(false)
.takes_value(true)
.help("The location of the output YAML file. A list of rollback slot heights and hashes will be written to the file."),
).arg(
Arg::with_name("num_roots")
.long("num-roots")
.value_name("NUM")
.takes_value(true)
.default_value(DEFAULT_ROOT_COUNT)
.required(false)
.help("Number of roots in the output"),
))
.get_matches(); .get_matches();
let ledger_path = matches.value_of("ledger").unwrap(); let ledger_path = matches.value_of("ledger").unwrap();
@ -63,63 +137,118 @@ fn main() {
} }
}; };
let entries = match blocktree.read_ledger() { let starting_slot = value_t!(matches, "starting_slot", u64).unwrap_or_else(|e| e.exit());
Ok(entries) => entries,
Err(err) => {
eprintln!("Failed to read ledger at {}: {}", ledger_path, err);
exit(1);
}
};
let head = match matches.value_of("head") {
Some(head) => head.parse().expect("please pass a number for --head"),
None => <usize>::max_value(),
};
let min_hashes = match matches.value_of("min-hashes") {
Some(hashes) => hashes
.parse()
.expect("please pass a number for --min-hashes"),
None => 0,
} as u64;
match matches.subcommand() { match matches.subcommand() {
("print", _) => { ("print", _) => {
for (i, entry) in entries.enumerate() { output_ledger(blocktree, starting_slot, LedgerOutputMethod::Print);
if i >= head {
break;
}
if entry.num_hashes < min_hashes {
continue;
}
println!("{:?}", entry);
}
} }
("json", _) => { ("json", _) => {
stdout().write_all(b"{\"ledger\":[\n").expect("open array"); output_ledger(blocktree, starting_slot, LedgerOutputMethod::Json);
for (i, entry) in entries.enumerate() {
if i >= head {
break;
}
if entry.num_hashes < min_hashes {
continue;
}
serde_json::to_writer(stdout(), &entry).expect("serialize");
stdout().write_all(b",\n").expect("newline");
}
stdout().write_all(b"\n]}\n").expect("close array");
} }
("verify", _) => match process_blocktree(&genesis_block, &blocktree, None) { ("verify", _) => {
Ok((_bank_forks, bank_forks_info, _)) => { println!("Verifying ledger...");
println!("{:?}", bank_forks_info); match process_blocktree(&genesis_block, &blocktree, None, true) {
Ok((_bank_forks, bank_forks_info, _)) => {
println!("{:?}", bank_forks_info);
}
Err(err) => {
eprintln!("Ledger verification failed: {:?}", err);
exit(1);
}
} }
Err(err) => { }
eprintln!("Ledger verification failed: {:?}", err); ("prune", Some(args_matches)) => {
exit(1); if let Some(prune_file_path) = args_matches.value_of("slot_list") {
let prune_file = File::open(prune_file_path.to_string()).unwrap();
let slot_hashes: BTreeMap<u64, String> =
serde_yaml::from_reader(prune_file).unwrap();
let iter = blocktree
.rooted_slot_iterator(0)
.expect("Failed to get rooted slot");
let potential_hashes: Vec<_> = iter
.filter_map(|(slot, meta)| {
let blockhash = blocktree
.get_slot_entries(slot, meta.last_index, Some(1))
.unwrap()
.first()
.unwrap()
.hash
.to_string();
slot_hashes.get(&slot).and_then(|hash| {
if *hash == blockhash {
Some((slot, blockhash))
} else {
None
}
})
})
.collect();
let (target_slot, target_hash) = potential_hashes
.last()
.expect("Failed to find a valid slot");
println!("Prune at slot {:?} hash {:?}", target_slot, target_hash);
blocktree.prune(*target_slot);
} }
}, }
("list-roots", Some(args_matches)) => {
let max_height = if let Some(height) = args_matches.value_of("max_height") {
usize::from_str(height).expect("Maximum height must be a number")
} else {
panic!("Maximum height must be provided");
};
let num_roots = if let Some(roots) = args_matches.value_of("num_roots") {
usize::from_str(roots).expect("Number of roots must be a number")
} else {
usize::from_str(DEFAULT_ROOT_COUNT).unwrap()
};
let iter = blocktree
.rooted_slot_iterator(0)
.expect("Failed to get rooted slot");
let slot_hash: Vec<_> = iter
.filter_map(|(slot, meta)| {
if slot <= max_height as u64 {
let blockhash = blocktree
.get_slot_entries(slot, meta.last_index, Some(1))
.unwrap()
.first()
.unwrap()
.hash;
Some((slot, blockhash))
} else {
None
}
})
.collect();
let mut output_file: Box<Write> = if let Some(path) = args_matches.value_of("slot_list")
{
match File::create(path) {
Ok(file) => Box::new(file),
_ => Box::new(stdout()),
}
} else {
Box::new(stdout())
};
slot_hash
.into_iter()
.rev()
.enumerate()
.for_each(|(i, (slot, hash))| {
if i < num_roots {
output_file
.write_all(format!("{:?}: {:?}\n", slot, hash).as_bytes())
.expect("failed to write");
}
});
}
("", _) => { ("", _) => {
eprintln!("{}", matches.usage()); eprintln!("{}", matches.usage());
exit(1); exit(1);

View File

@ -45,20 +45,5 @@ fn nominal() {
// Print everything // Print everything
let output = run_ledger_tool(&["-l", &ledger_path, "print"]); let output = run_ledger_tool(&["-l", &ledger_path, "print"]);
assert!(output.status.success()); assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), ticks); assert_eq!(count_newlines(&output.stdout), ticks + 1);
// Only print the first 5 items
let output = run_ledger_tool(&["-l", &ledger_path, "-n", "5", "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), 5);
// Skip entries with no hashes
let output = run_ledger_tool(&["-l", &ledger_path, "-h", "1", "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), ticks);
// Skip entries with fewer than 2 hashes (skip everything)
let output = run_ledger_tool(&["-l", &ledger_path, "-h", "2", "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), 0);
} }

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-logger" name = "solana-logger"
version = "0.16.0" version = "0.16.6"
description = "Solana Logger" description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-merkle-tree" name = "solana-merkle-tree"
version = "0.16.0" version = "0.16.6"
description = "Solana Merkle Tree" description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
[dev-dependencies] [dev-dependencies]
hex = "0.3.2" hex = "0.3.2"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-metrics" name = "solana-metrics"
version = "0.16.0" version = "0.16.6"
description = "Solana Metrics" description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -14,7 +14,7 @@ influx_db_client = "0.3.6"
lazy_static = "1.3.0" lazy_static = "1.3.0"
log = "0.4.2" log = "0.4.2"
reqwest = "0.9.18" reqwest = "0.9.18"
solana-sdk = { path = "../sdk", version = "0.16.0" } solana-sdk = { path = "../sdk", version = "0.16.6" }
sys-info = "0.5.7" sys-info = "0.5.7"
[dev-dependencies] [dev-dependencies]

View File

@ -125,20 +125,11 @@ macro_rules! datapoint_debug {
} }
lazy_static! { lazy_static! {
static ref HOST_INFO: String = { static ref HOST_ID: String = {
let v = env::var("SOLANA_METRICS_DISPLAY_HOSTNAME") env::var("SOLANA_METRICS_HOST_ID").unwrap_or_else(|_| {
.map(|x| { let hostname: String = hostname().unwrap_or_else(|_| "".to_string());
x.parse() format!("host-{}", hash(hostname.as_bytes())).to_string()
.expect("Failed to parse SOLANA_METRICS_DISPLAY_HOSTNAME") })
})
.unwrap_or(0);
let name: String = hostname().unwrap_or_else(|_| "".to_string());
if v == 0 {
hash(name.as_bytes()).to_string()
} else {
name
}
}; };
} }
@ -254,7 +245,7 @@ impl MetricsAgent {
let extra = influxdb::Point::new("metrics") let extra = influxdb::Point::new("metrics")
.add_timestamp(timing::timestamp() as i64) .add_timestamp(timing::timestamp() as i64)
.add_field("host_id", influxdb::Value::String(HOST_INFO.to_string())) .add_field("host_id", influxdb::Value::String(HOST_ID.to_string()))
.add_field( .add_field(
"points_written", "points_written",
influxdb::Value::Integer(points_written as i64), influxdb::Value::Integer(points_written as i64),
@ -351,7 +342,7 @@ impl MetricsAgent {
} }
pub fn submit(&self, mut point: influxdb::Point, level: log::Level) { pub fn submit(&self, mut point: influxdb::Point, level: log::Level) {
point.add_field("host_id", influxdb::Value::String(HOST_INFO.to_string())); point.add_field("host_id", influxdb::Value::String(HOST_ID.to_string()));
if point.timestamp.is_none() { if point.timestamp.is_none() {
point.timestamp = Some(timing::timestamp() as i64); point.timestamp = Some(timing::timestamp() as i64);
} }
@ -461,7 +452,7 @@ pub fn set_panic_hook(program: &'static str) {
None => "?".to_string(), None => "?".to_string(),
}), }),
) )
.add_field("host_id", influxdb::Value::String(HOST_INFO.to_string())) .add_field("host_id", influxdb::Value::String(HOST_ID.to_string()))
.to_owned(), .to_owned(),
Level::Error, Level::Error,
); );

View File

@ -11,7 +11,9 @@ set -e
for i in "$SOLANA_RSYNC_CONFIG_DIR" "$SOLANA_CONFIG_DIR"; do for i in "$SOLANA_RSYNC_CONFIG_DIR" "$SOLANA_CONFIG_DIR"; do
echo "Cleaning $i" echo "Cleaning $i"
rm -rvf "${i:?}/" # <-- $i might be a symlink, rm the other side of it first
rm -rvf "$i" rm -rvf "$i"
mkdir -p "$i" mkdir -p "$i"
done done
setup_secondary_mount

View File

@ -72,6 +72,16 @@ SOLANA_RSYNC_CONFIG_DIR=$SOLANA_ROOT/config
# Configuration that remains local # Configuration that remains local
SOLANA_CONFIG_DIR=$SOLANA_ROOT/config-local SOLANA_CONFIG_DIR=$SOLANA_ROOT/config-local
SECONDARY_DISK_MOUNT_POINT=/mnt/extra-disk
setup_secondary_mount() {
# If there is a secondary disk, symlink the config-local dir there
if [[ -d $SECONDARY_DISK_MOUNT_POINT ]]; then
mkdir -p $SECONDARY_DISK_MOUNT_POINT/config-local
rm -rf "$SOLANA_CONFIG_DIR"
ln -sfT $SECONDARY_DISK_MOUNT_POINT/config-local "$SOLANA_CONFIG_DIR"
fi
}
default_arg() { default_arg() {
declare name=$1 declare name=$1
declare value=$2 declare value=$2
@ -88,3 +98,18 @@ default_arg() {
args+=("$name") args+=("$name")
fi fi
} }
replace_arg() {
declare name=$1
declare value=$2
default_arg "$name" "$value"
declare index=0
for arg in "${args[@]}"; do
index=$((index + 1))
if [[ $arg = "$name" ]]; then
args[$index]="$value"
fi
done
}

View File

@ -9,6 +9,7 @@ source "$here"/common.sh
# shellcheck source=scripts/oom-score-adj.sh # shellcheck source=scripts/oom-score-adj.sh
source "$here"/../scripts/oom-score-adj.sh source "$here"/../scripts/oom-score-adj.sh
fullnode_usage() { fullnode_usage() {
if [[ -n $1 ]]; then if [[ -n $1 ]]; then
echo "$*" echo "$*"
@ -17,15 +18,17 @@ fullnode_usage() {
cat <<EOF cat <<EOF
Fullnode Usage: Fullnode Usage:
usage: $0 [--blockstream PATH] [--init-complete-file FILE] [--label LABEL] [--stake LAMPORTS] [--no-voting] [--rpc-port port] [rsync network path to bootstrap leader configuration] [cluster entry point] usage: $0 [--config-dir PATH] [--blockstream PATH] [--init-complete-file FILE] [--label LABEL] [--stake LAMPORTS] [--no-voting] [--rpc-port port] [rsync network path to bootstrap leader configuration] [cluster entry point]
Start a validator or a replicator Start a validator or a replicator
--config-dir PATH - store configuration and data files under this PATH
--blockstream PATH - open blockstream at this unix domain socket location --blockstream PATH - open blockstream at this unix domain socket location
--init-complete-file FILE - create this file, if it doesn't already exist, once node initialization is complete --init-complete-file FILE - create this file, if it doesn't already exist, once node initialization is complete
--label LABEL - Append the given label to the configuration files, useful when running --label LABEL - Append the given label to the configuration files, useful when running
multiple fullnodes in the same workspace multiple fullnodes in the same workspace
--stake LAMPORTS - Number of lamports to stake --stake LAMPORTS - Number of lamports to stake
--node-lamports LAMPORTS - Number of lamports this node has been funded from the genesis block
--no-voting - start node without vote signer --no-voting - start node without vote signer
--rpc-port port - custom RPC port for this node --rpc-port port - custom RPC port for this node
--no-restart - do not restart the node if it exits --no-restart - do not restart the node if it exits
@ -76,101 +79,104 @@ rsync_url() { # adds the 'rsync://` prefix to URLs that need it
setup_validator_accounts() { setup_validator_accounts() {
declare entrypoint_ip=$1 declare entrypoint_ip=$1
declare node_keypair_path=$2 declare node_lamports=$2
declare vote_keypair_path=$3 declare stake_lamports=$3
declare stake_keypair_path=$4
declare storage_keypair_path=$5
declare node_lamports=$6
declare stake_lamports=$7
declare node_pubkey
node_pubkey=$($solana_keygen pubkey "$node_keypair_path")
declare vote_pubkey
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
declare stake_pubkey
stake_pubkey=$($solana_keygen pubkey "$stake_keypair_path")
declare storage_pubkey
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
if [[ -f $configured_flag ]]; then if [[ -f $configured_flag ]]; then
echo "Vote and stake accounts have already been configured" echo "Vote and stake accounts have already been configured"
else else
if ((airdrops_enabled)); then if ((airdrops_enabled)); then
# Fund the node with enough tokens to fund its Vote, Staking, and Storage accounts echo "Fund the node with enough tokens to fund its Vote, Staking, and Storage accounts"
declare fees=100 # TODO: No hardcoded transaction fees, fetch the current cluster fees (
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" airdrop $((node_lamports+stake_lamports+fees)) || return $? declare fees=100 # TODO: No hardcoded transaction fees, fetch the current cluster fees
set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
airdrop $((node_lamports+stake_lamports+fees))
) || return $?
else else
echo "current account balance is " echo "current account balance is "
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $? $solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
fi fi
# Fund the vote account from the node, with the node as the node_pubkey echo "Fund the vote account from the node's identity pubkey"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \ (
create-vote-account "$vote_pubkey" "$node_pubkey" 1 --commission 65535 || return $? set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-vote-account "$vote_pubkey" "$identity_pubkey" 1 --commission 127
) || return $?
# Fund the stake account from the node, with the node as the node_pubkey echo "Fund the stake account from the node's identity pubkey"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \ (
create-stake-account "$stake_pubkey" "$stake_lamports" || return $? set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-stake-account "$stake_pubkey" "$stake_lamports"
) || return $?
# Delegate the stake. The transaction fee is paid by the node but the echo "Delegate the stake account to the node's vote account"
# transaction must be signed by the stake_keypair # transaction must be signed by the stake_keypair
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \ (
delegate-stake "$stake_keypair_path" "$vote_pubkey" "$stake_lamports" || return $? set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
delegate-stake "$stake_keypair_path" "$vote_pubkey" "$stake_lamports"
) || return $?
# Setup validator storage account echo "Create validator storage account"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \ (
create-validator-storage-account "$node_pubkey" "$storage_pubkey" || return $? set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-validator-storage-account "$identity_pubkey" "$storage_pubkey"
) || return $?
touch "$configured_flag" touch "$configured_flag"
fi fi
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
show-vote-account "$vote_pubkey"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
show-stake-account "$stake_pubkey"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \
show-storage-account "$storage_pubkey"
echo "Identity account balance:" echo "Identity account balance:"
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance (
echo "========================================================================" set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-vote-account "$vote_pubkey"
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-stake-account "$stake_pubkey"
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-storage-account "$storage_pubkey"
)
return 0 return 0
} }
setup_replicator_account() { setup_replicator_account() {
declare entrypoint_ip=$1 declare entrypoint_ip=$1
declare node_keypair_path=$2 declare node_lamports=$2
declare storage_keypair_path=$3
declare node_lamports=$4
declare node_pubkey
node_pubkey=$($solana_keygen pubkey "$node_keypair_path")
declare storage_pubkey
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
if [[ -f $configured_flag ]]; then if [[ -f $configured_flag ]]; then
echo "Replicator account has already been configured" echo "Replicator account has already been configured"
else else
if ((airdrops_enabled)); then if ((airdrops_enabled)); then
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" airdrop "$node_lamports" || return $? (
set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899"
airdrop "$node_lamports"
) || return $?
else else
echo "current account balance is " echo "current account balance is "
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $? $solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" balance || return $?
fi fi
# Setup replicator storage account echo "Create replicator storage account"
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \ (
create-replicator-storage-account "$node_pubkey" "$storage_pubkey" || return $? set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
create-replicator-storage-account "$identity_pubkey" "$storage_pubkey"
) || return $?
touch "$configured_flag" touch "$configured_flag"
fi fi
$solana_wallet --keypair "$node_keypair_path" --url "http://$entrypoint_ip:8899" \ (
show-storage-account "$storage_pubkey" set -x
$solana_wallet --keypair "$identity_keypair_path" --url "http://$entrypoint_ip:8899" \
show-storage-account "$storage_pubkey"
)
return 0 return 0
} }
@ -192,6 +198,9 @@ identity_keypair_path=
no_restart=0 no_restart=0
airdrops_enabled=1 airdrops_enabled=1
generate_snapshots=0 generate_snapshots=0
boot_from_snapshot=1
reset_ledger=0
config_dir=
positional_args=() positional_args=()
while [[ -n $1 ]]; do while [[ -n $1 ]]; do
@ -209,6 +218,9 @@ while [[ -n $1 ]]; do
elif [[ $1 = --generate-snapshots ]]; then elif [[ $1 = --generate-snapshots ]]; then
generate_snapshots=1 generate_snapshots=1
shift shift
elif [[ $1 = --no-snapshot ]]; then
boot_from_snapshot=0
shift
elif [[ $1 = --replicator ]]; then elif [[ $1 = --replicator ]]; then
node_type=replicator node_type=replicator
shift shift
@ -235,9 +247,15 @@ while [[ -n $1 ]]; do
elif [[ $1 = --stake ]]; then elif [[ $1 = --stake ]]; then
stake_lamports="$2" stake_lamports="$2"
shift 2 shift 2
elif [[ $1 = --node-lamports ]]; then
node_lamports="$2"
shift 2
elif [[ $1 = --no-voting ]]; then elif [[ $1 = --no-voting ]]; then
args+=("$1") args+=("$1")
shift shift
elif [[ $1 = --skip-ledger-verify ]]; then
args+=("$1")
shift
elif [[ $1 = --no-sigverify ]]; then elif [[ $1 = --no-sigverify ]]; then
args+=("$1") args+=("$1")
shift shift
@ -253,6 +271,12 @@ while [[ -n $1 ]]; do
elif [[ $1 = --no-airdrop ]]; then elif [[ $1 = --no-airdrop ]]; then
airdrops_enabled=0 airdrops_enabled=0
shift shift
elif [[ $1 = --reset-ledger ]]; then
reset_ledger=1
shift
elif [[ $1 = --config-dir ]]; then
config_dir=$2
shift 2
elif [[ $1 = -h ]]; then elif [[ $1 = -h ]]; then
fullnode_usage "$@" fullnode_usage "$@"
else else
@ -265,6 +289,16 @@ while [[ -n $1 ]]; do
fi fi
done done
if [[ -n $REQUIRE_CONFIG_DIR ]]; then
if [[ -z $config_dir ]]; then
fullnode_usage "Error: --config-dir not specified"
fi
SOLANA_RSYNC_CONFIG_DIR="$config_dir"/config
SOLANA_CONFIG_DIR="$config_dir"/config-local
fi
setup_secondary_mount
if [[ $node_type = replicator ]]; then if [[ $node_type = replicator ]]; then
if [[ ${#positional_args[@]} -gt 2 ]]; then if [[ ${#positional_args[@]} -gt 2 ]]; then
@ -275,24 +309,13 @@ if [[ $node_type = replicator ]]; then
shift "$shift" shift "$shift"
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/replicator-keypair$label.json}" : "${identity_keypair_path:=$SOLANA_CONFIG_DIR/replicator-keypair$label.json}"
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
storage_keypair_path="$SOLANA_CONFIG_DIR"/replicator-storage-keypair$label.json storage_keypair_path="$SOLANA_CONFIG_DIR"/replicator-storage-keypair$label.json
ledger_config_dir=$SOLANA_CONFIG_DIR/replicator-ledger$label ledger_config_dir=$SOLANA_CONFIG_DIR/replicator-ledger$label
configured_flag=$SOLANA_CONFIG_DIR/replicator$label.configured configured_flag=$SOLANA_CONFIG_DIR/replicator$label.configured
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
[[ -r "$storage_keypair_path" ]] || $solana_keygen new -o "$storage_keypair_path"
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
cat <<EOF
======================[ $node_type configuration ]======================
replicator pubkey: $identity_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
======================================================================
EOF
program=$solana_replicator program=$solana_replicator
default_arg --entrypoint "$entrypoint_address" default_arg --entrypoint "$entrypoint_address"
default_arg --identity "$identity_keypair_path" default_arg --identity "$identity_keypair_path"
@ -300,6 +323,7 @@ EOF
default_arg --ledger "$ledger_config_dir" default_arg --ledger "$ledger_config_dir"
rsync_entrypoint_url=$(rsync_url "$entrypoint") rsync_entrypoint_url=$(rsync_url "$entrypoint")
elif [[ $node_type = bootstrap_leader ]]; then elif [[ $node_type = bootstrap_leader ]]; then
if [[ ${#positional_args[@]} -ne 0 ]]; then if [[ ${#positional_args[@]} -ne 0 ]]; then
fullnode_usage "Unknown argument: ${positional_args[0]}" fullnode_usage "Unknown argument: ${positional_args[0]}"
@ -311,9 +335,11 @@ elif [[ $node_type = bootstrap_leader ]]; then
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify $solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger verify
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/bootstrap-leader-keypair.json}" : "${identity_keypair_path:=$SOLANA_CONFIG_DIR/bootstrap-leader-keypair.json}"
vote_keypair_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-vote-keypair.json vote_keypair_path="$SOLANA_CONFIG_DIR"/bootstrap-leader-vote-keypair.json
ledger_config_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger ledger_config_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger
state_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-state state_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader-state
stake_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-stake-keypair.json
storage_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-storage-keypair.json storage_keypair_path=$SOLANA_CONFIG_DIR/bootstrap-leader-storage-keypair.json
configured_flag=$SOLANA_CONFIG_DIR/bootstrap-leader.configured configured_flag=$SOLANA_CONFIG_DIR/bootstrap-leader.configured
@ -332,19 +358,16 @@ elif [[ $node_type = validator ]]; then
shift "$shift" shift "$shift"
: "${identity_keypair_path:=$SOLANA_CONFIG_DIR/validator-keypair$label.json}" : "${identity_keypair_path:=$SOLANA_CONFIG_DIR/validator-keypair$label.json}"
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
vote_keypair_path=$SOLANA_CONFIG_DIR/validator-vote-keypair$label.json vote_keypair_path=$SOLANA_CONFIG_DIR/validator-vote-keypair$label.json
ledger_config_dir=$SOLANA_CONFIG_DIR/validator-ledger$label ledger_config_dir=$SOLANA_CONFIG_DIR/validator-ledger$label
state_dir="$SOLANA_CONFIG_DIR"/validator-state$label state_dir="$SOLANA_CONFIG_DIR"/validator-state$label
storage_keypair_path=$SOLANA_CONFIG_DIR/validator-storage-keypair$label.json
stake_keypair_path=$SOLANA_CONFIG_DIR/validator-stake-keypair$label.json stake_keypair_path=$SOLANA_CONFIG_DIR/validator-stake-keypair$label.json
storage_keypair_path=$SOLANA_CONFIG_DIR/validator-storage-keypair$label.json
configured_flag=$SOLANA_CONFIG_DIR/validator$label.configured configured_flag=$SOLANA_CONFIG_DIR/validator$label.configured
mkdir -p "$SOLANA_CONFIG_DIR"
[[ -r "$identity_keypair_path" ]] || $solana_keygen new -o "$identity_keypair_path"
[[ -r "$vote_keypair_path" ]] || $solana_keygen new -o "$vote_keypair_path"
[[ -r "$stake_keypair_path" ]] || $solana_keygen new -o "$stake_keypair_path"
[[ -r "$storage_keypair_path" ]] || $solana_keygen new -o "$storage_keypair_path"
default_arg --entrypoint "$entrypoint_address" default_arg --entrypoint "$entrypoint_address"
if ((airdrops_enabled)); then if ((airdrops_enabled)); then
default_arg --rpc-drone-address "${entrypoint_address%:*}:9900" default_arg --rpc-drone-address "${entrypoint_address%:*}:9900"
@ -356,29 +379,15 @@ else
exit 1 exit 1
fi fi
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
export SOLANA_METRICS_HOST_ID="$identity_pubkey"
if [[ $node_type != replicator ]]; then if [[ $node_type != replicator ]]; then
accounts_config_dir="$state_dir"/accounts accounts_config_dir="$state_dir"/accounts
snapshot_config_dir="$state_dir"/snapshots snapshot_config_dir="$state_dir"/snapshots
identity_pubkey=$($solana_keygen pubkey "$identity_keypair_path")
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
cat <<EOF
======================[ $node_type configuration ]======================
identity pubkey: $identity_pubkey
vote pubkey: $vote_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
accounts: $accounts_config_dir
snapshots: $snapshot_config_dir
========================================================================
EOF
default_arg --identity "$identity_keypair_path" default_arg --identity "$identity_keypair_path"
default_arg --voting-keypair "$vote_keypair_path" default_arg --voting-keypair "$vote_keypair_path"
default_arg --vote-account "$vote_pubkey"
default_arg --storage-keypair "$storage_keypair_path" default_arg --storage-keypair "$storage_keypair_path"
default_arg --ledger "$ledger_config_dir" default_arg --ledger "$ledger_config_dir"
default_arg --accounts "$accounts_config_dir" default_arg --accounts "$accounts_config_dir"
@ -397,72 +406,151 @@ if [[ -z $CI ]]; then # Skip in CI
fi fi
new_gensis_block() { new_gensis_block() {
(
set -x
$rsync -r "${rsync_entrypoint_url:?}"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR"
) || (
echo "Error: failed to rsync genesis ledger"
)
! diff -q "$SOLANA_RSYNC_CONFIG_DIR"/ledger/genesis.bin "$ledger_config_dir"/genesis.bin >/dev/null 2>&1 ! diff -q "$SOLANA_RSYNC_CONFIG_DIR"/ledger/genesis.bin "$ledger_config_dir"/genesis.bin >/dev/null 2>&1
} }
set -e set -e
PS4="$(basename "$0"): " PS4="$(basename "$0"): "
pid= pid=
trap '[[ -n $pid ]] && kill "$pid" >/dev/null 2>&1 && wait "$pid"' INT TERM ERR kill_fullnode() {
# Note: do not echo anything from this function to ensure $pid is actually
# killed when stdout/stderr are redirected
set +ex
if [[ -n $pid ]]; then
declare _pid=$pid
pid=
kill "$_pid" || true
wait "$_pid" || true
fi
exit
}
trap 'kill_fullnode' INT TERM ERR
if ((reset_ledger)); then
echo "Resetting ledger..."
(
set -x
rm -rf "$state_dir"
rm -rf "$ledger_config_dir"
)
if [[ -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ ]]; then
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
fi
fi
while true; do while true; do
if new_gensis_block; then if [[ $node_type != bootstrap_leader ]] && new_gensis_block; then
# If the genesis block has changed remove the now stale ledger and vote # If the genesis block has changed remove the now stale ledger and
# keypair for the node and start all over again # vote/stake/storage keypairs for the node and start all over again
( (
set -x set -x
rm -rf "$ledger_config_dir" "$state_dir" "$configured_flag" rm -rf "$ledger_config_dir" "$state_dir" "$configured_flag"
) )
if [[ $node_type = validator ]]; then
$solana_keygen new -f -o "$vote_keypair_path"
$solana_keygen new -f -o "$stake_keypair_path"
$solana_keygen new -f -o "$storage_keypair_path"
fi
if [[ $node_type = replicator ]]; then
$solana_keygen new -f -o "$storage_keypair_path"
fi
fi fi
if [[ ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then if [[ $node_type = replicator ]]; then
if [[ $node_type = bootstrap_leader ]]; then storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
setup_replicator_account "${entrypoint_address%:*}" \
"$node_lamports"
cat <<EOF
======================[ $node_type configuration ]======================
replicator pubkey: $identity_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
======================================================================
EOF
else
if [[ $node_type = bootstrap_leader && ! -d "$SOLANA_RSYNC_CONFIG_DIR"/ledger ]]; then
ledger_not_setup "$SOLANA_RSYNC_CONFIG_DIR/ledger does not exist" ledger_not_setup "$SOLANA_RSYNC_CONFIG_DIR/ledger does not exist"
elif [[ $node_type = validator ]]; then
(
SECONDS=0
set -x
cd "$SOLANA_RSYNC_CONFIG_DIR"
$rsync -qPr "${rsync_entrypoint_url:?}"/config/{ledger,state.tgz} .
echo "Fetched snapshot in $SECONDS seconds"
) || true
fi fi
fi
(
set -x
if [[ $node_type = validator ]]; then
if [[ -f "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz ]]; then
mkdir -p "$state_dir"
SECONDS=
tar -C "$state_dir" -zxf "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
echo "Extracted snapshot in $SECONDS seconds"
fi
fi
if [[ ! -d "$ledger_config_dir" ]]; then if [[ ! -d "$ledger_config_dir" ]]; then
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir" if [[ $node_type = validator ]]; then
fi (
) cd "$SOLANA_RSYNC_CONFIG_DIR"
if ((stake_lamports)); then echo "Rsyncing genesis ledger from ${rsync_entrypoint_url:?}..."
if [[ $node_type = validator ]]; then SECONDS=
while ! $rsync -Pr "${rsync_entrypoint_url:?}"/config/ledger .; do
echo "Genesis ledger rsync failed"
sleep 5
done
echo "Fetched genesis ledger in $SECONDS seconds"
if ((boot_from_snapshot)); then
SECONDS=
echo "Rsyncing state snapshot ${rsync_entrypoint_url:?}..."
if ! $rsync -P "${rsync_entrypoint_url:?}"/config/state.tgz .; then
echo "State snapshot rsync failed"
rm -f "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
exit
fi
echo "Fetched snapshot in $SECONDS seconds"
SECONDS=
mkdir -p "$state_dir"
(
set -x
tar -C "$state_dir" -zxf "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
)
echo "Extracted snapshot in $SECONDS seconds"
fi
)
fi
(
set -x
cp -a "$SOLANA_RSYNC_CONFIG_DIR"/ledger/ "$ledger_config_dir"
)
fi
vote_pubkey=$($solana_keygen pubkey "$vote_keypair_path")
stake_pubkey=$($solana_keygen pubkey "$stake_keypair_path")
storage_pubkey=$($solana_keygen pubkey "$storage_keypair_path")
replace_arg --vote-account "$vote_pubkey"
if [[ $node_type = validator ]] && ((stake_lamports)); then
setup_validator_accounts "${entrypoint_address%:*}" \ setup_validator_accounts "${entrypoint_address%:*}" \
"$identity_keypair_path" \
"$vote_keypair_path" \
"$stake_keypair_path" \
"$storage_keypair_path" \
"$node_lamports" \ "$node_lamports" \
"$stake_lamports" "$stake_lamports"
elif [[ $node_type = replicator ]]; then
setup_replicator_account "${entrypoint_address%:*}" \
"$identity_keypair_path" \
"$storage_keypair_path" \
"$node_lamports"
fi fi
cat <<EOF
======================[ $node_type configuration ]======================
identity pubkey: $identity_pubkey
vote pubkey: $vote_pubkey
stake pubkey: $stake_pubkey
storage pubkey: $storage_pubkey
ledger: $ledger_config_dir
accounts: $accounts_config_dir
snapshots: $snapshot_config_dir
========================================================================
EOF
fi fi
echo "$PS4$program ${args[*]}" echo "$PS4$program ${args[*]}"
$program "${args[@]}" & $program "${args[@]}" &
pid=$! pid=$!
echo "pid: $pid"
oom_score_adj "$pid" 1000 oom_score_adj "$pid" 1000
if ((no_restart)); then if ((no_restart)); then
@ -473,8 +561,8 @@ while true; do
secs_to_next_genesis_poll=5 secs_to_next_genesis_poll=5
secs_to_next_snapshot=30 secs_to_next_snapshot=30
while true; do while true; do
if ! kill -0 "$pid"; then if [[ -z $pid ]] || ! kill -0 "$pid"; then
wait "$pid" || true [[ -z $pid ]] || wait "$pid"
echo "############## $node_type exited, restarting ##############" echo "############## $node_type exited, restarting ##############"
break break
fi fi
@ -488,9 +576,15 @@ while true; do
new_state_archive="$SOLANA_RSYNC_CONFIG_DIR"/new_state.tgz new_state_archive="$SOLANA_RSYNC_CONFIG_DIR"/new_state.tgz
( (
rm -rf "$new_state_dir" "$new_state_archive" rm -rf "$new_state_dir" "$new_state_archive"
cp -a "$state_dir" "$new_state_dir" mkdir -p "$new_state_dir"
# When saving the state, its necessary to have the snapshots be saved first
# followed by the accounts folder. This would avoid conditions where incomplete
# accounts gets picked while its still in the process of being updated and are
# not frozen yet.
cp -a "$state_dir"/snapshots "$new_state_dir"
cp -a "$state_dir"/accounts "$new_state_dir"
cd "$new_state_dir" cd "$new_state_dir"
tar zcf "$new_state_archive" ./* tar zcfS "$new_state_archive" ./*
) )
ln -f "$new_state_archive" "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz ln -f "$new_state_archive" "$SOLANA_RSYNC_CONFIG_DIR"/state.tgz
rm -rf "$new_state_dir" "$new_state_archive" rm -rf "$new_state_dir" "$new_state_archive"
@ -504,21 +598,16 @@ while true; do
if ((poll_for_new_genesis_block && --secs_to_next_genesis_poll == 0)); then if ((poll_for_new_genesis_block && --secs_to_next_genesis_poll == 0)); then
echo "Polling for new genesis block..." echo "Polling for new genesis block..."
( if new_gensis_block; then
set -x echo "############## New genesis detected, restarting $node_type ##############"
$rsync -r "${rsync_entrypoint_url:?}"/config/ledger "$SOLANA_RSYNC_CONFIG_DIR" break
) || ( fi
echo "Error: failed to rsync ledger"
)
new_gensis_block && break
secs_to_next_genesis_poll=60 secs_to_next_genesis_poll=60
fi fi
done done
echo "############## New genesis detected, restarting $node_type ##############" kill_fullnode
kill "$pid" || true
wait "$pid" || true
# give the cluster time to come back up # give the cluster time to come back up
( (
set -x set -x

View File

@ -3,6 +3,7 @@
here=$(dirname "$0") here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh # shellcheck source=multinode-demo/common.sh
source "$here"/common.sh source "$here"/common.sh
setup_secondary_mount
set -e set -e
"$here"/clear-config.sh "$here"/clear-config.sh
@ -23,7 +24,6 @@ default_arg --ledger "$SOLANA_RSYNC_CONFIG_DIR"/ledger
default_arg --mint "$SOLANA_CONFIG_DIR"/mint-keypair.json default_arg --mint "$SOLANA_CONFIG_DIR"/mint-keypair.json
default_arg --lamports 100000000000000 default_arg --lamports 100000000000000
default_arg --bootstrap-leader-lamports 424242 default_arg --bootstrap-leader-lamports 424242
default_arg --storage-mining-pool-lamports 100000000
default_arg --target-lamports-per-signature 42 default_arg --target-lamports-per-signature 42
default_arg --target-signatures-per-slot 42 default_arg --target-signatures-per-slot 42
default_arg --hashes-per-tick auto default_arg --hashes-per-tick auto

View File

@ -25,6 +25,7 @@ entrypointIp=
publicNetwork= publicNetwork=
netBasename= netBasename=
sshPrivateKey= sshPrivateKey=
letsEncryptDomainName=
externalNodeSshKey= externalNodeSshKey=
sshOptions=() sshOptions=()
fullnodeIpList=() fullnodeIpList=()

View File

@ -63,10 +63,12 @@ blockstreamer=false
fullNodeBootDiskSizeInGb=1000 fullNodeBootDiskSizeInGb=1000
clientBootDiskSizeInGb=75 clientBootDiskSizeInGb=75
replicatorBootDiskSizeInGb=1000 replicatorBootDiskSizeInGb=1000
fullNodeAdditionalDiskSizeInGb=
externalNodes=false externalNodes=false
failOnValidatorBootupFailure=true failOnValidatorBootupFailure=true
publicNetwork=false publicNetwork=false
letsEncryptDomainName=
enableGpu=false enableGpu=false
customAddress= customAddress=
zones=() zones=()
@ -122,7 +124,13 @@ Manage testnet instances
* For EC2, [address] is the "allocation ID" of the desired * For EC2, [address] is the "allocation ID" of the desired
Elastic IP. Elastic IP.
-d [disk-type] - Specify a boot disk type (default None) Use pd-ssd to get ssd on GCE. -d [disk-type] - Specify a boot disk type (default None) Use pd-ssd to get ssd on GCE.
--letsencrypt [dns name] - Attempt to generate a TLS certificate using this
DNS name (useful only when the -a and -P options
are also provided)
--fullnode-additional-disk-size-gb [number]
- Add an additional [number] GB SSD to all fullnodes to store the config-local directory.
If not set, config-local will be written to the boot disk by default.
Only supported on GCE.
config-specific options: config-specific options:
-P - Use public network IP addresses (default: $publicNetwork) -P - Use public network IP addresses (default: $publicNetwork)
@ -136,14 +144,34 @@ EOF
exit $exitcode exit $exitcode
} }
command=$1 command=$1
[[ -n $command ]] || usage [[ -n $command ]] || usage
shift shift
[[ $command = create || $command = config || $command = info || $command = delete ]] || [[ $command = create || $command = config || $command = info || $command = delete ]] ||
usage "Invalid command: $command" usage "Invalid command: $command"
while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt; do shortArgs=()
while [[ -n $1 ]]; do
if [[ ${1:0:2} = -- ]]; then
if [[ $1 = --letsencrypt ]]; then
letsEncryptDomainName="$2"
shift 2
elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
fullNodeAdditionalDiskSizeInGb="$2"
shift 2
elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
shortArgs+=("$1")
shift
else
usage "Unknown long option: $1"
fi
else
shortArgs+=("$1")
shift
fi
done
while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt "${shortArgs[@]}"; do
case $opt in case $opt in
h | \?) h | \?)
usage usage
@ -199,7 +227,6 @@ while getopts "h?p:Pn:c:r:z:gG:a:d:uxf" opt; do
;; ;;
esac esac
done done
shift $((OPTIND - 1))
[[ ${#zones[@]} -gt 0 ]] || zones+=("$(cloud_DefaultZone)") [[ ${#zones[@]} -gt 0 ]] || zones+=("$(cloud_DefaultZone)")
@ -217,8 +244,14 @@ case $cloudProvider in
gce) gce)
;; ;;
ec2) ec2)
if [[ -n $fullNodeAdditionalDiskSizeInGb ]] ; then
usage "Error: --fullnode-additional-disk-size-gb currently only supported with cloud provider: gce"
fi
;; ;;
azure) azure)
if [[ -n $fullNodeAdditionalDiskSizeInGb ]] ; then
usage "Error: --fullnode-additional-disk-size-gb currently only supported with cloud provider: gce"
fi
;; ;;
*) *)
echo "Error: Unknown cloud provider: $cloudProvider" echo "Error: Unknown cloud provider: $cloudProvider"
@ -328,6 +361,7 @@ prepareInstancesAndWriteConfigFile() {
netBasename=$prefix netBasename=$prefix
publicNetwork=$publicNetwork publicNetwork=$publicNetwork
sshPrivateKey=$sshPrivateKey sshPrivateKey=$sshPrivateKey
letsEncryptDomainName=$letsEncryptDomainName
EOF EOF
fi fi
touch "$geoipConfigFile" touch "$geoipConfigFile"
@ -598,6 +632,7 @@ $(
disable-background-upgrades.sh \ disable-background-upgrades.sh \
create-solana-user.sh \ create-solana-user.sh \
add-solana-user-authorized_keys.sh \ add-solana-user-authorized_keys.sh \
install-certbot.sh \
install-earlyoom.sh \ install-earlyoom.sh \
install-libssl-compatability.sh \ install-libssl-compatability.sh \
install-nodejs.sh \ install-nodejs.sh \
@ -611,6 +646,10 @@ $(
cat enable-nvidia-persistence-mode.sh cat enable-nvidia-persistence-mode.sh
fi fi
if [[ -n $fullNodeAdditionalDiskSizeInGb ]]; then
cat mount-additional-disk.sh
fi
) )
cat > /etc/motd <<EOM cat > /etc/motd <<EOM
@ -637,7 +676,7 @@ EOF
else else
cloud_CreateInstances "$prefix" "$prefix-bootstrap-leader" 1 \ cloud_CreateInstances "$prefix" "$prefix-bootstrap-leader" 1 \
"$enableGpu" "$bootstrapLeaderMachineType" "${zones[0]}" "$fullNodeBootDiskSizeInGb" \ "$enableGpu" "$bootstrapLeaderMachineType" "${zones[0]}" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "$bootstrapLeaderAddress" "$bootDiskType" "$startupScript" "$bootstrapLeaderAddress" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb"
fi fi
if [[ $additionalFullNodeCount -gt 0 ]]; then if [[ $additionalFullNodeCount -gt 0 ]]; then
@ -657,7 +696,7 @@ EOF
fi fi
cloud_CreateInstances "$prefix" "$prefix-$zone-fullnode" "$numNodesPerZone" \ cloud_CreateInstances "$prefix" "$prefix-$zone-fullnode" "$numNodesPerZone" \
"$enableGpu" "$fullNodeMachineType" "$zone" "$fullNodeBootDiskSizeInGb" \ "$enableGpu" "$fullNodeMachineType" "$zone" "$fullNodeBootDiskSizeInGb" \
"$startupScript" "" "$bootDiskType" & "$startupScript" "" "$bootDiskType" "$fullNodeAdditionalDiskSizeInGb" &
done done
wait wait

View File

@ -24,6 +24,8 @@ Operate a configured testnet
restart - Shortcut for stop then start restart - Shortcut for stop then start
update - Live update all network nodes update - Live update all network nodes
logs - Fetch remote logs from each network node logs - Fetch remote logs from each network node
startnode- Start an individual node (previously stopped with stopNode)
stopnode - Stop an individual node
start/update-specific options: start/update-specific options:
-T [tarFilename] - Deploy the specified release tarball -T [tarFilename] - Deploy the specified release tarball
@ -50,16 +52,30 @@ Operate a configured testnet
-c bench-tps=2="--tx_count 25000" -c bench-tps=2="--tx_count 25000"
This will start 2 bench-tps clients, and supply "--tx_count 25000" This will start 2 bench-tps clients, and supply "--tx_count 25000"
to the bench-tps client. to the bench-tps client.
-n NUM_FULL_NODES - Number of fullnodes to apply command to.
--hashes-per-tick NUM_HASHES|sleep|auto --hashes-per-tick NUM_HASHES|sleep|auto
- Override the default --hashes-per-tick for the cluster - Override the default --hashes-per-tick for the cluster
-n NUM_FULL_NODES - Number of fullnodes to apply command to. --no-airdrop
- If set, disables airdrops. Nodes must be funded in genesis block when airdrops are disabled.
--lamports NUM_LAMPORTS_TO_MINT
- Override the default 100000000000000 lamports minted in genesis
--internal-nodes-stake-lamports NUM_LAMPORTS_PER_NODE
- Amount to stake internal nodes.
--internal-nodes-lamports NUM_LAMPORTS_PER_NODE
- Amount to fund internal nodes in genesis block.
--external-accounts-file FILE_PATH
- A YML file with a list of account pubkeys and corresponding lamport balances in genesis block for external nodes
--no-snapshot
- If set, disables booting validators from a snapshot
--skip-ledger-verify
- If set, validators will skip verifying
the ledger they already have saved to disk at
boot (results in a much faster boot)
--no-deploy
- Don't deploy new software, use the
existing deployment
-x Accounts and Stakes for external nodes
- A YML file with a list of account pubkeys and corresponding stakes
for external nodes
-s Num lamports per node in genesis block
- Create account keypairs for internal nodes and assign these many lamports
sanity/start/update-specific options: sanity/start/update-specific options:
-F - Discard validator nodes that didn't bootup successfully -F - Discard validator nodes that didn't bootup successfully
@ -74,6 +90,9 @@ Operate a configured testnet
logs-specific options: logs-specific options:
none none
startnode/stopnode-specific options:
-i [ip address] - IP Address of the node to start or stop
Note: if RUST_LOG is set in the environment it will be propogated into the Note: if RUST_LOG is set in the environment it will be propogated into the
network nodes. network nodes.
EOF EOF
@ -88,6 +107,7 @@ skipSetup=false
updateNodes=false updateNodes=false
customPrograms= customPrograms=
updatePlatforms= updatePlatforms=
nodeAddress=
numBenchTpsClients=0 numBenchTpsClients=0
numBenchExchangeClients=0 numBenchExchangeClients=0
benchTpsExtraArgs= benchTpsExtraArgs=
@ -96,7 +116,12 @@ failOnValidatorBootupFailure=true
genesisOptions= genesisOptions=
numFullnodesRequested= numFullnodesRequested=
externalPrimordialAccountsFile= externalPrimordialAccountsFile=
stakeNodesInGenesisBlock= remoteExternalPrimordialAccountsFile=
internalNodesStakeLamports=
internalNodesLamports=
maybeNoSnapshot=""
maybeSkipLedgerVerify=""
maybeDisableAirdrops=""
command=$1 command=$1
[[ -n $command ]] || usage [[ -n $command ]] || usage
@ -111,9 +136,34 @@ while [[ -n $1 ]]; do
elif [[ $1 = --target-lamports-per-signature ]]; then elif [[ $1 = --target-lamports-per-signature ]]; then
genesisOptions="$genesisOptions $1 $2" genesisOptions="$genesisOptions $1 $2"
shift 2 shift 2
elif [[ $1 = --lamports ]]; then
genesisOptions="$genesisOptions $1 $2"
shift 2
elif [[ $1 = --no-snapshot ]]; then
maybeNoSnapshot="$1"
shift 1
elif [[ $1 = --no-deploy ]]; then
deployMethod=skip
shift 1
elif [[ $1 = --skip-ledger-verify ]]; then
maybeSkipLedgerVerify="$1"
shift 1
elif [[ $1 = --deploy-update ]]; then elif [[ $1 = --deploy-update ]]; then
updatePlatforms="$updatePlatforms $2" updatePlatforms="$updatePlatforms $2"
shift 2 shift 2
elif [[ $1 = --internal-nodes-stake-lamports ]]; then
internalNodesStakeLamports="$2"
shift 2
elif [[ $1 = --internal-nodes-lamports ]]; then
internalNodesLamports="$2"
shift 2
elif [[ $1 = --external-accounts-file ]]; then
externalPrimordialAccountsFile="$2"
remoteExternalPrimordialAccountsFile=/tmp/external-primordial-accounts.yml
shift 2
elif [[ $1 = --no-airdrop ]]; then
maybeDisableAirdrops="$1"
shift 1
else else
usage "Unknown long option: $1" usage "Unknown long option: $1"
fi fi
@ -123,7 +173,7 @@ while [[ -n $1 ]]; do
fi fi
done done
while getopts "h?T:t:o:f:rD:c:Fn:i:x:s:" opt "${shortArgs[@]}"; do while getopts "h?T:t:o:f:rD:c:Fn:i:" opt "${shortArgs[@]}"; do
case $opt in case $opt in
h | \?) h | \?)
usage usage
@ -202,12 +252,6 @@ while getopts "h?T:t:o:f:rD:c:Fn:i:x:s:" opt "${shortArgs[@]}"; do
F) F)
failOnValidatorBootupFailure=false failOnValidatorBootupFailure=false
;; ;;
x)
externalPrimordialAccountsFile=$OPTARG
;;
s)
stakeNodesInGenesisBlock=$OPTARG
;;
i) i)
nodeAddress=$OPTARG nodeAddress=$OPTARG
;; ;;
@ -310,8 +354,8 @@ startCommon() {
startBootstrapLeader() { startBootstrapLeader() {
declare ipAddress=$1 declare ipAddress=$1
declare logFile="$2" declare nodeIndex="$2"
declare nodeIndex="$3" declare logFile="$3"
echo "--- Starting bootstrap leader: $ipAddress" echo "--- Starting bootstrap leader: $ipAddress"
echo "start log: $logFile" echo "start log: $logFile"
@ -321,7 +365,7 @@ startBootstrapLeader() {
set -x set -x
startCommon "$ipAddress" || exit 1 startCommon "$ipAddress" || exit 1
[[ -z "$externalPrimordialAccountsFile" ]] || rsync -vPrc -e "ssh ${sshOptions[*]}" "$externalPrimordialAccountsFile" \ [[ -z "$externalPrimordialAccountsFile" ]] || rsync -vPrc -e "ssh ${sshOptions[*]}" "$externalPrimordialAccountsFile" \
"$ipAddress:~/solana/config/external-primodial-accounts.yml" "$ipAddress:$remoteExternalPrimordialAccountsFile"
case $deployMethod in case $deployMethod in
tar) tar)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/" rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
@ -329,11 +373,14 @@ startBootstrapLeader() {
local) local)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/" rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
;; ;;
skip)
;;
*) *)
usage "Internal error: invalid deployMethod: $deployMethod" usage "Internal error: invalid deployMethod: $deployMethod"
;; ;;
esac esac
# shellcheck disable=SC2086 # Don't want to double quote "$maybeNoSnapshot $maybeSkipLedgerVerify"
ssh "${sshOptions[@]}" -n "$ipAddress" \ ssh "${sshOptions[@]}" -n "$ipAddress" \
"./solana/net/remote/remote-node.sh \ "./solana/net/remote/remote-node.sh \
$deployMethod \ $deployMethod \
@ -343,12 +390,15 @@ startBootstrapLeader() {
\"$RUST_LOG\" \ \"$RUST_LOG\" \
$skipSetup \ $skipSetup \
$failOnValidatorBootupFailure \ $failOnValidatorBootupFailure \
\"$externalPrimordialAccountsFile\" \ \"$remoteExternalPrimordialAccountsFile\" \
\"$stakeNodesInGenesisBlock\" \ \"$maybeDisableAirdrops\" \
\"$internalNodesStakeLamports\" \
\"$internalNodesLamports\" \
$nodeIndex \ $nodeIndex \
$numBenchTpsClients \"$benchTpsExtraArgs\" \ $numBenchTpsClients \"$benchTpsExtraArgs\" \
$numBenchExchangeClients \"$benchExchangeExtraArgs\" \ $numBenchExchangeClients \"$benchExchangeExtraArgs\" \
\"$genesisOptions\" \ \"$genesisOptions\" \
"$maybeNoSnapshot $maybeSkipLedgerVerify" \
" "
) >> "$logFile" 2>&1 || { ) >> "$logFile" 2>&1 || {
cat "$logFile" cat "$logFile"
@ -363,11 +413,38 @@ startNode() {
declare nodeIndex="$3" declare nodeIndex="$3"
declare logFile="$netLogDir/fullnode-$ipAddress.log" declare logFile="$netLogDir/fullnode-$ipAddress.log"
if [[ -z $nodeType ]]; then
echo nodeType not specified
exit 1
fi
if [[ -z $nodeIndex ]]; then
echo nodeIndex not specified
exit 1
fi
echo "--- Starting $nodeType: $ipAddress" echo "--- Starting $nodeType: $ipAddress"
echo "start log: $logFile" echo "start log: $logFile"
( (
set -x set -x
startCommon "$ipAddress" startCommon "$ipAddress"
if [[ $nodeType = blockstreamer ]] && [[ -n $letsEncryptDomainName ]]; then
#
# Create/renew TLS certificate
#
declare localArchive=~/letsencrypt-"$letsEncryptDomainName".tgz
if [[ -r "$localArchive" ]]; then
timeout 30s scp "${sshOptions[@]}" "$localArchive" "$ipAddress:letsencrypt.tgz"
fi
ssh "${sshOptions[@]}" -n "$ipAddress" \
"sudo -H /certbot-restore.sh $letsEncryptDomainName maintainers@solana.com"
rm -f letsencrypt.tgz
timeout 30s scp "${sshOptions[@]}" "$ipAddress:/letsencrypt.tgz" letsencrypt.tgz
test -s letsencrypt.tgz # Ensure non-empty before overwriting $localArchive
cp letsencrypt.tgz "$localArchive"
fi
ssh "${sshOptions[@]}" -n "$ipAddress" \ ssh "${sshOptions[@]}" -n "$ipAddress" \
"./solana/net/remote/remote-node.sh \ "./solana/net/remote/remote-node.sh \
$deployMethod \ $deployMethod \
@ -377,10 +454,15 @@ startNode() {
\"$RUST_LOG\" \ \"$RUST_LOG\" \
$skipSetup \ $skipSetup \
$failOnValidatorBootupFailure \ $failOnValidatorBootupFailure \
\"$externalPrimordialAccountsFile\" \ \"$remoteExternalPrimordialAccountsFile\" \
\"$stakeNodesInGenesisBlock\" \ \"$maybeDisableAirdrops\" \
\"$internalNodesStakeLamports\" \
\"$internalNodesLamports\" \
$nodeIndex \ $nodeIndex \
$numBenchTpsClients \"$benchTpsExtraArgs\" \
$numBenchExchangeClients \"$benchExchangeExtraArgs\" \
\"$genesisOptions\" \ \"$genesisOptions\" \
\"$maybeNoSnapshot $maybeSkipLedgerVerify\" \
" "
) >> "$logFile" 2>&1 & ) >> "$logFile" 2>&1 &
declare pid=$! declare pid=$!
@ -466,7 +548,35 @@ deployUpdate() {
) || ok=false ) || ok=false
$ok || exit 1 $ok || exit 1
done done
}
getNodeType() {
echo "getNodeType: $nodeAddress"
[[ -n $nodeAddress ]] || {
echo "Error: nodeAddress not set"
exit 1
}
nodeIndex=0 # <-- global
nodeType=validator # <-- global
for ipAddress in "${fullnodeIpList[@]}" b "${blockstreamerIpList[@]}" r "${replicatorIpList[@]}"; do
if [[ $ipAddress = b ]]; then
nodeType=blockstreamer
continue
elif [[ $ipAddress = r ]]; then
nodeType=replicator
continue
fi
if [[ $ipAddress = "$nodeAddress" ]]; then
echo "getNodeType: $nodeType ($nodeIndex)"
return
fi
((nodeIndex = nodeIndex + 1))
done
echo "Error: Unknown node: $nodeAddress"
exit 1
} }
start() { start() {
@ -477,7 +587,8 @@ start() {
declare updateDownloadUrl=http://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2 declare updateDownloadUrl=http://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
( (
set -x set -x
curl -o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl" curl --retry 5 --retry-delay 2 --retry-connrefused \
-o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl"
) )
tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2 tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2
else else
@ -496,6 +607,8 @@ start() {
local) local)
build build
;; ;;
skip)
;;
*) *)
usage "Internal error: invalid deployMethod: $deployMethod" usage "Internal error: invalid deployMethod: $deployMethod"
;; ;;
@ -509,23 +622,14 @@ start() {
fi fi
declare bootstrapLeader=true declare bootstrapLeader=true
declare nodeType=validator for nodeAddress in "${fullnodeIpList[@]}" "${blockstreamerIpList[@]}" "${replicatorIpList[@]}"; do
declare loopCount=0 nodeType=
for ipAddress in "${fullnodeIpList[@]}" b "${blockstreamerIpList[@]}" r "${replicatorIpList[@]}"; do nodeIndex=
if [[ $ipAddress = b ]]; then getNodeType
nodeType=blockstreamer
continue
elif [[ $ipAddress = r ]]; then
nodeType=replicator
continue
fi
if $updateNodes; then
stopNode "$ipAddress" true
fi
if $bootstrapLeader; then if $bootstrapLeader; then
SECONDS=0 SECONDS=0
declare bootstrapNodeDeployTime= declare bootstrapNodeDeployTime=
startBootstrapLeader "$ipAddress" "$netLogDir/bootstrap-leader-$ipAddress.log" $loopCount startBootstrapLeader "$nodeAddress" $nodeIndex "$netLogDir/bootstrap-leader-$ipAddress.log"
bootstrapNodeDeployTime=$SECONDS bootstrapNodeDeployTime=$SECONDS
$metricsWriteDatapoint "testnet-deploy net-bootnode-leader-started=1" $metricsWriteDatapoint "testnet-deploy net-bootnode-leader-started=1"
@ -533,15 +637,18 @@ start() {
SECONDS=0 SECONDS=0
pids=() pids=()
else else
startNode "$ipAddress" $nodeType $loopCount startNode "$ipAddress" $nodeType $nodeIndex
# Stagger additional node start time. If too many nodes start simultaneously # Stagger additional node start time. If too many nodes start simultaneously
# the bootstrap node gets more rsync requests from the additional nodes than # the bootstrap node gets more rsync requests from the additional nodes than
# it can handle. # it can handle.
((loopCount++ % 2 == 0)) && sleep 2 if ((nodeIndex % 2 == 0)); then
sleep 2
fi
fi fi
done done
for pid in "${pids[@]}"; do for pid in "${pids[@]}"; do
declare ok=true declare ok=true
wait "$pid" || ok=false wait "$pid" || ok=false
@ -597,6 +704,8 @@ start() {
local) local)
networkVersion="$(git rev-parse HEAD || echo local-unknown)" networkVersion="$(git rev-parse HEAD || echo local-unknown)"
;; ;;
skip)
;;
*) *)
usage "Internal error: invalid deployMethod: $deployMethod" usage "Internal error: invalid deployMethod: $deployMethod"
;; ;;
@ -691,10 +800,21 @@ stop)
stop stop
;; ;;
stopnode) stopnode)
if [[ -z $nodeAddress ]]; then
usage "node address (-i) not specified"
exit 1
fi
stopNode "$nodeAddress" true stopNode "$nodeAddress" true
;; ;;
startnode) startnode)
startNode "$nodeAddress" validator if [[ -z $nodeAddress ]]; then
usage "node address (-i) not specified"
exit 1
fi
nodeType=
nodeIndex=
getNodeType
startNode "$nodeAddress" $nodeType $nodeIndex
;; ;;
logs) logs)
fetchRemoteLog() { fetchRemoteLog() {

View File

@ -41,6 +41,8 @@ local|tar)
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/ net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/
;; ;;
skip)
;;
*) *)
echo "Unknown deployment method: $deployMethod" echo "Unknown deployment method: $deployMethod"
exit 1 exit 1

View File

@ -34,4 +34,6 @@ loadConfigFile
PATH="$HOME"/.cargo/bin:"$PATH" PATH="$HOME"/.cargo/bin:"$PATH"
set -x set -x
scripts/solana-install-deploy.sh localhost "$releaseChannel" "$updatePlatform" scripts/solana-install-deploy.sh \
--keypair config-local/mint-keypair.json \
localhost "$releaseChannel" "$updatePlatform"

View File

@ -12,13 +12,16 @@ RUST_LOG="$5"
skipSetup="$6" skipSetup="$6"
failOnValidatorBootupFailure="$7" failOnValidatorBootupFailure="$7"
externalPrimordialAccountsFile="$8" externalPrimordialAccountsFile="$8"
stakeNodesInGenesisBlock="$9" maybeDisableAirdrops="$9"
nodeIndex="${10}" internalNodesStakeLamports="${10}"
numBenchTpsClients="${11}" internalNodesLamports="${11}"
benchTpsExtraArgs="${12}" nodeIndex="${12}"
numBenchExchangeClients="${13}" numBenchTpsClients="${13}"
benchExchangeExtraArgs="${14}" benchTpsExtraArgs="${14}"
genesisOptions="${15}" numBenchExchangeClients="${15}"
benchExchangeExtraArgs="${16}"
genesisOptions="${17}"
extraNodeArgs="${18}"
set +x set +x
export RUST_LOG export RUST_LOG
@ -28,7 +31,7 @@ export RUST_LOG
# trouble # trouble
# #
# Ref: https://github.com/solana-labs/solana/issues/3798 # Ref: https://github.com/solana-labs/solana/issues/3798
stake=424243 stake=${internalNodesStakeLamports:=424243}
missing() { missing() {
echo "Error: $1 not specified" echo "Error: $1 not specified"
@ -43,7 +46,7 @@ missing() {
[[ -n $failOnValidatorBootupFailure ]] || missing failOnValidatorBootupFailure [[ -n $failOnValidatorBootupFailure ]] || missing failOnValidatorBootupFailure
airdropsEnabled=true airdropsEnabled=true
if [[ -n $stakeNodesInGenesisBlock ]]; then if [[ -n $maybeDisableAirdrops ]]; then
airdropsEnabled=false airdropsEnabled=false
fi fi
cat > deployConfig <<EOF cat > deployConfig <<EOF
@ -75,10 +78,9 @@ waitForNodeToInit() {
} }
case $deployMethod in case $deployMethod in
local|tar) local|tar|skip)
PATH="$HOME"/.cargo/bin:"$PATH" PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1 export USE_INSTALL=1
export SOLANA_METRICS_DISPLAY_HOSTNAME=1
# Setup `/var/snap/solana/current` symlink so rsyncing the genesis # Setup `/var/snap/solana/current` symlink so rsyncing the genesis
# ledger works (reference: `net/scripts/install-rsync.sh`) # ledger works (reference: `net/scripts/install-rsync.sh`)
@ -92,7 +94,7 @@ local|tar)
SUDO_OK=1 source scripts/tune-system.sh SUDO_OK=1 source scripts/tune-system.sh
( (
sudo scripts/oom-monitor.sh sudo SOLANA_METRICS_CONFIG="$SOLANA_METRICS_CONFIG" scripts/oom-monitor.sh
) > oom-monitor.log 2>&1 & ) > oom-monitor.log 2>&1 &
echo $! > oom-monitor.pid echo $! > oom-monitor.pid
scripts/net-stats.sh > net-stats.log 2>&1 & scripts/net-stats.sh > net-stats.log 2>&1 &
@ -105,60 +107,64 @@ local|tar)
export SOLANA_CUDA=1 export SOLANA_CUDA=1
fi fi
set -x set -x
rm -rf ./solana-node-keys
rm -rf ./solana-node-stakes
mkdir ./solana-node-stakes
if [[ -n $stakeNodesInGenesisBlock ]]; then
for i in $(seq 0 "$numNodes"); do
solana-keygen new -o ./solana-node-keys/"$i"
pubkey="$(solana-keygen pubkey ./solana-node-keys/"$i")"
echo "${pubkey}: $stakeNodesInGenesisBlock" >> ./solana-node-stakes/fullnode-stakes.yml
done
fi
lamports_per_signature="42"
# shellcheck disable=SC2206 # Do not want to quote $genesisOptions
genesis_args=($genesisOptions)
for i in "${!genesis_args[@]}"; do
if [[ "${genesis_args[$i]}" = --target-lamports-per-signature ]]; then
lamports_per_signature="${genesis_args[$((i+1))]}"
break
fi
done
rm -rf ./solana-client-accounts
mkdir ./solana-client-accounts
for i in $(seq 0 $((numBenchTpsClients-1))); do
# shellcheck disable=SC2086 # Do not want to quote $benchTpsExtraArgs
solana-bench-tps --write-client-keys ./solana-client-accounts/bench-tps"$i".yml \
--target-lamports-per-signature "$lamports_per_signature" $benchTpsExtraArgs
# Skip first line, as it contains header
tail -n +2 -q ./solana-client-accounts/bench-tps"$i".yml >> ./solana-client-accounts/client-accounts.yml
echo "" >> ./solana-client-accounts/client-accounts.yml
done
for i in $(seq 0 $((numBenchExchangeClients-1))); do
# shellcheck disable=SC2086 # Do not want to quote $benchExchangeExtraArgs
solana-bench-exchange --batch-size 1000 --fund-amount 20000 \
--write-client-keys ./solana-client-accounts/bench-exchange"$i".yml $benchExchangeExtraArgs
tail -n +2 -q ./solana-client-accounts/bench-exchange"$i".yml >> ./solana-client-accounts/client-accounts.yml
echo "" >> ./solana-client-accounts/client-accounts.yml
done
[[ -z $externalPrimordialAccountsFile ]] || cat "$externalPrimordialAccountsFile" >> ./solana-node-stakes/fullnode-stakes.yml
if [ -f ./solana-node-stakes/fullnode-stakes.yml ]; then
genesisOptions+=" --primordial-accounts-file ./solana-node-stakes/fullnode-stakes.yml"
fi
if [ -f ./solana-client-accounts/client-accounts.yml ]; then
genesisOptions+=" --primordial-keypairs-file ./solana-client-accounts/client-accounts.yml"
fi
if [[ $skipSetup != true ]]; then if [[ $skipSetup != true ]]; then
rm -rf ./solana-node-keys
rm -rf ./solana-node-balances
mkdir ./solana-node-balances
if [[ -n $internalNodesLamports ]]; then
for i in $(seq 0 "$numNodes"); do
solana-keygen new -o ./solana-node-keys/"$i"
pubkey="$(solana-keygen pubkey ./solana-node-keys/"$i")"
echo "${pubkey}: $internalNodesLamports" >> ./solana-node-balances/fullnode-balances.yml
done
fi
lamports_per_signature="42"
# shellcheck disable=SC2206 # Do not want to quote $genesisOptions
genesis_args=($genesisOptions)
for i in "${!genesis_args[@]}"; do
if [[ "${genesis_args[$i]}" = --target-lamports-per-signature ]]; then
lamports_per_signature="${genesis_args[$((i+1))]}"
break
fi
done
rm -rf ./solana-client-accounts
mkdir ./solana-client-accounts
for i in $(seq 0 $((numBenchTpsClients-1))); do
# shellcheck disable=SC2086 # Do not want to quote $benchTpsExtraArgs
solana-bench-tps --write-client-keys ./solana-client-accounts/bench-tps"$i".yml \
--target-lamports-per-signature "$lamports_per_signature" $benchTpsExtraArgs
# Skip first line, as it contains header
tail -n +2 -q ./solana-client-accounts/bench-tps"$i".yml >> ./solana-client-accounts/client-accounts.yml
echo "" >> ./solana-client-accounts/client-accounts.yml
done
for i in $(seq 0 $((numBenchExchangeClients-1))); do
# shellcheck disable=SC2086 # Do not want to quote $benchExchangeExtraArgs
solana-bench-exchange --batch-size 1000 --fund-amount 20000 \
--write-client-keys ./solana-client-accounts/bench-exchange"$i".yml $benchExchangeExtraArgs
tail -n +2 -q ./solana-client-accounts/bench-exchange"$i".yml >> ./solana-client-accounts/client-accounts.yml
echo "" >> ./solana-client-accounts/client-accounts.yml
done
[[ -z $externalPrimordialAccountsFile ]] || cat "$externalPrimordialAccountsFile" >> ./solana-node-balances/fullnode-balances.yml
if [ -f ./solana-node-balances/fullnode-balances.yml ]; then
genesisOptions+=" --primordial-accounts-file ./solana-node-balances/fullnode-balances.yml"
fi
if [ -f ./solana-client-accounts/client-accounts.yml ]; then
genesisOptions+=" --primordial-keypairs-file ./solana-client-accounts/client-accounts.yml"
fi
args=( args=(
--bootstrap-leader-stake-lamports "$stake" --bootstrap-leader-stake-lamports "$stake"
) )
if [[ -n $internalNodesLamports ]]; then
args+=(--bootstrap-leader-lamports "$internalNodesLamports")
fi
# shellcheck disable=SC2206 # Do not want to quote $genesisOptions # shellcheck disable=SC2206 # Do not want to quote $genesisOptions
args+=($genesisOptions) args+=($genesisOptions)
./multinode-demo/setup.sh "${args[@]}" ./multinode-demo/setup.sh "${args[@]}"
fi fi
if [[ -z $stakeNodesInGenesisBlock ]]; then if [[ $airdropsEnabled = true ]]; then
./multinode-demo/drone.sh > drone.log 2>&1 & ./multinode-demo/drone.sh > drone.log 2>&1 &
fi fi
args=( args=(
@ -166,18 +172,24 @@ local|tar)
--gossip-port "$entrypointIp":8001 --gossip-port "$entrypointIp":8001
) )
if [[ -n $stakeNodesInGenesisBlock ]]; then if [[ $airdropsEnabled != true ]]; then
args+=(--no-airdrop) args+=(--no-airdrop)
fi fi
args+=(--init-complete-file "$initCompleteFile") args+=(--init-complete-file "$initCompleteFile")
# shellcheck disable=SC2206 # Don't want to double quote $extraNodeArgs
args+=($extraNodeArgs)
nohup ./multinode-demo/validator.sh --bootstrap-leader "${args[@]}" > fullnode.log 2>&1 & nohup ./multinode-demo/validator.sh --bootstrap-leader "${args[@]}" > fullnode.log 2>&1 &
waitForNodeToInit waitForNodeToInit
;; ;;
validator|blockstreamer) validator|blockstreamer)
net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/.cargo/bin/ ~/.cargo/bin/ if [[ $deployMethod != skip ]]; then
rm -f ~/solana/fullnode-identity.json net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/.cargo/bin/ ~/.cargo/bin/
[[ -z $stakeNodesInGenesisBlock ]] || net/scripts/rsync-retry.sh -vPrc \ fi
"$entrypointIp":~/solana/solana-node-keys/"$nodeIndex" ~/solana/fullnode-identity.json if [[ $skipSetup != true ]]; then
rm -f ~/solana/fullnode-identity.json
[[ -z $internalNodesLamports ]] || net/scripts/rsync-retry.sh -vPrc \
"$entrypointIp":~/solana/solana-node-keys/"$nodeIndex" ~/solana/fullnode-identity.json
fi
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-validator-cuda ]]; then if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-validator-cuda ]]; then
echo Selecting solana-validator-cuda echo Selecting solana-validator-cuda
@ -194,18 +206,20 @@ local|tar)
--blockstream /tmp/solana-blockstream.sock --blockstream /tmp/solana-blockstream.sock
--no-voting --no-voting
--stake 0 --stake 0
--generate-snapshots
) )
else else
args+=(--stake "$stake") args+=(--stake "$stake")
args+=(--enable-rpc-exit) args+=(--enable-rpc-exit)
if [[ -n $internalNodesLamports ]]; then
args+=(--node-lamports "$internalNodesLamports")
fi
fi fi
if [[ -f ~/solana/fullnode-identity.json ]]; then if [[ -f ~/solana/fullnode-identity.json ]]; then
args+=(--identity ~/solana/fullnode-identity.json) args+=(--identity ~/solana/fullnode-identity.json)
fi fi
if [[ -n $stakeNodesInGenesisBlock ]]; then if [[ $airdropsEnabled != true ]]; then
args+=(--no-airdrop) args+=(--no-airdrop)
fi fi
@ -220,11 +234,18 @@ local|tar)
# a static IP/DNS name for hosting the blockexplorer web app, and is # a static IP/DNS name for hosting the blockexplorer web app, and is
# a location that somebody would expect to be able to airdrop from # a location that somebody would expect to be able to airdrop from
scp "$entrypointIp":~/solana/config-local/mint-keypair.json config-local/ scp "$entrypointIp":~/solana/config-local/mint-keypair.json config-local/
if [[ -z $stakeNodesInGenesisBlock ]]; then if [[ $airdropsEnabled = true ]]; then
./multinode-demo/drone.sh > drone.log 2>&1 & ./multinode-demo/drone.sh > drone.log 2>&1 &
fi fi
# Grab the TLS cert generated by /certbot-restore.sh
if [[ -f /.cert.pem ]]; then
sudo install -o $UID -m 400 /.cert.pem /.key.pem .
ls -l .cert.pem .key.pem
fi
export BLOCKEXPLORER_GEOIP_WHITELIST=$PWD/net/config/geoip.yml export BLOCKEXPLORER_GEOIP_WHITELIST=$PWD/net/config/geoip.yml
npm install @solana/blockexplorer@1 npm install @solana/blockexplorer@1.27.0
npx solana-blockexplorer > blockexplorer.log 2>&1 & npx solana-blockexplorer > blockexplorer.log 2>&1 &
# Confirm the blockexplorer is accessible # Confirm the blockexplorer is accessible
@ -240,23 +261,33 @@ local|tar)
fi fi
args+=(--init-complete-file "$initCompleteFile") args+=(--init-complete-file "$initCompleteFile")
# shellcheck disable=SC2206 # Don't want to double quote $extraNodeArgs
args+=($extraNodeArgs)
nohup ./multinode-demo/validator.sh "${args[@]}" > fullnode.log 2>&1 & nohup ./multinode-demo/validator.sh "${args[@]}" > fullnode.log 2>&1 &
waitForNodeToInit waitForNodeToInit
;; ;;
replicator) replicator)
net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/.cargo/bin/ ~/.cargo/bin/ if [[ $deployMethod != skip ]]; then
net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/.cargo/bin/ ~/.cargo/bin/
fi
args=( args=(
"$entrypointIp":~/solana "$entrypointIp:8001" "$entrypointIp":~/solana "$entrypointIp:8001"
) )
if [[ -n $stakeNodesInGenesisBlock ]]; then if [[ $airdropsEnabled != true ]]; then
args+=(--no-airdrop) args+=(--no-airdrop)
fi fi
if [[ -n $internalNodesLamports ]] ; then
args+=(--node-lamports "$internalNodesLamports")
fi
if [[ $skipSetup != true ]]; then if [[ $skipSetup != true ]]; then
./multinode-demo/clear-config.sh ./multinode-demo/clear-config.sh
fi fi
# shellcheck disable=SC2206 # Don't want to double quote $extraNodeArgs
args+=($extraNodeArgs)
nohup ./multinode-demo/replicator.sh "${args[@]}" > fullnode.log 2>&1 & nohup ./multinode-demo/replicator.sh "${args[@]}" > fullnode.log 2>&1 &
sleep 1 sleep 1
;; ;;

View File

@ -67,7 +67,7 @@ source net/common.sh
loadConfigFile loadConfigFile
case $deployMethod in case $deployMethod in
local|tar) local|tar|skip)
PATH="$HOME"/.cargo/bin:"$PATH" PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1 export USE_INSTALL=1
if [[ -r target/perf-libs/env.sh ]]; then if [[ -r target/perf-libs/env.sh ]]; then
@ -158,7 +158,7 @@ echo "--- $sanityTargetIp: validator sanity"
if $validatorSanity; then if $validatorSanity; then
( (
set -x -o pipefail set -x -o pipefail
timeout 10s ./multinode-demo/validator-x.sh --stake 0 \ timeout 10s ./multinode-demo/validator-x.sh --no-restart --stake 0 \
"$entrypointRsyncUrl" \ "$entrypointRsyncUrl" \
"$sanityTargetIp:8001" 2>&1 | tee validator-sanity.log "$sanityTargetIp:8001" 2>&1 | tee validator-sanity.log
) || { ) || {
@ -183,13 +183,11 @@ if $installCheck && [[ -r update_manifest_keypair.json ]]; then
( (
set -x set -x
update_manifest_pubkey=$($solana_keygen pubkey update_manifest_keypair.json)
rm -rf install-data-dir rm -rf install-data-dir
$solana_install init \ $solana_install init \
--no-modify-path \ --no-modify-path \
--data-dir install-data-dir \ --data-dir install-data-dir \
--url http://"$sanityTargetIp":8899 \ --url http://"$sanityTargetIp":8899 \
--pubkey "$update_manifest_pubkey"
$solana_install info $solana_install info
) )

View File

@ -14,6 +14,8 @@ set -ex
# 2. Inline ~/.ssh/id-solana-testnet.pub below # 2. Inline ~/.ssh/id-solana-testnet.pub below
cat > /solana-authorized_keys <<EOF cat > /solana-authorized_keys <<EOF
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc=
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGqZAwAZeBl0buOMz4FpUYrtpwk1L5aGKlbd7lI8dpbSx5WVRPWCVKhWzsGMtDUIfmozdzJouk1LPyihghTDgsE=
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOk4jgcX/VWSk3j//wXeIynSQjsOt+AjYXM/XZUMa7R1Q8lfIJGK/qHLBP86CMXdpyEKJ5i37QLYOL+0VuRy0CI=
EOF EOF
sudo -u solana bash -c " sudo -u solana bash -c "

View File

@ -309,3 +309,12 @@ cloud_FetchFile() {
cloud_GetConfigValueFromInstanceName "$instanceName" osProfile.adminUsername cloud_GetConfigValueFromInstanceName "$instanceName" osProfile.adminUsername
scp "${config_value}@${publicIp}:${remoteFile}" "$localFile" scp "${config_value}@${publicIp}:${remoteFile}" "$localFile"
} }
#
# cloud_CreateAndAttachPersistentDisk
#
# Not yet implemented for this cloud provider
cloud_CreateAndAttachPersistentDisk() {
echo "ERROR: cloud_CreateAndAttachPersistentDisk is not yet implemented for azure"
exit 1
}

View File

@ -4,26 +4,29 @@ set -ex
[[ $(uname) = Linux ]] || exit 1 [[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1 [[ $USER = root ]] || exit 1
adduser solana --gecos "" --disabled-password --quiet if grep -q solana /etc/passwd ; then
adduser solana sudo echo "User solana already exists"
adduser solana adm else
echo "solana ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers adduser solana --gecos "" --disabled-password --quiet
id solana adduser solana sudo
adduser solana adm
echo "solana ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
id solana
[[ -r /solana-id_ecdsa ]] || exit 1 [[ -r /solana-id_ecdsa ]] || exit 1
[[ -r /solana-id_ecdsa.pub ]] || exit 1 [[ -r /solana-id_ecdsa.pub ]] || exit 1
sudo -u solana bash -c "
mkdir -p /home/solana/.ssh/
cd /home/solana/.ssh/
cp /solana-id_ecdsa.pub authorized_keys
umask 377
cp /solana-id_ecdsa id_ecdsa
echo \"
Host *
BatchMode yes
IdentityFile ~/.ssh/id_ecdsa
StrictHostKeyChecking no
\" > config
"
sudo -u solana bash -c "
mkdir -p /home/solana/.ssh/
cd /home/solana/.ssh/
cp /solana-id_ecdsa.pub authorized_keys
umask 377
cp /solana-id_ecdsa id_ecdsa
echo \"
Host *
BatchMode yes
IdentityFile ~/.ssh/id_ecdsa
StrictHostKeyChecking no
\" > config
"
fi

View File

@ -381,3 +381,12 @@ cloud_FetchFile() {
"solana@$publicIp:$remoteFile" "$localFile" "solana@$publicIp:$remoteFile" "$localFile"
) )
} }
#
# cloud_CreateAndAttachPersistentDisk
#
# Not yet implemented for this cloud provider
cloud_CreateAndAttachPersistentDisk() {
echo "ERROR: cloud_CreateAndAttachPersistentDisk is not yet implemented for ec2"
exit 1
}

View File

@ -81,7 +81,7 @@
"FromPort": 3001, "FromPort": 3001,
"IpRanges": [ "IpRanges": [
{ {
"Description": "blockexplorer API port", "Description": "blockexplorer http API port",
"CidrIp": "0.0.0.0/0" "CidrIp": "0.0.0.0/0"
} }
], ],
@ -91,7 +91,26 @@
"Ipv6Ranges": [ "Ipv6Ranges": [
{ {
"CidrIpv6": "::/0", "CidrIpv6": "::/0",
"Description": "blockexplorer API port" "Description": "blockexplorer http API port"
}
]
},
{
"PrefixListIds": [],
"FromPort": 3443,
"IpRanges": [
{
"Description": "blockexplorer https API port",
"CidrIp": "0.0.0.0/0"
}
],
"ToPort": 3443,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
"Ipv6Ranges": [
{
"CidrIpv6": "::/0",
"Description": "blockexplorer https API port"
} }
] ]
}, },

View File

@ -126,6 +126,7 @@ cloud_CreateInstances() {
declare optionalStartupScript="$8" declare optionalStartupScript="$8"
declare optionalAddress="$9" declare optionalAddress="$9"
declare optionalBootDiskType="${10}" declare optionalBootDiskType="${10}"
declare optionalAdditionalDiskSize="${11}"
if $enableGpu; then if $enableGpu; then
# Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed # Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
@ -198,6 +199,22 @@ cloud_CreateInstances() {
set -x set -x
gcloud beta compute instances create "${nodes[@]}" "${args[@]}" gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
) )
if [[ -n $optionalAdditionalDiskSize ]]; then
if [[ $numNodes = 1 ]]; then
(
set -x
cloud_CreateAndAttachPersistentDisk "${namePrefix}" "$optionalAdditionalDiskSize" "pd-ssd" "$zone"
)
else
for node in $(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes"); do
(
set -x
cloud_CreateAndAttachPersistentDisk "${node}" "$optionalAdditionalDiskSize" "pd-ssd" "$zone"
)
done
fi
fi
} }
# #
@ -256,3 +273,31 @@ cloud_FetchFile() {
gcloud compute scp --zone "$zone" "$instanceName:$remoteFile" "$localFile" gcloud compute scp --zone "$zone" "$instanceName:$remoteFile" "$localFile"
) )
} }
#
# cloud_CreateAndAttachPersistentDisk [instanceName] [diskSize] [diskType]
#
# Create a persistent disk and attach it to a pre-existing VM instance.
# Set disk to auto-delete upon instance deletion
#
cloud_CreateAndAttachPersistentDisk() {
declare instanceName="$1"
declare diskSize="$2"
declare diskType="$3"
declare zone="$4"
diskName="${instanceName}-pd"
gcloud beta compute disks create "$diskName" \
--size "$diskSize" \
--type "$diskType" \
--zone "$zone"
gcloud compute instances attach-disk "$instanceName" \
--disk "$diskName" \
--zone "$zone"
gcloud compute instances set-disk-auto-delete "$instanceName" \
--disk "$diskName" \
--zone "$zone" \
--auto-delete
}

52
net/scripts/install-certbot.sh Executable file
View File

@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -ex
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
apt-get update
add-apt-repository --yes ppa:certbot/certbot
apt-get --assume-yes install certbot
cat > /certbot-restore.sh <<'EOF'
#!/usr/bin/env bash
set -e
domain=$1
email=$2
if [[ $USER != root ]]; then
echo "Run as root"
exit 1
fi
if [[ -f /.cert.pem ]]; then
echo "Certificate already initialized"
exit 0
fi
set -x
if [[ -r letsencrypt.tgz ]]; then
tar -C / -zxf letsencrypt.tgz
fi
cd /
rm -f letsencrypt.tgz
maybeDryRun=
# Uncomment during testing to avoid hitting LetsEncrypt API limits while iterating
#maybeDryRun="--dry-run"
certbot certonly --standalone -d "$domain" --email "$email" --agree-tos -n $maybeDryRun
tar zcf letsencrypt.tgz /etc/letsencrypt
ls -l letsencrypt.tgz
# Copy certificates to / for easy access without knowing the value of "$domain"
rm -f /.key.pem /.cert.pem
cp /etc/letsencrypt/live/$domain/privkey.pem /.key.pem
cp /etc/letsencrypt/live/$domain/cert.pem /.cert.pem
EOF
chmod +x /certbot-restore.sh

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -x
mount_point=/mnt/extra-disk
disk=sdb
if ! lsblk | grep -q ${disk} ; then
echo "${disk} does not exist"
else
if mount | grep -q ${disk} ; then
echo "${disk} is already mounted"
else
sudo mkfs.ext4 -F /dev/"$disk"
sudo mkdir -p "$mount_point"
sudo mount /dev/"$disk" "$mount_point"
sudo chmod a+w "$mount_point"
if ! mount | grep -q ${mount_point} ; then
echo "${disk} failed to mount!"
exit 1
fi
fi
fi

View File

@ -1,6 +1,6 @@
[package] [package]
name = "solana-netutil" name = "solana-netutil"
version = "0.16.0" version = "0.16.6"
description = "Solana Network Utilities" description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -15,7 +15,7 @@ log = "0.4.2"
nix = "0.14.1" nix = "0.14.1"
rand = "0.6.1" rand = "0.6.1"
socket2 = "0.3.9" socket2 = "0.3.9"
solana-logger = { path = "../logger", version = "0.16.0" } solana-logger = { path = "../logger", version = "0.16.6" }
tokio = "0.1" tokio = "0.1"
[lib] [lib]

View File

@ -1,7 +1,7 @@
[package] [package]
name = "solana-bpf-programs" name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.16.0" version = "0.16.6"
documentation = "https://docs.rs/solana" documentation = "https://docs.rs/solana"
homepage = "https://solana.com/" homepage = "https://solana.com/"
readme = "README.md" readme = "README.md"
@ -9,6 +9,7 @@ repository = "https://github.com/solana-labs/solana"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0" license = "Apache-2.0"
edition = "2018" edition = "2018"
publish = false
[features] [features]
bpf_c = [] bpf_c = []
@ -21,10 +22,10 @@ walkdir = "2"
bincode = "1.1.4" bincode = "1.1.4"
byteorder = "1.3.2" byteorder = "1.3.2"
elf = "0.0.10" elf = "0.0.10"
solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.0" } solana-bpf-loader-api = { path = "../bpf_loader_api", version = "0.16.6" }
solana-logger = { path = "../../logger", version = "0.16.0" } solana-logger = { path = "../../logger", version = "0.16.6" }
solana-runtime = { path = "../../runtime", version = "0.16.0" } solana-runtime = { path = "../../runtime", version = "0.16.6" }
solana-sdk = { path = "../../sdk", version = "0.16.0" } solana-sdk = { path = "../../sdk", version = "0.16.6" }
solana_rbpf = "=0.1.13" solana_rbpf = "=0.1.13"
[[bench]] [[bench]]

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-128bit" name = "solana-bpf-rust-128bit"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF iter program written in Rust" description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,8 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.16.0" } solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-128bit-dep" name = "solana-bpf-rust-128bit-dep"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF many-args-dep program written in Rust" description = "Solana BPF many-args-dep program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-alloc" name = "solana-bpf-rust-alloc"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF alloc program written in Rust" description = "Solana BPF alloc program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-dep-crate" name = "solana-bpf-rust-dep-crate"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF dep-crate program written in Rust" description = "Solana BPF dep-crate program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -13,7 +13,7 @@ edition = "2018"
[dependencies] [dependencies]
byteorder = { version = "1", default-features = false } byteorder = { version = "1", default-features = false }
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-iter" name = "solana-bpf-rust-iter"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF iter program written in Rust" description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-many-args" name = "solana-bpf-rust-many-args"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF many-args program written in Rust" description = "Solana BPF many-args program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,8 +12,8 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.16.0" } solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-many-args-dep" name = "solana-bpf-rust-many-args-dep"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF many-args-dep program written in Rust" description = "Solana BPF many-args-dep program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-noop" name = "solana-bpf-rust-noop"
version = "0.16.0" version = "0.16.6"
description = "Solana BPF noop program written in Rust" description = "Solana BPF noop program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

View File

@ -3,7 +3,7 @@
[package] [package]
name = "solana-bpf-rust-panic" name = "solana-bpf-rust-panic"
version = "0.15.0" version = "0.16.6"
description = "Solana BPF iter program written in Rust" description = "Solana BPF iter program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ homepage = "https://solana.com/"
edition = "2018" edition = "2018"
[dependencies] [dependencies]
solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.0" } solana-sdk-bpf-utils = { path = "../../../../sdk/bpf/rust/rust-utils", version = "0.16.6" }
[workspace] [workspace]
members = [] members = []

Some files were not shown because too many files have changed in this diff Show More