Compare commits

..

296 Commits

Author SHA1 Message Date
mergify[bot]
293bb63ed8 Reduce rpc client pre-flight requests by setting max-age header (#8082) (#8083)
automerge
2020-02-01 08:48:40 -08:00
Trent Nelson
8f8fb720af CLI: Fix stake-account auth withdrawer output (#8071)
automerge

(cherry picked from commit 9739be9ecf)
2020-02-01 08:58:13 -07:00
mergify[bot]
19f414d843 Use solana-cli config keypair in solana-keygen (bp #8074) (#8080)
* Use solana-cli config keypair in solana-keygen (#8074)

* Use solana-cli config keypair in solana-keygen

* s/infile/keypair for consistency across modules and more generality across access methods

* Move config into separate crate

(cherry picked from commit fab8ef379f)

# Conflicts:
#	Cargo.lock
#	cli/Cargo.toml
#	keygen/Cargo.toml

* Fixup version numbers for backport

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-01-31 23:08:08 -07:00
mergify[bot]
eaca1c3170 Add new colo test cases using reduced node count (#8078) (#8079)
automerge
2020-01-31 19:06:36 -08:00
mergify[bot]
9fc75925f9 CLI: De-replicode SigningAuthority instantiation (#8076) (#8077)
automerge
2020-01-31 17:42:15 -08:00
mergify[bot]
b5098ac87c Filter repairman peers based on shred_version (#8069) (#8073)
automerge
2020-01-31 15:29:30 -08:00
mergify[bot]
e23aec9728 Update key (#8062) (#8066)
automerge
2020-01-31 12:55:49 -08:00
mergify[bot]
57d490c84f Minor cli fixes (bp #8061) (#8065)
automerge
2020-01-31 12:36:35 -08:00
mergify[bot]
aa8c9f6a98 Remove asteroids and pacman from QA/dev testnet availability (#8050) (#8063)
automerge
2020-01-31 11:28:33 -08:00
Michael Vines
57772dc73d s/mint/faucet 2020-01-31 12:15:20 -07:00
Justin Starry
21706108e8 Don't exit early if add. validators not found during gce.sh config
(cherry picked from commit 9adf0d4ee0)
2020-01-31 08:36:03 -07:00
mergify[bot]
50d0caf00f Remove support for 0.22.3 snapshots (#8058)
automerge
2020-01-31 00:15:44 -08:00
mergify[bot]
2739332306 Fix stale gossip entrypoint (#8053) (#8057)
automerge
2020-01-30 23:13:26 -08:00
mergify[bot]
c85c4699aa validator: add --private-rpc flag (bp #8037) (#8054)
automerge
2020-01-30 20:44:53 -08:00
Michael Vines
81add4d6bf Make tds slots-per-epoch configurable 2020-01-30 21:38:39 -07:00
Michael Vines
8e31eeb696 Dial testnet down to a single node 2020-01-30 21:17:38 -07:00
mergify[bot]
e1ce8b37ff Minor --expected-shred fix, clean up shred-related gossip log messages (#8041) (#8045)
automerge
2020-01-30 14:41:21 -08:00
Michael Vines
3f831c05f5 Add different shred test to test_tvu_peers_and_stakes
(cherry picked from commit 0c55b37976)
2020-01-30 11:28:45 -07:00
Trent Nelson
f0d7ce6bb6 CLI: Disallow blockhash/fee-calc lookups when offline (#7981)
* CLI: Add BlockhashSpec to tighten control over --blockhash

* Use BlockhashSpec

* Add a matches-free constructor

* More descriptive naming

(cherry picked from commit 966d077431)
2020-01-30 09:39:04 -07:00
Justin Starry
6ba95b2545 Ignore slow archiver tests (#8032)
automerge

(cherry picked from commit 400412d76c)
2020-01-30 09:38:49 -07:00
Sagar Dhawan
6818e68542 Add shred version filters to Crds Accessors (#8027)
* Add shred version filters to Crds Accessors

* Adopt entrypoint shred_version if one isn't provided

(cherry picked from commit 64c42e28dc)
2020-01-30 08:58:36 -07:00
mergify[bot]
43659d7deb Remove support for stake redelegation (#7995) (#8024)
automerge
2020-01-29 23:46:42 -08:00
Rob Walker
f24d8e7d2d Add set_lockup to stake (#7997)
(cherry picked from commit 0d6c233747)
2020-01-29 23:22:04 -07:00
Jack May
e10fe5e125 Update and fix transaction error documentation (#7998)
(cherry picked from commit fed3817ed3)
2020-01-29 23:20:32 -07:00
mergify[bot]
0f8c9ab1c4 Various fixes/improvements resulting from SLP 1.1 restart debug (bp #8019) (#8026)
automerge
2020-01-29 20:11:23 -08:00
Justin Starry
8a9a9cb991 Log solana-validator args on startup to aid debugging
(cherry picked from commit effe6e3ff3)
2020-01-29 09:40:33 -07:00
Jon-Eric Cook
44208ffa67 refactored 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
5df0478fa3 refactored the thread loop
a thread will break if the atomic bool is true
2020-01-28 20:29:56 -07:00
Jon-Eric Cook
d52567933e refactored grind_parse_args and grind_print_info 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
a32cdb9f4d updated to slice 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
eacd8d986c put some logic into functions 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
1d32603b49 taking care of errors from ./test-check.sh 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
8c6f7ee5a4 ran cargo fmt 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
be482eed3f removed whitespace 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
6e1c53cb0f simplified messaging and if blocks 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
af92f205cf simplified messaging 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
87047b08c8 removed found and changed count to AtomicU64 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
e282161872 updated bs58 decode check 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
01b1e287ed fixed prefix typo 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
d7fd1fa467 added informative print statements 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
bfa34cd494 it works
need to add print out to inform user
2020-01-28 20:29:56 -07:00
Jon-Eric Cook
915835e224 this command works but won't exit right when the 6th key is found
cargo run grind --starts-with hj:2 --ends-with jk:2 --starts-and-ends-with n:m:2
2020-01-28 20:29:56 -07:00
Jon-Eric Cook
659332e7ac progress on storing parameters 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
272986c6ac validator methods work 2020-01-28 20:29:56 -07:00
Jon-Eric Cook
4d8ab45c56 removed includes
added ends-with and starts-and-ends-with
updated help messages
added expected number of values
updated .value_name for each option
2020-01-28 20:29:56 -07:00
mergify[bot]
932ae86d47 CLI: Fix tests. sign_only requires a blockhash (#8005) (#8007)
automerge
2020-01-28 19:07:47 -08:00
mergify[bot]
756e6334b0 Add lock to make sure slot-based locktree calls are safe (#7993) (#7999)
automerge
2020-01-28 14:57:37 -08:00
Dan Albert
4e6eca9748 Update cargo files to 0.23.1 (#7994)
automerge
2020-01-27 20:44:44 -08:00
Michael Vines
d9e37eb30c Fix compute_shred_version() (#7989)
automerge

(cherry picked from commit fd7d5cbe0d)
2020-01-27 19:06:20 -07:00
mergify[bot]
04d1b35926 Consensus fix, don't consider threshold check if.. (#7948) (#7991)
automerge
2020-01-27 17:52:48 -08:00
mergify[bot]
d13d609050 Reduce epoch duration from 2 weeks to 2 days (#7987)
automerge
2020-01-27 10:24:20 -08:00
mergify[bot]
20426cf251 Specify where VM images are coming from across GCE projects (#7985) (#7986)
automerge
2020-01-27 09:02:05 -08:00
Michael Vines
4a220d7c8e Remove show- prefix 2020-01-26 21:01:18 -07:00
Michael Vines
436eab41ca Remove stray key 2020-01-26 14:35:50 -07:00
mergify[bot]
c8472d0a96 CLI: --sign-only and --signer require --blockhash (#7982) (#7983)
automerge
2020-01-26 10:19:04 -08:00
mergify[bot]
1a7db9c17e CLI: Consolidate offline arg declarations (#7979) (#7980)
automerge
2020-01-26 01:24:01 -08:00
mergify[bot]
b468d9f17c CLI: Deterministic dummy keypair generation for SigningAuthority::Offline (#7971) (#7978)
automerge
2020-01-26 00:13:06 -08:00
Michael Vines
41cf1d7d23 s/dervied/derived/ 2020-01-25 23:22:55 -07:00
Trent Nelson
e2570c98ee CLI: Add authority to show-nonce-account output (#7969) 2020-01-25 07:21:23 -07:00
Michael Vines
b5125479ec Bump perf libs to v0.18.0 for CUDA 10.2 support 2020-01-24 21:39:49 -07:00
Michael Vines
989355e885 Add ability to hard fork at any slot (#7801)
automerge
2020-01-24 17:27:04 -08:00
Michael Vines
a2f2c46f87 Ensure shred version is never 0 2020-01-24 17:41:20 -07:00
Dan Albert
605623baf5 Report last tower distance and add partition testcase (#7929)
automerge
2020-01-24 16:37:19 -08:00
Michael Vines
fdc452c536 Move testnet.solana.com and TdS to their own GCP projects 2020-01-24 16:26:54 -07:00
Jack May
1b391dd36b Add account accessor functions (#7966) 2020-01-24 14:34:59 -08:00
Jack May
917067741a Cleanup BPF SDK (#7965) 2020-01-24 13:41:14 -08:00
Jack May
34ed93d57c Optimize account copies and use RefCell to handle duplicate accounts in BPF programs (#7958) 2020-01-24 10:54:26 -08:00
Rob Walker
d400a64b9a Update tiny_bip39 (#7959)
automerge
2020-01-24 08:59:07 -08:00
Ryo Onodera
2c7447b73e Secure sysvars under hash by freezing all strictly (#7892)
* Secure sysvars under hash by freezing all strictly

* Fix hash's non-idempotency and add new test

* Clean up

* More cleanups
2020-01-24 16:10:32 +09:00
Michael Vines
c0f0fa24f8 Increase --wait-for-supermajority to wait for 75% online stake 2020-01-23 22:41:46 -07:00
Michael Vines
bda5f949bb Add create-snapshot command 2020-01-23 22:21:27 -07:00
Ryo Onodera
992e985972 Add column for slot range of epoch to epoch-info (#7954)
automerge
2020-01-23 20:44:37 -08:00
Greg Fitzgerald
afaa359b0d Reorg the book (#7952)
* Move application-oriented docs

* Reorg the book

* Fix build

* Apply review feedback

* verb-noun
2020-01-23 21:21:43 -07:00
Justin Starry
3c17db41dc Add note to book about drone throughput limitations (#7953)
automerge
2020-01-23 18:50:25 -08:00
Michael Vines
d62ed4f6b3 Add BlockstoreProcessorResult 2020-01-23 16:52:47 -07:00
Dan Albert
79f3194d0c Fix cli call to stakes (#7946)
automerge
2020-01-23 14:25:40 -08:00
Michael Vines
b045f9a50d codemod --extensions rs get_snapshot_tar_path get_snapshot_archive_path 2020-01-23 13:37:13 -07:00
Michael Vines
ce231602dc Move snapshot archive generation out of the SnapshotPackagerService 2020-01-23 13:37:13 -07:00
Michael Vines
6f5e0cd161 Type grooming 2020-01-23 13:37:13 -07:00
Michael Vines
1269a79a4d Unify ledger_path arg handling with validator/ 2020-01-23 13:37:13 -07:00
Michael Vines
1b3424ff61 Pass bank_forks by reference 2020-01-23 13:37:13 -07:00
Michael Vines
8b8033c72b Set BankRc slot correctly when restoring a bank snapshot 2020-01-23 13:37:13 -07:00
Michael Vines
7ca0109732 --halt-at-slot 1 now halts at slot 1 2020-01-23 13:37:13 -07:00
Michael Vines
6b5172d002 add_snapshot now returns SlotSnapshotPaths 2020-01-23 13:37:13 -07:00
Michael Vines
9e19a635bb Remove superfluous accounts arg 2020-01-23 13:37:13 -07:00
Dan Albert
15193d0e1f Ensure all GCE nightly tests use dedicated instances (#7944)
automerge
2020-01-23 10:17:12 -08:00
Tyera Eulberg
f1c5c72e62 Fix transaction.md anchor links (#7943)
* Lowercase links

* Fix misspelled anchor link
2020-01-23 10:05:42 -07:00
Michael Vines
25dfed207c Remove dead code (#7940)
automerge
2020-01-23 00:38:46 -08:00
Michael Vines
006cbee88a Uninteresting cleanup 2020-01-22 21:24:20 -07:00
Jack May
c95e5346a4 Boot the mut (#7926) 2020-01-22 17:54:06 -08:00
Ryo Onodera
e54bf563b5 Avoid unsorted recent_blockhashes for determinism (#7918)
* Avoid unsorted recent_blockhashes for determinism

* Add a test: test_create_account_unsorted
2020-01-23 10:51:22 +09:00
Jack May
8f79327190 Test account doesn't need RefCell (#7932)
automerge
2020-01-22 17:06:11 -08:00
Greg Fitzgerald
a197ac092a New Anatomy of a Transaction (#7930)
automerge
2020-01-22 16:58:46 -08:00
Rob Walker
1e2b55c0d7 Remove RedeemVoteCredits (#7916)
* Move redeem_vote_credits into runtime

* Move redeem_vote_credits into runtime

* Remove RedeemVoteCredits

* chugga for less indentation

* resurrect NoCreditsToRedeem

* fixup
2020-01-22 16:53:42 -08:00
Trent Nelson
964ff522be Verb-noun-ify Nonce API (#7925)
* Verb-noun-ify Nonce API

* Unify instruction naming with API naming

The more verbose nonce_account/NonceAccount was chosen for clarity
that these instructions work on a unique species of system account
2020-01-22 16:31:39 -07:00
Michael Vines
934c32cbc6 Add mechanism to load v0.22.3 snapshots on newer Solana versions 2020-01-22 15:40:32 -07:00
Michael Vines
9bd6be779f Reject CI on failed mergify.io backports (#7927)
automerge
2020-01-22 14:10:26 -08:00
Rob Walker
ce70d6eedc Add redeem_vote_credits to runtime (#7910)
* Move redeem_vote_credits into runtime

* fixup

* test

* move stake manipulation to stake program

* chugga for less indentation
2020-01-22 12:21:31 -08:00
Trent Nelson
3a0d13aa77 CLI: Cleanup authority arg usage inconsistencies (#7922)
automerge
2020-01-22 11:19:07 -08:00
Michael Vines
f9323c5273 don't put accounts in a weird location, use the defaults (#7921)
automerge
2020-01-22 10:57:37 -08:00
Dan Albert
7587656cf6 Implement automated partition testing (#7222) 2020-01-22 13:46:50 -05:00
Jack May
023074650f Allow the same account to be passed multiple times to a single instruction (#7795) 2020-01-22 09:11:56 -08:00
Trent Nelson
d854e90c23 CLI: Support offline authorities (#7905) 2020-01-22 10:10:22 -07:00
Greg Fitzgerald
3aabeb2b81 Rename bootstrap leader (#7906)
* Rename bootstrap leader to bootstrap validator

It's a normal validator as soon as other validators enter the
leader schedule.

* cargo fmt

* Fix build

Thanks @CriesofCarrots!
2020-01-22 09:22:09 -07:00
Tyera Eulberg
65f5885bce sendTransaction rpc: expect transaction as base58 string (#7913) 2020-01-21 22:16:07 -07:00
Tyera Eulberg
7a132eabb4 Update JSON-RPC documentation (#7915)
* Streamline getBlockCommitment response

* Update json-rpc docs
2020-01-21 20:17:33 -07:00
Rob Walker
7e1b380f01 Move vote_state current credits into epoch_credits (#7909)
* Move vote_state current credits into epoch_credits

* fixups

* fixup
2020-01-21 19:08:40 -08:00
dependabot-preview[bot]
1a2d9b8eed Bump csv from 1.1.2 to 1.1.3 (#7893)
Bumps [csv](https://github.com/BurntSushi/rust-csv) from 1.1.2 to 1.1.3.
- [Release notes](https://github.com/BurntSushi/rust-csv/releases)
- [Commits](https://github.com/BurntSushi/rust-csv/compare/1.1.2...1.1.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-21 19:49:19 -07:00
Greg Fitzgerald
6eefa0b72d Integrate transaction chapter into programming model chapter (#7911)
automerge
2020-01-21 16:44:05 -08:00
Greg Fitzgerald
44372db955 Move Blockstreamer doc into getting started doc 2020-01-21 17:15:29 -07:00
Greg Fitzgerald
e24cce4aed Hoist blockstore chapter (#7908)
automerge
2020-01-21 16:01:26 -08:00
Greg Fitzgerald
a8595c0418 Give chapters more precise titles (#7907)
automerge
2020-01-21 15:36:40 -08:00
Michael Vines
340424e03a Use minimumLedgerSlot RPC API in block-production command 2020-01-21 14:05:26 -07:00
Michael Vines
93036bec01 Add minimumLedgerSlot RPC API 2020-01-21 14:05:26 -07:00
Jack May
663e98969d Use a different error to test rpc response (#7900)
automerge
2020-01-21 12:42:23 -08:00
Sagar Dhawan
37d1daf58e Revert "Generate MAX_DATA_SHREDS_PER_FEC_BLOCK coding shreds for each FEC block (#7474)" (#7898)
automerge
2020-01-21 11:48:09 -08:00
Jack May
1a18f0ca55 Add rust duplicate account test program (#7897)
automerge
2020-01-21 10:59:19 -08:00
Jack May
bb950ec93e Naming nits (#7896)
automerge
2020-01-21 10:38:46 -08:00
Greg Fitzgerald
39ab3557a3 Delete "testnet participation" redirect (#7895)
automerge
2020-01-21 09:35:59 -08:00
Michael Vines
dcdc46b97c Assume 1 or more validators 2020-01-21 10:34:58 -07:00
Michael Vines
da3ed0dfb3 Try running testnet.solana.com with only two validators 2020-01-21 10:34:53 -07:00
Greg Fitzgerald
e391b9fb90 Delete duplicate book content (#7894)
automerge
2020-01-21 09:17:20 -08:00
Michael Vines
e346cdad26 Run ./book/build-cli-usage.sh 2020-01-21 08:58:29 -07:00
Michael Vines
7e4c6ff218 solana set => solana config set 2020-01-21 08:53:44 -07:00
Michael Vines
356f246a74 Remove get-/show- prefix from cli commands 2020-01-21 08:43:07 -07:00
dependabot-preview[bot]
80da552834 Bump rpassword from 4.0.4 to 4.0.5
Bumps [rpassword](https://github.com/conradkleinespel/rpassword) from 4.0.4 to 4.0.5.
- [Release notes](https://github.com/conradkleinespel/rpassword/releases)
- [Commits](https://github.com/conradkleinespel/rpassword/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-20 23:30:18 -07:00
Sagar Dhawan
2dd8ab197d Remove redundant threadpools in sigverify (#7888)
* Limit the number of thread pools sigverify creates

* Name local threadpools
2020-01-20 20:08:19 -08:00
Rob Walker
1fe11e9ae2 chacha ignore farf (#7882) 2020-01-20 17:04:31 -08:00
Tyera Eulberg
21d5fe6272 Fix timestamp overflow (#7886)
* Split timestamp calculation into separate fn for math unit testing

* Add failing test

* Fix failing test; also bump stakes to near expected cluster max supply

* Don't error on timestamp of slot 0
2020-01-20 17:54:44 -07:00
Jack May
52bc4a3598 nudge (#7887) 2020-01-20 15:27:36 -08:00
Dan Albert
cccaacee36 Wait for stake distribution in automation (#7883)
automerge
2020-01-20 13:32:37 -08:00
Michael Vines
ebf6e1c0e9 --limit-ledger-size now accepts an optional slot count value 2020-01-20 14:20:30 -07:00
Sunny Gleason
5cf090c896 feat: implement RPC notification queue (#7863) 2020-01-20 16:08:29 -05:00
Rob Walker
cc299053cc Add support for stake::split() via create_account_with_seed() (#7879)
* Add split with seed

* move to new system_program APIs

* de-replicode
2020-01-20 12:33:27 -08:00
Michael Vines
82b75796f9 Create ledger directory if it doesn't already exist 2020-01-20 10:11:43 -07:00
dependabot-preview[bot]
a560d94a9f Bump humantime from 1.3.0 to 2.0.0
Bumps [humantime](https://github.com/tailhook/humantime) from 1.3.0 to 2.0.0.
- [Release notes](https://github.com/tailhook/humantime/releases)
- [Commits](https://github.com/tailhook/humantime/compare/v1.3.0...v2.0.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-20 10:08:11 -07:00
dependabot-preview[bot]
0827d52c6f Bump indexmap from 1.1.0 to 1.3.1
Bumps [indexmap](https://github.com/bluss/indexmap) from 1.1.0 to 1.3.1.
- [Release notes](https://github.com/bluss/indexmap/releases)
- [Commits](https://github.com/bluss/indexmap/compare/1.1.0...1.3.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-20 10:07:48 -07:00
Ryo Onodera
a8d33c9950 Spy just for RPC to avoid premature supermajority (#7856)
* Spy just for RPC to avoid premature supermajority

* Make gossip_content_info private

Co-Authored-By: Michael Vines <mvines@gmail.com>

* Fix misindent...

Co-authored-by: Michael Vines <mvines@gmail.com>
2020-01-20 10:50:31 +09:00
dependabot-preview[bot]
43c32ea280 Bump rpassword from 4.0.3 to 4.0.4
Bumps [rpassword](https://github.com/conradkleinespel/rpassword) from 4.0.3 to 4.0.4.
- [Release notes](https://github.com/conradkleinespel/rpassword/releases)
- [Commits](https://github.com/conradkleinespel/rpassword/compare/v.4.0.3...v4.0.4)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-18 19:32:14 -07:00
Michael Vines
30d40e9a32 If a bad RPC node is selected try another one instead of aborting 2020-01-18 11:00:08 -07:00
Michael Vines
e28508ad56 Abort if a snapshot download fails for any reason other than 404 2020-01-18 08:59:53 -07:00
Michael Vines
182e4cec86 Update backport labels 2020-01-17 21:38:39 -07:00
Michael Vines
a32de96ab1 Add show-stakes subcommand 2020-01-17 14:14:01 -07:00
Trent Nelson
0de35fdd1f CLI: Support offline and nonced stake subcommands (#7831)
* Support durable nonce for staker-authorize-*

* CLI: Factor out sign-only reply parsing to helper

* Support offline signing for staker-authorize-*
2020-01-17 10:30:56 -07:00
Rob Walker
470d9cd752 Add system_instruction::{allocate, allocate_with_seed, assign_with_seed}, (#7847)
* cleanup test checks cargo audit

* Add system_instruction allocate

* fixup

* fixup
2020-01-17 09:29:15 -08:00
Justin Starry
87598c7612 Consolidate tx error counters and update metrics dashboard (#7724)
automerge
2020-01-16 23:26:50 -08:00
Michael Vines
57bf618627 Enable config program at soft launch epoch 0 (#7854)
automerge
2020-01-16 23:05:33 -08:00
Michael Vines
c576a707b0 Increase token cap (#7855)
automerge
2020-01-16 23:02:05 -08:00
Justin Starry
b78b1bbfa9 Improve bench-tps keypair generation (#7723)
* Improve bench-tps keypair generation

* Fix tests

* Fix move test

* cargo fmt

* Split up funding function into smaller functions

* Support restarting bench-tps without re-funding

* Change quick start logic and remove noisy log
2020-01-17 10:35:12 +08:00
Ryo Onodera
e710964d05 Revamp the progress of current epoch in get-epoch-info (#7838)
* Revamp the progress of current epoch in get-epoch-info

* Incorporate suggested more concise labelling
2020-01-17 09:39:47 +09:00
dependabot-preview[bot]
2d00657756 Bump num_cpus from 1.11.1 to 1.12.0 (#7845)
Bumps [num_cpus](https://github.com/seanmonstar/num_cpus) from 1.11.1 to 1.12.0.
- [Release notes](https://github.com/seanmonstar/num_cpus/releases)
- [Changelog](https://github.com/seanmonstar/num_cpus/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/num_cpus/compare/v1.11.1...v1.12.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-16 17:38:13 -07:00
carllin
0526d4ff21 Add logging surrounding failure in get_slot_entries_with_shred_info() (#7846)
* Add logging surrounding failure
2020-01-16 16:03:24 -08:00
carllin
76e20015a4 Add separate thread to check for and store duplicate slot proofs (#7834) 2020-01-16 15:27:54 -08:00
Rob Walker
f5e797e3aa cleanup test checks cargo audit (#7849)
automerge
2020-01-16 15:08:36 -08:00
Rob Walker
787e36a28f ignore prost is part of move (#7848) 2020-01-16 14:14:44 -08:00
sakridge
8572b57834 Refactor chacha cuda to be able to test cuda crate but not in OpenCL (#7685)
* Refactor chacha cuda to be able to test cuda crate but not in OpenCL

chacha not implemented in OpenCL

* Get off core::Error
2020-01-16 08:29:36 -08:00
Ryo Onodera
ed0129f881 Don't depend on unused lazy_static 2020-01-16 08:43:13 -07:00
Ryo Onodera
78836a9e22 Make run.sh not overwrite genesis if existing (#7837) 2020-01-16 14:34:36 +09:00
Rob Walker
4c08184379 no check if no change (#7824) 2020-01-15 15:13:11 -08:00
Tyera Eulberg
da165d6943 Fix Rpc inconsistencies (#7826)
* Update rpc account format: remove byte arrays

* Base58-encode pubkeys in getStoragePubkeysForSlot

* Update docs
2020-01-15 15:33:53 -07:00
Trent Nelson
8ffccfbaff CLI: Plumb stake authorities throughout (#7822)
automerge
2020-01-15 13:32:06 -08:00
Rob Walker
a6d083d69d Remove create_account bandaid now that to's signature is required (#7776)
* Remove create account bandaid now that `to` requires signature

* shrink scope of this PR to bandaid
2020-01-15 13:03:22 -08:00
Greg Fitzgerald
91bae9d510 Don't use word 'securely' (#7820)
automerge
2020-01-15 11:30:11 -08:00
Tyera Eulberg
f0f185509f Remove tuple from programNotification (#7819)
automerge
2020-01-15 10:52:02 -08:00
Dan Albert
5947ef7706 Remove word pair from address generator seed string (#7802)
* Remove word pair from address generator seed string
2020-01-15 13:50:37 -05:00
Michael Vines
4f663a2a86 Add new genesis validators (#7814)
automerge
2020-01-15 09:26:49 -08:00
Michael Vines
1d01777a13 Prefer CUDA_HOME environment variable 2020-01-15 09:03:52 -07:00
Tyera Eulberg
6d3b8b6d7d Remove tuples from JSON RPC responses (#7806)
* Remove RpcConfirmedBlock tuple

* Remove getRecentBlockhash tuple

* Remove getProgramAccounts tuple

* Remove tuple from get_signature_confirmation_status

* Collect Rpc response types

* Camel-case epoch schedule for rpc response

* Remove getBlockCommitment tuple

* Remove getStorageTurn tuple

* Update json-rpc docs
2020-01-15 00:25:45 -07:00
Michael Vines
50c1c08235 Set bootstrap leader and net/ validator vote account commission to 100% 2020-01-15 00:25:26 -07:00
Ryo Onodera
b16c30b4c6 Fix cluster collapse due to no proper shifted read (#7797)
* Fix cluster collapse due to no proper shifted read

* Add test for bank hash mismatch

Co-authored-by: sakridge <sakridge@gmail.com>
2020-01-15 11:45:19 +09:00
Justin Starry
ff1ca1e0d3 Consolidate entry tick verification into one function (#7740)
* Consolidate entry tick verification into one function

* Mark bad slots as dead in blocktree processor

* more feedback

* Add bank.is_complete

* feedback
2020-01-15 09:15:26 +08:00
carllin
721c4378c1 Plumb ability to handle duplicate shreds into shred insertion functions (#7784) 2020-01-14 15:37:53 -08:00
Jack May
5f4e0c7e3e Naming nits (#7798)
automerge
2020-01-14 13:38:17 -08:00
Michael Vines
e6af4511a8 Include shred version in gossip 2020-01-14 14:32:40 -07:00
Michael Vines
965ad778dd Improve KeypairFileNotFound error message (#7792)
automerge
2020-01-14 12:19:08 -08:00
sakridge
3b78be83cf Add hash stats information to check hashes between validators (#7780)
automerge
2020-01-14 11:57:29 -08:00
Trent Nelson
564cd4e09d Book: Drop since-fixed nonce known issue (#7789)
automerge
2020-01-14 10:13:09 -08:00
Ryo Onodera
699ca5fec1 Unignore advisories as affected ver. is corrected (#7730)
For details see upstream PR: https://github.com/RustSec/advisory-db/pull/221
2020-01-14 11:16:32 +09:00
carllin
f91ffbbfdf Add support in BlockStore for tracking duplicate slots (#7761)
* Add test

* Add new column family to track duplicate slots

* Fix clippy errors

* Introduce new SlotColumn for common implementation of Column trait
2020-01-13 17:21:39 -08:00
Pankaj Garg
156292e408 Reduce grace ticks, and ignore grace ticks for missing leaders (#7764)
* Reduce grace ticks, and ignore grace ticks for missing leaders

* address review comments

* blockstore related renames
2020-01-14 05:25:41 +05:30
Trent Nelson
81ae44f858 Nonce: Rename instructions with VerbNoun scheme (#7775)
automerge
2020-01-13 15:34:43 -08:00
Tyera Eulberg
c948814eae Update getConfirmedBlock examples (#7772) 2020-01-13 15:05:27 -07:00
Greg Fitzgerald
b5dba77056 Rename blocktree to blockstore (#7757)
automerge
2020-01-13 13:13:52 -08:00
Trent Nelson
ef06d165b4 Book: Update durable nonce proposal entry (#7694)
automerge
2020-01-13 13:12:09 -08:00
Jack May
5cb23c814d Install move-loader binaries (#7768) 2020-01-13 12:53:53 -08:00
carllin
8f7ded33e0 coalesce data and coding index (#7765) 2020-01-13 12:03:19 -08:00
Tyera Eulberg
a17d5795fb getConfirmedBlock: add encoding optional parameter (#7756)
automerge
2020-01-12 21:34:30 -08:00
Michael Vines
ad4d41e602 Pick an RPC node at random to avoid getting stuck on a bad RPC node 2020-01-11 12:10:11 -07:00
Trent Nelson
9754fc789e Manage durable nonce stored value in runtime (#7684)
* Bank: Return nonce pubkey/account from `check_tx_durable_nonce`

* Forward account with HashAgeKind::DurableNonce

* Add durable nonce helper for HashAgeKind

* Add nonce util for advancing stored nonce in runtime

* Advance nonce in runtime

* Store rolled back nonce account on TX InstructionError

* nonce: Add test for replayed InstErr fee theft
2020-01-10 16:57:31 -07:00
carllin
fd3c6eb320 Remove print in test (#7758)
automerge
2020-01-10 15:37:22 -08:00
sakridge
b7b68ecdba Add partition testing documentation (#7739) 2020-01-10 15:32:43 -08:00
Jack May
08ba27627d Direct entrypoint for execution (#7746) 2020-01-10 13:20:15 -08:00
carllin
27d2c0aaf3 Handle errors on replaying ledger properly (#7741) 2020-01-10 12:16:44 -08:00
Jack May
b714a4be63 Fix call to BPF build script (#7754)
automerge
2020-01-10 10:28:55 -08:00
Trent Nelson
2356b25c58 Book: Update SPV section to reflect new account state query mechanism (#5399)
* Book: Update SPV section to reflect new account state query mechanism

* Book: SPV - Rename Bank-Merkle diagram

* Relax specificity of inclusion proof resolution

* Cosmetic: re-wrap at 80
2020-01-10 10:48:29 -07:00
Greg Fitzgerald
05cad05505 Update validator proposal (#7752)
* Use 80-char lines

* Remove the part that was implemented in Gulf Stream
2020-01-10 10:15:49 -07:00
dependabot-preview[bot]
1e3082fbc0 Bump tiny-bip39 from 0.6.2 to 0.7.0 (#7750)
Bumps [tiny-bip39](https://github.com/maciejhirsz/tiny-bip39) from 0.6.2 to 0.7.0.
- [Release notes](https://github.com/maciejhirsz/tiny-bip39/releases)
- [Changelog](https://github.com/maciejhirsz/tiny-bip39/blob/master/CHANGELOG.md)
- [Commits](https://github.com/maciejhirsz/tiny-bip39/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-10 09:19:28 -07:00
dependabot-preview[bot]
80d2573b10 Bump cbindgen from 0.12.1 to 0.12.2 (#7749)
Bumps [cbindgen](https://github.com/eqrion/cbindgen) from 0.12.1 to 0.12.2.
- [Release notes](https://github.com/eqrion/cbindgen/releases)
- [Changelog](https://github.com/eqrion/cbindgen/blob/master/CHANGES)
- [Commits](https://github.com/eqrion/cbindgen/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-10 09:19:01 -07:00
dependabot-preview[bot]
6adcdc41f4 Bump num-traits from 0.2.10 to 0.2.11 (#7737)
Bumps [num-traits](https://github.com/rust-num/num-traits) from 0.2.10 to 0.2.11.
- [Release notes](https://github.com/rust-num/num-traits/releases)
- [Changelog](https://github.com/rust-num/num-traits/blob/master/RELEASES.md)
- [Commits](https://github.com/rust-num/num-traits/compare/num-traits-0.2.10...num-traits-0.2.11)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-10 09:18:39 -07:00
Jack May
2d08dddfc8 nit, cleanup uses (#7747)
automerge
2020-01-09 23:58:13 -08:00
Jack May
6da8f49d8b nit, clearer error message (#7748)
automerge
2020-01-09 23:53:47 -08:00
Justin Starry
bcd072c5e8 Clarify account creation error messages in CLI (#7719)
* Clarify account creation error messages in CLI

* feedback

* Fix rebase
2020-01-10 12:25:07 +08:00
Justin Starry
e90a31781c Update http crate in bpf program to fix security vulnerability (#7735) 2020-01-10 10:21:20 +08:00
sakridge
2e89ec9105 Don't keep generating transactions in non-sustained bench-tps mode (#7577) 2020-01-09 17:48:18 -08:00
Ryo Onodera
865c42465a Cap file size for snapshot data files (#7182)
* save limit deserialize

* save

* Save

* Clean up

* rustfmt

* rustfmt

* Just comment out to please CI

* Fix ci...

* Move code

* Rustfmt

* Clean up control flow

* Add another comment

* Introduce predetermined constant limit on snapshot data files (deserialize side)

* Introduce predetermined constant limit on snapshot data files (serialize side)

* rustfmt

* Tweak message

* Revert dynamic memory limit

* Limit size of snapshot data file (de)serialization

* Fix test breakage

* Clean up

* Fix uses formatting

* Rename: deserialize_{for,from}_snapshot

* Simplify comment

* Use Slot

* Provide slot for status cache

* Align variable name with snapshot_status_cache_file_path

* Define serialize_snapshot_data_file_with_metrics

* Fix build.......

* De-marco serialize_snapshot_data_file_with_metrics

* Revert u64 => Slot
2020-01-10 09:49:36 +09:00
sakridge
73c93cc345 Print bank hash and hash inputs. (#7733) 2020-01-09 16:33:10 -08:00
dependabot-preview[bot]
cf32fdf672 Bump reqwest from 0.10.0 to 0.10.1 (#7731)
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.10.0 to 0.10.1.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.10.0...v0.10.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-09 17:10:55 -07:00
Ryo Onodera
c33b54794c Propose Solana ABI management (#7524)
* Propose Solana ABI management

* Mention fuzz testing

* Address minor review comments

* Remove versioning and unit tests

* Rename

* Clean up a bit

* Pass through Grammarly

* Yet more tweaks...
2020-01-10 08:24:08 +09:00
Rob Walker
6775e83420 Add create with seed to cli (#7713)
* Add create with seed to cli

* nonce and vote, too
2020-01-09 15:22:48 -08:00
Justin Starry
719785a8d3 Update http crate to fix security vulnerability (#7725)
* Update http to fix security vulnerability

* Ignore RUSTSEC because it incorrectly says http 0.1.21 is vulnerable
2020-01-10 04:43:02 +09:00
Ryo Onodera
287995ffdf Correctly integrate buildkite with codecov (#7718)
* Correctly integrate buildkite with codecov

* Fix shellcheck...

* Really detect Buildkite
2020-01-10 03:39:33 +09:00
dependabot-preview[bot]
0e506a53b5 Bump url from 2.1.0 to 2.1.1 (#7720)
Bumps [url](https://github.com/servo/rust-url) from 2.1.0 to 2.1.1.
- [Release notes](https://github.com/servo/rust-url/releases)
- [Commits](https://github.com/servo/rust-url/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-09 08:44:47 -07:00
Michael Vines
70e1a15973 Remove vote account from genesis validators 2020-01-08 22:47:56 -07:00
Jack May
09cff5e4cc Cleanup usage of feature "program" (#7712) 2020-01-08 13:49:35 -08:00
dependabot-preview[bot]
57858b8015 Bump reqwest from 0.9.24 to 0.10.0 (#7642)
* Bump reqwest from 0.9.24 to 0.10.0

Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.9.24 to 0.10.0.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>

* Make reqwest::blocking specific

Co-authored-by: Tyera Eulberg <teulberg@gmail.com>
2020-01-08 13:31:43 -07:00
Jack May
07855e3125 Allow override of RUST_LOG (#7705) 2020-01-08 09:19:12 -08:00
Jack May
2f5f8e7afd Pass RUST_LOG through on testnet creation (#7707) 2020-01-07 21:46:28 -08:00
Michael Vines
43897de12e Account for stake held by the current node while waiting for the supermajority to join gossip 2020-01-07 22:29:31 -07:00
dependabot-preview[bot]
4b577aa77b Bump cc from 1.0.48 to 1.0.49 (#7690)
Bumps [cc](https://github.com/alexcrichton/cc-rs) from 1.0.48 to 1.0.49.
- [Release notes](https://github.com/alexcrichton/cc-rs/releases)
- [Commits](https://github.com/alexcrichton/cc-rs/compare/1.0.48...1.0.49)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-07 22:13:17 -07:00
carllin
85c3d64f29 Fix rooted slot iterator (#7695)
* Enable jumping gaps caused by snapshots in rooted slot iterator
2020-01-07 22:51:28 -05:00
Michael Vines
47dd293904 supermajority is one word 2020-01-07 15:50:59 -07:00
Michael Vines
c4220a4853 clippy 2020-01-07 15:50:59 -07:00
Michael Vines
48ab88a2af Add --wait-for-super-majority to facilitate asynchronous cluster restarts 2020-01-07 15:50:59 -07:00
dependabot-preview[bot]
d9cf9709d2 Bump csv from 1.1.1 to 1.1.2 (#7698)
Bumps [csv](https://github.com/BurntSushi/rust-csv) from 1.1.1 to 1.1.2.
- [Release notes](https://github.com/BurntSushi/rust-csv/releases)
- [Commits](https://github.com/BurntSushi/rust-csv/compare/1.1.1...1.1.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-07 14:38:02 -07:00
Michael Vines
9720c894f1 Use commas to make a log message more readable 2020-01-06 22:31:01 -07:00
Rob Walker
8dad3af36d Update Lockup comments (#7692) 2020-01-06 19:52:20 -08:00
Ryo Onodera
e5425d4a27 Fix AppendVec test breakage... (#7693) 2020-01-07 09:21:59 +09:00
Ryo Onodera
58e6d4aabb Sanitize AppendVec's file_size (#7373)
* Check append vec file size

* Don't use panic

* Clean up a bit

* Clean up

* Clean ups

* Change assertion into sanitization check

* Remove...

* Clean up

* More clean up

* More clean up

* Use assert_matches
2020-01-07 08:14:56 +09:00
Tyera Eulberg
9ce142606c Update getBlockTime rpc docs (#7688) 2020-01-06 00:00:20 -07:00
Tyera Eulberg
e75a64a8a2 getBlockTime: Fix RootedSlotIterator lowest root (#7681)
* Determine lowest_nonzero_root for purged blocktrees, and clean up slot offset math

* Filter duplicate timestamp votes

* Refactor deduping code
2020-01-05 23:38:27 -07:00
dependabot-preview[bot]
bc71e1b612 Bump sha2 from 0.8.0 to 0.8.1
Bumps [sha2](https://github.com/RustCrypto/hashes) from 0.8.0 to 0.8.1.
- [Release notes](https://github.com/RustCrypto/hashes/releases)
- [Commits](https://github.com/RustCrypto/hashes/compare/sha2-v0.8.0...sha2-v0.8.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2020-01-05 22:33:02 -07:00
Tyera Eulberg
580ca36a62 Cli: fund validator-info accounts with rent-exempt lamports 2020-01-04 22:59:12 -07:00
Michael Vines
447fe48d2a Revert "Add a stand-alone gossip node on the blockstreamer instance"
This reverts commit a217920561.

This commit is causing trouble when the TdS cluster is reset and
validators running an older genesis config are still present.
Occasionally an RPC URL from an older validator will be selected,
causing a new node to fail to boot.
2020-01-04 16:42:12 -07:00
Michael Vines
e8a6c8cd6d Don't panic if peer_addr() fails (#7678)
automerge
2020-01-04 10:00:22 -08:00
Michael Vines
a8fd42c1df Set default vote account commission to 100% 2020-01-04 10:04:31 -07:00
Michael Vines
e782c26908 Prune older epoch stakes 2020-01-04 09:34:27 -07:00
Michael Vines
cd65a1e172 Run local cluster tests serially for easier debug 2020-01-04 09:34:27 -07:00
Michael Vines
6e51c5685e Minor book fixes 2020-01-04 08:53:20 -07:00
Tyera Eulberg
84a37a2c0c Make validator timestamping more coincident, and increase timestamp sample range (#7673)
automerge
2020-01-03 22:38:00 -08:00
Trent Nelson
7e94cc2cc3 Move nonce into system program (#7645)
automerge
2020-01-03 16:34:58 -08:00
Michael Vines
7002ccb866 Log root slots while processing ledger 2020-01-03 13:25:37 -07:00
Michael Vines
4fe0b116ae Measure heap usage while processing the ledger 2020-01-03 13:25:37 -07:00
Michael Vines
a0fb9de515 Move thread_mem_usage module into measure/ 2020-01-03 13:25:37 -07:00
sakridge
5d42dcc9ec Reduce constants for ledger cleanup test (#7629) 2020-01-03 12:05:14 -08:00
sakridge
96e88c90e8 Lessen test_slots_to_snapshot constants to make test faster (#7628)
Reduces test time from 6m to 45s
2020-01-03 09:58:52 -08:00
Jack May
75d94240ed account_info utilities (#7666) 2020-01-03 09:14:51 -08:00
Jack May
6c544708e1 Add safety docs (#7665) 2020-01-03 09:14:28 -08:00
Michael Vines
078e7246ac Publish bpf-sdk only in Linux build 2020-01-02 23:20:59 -07:00
Jack May
06cff1fb9f Publish bpf-sdk releases (#7655) 2020-01-02 20:44:15 -08:00
Michael Vines
2e8bbed75b Revert "Remove dead code from TdS testnet manager config (#7414)"
This reverts commit 8920ac02f6.
2020-01-02 21:07:40 -07:00
Greg Fitzgerald
a707c9410e More thiserror (#7183)
* Less solana_core::result. Module now private.

* Drop solana_core::result dependency from a few more modules

* Fix warning

* Cleanup

* Fix typo
2020-01-02 20:50:43 -07:00
Jack May
a956bb08d8 Export bpf loader ser/de (#7661) 2020-01-02 18:18:56 -08:00
Trent Nelson
db52cc6749 CLI: Fix default nonce authority resolution (#7657)
automerge
2020-01-02 17:05:08 -08:00
Trent Nelson
73c6224a95 Book - Document nonceable CLI subcommands (#7656)
automerge
2020-01-02 16:30:26 -08:00
Michael Vines
a217920561 Add a stand-alone gossip node on the blockstreamer instance
The blockstreamer instance is the TdS cluster entrypoint. Running an
additional solana-gossip node allows other participants to join a
cluster even if the validator node on the blockstreamer instance goes down.
2020-01-02 17:20:59 -07:00
Michael Vines
48a36f59a6 Add get-rpc-url --any option 2020-01-02 17:20:59 -07:00
Michael Vines
965b132664 Permit --gossip-host with --entrypoint 2020-01-02 17:20:59 -07:00
Rob Walker
63f185f9bf Delete unused type (#7653) 2020-01-02 13:15:31 -08:00
Rob Walker
e97b0088f2 Make lockups block stake transfers via rekeying (#7651) 2020-01-01 11:03:29 -08:00
Trent Nelson
374c17a0d9 Book: Sync CLI API doc for show-block-production (#7648)
automerge
2019-12-31 09:26:45 -08:00
Michael Vines
4b3bc587ab Add input validation for --creation-time/--lockup-date args (#7646)
automerge
2019-12-30 21:57:47 -08:00
dependabot-preview[bot]
06c63f2026 Bump cbindgen from 0.12.0 to 0.12.1 (#7637)
Bumps [cbindgen](https://github.com/eqrion/cbindgen) from 0.12.0 to 0.12.1.
- [Release notes](https://github.com/eqrion/cbindgen/releases)
- [Changelog](https://github.com/eqrion/cbindgen/blob/master/CHANGES)
- [Commits](https://github.com/eqrion/cbindgen/compare/v0.12.0...v0.12.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-12-30 22:16:48 -07:00
Rob Walker
6b7d9942a7 Add authorized_voter history (#7643)
* Add authorized_voter history

* fixups

* coverage

* bigger vote state
2019-12-30 19:57:53 -08:00
Rob Walker
760a56964f delete fixed_buf (#7644) 2019-12-30 16:45:43 -08:00
Tyera Eulberg
6ca575b5a3 Make sol-to-lamport const name more clear (#7641)
automerge
2019-12-30 11:28:41 -08:00
Trent Nelson
ce1d36cacb Book: Document CLI durable nonce account management (#7595)
* Book: Document CLI durable nonce account management

* Fix rent link

* review
2019-12-30 13:13:56 -05:00
Pankaj Garg
87b2525e03 Limit maximum number of shreds in a slot to 32K (#7584)
* Limit maximum number of shreds in a slot to 32K

* mark dead slot replay as fatal error
2019-12-30 07:42:09 -08:00
Rob Walker
faa77aca2e Update terminology.md 2019-12-29 21:35:06 -08:00
Rob Walker
5d2158792c Add inflation to book, cleanup dead links, include orphaned documents (#7638)
* Add inflation as implemented proposal

* grab another orphan and add orphan-proofing
2019-12-29 18:15:32 -08:00
Rob Walker
e1ebaa902b Add base pubkey to create_account_with_seed (#7636) 2019-12-29 16:42:24 -08:00
Rob Walker
e0564f628e Use lamports in genesis (#7631)
* Use lamports in genesis

* readability
2019-12-28 12:49:10 -08:00
Justin Starry
44e45aa090 Support nonced transactions in the CLI (#7624)
* Support nonced transactions in the CLI

* Update nonce.rs
2019-12-27 14:35:49 -06:00
Michael Vines
89f5f336af Account for rent (#7626)
automerge
2019-12-24 18:01:21 -08:00
Parth
727be309b2 fix entry verification state (#7169)
automerge
2019-12-23 23:26:27 -08:00
dependabot-preview[bot]
ce2d7a2d5a Bump nix from 0.16.0 to 0.16.1 (#7623)
Bumps [nix](https://github.com/nix-rust/nix) from 0.16.0 to 0.16.1.
- [Release notes](https://github.com/nix-rust/nix/releases)
- [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md)
- [Commits](https://github.com/nix-rust/nix/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-12-23 23:06:42 -07:00
Michael Vines
fad6c7201e Remove old book location (#7621) 2019-12-23 22:56:42 -07:00
Michael Vines
8f0e1f3349 Update gitbook-cage first 2019-12-23 18:18:30 -07:00
sakridge
6f7d0c6928 Move cleanup to a script so it doesn't kill itself (#7603) 2019-12-23 14:31:57 -08:00
Rob Walker
120c8f244c Add slot_history for slashing (#7589)
* Add slot_history for slashing

* fixup

* fixup
2019-12-23 12:23:45 -08:00
Jack May
352a367570 Specify version for solana-sdk-macro to enable crate.io publishing (#7615) 2019-12-23 12:10:43 -08:00
Michael Vines
9f65d22909 Groom log messages (#7610) 2019-12-23 10:43:07 -07:00
Ryo Onodera
141131f3a6 Stabilize fn coverage by creating a clean room (#7576)
* Stabilize fn coverage by pruning all updated files

* Pruning didn't work; Switch to clean room dir

* Oh, shellcheck...

* Remove the data_dir variable

* Comment about rationale for find + while read
2019-12-23 16:32:29 +09:00
dependabot-preview[bot]
488420fdf2 Bump core_affinity from 0.5.9 to 0.5.10 (#7578)
Bumps [core_affinity](https://github.com/Elzair/core_affinity_rs) from 0.5.9 to 0.5.10.
- [Release notes](https://github.com/Elzair/core_affinity_rs/releases)
- [Commits](https://github.com/Elzair/core_affinity_rs/compare/0.5.9...0.5.10)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-12-22 22:41:47 -07:00
Greg Fitzgerald
10e6b8f769 Fix key in genesis (#7585) 2019-12-22 22:40:35 -07:00
Michael Vines
419da18405 show-block-production: Rename "missed" to "skipped" as not all skipped slots are missed slots (#7599) 2019-12-22 22:39:47 -07:00
Dan Albert
7329d4bf3a Extend Stable CI job timeout to 60 minutes (#7604) 2019-12-22 20:14:07 -07:00
Ryo Onodera
c8fe4043b6 Rename slot_hash => bank_hash in AccountsDB (#7579)
* Rename slot_hash => bank_hash in AccountsDB
2019-12-23 10:50:31 +09:00
Parth
3d133d61ca fix rent book entry (#7602) 2019-12-23 06:12:29 +05:30
Michael Vines
d51e42c707 MISSED -> SKIPPED 2019-12-22 10:19:35 -07:00
Michael Vines
79e39d6f0b Remove stray SOLANA_CUDA=1 2019-12-22 10:09:04 -07:00
sakridge
7dec934bb3 Optimize lock_accounts mutex use (#7593)
Use the lock for the whole batch instead of per-tx
Optimize the critical section to pre-generate the keys necessary
before taking the lock.
2019-12-21 10:43:22 -08:00
sakridge
83f866df01 Switch banking bench to report tps instead of total time (#7590)
Easier to compare results when modifying thread count.
2019-12-21 10:43:08 -08:00
Michael Vines
d88d8e2dbb Fix another silly bug 2019-12-21 09:20:12 -07:00
Michael Vines
3a40dff999 Cargo.lock 2019-12-20 21:55:35 -07:00
Michael Vines
3f69d58498 ledger-tool: Add --all option to bounds, to display all non-empty slots (#7592) 2019-12-20 20:43:53 -07:00
Dan Albert
ca10cf081f Update cargo.toml files from 0.22.0 to 0.23.0 (#7596) 2019-12-20 21:45:42 -05:00
371 changed files with 14004 additions and 9051 deletions

.mergify.yml

@@ -19,14 +19,6 @@ pull_request_rules:
       label:
         add:
           - automerge
-  - name: v0.21 backport
-    conditions:
-      - base=master
-      - label=v0.21
-    actions:
-      backport:
-        branches:
-          - v0.21
   - name: v0.22 backport
     conditions:
       - base=master
@@ -43,3 +35,11 @@ pull_request_rules:
       backport:
         branches:
           - v0.23
+  - name: v0.24 backport
+    conditions:
+      - base=master
+      - label=v0.24
+    actions:
+      backport:
+        branches:
+          - v0.24

Cargo.lock (generated): 1446 changed lines
File diff suppressed because it is too large

Cargo.toml

@@ -4,7 +4,10 @@ members = [
     "bench-streamer",
     "bench-tps",
     "banking-bench",
+    "chacha",
+    "chacha-cuda",
     "chacha-sys",
+    "cli-config",
     "client",
     "core",
     "faucet",
@@ -38,6 +41,8 @@ members = [
     "programs/vest",
     "programs/vote",
     "archiver",
+    "archiver-lib",
+    "archiver-utils",
     "runtime",
     "sdk",
     "sdk-c",
@@ -45,7 +50,6 @@ members = [
     "sys-tuner",
     "upload-perf",
     "net-utils",
-    "fixed-buf",
     "vote-signer",
     "cli",
     "rayon-threadlimit",

RELEASE.md

@@ -140,25 +140,6 @@ TODO: Documentation update procedure is WIP as we move to gitbook
 Document the new recommended version by updating `book/src/running-archiver.md` and `book/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version.
-#### Publish updated Book
-We maintain three copies of the "book" as official documentation:
-1) "Book" is the documentation for the latest official release. This should get manually updated whenever a new release is made. It is published here:
-https://solana-labs.github.io/book/
-2) "Book-edge" tracks the tip of the master branch and updates automatically.
-https://solana-labs.github.io/book-edge/
-3) "Book-beta" tracks the tip of the beta branch and updates automatically.
-https://solana-labs.github.io/book-beta/
-To manually trigger an update of the "Book", create a new job of the manual-update-book pipeline.
-Set the tag of the latest release as the PUBLISH_BOOK_TAG environment variable.
-```bash
-PUBLISH_BOOK_TAG=v0.16.6
-```
-https://buildkite.com/solana-labs/manual-update-book
 ### Update software on testnet.solana.com
 The testnet running on testnet.solana.com is set to use a fixed release tag

archiver-lib/Cargo.toml (new file, 39 lines)

@@ -0,0 +1,39 @@
[package]
name = "solana-archiver-lib"
version = "0.23.1"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.3"
ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "0.23.1" }
solana-storage-program = { path = "../programs/storage", version = "0.23.1" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.44"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "0.23.1" }
solana-chacha = { path = "../chacha", version = "0.23.1" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.1" }
solana-ledger = { path = "../ledger", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-perf = { path = "../perf", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-core = { path = "../core", version = "0.23.1" }
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.1" }
solana-metrics = { path = "../metrics", version = "0.23.1" }
[dev-dependencies]
hex = "0.4.0"
[lib]
name = "solana_archiver_lib"

archiver-lib/src/archiver.rs

@@ -1,26 +1,27 @@
-use crate::{
-    chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE},
+use crate::result::ArchiverError;
+use crossbeam_channel::unbounded;
+use ed25519_dalek;
+use rand::{thread_rng, Rng, SeedableRng};
+use rand_chacha::ChaChaRng;
+use solana_archiver_utils::sample_file;
+use solana_chacha::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
+use solana_client::{
+    rpc_client::RpcClient, rpc_request::RpcRequest, rpc_response::RpcStorageTurn,
+    thin_client::ThinClient,
+};
+use solana_core::{
     cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
     contact_info::ContactInfo,
     gossip_service::GossipService,
     packet::{limited_deserialize, PACKET_DATA_SIZE},
     repair_service,
     repair_service::{RepairService, RepairSlotRange, RepairStrategy},
-    result::{Error, Result},
     shred_fetch_stage::ShredFetchStage,
     sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
     storage_stage::NUM_STORAGE_SAMPLES,
     streamer::{receiver, responder, PacketReceiver},
     window_service::WindowService,
 };
-use crossbeam_channel::unbounded;
-use ed25519_dalek;
-use rand::{thread_rng, Rng, SeedableRng};
-use rand_chacha::ChaChaRng;
-use solana_client::{
-    rpc_client::RpcClient, rpc_request::RpcRequest, rpc_response::RpcStorageTurn,
-    thin_client::ThinClient,
-};
 use solana_ledger::{
     blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, shred::Shred,
 };
@@ -29,11 +30,11 @@ use solana_perf::packet::Packets;
 use solana_perf::recycler::Recycler;
 use solana_sdk::packet::Packet;
 use solana_sdk::{
-    account_utils::State,
+    account_utils::StateMut,
     client::{AsyncClient, SyncClient},
     clock::{get_complete_segment_from_slot, get_segment_from_slot, Slot},
     commitment_config::CommitmentConfig,
-    hash::{Hash, Hasher},
+    hash::Hash,
     message::Message,
     signature::{Keypair, KeypairUtil, Signature},
     timing::timestamp,
@@ -45,9 +46,7 @@ use solana_storage_program::{
     storage_instruction::{self, StorageAccountType},
 };
 use std::{
-    fs::File,
-    io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom},
-    mem::size_of,
+    io::{self, ErrorKind},
     net::{SocketAddr, UdpSocket},
     path::{Path, PathBuf},
     result,
@@ -58,6 +57,8 @@ use std::{
     time::Duration,
 };
+type Result<T> = std::result::Result<T, ArchiverError>;
 static ENCRYPTED_FILENAME: &str = "ledger.enc";
 #[derive(Serialize, Deserialize)]
@@ -85,41 +86,6 @@ struct ArchiverMeta {
     client_commitment: CommitmentConfig,
 }
-pub(crate) fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
-    let in_file = File::open(in_path)?;
-    let metadata = in_file.metadata()?;
-    let mut buffer_file = BufReader::new(in_file);
-    let mut hasher = Hasher::default();
-    let sample_size = size_of::<Hash>();
-    let sample_size64 = sample_size as u64;
-    let mut buf = vec![0; sample_size];
-    let file_len = metadata.len();
-    if file_len < sample_size64 {
-        return Err(io::Error::new(ErrorKind::Other, "file too short!"));
-    }
-    for offset in sample_offsets {
-        if *offset > (file_len - sample_size64) / sample_size64 {
-            return Err(io::Error::new(ErrorKind::Other, "offset too large"));
-        }
-        buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?;
-        trace!("sampling @ {} ", *offset);
-        match buffer_file.read(&mut buf) {
-            Ok(size) => {
-                assert_eq!(size, buf.len());
-                hasher.hash(&buf);
-            }
-            Err(e) => {
-                warn!("Error sampling file");
-                return Err(e);
-            }
-        }
-    }
-    Ok(hasher.result())
-}
 fn get_slot_from_signature(
     signature: &ed25519_dalek::Signature,
     storage_turn: u64,
@@ -239,16 +205,16 @@ impl Archiver {
         info!("Connecting to the cluster via {:?}", cluster_entrypoint);
         let (nodes, _) =
-            match crate::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
+            match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
                 Ok(nodes_and_archivers) => nodes_and_archivers,
                 Err(e) => {
                     //shutdown services before exiting
                     exit.store(true, Ordering::Relaxed);
                     gossip_service.join()?;
-                    return Err(Error::from(e));
+                    return Err(e.into());
                 }
             };
-        let client = crate::gossip_service::get_client(&nodes);
+        let client = solana_core::gossip_service::get_client(&nodes);
         info!("Setting up mining account...");
         if let Err(e) = Self::setup_mining_account(
@@ -411,7 +377,7 @@ impl Archiver {
         client_commitment: CommitmentConfig,
     ) {
         let nodes = cluster_info.read().unwrap().tvu_peers();
-        let client = crate::gossip_service::get_client(&nodes);
+        let client = solana_core::gossip_service::get_client(&nodes);
         if let Ok(Some(account)) =
             client.get_account_with_commitment(&storage_keypair.pubkey(), client_commitment.clone())
@@ -624,9 +590,7 @@ impl Archiver {
             client_commitment.clone(),
         )? == 0
         {
-            return Err(
-                io::Error::new(io::ErrorKind::Other, "keypair account has no balance").into(),
-            );
+            return Err(ArchiverError::EmptyStorageAccountBalance);
         }
         info!("checking storage account keypair...");
@@ -637,11 +601,8 @@
         let blockhash =
             match client.get_recent_blockhash_with_commitment(client_commitment.clone()) {
                 Ok((blockhash, _)) => blockhash,
-                Err(_) => {
-                    return Err(Error::IO(<io::Error>::new(
-                        io::ErrorKind::Other,
-                        "unable to get recent blockhash, can't submit proof",
-                    )));
+                Err(e) => {
+                    return Err(ArchiverError::TransportError(e));
                 }
             };
@@ -675,7 +636,7 @@ impl Archiver {
     ) {
         // No point if we've got no storage account...
         let nodes = cluster_info.read().unwrap().tvu_peers();
-        let client = crate::gossip_service::get_client(&nodes);
+        let client = solana_core::gossip_service::get_client(&nodes);
         let storage_balance = client.poll_get_balance_with_commitment(
             &storage_keypair.pubkey(),
             meta.client_commitment.clone(),
@@ -739,7 +700,7 @@ impl Archiver {
     fn get_segment_config(
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         client_commitment: CommitmentConfig,
-    ) -> result::Result<u64, Error> {
+    ) -> Result<u64> {
         let rpc_peers = {
             let cluster_info = cluster_info.read().unwrap();
             cluster_info.all_rpc_peers()
@@ -758,12 +719,12 @@ impl Archiver {
             )
             .map_err(|err| {
                 warn!("Error while making rpc request {:?}", err);
-                Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))
+                ArchiverError::ClientError(err)
             })?
             .as_u64()
             .unwrap())
         } else {
-            Err(io::Error::new(io::ErrorKind::Other, "No RPC peers...".to_string()).into())
+            Err(ArchiverError::NoRpcPeers)
         }
     }
@@ -773,7 +734,7 @@ impl Archiver {
         slots_per_segment: u64,
         previous_blockhash: &Hash,
         exit: &Arc<AtomicBool>,
-    ) -> result::Result<(Hash, u64), Error> {
+    ) -> Result<(Hash, u64)> {
         loop {
             let (blockhash, turn_slot) = Self::poll_for_blockhash_and_slot(
                 cluster_info,
@@ -793,7 +754,7 @@ impl Archiver {
         slots_per_segment: u64,
         previous_blockhash: &Hash,
         exit: &Arc<AtomicBool>,
-    ) -> result::Result<(Hash, u64), Error> {
+    ) -> Result<(Hash, u64)> {
         info!("waiting for the next turn...");
         loop {
             let rpc_peers = {
@@ -814,17 +775,13 @@ impl Archiver {
) )
.map_err(|err| { .map_err(|err| {
warn!("Error while making rpc request {:?}", err); warn!("Error while making rpc request {:?}", err);
Error::IO(io::Error::new(ErrorKind::Other, "rpc error")) ArchiverError::ClientError(err)
})?; })?;
let RpcStorageTurn { let RpcStorageTurn {
blockhash: storage_blockhash, blockhash: storage_blockhash,
slot: turn_slot, slot: turn_slot,
} = serde_json::from_value::<RpcStorageTurn>(response).map_err(|err| { } = serde_json::from_value::<RpcStorageTurn>(response)
io::Error::new( .map_err(ArchiverError::JsonError)?;
io::ErrorKind::Other,
format!("Couldn't parse response: {:?}", err),
)
})?;
let turn_blockhash = storage_blockhash.parse().map_err(|err| { let turn_blockhash = storage_blockhash.parse().map_err(|err| {
io::Error::new( io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
@@ -842,7 +799,7 @@ impl Archiver {
} }
} }
if exit.load(Ordering::Relaxed) { if exit.load(Ordering::Relaxed) {
return Err(Error::IO(io::Error::new( return Err(ArchiverError::IO(io::Error::new(
ErrorKind::Other, ErrorKind::Other,
"exit signalled...", "exit signalled...",
))); )));
@@ -950,9 +907,7 @@ impl Archiver {
// check if all the slots in the segment are complete // check if all the slots in the segment are complete
if !Self::segment_complete(start_slot, slots_per_segment, blockstore) { if !Self::segment_complete(start_slot, slots_per_segment, blockstore) {
return Err( return Err(ArchiverError::SegmentDownloadError);
io::Error::new(ErrorKind::Other, "Unable to download the full segment").into(),
);
} }
Ok(start_slot) Ok(start_slot)
} }
@@ -995,74 +950,3 @@ impl Archiver {
panic!("Couldn't get segment slot from archiver!"); panic!("Couldn't get segment slot from archiver!");
} }
} }
#[cfg(test)]
mod tests {
use super::*;
use std::fs::{create_dir_all, remove_file};
use std::io::Write;
fn tmp_file_path(name: &str) -> PathBuf {
use std::env;
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let keypair = Keypair::new();
let mut path = PathBuf::new();
path.push(out_dir);
path.push("tmp");
create_dir_all(&path).unwrap();
path.push(format!("{}-{}", name, keypair.pubkey()));
path
}
#[test]
fn test_sample_file() {
solana_logger::setup();
let in_path = tmp_file_path("test_sample_file_input.txt");
let num_strings = 4096;
let string = "12foobar";
{
let mut in_file = File::create(&in_path).unwrap();
for _ in 0..num_strings {
in_file.write(string.as_bytes()).unwrap();
}
}
let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64;
let samples: Vec<_> = (0..num_samples).collect();
let res = sample_file(&in_path, samples.as_slice());
let ref_hash: Hash = Hash::new(&[
173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144,
151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52,
]);
let res = res.unwrap();
assert_eq!(res, ref_hash);
// Sample just past the end
assert!(sample_file(&in_path, &[num_samples]).is_err());
remove_file(&in_path).unwrap();
}
#[test]
fn test_sample_file_invalid_offset() {
let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt");
{
let mut in_file = File::create(&in_path).unwrap();
for _ in 0..4096 {
in_file.write("123456foobar".as_bytes()).unwrap();
}
}
let samples = [0, 200000];
let res = sample_file(&in_path, &samples);
assert!(res.is_err());
remove_file(in_path).unwrap();
}
#[test]
fn test_sample_file_missing_file() {
let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt");
let samples = [0, 5];
let res = sample_file(&in_path, &samples);
assert!(res.is_err());
}
}

11
archiver-lib/src/lib.rs Normal file

@@ -0,0 +1,11 @@
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate solana_metrics;
pub mod archiver;
mod result;


@@ -0,0 +1,48 @@
use serde_json;
use solana_client::client_error;
use solana_ledger::blockstore;
use solana_sdk::transport;
use std::any::Any;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum ArchiverError {
#[error("IO error")]
IO(#[from] std::io::Error),
#[error("blockstore error")]
BlockstoreError(#[from] blockstore::BlockstoreError),
#[error("crossbeam error")]
CrossbeamSendError(#[from] crossbeam_channel::SendError<u64>),
#[error("send error")]
SendError(#[from] std::sync::mpsc::SendError<u64>),
#[error("join error")]
JoinError(Box<dyn Any + Send + 'static>),
#[error("transport error")]
TransportError(#[from] transport::TransportError),
#[error("client error")]
ClientError(#[from] client_error::ClientError),
#[error("Json parsing error")]
JsonError(#[from] serde_json::error::Error),
#[error("Storage account has no balance")]
EmptyStorageAccountBalance,
#[error("No RPC peers..")]
NoRpcPeers,
#[error("Couldn't download full segment")]
SegmentDownloadError,
}
impl std::convert::From<Box<dyn Any + Send + 'static>> for ArchiverError {
fn from(e: Box<dyn Any + Send + 'static>) -> ArchiverError {
ArchiverError::JoinError(e)
}
}
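The new `result` module replaces the archiver's ad-hoc `io::Error::new(ErrorKind::Other, ...)` wrappers (visible in the archiver.rs hunks above) with typed `ArchiverError` variants. Below is a minimal sketch of the pattern, not the repo's code: it assumes a crate-local `Result<T>` alias like the one the archiver's new signatures imply (the alias itself is not shown in this diff) and uses a stand-in `DemoError` enum.

```rust
// Sketch only: a thiserror-style error enum plus a crate-local Result alias,
// mirroring the shape of archiver-lib's new `result` module (the alias is an
// assumption; it is not shown in this diff).
use std::io;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum DemoError {
    #[error("IO error")]
    Io(#[from] io::Error),
    #[error("No RPC peers..")]
    NoRpcPeers,
}

// Presumed alias so functions can be written as `-> Result<u64>`.
pub type Result<T> = std::result::Result<T, DemoError>;

fn file_len(path: &str) -> Result<u64> {
    // `#[from]` generates `From<io::Error>`, so `?` converts automatically
    // instead of wrapping everything in io::ErrorKind::Other.
    Ok(std::fs::metadata(path)?.len())
}

fn main() {
    match file_len("Cargo.toml") {
        Ok(len) => println!("{} bytes", len),
        Err(e) => eprintln!("error: {}", e),
    }
}
```

The `#[from]` attributes are what make `?` and `.into()` work at the call sites shown earlier, so errors keep their type instead of being flattened into `io::ErrorKind::Other` strings.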

26
archiver-utils/Cargo.toml Normal file

@@ -0,0 +1,26 @@
[package]
name = "solana-archiver-utils"
version = "0.23.1"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha = { path = "../chacha", version = "0.23.1" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.1" }
solana-ledger = { path = "../ledger", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-perf = { path = "../perf", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.1" }
[dev-dependencies]
hex = "0.4.0"
[lib]
name = "solana_archiver_utils"

120
archiver-utils/src/lib.rs Normal file

@@ -0,0 +1,120 @@
#[macro_use]
extern crate log;
use solana_sdk::hash::{Hash, Hasher};
use std::fs::File;
use std::io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom};
use std::mem::size_of;
use std::path::Path;
pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
let in_file = File::open(in_path)?;
let metadata = in_file.metadata()?;
let mut buffer_file = BufReader::new(in_file);
let mut hasher = Hasher::default();
let sample_size = size_of::<Hash>();
let sample_size64 = sample_size as u64;
let mut buf = vec![0; sample_size];
let file_len = metadata.len();
if file_len < sample_size64 {
return Err(io::Error::new(ErrorKind::Other, "file too short!"));
}
for offset in sample_offsets {
if *offset > (file_len - sample_size64) / sample_size64 {
return Err(io::Error::new(ErrorKind::Other, "offset too large"));
}
buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?;
trace!("sampling @ {} ", *offset);
match buffer_file.read(&mut buf) {
Ok(size) => {
assert_eq!(size, buf.len());
hasher.hash(&buf);
}
Err(e) => {
warn!("Error sampling file");
return Err(e);
}
}
}
Ok(hasher.result())
}
#[cfg(test)]
mod tests {
use super::*;
use rand::{thread_rng, Rng};
use std::fs::{create_dir_all, remove_file};
use std::io::Write;
use std::path::PathBuf;
extern crate hex;
fn tmp_file_path(name: &str) -> PathBuf {
use std::env;
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let mut rand_bits = [0u8; 32];
thread_rng().fill(&mut rand_bits[..]);
let mut path = PathBuf::new();
path.push(out_dir);
path.push("tmp");
create_dir_all(&path).unwrap();
path.push(format!("{}-{:?}", name, hex::encode(rand_bits)));
println!("path: {:?}", path);
path
}
#[test]
fn test_sample_file() {
solana_logger::setup();
let in_path = tmp_file_path("test_sample_file_input.txt");
let num_strings = 4096;
let string = "12foobar";
{
let mut in_file = File::create(&in_path).unwrap();
for _ in 0..num_strings {
in_file.write(string.as_bytes()).unwrap();
}
}
let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64;
let samples: Vec<_> = (0..num_samples).collect();
let res = sample_file(&in_path, samples.as_slice());
let ref_hash: Hash = Hash::new(&[
173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144,
151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52,
]);
let res = res.unwrap();
assert_eq!(res, ref_hash);
// Sample just past the end
assert!(sample_file(&in_path, &[num_samples]).is_err());
remove_file(&in_path).unwrap();
}
#[test]
fn test_sample_file_invalid_offset() {
let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt");
{
let mut in_file = File::create(&in_path).unwrap();
for _ in 0..4096 {
in_file.write("123456foobar".as_bytes()).unwrap();
}
}
let samples = [0, 200000];
let res = sample_file(&in_path, &samples);
assert!(res.is_err());
remove_file(in_path).unwrap();
}
#[test]
fn test_sample_file_missing_file() {
let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt");
let samples = [0, 5];
let res = sample_file(&in_path, &samples);
assert!(res.is_err());
}
}


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-archiver" name = "solana-archiver"
version = "0.22.10" version = "0.23.1"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@@ -10,10 +10,11 @@ homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.33.0" clap = "2.33.0"
console = "0.9.1" console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" } solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
solana-core = { path = "../core", version = "0.22.10" } solana-core = { path = "../core", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.22.10" } solana-logger = { path = "../logger", version = "0.23.1" }
solana-metrics = { path = "../metrics", version = "0.22.10" } solana-metrics = { path = "../metrics", version = "0.23.1" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" } solana-archiver-lib = { path = "../archiver-lib", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.22.10" } solana-net-utils = { path = "../net-utils", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.1" }


@@ -1,5 +1,6 @@
use clap::{crate_description, crate_name, App, Arg}; use clap::{crate_description, crate_name, App, Arg};
use console::style; use console::style;
use solana_archiver_lib::archiver::Archiver;
use solana_clap_utils::{ use solana_clap_utils::{
input_validators::is_keypair, input_validators::is_keypair,
keypair::{ keypair::{
@@ -8,7 +9,6 @@ use solana_clap_utils::{
}, },
}; };
use solana_core::{ use solana_core::{
archiver::Archiver,
cluster_info::{Node, VALIDATOR_PORT_RANGE}, cluster_info::{Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo, contact_info::ContactInfo,
}; };


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-banking-bench" name = "solana-banking-bench"
version = "0.22.10" version = "0.23.1"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies] [dependencies]
log = "0.4.6" log = "0.4.6"
rayon = "1.2.0" rayon = "1.2.0"
solana-core = { path = "../core", version = "0.22.10" } solana-core = { path = "../core", version = "0.23.1" }
solana-ledger = { path = "../ledger", version = "0.22.10" } solana-ledger = { path = "../ledger", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.22.10" } solana-logger = { path = "../logger", version = "0.23.1" }
solana-runtime = { path = "../runtime", version = "0.22.10" } solana-runtime = { path = "../runtime", version = "0.23.1" }
solana-measure = { path = "../measure", version = "0.22.10" } solana-measure = { path = "../measure", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.22.10" } solana-sdk = { path = "../sdk", version = "0.23.1" }
rand = "0.6.5" rand = "0.6.5"
crossbeam-channel = "0.3" crossbeam-channel = "0.3"


@@ -162,8 +162,8 @@ fn main() {
// If it is dropped before poh_service, then poh_service will error when // If it is dropped before poh_service, then poh_service will error when
// calling send() on the channel. // calling send() on the channel.
let signal_receiver = Arc::new(signal_receiver); let signal_receiver = Arc::new(signal_receiver);
let mut total = 0; let mut total_us = 0;
let mut tx_total = 0; let mut tx_total_us = 0;
let mut txs_processed = 0; let mut txs_processed = 0;
let mut root = 1; let mut root = 1;
let collector = Pubkey::new_rand(); let collector = Pubkey::new_rand();
@@ -173,6 +173,7 @@ fn main() {
chunk_len, chunk_len,
num_threads, num_threads,
}; };
let mut total_sent = 0;
for _ in 0..ITERS { for _ in 0..ITERS {
let now = Instant::now(); let now = Instant::now();
let mut sent = 0; let mut sent = 0;
@@ -223,7 +224,7 @@ fn main() {
); );
assert!(txs_processed < bank.transaction_count()); assert!(txs_processed < bank.transaction_count());
txs_processed = bank.transaction_count(); txs_processed = bank.transaction_count();
tx_total += duration_as_us(&now.elapsed()); tx_total_us += duration_as_us(&now.elapsed());
let mut poh_time = Measure::start("poh_time"); let mut poh_time = Measure::start("poh_time");
poh_recorder.lock().unwrap().reset( poh_recorder.lock().unwrap().reset(
@@ -255,20 +256,21 @@ fn main() {
poh_time.as_us(), poh_time.as_us(),
); );
} else { } else {
tx_total += duration_as_us(&now.elapsed()); tx_total_us += duration_as_us(&now.elapsed());
} }
// This signature clear may not actually clear the signatures // This signature clear may not actually clear the signatures
// in this chunk, but since we rotate between CHUNKS then // in this chunk, but since we rotate between CHUNKS then
// we should clear them by the time we come around again to re-use that chunk. // we should clear them by the time we come around again to re-use that chunk.
bank.clear_signatures(); bank.clear_signatures();
total += duration_as_us(&now.elapsed()); total_us += duration_as_us(&now.elapsed());
debug!( debug!(
"time: {} us checked: {} sent: {}", "time: {} us checked: {} sent: {}",
duration_as_us(&now.elapsed()), duration_as_us(&now.elapsed()),
txes / CHUNKS, txes / CHUNKS,
sent, sent,
); );
total_sent += sent;
if bank.slot() > 0 && bank.slot() % 16 == 0 { if bank.slot() > 0 && bank.slot() % 16 == 0 {
for tx in transactions.iter_mut() { for tx in transactions.iter_mut() {
@@ -284,11 +286,11 @@ fn main() {
} }
eprintln!( eprintln!(
"{{'name': 'banking_bench_total', 'median': '{}'}}", "{{'name': 'banking_bench_total', 'median': '{}'}}",
total / ITERS as u64, (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
); );
eprintln!( eprintln!(
"{{'name': 'banking_bench_tx_total', 'median': '{}'}}", "{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
tx_total / ITERS as u64, (1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
); );
drop(verified_sender); drop(verified_sender);
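The reported numbers change meaning in this hunk: the old code printed the average elapsed microseconds per iteration (`total / ITERS`), while the new code divides total transactions sent by total elapsed microseconds, i.e. throughput in transactions per second. A small illustration of the two formulas (the values are made up):

```rust
// Old metric: mean elapsed microseconds per benchmark iteration.
fn us_per_iter(total_us: u64, iters: u64) -> u64 {
    total_us / iters
}

// New metric: overall throughput in transactions per second, computed from
// total transactions sent and total elapsed microseconds.
fn tx_per_sec(total_sent: usize, total_us: u64) -> f64 {
    (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64)
}

fn main() {
    // e.g. 3_000_000 transactions over 2 seconds of measured time
    assert_eq!(us_per_iter(2_000_000, 100), 20_000);
    assert_eq!(tx_per_sec(3_000_000, 2_000_000), 1_500_000.0);
}
```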


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-exchange" name = "solana-bench-exchange"
version = "0.22.10" version = "0.23.1"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@@ -23,19 +23,19 @@ serde = "1.0.104"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.44" serde_json = "1.0.44"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" } solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
solana-core = { path = "../core", version = "0.22.10" } solana-core = { path = "../core", version = "0.23.1" }
solana-genesis = { path = "../genesis", version = "0.22.10" } solana-genesis = { path = "../genesis", version = "0.23.1" }
solana-client = { path = "../client", version = "0.22.10" } solana-client = { path = "../client", version = "0.23.1" }
solana-faucet = { path = "../faucet", version = "0.22.10" } solana-faucet = { path = "../faucet", version = "0.23.1" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.10" } solana-exchange-program = { path = "../programs/exchange", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.22.10" } solana-logger = { path = "../logger", version = "0.23.1" }
solana-metrics = { path = "../metrics", version = "0.22.10" } solana-metrics = { path = "../metrics", version = "0.23.1" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" } solana-net-utils = { path = "../net-utils", version = "0.23.1" }
solana-runtime = { path = "../runtime", version = "0.22.10" } solana-runtime = { path = "../runtime", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.22.10" } solana-sdk = { path = "../sdk", version = "0.23.1" }
untrusted = "0.7.0" untrusted = "0.7.0"
ws = "0.9.1" ws = "0.9.1"
[dev-dependencies] [dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.22.10" } solana-local-cluster = { path = "../local-cluster", version = "0.23.1" }


@@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-streamer" name = "solana-bench-streamer"
version = "0.22.10" version = "0.23.1"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
[dependencies] [dependencies]
clap = "2.33.0" clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" } solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
solana-core = { path = "../core", version = "0.22.10" } solana-core = { path = "../core", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.22.10" } solana-logger = { path = "../logger", version = "0.23.1" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" } solana-net-utils = { path = "../net-utils", version = "0.23.1" }


@@ -1,6 +1,5 @@
use clap::{crate_description, crate_name, App, Arg}; use clap::{crate_description, crate_name, App, Arg};
use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE}; use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_core::result::Result;
use solana_core::streamer::{receiver, PacketReceiver}; use solana_core::streamer::{receiver, PacketReceiver};
use std::cmp::max; use std::cmp::max;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}; use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
@@ -8,7 +7,7 @@ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel; use std::sync::mpsc::channel;
use std::sync::Arc; use std::sync::Arc;
use std::thread::sleep; use std::thread::sleep;
use std::thread::{spawn, JoinHandle}; use std::thread::{spawn, JoinHandle, Result};
use std::time::Duration; use std::time::Duration;
use std::time::SystemTime; use std::time::SystemTime;


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-bench-tps" name = "solana-bench-tps"
version = "0.22.10" version = "0.23.1"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@@ -16,24 +16,24 @@ serde = "1.0.104"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.44" serde_json = "1.0.44"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" } solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
solana-core = { path = "../core", version = "0.22.10" } solana-core = { path = "../core", version = "0.23.1" }
solana-genesis = { path = "../genesis", version = "0.22.10" } solana-genesis = { path = "../genesis", version = "0.23.1" }
solana-client = { path = "../client", version = "0.22.10" } solana-client = { path = "../client", version = "0.23.1" }
solana-faucet = { path = "../faucet", version = "0.22.10" } solana-faucet = { path = "../faucet", version = "0.23.1" }
solana-librapay = { path = "../programs/librapay", version = "0.22.10", optional = true } solana-librapay = { path = "../programs/librapay", version = "0.23.1", optional = true }
solana-logger = { path = "../logger", version = "0.22.10" } solana-logger = { path = "../logger", version = "0.23.1" }
solana-metrics = { path = "../metrics", version = "0.22.10" } solana-metrics = { path = "../metrics", version = "0.23.1" }
solana-measure = { path = "../measure", version = "0.22.10" } solana-measure = { path = "../measure", version = "0.23.1" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" } solana-net-utils = { path = "../net-utils", version = "0.23.1" }
solana-runtime = { path = "../runtime", version = "0.22.10" } solana-runtime = { path = "../runtime", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.22.10" } solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.10", optional = true } solana-move-loader-program = { path = "../programs/move_loader", version = "0.23.1", optional = true }
[dev-dependencies] [dev-dependencies]
serial_test = "0.3.2" serial_test = "0.3.2"
serial_test_derive = "0.3.1" serial_test_derive = "0.3.1"
solana-local-cluster = { path = "../local-cluster", version = "0.22.10" } solana-local-cluster = { path = "../local-cluster", version = "0.23.1" }
[features] [features]
move = ["solana-librapay", "solana-move-loader-program"] move = ["solana-librapay", "solana-move-loader-program"]


@@ -187,7 +187,9 @@ where
sleep(Duration::from_millis(1)); sleep(Duration::from_millis(1));
} }
} else { } else {
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 { while !shared_txs.read().unwrap().is_empty()
|| shared_tx_active_thread_count.load(Ordering::Relaxed) > 0
{
sleep(Duration::from_millis(1)); sleep(Duration::from_millis(1));
} }
} }


@@ -4,7 +4,7 @@ use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil}; use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil};
use std::{net::SocketAddr, process::exit, time::Duration}; use std::{net::SocketAddr, process::exit, time::Duration};
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::SOL_LAMPORTS; const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
/// Holds the configuration for a single run of the benchmark /// Holds the configuration for a single run of the benchmark
pub struct Config { pub struct Config {


@@ -12,7 +12,7 @@ use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::e
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7; pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
fn main() { fn main() {
solana_logger::setup_with_filter("solana=info"); solana_logger::setup_with_default("solana=info");
solana_metrics::set_panic_hook("bench-tps"); solana_metrics::set_panic_hook("bench-tps");
let matches = cli::build_args(solana_clap_utils::version!()).get_matches(); let matches = cli::build_args(solana_clap_utils::version!()).get_matches();


@@ -24,7 +24,7 @@ msc {
... ; ... ;
Validator abox Validator [label="\nmax\nlockout\n"]; Validator abox Validator [label="\nmax\nlockout\n"];
|||; |||;
StakerX => Cluster [label="StakeState::RedeemCredits()"]; Cluster box Cluster [label="credits redeemed (at epoch)"];
StakerY => Cluster [label="StakeState::RedeemCredits()"] ;
} }


@@ -0,0 +1,19 @@
+----------+
| Bank-Hash|
+----------+
^
|
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
: :
: +--------------+ +-------------+ :
: Hash( | Accounts-Hash| + | Block-Merkle| ) :
: +--------------+ +-------------+ :
: ^ :
+~~~~~~~~~~~~~ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
|
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
: +---------------+ +---------------+ +---------------+ :
: Hash( | Hash(Account1)| + | Hash(Account2)| + ... + | Hash(AccountN)| ) :
: +---------------+ +---------------+ +---------------+ :
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
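The new diagram above replaces the Bank-Merkle tree (removed in the next hunk) with a flat construction: each account is hashed, the account hashes are hashed together into an Accounts-Hash, and that result is hashed with the Block-Merkle to form the Bank-Hash. A rough sketch of that composition, assuming solana-sdk's `hash`/`hashv` helpers; the validator's real accounts hashing is more involved:

```rust
// Illustrative only: mirrors the structure in the diagram, not the
// validator's actual implementation.
use solana_sdk::hash::{hash, hashv, Hash};

fn bank_hash(account_datas: &[&[u8]], block_merkle: &Hash) -> Hash {
    // Hash(Account1), Hash(Account2), ..., Hash(AccountN)
    let account_hashes: Vec<Hash> = account_datas.iter().map(|data| hash(data)).collect();

    // Accounts-Hash = Hash(Hash(Account1) + ... + Hash(AccountN))
    let refs: Vec<&[u8]> = account_hashes.iter().map(|h| h.as_ref()).collect();
    let accounts_hash = hashv(&refs);

    // Bank-Hash = Hash(Accounts-Hash + Block-Merkle)
    hashv(&[accounts_hash.as_ref(), block_merkle.as_ref()])
}

fn main() {
    let accounts: Vec<&[u8]> = vec![b"account-1 data", b"account-2 data"];
    let block_merkle = hash(b"block-merkle-root");
    println!("bank hash: {}", bank_hash(&accounts, &block_merkle));
}
```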


@@ -1,18 +0,0 @@
+------------+
| Bank-Merkle|
+------------+
^ ^
/ \
+-----------------+ +-------------+
| Bank-Diff-Merkle| | Block-Merkle|
+-----------------+ +-------------+
^ ^
/ \
+------+ +--------------------------+
| Hash | | Previous Bank-Diff-Merkle|
+------+ +--------------------------+
^ ^
/ \
+---------------+ +---------------+
| Hash(Account1)| | Hash(Account2)|
+---------------+ +---------------+


@@ -5,9 +5,9 @@ cd "$(dirname "$0")"
usage=$(cargo -q run -p solana-cli -- -C ~/.foo --help | sed 's|'"$HOME"'|~|g') usage=$(cargo -q run -p solana-cli -- -C ~/.foo --help | sed 's|'"$HOME"'|~|g')
out=${1:-src/api-reference/cli.md} out=${1:-src/cli/usage.md}
cat src/api-reference/.cli.md > "$out" cat src/cli/.usage.md.header > "$out"
section() { section() {
declare mark=${2:-"###"} declare mark=${2:-"###"}


@@ -3,4 +3,14 @@ set -e
cd "$(dirname "$0")" cd "$(dirname "$0")"
# md check
find src -name '*.md' -a \! -name SUMMARY.md |
while read -r file; do
if ! grep -q '('"${file#src/}"')' src/SUMMARY.md; then
echo "Error: $file missing from SUMMARY.md"
exit 1
fi
done
make -j"$(nproc)" test make -j"$(nproc)" test


@@ -1,6 +1,6 @@
BOB_SRCS=$(wildcard art/*.bob) BOB_SRCS=$(wildcard art/*.bob)
MSC_SRCS=$(wildcard art/*.msc) MSC_SRCS=$(wildcard art/*.msc)
MD_SRCS=$(wildcard src/*.md) MD_SRCS=$(wildcard src/*.md src/*/*.md)
SVG_IMGS=$(BOB_SRCS:art/%.bob=src/.gitbook/assets/%.svg) $(MSC_SRCS:art/%.msc=src/.gitbook/assets/%.svg) SVG_IMGS=$(BOB_SRCS:art/%.bob=src/.gitbook/assets/%.svg) $(MSC_SRCS:art/%.msc=src/.gitbook/assets/%.svg)


@@ -1,14 +1,31 @@
# Table of contents # Table of contents
* [Introduction](introduction.md) * [Introduction](introduction.md)
* [Terminology](terminology.md) * [Using Solana from the Command-line](cli/README.md)
* [Getting Started](getting-started/README.md) * [Command-line Usage](cli/usage.md)
* [Testnet Participation](getting-started/testnet-participation.md) * [Paper Wallet](paper-wallet/README.md)
* [Example Client: Web Wallet](getting-started/webwallet.md) * [Installation](paper-wallet/installation.md)
* [Programming Model](programs/README.md) * [Paper Wallet Usage](paper-wallet/usage.md)
* [Example: Tic-Tac-Toe](programs/tictactoe.md) * [Offline Signing](offline-signing/README.md)
* [Drones](programs/drones.md) * [Durable Transaction Nonces](offline-signing/durable-nonce.md)
* [A Solana Cluster](cluster/README.md) * [Developing Applications](apps/README.md)
* [Example: Web Wallet](apps/webwallet.md)
* [Example: Tic-Tac-Toe](apps/tictactoe.md)
* [Drones](apps/drones.md)
* [Anatomy of a Transaction](transaction.md)
* [JSON RPC API](apps/jsonrpc-api.md)
* [JavaScript API](apps/javascript-api.md)
* [Running a Validator](running-validator/README.md)
* [Validator Requirements](running-validator/validator-reqs.md)
* [Choosing a Testnet](running-validator/validator-testnet.md)
* [Installing the Validator Software](running-validator/validator-software.md)
* [Starting a Validator](running-validator/validator-start.md)
* [Staking](running-validator/validator-stake.md)
* [Monitoring a Validator](running-validator/validator-monitor.md)
* [Publishing Validator Info](running-validator/validator-info.md)
* [Troubleshooting](running-validator/validator-troubleshoot.md)
* [Running an Archiver](running-archiver.md)
* [Understanding Solana's Architecture](cluster/README.md)
* [Synchronization](cluster/synchronization.md) * [Synchronization](cluster/synchronization.md)
* [Leader Rotation](cluster/leader-rotation.md) * [Leader Rotation](cluster/leader-rotation.md)
* [Fork Generation](cluster/fork-generation.md) * [Fork Generation](cluster/fork-generation.md)
@@ -20,46 +37,13 @@
* [Performance Metrics](cluster/performance-metrics.md) * [Performance Metrics](cluster/performance-metrics.md)
* [Anatomy of a Validator](validator/README.md) * [Anatomy of a Validator](validator/README.md)
* [TPU](validator/tpu.md) * [TPU](validator/tpu.md)
* [TVU](validator/tvu/README.md) * [TVU](validator/tvu.md)
* [Blockstore](validator/tvu/blockstore.md) * [Blockstore](validator/blockstore.md)
* [Gossip Service](validator/gossip.md) * [Gossip Service](validator/gossip.md)
* [The Runtime](validator/runtime.md) * [The Runtime](validator/runtime.md)
* [Anatomy of a Transaction](transaction.md) * [Building from Source](building-from-source.md)
* [Running a Validator](running-validator/README.md) * [Terminology](terminology.md)
* [Validator Requirements](running-validator/validator-reqs.md)
* [Choosing a Testnet](running-validator/validator-testnet.md)
* [Installing the Validator Software](running-validator/validator-software.md)
* [Starting a Validator](running-validator/validator-start.md)
* [Staking](running-validator/validator-stake.md)
* [Monitoring a Validator](running-validator/validator-monitor.md)
* [Publishing Validator Info](running-validator/validator-info.md)
* [Troubleshooting](running-validator/validator-troubleshoot.md)
* [Running an Archiver](running-archiver.md)
* [Paper Wallet](paper-wallet/README.md)
* [Installation](paper-wallet/installation.md)
* [Paper Wallet Usage](paper-wallet/usage.md)
* [Offline Signing](offline-signing/README.md)
* [Durable Transaction Nonces](offline-signing/durable-nonce.md)
* [API Reference](api-reference/README.md)
* [Transaction](api-reference/transaction-api.md)
* [Instruction](api-reference/instruction-api.md)
* [Blockstreamer](api-reference/blockstreamer.md)
* [JSON RPC API](api-reference/jsonrpc-api.md)
* [JavaScript API](api-reference/javascript-api.md)
* [solana CLI](api-reference/cli.md)
* [Accepted Design Proposals](proposals/README.md)
* [Ledger Replication](proposals/ledger-replication-to-implement.md)
* [Secure Vote Signing](proposals/vote-signing-to-implement.md)
* [Cluster Test Framework](proposals/cluster-test-framework.md)
* [Validator](proposals/validator-proposal.md)
* [Simple Payment and State Verification](proposals/simple-payment-and-state-verification.md)
* [Cross-Program Invocation](proposals/cross-program-invocation.md)
* [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md)
* [Snapshot Verification](proposals/snapshot-verification.md)
* [Bankless Leader](proposals/bankless-leader.md)
* [Slashing](proposals/slashing.md)
* [Implemented Design Proposals](implemented-proposals/README.md) * [Implemented Design Proposals](implemented-proposals/README.md)
* [Blockstore](implemented-proposals/blockstore.md)
* [Cluster Software Installation and Updates](implemented-proposals/installer.md) * [Cluster Software Installation and Updates](implemented-proposals/installer.md)
* [Cluster Economics](implemented-proposals/ed_overview/README.md) * [Cluster Economics](implemented-proposals/ed_overview/README.md)
* [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md) * [Validation-client Economics](implemented-proposals/ed_overview/ed_validation_client_economics/README.md)
@@ -70,6 +54,7 @@
* [Replication-client Economics](implemented-proposals/ed_overview/ed_replication_client_economics/README.md) * [Replication-client Economics](implemented-proposals/ed_overview/ed_replication_client_economics/README.md)
* [Storage-replication Rewards](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md) * [Storage-replication Rewards](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_storage_replication_rewards.md)
* [Replication-client Reward Auto-delegation](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md) * [Replication-client Reward Auto-delegation](implemented-proposals/ed_overview/ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md)
* [Storage Rent Economics](implemented-proposals/ed_overview/ed_storage_rent_economics.md)
* [Economic Sustainability](implemented-proposals/ed_overview/ed_economic_sustainability.md) * [Economic Sustainability](implemented-proposals/ed_overview/ed_economic_sustainability.md)
* [Attack Vectors](implemented-proposals/ed_overview/ed_attack_vectors.md) * [Attack Vectors](implemented-proposals/ed_overview/ed_attack_vectors.md)
* [Economic Design MVP](implemented-proposals/ed_overview/ed_mvp.md) * [Economic Design MVP](implemented-proposals/ed_overview/ed_mvp.md)
@@ -88,3 +73,19 @@
* [Rent](implemented-proposals/rent.md) * [Rent](implemented-proposals/rent.md)
* [Durable Transaction Nonces](implemented-proposals/durable-tx-nonces.md) * [Durable Transaction Nonces](implemented-proposals/durable-tx-nonces.md)
* [Validator Timestamp Oracle](implemented-proposals/validator-timestamp-oracle.md) * [Validator Timestamp Oracle](implemented-proposals/validator-timestamp-oracle.md)
* [Commitment](implemented-proposals/commitment.md)
* [Snapshot Verification](implemented-proposals/snapshot-verification.md)
* [Accepted Design Proposals](proposals/README.md)
* [Ledger Replication](proposals/ledger-replication-to-implement.md)
* [Secure Vote Signing](proposals/vote-signing-to-implement.md)
* [Cluster Test Framework](proposals/cluster-test-framework.md)
* [Validator](proposals/validator-proposal.md)
* [Simple Payment and State Verification](proposals/simple-payment-and-state-verification.md)
* [Cross-Program Invocation](proposals/cross-program-invocation.md)
* [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md)
* [Snapshot Verification](proposals/snapshot-verification.md)
* [Bankless Leader](proposals/bankless-leader.md)
* [Slashing](proposals/slashing.md)
* [Tick Verification](proposals/tick-verification.md)
* [Block Confirmation](proposals/block-confirmation.md)
* [ABI Management](proposals/abi-management.md)


@@ -1,4 +0,0 @@
# API Reference
The following sections contain API references material you may find useful when developing applications utilizing a Solana cluster.


@@ -1,28 +0,0 @@
# Blockstreamer
Solana supports a node type called an _blockstreamer_. This validator variation is intended for applications that need to observe the data plane without participating in transaction validation or ledger replication.
A blockstreamer runs without a vote signer, and can optionally stream ledger entries out to a Unix domain socket as they are processed. The JSON-RPC service still functions as on any other node.
To run a blockstreamer, include the argument `no-signer` and \(optional\) `blockstream` socket location:
```bash
$ ./multinode-demo/validator-x.sh --no-signer --blockstream <SOCKET>
```
The stream will output a series of JSON objects:
* An Entry event JSON object is sent when each ledger entry is processed, with the following fields:
* `dt`, the system datetime, as RFC3339-formatted string
* `t`, the event type, always "entry"
* `s`, the slot height, as unsigned 64-bit integer
* `h`, the tick height, as unsigned 64-bit integer
* `entry`, the entry, as JSON object
* A Block event JSON object is sent when a block is complete, with the following fields:
* `dt`, the system datetime, as RFC3339-formatted string
* `t`, the event type, always "block"
* `s`, the slot height, as unsigned 64-bit integer
* `h`, the tick height, as unsigned 64-bit integer
* `l`, the slot leader id, as base-58 encoded string
* `hash`, the [blockhash](terminology.md#blockhash), as base-58 encoded string


@@ -1,38 +0,0 @@
# Instruction
For the purposes of building a [Transaction](../transaction.md), a more verbose instruction format is used:
* **Instruction:**
* **program\_id:** The pubkey of the on-chain program that executes the
instruction
* **accounts:** An ordered list of accounts that should be passed to
the program processing the instruction, including metadata detailing
if an account is a signer of the transaction and if it is a credit
only account.
* **data:** A byte array that is passed to the program executing the
instruction
A more compact form is actually included in a `Transaction`:
* **CompiledInstruction:**
* **program\_id\_index:** The index of the `program_id` in the
`account_keys` list
* **accounts:** An ordered list of indices into `account_keys`
specifying the accounds that should be passed to the program
processing the instruction.
* **data:** A byte array that is passed to the program executing the
instruction


@@ -1,62 +0,0 @@
# Transaction
## Components of a `Transaction`
* **Transaction:**
* **message:** Defines the transaction
* **header:** Details the account types of and signatures required by
the transaction
* **num\_required\_signatures:** The total number of signatures
required to make the transaction valid.
* **num\_credit\_only\_signed\_accounts:** The last
`num_readonly_signed_accounts` signatures refer to signing
credit only accounts. Credit only accounts can be used concurrently
by multiple parallel transactions, but their balance may only be
increased, and their account data is read-only.
* **num\_credit\_only\_unsigned\_accounts:** The last
`num_readonly_unsigned_accounts` public keys in `account_keys` refer
to non-signing credit only accounts
* **account\_keys:** List of public keys used by the transaction, including
by the instructions and for signatures. The first
`num_required_signatures` public keys must sign the transaction.
* **recent\_blockhash:** The ID of a recent ledger entry. Validators will
reject transactions with a `recent_blockhash` that is too old.
* **instructions:** A list of [instructions](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/instruction.md) that are
run sequentially and committed in one atomic transaction if all
succeed.
* **signatures:** A list of signatures applied to the transaction. The
list is always of length `num_required_signatures`, and the signature
at index `i` corresponds to the public key at index `i` in `account_keys`.
The list is initialized with empty signatures \(i.e. zeros\), and
populated as signatures are added.
## Transaction Signing
A `Transaction` is signed by using an ed25519 keypair to sign the serialization of the `message`. The resulting signature is placed at the index of `signatures` matching the index of the keypair's public key in `account_keys`.
## Transaction Serialization
`Transaction`s \(and their `message`s\) are serialized and deserialized using the [bincode](https://crates.io/crates/bincode) crate with a non-standard vector serialization that uses only one byte for the length if it can be encoded in 7 bits, 2 bytes if it fits in 14 bits, or 3 bytes if it requires 15 or 16 bits. The vector serialization is defined by Solana's [short-vec](https://github.com/solana-labs/solana/blob/master/sdk/src/short_vec.rs).


@@ -1,6 +1,20 @@
# Programming Model # Programming Model
A client _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to user-contributed _programs_. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transfered. Instructions are executed atomically. If any instruction is invalid, any changes made within the transaction are discarded. An _app_ interacts with a Solana cluster by sending it _transactions_ with one or more _instructions_. The Solana _runtime_ passes those instructions to user-contributed _programs_. An instruction might, for example, tell a program to transfer _lamports_ from one _account_ to another or create an interactive contract that governs how lamports are transfered. Instructions are executed sequentially and atomically. If any instruction is invalid, any changes made within the transaction are discarded.
### Accounts and Signatures
Each transaction explicitly lists all account public keys referenced by the transaction's instructions. A subset of those public keys are each accompanied by a transaction signature. Those signatures signal on-chain programs that the account holder has authorized the transaction. Typically, the program uses the authorization to permit debiting the account or modifying its data.
The transaction also marks some accounts as _read-only accounts_. The runtime permits read-only accounts to be read concurrently. If a program attempts to modify a read-only account, the transaction is rejected by the runtime.
### Recent Blockhash
A Transaction includes a recent blockhash to prevent duplication and to give transactions lifetimes. Any transaction that is completely identical to a previous one is rejected, so adding a newer blockhash allows multiple transactions to repeat the exact same action. Transactions also have lifetimes that are defined by the blockhash, as any transaction whose blockhash is too old will be rejected.
### Instructions
Each instruction specifies a single program account \(which must be marked executable\), a subset of the transaction's accounts that should be passed to the program, and a data byte array instruction that is passed to the program. The program interprets the data array and operates on the accounts specified by the instructions. The program can return successfully, or with an error code. An error return causes the entire transaction to fail immediately.
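The added sections describe the moving parts of a transaction in prose. The following is an illustrative sketch of those data shapes only, using plain structs rather than the real solana-sdk types, to make the field relationships concrete:

```rust
// Data-shape sketch mirroring the description above; the real types are
// Message/Instruction/Transaction in solana-sdk.
struct AccountRef {
    pubkey: [u8; 32],
    is_signer: bool,   // signer accounts are accompanied by a signature
    is_writable: bool, // read-only accounts may be read concurrently
}

struct CompiledInstruction {
    program_id_index: u8,     // which account key is the (executable) program
    account_indices: Vec<u8>, // subset of accounts passed to the program
    data: Vec<u8>,            // opaque bytes interpreted by the program
}

struct TransactionSketch {
    account_keys: Vec<AccountRef>,
    recent_blockhash: [u8; 32],             // bounds the transaction's lifetime
    instructions: Vec<CompiledInstruction>, // executed sequentially and atomically
    signatures: Vec<[u8; 64]>,              // one per required signer
}

fn main() {
    let tx = TransactionSketch {
        account_keys: vec![AccountRef { pubkey: [0; 32], is_signer: true, is_writable: true }],
        recent_blockhash: [0; 32],
        instructions: vec![CompiledInstruction {
            program_id_index: 0,
            account_indices: vec![0],
            data: vec![],
        }],
        signatures: vec![[0; 64]],
    };
    println!("{} instruction(s), {} signature slot(s)", tx.instructions.len(), tx.signatures.len());
}
```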
## Deploying Programs to a Cluster ## Deploying Programs to a Cluster


@@ -16,6 +16,8 @@ Creator of on-chain game tic-tac-toe hosts a drone that responds to airdrop requ
Creator of a new on-chain token \(ERC-20 interface\), may wish to do a worldwide airdrop to distribute its tokens to millions of users over just a few seconds. That drone cannot spend resources interacting with the Solana cluster. Instead, the drone should only verify the client is unique and human, and then return the signature. It may also want to listen to the Solana cluster for recent entry IDs to support client retries and to ensure the airdrop is targeting the desired cluster. Creator of a new on-chain token \(ERC-20 interface\), may wish to do a worldwide airdrop to distribute its tokens to millions of users over just a few seconds. That drone cannot spend resources interacting with the Solana cluster. Instead, the drone should only verify the client is unique and human, and then return the signature. It may also want to listen to the Solana cluster for recent entry IDs to support client retries and to ensure the airdrop is targeting the desired cluster.
Note: the Solana cluster will not parallelize transactions funded by the same fee-paying account. This means that the max throughput of a single fee-paying account is limited to the number of _ticks_ processed per second by the current leader. Add additional fee-paying accounts to improve throughput.
## Attack vectors ## Attack vectors
### Invalid recent\_blockhash ### Invalid recent\_blockhash


@@ -25,15 +25,18 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getEpochInfo](jsonrpc-api.md#getepochinfo) * [getEpochInfo](jsonrpc-api.md#getepochinfo)
* [getEpochSchedule](jsonrpc-api.md#getepochschedule) * [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getGenesisHash](jsonrpc-api.md#getgenesishash) * [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getInflation](jsonrpc-api.md#getinflation)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule) * [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption) * [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
* [getNumBlocksSinceSignatureConfirmation](jsonrpc-api.md#getnumblockssincesignatureconfirmation) * [getNumBlocksSinceSignatureConfirmation](jsonrpc-api.md#getnumblockssincesignatureconfirmation)
* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts) * [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
* [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash) * [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash)
* [getSignatureConfirmation](jsonrpc-api.md#getsignatureconfirmation)
* [getSignatureStatus](jsonrpc-api.md#getsignaturestatus) * [getSignatureStatus](jsonrpc-api.md#getsignaturestatus)
* [getSlot](jsonrpc-api.md#getslot) * [getSlot](jsonrpc-api.md#getslot)
* [getSlotLeader](jsonrpc-api.md#getslotleader) * [getSlotLeader](jsonrpc-api.md#getslotleader)
* [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment) * [getSlotsPerSegment](jsonrpc-api.md#getslotspersegment)
* [getStoragePubkeysForSlot](jsonrpc-api.md#getstoragepubkeysforslot)
* [getStorageTurn](jsonrpc-api.md#getstorageturn) * [getStorageTurn](jsonrpc-api.md#getstorageturn)
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate) * [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
* [getTransactionCount](jsonrpc-api.md#gettransactioncount) * [getTransactionCount](jsonrpc-api.md#gettransactioncount)
@@ -43,7 +46,8 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot) * [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
* [requestAirdrop](jsonrpc-api.md#requestairdrop) * [requestAirdrop](jsonrpc-api.md#requestairdrop)
* [sendTransaction](jsonrpc-api.md#sendtransaction) * [sendTransaction](jsonrpc-api.md#sendtransaction)
* [startSubscriptionChannel](jsonrpc-api.md#startsubscriptionchannel) * [setLogFilter](jsonrpc-api.md#setlogfilter)
* [validatorExit](jsonrpc-api.md#validatorexit)
* [Subscription Websocket](jsonrpc-api.md#subscription-websocket) * [Subscription Websocket](jsonrpc-api.md#subscription-websocket)
* [accountSubscribe](jsonrpc-api.md#accountsubscribe) * [accountSubscribe](jsonrpc-api.md#accountsubscribe)
* [accountUnsubscribe](jsonrpc-api.md#accountunsubscribe) * [accountUnsubscribe](jsonrpc-api.md#accountunsubscribe)
@@ -51,15 +55,17 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [programUnsubscribe](jsonrpc-api.md#programunsubscribe) * [programUnsubscribe](jsonrpc-api.md#programunsubscribe)
* [signatureSubscribe](jsonrpc-api.md#signaturesubscribe) * [signatureSubscribe](jsonrpc-api.md#signaturesubscribe)
* [signatureUnsubscribe](jsonrpc-api.md#signatureunsubscribe) * [signatureUnsubscribe](jsonrpc-api.md#signatureunsubscribe)
* [slotSubscribe](jsonrpc-api.md#slotsubscribe)
* [slotUnsubscribe](jsonrpc-api.md#slotunsubscribe)
## Request Formatting ## Request Formatting
To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields: To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields:
* `jsonrpc`, set to `"2.0"` * `jsonrpc: <string>`, set to `"2.0"`
* `id`, a unique client-generated identifying integer * `id: <number>`, a unique client-generated identifying integer
* `method`, a string containing the method to be invoked * `method: <string>`, a string containing the method to be invoked
* `params`, a JSON array of ordered parameter values * `params: <array>`, a JSON array of ordered parameter values
Example using curl: Example using curl:
@@ -69,9 +75,9 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
The response output will be a JSON object with the following fields: The response output will be a JSON object with the following fields:
* `jsonrpc`, matching the request specification * `jsonrpc: <string>`, matching the request specification
* `id`, matching the request identifier * `id: <number>`, matching the request identifier
* `result`, requested data or success confirmation * `result: <array|number|object|string>`, requested data or success confirmation
Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST. Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST.
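A small sketch of the request shape described above, built with the serde_json crate (an assumed dependency here, not the official Solana client library); the method names and pubkey are taken from examples elsewhere on this page:

```rust
// Builds the raw JSON-RPC bodies described above; sending them is an HTTP
// POST with a Content-Type: application/json header.
use serde_json::json;

fn main() {
    // A single request: the four required fields.
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "getBalance",
        "params": ["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]
    });

    // A batch: an array of request objects sent as one POST body.
    let batch = json!([
        {"jsonrpc": "2.0", "id": 1, "method": "getBalance",
         "params": ["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]},
        {"jsonrpc": "2.0", "id": 2, "method": "getSlot", "params": []}
    ]);

    println!("{}", request);
    println!("{}", batch);
}
```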
@@ -115,12 +121,12 @@ Returns a transaction receipt
#### Parameters: #### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string * `<string>` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `RpcResponse<boolean>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed * `RpcResponse<bool>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed
#### Example: #### Example:
@@ -138,18 +144,19 @@ Returns all information associated with the account of provided Pubkey
#### Parameters: #### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string * `<string>` - Pubkey of account to query, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
The result value will be an RpcResponse JSON object containing an AccountInfo JSON object. The result value will be an RpcResponse JSON object containing an AccountInfo JSON object.
* `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing: * `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing:
* `lamports`, number of lamports assigned to this account, as a u64 * `lamports: <u64>`, number of lamports assigned to this account, as a u64
* `owner`, base-58 encoded pubkey of the program this account has been assigned to * `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
* `data`, base-58 encoded data associated with the account * `data: <string>`, base-58 encoded data associated with the account
* `executable`, boolean indicating if the account contains a program \(and is strictly read-only\) * `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
* `rentEpoch`: <u64>, the epoch at which this account will next owe rent, as u64
#### Example: #### Example:
@@ -158,7 +165,7 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"}},"id":1} {"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF","rentEpoch":2}},"id":1}
``` ```
### getBalance ### getBalance
@@ -167,12 +174,12 @@ Returns the balance of the account of provided Pubkey
#### Parameters: #### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string * `<string>` - Pubkey of account to query, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `RpcResponse<u64>` - RpcResponse JSON object with `value` field set to quantity * `RpcResponse<u64>` - RpcResponse JSON object with `value` field set to the balance
#### Example: #### Example:
@@ -190,16 +197,15 @@ Returns commitment for particular block
#### Parameters: #### Parameters:
* `u64` - block, identified by Slot * `<u64>` - block, identified by Slot
#### Results: #### Results:
The result field will be a JSON object containing: The result field will be a JSON object containing:
* `commitment` - commitment, comprising either: * `commitment` - commitment, comprising either:
* `null` - Unknown block * `<null>` - Unknown block
* `object` - BlockCommitment * `<array>` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
* `array` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
* `totalStake` - total active stake, in lamports, of the current epoch * `totalStake` - total active stake, in lamports, of the current epoch
#### Example: #### Example:
@@ -209,7 +215,7 @@ The result field will be a JSON object containing:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockCommitment","params":[5]}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockCommitment","params":[5]}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":[{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32]},42],"id":1} {"jsonrpc":"2.0","result":{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32],"totalStake": 42},"id":1}
``` ```
### getBlockTime ### getBlockTime
@@ -228,12 +234,12 @@ query a node that is built from genesis and retains the entire ledger.
#### Parameters: #### Parameters:
* `u64` - block, identified by Slot * `<u64>` - block, identified by Slot
#### Results: #### Results:
* `null` - block has not yet been produced * `<null>` - block has not yet been produced
* `i64` - estimated production time, as Unix timestamp (seconds since the Unix epoch) * `<i64>` - estimated production time, as Unix timestamp (seconds since the Unix epoch)
#### Example: #### Example:
@@ -257,10 +263,10 @@ None
The result field will be an array of JSON objects, each with the following sub fields: The result field will be an array of JSON objects, each with the following sub fields:
* `pubkey` - Node public key, as base-58 encoded string * `pubkey: <string>` - Node public key, as base-58 encoded string
* `gossip` - Gossip network address for the node * `gossip: <string>` - Gossip network address for the node
* `tpu` - TPU network address for the node * `tpu: <string>` - TPU network address for the node
* `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled * `rpc: <string>` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
#### Example: #### Example:
@@ -278,25 +284,25 @@ Returns identity and transaction information about a confirmed block in the ledg
#### Parameters: #### Parameters:
* `integer` - slot, as u64 integer * `<u64>` - slot, as u64 integer
* `string` - (optional) encoding for each returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON. * `<string>` - (optional) encoding for each returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.
#### Results: #### Results:
The result field will be an object with the following fields: The result field will be an object with the following fields:
* `blockhash` - the blockhash of this block, as base-58 encoded string * `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
* `previousBlockhash` - the blockhash of this block's parent, as base-58 encoded string * `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string
* `parentSlot` - the slot index of this block's parent * `parentSlot: <u64>` - the slot index of this block's parent
* `transactions` - an array of JSON objects containing: * `transactions: <array>` - an array of JSON objects containing:
* `transaction` - [Transaction](transaction-api.md) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter * `transaction: <object|string>` - [Transaction](transaction-api.md) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
* `meta` - transaction status metadata object, containing `null` or: * `meta: <object>` - transaction status metadata object, containing `null` or:
* `status` - Transaction status: * `status: <object>` - Transaction status:
* `"Ok": null` - Transaction was successful * `"Ok": null` - Transaction was successful
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) * `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
* `fee` - fee this transaction was charged, as u64 integer * `fee: <u64>` - fee this transaction was charged, as u64 integer
* `preBalances` - array of u64 account balances from before the transaction was processed * `preBalances: <array>` - array of u64 account balances from before the transaction was processed
* `postBalances` - array of u64 account balances after the transaction was processed * `postBalances: <array>` - array of u64 account balances after the transaction was processed
#### Example: #### Example:
@@ -305,13 +311,13 @@ The result field will be an object with the following fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[[{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1} {"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
// Request // Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[["81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ",{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}]]},"id":1} {"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
``` ```
### getConfirmedBlocks ### getConfirmedBlocks
@@ -320,8 +326,8 @@ Returns a list of confirmed blocks
#### Parameters: #### Parameters:
* `integer` - start_slot, as u64 integer * `<u64>` - start_slot, as u64 integer
* `integer` - (optional) end_slot, as u64 integer * `<u64>` - (optional) end_slot, as u64 integer
#### Results: #### Results:
@@ -345,15 +351,16 @@ Returns information about the current epoch
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
The result field will be an object with the following fields: The result field will be an object with the following fields:
* `epoch`, the current epoch * `absoluteSlot: <u64>`, the current slot
* `slotIndex`, the current slot relative to the start of the current epoch * `epoch: <u64>`, the current epoch
* `slotsInEpoch`, the number of slots in this epoch * `slotIndex: <u64>`, the current slot relative to the start of the current epoch
* `slotsInEpoch: <u64>`, the number of slots in this epoch
#### Example: #### Example:
@@ -362,7 +369,7 @@ The result field will be an object with the following fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1} {"jsonrpc":"2.0","result":{"absoluteSlot":166598,"epoch":27,"slotIndex":2790,"slotsInEpoch":8192},"id":1}
``` ```
### getEpochSchedule ### getEpochSchedule
@@ -377,11 +384,11 @@ None
The result field will be an object with the following fields: The result field will be an object with the following fields:
* `slotsPerEpoch`, the maximum number of slots in each epoch * `slotsPerEpoch: <u64>`, the maximum number of slots in each epoch
* `leaderScheduleSlotOffset`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch * `leaderScheduleSlotOffset: <u64>`, the number of slots before beginning of an epoch to calculate a leader schedule for that epoch
* `warmup`, whether epochs start short and grow * `warmup: <bool>`, whether epochs start short and grow
* `firstNormalEpoch`, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH) * `firstNormalEpoch: <u64>`, first normal-length epoch, log2(slotsPerEpoch) - log2(MINIMUM_SLOTS_PER_EPOCH)
* `firstNormalSlot`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(firstNormalEpoch) - 1) * `firstNormalSlot: <u64>`, MINIMUM_SLOTS_PER_EPOCH * (2.pow(firstNormalEpoch) - 1)
#### Example: #### Example:
@@ -403,7 +410,7 @@ None
#### Results: #### Results:
* `string` - a Hash as base-58 encoded string * `<string>` - a Hash as base-58 encoded string
#### Example: #### Example:
@@ -415,14 +422,43 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1} {"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1}
``` ```
### getInflation
Returns the inflation configuration of the cluster
#### Parameters:
None
#### Results:
The result field will be an Inflation object with the following fields:
* `initial: <f64>`, the initial inflation percentage from time 0
* `terminal: <f64>`, terminal inflation percentage
* `taper: <f64>`, rate per year at which inflation is lowered
* `foundation: <f64>`, percentage of total inflation allocated to the foundation
* `foundationTerm: <f64>`, duration of foundation pool inflation in years
* `storage: <f64>`, percentage of total inflation allocated to storage rewards
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getInflation"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
```
### getLeaderSchedule ### getLeaderSchedule
Returns the leader schedule for an epoch Returns the leader schedule for an epoch
#### Parameters: #### Parameters:
* `slot` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched * `<u64>` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
@@ -446,12 +482,12 @@ Returns minimum balance required to make account rent exempt.
#### Parameters: #### Parameters:
* `u64` - account data length * `<usize>` - account data length
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `u64` - minimum lamports required in account * `<u64>` - minimum lamports required in account
#### Example: #### Example:
@@ -469,12 +505,12 @@ Returns the current number of blocks since signature has been confirmed.
#### Parameters: #### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string * `<string>` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `u64` - count * `<u64>` - count, or null if signature not found
#### Example: #### Example:
@@ -492,18 +528,20 @@ Returns all accounts owned by the provided program Pubkey
#### Parameters: #### Parameters:
* `string` - Pubkey of program, as base-58 encoded string * `<string>` - Pubkey of program, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
The result field will be an array of arrays. Each sub array will contain: The result field will be an array of JSON objects, which will contain:
* `string` - the account Pubkey as base-58 encoded string and a JSON object, with the following sub fields: * `pubkey: <string>` - the account Pubkey as base-58 encoded string
* `lamports`, number of lamports assigned to this account, as a u64 * `account: <object>` - a JSON object, with the following sub fields:
* `owner`, base-58 encoded pubkey of the program this account has been assigned to * `lamports: <u64>`, number of lamports assigned to this account, as a u64
* `data`, base-58 encoded data associated with the account * `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
* `executable`, boolean indicating if the account contains a program \(and is strictly read-only\) * `data: <string>`, base-58 encoded data associated with the account
* `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
* `rentEpoch: <u64>`, the epoch at which this account will next owe rent, as u64
#### Example: #### Example:
@@ -512,7 +550,7 @@ The result field will be an array of arrays. Each sub array will contain:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T"]}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T"]}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":1,"data":"", ["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR", {"executable":false,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","lamports":10,"data":[]]]},"id":1} {"jsonrpc":"2.0","result":[{"account":{"data":"2R9jLfiAQ9bgdcw6h8s44439","executable":false,"lamports":15298080,"owner":"4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T","rentEpoch":28},"pubkey":"CxELquR1gPP8wHe33gZ4QxqGB3sZ9RSwsJ2KshVewkFY"}],"id":1}
``` ```
### getRecentBlockhash ### getRecentBlockhash
@@ -521,15 +559,15 @@ Returns a recent block hash from the ledger, and a fee schedule that can be used
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
An RpcResponse containing a JSON object consisting of a string blockhash and FeeCalculator JSON object. An RpcResponse containing a JSON object consisting of a string blockhash and FeeCalculator JSON object.
* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to a JSON object including: * `RpcResponse<object>` - RpcResponse JSON object with `value` field set to a JSON object including:
* `blockhash` - a Hash as base-58 encoded string * `blockhash: <string>` - a Hash as base-58 encoded string
* `feeCalculator` - FeeCalculator object, the fee schedule for this block hash * `feeCalculator: <object>` - FeeCalculator object, the fee schedule for this block hash
#### Example: #### Example:
@@ -538,7 +576,34 @@ An RpcResponse containing a JSON object consisting of a string blockhash and Fee
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","feeCalculator":{"lamportsPerSignature": 0}}},"id":1} {"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
```
### getSignatureConfirmation
Returns the status and number of confirmations of a given signature.
#### Parameters:
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
* `<null>` - Unknown transaction
* `<object>` - Transaction confirmations and status:
* `confirmations: <u64>` - count of confirmations since transaction was processed
* `status: <object>` -
* `"Ok": <null>` - Transaction was successful
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureConfirmation", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"confirmations":12,"status":{"Ok": null}},"id":1}
``` ```
### getSignatureStatus ### getSignatureStatus
@@ -547,14 +612,14 @@ Returns the status of a given signature. This method is similar to [confirmTrans
#### Parameters: #### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string * `<string>` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `null` - Unknown transaction * `<null>` - Unknown transaction
* `object` - Transaction status: * `<object>` - Transaction status:
* `"Ok": null` - Transaction was successful * `"Ok": <null>` - Transaction was successful
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14) * `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
#### Example: #### Example:
@@ -564,7 +629,7 @@ Returns the status of a given signature. This method is similar to [confirmTrans
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":"SignatureNotFound","id":1} {"jsonrpc":"2.0","result":{"Ok": null},"id":1}
``` ```
### getSlot ### getSlot
@@ -573,11 +638,11 @@ Returns the current slot the node is processing
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `u64` - Current slot * `<u64>` - Current slot
#### Example: #### Example:
@@ -595,11 +660,11 @@ Returns the current slot leader
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `string` - Node Id as base-58 encoded string * `<string>` - Node identity Pubkey as base-58 encoded string
#### Example: #### Example:
@@ -617,11 +682,11 @@ Returns the current storage segment size in terms of slots
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `u64` - Number of slots in a storage segment * `<u64>` - Number of slots in a storage segment
#### Example: #### Example:
@@ -632,6 +697,27 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":1024,"id":1} {"jsonrpc":"2.0","result":1024,"id":1}
``` ```
### getStoragePubkeysForSlot
Returns the storage Pubkeys for a particular slot
#### Parameters:
None
#### Results:
An array of Pubkeys, as base-58 encoded strings
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStoragePubkeysForSlot","params":[1]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC"],"id":1}
```
### getStorageTurn ### getStorageTurn
Returns the current storage turn's blockhash and slot Returns the current storage turn's blockhash and slot
@@ -644,8 +730,8 @@ None
A JSON object consisting of A JSON object consisting of
* `blockhash` - a Hash as base-58 encoded string indicating the blockhash of the turn slot * `blockhash: <string>` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
* `slot` - the current storage turn slot * `slot: <u64>` - the current storage turn slot
#### Example: #### Example:
@@ -653,7 +739,7 @@ A JSON object consisting of
// Request // Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "slot": "2048"},"id":1} {"jsonrpc":"2.0","result":{"blockhash": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "slot": 2048},"id":1}
``` ```
### getStorageTurnRate ### getStorageTurnRate
@@ -666,7 +752,7 @@ None
#### Results: #### Results:
* `u64` - Number of slots in storage turn * `<u64>` - Number of slots in storage turn
#### Example: #### Example:
@@ -683,11 +769,11 @@ Returns the current Transaction count from the ledger
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `u64` - count * `<u64>` - count
#### Example: #### Example:
@@ -701,15 +787,15 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### getTotalSupply ### getTotalSupply
Returns the current total supply in Lamports Returns the current total supply in lamports
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
* `u64` - Total supply * `<u64>` - Total supply
#### Example: #### Example:
@@ -731,7 +817,7 @@ None
#### Results: #### Results:
The result field will be a JSON object with the following sub fields: The result field will be a JSON object with the following fields:
* `solana-core`, software version of solana-core * `solana-core`, software version of solana-core
@@ -741,7 +827,7 @@ The result field will be a JSON object with the following sub fields:
// Request // Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"solana-core": "0.17.2"},"id":1} {"jsonrpc":"2.0","result":{"solana-core": "0.23.1"},"id":1}
``` ```
### getVoteAccounts ### getVoteAccounts
@@ -750,18 +836,19 @@ Returns the account info and associated stake for all the voting accounts in the
#### Parameters: #### Parameters:
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results: #### Results:
The result field will be a JSON object of `current` and `delinquent` accounts, each containing an array of JSON objects with the following sub fields: The result field will be a JSON object of `current` and `delinquent` accounts, each containing an array of JSON objects with the following sub fields:
* `votePubkey` - Vote account public key, as base-58 encoded string * `votePubkey: <string>` - Vote account public key, as base-58 encoded string
* `nodePubkey` - Node public key, as base-58 encoded string * `nodePubkey: <string>` - Node public key, as base-58 encoded string
* `activatedStake` - the stake, in lamports, delegated to this vote account and active in this epoch * `activatedStake: <u64>` - the stake, in lamports, delegated to this vote account and active in this epoch
* `epochVoteAccount` - bool, whether the vote account is staked for this epoch * `epochVoteAccount: <bool>` - bool, whether the vote account is staked for this epoch
* `commission`, percentage (0-100) of rewards payout owed to the vote account * `commission: <number>`, percentage (0-100) of rewards payout owed to the vote account
* `lastVote` - Most recent slot voted on by this vote account * `lastVote: <u64>` - Most recent slot voted on by this vote account
* `epochCredits: <array>` - History of how many credits earned by the end of each epoch, as an array of arrays containing: [epoch, credits, previousCredits]
#### Example: #### Example:
@@ -770,7 +857,7 @@ The result field will be a JSON object of `current` and `delinquent` accounts, e
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1} {"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"epochCredits":[[1,64,0],[2,192,64]],"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"epochCredits":[],"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
``` ```
### minimumLedgerSlot ### minimumLedgerSlot
@@ -802,13 +889,13 @@ Requests an airdrop of lamports to a Pubkey
#### Parameters: #### Parameters:
* `string` - Pubkey of account to receive lamports, as base-58 encoded string * `<string>` - Pubkey of account to receive lamports, as base-58 encoded string
* `integer` - lamports, as a u64 * `<integer>` - lamports, as a u64
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success) * `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success)
#### Results: #### Results:
* `string` - Transaction Signature of airdrop, as base-58 encoded string * `<string>` - Transaction Signature of airdrop, as base-58 encoded string
#### Example: #### Example:
@@ -826,22 +913,66 @@ Creates new transaction
#### Parameters: #### Parameters:
* `array` - array of octets containing a fully-signed Transaction * `<array>` - array of octets containing a fully-signed Transaction
#### Results: #### Results:
* `string` - Transaction Signature, as base-58 encoded string * `<string>` - Transaction Signature, as base-58 encoded string
#### Example: #### Example:
```bash ```bash
// Request // Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":[[61, 98, 55, 49, 15, 187, 41, 215, 176, 49, 234, 229, 228, 77, 129, 221, 239, 88, 145, 227, 81, 158, 223, 123, 14, 229, 235, 247, 191, 115, 199, 71, 121, 17, 32, 67, 63, 209, 239, 160, 161, 2, 94, 105, 48, 159, 235, 235, 93, 98, 172, 97, 63, 197, 160, 164, 192, 20, 92, 111, 57, 145, 251, 6, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 13, 39, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 11, 12, 106, 49, 74, 226, 201, 16, 161, 192, 28, 84, 124, 97, 190, 201, 171, 186, 6, 18, 70, 142, 89, 185, 176, 154, 115, 61, 26, 163, 77, 1, 88, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}' http://localhost:8899 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["3gKEMTuxvm3DKEJc4UyiyoNz1sxwdVRW2pyDDXqaCvUjGApnsazGh2y4W92zuaSSdJhBbWLYAkZokBt4N5oW27R7zCVaLLpLxvATL2GgheEh9DmmDR1P9r1ZqirVXM2fF3z5cafmc4EtwWc1UErFdCWj1qYvy4bDGMLXRYLURxaKytEEqrxz6JXj8rUHhDpjTZeFxmC6iAW3hZr6cmaAzewQCQfiEv2HfydriwHDtN95u3Y1EF6SuXxcRqox2aTjGye2Ln9zFj4XbnAtjCmkZhR"]}' http://localhost:8899
// Result // Result
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1} {"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
``` ```
### setLogFilter
Sets the log filter on the validator
#### Parameters:
* `<string>` - the new log filter to use
#### Results:
* `<null>`
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"setLogFilter", "params":["solana_core=debug"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":null,"id":1}
```
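The filter value uses module-level directives like the `solana_core=debug` string shown above; below is a hedged sketch of combining several directives in one request (the particular module names and directive syntax are assumptions, not specified by this document):
```bash
// Request (hypothetical multi-module filter value)
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"setLogFilter", "params":["solana=info,solana_core=debug"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":null,"id":1}
```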
### validatorExit
If a validator boots with RPC exit enabled (`--enable-rpc-exit` parameter), this request causes the validator to exit.
#### Parameters:
None
#### Results:
* `<bool>` - Whether the validator exit operation was successful
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":true,"id":1}
```
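For context, the request above only succeeds if the target validator process was launched with the exit endpoint enabled; a minimal launch sketch follows (binary name assumed, and the remaining required validator arguments are omitted):
```bash
# Start a validator with the RPC exit endpoint enabled (sketch only;
# identity, ledger, and other required flags are omitted here)
$ solana-validator --enable-rpc-exit ...
```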
### Subscription Websocket ### Subscription Websocket
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`: After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
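As an illustration, one way to open such a connection is with a generic websocket client; the sketch below assumes the `wscat` tool and the default PubSub port of 8900, neither of which is specified here:
```bash
# Connect to the PubSub websocket (port 8900 assumed), then type a
# subscription request, e.g. slotSubscribe, on the open connection
$ wscat -c ws://localhost:8900
> {"jsonrpc":"2.0", "id":1, "method":"slotSubscribe"}
```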
@@ -870,14 +1001,14 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Parameters: #### Parameters:
* `string` - account Pubkey, as base-58 encoded string * `<string>` - account Pubkey, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification. * `<u64>` - optional, number of confirmed blocks to wait before notification.
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\) Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
#### Results: #### Results:
* `integer` - Subscription id \(needed to unsubscribe\) * `<number>` - Subscription id \(needed to unsubscribe\)
#### Example: #### Example:
@@ -894,7 +1025,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Notification Format: #### Notification Format:
```bash ```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF"},"subscription":0}} {"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":"4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM","lamports":1,"data":"Joig2k8Ax4JPMpWhXRyc2jMa7Wejz4X1xqVi3i7QRkmVj1ChUgNc4VNpGUQePJGBAui3c6886peU9GEbjsyeANN8JGStprwLbLwcw5wpPjuQQb9mwrjVmoDQBjj3MzZKgeHn6wmnQ5k8DBFuoCYKWWsJfH2gv9FvCzrN6K1CRcQZzF","rentEpoch":28},"subscription":0}}
``` ```
### accountUnsubscribe ### accountUnsubscribe
@@ -903,11 +1034,11 @@ Unsubscribe from account change notifications
#### Parameters: #### Parameters:
* `integer` - id of account Subscription to cancel * `<number>` - id of account Subscription to cancel
#### Results: #### Results:
* `bool` - unsubscribe success message * `<bool>` - unsubscribe success message
#### Example: #### Example:
@@ -925,14 +1056,14 @@ Subscribe to a program to receive notifications when the lamports or data for a
#### Parameters: #### Parameters:
* `string` - program\_id Pubkey, as base-58 encoded string * `<string>` - program\_id Pubkey, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification. * `<u64>` - optional, number of confirmed blocks to wait before notification.
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\) Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
#### Results: #### Results:
* `integer` - Subscription id \(needed to unsubscribe\) * `<integer>` - Subscription id \(needed to unsubscribe\)
#### Example: #### Example:
@@ -948,11 +1079,11 @@ Subscribe to a program to receive notifications when the lamports or data for a
#### Notification Format: #### Notification Format:
* `string` - account Pubkey, as base-58 encoded string * `<string>` - account Pubkey, as base-58 encoded string
* `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\) * `<object>` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
```bash ```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":{"pubkey": "8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM","account":{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd"}},"subscription":0}} {"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":"9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV","data":"4SZWhnbSt3njU4QHVgPrWeekz1BudU4ttmdr9ezmrL4X6XeLeL83xVAo6ZdxwU3oXgHNeF2q6tWZbnVnBXmvNyeLVEGt8ZQ4ZmgjHfVNCEwBtzh2aDrHgQSjBFLYAdmM3uwBhcm1EyHJLeUiFqpsoAUhn6Vphwrpf44dWRAGsAJZbzvVrUW9bfucpR7xudHHg2MxQ2CdqsfS3TfWUJY3vaf2A4AUNzfAmNPHBGi99nU2hYubGSVSPcpVPpdRWQkydgqasBmTosd","rentEpoch":28}],"subscription":0}}
``` ```
### programUnsubscribe ### programUnsubscribe
@@ -961,11 +1092,11 @@ Unsubscribe from program-owned account change notifications
#### Parameters: #### Parameters:
* `integer` - id of account Subscription to cancel * `<integer>` - id of account Subscription to cancel
#### Results: #### Results:
* `bool` - unsubscribe success message * `<bool>` - unsubscribe success message
#### Example: #### Example:
@@ -983,8 +1114,8 @@ Subscribe to a transaction signature to receive notification when the transactio
#### Parameters: #### Parameters:
* `string` - Transaction Signature, as base-58 encoded string * `<string>` - Transaction Signature, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification. * `<integer>` - optional, number of confirmed blocks to wait before notification.
Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\) Default: 0, Max: `MAX_LOCKOUT_HISTORY` \(greater integers rounded down\)
@@ -1016,11 +1147,11 @@ Unsubscribe from signature confirmation notification
#### Parameters: #### Parameters:
* `integer` - subscription id to cancel * `<integer>` - subscription id to cancel
#### Results: #### Results:
* `bool` - unsubscribe success message * `<bool>` - unsubscribe success message
#### Example: #### Example:
@@ -1031,3 +1162,53 @@ Unsubscribe from signature confirmation notification
// Result // Result
{"jsonrpc": "2.0","result": true,"id": 1} {"jsonrpc": "2.0","result": true,"id": 1}
``` ```
### slotSubscribe
Subscribe to receive a notification anytime a slot is processed by the validator
#### Parameters:
None
#### Results:
* `<integer>` - subscription id \(needed to unsubscribe\)
#### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"slotSubscribe"}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```
#### Notification Format:
```bash
{"jsonrpc": "2.0","method": "slotNotification", "params": {"result":{"parent":75,"root":44,"slot":76},"subscription":0}}
```
### slotUnsubscribe
Unsubscribe from slot notifications
#### Parameters:
* `<integer>` - subscription id to cancel
#### Results:
* `<bool>` - unsubscribe success message
#### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"slotUnsubscribe", "params":[0]}
// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```


@@ -1,4 +1,4 @@
# Getting Started # Building from Source
The Solana git repository contains all the scripts you might need to spin up your own local testnet. Depending on what you're looking to achieve, you may want to run a different variation, as the full-fledged, performance-enhanced multinode testnet is considerably more complex to set up than a Rust-only, singlenode testnode. If you are looking to develop high-level features, such as experimenting with smart contracts, save yourself some setup headaches and stick to the Rust-only singlenode demo. If you're doing performance optimization of the transaction pipeline, consider the enhanced singlenode demo. If you're doing consensus work, you'll need at least a Rust-only multinode demo. If you want to reproduce our TPS metrics, run the enhanced multinode demo. The Solana git repository contains all the scripts you might need to spin up your own local testnet. Depending on what you're looking to achieve, you may want to run a different variation, as the full-fledged, performance-enhanced multinode testnet is considerably more complex to set up than a Rust-only, singlenode testnode. If you are looking to develop high-level features, such as experimenting with smart contracts, save yourself some setup headaches and stick to the Rust-only singlenode demo. If you're doing performance optimization of the transaction pipeline, consider the enhanced singlenode demo. If you're doing consensus work, you'll need at least a Rust-only multinode demo. If you want to reproduce our TPS metrics, run the enhanced multinode demo.
@@ -52,12 +52,12 @@ $ NDEBUG=1 ./multinode-demo/faucet.sh
### Singlenode Testnet ### Singlenode Testnet
Before you start a validator, make sure you know the IP address of the machine you want to be the bootstrap leader for the demo, and make sure that udp ports 8000-10000 are open on all the machines you want to test with. Before you start a validator, make sure you know the IP address of the machine you want to be the bootstrap validator for the demo, and make sure that udp ports 8000-10000 are open on all the machines you want to test with.
Now start the bootstrap leader in a separate shell: Now start the bootstrap validator in a separate shell:
```bash ```bash
$ NDEBUG=1 ./multinode-demo/bootstrap-leader.sh $ NDEBUG=1 ./multinode-demo/bootstrap-validator.sh
``` ```
Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to receive transactions. The leader will request some tokens from the faucet if it doesn't have any. The faucet does not need to be running for subsequent leader starts. Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to receive transactions. The leader will request some tokens from the faucet if it doesn't have any. The faucet does not need to be running for subsequent leader starts.
@@ -74,7 +74,7 @@ To run a performance-enhanced validator on Linux, [CUDA 10.0](https://developer.
```bash ```bash
$ ./fetch-perf-libs.sh $ ./fetch-perf-libs.sh
$ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/bootstrap-leader.sh $ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/bootstrap-validator.sh
$ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/validator.sh $ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/validator.sh
``` ```
@@ -121,6 +121,34 @@ thread apply all bt
This will dump all the threads stack traces into gdb.txt This will dump all the threads stack traces into gdb.txt
### Blockstreamer
Solana supports a node type called a _blockstreamer_. This validator variation is intended for applications that need to observe the data plane without participating in transaction validation or ledger replication.
A blockstreamer runs without a vote signer, and can optionally stream ledger entries out to a Unix domain socket as they are processed. The JSON-RPC service still functions as on any other node.
To run a blockstreamer, include the argument `no-signer` and \(optional\) `blockstream` socket location:
```bash
$ NDEBUG=1 ./multinode-demo/validator-x.sh --no-signer --blockstream <SOCKET>
```
The stream will output a series of JSON objects:
* An Entry event JSON object is sent when each ledger entry is processed, with the following fields:
* `dt`, the system datetime, as RFC3339-formatted string
* `t`, the event type, always "entry"
* `s`, the slot height, as unsigned 64-bit integer
* `h`, the tick height, as unsigned 64-bit integer
* `entry`, the entry, as JSON object
* A Block event JSON object is sent when a block is complete, with the following fields:
* `dt`, the system datetime, as RFC3339-formatted string
* `t`, the event type, always "block"
* `s`, the slot height, as unsigned 64-bit integer
* `h`, the tick height, as unsigned 64-bit integer
* `l`, the slot leader id, as base-58 encoded string
* `hash`, the [blockhash](terminology.md#blockhash), as base-58 encoded string
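For illustration only, a Block event line on the stream might look like the following (all field values are hypothetical placeholders):
```bash
{"dt":"2020-01-30T21:17:38Z","t":"block","s":1234,"h":49152,"l":"<base-58 encoded leader id>","hash":"<base-58 encoded blockhash>"}
```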
## Public Testnet ## Public Testnet
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`. In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.

book/src/cli/README.md Normal file

@@ -0,0 +1,5 @@
# Using Solana from the Command-line
This chapter describes the command-line tools for interacting with Solana. One
could use these tools to send payments, stake validators, and check account
balances.
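As a hedged illustration of the kind of tool this chapter covers, a balance check might look like the following (subcommand name and argument are assumptions here, not established by this page):
```bash
# Query the balance of an account with the command-line tool (sketch)
$ solana balance <ACCOUNT_PUBKEY>
```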

File diff suppressed because it is too large


@@ -4,7 +4,7 @@ A Solana cluster is a set of validators working together to serve client transac
## Creating a Cluster ## Creating a Cluster
Before starting any validators, one first needs to create a _genesis config_. The config references two public keys, a _mint_ and a _bootstrap leader_. The validator holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis config. The second validator then contacts the bootstrap leader to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster. Before starting any validators, one first needs to create a _genesis config_. The config references two public keys, a _mint_ and a _bootstrap validator_. The validator holding the bootstrap validator's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis config. The second validator then contacts the bootstrap validator to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster.
A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until archiver nodes submit proofs that they have stored copies of it. Once the validator observes a sufficient number of copies exist, it deletes its copy. A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until archiver nodes submit proofs that they have stored copies of it. Once the validator observes a sufficient number of copies exist, it deletes its copy.
@@ -37,4 +37,4 @@ Solana rotates leaders at fixed intervals, called _slots_. Each leader may only
Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions. Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions.
A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique _data plane fanout_; learn more in the [data plane fanout](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/data-plane-fanout.md) section. A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propagation_](turbine-block-propagation.md).


@@ -1,6 +1,6 @@
# Stake Delegation and Rewards # Stake Delegation and Rewards
Stakers are rewarded for helping to validate the ledger. They do this by delegating their stake to validator nodes. Those validators do the legwork of replaying the ledger and send votes to a per-node vote account to which stakers can delegate their stakes. The rest of the cluster uses those stake-weighted votes to select a block when forks arise. Both the validator and staker need some economic incentive to play their part. The validator needs to be compensated for its hardware and the staker needs to be compensated for the risk of getting its stake slashed. The economics are covered in [staking rewards](../proposals/staking-rewards.md). This chapter, on the other hand, describes the underlying mechanics of its implementation. Stakers are rewarded for helping to validate the ledger. They do this by delegating their stake to validator nodes. Those validators do the legwork of replaying the ledger and send votes to a per-node vote account to which stakers can delegate their stakes. The rest of the cluster uses those stake-weighted votes to select a block when forks arise. Both the validator and staker need some economic incentive to play their part. The validator needs to be compensated for its hardware and the staker needs to be compensated for the risk of getting its stake slashed. The economics are covered in [staking rewards](../implemented-proposals/staking-rewards.md). This chapter, on the other hand, describes the underlying mechanics of its implementation.
## Basic Design ## Basic Design
@@ -94,42 +94,22 @@ The Stakes and the RewardsPool are accounts that are owned by the same `Stake` p
### StakeInstruction::DelegateStake ### StakeInstruction::DelegateStake
The Stake account is moved from Ininitialized to StakeState::Stake form. This is how stakers choose their initial delegate validator node and activate their stake account lamports. The transaction must be signed by the stake's `authorized_staker`. If the stake account is already StakeState::Stake \(i.e. already activated\), the stake is re-delegated. Stakes may be re-delegated at any time, and updated stakes are reflected immediately, but only one re-delegation is permitted per epoch. The Stake account is moved from Initialized to StakeState::Stake form, or from a deactivated (i.e. fully cooled-down) StakeState::Stake to activated StakeState::Stake. This is how stakers choose the vote account and validator node to which their stake account lamports are delegated. The transaction must be signed by the stake's `authorized_staker`.
* `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX * `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX
* `account[1]` - R - The VoteState instance. * `account[1]` - R - The VoteState instance.
* `account[2]` - R - sysvar::clock account, carries information about current Bank epoch * `account[2]` - R - sysvar::clock account, carries information about current Bank epoch
* `account[3]` - R - stake::Config accoount, carries warmup, cooldown, and slashing configuration * `account[3]` - R - sysvar::stakehistory account, carries information about stake history
* `account[4]` - R - stake::Config account, carries warmup, cooldown, and slashing configuration
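In practice this instruction is usually issued through the command-line tool rather than constructed by hand; a hedged sketch follows (the subcommand name and argument order are assumptions, and signing/config flags are omitted):
```bash
# Delegate an existing stake account to a validator's vote account
# (sketch; assumes a delegate-stake subcommand in the CLI)
$ solana delegate-stake <STAKE_ACCOUNT_PUBKEY> <VOTE_ACCOUNT_PUBKEY>
```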
### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\) ### StakeInstruction::Authorize\(Pubkey, StakeAuthorize\)
Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be signed by the Stake account's current `authorized_staker` or `authorized_withdrawer`. Updates the account with a new authorized staker or withdrawer, according to the StakeAuthorize parameter \(`Staker` or `Withdrawer`\). The transaction must be signed by the Stake account's current `authorized_staker` or `authorized_withdrawer`. Any stake lock-up must have expired, or the lock-up custodian must also sign the transaction.
* `account[0]` - RW - The StakeState * `account[0]` - RW - The StakeState
`StakeState::authorized_staker` or `authorized_withdrawer` is set to `Pubkey`. `StakeState::authorized_staker` or `authorized_withdrawer` is set to `Pubkey`.
### StakeInstruction::RedeemVoteCredits
The staker or the owner of the Stake account sends a transaction with this instruction to claim rewards.
The Vote account and the Stake account pair maintain a lifetime counter of total rewards generated and claimed. Rewards are paid according to a point value supplied by the Bank from inflation. A `point` is one credit \* one staked lamport, rewards paid are proportional to the number of lamports staked.
* `account[0]` - RW - The StakeState::Stake instance that is redeeming rewards.
* `account[1]` - R - The VoteState instance, must be the same as `StakeState::voter_pubkey`
* `account[2]` - RW - The StakeState::RewardsPool instance that will fulfill the request \(picked at random\).
* `account[3]` - R - sysvar::rewards account from the Bank that carries point value.
* `account[4]` - R - sysvar::stake\_history account from the Bank that carries stake warmup/cooldown history
Reward is paid out for the difference between `VoteState::credits` to `StakeState::Stake::credits_observed`, multiplied by `sysvar::rewards::Rewards::validator_point_value`. `StakeState::Stake::credits_observed` is updated to`VoteState::credits`. The commission is deposited into the Vote account token balance, and the reward is deposited to the Stake account token balance and the stake account's `stake` is increased by the same amount \(re-invested\).
```text
let credits_to_claim = vote_state.credits - stake_state.credits_observed;
stake_state.credits_observed = vote_state.credits;
```
`credits_to_claim` is used to compute the reward and commission, and `StakeState::Stake::credits_observed` is updated to the latest `VoteState::credits` value.
### StakeInstruction::Deactivate
A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cool down.
@@ -162,11 +142,11 @@ Lamports build up over time in a Stake account and any excess over activated sta
## Staking Rewards
The specific mechanics and rules of the validator rewards regime are outlined here. Rewards are earned by delegating stake to a validator that is voting correctly. Voting incorrectly exposes that validator's stakes to [slashing](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/staking-and-rewards.md). The specific mechanics and rules of the validator rewards regime are outlined here. Rewards are earned by delegating stake to a validator that is voting correctly. Voting incorrectly exposes that validator's stakes to [slashing](../proposals/slashing.md).
### Basics
The network pays rewards from a portion of network [inflation](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/inflation.md). The number of lamports available to pay rewards for an epoch is fixed and must be evenly divided among all staked nodes according to their relative stake weight and participation. The weighting unit is called a [point](../terminology.md#point). The network pays rewards from a portion of network [inflation](../terminology.md#inflation). The number of lamports available to pay rewards for an epoch is fixed and must be evenly divided among all staked nodes according to their relative stake weight and participation. The weighting unit is called a [point](../terminology.md#point).
Rewards for an epoch are not available until the end of that epoch.
@@ -228,4 +208,4 @@ Only lamports in excess of effective+activating stake may be withdrawn at any ti
### Lock-up
Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state. Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state. Changing the authorized staker or withdrawer is also subject to lock-up, as such an operation is effectively a transfer.
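As a rough sketch, the Lockup field described here can be pictured as follows; field names and types are assumptions, not the canonical definition:
```rust
use solana_sdk::pubkey::Pubkey;

/// Sketch of the Lockup data stored in the stake account's state.
pub struct Lockup {
    /// Minimum epoch height the network must reach before the stake account
    /// balance is available for withdrawal.
    pub epoch: u64,
    /// Signer whose co-signature allows withdrawal (or authority changes)
    /// before the lock-up expires.
    pub custodian: Pubkey,
}
```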


@@ -1,7 +0,0 @@
# Testnet Participation
Participate in our testnet:
* [Running a Validator](../running-validator/)
* [Running an Archiver](../running-archiver.md)


@@ -1,90 +0,0 @@
# Blockstore
After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../cluster/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized.
The blockstore allows a validator to record every shred it observes on the network, in any order, as long as the shred is signed by the expected leader for a given slot.
Shreds are moved to a fork-able key space, the tuple of `leader slot` + `shred index` \(within the slot\). This permits the skip-list structure of the Solana protocol to be stored in its entirety, without a-priori choosing which fork to follow, which Entries to persist or when to persist them.
Repair requests for recent shreds are served out of RAM or recent files and out of deeper storage for less recent shreds, as implemented by the store backing Blockstore.
## Functionalities of Blockstore
1. Persistence: the Blockstore lives in the front of the node's verification
pipeline, right behind network receive and signature verification. If the
shred received is consistent with the leader schedule \(i.e. was signed by the
leader for the indicated slot\), it is immediately stored.
2. Repair: repair is the same as window repair above, but able to serve any
shred that's been received. Blockstore stores shreds with signatures,
preserving the chain of origination.
3. Forks: Blockstore supports random access of shreds, so can support a
validator's need to rollback and replay from a Bank checkpoint.
4. Restart: with proper pruning/culling, the Blockstore can be replayed by
ordered enumeration of entries from slot 0. The logic of the replay stage
\(i.e. dealing with forks\) will have to be used for the most recent entries in
the Blockstore.
## Blockstore Design
1. Entries in the Blockstore are stored as key-value pairs, where the key is the concatenated slot index and shred index for an entry, and the value is the entry data. Note shred indexes are zero-based for each slot \(i.e. they're slot-relative\).
2. The Blockstore maintains metadata for each slot, in the `SlotMeta` struct containing:
* `slot_index` - The index of this slot
* `num_blocks` - The number of blocks in the slot \(used for chaining to a previous slot\)
* `consumed` - The highest shred index `n`, such that for all `m < n`, there exists a shred in this slot with shred index equal to `m` \(i.e. the highest consecutive shred index\).
* `received` - The highest received shred index for the slot
* `next_slots` - A list of future slots this slot could chain to. Used when rebuilding
the ledger to find possible fork points.
* `last_index` - The index of the shred that is flagged as the last shred for this slot. This flag on a shred will be set by the leader for a slot when they are transmitting the last shred for a slot.
* `is_rooted` - True iff every block from 0...slot forms a full sequence without any holes. We can derive is\_rooted for each slot with the following rules. Let slot\(n\) be the slot with index `n`, and slot\(n\).is\_full\(\) is true if the slot with index `n` has all the ticks expected for that slot. Let is\_rooted\(n\) be the statement that "the slot\(n\).is\_rooted is true". Then: is\_rooted\(0\) is true, and is\_rooted\(n+1\) iff \(is\_rooted\(n\) and slot\(n\).is\_full\(\)\). A sketch of `SlotMeta` and this rule appears after this list.
3. Chaining - When a shred for a new slot `x` arrives, we check the number of blocks \(`num_blocks`\) for that new slot \(this information is encoded in the shred\). We then know that this new slot chains to slot `x - num_blocks`.
4. Subscriptions - The Blockstore records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blockstore channel for consumption by the ReplayStage. See the `Blockstore APIs` for details.
5. Update notifications - The Blockstore notifies listeners when slot\(n\).is\_rooted is flipped from false to true for any `n`.
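A sketch of the metadata and the is\_rooted rule described in the list above; field types are simplified and names follow the list, not necessarily the real struct:
```rust
/// Simplified sketch of the per-slot metadata listed above.
pub struct SlotMeta {
    pub slot_index: u64,      // index of this slot
    pub num_blocks: u64,      // used for chaining to a previous slot
    pub consumed: u64,        // highest consecutive shred index
    pub received: u64,        // highest received shred index
    pub next_slots: Vec<u64>, // possible future slots, used to find fork points
    pub last_index: u64,      // shred index flagged as last for this slot
    pub is_rooted: bool,
}

/// is_rooted(0); is_rooted(n+1) iff is_rooted(n) and slot(n).is_full().
fn derive_is_rooted(slots: &[SlotMeta], is_full: impl Fn(&SlotMeta) -> bool) -> Vec<bool> {
    let mut rooted = vec![false; slots.len()];
    for n in 0..slots.len() {
        rooted[n] = if n == 0 {
            true
        } else {
            rooted[n - 1] && is_full(&slots[n - 1])
        };
    }
    rooted
}
```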
## Blockstore APIs
The Blockstore offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blockstore. These subscription APIs are as follows:
1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
2. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`; otherwise, no upper limit on the length of the return vector is imposed.
Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blockstore.
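Building on the `SlotMeta` sketch above, a hedged illustration of the consumption pattern this note describes; the `Entry` type and the replay call are placeholders:
```rust
pub struct Entry; // placeholder for the real ledger entry type

pub trait BlockstoreApi {
    fn get_slots_since(&self, slot_indexes: &[u64]) -> Vec<SlotMeta>;
    fn get_slot_entries(
        &self,
        slot_index: u64,
        entry_start_index: usize,
        max_entries: Option<u64>,
    ) -> Vec<Entry>;
}

/// ReplayStage now owns the chaining: when it finishes a slot it subscribes
/// to the slots that chain from it via `next_slots`.
fn replay_loop(blockstore: &impl BlockstoreApi, mut interesting: Vec<u64>) {
    loop {
        for meta in blockstore.get_slots_since(&interesting) {
            let _entries = blockstore.get_slot_entries(meta.slot_index, 0, None);
            // ...verify and replay the entries against the appropriate bank...
            interesting.extend_from_slice(&meta.next_slots);
        }
    }
}
```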
## Interfacing with Bank
The bank exposes to replay stage:
1. `prev_hash`: which PoH chain it's working on as indicated by the hash of the last
entry it processed
2. `tick_height`: the ticks in the PoH chain currently being verified by this
bank
3. `votes`: a stack of records \(sketched below\) that contain:
   1. `prev_hashes`: what anything after this vote must chain to in PoH
   2. `tick_height`: the tick height at which this vote was cast
   3. `lockout period`: how long a chain must be observed to be in the ledger to be able to be chained below this vote
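A hedged sketch of this interface; struct and field names are illustrative only:
```rust
use solana_sdk::hash::Hash;

/// Sketch of one vote record in the stack described above.
pub struct VoteRecord {
    pub prev_hashes: Vec<Hash>, // what anything after this vote must chain to in PoH
    pub tick_height: u64,       // tick height at which this vote was cast
    pub lockout_period: u64,    // how long a chain must be observed to chain below this vote
}

/// Sketch of what the bank exposes to replay stage.
pub struct BankReplayView {
    pub prev_hash: Hash,        // PoH chain the bank is working on
    pub tick_height: u64,       // ticks currently being verified
    pub votes: Vec<VoteRecord>, // stack of votes
}
```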
Replay stage uses Blockstore APIs to find the longest chain of entries it can hang off a previous vote. If that chain of entries does not hang off the latest vote, the replay stage rolls back the bank to that vote and replays the chain from there.
## Pruning Blockstore
Once Blockstore entries are old enough, representing all the possible forks becomes less useful, perhaps even problematic for replay upon restart. Once a validator's votes have reached max lockout, however, any Blockstore contents that are not on the PoH chain for that vote can be pruned, expunged.
Archiver nodes will be responsible for storing really old ledger contents, and validators need only persist their bank periodically.


@@ -28,7 +28,7 @@ lockout on a bank `b`.
This computation is performed on a votable candidate bank `b` as follows.
``` ```text
let output: HashMap<b, StakeLockout> = HashMap::new();
for vote_account in b.vote_accounts {
    for v in vote_account.vote_stack {
@@ -62,7 +62,7 @@ votes > v as the number of confirmations will be lower).
Now more specifically, we augment the above computation to:
``` ```text
let output: HashMap<b, StakeLockout> = HashMap::new();
let fork_commitment_cache = ForkCommitmentCache::default();
for vote_account in b.vote_accounts {
@@ -76,7 +76,7 @@ Now more specifically, we augment the above computation to:
```
where `f'` is defined as:
``` ```text
fn f'(
    stake_lockout: &mut StakeLockout,
    some_ancestor: &mut BlockCommitment,


@@ -10,7 +10,6 @@ These protocol-based rewards, to be distributed to participating validation and
Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction \(be it a state execution or proof-of-replication verification\). A mechanism for long-term economic stability and forking protection through partial burning of each transaction fee is also discussed below.
A high-level schematic of Solana's crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md). An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized. A high-level schematic of Solana's crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics/), [State-validation Protocol-based Rewards](ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_validation_client_economics/ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_validation_client_economics/ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_validation_client_economics/ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](ed_storage_rent_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics/) chapter will review the Solana network design for global ledger storage/redundancy and archiver-client economics \([Storage-replication rewards](ed_replication_client_economics/ed_rce_storage_replication_rewards.md)\) along with an archiver-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_replication_client_economics/ed_rce_replication_client_reward_auto_delegation.md).
An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
**Figure 1**: Schematic overview of Solana economic incentive design.


@@ -8,5 +8,4 @@ While replication-clients are incentivized and rewarded through protocol-based r
The validation of PoReps by validation-clients is computationally more expensive than state-validation \(detailed in the [Economic Sustainability](../ed_economic_sustainability.md) chapter\), thus the transaction fees are expected to be proportionally higher.
There are various attack vectors available for colluding validation and replication clients, also described in detail below in [Economic Sustainability](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archiver's challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\). There are various attack vectors available for colluding validation and replication clients, also described in detail below in [Economic Sustainability](../ed_economic_sustainability/README.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch less the number of PoReps that mismatch the archiver's challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators proportional to the number of PoReps they process and validate, while providing negative pressure for validation-clients to submit lazy or malicious invalid votes on submitted PoReps \(note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid\).


@@ -11,7 +11,7 @@ Validator-client rewards for these services are to be distributed at the end of
The effective protocol-based annual interest rate \(%\) per epoch received by validation-clients is to be a function of:
* the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule \(see [Validation-client Economics](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_validartion_client_economics.md)\) * the current global inflation rate, derived from the pre-determined dis-inflationary issuance schedule \(see [Validation-client Economics](.)\)
* the fraction of staked SOLs out of the current total circulating supply,
* the up-time/participation \[% of available slots that validator had opportunity to vote on\] of a given validator over the previous epoch.


@@ -13,7 +13,6 @@ Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on protocol-b
Transaction fees are set by the network cluster based on recent historical throughput, see [Congestion Driven Fees](../../transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical gas usage. In this way, the protocol can use the minimum fee to target a desired hardware utilisation. By monitoring a protocol specified gas usage with respect to a desired, target usage amount, the minimum fee can be raised/lowered which should, in turn, lower/raise the actual gas usage per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level.
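A minimal sketch of that adjustment loop, assuming a simple proportional step; the gain and bounds are illustrative, not protocol parameters:
```rust
/// Sketch: nudge the minimum fee toward a target gas usage per block.
/// The 1/16 step and the floor of 1 are assumptions for illustration.
fn adjust_min_fee(min_fee: u64, recent_gas_used: u64, target_gas: u64) -> u64 {
    let step = (min_fee / 16).max(1);
    if recent_gas_used > target_gas {
        // Usage above target: raise the minimum fee to push usage down.
        min_fee + step
    } else if recent_gas_used < target_gas {
        // Usage below target: lower the minimum fee, but keep it non-zero.
        min_fee.saturating_sub(step).max(1)
    } else {
        min_fee
    }
}
```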
As mentioned, a fixed-proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\)[1](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/ed_referenced.md). As mentioned, a fixed-proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\)[1](../ed_references.md).
Additionally, the burnt fees can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total fees destroyed to be less than a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves, thus potentially reducing the incentive to censor in the first place.


@@ -18,9 +18,9 @@ Accounts whose balance is insufficient to satisfy the rent that would be due sim
A percentage of the rent collected is destroyed. The rest is distributed to validator accounts by stake weight, a la transaction fees, at the end of every slot.
## Credit only ## Read-only accounts
Credit only accounts are treated as a special case. They are loaded as if rent were due, but updates to their state may be delayed until the end of the slot, when credits are paid. Read-only accounts are not charged rent in the current implementation.
## Design considerations, others considered


@@ -12,7 +12,7 @@ For brevity this design assumes that a single voter with a stake is deployed as
## Time
The Solana cluster generates a source of time via a Verifiable Delay Function we are calling [Proof of History](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/synchronization.md). The Solana cluster generates a source of time via a Verifiable Delay Function we are calling [Proof of History](../cluster/synchronization.md).
Proof of History is used to create a deterministic round robin schedule for all the active leaders. At any given time only 1 leader, which can be computed from the ledger itself, can propose a fork. For more details, see [fork generation](../cluster/fork-generation.md) and [leader rotation](../cluster/leader-rotation.md).
@@ -109,7 +109,7 @@ When evaluating multiple forks, each validator should use the following rules:
3. Pick the fork that has the greatest amount of cluster transaction fees.
4. Pick the latest fork in terms of PoH.
Cluster transaction fees are fees that are deposited to the mining pool as described in the [Staking Rewards](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/staking-rewards.md) section. Cluster transaction fees are fees that are deposited to the mining pool as described in the [Staking Rewards](staking-rewards.md) section.
## PoH ASIC Resistance
@@ -134,4 +134,3 @@ An attacker generates a concurrent fork from an older block to try to rollback t
* 3 votes have a lockout of 8 slots. Concurrent fork must be at least 8 slots ahead and produced in 3 slots. Therefore requires an ASIC 2.6x faster.
* 10 votes have a lockout of 1024 slots. 1024/10, or 102.4x faster ASIC.
* 20 votes have a lockout of 2^20 slots. 2^20/20, or 52,428.8x faster ASIC.
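These bullets follow one pattern; a small sketch, assuming (as the numbers above imply) that lockout doubles with each vote:
```rust
/// Required ASIC speedup after `votes` consecutive votes, given a lockout of
/// 2^votes slots that the attacker must outpace within `votes` slots.
fn required_asic_speedup(votes: u32) -> f64 {
    let lockout_slots = 2u64.pow(votes) as f64;
    lockout_slots / votes as f64
}

// required_asic_speedup(3)  ≈ 2.67
// required_asic_speedup(10) = 102.4
// required_asic_speedup(20) = 52_428.8
```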


@@ -64,7 +64,7 @@ presently stored nonce value with
- Command
```bash
solana get-nonce nonce-keypair.json solana nonce nonce-keypair.json
```
- Output
@@ -105,7 +105,7 @@ Inspect a nonce account in a more human friendly format with
- Command
```bash
solana show-nonce-account nonce-keypair.json solana nonce-account nonce-keypair.json
```
- Output
@@ -117,7 +117,7 @@ nonce: DZar6t2EaCFQTbUP4DHKwZ1wT8gCPW2aRfkVWhydkBvS
```
{% hint style="info" %}
[Full usage documentation](../api-reference/cli.md#solana-show-nonce-account) [Full usage documentation](../api-reference/cli.md#solana-nonce-account)
{% endhint %}
### Withdraw Funds from a Nonce Account
@@ -236,7 +236,7 @@ Remember, `alice.json` is the [nonce authority](#nonce-authority) in this exampl
{% endhint %}
```bash
$ solana show-nonce-account nonce.json $ solana nonce-account nonce.json
balance: 1 SOL
minimum balance required: 0.00136416 SOL
nonce: F7vmkY3DTaxfagttWjQweib42b6ZHADSx94Tw8gHx3W7
@@ -256,7 +256,7 @@ $ solana balance -k bob.json
1 SOL
```
```bash
$ solana show-nonce-account nonce.json $ solana nonce-account nonce.json
balance: 1 SOL
minimum balance required: 0.00136416 SOL
nonce: 6bjroqDcZgTv6Vavhqf81oBHTv3aMnX19UTB51YhAZnN


@@ -102,7 +102,7 @@ networked machine.
Next, configure the `solana` CLI tool to connect to a particular cluster:
```bash
solana set --url <CLUSTER URL> # (i.e. http://testnet.solana.com:8899) solana config set --url <CLUSTER URL> # (i.e. http://testnet.solana.com:8899)
```
Finally, to check the balance, run the following command:


@@ -0,0 +1,90 @@
# Solana ABI management process
This document proposes the Solana ABI management process. The ABI management
process is an engineering practice and a supporting technical framework to avoid
introducing unintended incompatible ABI changes.
# Problem
The Solana ABI (binary interface to the cluster) is currently only defined
implicitly by the implementation and requires a very careful eye to notice
breaking changes. This makes it extremely difficult to upgrade the software
on an existing cluster without rebooting the ledger.
# Requirements and objectives
- Unintended ABI changes can be detected as CI failures mechanically.
- Newer implementation must be able to process the oldest data (since genesis)
once we go mainnet.
- The objective of this proposal is to protect the ABI while sustaining rather
rapid development by opting for a mechanical process rather than a very long
human-driven auditing process.
- Once signed cryptographically, a data blob must remain identical, so no
in-place data format update is possible, regardless of whether the data is
inbound to or outbound from the online system. Also, considering the sheer
volume of transactions we're aiming to handle, retrospective in-place updates
are undesirable at best.
# Solution
Instead of relying on human due diligence, which should be assumed to fail
regularly, we need a systematic assurance of not breaking the cluster when
changing the source code.
For that purpose, we introduce a mechanism of marking every ABI-related item
in the source code (`struct`s, `enum`s) with the new `#[frozen_abi]` attribute.
The attribute takes a hard-coded digest value derived from the types of its
fields via `ser::Serialize`, and it automatically generates a unit test that
tries to detect any unsanctioned changes to the marked ABI items.
However, the detection cannot be complete; no matter how hard we statically
analyze the source code, it's still possible to break the ABI. For example,
this includes hand-written `ser::Serialize` implementations that are not
`derive`d, changes to an underlying library's implementation (for example
`bincode`), and CPU architecture differences. The detection of these possible
ABI incompatibilities is out of scope for this ABI management process.
# Definitions
ABI item/type: various types to be used for serialization, which collectively
comprise the whole ABI for any system component. For example, those types
include `struct`s and `enum`s.
ABI item digest: some fixed hash derived from the type information of an ABI
item's fields.
# Example
```patch
+#[frozen_abi(digest="1c6a53e9")]
#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Vote {
/// A stack of votes starting with the oldest vote
pub slots: Vec<Slot>,
/// signature of the bank's state at the last slot
pub hash: Hash,
}
```
# Developer's workflow
To know the digest for new ABI items, developers can add `frozen_abi` with a
random digest value, run the unit tests, and replace it with the correct
digest taken from the assertion test's error message.
In general, once we add `frozen_abi` and its change is published in the stable
release channel, its digest should never change. If such a change is needed, we
should opt for defining a new struct like `FooV1`, and a special release flow
such as a hard fork should be considered.
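A sketch of that workflow; the placeholder digest and the shape of the failure message are assumptions for illustration:
```rust
// Step 1: annotate the type with a placeholder digest.
#[frozen_abi(digest = "0000000000000000")]
#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Foo {
    pub lamports: u64,
}

// Step 2: run `cargo test`; the generated unit test is expected to fail and
// report the digest actually computed from Foo's field types.
//
// Step 3: replace the placeholder with the reported digest and commit. From
// then on, an unsanctioned change to Foo's fields fails the test, while a
// sanctioned ABI change introduces a new type such as `FooV1` instead.
```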
# Implementation remarks
We use some degree of macro machinery to automatically generate unit tests
and calculate a digest from ABI items. This is doable by clever use of
`serde::Serialize` ([1]) and `any::type_name` ([2]). For a precedent of a
similar implementation, `ink` from Parity Technologies [3] could be
informational.
# References
1. [(De)Serialization with type info · Issue #1095 · serde-rs/serde](https://github.com/serde-rs/serde/issues/1095#issuecomment-345483479)
2. [`std::any::type_name` - Rust](https://doc.rust-lang.org/std/any/fn.type_name.html)
3. [Parity's ink to write smart contracts](https://github.com/paritytech/ink)


@@ -10,7 +10,7 @@ When replay stage starts processing the same transactions, it can assume that Po
## Fee Account
The [fee account](https://github.com/solana-labs/solana/tree/b5f7a4bff9953415b1f3d385bd59bc65c1ec11a4/book/src/proposals/terminology.md#fee_account) pays for the transaction to be included in the block. The leader only needs to validate that the fee account has the balance to pay for the fee. The [fee account](../terminology.md#fee_account) pays for the transaction to be included in the block. The leader only needs to validate that the fee account has the balance to pay for the fee.
## Balance Cache
@@ -53,4 +53,3 @@ The same fee account can be reused many times in the same block until it is used
Clients that transmit a large number of transactions per second should use a dedicated fee account that is not used as Credit-Debit in any instruction.
Once a fee account is used as Credit-Debit, it will fail the balance check until the balance cache is reset.


@@ -1,74 +1,108 @@
# Simple Payment and State Verification
It is often useful to allow low resourced clients to participate in a Solana cluster. Be this participation economic or contract execution, verification that a client's activity has been accepted by the network is typically expensive. This proposal lays out a mechanism for such clients to confirm that their actions have been committed to the ledger state with minimal resource expenditure and third-party trust.
## A Naive Approach
Validators store the signatures of recently confirmed transactions for a short period of time to ensure that they are not processed more than once. Validators provide a JSON RPC endpoint, which clients can use to query the cluster if a transaction has been recently processed. Validators also provide a PubSub notification, whereby a client registers to be notified when a given signature is observed by the validator. While these two mechanisms allow a client to verify a payment, they are not a proof and rely on completely trusting a validator.
We will describe a way to minimize this trust using Merkle Proofs to anchor the validator's response in the ledger, allowing the client to confirm on their own that a sufficient number of their preferred validators have confirmed a transaction. Requiring multiple validator attestations further reduces trust in the validator, as it increases both the technical and economic difficulty of compromising several other network participants.
## Light Clients
A 'light client' is a cluster participant that does not itself run a validator. This light client would provide a level of security greater than trusting a remote validator, without requiring the light client to spend a lot of resources verifying the ledger.
Rather than providing transaction signatures directly to a light client, the validator instead generates a Merkle Proof from the transaction of interest to the root of a Merkle Tree of all transactions in the including block. This Merkle Root is stored in a ledger entry which is voted on by validators, providing it consensus legitimacy. The additional level of security for a light client depends on an initial canonical set of validators the light client considers to be the stakeholders of the cluster. As that set is changed, the client can update its internal set of known validators with [receipts](simple-payment-and-state-verification.md#receipts). This may become challenging with a large number of delegated stakes.
Validators themselves may want to use light client APIs for performance reasons. For example, during the initial launch of a validator, the validator may use a cluster provided checkpoint of the state and verify it with a receipt.
## Receipts
A receipt is a minimal proof that: a transaction has been included in a block, that the block has been voted on by the client's preferred set of validators and that the votes have reached the desired confirmation depth.
The receipts for both state and payments start with a Merkle Path from the value into a Bank-Merkle that has been voted on and included in the ledger. A chain of PoH Entries containing subsequent validator votes, deriving from the Bank-Merkle, is the confirmation proof. ### Transaction Inclusion Proof
Clients can examine this ledger data and compute the finality using Solana's fork selection rules. A transaction inclusion proof is a data structure that contains a Merkle Path
from a transaction, through an Entry-Merkle to a Block-Merkle, which is included
in a Bank-Hash with the required set of validator votes. A chain of PoH Entries
containing subsequent validator votes, deriving from the Bank-Hash, is the proof
of confirmation. Clients can examine this ledger data and compute finality using
Solana's fork selection rules.
### Payment Merkle Path An Entry-Merkle is a Merkle Root including all transactions in a given entry,
sorted by signature.
A payment receipt is a data structure that contains a Merkle Path from a transaction to the required set of validator votes. A Block-Merkle is the Merkle Root of all the Entry-Merkles sequenced in the block.
An Entry-Merkle is a Merkle Root including all transactions in the entry, sorted by signature.
![Block Merkle Diagram](../.gitbook/assets/spv-block-merkle.svg)
A Block-Merkle is a Merkle root of all the Entry-Merkles sequenced in the block. Transaction status is necessary for the receipt because the state receipt is constructed for the block. Two transactions over the same state can appear in the block, and therefore, there is no way to infer from just the state whether a transaction that is committed to the ledger has succeeded or failed in modifying the intended state. It may not be necessary to encode the full status code, but a single status bit to indicate the transaction's success. A Bank-Hash is the hash of the concatenation of the Block-Merkle and Accounts-Hash
### State Merkle Path <img alt="Bank Hash Diagram" src="img/spv-bank-hash.svg" class="center"/>
A state receipt provides a confirmation that a specific state is committed at the end of the block. Inter-block state transitions do not generate a receipt. An Accounts-Hash is the hash of the concatenation of the state hashes of each
account modified during the current slot.
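The hash relationships described above can be sketched as follows; the pairing convention and helper names are assumptions, not the exact implementation:
```rust
use solana_sdk::hash::{hashv, Hash};

/// Sketch: fold a list of hashes into a Merkle root, duplicating the last
/// element when a level has an odd count.
fn merkle_root(mut leaves: Vec<Hash>) -> Hash {
    while leaves.len() > 1 {
        leaves = leaves
            .chunks(2)
            .map(|pair| hashv(&[pair[0].as_ref(), pair.last().unwrap().as_ref()]))
            .collect();
    }
    leaves.first().copied().unwrap_or_default()
}

/// Entry-Merkle = Merkle root of an entry's transactions (by signature),
/// Block-Merkle = Merkle root of the Entry-Merkles,
/// Bank-Hash    = hash(Block-Merkle || Accounts-Hash).
fn bank_hash(entry_merkles: Vec<Hash>, accounts_hash: Hash) -> Hash {
    let block_merkle = merkle_root(entry_merkles);
    hashv(&[block_merkle.as_ref(), accounts_hash.as_ref()])
}
```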
For example: Transaction status is necessary for the receipt because the state receipt is
constructed for the block. Two transactions over the same state can appear in
the block, and therefore, there is no way to infer from just the state whether
a transaction that is committed to the ledger has succeeded or failed in
modifying the intended state. It may not be necessary to encode the full status
code, but a single status bit to indicate the transaction's success.
* A sends 5 Lamports to B ### Account State Verification
* B spends 5 Lamports
* C sends 5 Lamports to A
At the end of the block, A and B are in the exact same starting state, and any state receipt would point to the same value for A or B. An account's state (balance or other data) can be verified by submitting a
transaction with a ___TBD___ Instruction to the cluster. The client can then
The Bank-Merkle is computed from the Merkle Tree of the new state changes, along with the Previous Bank-Merkle, and the Block-Merkle. use a [Transaction Inclusion Proof](#transaction-inclusion-proof) to verify
whether the cluster agrees that the account has reached the expected state.
![Bank Merkle Diagram](../.gitbook/assets/spv-bank-merkle.svg)
A state receipt contains only the state changes occurring in the block. A direct Merkle Path to the current Bank-Merkle guarantees the state value at that bank hash, but it cannot be used to generate a “current” receipt to the latest state if the state modification occurred in some previous block. There is no guarantee that the path provided by the validator is the latest one available out of all the previous Bank-Merkles.
Clients that want to query the chain for a receipt of the "latest" state would need to create a transaction that would update the Merkle Path for that account, such as a credit of 0 Lamports.
### Validator Votes
Leaders should coalesce the validator votes by stake weight into a single entry. This will reduce the number of entries necessary to create a receipt.
### Chain of Entries
A receipt has a PoH link from the payment or state Merkle Path root to a list of consecutive validation votes.
It contains the following:
* State -&gt; Bank-Merkle * Transaction -&gt; Entry-Merkle -&gt; Block-Merkle -&gt; Bank-Hash
or
* Transaction -&gt; Entry-Merkle -&gt; Block-Merkle -&gt; Bank-Merkle
And a vector of PoH entries:
@@ -89,21 +123,33 @@ LightEntry {
}
```
The light entries are reconstructed from Entries and simply show the entry Merkle Root that was mixed in to the PoH hash, instead of the full transaction set.
Clients do not need the starting vote state. The [fork selection](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/fork-selection.md) algorithm is defined such that only votes that appear after the transaction provide finality for the transaction, and finality is independent of the starting state. Clients do not need the starting vote state. The
[fork selection](../implemented-proposals/tower-bft.md) algorithm is defined
such that only votes that appear after the transaction provide finality for the
transaction, and finality is independent of the starting state.
### Verification
A light client that is aware of the supermajority set validators can verify a receipt by following the Merkle Path to the PoH chain. The Bank-Merkle is the Merkle Root and will appear in votes included in an Entry. The light client can simulate [fork selection](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/book/src/fork-selection.md) for the consecutive votes and verify that the receipt is confirmed at the desired lockout threshold. A light client that is aware of the supermajority set validators can verify a
receipt by following the Merkle Path to the PoH chain. The Block-Merkle is the
Merkle Root and will appear in votes included in an Entry. The light client can
simulate [fork selection](../implemented-proposals/tower-bft.md) for the
consecutive votes and verify that the receipt is confirmed at the desired
lockout threshold.
### Synthetic State
Synthetic state should be computed into the Bank-Merkle along with the bank generated state. Synthetic state should be computed into the Bank-Hash along with the bank
generated state.
For example:
* Epoch validator accounts and their stakes and weights.
* Computed fee rates
These values should have an entry in the Bank-Merkle. They should live under known accounts, and therefore have an exact address in the Merkle Path. These values should have an entry in the Bank-Hash. They should live under known
accounts, and therefore have an index into the hash concatenation.


@@ -2,24 +2,44 @@
## History
When we first started Solana, the goal was to de-risk our TPS claims. We knew that between optimistic concurrency control and sufficiently long leader slots, PoS consensus was not the biggest risk to TPS. It was GPU-based signature verification, software pipelining and concurrent banking. Thus, the TPU was born. After topping 100k TPS, we split the team into one group working toward 710k TPS and another to flesh out the validator pipeline. Hence, the TVU was born. The current architecture is a consequence of incremental development with that ordering and project priorities. It is not a reflection of what we ever believed was the most technically elegant cross-section of those technologies. In the context of leader rotation, the strong distinction between leading and validating is blurred.
## Difference between validating and leading
The fundamental difference between the pipelines is when the PoH is present. In a leader, we process transactions, removing bad ones, and then tag the result with a PoH hash. In the validator, we verify that hash, peel it off, and process the transactions in exactly the same way. The only difference is that if a validator sees a bad transaction, it can't simply remove it like the leader does, because that would cause the PoH hash to change. Instead, it rejects the whole block. The other difference between the pipelines is what happens _after_ banking. The leader broadcasts entries to downstream validators whereas the validator will have already done that in RetransmitStage, which is a confirmation time optimization. The validation pipeline, on the other hand, has one last step. Any time it finishes processing a block, it needs to weigh any forks it's observing, possibly cast a vote, and if so, reset its PoH hash to the block hash it just voted on.
## Proposed Design
We unwrap the many abstraction layers and build a single pipeline that can toggle leader mode on whenever the validator's ID shows up in the leader schedule.
![Validator block diagram](../.gitbook/assets/validator-proposal.svg)
## Notable changes
* No threads are shut down to switch out of leader mode. Instead, FetchStage
should forward transactions to the next leader.
* Hoist FetchStage and BroadcastStage out of TPU
* BankForks renamed to Banktree
* TPU moves to new socket-free crate called solana-tpu.
@@ -27,8 +47,6 @@ We unwrap the many abstraction layers and build a single pipeline that can toggl
* TVU goes away
* New RepairStage absorbs Shred Fetch Stage and repair requests
* JSON RPC Service is optional - used for debugging. It should instead be part
of a separate `solana-blockstreamer` executable.
* New MulticastStage absorbs retransmit part of RetransmitStage
* MulticastStage downstream of Blockstore


@@ -149,8 +149,8 @@ From another console, confirm the IP address and **identity pubkey** of your arc
solana-gossip spy --entrypoint testnet.solana.com:8001
```
Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver: Provide the **storage account pubkey** to the `solana storage-account` command to view the recent mining activity from your archiver:
```bash
solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY solana --keypair storage-keypair.json storage-account $STORAGE_IDENTITY
```


@@ -21,11 +21,11 @@ solana balance --lamports
## Check Vote Activity
The `solana show-vote-account` command displays the recent voting activity from The `solana vote-account` command displays the recent voting activity from
your validator:
```bash
solana show-vote-account ~/validator-vote-keypair.json solana vote-account ~/validator-vote-keypair.json
```
## Get Cluster Info


@@ -54,11 +54,7 @@ solana delegate-stake ~/validator-stake-keypair.json ~/some-other-validator-vote
```
Assuming the node is voting, now you're up and running and generating validator
rewards. You'll want to periodically redeem/claim your rewards: rewards. Rewards are paid automatically on epoch boundaries.
```bash
solana redeem-vote-credits ~/validator-stake-keypair.json ~/validator-vote-keypair.json
```
The rewards lamports earned are split between your stake account and the vote
account according to the commission rate set in the vote account. Rewards can
@@ -85,11 +81,11 @@ so it can take an hour or more for stake to come fully online.
To monitor your validator during its warmup period:
* View your vote account:`solana show-vote-account ~/validator-vote-keypair.json` This displays the current state of all the votes the validator has submitted to the network. * View your vote account:`solana vote-account ~/validator-vote-keypair.json` This displays the current state of all the votes the validator has submitted to the network.
* View your stake account, the delegation preference and details of your stake:`solana show-stake-account ~/validator-stake-keypair.json` * View your stake account, the delegation preference and details of your stake:`solana stake-account ~/validator-stake-keypair.json`
* `solana uptime ~/validator-vote-keypair.json` will display the voting history \(aka, uptime\) of your validator over recent Epochs
* `solana show-validators` displays the current active stake of all validators, including yours * `solana validators` displays the current active stake of all validators, including yours
* `solana show-stake-history ` shows the history of stake warming up and cooling down over recent epochs * `solana stake-history ` shows the history of stake warming up and cooling down over recent epochs
* Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] <VALIDATOR_IDENTITY_PUBKEY> voted and reset PoH at tick height ####. My next leader slot is ####`
* Once your stake is warmed up, you will see a stake balance listed for your validator on the [Solana Network Explorer](http://explorer.solana.com/validators)
@@ -132,6 +128,3 @@ depending on active stake and the size of your stake.
Note that a stake account may only be used once, so after deactivation, use the Note that a stake account may only be used once, so after deactivation, use the
cli's `withdraw-stake` command to recover the previously staked lamports. cli's `withdraw-stake` command to recover the previously staked lamports.
Be sure and redeem your credits before withdrawing all your lamports. Once the
account is fully withdrawn, the account is destroyed.

View File

@@ -6,7 +6,7 @@ The solana cli includes `get` and `set` configuration commands to automatically
set the `--url` argument for cli commands. For example: set the `--url` argument for cli commands. For example:
```bash ```bash
solana set --url http://testnet.solana.com:8899 solana config set --url http://testnet.solana.com:8899
``` ```
\(You can always override the set configuration by explicitly passing the \(You can always override the set configuration by explicitly passing the
@@ -18,7 +18,7 @@ Before attaching a validator node, sanity check that the cluster is accessible
to your machine by fetching the transaction count: to your machine by fetching the transaction count:
```bash ```bash
solana get-transaction-count solana transaction-count
``` ```
Inspect the network explorer at Inspect the network explorer at
@@ -100,7 +100,7 @@ Now that you have a keypair, set the solana configuration to use your validator
keypair for all following commands: keypair for all following commands:
```bash ```bash
solana set --keypair ~/validator-keypair.json solana config set --keypair ~/validator-keypair.json
``` ```
You should see the following output: You should see the following output:

View File

@@ -26,7 +26,7 @@ A preimage resistant [hash](terminology.md#hash) of the [ledger](terminology.md#
The number of [blocks](terminology.md#block) beneath the current block. The first block after the [genesis block](terminology.md#genesis-block) has height one. The number of [blocks](terminology.md#block) beneath the current block. The first block after the [genesis block](terminology.md#genesis-block) has height one.
## bootstrap leader ## bootstrap validator
The first [validator](terminology.md#validator) to produce a [block](terminology.md#block). The first [validator](terminology.md#validator) to produce a [block](terminology.md#block).
@@ -112,6 +112,10 @@ The configuration file that prepares the [ledger](terminology.md#ledger) for the
A digital fingerprint of a sequence of bytes. A digital fingerprint of a sequence of bytes.
## inflation
An increase in token supply over time, used to fund rewards for validation and replication and to fund continued development of Solana.
## instruction ## instruction
The smallest unit of a [program](terminology.md#program) that a [client](terminology.md#client) can include in a [transaction](terminology.md#transaction). The smallest unit of a [program](terminology.md#program) that a [client](terminology.md#client) can include in a [transaction](terminology.md#transaction).

View File

@@ -1,17 +1,79 @@
# Anatomy of a Transaction # Anatomy of a Transaction
Transactions encode lists of instructions that are executed sequentially, and only committed if all the instructions complete successfully. All account updates are reverted upon the failure of a transaction. Each transaction details the accounts used, including which must sign and which are read only, a recent blockhash, the instructions, and any signatures. This chapter documents the binary format of a transaction.
## Accounts and Signatures ## Transaction Format
Each transaction explicitly lists all account public keys referenced by the transaction's instructions. A subset of those public keys are each accompanied by a transaction signature. Those signatures signal on-chain programs that the account holder has authorized the transaction. Typically, the program uses the authorization to permit debiting the account or modifying its data. A transaction contains a [compact-array](#compact-array-format) of signatures,
followed by a [message](#message-format). Each item in the signatures array is
a [digital signature](#signature-format) of the given message. The Solana
runtime verifies that the number of signatures matches the number in the first
8 bits of the [message header](#message-header-format). It also verifies that
each signature was signed by the private key corresponding to the public key at
the same index in the message's account addresses array.
The transaction also marks some accounts as _read-only accounts_. The runtime permits read-only accounts to be read concurrently. If a program attempts to modify a read-only account, the transaction is rejected by the runtime. ### Signature Format
## Recent Blockhash Each digital signature is in the ed25519 binary format and consumes 64 bytes.
A Transaction includes a recent blockhash to prevent duplication and to give transactions lifetimes. Any transaction that is completely identical to a previous one is rejected, so adding a newer blockhash allows multiple transactions to repeat the exact same action. Transactions also have lifetimes that are defined by the blockhash, as any transaction whose blockhash is too old will be rejected.
## Instructions ## Message Format
Each instruction specifies a single program account \(which must be marked executable\), a subset of the transaction's accounts that should be passed to the program, and a data byte array instruction that is passed to the program. The program interprets the data array and operates on the accounts specified by the instructions. The program can return successfully, or with an error code. An error return causes the entire transaction to fail immediately. A message contains a [header](#message-header-format), followed by a
compact-array of [account addresses](#account-addresses-format), followed by a
recent [blockhash](#blockhash-format), followed by a compact-array of
[instructions](#instruction-format).
### Message Header Format
The message header contains three unsigned 8-bit values. The first value is the
number of required signatures in the containing transaction. The second value
is the number of those corresponding account addresses that are read-only. The
third value in the message header is the number of read-only account addresses
not requiring signatures.
### Account Addresses Format
The addresses that require signatures appear at the beginning of the account
address array, with addresses requesting write access first and read-only
accounts following. The addresses that do not require signatures follow the
addresses that do, again with read-write accounts first and read-only accounts
following.
### Blockhash Format
A blockhash contains a 32-byte SHA-256 hash. It is used to indicate when a
client last observed the ledger. Validators will reject transactions when the
blockhash is too old.
## Instruction Format
An instruction contains a program ID index, followed by a compact-array of
account address indexes, followed by a compact-array of opaque 8-bit data. The
program ID index is used to identify an on-chain program that can interpret the
opaque data. The program ID index is an unsigned 8-bit index to an account
address in the message's array of account addresses. The account address
indexes are each an unsigned 8-bit index into that same array.
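Taken together, the message and instruction layouts above can be sketched as plain Rust data types. This is an illustrative sketch only: the type and field names below are assumptions for exposition, not the solana-sdk definitions, and each `Vec` stands in for a compact-array on the wire.

```rust
/// Illustrative sketch of the layout described above (names are assumptions,
/// not the solana-sdk types).

/// Three unsigned 8-bit values, in the order given under "Message Header Format".
struct MessageHeader {
    num_required_signatures: u8,
    num_readonly_signed_accounts: u8,
    num_readonly_unsigned_accounts: u8,
}

/// One instruction: a program ID index plus two compact-arrays.
struct CompiledInstruction {
    program_id_index: u8,     // index into the message's account addresses
    account_indexes: Vec<u8>, // indexes into that same account address array
    data: Vec<u8>,            // opaque bytes interpreted by the program
}

/// A message: header, account addresses, recent blockhash, instructions.
/// On the wire each Vec is a compact-array (compact-u16 length, then items).
struct Message {
    header: MessageHeader,
    account_addresses: Vec<[u8; 32]>,
    recent_blockhash: [u8; 32],
    instructions: Vec<CompiledInstruction>,
}

/// A transaction: a compact-array of 64-byte ed25519 signatures, then a message.
struct Transaction {
    signatures: Vec<[u8; 64]>,
    message: Message,
}

fn main() {
    // Build a trivial message just to show the shape: two writable signer-style
    // addresses, one read-only program address, and a single instruction.
    let message = Message {
        header: MessageHeader {
            num_required_signatures: 1,
            num_readonly_signed_accounts: 0,
            num_readonly_unsigned_accounts: 1,
        },
        account_addresses: vec![[1u8; 32], [2u8; 32], [0u8; 32]],
        recent_blockhash: [0u8; 32],
        instructions: vec![CompiledInstruction {
            program_id_index: 2,
            account_indexes: vec![0, 1],
            data: vec![],
        }],
    };
    // The signatures array would hold one real ed25519 signature over the
    // serialized message; a zeroed placeholder is used here.
    let tx = Transaction { signatures: vec![[0u8; 64]], message };
    println!(
        "{} signature(s) present, {} required, {} account address(es)",
        tx.signatures.len(),
        tx.message.header.num_required_signatures,
        tx.message.account_addresses.len()
    );
}
```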
## Compact-Array Format
A compact-array is serialized as the array length, followed by each array item.
The array length is a special multi-byte encoding called compact-u16.
### Compact-u16 Format
A compact-u16 is a multi-byte encoding of 16 bits. The first byte contains the
lower 7 bits of the value in its lower 7 bits. If the value is above 0x7f, the
high bit is set and the next 7 bits of the value are placed into the lower 7
bits of a second byte. If the value is above 0x3fff, the high bit is set and
the remaining 2 bits of the value are placed into the lower 2 bits of a third
byte.
## Account Address Format
An account address is 32 bytes of arbitrary data. When the address requires a
digital signature, the runtime interprets it as the public key of an ed25519
keypair.
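As a concrete illustration of the Compact-u16 Format above, the following standalone Rust sketch encodes and decodes the 1-to-3-byte representation. The function names are assumptions for exposition; this is not the SDK's serializer, just the rule stated above applied directly.

```rust
use std::convert::TryFrom;

/// Encode a u16 as a compact-u16: 7 bits per byte, low bits first,
/// with the high bit set on every byte except the last.
fn encode_compact_u16(mut value: u16) -> Vec<u8> {
    let mut out = Vec::with_capacity(3);
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            out.push(byte);
            return out;
        }
        out.push(byte | 0x80);
    }
}

/// Decode a compact-u16 from the front of `bytes`, returning the value and
/// the number of bytes consumed, or None if the encoding is malformed.
fn decode_compact_u16(bytes: &[u8]) -> Option<(u16, usize)> {
    let mut value: u32 = 0;
    for (i, &byte) in bytes.iter().take(3).enumerate() {
        value |= u32::from(byte & 0x7f) << (7 * i);
        if byte & 0x80 == 0 {
            return u16::try_from(value).ok().map(|v| (v, i + 1));
        }
    }
    None
}

fn main() {
    // 0x7f fits in one byte, 0x3fff in two, anything larger needs three.
    for &value in &[0x05u16, 0x7f, 0x80, 0x3fff, 0x4000, 0xffff] {
        let encoded = encode_compact_u16(value);
        assert_eq!(decode_compact_u16(&encoded), Some((value, encoded.len())));
        println!("{:#06x} -> {:02x?}", value, encoded);
    }
}
```

Following the rule above, 0x7f encodes to a single byte `7f`, 0x80 to `80 01`, and 0xffff to `ff ff 03`, so an array length never costs more than three bytes.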

24
chacha-cuda/Cargo.toml Normal file
View File

@@ -0,0 +1,24 @@
[package]
name = "solana-chacha-cuda"
version = "0.23.1"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "0.23.1" }
solana-chacha = { path = "../chacha", version = "0.23.1" }
solana-ledger = { path = "../ledger", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-perf = { path = "../perf", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.1" }
[dev-dependencies]
hex-literal = "0.2.1"
[lib]
name = "solana_chacha_cuda"

View File

@@ -1,6 +1,6 @@
// Module used by validators to approve storage mining proofs in parallel using the GPU // Module used by validators to approve storage mining proofs in parallel using the GPU
use crate::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE}; use solana_chacha::chacha::{CHACHA_BLOCK_SIZE, CHACHA_KEY_SIZE};
use solana_ledger::blockstore::Blockstore; use solana_ledger::blockstore::Blockstore;
use solana_perf::perf_libs; use solana_perf::perf_libs;
use solana_sdk::hash::Hash; use solana_sdk::hash::Hash;
@@ -113,8 +113,8 @@ pub fn chacha_cbc_encrypt_file_many_keys(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::archiver::sample_file; use solana_archiver_utils::sample_file;
use crate::chacha::chacha_cbc_encrypt_ledger; use solana_chacha::chacha::chacha_cbc_encrypt_ledger;
use solana_ledger::entry::create_ticks; use solana_ledger::entry::create_ticks;
use solana_ledger::get_tmp_ledger_path; use solana_ledger::get_tmp_ledger_path;
use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT; use solana_sdk::clock::DEFAULT_SLOTS_PER_SEGMENT;

8
chacha-cuda/src/lib.rs Normal file
View File

@@ -0,0 +1,8 @@
#[macro_use]
extern crate log;
#[cfg(test)]
#[macro_use]
extern crate hex_literal;
pub mod chacha_cuda;

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "solana-chacha-sys" name = "solana-chacha-sys"
version = "0.22.10" version = "0.23.1"
description = "Solana chacha-sys" description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@@ -9,4 +9,4 @@ license = "Apache-2.0"
edition = "2018" edition = "2018"
[build-dependencies] [build-dependencies]
cc = "1.0.48" cc = "1.0.49"

1
chacha/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/farf/

25
chacha/Cargo.toml Normal file
View File

@@ -0,0 +1,25 @@
[package]
name = "solana-chacha"
version = "0.23.1"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "0.23.1" }
solana-ledger = { path = "../ledger", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.23.1" }
solana-perf = { path = "../perf", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.23.1" }
[dev-dependencies]
hex-literal = "0.2.1"
[lib]
name = "solana_chacha"

View File

@@ -74,13 +74,14 @@ pub fn chacha_cbc_encrypt_ledger(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::chacha::chacha_cbc_encrypt_ledger; use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::gen_keys::GenKeys; use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_ledger::blockstore::Blockstore; use solana_ledger::blockstore::Blockstore;
use solana_ledger::entry::Entry; use solana_ledger::entry::Entry;
use solana_ledger::get_tmp_ledger_path; use solana_ledger::get_tmp_ledger_path;
use solana_sdk::hash::{hash, Hash, Hasher}; use solana_sdk::hash::{hash, Hash, Hasher};
use solana_sdk::pubkey::Pubkey; use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::KeypairUtil; use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction; use solana_sdk::system_transaction;
use std::fs::remove_file; use std::fs::remove_file;
use std::fs::File; use std::fs::File;
@@ -92,8 +93,9 @@ mod tests {
let one = hash(&zero.as_ref()); let one = hash(&zero.as_ref());
let seed = [2u8; 32]; let seed = [2u8; 32];
let mut rnd = GenKeys::new(seed);
let keypair = rnd.gen_keypair(); let mut generator = ChaChaRng::from_seed(seed);
let keypair = Keypair::generate(&mut generator);
let mut id = one; let mut id = one;
let mut num_hashes = 0; let mut num_hashes = 0;
@@ -135,8 +137,9 @@ mod tests {
let out_path = tmp_file_path("test_encrypt_ledger"); let out_path = tmp_file_path("test_encrypt_ledger");
let seed = [2u8; 32]; let seed = [2u8; 32];
let mut rnd = GenKeys::new(seed);
let keypair = rnd.gen_keypair(); let mut generator = ChaChaRng::from_seed(seed);
let keypair = Keypair::generate(&mut generator);
let entries = make_tiny_deterministic_test_entries(slots_per_segment); let entries = make_tiny_deterministic_test_entries(slots_per_segment);
blockstore blockstore

8
chacha/src/lib.rs Normal file
View File

@@ -0,0 +1,8 @@
#[macro_use]
extern crate log;
#[cfg(test)]
#[macro_use]
extern crate hex_literal;
pub mod chacha;

View File

@@ -29,7 +29,7 @@ Start a local cluster and run sanity on it
-x - Add an extra validator (may be supplied multiple times) -x - Add an extra validator (may be supplied multiple times)
-r - Select the RPC endpoint hosted by a node that starts as -r - Select the RPC endpoint hosted by a node that starts as
a validator node. If unspecified the RPC endpoint hosted by a validator node. If unspecified the RPC endpoint hosted by
the bootstrap leader will be used. the bootstrap validator will be used.
-c - Reuse existing node/ledger configuration from a previous sanity -c - Reuse existing node/ledger configuration from a previous sanity
run run
@@ -74,7 +74,7 @@ source multinode-demo/common.sh
nodes=( nodes=(
"multinode-demo/faucet.sh" "multinode-demo/faucet.sh"
"multinode-demo/bootstrap-leader.sh \ "multinode-demo/bootstrap-validator.sh \
--no-restart \ --no-restart \
--init-complete-file init-complete-node1.log \ --init-complete-file init-complete-node1.log \
--dynamic-port-range 8000-8050" --dynamic-port-range 8000-8050"
@@ -170,7 +170,7 @@ startNodes() {
logs+=("$(getNodeLogFile "$i" "$cmd")") logs+=("$(getNodeLogFile "$i" "$cmd")")
fi fi
# 1 == bootstrap leader, wait until it boots before starting # 1 == bootstrap validator, wait until it boots before starting
# other validators # other validators
if [[ "$i" -eq 1 ]]; then if [[ "$i" -eq 1 ]]; then
SECONDS= SECONDS=
@@ -178,8 +178,8 @@ startNodes() {
( (
set -x set -x
$solana_cli --keypair config/bootstrap-leader/identity-keypair.json \ $solana_cli --keypair config/bootstrap-validator/identity-keypair.json \
--url http://127.0.0.1:8899 get-genesis-hash --url http://127.0.0.1:8899 genesis-hash
) | tee genesis-hash.log ) | tee genesis-hash.log
maybeExpectedGenesisHash="--expected-genesis-hash $(tail -n1 genesis-hash.log)" maybeExpectedGenesisHash="--expected-genesis-hash $(tail -n1 genesis-hash.log)"
fi fi
@@ -277,7 +277,7 @@ rollingNodeRestart() {
} }
verifyLedger() { verifyLedger() {
for ledger in bootstrap-leader validator; do for ledger in bootstrap-validator validator; do
echo "--- $ledger ledger verification" echo "--- $ledger ledger verification"
( (
set -x set -x
@@ -331,7 +331,7 @@ while [[ $iteration -le $iterations ]]; do
rm -rf $client_keypair rm -rf $client_keypair
) || flag_error ) || flag_error
echo "--- RPC API: bootstrap-leader getTransactionCount ($iteration)" echo "--- RPC API: bootstrap-validator getTransactionCount ($iteration)"
( (
set -x set -x
curl --retry 5 --retry-delay 2 --retry-connrefused \ curl --retry 5 --retry-delay 2 --retry-connrefused \
@@ -351,7 +351,7 @@ while [[ $iteration -le $iterations ]]; do
http://localhost:18899 http://localhost:18899
) || flag_error ) || flag_error
# Verify transaction count as reported by the bootstrap-leader node is advancing # Verify transaction count as reported by the bootstrap-validator node is advancing
transactionCount=$(sed -e 's/{"jsonrpc":"2.0","result":\([0-9]*\),"id":1}/\1/' log-transactionCount.txt) transactionCount=$(sed -e 's/{"jsonrpc":"2.0","result":\([0-9]*\),"id":1}/\1/' log-transactionCount.txt)
if [[ -n $lastTransactionCount ]]; then if [[ -n $lastTransactionCount ]]; then
echo "--- Transaction count check: $lastTransactionCount < $transactionCount" echo "--- Transaction count check: $lastTransactionCount < $transactionCount"

View File

@@ -20,6 +20,7 @@ declare prints=(
declare print_free_tree=( declare print_free_tree=(
'core/src' 'core/src'
'faucet/src' 'faucet/src'
'ledger/src'
'metrics/src' 'metrics/src'
'net-utils/src' 'net-utils/src'
'runtime/src' 'runtime/src'

View File

@@ -8,7 +8,6 @@ me=$(basename "$0")
echo --- update gitbook-cage echo --- update gitbook-cage
if [[ -n $CI_BRANCH ]]; then if [[ -n $CI_BRANCH ]]; then
( (
set -x set -x
( (
. ci/rust-version.sh stable . ci/rust-version.sh stable
@@ -25,74 +24,8 @@ if [[ -n $CI_BRANCH ]]; then
git reset --hard HEAD~ git reset --hard HEAD~
fi fi
) )
fi
source ci/rust-version.sh stable
eval "$(ci/channel-info.sh)"
if [[ -n $PUBLISH_BOOK_TAG ]]; then
CURRENT_TAG="$(git describe --tags)"
COMMIT_TO_PUBLISH="$(git rev-list -n 1 "${PUBLISH_BOOK_TAG}")"
# book is manually published at a specified release tag
if [[ $PUBLISH_BOOK_TAG != "$CURRENT_TAG" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
build:
message: "$BUILDKITE_MESSAGE"
commit: "$COMMIT_TO_PUBLISH"
env:
PUBLISH_BOOK_TAG: "$PUBLISH_BOOK_TAG"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
repo=git@github.com:solana-labs/book.git
BOOK="book"
else else
# book-edge and book-beta are published automatically on the tip of the branch echo CI_BRANCH not set
case $CHANNEL in
edge)
repo=git@github.com:solana-labs/book-edge.git
;;
beta)
repo=git@github.com:solana-labs/book-beta.git
;;
*)
echo "--- publish skipped"
exit 0
;;
esac
BOOK=$CHANNEL
fi fi
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "book/build.sh"
echo --- create book repo
(
set -x
cd book/html/
git init .
git add ./* ./.nojekyll
git config user.email maintainers@solana.com
git config user.name "$me"
git commit -m "${CI_COMMIT:-local}"
)
echo "--- publish $BOOK"
(
cd book/html/
git remote add origin $repo
git fetch origin master
if ! git diff HEAD origin/master --quiet; then
git push -f origin HEAD:master
else
echo "Content unchanged, publish skipped"
fi
)
exit 0 exit 0

View File

@@ -22,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings _ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
_ cargo +"$rust_stable" audit --version _ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0013 --ignore RUSTSEC-2018-0015 --ignore RUSTSEC-2019-0031 --ignore RUSTSEC-2020-0002 _ cargo +"$rust_stable" audit --ignore RUSTSEC-2020-0002
_ ci/nits.sh _ ci/nits.sh
_ ci/order-crates-for-publishing.py _ ci/order-crates-for-publishing.py
_ book/build.sh _ book/build.sh
@@ -30,7 +30,7 @@ _ ci/check-ssh-keys.sh
{ {
cd programs/bpf cd programs/bpf
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0031 _ cargo +"$rust_stable" audit
for project in rust/*/ ; do for project in rust/*/ ; do
echo "+++ do_bpf_checks $project" echo "+++ do_bpf_checks $project"
( (

View File

@@ -86,7 +86,7 @@ test-stable-perf)
fi fi
_ cargo +"$rust_stable" build --bins ${V:+--verbose} _ cargo +"$rust_stable" build --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture _ cargo +"$rust_stable" test --package solana-chacha-cuda --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
;; ;;
test-move) test-move)
ci/affects-files.sh \ ci/affects-files.sh \

View File

@@ -206,6 +206,7 @@ steps:
TESTNET_DB_HOST: "$TESTNET_DB_HOST" TESTNET_DB_HOST: "$TESTNET_DB_HOST"
GCE_NODE_COUNT: "$GCE_NODE_COUNT" GCE_NODE_COUNT: "$GCE_NODE_COUNT"
GCE_LOW_QUOTA_NODE_COUNT: "$GCE_LOW_QUOTA_NODE_COUNT" GCE_LOW_QUOTA_NODE_COUNT: "$GCE_LOW_QUOTA_NODE_COUNT"
RUST_LOG: "$RUST_LOG"
EOF EOF
) | buildkite-agent pipeline upload ) | buildkite-agent pipeline upload
exit 0 exit 0
@@ -377,7 +378,7 @@ deploy() {
( (
set -x set -x
ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \ ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \
-t "$CHANNEL_OR_TAG" -n 1 -c 0 -u -P \ -t "$CHANNEL_OR_TAG" -n 0 -c 0 -u -P \
-a testnet-solana-com --letsencrypt testnet.solana.com \ -a testnet-solana-com --letsencrypt testnet.solana.com \
--limit-ledger-size \ --limit-ledger-size \
${skipCreate:+-e} \ ${skipCreate:+-e} \
@@ -454,6 +455,10 @@ deploy() {
TDS_CLIENT_COUNT="1" TDS_CLIENT_COUNT="1"
fi fi
if [[ -n $TDS_SLOTS_PER_EPOCH ]]; then
maybeSlotsPerEpoch=(--slots-per-epoch "$TDS_SLOTS_PER_EPOCH")
fi
if [[ -z $ENABLE_GPU ]]; then if [[ -z $ENABLE_GPU ]]; then
maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100") maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
elif [[ $ENABLE_GPU == skip ]]; then elif [[ $ENABLE_GPU == skip ]]; then
@@ -539,7 +544,7 @@ deploy() {
${maybeInternalNodesLamports} \ ${maybeInternalNodesLamports} \
${maybeExternalAccountsFile} \ ${maybeExternalAccountsFile} \
--target-lamports-per-signature 0 \ --target-lamports-per-signature 0 \
--slots-per-epoch 4096 \ "${maybeSlotsPerEpoch[@]}" \
${maybeAdditionalDisk} ${maybeAdditionalDisk}
) )
;; ;;

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "solana-clap-utils" name = "solana-clap-utils"
version = "0.22.10" version = "0.23.1"
description = "Solana utilities for the clap" description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
@@ -12,8 +12,8 @@ edition = "2018"
clap = "2.33.0" clap = "2.33.0"
rpassword = "4.0" rpassword = "4.0"
semver = "0.9.0" semver = "0.9.0"
solana-sdk = { path = "../sdk", version = "0.22.10" } solana-sdk = { path = "../sdk", version = "0.23.1" }
tiny-bip39 = "0.6.2" tiny-bip39 = "0.7.0"
url = "2.1.0" url = "2.1.0"
chrono = "0.4" chrono = "0.4"

View File

@@ -64,6 +64,20 @@ pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey())) value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey()))
} }
pub fn pubkeys_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Pubkey>> {
matches.values_of(name).map(|values| {
values
.map(|value| {
value.parse::<Pubkey>().unwrap_or_else(|_| {
read_keypair_file(value)
.expect("read_keypair_file failed")
.pubkey()
})
})
.collect()
})
}
// Return pubkey/signature pairs for a string of the form pubkey=signature // Return pubkey/signature pairs for a string of the form pubkey=signature
pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubkey, Signature)>> { pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubkey, Signature)>> {
matches.values_of(name).map(|values| { matches.values_of(name).map(|values| {
@@ -154,7 +168,7 @@ mod tests {
#[test] #[test]
fn test_keypair_of() { fn test_keypair_of() {
let keypair = Keypair::new(); let keypair = Keypair::new();
let outfile = tmp_file_path("test_gen_keypair_file.json", &keypair.pubkey()); let outfile = tmp_file_path("test_keypair_of.json", &keypair.pubkey());
let _ = write_keypair_file(&keypair, &outfile).unwrap(); let _ = write_keypair_file(&keypair, &outfile).unwrap();
let matches = app() let matches = app()
@@ -178,7 +192,7 @@ mod tests {
#[test] #[test]
fn test_pubkey_of() { fn test_pubkey_of() {
let keypair = Keypair::new(); let keypair = Keypair::new();
let outfile = tmp_file_path("test_gen_keypair_file.json", &keypair.pubkey()); let outfile = tmp_file_path("test_pubkey_of.json", &keypair.pubkey());
let _ = write_keypair_file(&keypair, &outfile).unwrap(); let _ = write_keypair_file(&keypair, &outfile).unwrap();
let matches = app() let matches = app()
@@ -202,6 +216,26 @@ mod tests {
fs::remove_file(&outfile).unwrap(); fs::remove_file(&outfile).unwrap();
} }
#[test]
fn test_pubkeys_of() {
let keypair = Keypair::new();
let outfile = tmp_file_path("test_pubkeys_of.json", &keypair.pubkey());
let _ = write_keypair_file(&keypair, &outfile).unwrap();
let matches = app().clone().get_matches_from(vec![
"test",
"--multiple",
&keypair.pubkey().to_string(),
"--multiple",
&outfile,
]);
assert_eq!(
pubkeys_of(&matches, "multiple"),
Some(vec![keypair.pubkey(), keypair.pubkey()])
);
fs::remove_file(&outfile).unwrap();
}
#[test] #[test]
fn test_pubkeys_sigs_of() { fn test_pubkeys_sigs_of() {
let key1 = Pubkey::new_rand(); let key1 = Pubkey::new_rand();

View File

@@ -21,7 +21,7 @@ pub const ASK_KEYWORD: &str = "ASK";
pub const ASK_SEED_PHRASE_ARG: ArgConstant<'static> = ArgConstant { pub const ASK_SEED_PHRASE_ARG: ArgConstant<'static> = ArgConstant {
long: "ask-seed-phrase", long: "ask-seed-phrase",
name: "ask_seed_phrase", name: "ask_seed_phrase",
help: "Securely recover a keypair using a seed phrase and optional passphrase", help: "Recover a keypair using a seed phrase and optional passphrase",
}; };
pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant { pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
@@ -80,7 +80,7 @@ pub fn keypair_from_seed_phrase(
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)? keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
} else { } else {
let sanitized = sanitize_seed_phrase(seed_phrase); let sanitized = sanitize_seed_phrase(seed_phrase);
let mnemonic = Mnemonic::from_phrase(sanitized, Language::English)?; let mnemonic = Mnemonic::from_phrase(&sanitized, Language::English)?;
let passphrase = prompt_passphrase(&passphrase_prompt)?; let passphrase = prompt_passphrase(&passphrase_prompt)?;
let seed = Seed::new(&mnemonic, &passphrase); let seed = Seed::new(&mnemonic, &passphrase);
keypair_from_seed(seed.as_bytes())? keypair_from_seed(seed.as_bytes())?

View File

@@ -1,19 +1,16 @@
[package] [package]
name = "solana-fixed-buf"
version = "0.22.10"
description = "A fixed-size byte array that supports bincode serde"
authors = ["Solana Maintainers <maintainers@solana.com>"] authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "0.23.1"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
edition = "2018"
[dependencies] [dependencies]
bincode = "1.2.1" dirs = "2.0.2"
lazy_static = "1.4.0"
serde = "1.0.104" serde = "1.0.104"
[lib]
name = "solana_fixed_buf"
[dev-dependencies]
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_yaml = "0.8.11"

View File

@@ -1,8 +1,10 @@
// Wallet settings that can be configured for long-term use // Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use std::fs::{create_dir_all, File}; use std::{
use std::io::{self, Write}; fs::{create_dir_all, File},
use std::path::Path; io::{self, Write},
path::Path,
};
lazy_static! { lazy_static! {
pub static ref CONFIG_FILE: Option<String> = { pub static ref CONFIG_FILE: Option<String> = {

4
cli-config/src/lib.rs Normal file
View File

@@ -0,0 +1,4 @@
#[macro_use]
extern crate lazy_static;
pub mod config;

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018" edition = "2018"
name = "solana-cli" name = "solana-cli"
description = "Blockchain, Rebuilt for Scale" description = "Blockchain, Rebuilt for Scale"
version = "0.22.10" version = "0.23.1"
repository = "https://github.com/solana-labs/solana" repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0" license = "Apache-2.0"
homepage = "https://solana.com/" homepage = "https://solana.com/"
@@ -17,35 +17,35 @@ criterion-stats = "0.3.0"
ctrlc = { version = "3.1.3", features = ["termination"] } ctrlc = { version = "3.1.3", features = ["termination"] }
console = "0.9.1" console = "0.9.1"
dirs = "2.0.2" dirs = "2.0.2"
lazy_static = "1.4.0"
log = "0.4.8" log = "0.4.8"
indicatif = "0.13.0" indicatif = "0.13.0"
humantime = "1.3.0" humantime = "2.0.0"
num-traits = "0.2" num-traits = "0.2"
pretty-hex = "0.1.1" pretty-hex = "0.1.1"
reqwest = { version = "0.9.24", default-features = false, features = ["rustls-tls"] } reqwest = { version = "0.10.1", default-features = false, features = ["blocking", "rustls-tls"] }
serde = "1.0.104" serde = "1.0.104"
serde_derive = "1.0.103" serde_derive = "1.0.103"
serde_json = "1.0.44" serde_json = "1.0.44"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
solana-budget-program = { path = "../programs/budget", version = "0.22.10" } solana-budget-program = { path = "../programs/budget", version = "0.23.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.10" } solana-clap-utils = { path = "../clap-utils", version = "0.23.1" }
solana-client = { path = "../client", version = "0.22.10" } solana-cli-config = { path = "../cli-config", version = "0.23.1" }
solana-config-program = { path = "../programs/config", version = "0.22.10" } solana-client = { path = "../client", version = "0.23.1" }
solana-faucet = { path = "../faucet", version = "0.22.10" } solana-config-program = { path = "../programs/config", version = "0.23.1" }
solana-logger = { path = "../logger", version = "0.22.10" } solana-faucet = { path = "../faucet", version = "0.23.1" }
solana-net-utils = { path = "../net-utils", version = "0.22.10" } solana-logger = { path = "../logger", version = "0.23.1" }
solana-runtime = { path = "../runtime", version = "0.22.10" } solana-net-utils = { path = "../net-utils", version = "0.23.1" }
solana-sdk = { path = "../sdk", version = "0.22.10" } solana-runtime = { path = "../runtime", version = "0.23.1" }
solana-stake-program = { path = "../programs/stake", version = "0.22.10" } solana-sdk = { path = "../sdk", version = "0.23.1" }
solana-storage-program = { path = "../programs/storage", version = "0.22.10" } solana-stake-program = { path = "../programs/stake", version = "0.23.1" }
solana-vote-program = { path = "../programs/vote", version = "0.22.10" } solana-storage-program = { path = "../programs/storage", version = "0.23.1" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.10" } solana-vote-program = { path = "../programs/vote", version = "0.23.1" }
url = "2.1.0" solana-vote-signer = { path = "../vote-signer", version = "0.23.1" }
url = "2.1.1"
[dev-dependencies] [dev-dependencies]
solana-core = { path = "../core", version = "0.22.10" } solana-core = { path = "../core", version = "0.23.1" }
solana-budget-program = { path = "../programs/budget", version = "0.22.10" } solana-budget-program = { path = "../programs/budget", version = "0.23.1" }
tempfile = "3.1.0" tempfile = "3.1.0"
[[bin]] [[bin]]

View File

@@ -2,6 +2,7 @@ use crate::{
cluster_query::*, cluster_query::*,
display::{println_name_value, println_signers}, display::{println_name_value, println_signers},
nonce::{self, *}, nonce::{self, *},
offline::*,
stake::*, stake::*,
storage::*, storage::*,
validator_info::*, validator_info::*,
@@ -31,7 +32,7 @@ use solana_sdk::{
message::Message, message::Message,
native_token::lamports_to_sol, native_token::lamports_to_sol,
pubkey::Pubkey, pubkey::Pubkey,
signature::{Keypair, KeypairUtil, Signature}, signature::{keypair_from_seed, Keypair, KeypairUtil, Signature},
system_instruction::{create_address_with_seed, SystemError, MAX_ADDRESS_SEED_LEN}, system_instruction::{create_address_with_seed, SystemError, MAX_ADDRESS_SEED_LEN},
system_transaction, system_transaction,
transaction::{Transaction, TransactionError}, transaction::{Transaction, TransactionError},
@@ -85,7 +86,8 @@ impl SigningAuthority {
matches: &ArgMatches<'_>, matches: &ArgMatches<'_>,
name: &str, name: &str,
signers: Option<&[(Pubkey, Signature)]>, signers: Option<&[(Pubkey, Signature)]>,
) -> Result<Self, CliError> { ) -> Result<Option<Self>, CliError> {
if matches.is_present(name) {
keypair_of(matches, name) keypair_of(matches, name)
.map(|keypair| keypair.into()) .map(|keypair| keypair.into())
.or_else(|| { .or_else(|| {
@@ -100,6 +102,10 @@ impl SigningAuthority {
.map(|pubkey| pubkey.into()) .map(|pubkey| pubkey.into())
}) })
.ok_or_else(|| CliError::BadParameter("Invalid authority".to_string())) .ok_or_else(|| CliError::BadParameter("Invalid authority".to_string()))
.map(Some)
} else {
Ok(None)
}
} }
pub fn keypair(&self) -> &Keypair { pub fn keypair(&self) -> &Keypair {
@@ -125,7 +131,7 @@ impl From<Keypair> for SigningAuthority {
impl From<Pubkey> for SigningAuthority { impl From<Pubkey> for SigningAuthority {
fn from(pubkey: Pubkey) -> Self { fn from(pubkey: Pubkey) -> Self {
SigningAuthority::Offline(pubkey, Keypair::new()) SigningAuthority::Offline(pubkey, keypair_from_seed(pubkey.as_ref()).unwrap())
} }
} }
@@ -160,7 +166,7 @@ pub struct PayCommand {
pub cancelable: bool, pub cancelable: bool,
pub sign_only: bool, pub sign_only: bool,
pub signers: Option<Vec<(Pubkey, Signature)>>, pub signers: Option<Vec<(Pubkey, Signature)>>,
pub blockhash: Option<Hash>, pub blockhash_query: BlockhashQuery,
pub nonce_account: Option<Pubkey>, pub nonce_account: Option<Pubkey>,
pub nonce_authority: Option<SigningAuthority>, pub nonce_authority: Option<SigningAuthority>,
} }
@@ -205,6 +211,10 @@ pub enum CliCommand {
slot_limit: Option<u64>, slot_limit: Option<u64>,
}, },
ShowGossip, ShowGossip,
ShowStakes {
use_lamports_unit: bool,
vote_account_pubkeys: Option<Vec<Pubkey>>,
},
ShowValidators { ShowValidators {
use_lamports_unit: bool, use_lamports_unit: bool,
}, },
@@ -216,6 +226,7 @@ pub enum CliCommand {
}, },
CreateNonceAccount { CreateNonceAccount {
nonce_account: KeypairEq, nonce_account: KeypairEq,
seed: Option<String>,
nonce_authority: Option<Pubkey>, nonce_authority: Option<Pubkey>,
lamports: u64, lamports: u64,
}, },
@@ -239,6 +250,7 @@ pub enum CliCommand {
// Stake Commands // Stake Commands
CreateStakeAccount { CreateStakeAccount {
stake_account: KeypairEq, stake_account: KeypairEq,
seed: Option<String>,
staker: Option<Pubkey>, staker: Option<Pubkey>,
withdrawer: Option<Pubkey>, withdrawer: Option<Pubkey>,
lockup: Lockup, lockup: Lockup,
@@ -249,7 +261,7 @@ pub enum CliCommand {
stake_authority: Option<SigningAuthority>, stake_authority: Option<SigningAuthority>,
sign_only: bool, sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>, signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>, blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>, nonce_account: Option<Pubkey>,
nonce_authority: Option<SigningAuthority>, nonce_authority: Option<SigningAuthority>,
}, },
@@ -260,11 +272,10 @@ pub enum CliCommand {
force: bool, force: bool,
sign_only: bool, sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>, signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>, blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>, nonce_account: Option<Pubkey>,
nonce_authority: Option<SigningAuthority>, nonce_authority: Option<SigningAuthority>,
}, },
RedeemVoteCredits(Pubkey, Pubkey),
ShowStakeHistory { ShowStakeHistory {
use_lamports_unit: bool, use_lamports_unit: bool,
}, },
@@ -279,7 +290,7 @@ pub enum CliCommand {
authority: Option<SigningAuthority>, authority: Option<SigningAuthority>,
sign_only: bool, sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>, signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>, blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>, nonce_account: Option<Pubkey>,
nonce_authority: Option<SigningAuthority>, nonce_authority: Option<SigningAuthority>,
}, },
@@ -310,6 +321,7 @@ pub enum CliCommand {
// Vote Commands // Vote Commands
CreateVoteAccount { CreateVoteAccount {
vote_account: KeypairEq, vote_account: KeypairEq,
seed: Option<String>,
node_pubkey: Pubkey, node_pubkey: Pubkey,
authorized_voter: Option<Pubkey>, authorized_voter: Option<Pubkey>,
authorized_withdrawer: Option<Pubkey>, authorized_withdrawer: Option<Pubkey>,
@@ -442,31 +454,32 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
command: CliCommand::Fees, command: CliCommand::Fees,
require_keypair: false, require_keypair: false,
}), }),
("get-block-time", Some(matches)) => parse_get_block_time(matches), ("block-time", Some(matches)) => parse_get_block_time(matches),
("get-epoch-info", Some(matches)) => parse_get_epoch_info(matches), ("epoch-info", Some(matches)) => parse_get_epoch_info(matches),
("get-genesis-hash", Some(_matches)) => Ok(CliCommandInfo { ("genesis-hash", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::GetGenesisHash, command: CliCommand::GetGenesisHash,
require_keypair: false, require_keypair: false,
}), }),
("get-slot", Some(matches)) => parse_get_slot(matches), ("slot", Some(matches)) => parse_get_slot(matches),
("get-transaction-count", Some(matches)) => parse_get_transaction_count(matches), ("transaction-count", Some(matches)) => parse_get_transaction_count(matches),
("leader-schedule", Some(_matches)) => Ok(CliCommandInfo { ("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::LeaderSchedule, command: CliCommand::LeaderSchedule,
require_keypair: false, require_keypair: false,
}), }),
("ping", Some(matches)) => parse_cluster_ping(matches), ("ping", Some(matches)) => parse_cluster_ping(matches),
("show-block-production", Some(matches)) => parse_show_block_production(matches), ("block-production", Some(matches)) => parse_show_block_production(matches),
("show-gossip", Some(_matches)) => Ok(CliCommandInfo { ("gossip", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::ShowGossip, command: CliCommand::ShowGossip,
require_keypair: false, require_keypair: false,
}), }),
("show-validators", Some(matches)) => parse_show_validators(matches), ("stakes", Some(matches)) => parse_show_stakes(matches),
("validators", Some(matches)) => parse_show_validators(matches),
// Nonce Commands // Nonce Commands
("authorize-nonce-account", Some(matches)) => parse_authorize_nonce_account(matches), ("authorize-nonce-account", Some(matches)) => parse_authorize_nonce_account(matches),
("create-nonce-account", Some(matches)) => parse_nonce_create_account(matches), ("create-nonce-account", Some(matches)) => parse_nonce_create_account(matches),
("get-nonce", Some(matches)) => parse_get_nonce(matches), ("nonce", Some(matches)) => parse_get_nonce(matches),
("new-nonce", Some(matches)) => parse_new_nonce(matches), ("new-nonce", Some(matches)) => parse_new_nonce(matches),
("show-nonce-account", Some(matches)) => parse_show_nonce_account(matches), ("nonce-account", Some(matches)) => parse_show_nonce_account(matches),
("withdraw-from-nonce-account", Some(matches)) => { ("withdraw-from-nonce-account", Some(matches)) => {
parse_withdraw_from_nonce_account(matches) parse_withdraw_from_nonce_account(matches)
} }
@@ -486,9 +499,8 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
("stake-authorize-withdrawer", Some(matches)) => { ("stake-authorize-withdrawer", Some(matches)) => {
parse_stake_authorize(matches, StakeAuthorize::Withdrawer) parse_stake_authorize(matches, StakeAuthorize::Withdrawer)
} }
("redeem-vote-credits", Some(matches)) => parse_redeem_vote_credits(matches), ("stake-account", Some(matches)) => parse_show_stake_account(matches),
("show-stake-account", Some(matches)) => parse_show_stake_account(matches), ("stake-history", Some(matches)) => parse_show_stake_history(matches),
("show-stake-history", Some(matches)) => parse_show_stake_history(matches),
// Storage Commands // Storage Commands
("create-archiver-storage-account", Some(matches)) => { ("create-archiver-storage-account", Some(matches)) => {
parse_storage_create_archiver_account(matches) parse_storage_create_archiver_account(matches)
@@ -497,17 +509,11 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
parse_storage_create_validator_account(matches) parse_storage_create_validator_account(matches)
} }
("claim-storage-reward", Some(matches)) => parse_storage_claim_reward(matches), ("claim-storage-reward", Some(matches)) => parse_storage_claim_reward(matches),
("show-storage-account", Some(matches)) => parse_storage_get_account_command(matches), ("storage-account", Some(matches)) => parse_storage_get_account_command(matches),
// Validator Info Commands // Validator Info Commands
("validator-info", Some(matches)) => match matches.subcommand() { ("validator-info", Some(matches)) => match matches.subcommand() {
("publish", Some(matches)) => parse_validator_info_command(matches), ("publish", Some(matches)) => parse_validator_info_command(matches),
("get", Some(matches)) => parse_get_validator_info_command(matches), ("get", Some(matches)) => parse_get_validator_info_command(matches),
("", None) => {
eprintln!("{}", matches.usage());
Err(CliError::CommandNotRecognized(
"no validator-info subcommand given".to_string(),
))
}
_ => unreachable!(), _ => unreachable!(),
}, },
// Vote Commands // Vote Commands
@@ -519,7 +525,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
("vote-authorize-withdrawer", Some(matches)) => { ("vote-authorize-withdrawer", Some(matches)) => {
parse_vote_authorize(matches, VoteAuthorize::Withdrawer) parse_vote_authorize(matches, VoteAuthorize::Withdrawer)
} }
("show-vote-account", Some(matches)) => parse_vote_get_account_command(matches), ("vote-account", Some(matches)) => parse_vote_get_account_command(matches),
("uptime", Some(matches)) => parse_vote_uptime_command(matches), ("uptime", Some(matches)) => parse_vote_uptime_command(matches),
// Wallet Commands // Wallet Commands
("address", Some(_matches)) => Ok(CliCommandInfo { ("address", Some(_matches)) => Ok(CliCommandInfo {
@@ -604,19 +610,15 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
let timestamp_pubkey = value_of(&matches, "timestamp_pubkey"); let timestamp_pubkey = value_of(&matches, "timestamp_pubkey");
let witnesses = values_of(&matches, "witness"); let witnesses = values_of(&matches, "witness");
let cancelable = matches.is_present("cancelable"); let cancelable = matches.is_present("cancelable");
let sign_only = matches.is_present("sign_only"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, "signer"); let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let blockhash = value_of(&matches, "blockhash"); let blockhash_query = BlockhashQuery::new_from_matches(&matches);
let nonce_account = pubkey_of(&matches, NONCE_ARG.name); let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) { let nonce_authority = SigningAuthority::new_from_matches(
Some(SigningAuthority::new_from_matches(
&matches, &matches,
NONCE_AUTHORITY_ARG.name, NONCE_AUTHORITY_ARG.name,
signers.as_deref(), signers.as_deref(),
)?) )?;
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
@@ -628,14 +630,14 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
cancelable, cancelable,
sign_only, sign_only,
signers, signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
nonce_authority, nonce_authority,
}), }),
require_keypair: true, require_keypair: true,
}) })
} }
("show-account", Some(matches)) => { ("account", Some(matches)) => {
let account_pubkey = pubkey_of(matches, "account_pubkey").unwrap(); let account_pubkey = pubkey_of(matches, "account_pubkey").unwrap();
let output_file = matches.value_of("output_file"); let output_file = matches.value_of("output_file");
let use_lamports_unit = matches.is_present("lamports"); let use_lamports_unit = matches.is_present("lamports");
@@ -1007,7 +1009,7 @@ fn process_pay(
cancelable: bool, cancelable: bool,
sign_only: bool, sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>, signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>, blockhash_query: &BlockhashQuery,
nonce_account: Option<Pubkey>, nonce_account: Option<Pubkey>,
nonce_authority: Option<&SigningAuthority>, nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult { ) -> ProcessResult {
@@ -1016,8 +1018,7 @@ fn process_pay(
(to, "to".to_string()), (to, "to".to_string()),
)?; )?;
let (blockhash, fee_calculator) = let (blockhash, fee_calculator) = blockhash_query.get_blockhash_fee_calculator(rpc_client)?;
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
let cancelable = if cancelable { let cancelable = if cancelable {
Some(config.keypair.pubkey()) Some(config.keypair.pubkey())
@@ -1285,6 +1286,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
process_show_block_production(&rpc_client, config, *epoch, *slot_limit) process_show_block_production(&rpc_client, config, *epoch, *slot_limit)
} }
CliCommand::ShowGossip => process_show_gossip(&rpc_client), CliCommand::ShowGossip => process_show_gossip(&rpc_client),
CliCommand::ShowStakes {
use_lamports_unit,
vote_account_pubkeys,
} => process_show_stakes(
&rpc_client,
*use_lamports_unit,
vote_account_pubkeys.as_deref(),
),
CliCommand::ShowValidators { use_lamports_unit } => { CliCommand::ShowValidators { use_lamports_unit } => {
process_show_validators(&rpc_client, *use_lamports_unit) process_show_validators(&rpc_client, *use_lamports_unit)
} }
@@ -1306,12 +1315,14 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Create nonce account // Create nonce account
CliCommand::CreateNonceAccount { CliCommand::CreateNonceAccount {
nonce_account, nonce_account,
seed,
nonce_authority, nonce_authority,
lamports, lamports,
} => process_create_nonce_account( } => process_create_nonce_account(
&rpc_client, &rpc_client,
config, config,
nonce_account, nonce_account,
seed.clone(),
*nonce_authority, *nonce_authority,
*lamports, *lamports,
), ),
@@ -1356,6 +1367,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Create stake account // Create stake account
CliCommand::CreateStakeAccount { CliCommand::CreateStakeAccount {
stake_account, stake_account,
seed,
staker, staker,
withdrawer, withdrawer,
lockup, lockup,
@@ -1364,6 +1376,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&rpc_client, &rpc_client,
config, config,
stake_account, stake_account,
seed,
staker, staker,
withdrawer, withdrawer,
lockup, lockup,
@@ -1375,7 +1388,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
ref stake_authority, ref stake_authority,
sign_only, sign_only,
ref signers, ref signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
ref nonce_authority, ref nonce_authority,
} => process_deactivate_stake_account( } => process_deactivate_stake_account(
@@ -1385,7 +1398,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
stake_authority.as_ref(), stake_authority.as_ref(),
*sign_only, *sign_only,
signers, signers,
*blockhash, blockhash_query,
*nonce_account, *nonce_account,
nonce_authority.as_ref(), nonce_authority.as_ref(),
), ),
@@ -1396,7 +1409,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
force, force,
sign_only, sign_only,
ref signers, ref signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
ref nonce_authority, ref nonce_authority,
} => process_delegate_stake( } => process_delegate_stake(
@@ -1408,18 +1421,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*force, *force,
*sign_only, *sign_only,
signers, signers,
*blockhash, blockhash_query,
*nonce_account, *nonce_account,
nonce_authority.as_ref(), nonce_authority.as_ref(),
), ),
CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey) => {
process_redeem_vote_credits(
&rpc_client,
config,
&stake_account_pubkey,
&vote_account_pubkey,
)
}
CliCommand::ShowStakeAccount { CliCommand::ShowStakeAccount {
pubkey: stake_account_pubkey, pubkey: stake_account_pubkey,
use_lamports_unit, use_lamports_unit,
@@ -1439,7 +1444,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
ref authority, ref authority,
sign_only, sign_only,
ref signers, ref signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
ref nonce_authority, ref nonce_authority,
} => process_stake_authorize( } => process_stake_authorize(
@@ -1451,7 +1456,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
authority.as_ref(), authority.as_ref(),
*sign_only, *sign_only,
signers, signers,
*blockhash, blockhash_query,
*nonce_account, *nonce_account,
nonce_authority.as_ref(), nonce_authority.as_ref(),
), ),
@@ -1521,6 +1526,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Create vote account // Create vote account
CliCommand::CreateVoteAccount { CliCommand::CreateVoteAccount {
vote_account, vote_account,
seed,
node_pubkey, node_pubkey,
authorized_voter, authorized_voter,
authorized_withdrawer, authorized_withdrawer,
@@ -1529,6 +1535,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&rpc_client, &rpc_client,
config, config,
vote_account, vote_account,
seed,
&node_pubkey, &node_pubkey,
authorized_voter, authorized_voter,
authorized_withdrawer, authorized_withdrawer,
@@ -1621,7 +1628,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
cancelable, cancelable,
sign_only, sign_only,
ref signers, ref signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
ref nonce_authority, ref nonce_authority,
}) => process_pay( }) => process_pay(
@@ -1635,7 +1642,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
*cancelable, *cancelable,
*sign_only, *sign_only,
signers, signers,
*blockhash, blockhash_query,
*nonce_account, *nonce_account,
nonce_authority.as_ref(), nonce_authority.as_ref(),
), ),
@@ -1871,7 +1878,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
) )
.subcommand( .subcommand(
SubCommand::with_name("create-address-with-seed") SubCommand::with_name("create-address-with-seed")
.about("Generate a dervied account address with a seed") .about("Generate a derived account address with a seed")
.arg( .arg(
Arg::with_name("seed") Arg::with_name("seed")
.index(1) .index(1)
@@ -1973,31 +1980,9 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.long("cancelable") .long("cancelable")
.takes_value(false), .takes_value(false),
) )
.arg( .offline_args()
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(nonce_arg()) .arg(nonce_arg())
.arg(nonce_authority_arg()) .arg(nonce_authority_arg()),
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
),
) )
.subcommand( .subcommand(
SubCommand::with_name("send-signature") SubCommand::with_name("send-signature")
@@ -2049,8 +2034,9 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
), ),
) )
.subcommand( .subcommand(
SubCommand::with_name("show-account") SubCommand::with_name("account")
.about("Show the contents of an account") .about("Show the contents of an account")
.alias("account")
.arg( .arg(
Arg::with_name("account_pubkey") Arg::with_name("account_pubkey")
.index(1) .index(1)
@@ -2091,6 +2077,7 @@ mod tests {
use solana_sdk::{ use solana_sdk::{
account::Account, account::Account,
nonce_state::{Meta as NonceMeta, NonceState}, nonce_state::{Meta as NonceMeta, NonceState},
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair_file}, signature::{read_keypair_file, write_keypair_file},
system_program, system_program,
transaction::TransactionError, transaction::TransactionError,
@@ -2111,6 +2098,13 @@ mod tests {
path path
} }
#[test]
fn test_signing_authority_dummy_keypairs() {
let signing_authority: SigningAuthority = Pubkey::new(&[1u8; 32]).into();
assert_eq!(signing_authority, Pubkey::new(&[1u8; 32]).into());
assert_ne!(signing_authority, Pubkey::new(&[2u8; 32]).into());
}
#[test] #[test]
fn test_cli_parse_command() { fn test_cli_parse_command() {
let test_commands = app("test", "desc", "version"); let test_commands = app("test", "desc", "version");
@@ -2374,12 +2368,16 @@ mod tests {
); );
// Test Pay Subcommand w/ sign-only // Test Pay Subcommand w/ sign-only
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_pay = test_commands.clone().get_matches_from(vec![ let test_pay = test_commands.clone().get_matches_from(vec![
"test", "test",
"pay", "pay",
&pubkey_string, &pubkey_string,
"50", "50",
"lamports", "lamports",
"--blockhash",
&blockhash_string,
"--sign-only", "--sign-only",
]); ]);
assert_eq!( assert_eq!(
@@ -2388,6 +2386,7 @@ mod tests {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
lamports: 50, lamports: 50,
to: pubkey, to: pubkey,
blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
sign_only: true, sign_only: true,
..PayCommand::default() ..PayCommand::default()
}), }),
@@ -2405,6 +2404,8 @@ mod tests {
&pubkey_string, &pubkey_string,
"50", "50",
"lamports", "lamports",
"--blockhash",
&blockhash_string,
"--signer", "--signer",
&signer1, &signer1,
]); ]);
@@ -2414,6 +2415,7 @@ mod tests {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
lamports: 50, lamports: 50,
to: pubkey, to: pubkey,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
signers: Some(vec![(key1, sig1)]), signers: Some(vec![(key1, sig1)]),
..PayCommand::default() ..PayCommand::default()
}), }),
@@ -2431,6 +2433,8 @@ mod tests {
&pubkey_string, &pubkey_string,
"50", "50",
"lamports", "lamports",
"--blockhash",
&blockhash_string,
"--signer", "--signer",
&signer1, &signer1,
"--signer", "--signer",
@@ -2442,6 +2446,7 @@ mod tests {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
lamports: 50, lamports: 50,
to: pubkey, to: pubkey,
blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
signers: Some(vec![(key1, sig1), (key2, sig2)]), signers: Some(vec![(key1, sig1), (key2, sig2)]),
..PayCommand::default() ..PayCommand::default()
}), }),
@@ -2450,8 +2455,6 @@ mod tests {
); );
// Test Pay Subcommand w/ Blockhash // Test Pay Subcommand w/ Blockhash
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_pay = test_commands.clone().get_matches_from(vec![ let test_pay = test_commands.clone().get_matches_from(vec![
"test", "test",
"pay", "pay",
@@ -2467,7 +2470,7 @@ mod tests {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
lamports: 50, lamports: 50,
to: pubkey, to: pubkey,
blockhash: Some(blockhash), blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
..PayCommand::default() ..PayCommand::default()
}), }),
require_keypair: true require_keypair: true
@@ -2494,7 +2497,7 @@ mod tests {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
lamports: 50, lamports: 50,
to: pubkey, to: pubkey,
blockhash: Some(blockhash), blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(pubkey), nonce_account: Some(pubkey),
..PayCommand::default() ..PayCommand::default()
}), }),
@@ -2525,7 +2528,7 @@ mod tests {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
lamports: 50, lamports: 50,
to: pubkey, to: pubkey,
blockhash: Some(blockhash), blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(pubkey), nonce_account: Some(pubkey),
nonce_authority: Some(keypair.into()), nonce_authority: Some(keypair.into()),
..PayCommand::default() ..PayCommand::default()
@@ -2561,7 +2564,7 @@ mod tests {
command: CliCommand::Pay(PayCommand { command: CliCommand::Pay(PayCommand {
lamports: 50, lamports: 50,
to: pubkey, to: pubkey,
blockhash: Some(blockhash), blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(pubkey), nonce_account: Some(pubkey),
nonce_authority: Some(authority_pubkey.into()), nonce_authority: Some(authority_pubkey.into()),
signers: Some(vec![(authority_pubkey, sig)]), signers: Some(vec![(authority_pubkey, sig)]),
@@ -2703,6 +2706,7 @@ mod tests {
let node_pubkey = Pubkey::new_rand(); let node_pubkey = Pubkey::new_rand();
config.command = CliCommand::CreateVoteAccount { config.command = CliCommand::CreateVoteAccount {
vote_account: bob_keypair.into(), vote_account: bob_keypair.into(),
seed: None,
node_pubkey, node_pubkey,
authorized_voter: Some(bob_pubkey), authorized_voter: Some(bob_pubkey),
authorized_withdrawer: Some(bob_pubkey), authorized_withdrawer: Some(bob_pubkey),
@@ -2734,6 +2738,7 @@ mod tests {
let custodian = Pubkey::new_rand(); let custodian = Pubkey::new_rand();
config.command = CliCommand::CreateStakeAccount { config.command = CliCommand::CreateStakeAccount {
stake_account: bob_keypair.into(), stake_account: bob_keypair.into(),
seed: None,
staker: None, staker: None,
withdrawer: None, withdrawer: None,
lockup: Lockup { lockup: Lockup {
@@ -2763,7 +2768,7 @@ mod tests {
stake_authority: None, stake_authority: None,
sign_only: false, sign_only: false,
signers: None, signers: None,
blockhash: None, blockhash_query: BlockhashQuery::default(),
nonce_account: None, nonce_account: None,
nonce_authority: None, nonce_authority: None,
}; };
@@ -2849,7 +2854,7 @@ mod tests {
lamports: 10, lamports: 10,
to: bob_pubkey, to: bob_pubkey,
nonce_account: Some(bob_pubkey), nonce_account: Some(bob_pubkey),
blockhash: Some(blockhash), blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
..PayCommand::default() ..PayCommand::default()
}); });
let signature = process_command(&config); let signature = process_command(&config);
@@ -2876,7 +2881,7 @@ mod tests {
config.command = CliCommand::Pay(PayCommand { config.command = CliCommand::Pay(PayCommand {
lamports: 10, lamports: 10,
to: bob_pubkey, to: bob_pubkey,
blockhash: Some(blockhash), blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
nonce_account: Some(bob_pubkey), nonce_account: Some(bob_pubkey),
nonce_authority: Some(bob_keypair.into()), nonce_authority: Some(bob_keypair.into()),
..PayCommand::default() ..PayCommand::default()
@@ -2951,6 +2956,7 @@ mod tests {
let bob_keypair = Keypair::new(); let bob_keypair = Keypair::new();
config.command = CliCommand::CreateVoteAccount { config.command = CliCommand::CreateVoteAccount {
vote_account: bob_keypair.into(), vote_account: bob_keypair.into(),
seed: None,
node_pubkey, node_pubkey,
authorized_voter: Some(bob_pubkey), authorized_voter: Some(bob_pubkey),
authorized_withdrawer: Some(bob_pubkey), authorized_withdrawer: Some(bob_pubkey),
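Taken together, the Pay test expectations earlier in this file encode a simple rule: the old blockhash: Option<Hash> field on PayCommand becomes blockhash_query, and the offline flags select its variant. A minimal sketch of that mapping, restated as a hypothetical helper (BlockhashQuery itself is defined in the new cli/src/offline.rs later in this diff; this function is illustrative, not part of the change):

use solana_cli::offline::BlockhashQuery; // module added by this change
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash};

// Hypothetical restatement of the mapping the tests above assert.
fn query_for(blockhash: Option<Hash>, sign_only: bool) -> BlockhashQuery {
    match (blockhash, sign_only) {
        // --blockhash <HASH> --sign-only: fully offline, default fee calculator
        (Some(hash), true) => BlockhashQuery::None(hash, FeeCalculator::default()),
        // --blockhash <HASH>: pin the hash, look up the fee calculator via RPC
        (Some(hash), false) => BlockhashQuery::FeeCalculator(hash),
        // neither flag: fetch both hash and fee calculator via RPC
        (None, false) => BlockhashQuery::All,
        // --sign-only without --blockhash is rejected by clap's `requires`
        (None, true) => unreachable!(),
    }
}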

View File

@@ -11,6 +11,7 @@ use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{input_parsers::*, input_validators::*}; use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::{rpc_client::RpcClient, rpc_response::RpcVoteAccountInfo}; use solana_client::{rpc_client::RpcClient, rpc_response::RpcVoteAccountInfo};
use solana_sdk::{ use solana_sdk::{
account_utils::StateMut,
clock::{self, Slot}, clock::{self, Slot},
commitment_config::CommitmentConfig, commitment_config::CommitmentConfig,
epoch_schedule::{Epoch, EpochSchedule}, epoch_schedule::{Epoch, EpochSchedule},
@@ -54,9 +55,9 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.about("Get the version of the cluster entrypoint"), .about("Get the version of the cluster entrypoint"),
) )
.subcommand(SubCommand::with_name("fees").about("Display current cluster fees")) .subcommand(SubCommand::with_name("fees").about("Display current cluster fees"))
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule")) .subcommand(SubCommand::with_name("block-time")
.subcommand(SubCommand::with_name("get-block-time")
.about("Get estimated production time of a block") .about("Get estimated production time of a block")
.alias("get-block-time")
.arg( .arg(
Arg::with_name("slot") Arg::with_name("slot")
.index(1) .index(1)
@@ -66,9 +67,11 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.help("Slot number of the block to query") .help("Slot number of the block to query")
) )
) )
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule"))
.subcommand( .subcommand(
SubCommand::with_name("get-epoch-info") SubCommand::with_name("epoch-info")
.about("Get information about the current epoch") .about("Get information about the current epoch")
.alias("get-epoch-info")
.arg( .arg(
Arg::with_name("confirmed") Arg::with_name("confirmed")
.long("confirmed") .long("confirmed")
@@ -79,10 +82,13 @@ impl ClusterQuerySubCommands for App<'_, '_> {
), ),
) )
.subcommand( .subcommand(
SubCommand::with_name("get-genesis-hash").about("Get the genesis hash"), SubCommand::with_name("genesis-hash")
.about("Get the genesis hash")
.alias("get-genesis-hash")
) )
.subcommand( .subcommand(
SubCommand::with_name("get-slot").about("Get current slot") SubCommand::with_name("slot").about("Get current slot")
.alias("get-slot")
.arg( .arg(
Arg::with_name("confirmed") Arg::with_name("confirmed")
.long("confirmed") .long("confirmed")
@@ -93,7 +99,8 @@ impl ClusterQuerySubCommands for App<'_, '_> {
), ),
) )
.subcommand( .subcommand(
SubCommand::with_name("get-transaction-count").about("Get current transaction count") SubCommand::with_name("transaction-count").about("Get current transaction count")
.alias("get-transaction-count")
.arg( .arg(
Arg::with_name("confirmed") Arg::with_name("confirmed")
.long("confirmed") .long("confirmed")
@@ -151,8 +158,9 @@ impl ClusterQuerySubCommands for App<'_, '_> {
), ),
) )
.subcommand( .subcommand(
SubCommand::with_name("show-block-production") SubCommand::with_name("block-production")
.about("Show information about block production") .about("Show information about block production")
.alias("show-block-production")
.arg( .arg(
Arg::with_name("epoch") Arg::with_name("epoch")
.long("epoch") .long("epoch")
@@ -167,12 +175,33 @@ impl ClusterQuerySubCommands for App<'_, '_> {
), ),
) )
.subcommand( .subcommand(
SubCommand::with_name("show-gossip") SubCommand::with_name("gossip")
.about("Show the current gossip network nodes"), .about("Show the current gossip network nodes")
.alias("show-gossip")
) )
.subcommand( .subcommand(
SubCommand::with_name("show-validators") SubCommand::with_name("stakes")
.about("Show information about the current validators") .about("Show stake account information")
.arg(
Arg::with_name("vote_account_pubkeys")
.index(1)
.value_name("VOTE ACCOUNT PUBKEYS")
.takes_value(true)
.multiple(true)
.validator(is_pubkey_or_keypair)
.help("Only show stake accounts delegated to the provided vote accounts"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
),
)
.subcommand(
SubCommand::with_name("validators")
.about("Show summary information about the current validators")
.alias("show-validators")
.arg( .arg(
Arg::with_name("lamports") Arg::with_name("lamports")
.long("lamports") .long("lamports")
@@ -261,6 +290,19 @@ pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliComman
}) })
} }
pub fn parse_show_stakes(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports");
let vote_account_pubkeys = pubkeys_of(matches, "vote_account_pubkeys");
Ok(CliCommandInfo {
command: CliCommand::ShowStakes {
use_lamports_unit,
vote_account_pubkeys,
},
require_keypair: false,
})
}
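A parsing example in the style of this module's tests, hedged because it assumes the usual test fixtures and that cli::parse_command dispatches the subcommand to parse_show_stakes: the new stakes subcommand with --lamports and no positional vote accounts should map to ShowStakes as follows.

let test_stakes = test_commands
    .clone()
    .get_matches_from(vec!["test", "stakes", "--lamports"]);
assert_eq!(
    parse_command(&test_stakes).unwrap(),
    CliCommandInfo {
        command: CliCommand::ShowStakes {
            use_lamports_unit: true,
            vote_account_pubkeys: None,
        },
        require_keypair: false,
    }
);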
pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> { pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports"); let use_lamports_unit = matches.is_present("lamports");
@@ -405,30 +447,50 @@ pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResu
Ok(timestamp.to_string()) Ok(timestamp.to_string())
} }
fn slot_to_human_time(slot: Slot) -> String {
humantime::format_duration(Duration::from_secs(
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
))
.to_string()
}
pub fn process_get_epoch_info( pub fn process_get_epoch_info(
rpc_client: &RpcClient, rpc_client: &RpcClient,
commitment_config: &CommitmentConfig, commitment_config: &CommitmentConfig,
) -> ProcessResult { ) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?; let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
println!(); println!();
println_name_value("Current epoch:", &epoch_info.epoch.to_string()); println_name_value("Slot:", &epoch_info.absolute_slot.to_string());
println_name_value("Current slot:", &epoch_info.absolute_slot.to_string()); println_name_value("Epoch:", &epoch_info.epoch.to_string());
let start_slot = epoch_info.absolute_slot - epoch_info.slot_index;
let end_slot = start_slot + epoch_info.slots_in_epoch;
println_name_value( println_name_value(
"Total slots in current epoch:", "Epoch slot range:",
&epoch_info.slots_in_epoch.to_string(), &format!("[{}..{})", start_slot, end_slot),
);
println_name_value(
"Epoch completed percent:",
&format!(
"{:>3.3}%",
epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64
),
); );
let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index; let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index;
println_name_value( println_name_value(
"Remaining slots in current epoch:", "Epoch completed slots:",
&remaining_slots_in_epoch.to_string(), &format!(
); "{}/{} ({} remaining)",
epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots_in_epoch
let remaining_time_in_epoch = Duration::from_secs( ),
remaining_slots_in_epoch * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
); );
println_name_value( println_name_value(
"Time remaining in current epoch:", "Epoch completed time:",
&humantime::format_duration(remaining_time_in_epoch).to_string(), &format!(
"{}/{} ({} remaining)",
slot_to_human_time(epoch_info.slot_index),
slot_to_human_time(epoch_info.slots_in_epoch),
slot_to_human_time(remaining_slots_in_epoch)
),
); );
Ok("".to_string()) Ok("".to_string())
} }
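For a concrete feel of slot_to_human_time above: it is plain integer arithmetic over the SDK clock constants (assumed here to be the stock 64 ticks per slot and 160 ticks per second, i.e. 400 ms slots; the slot count below is only illustrative), with humantime, which this file already relies on, doing the human-readable rendering.

use std::time::Duration;

fn main() {
    // Same arithmetic as slot_to_human_time, with the defaults inlined.
    const TICKS_PER_SLOT: u64 = 64;
    const TICKS_PER_SECOND: u64 = 160;
    let slots: u64 = 432_000; // illustrative epoch length
    let secs = slots * TICKS_PER_SLOT / TICKS_PER_SECOND; // 172_800 seconds
    // humantime renders 172_800 s as "2days"
    println!("{}", humantime::format_duration(Duration::from_secs(secs)));
}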
@@ -552,7 +614,7 @@ pub fn process_show_block_production(
let skipped_slots = leader_skipped_slots.entry(leader).or_insert(0); let skipped_slots = leader_skipped_slots.entry(leader).or_insert(0);
loop { loop {
if !confirmed_blocks.is_empty() { if confirmed_blocks_index < confirmed_blocks.len() {
let slot_of_next_confirmed_block = confirmed_blocks[confirmed_blocks_index]; let slot_of_next_confirmed_block = confirmed_blocks[confirmed_blocks_index];
if slot_of_next_confirmed_block < slot { if slot_of_next_confirmed_block < slot {
confirmed_blocks_index += 1; confirmed_blocks_index += 1;
@@ -798,6 +860,45 @@ pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
)) ))
} }
pub fn process_show_stakes(
rpc_client: &RpcClient,
use_lamports_unit: bool,
vote_account_pubkeys: Option<&[Pubkey]>,
) -> ProcessResult {
use crate::stake::print_stake_state;
use solana_stake_program::stake_state::StakeState;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Fetching stake accounts...");
let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
progress_bar.finish_and_clear();
for (stake_pubkey, stake_account) in all_stake_accounts {
if let Ok(stake_state) = stake_account.state() {
match stake_state {
StakeState::Initialized(_) => {
if vote_account_pubkeys.is_none() {
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
}
}
StakeState::Stake(_, stake) => {
if vote_account_pubkeys.is_none()
|| vote_account_pubkeys
.unwrap()
.contains(&stake.delegation.voter_pubkey)
{
println!("\nstake pubkey: {}", stake_pubkey);
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
}
}
_ => {}
}
}
}
Ok("".to_string())
}
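The only subtle part of the loop above is the vote-account filter; restated as a standalone predicate (a sketch, not code from this change): a stake is printed when no filter was supplied, or when its delegation targets one of the requested vote accounts. Initialized-but-undelegated stakes have no voter yet, which is why they only appear when no filter is given.

use solana_sdk::pubkey::Pubkey;

// Hypothetical helper equivalent to the inline condition in the Stake branch.
fn matches_vote_filter(voter_pubkey: &Pubkey, filter: Option<&[Pubkey]>) -> bool {
    filter.map_or(true, |pubkeys| pubkeys.contains(voter_pubkey))
}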
pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult { pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?; let epoch_schedule = rpc_client.get_epoch_schedule()?;
let vote_accounts = rpc_client.get_vote_accounts()?; let vote_accounts = rpc_client.get_vote_accounts()?;
@@ -957,11 +1058,10 @@ mod tests {
); );
let slot = 100; let slot = 100;
- let test_get_block_time = test_commands.clone().get_matches_from(vec![
-     "test",
-     "get-block-time",
-     &slot.to_string(),
- ]);
+ let test_get_block_time =
+     test_commands
+         .clone()
+         .get_matches_from(vec!["test", "block-time", &slot.to_string()]);
assert_eq!( assert_eq!(
parse_command(&test_get_block_time).unwrap(), parse_command(&test_get_block_time).unwrap(),
CliCommandInfo { CliCommandInfo {
@@ -972,7 +1072,7 @@ mod tests {
let test_get_epoch_info = test_commands let test_get_epoch_info = test_commands
.clone() .clone()
.get_matches_from(vec!["test", "get-epoch-info"]); .get_matches_from(vec!["test", "epoch-info"]);
assert_eq!( assert_eq!(
parse_command(&test_get_epoch_info).unwrap(), parse_command(&test_get_epoch_info).unwrap(),
CliCommandInfo { CliCommandInfo {
@@ -985,7 +1085,7 @@ mod tests {
let test_get_genesis_hash = test_commands let test_get_genesis_hash = test_commands
.clone() .clone()
.get_matches_from(vec!["test", "get-genesis-hash"]); .get_matches_from(vec!["test", "genesis-hash"]);
assert_eq!( assert_eq!(
parse_command(&test_get_genesis_hash).unwrap(), parse_command(&test_get_genesis_hash).unwrap(),
CliCommandInfo { CliCommandInfo {
@@ -994,9 +1094,7 @@ mod tests {
} }
); );
let test_get_slot = test_commands let test_get_slot = test_commands.clone().get_matches_from(vec!["test", "slot"]);
.clone()
.get_matches_from(vec!["test", "get-slot"]);
assert_eq!( assert_eq!(
parse_command(&test_get_slot).unwrap(), parse_command(&test_get_slot).unwrap(),
CliCommandInfo { CliCommandInfo {
@@ -1009,7 +1107,7 @@ mod tests {
let test_transaction_count = test_commands let test_transaction_count = test_commands
.clone() .clone()
.get_matches_from(vec!["test", "get-transaction-count"]); .get_matches_from(vec!["test", "transaction-count"]);
assert_eq!( assert_eq!(
parse_command(&test_transaction_count).unwrap(), parse_command(&test_transaction_count).unwrap(),
CliCommandInfo { CliCommandInfo {

View File

@@ -1,11 +1,8 @@
#[macro_use]
extern crate lazy_static;
pub mod cli; pub mod cli;
pub mod cluster_query; pub mod cluster_query;
pub mod config;
pub mod display; pub mod display;
pub mod nonce; pub mod nonce;
pub mod offline;
pub mod stake; pub mod stake;
pub mod storage; pub mod storage;
pub mod validator_info; pub mod validator_info;

View File

@@ -1,4 +1,4 @@
use clap::{crate_description, crate_name, Arg, ArgGroup, ArgMatches, SubCommand}; use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches, SubCommand};
use console::style; use console::style;
use solana_clap_utils::{ use solana_clap_utils::{
@@ -10,15 +10,16 @@ use solana_clap_utils::{
}; };
use solana_cli::{ use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliError}, cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliError},
config::{self, Config},
display::{println_name_value, println_name_value_or}, display::{println_name_value, println_name_value_or},
}; };
use solana_cli_config::config::{Config, CONFIG_FILE};
use solana_sdk::signature::read_keypair_file; use solana_sdk::signature::read_keypair_file;
use std::error; use std::error;
fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> { fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
let parse_args = match matches.subcommand() { let parse_args = match matches.subcommand() {
("config", Some(matches)) => match matches.subcommand() {
("get", Some(subcommand_matches)) => { ("get", Some(subcommand_matches)) => {
if let Some(config_file) = matches.value_of("config_file") { if let Some(config_file) = matches.value_of("config_file") {
let config = Config::load(config_file).unwrap_or_default(); let config = Config::load(config_file).unwrap_or_default();
@@ -71,6 +72,8 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
} }
false false
} }
_ => unreachable!(),
},
_ => true, _ => true,
}; };
Ok(parse_args) Ok(parse_args)
@@ -159,7 +162,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.takes_value(true) .takes_value(true)
.global(true) .global(true)
.help("Configuration file to use"); .help("Configuration file to use");
if let Some(ref config_file) = *config::CONFIG_FILE { if let Some(ref config_file) = *CONFIG_FILE {
arg.default_value(&config_file) arg.default_value(&config_file)
} else { } else {
arg arg
@@ -206,9 +209,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.global(true) .global(true)
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
) )
.subcommand(
SubCommand::with_name("config")
.about("Solana command-line tool configuration settings")
.aliases(&["get", "set"])
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand( .subcommand(
SubCommand::with_name("get") SubCommand::with_name("get")
.about("Get cli config settings") .about("Get current config settings")
.arg( .arg(
Arg::with_name("specific_setting") Arg::with_name("specific_setting")
.index(1) .index(1)
@@ -220,13 +228,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
) )
.subcommand( .subcommand(
SubCommand::with_name("set") SubCommand::with_name("set")
.about("Set a cli config setting") .about("Set a config setting")
.group( .group(
ArgGroup::with_name("config_settings") ArgGroup::with_name("config_settings")
.args(&["json_rpc_url", "keypair"]) .args(&["json_rpc_url", "keypair"])
.multiple(true) .multiple(true)
.required(true), .required(true),
), ),
),
) )
.get_matches(); .get_matches();
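The reshaped parse_settings above leans on a standard clap pattern: get and set are now children of a config subcommand, and SubcommandRequiredElseHelp means a bare config invocation prints help before the inner match ever runs, so its _ => unreachable!() arm is safe. A minimal standalone sketch of that pattern (clap 2.x, demo names only):

use clap::{App, AppSettings, SubCommand};

fn main() {
    let matches = App::new("demo")
        .subcommand(
            SubCommand::with_name("config")
                .about("configuration settings")
                .setting(AppSettings::SubcommandRequiredElseHelp)
                .subcommand(SubCommand::with_name("get"))
                .subcommand(SubCommand::with_name("set")),
        )
        .get_matches();

    if let ("config", Some(config_matches)) = matches.subcommand() {
        match config_matches.subcommand_name() {
            Some("get") => println!("would print settings"),
            Some("set") => println!("would update settings"),
            // clap has already shown help or errored before this point
            _ => unreachable!(),
        }
    }
}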

View File

@@ -3,18 +3,20 @@ use crate::cli::{
log_instruction_custom_error, required_lamports_from, CliCommand, CliCommandInfo, CliConfig, log_instruction_custom_error, required_lamports_from, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult, SigningAuthority, CliError, ProcessResult, SigningAuthority,
}; };
use crate::offline::BLOCKHASH_ARG;
use clap::{App, Arg, ArgMatches, SubCommand}; use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant}; use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
use solana_client::rpc_client::RpcClient; use solana_client::rpc_client::RpcClient;
use solana_sdk::{ use solana_sdk::{
account::Account, account::Account,
account_utils::State, account_utils::StateMut,
hash::Hash, hash::Hash,
nonce_state::NonceState, nonce_state::{Meta, NonceState},
pubkey::Pubkey, pubkey::Pubkey,
signature::{Keypair, KeypairUtil}, signature::{Keypair, KeypairUtil},
system_instruction::{ system_instruction::{
create_nonce_account, nonce_advance, nonce_authorize, nonce_withdraw, NonceError, advance_nonce_account, authorize_nonce_account, create_address_with_seed,
create_nonce_account, create_nonce_account_with_seed, withdraw_nonce_account, NonceError,
SystemError, SystemError,
}, },
system_program, system_program,
@@ -54,7 +56,7 @@ pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
.long(NONCE_ARG.long) .long(NONCE_ARG.long)
.takes_value(true) .takes_value(true)
.value_name("PUBKEY") .value_name("PUBKEY")
.requires("blockhash") .requires(BLOCKHASH_ARG.name)
.validator(is_pubkey) .validator(is_pubkey)
.help(NONCE_ARG.help) .help(NONCE_ARG.help)
} }
@@ -91,6 +93,13 @@ impl NonceSubCommands for App<'_, '_> {
.validator(is_pubkey_or_keypair) .validator(is_pubkey_or_keypair)
.help("Account to be granted authority of the nonce account"), .help("Account to be granted authority of the nonce account"),
) )
.arg(
Arg::with_name("seed")
.long("seed")
.value_name("SEED STRING")
.takes_value(true)
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
)
.arg(nonce_authority_arg()), .arg(nonce_authority_arg()),
) )
.subcommand( .subcommand(
@@ -132,8 +141,9 @@ impl NonceSubCommands for App<'_, '_> {
), ),
) )
.subcommand( .subcommand(
SubCommand::with_name("get-nonce") SubCommand::with_name("nonce")
.about("Get the current nonce value") .about("Get the current nonce value")
.alias("get-nonce")
.arg( .arg(
Arg::with_name("nonce_account_pubkey") Arg::with_name("nonce_account_pubkey")
.index(1) .index(1)
@@ -159,8 +169,9 @@ impl NonceSubCommands for App<'_, '_> {
.arg(nonce_authority_arg()), .arg(nonce_authority_arg()),
) )
.subcommand( .subcommand(
SubCommand::with_name("show-nonce-account") SubCommand::with_name("nonce-account")
.about("Show the contents of a nonce account") .about("Show the contents of a nonce account")
.alias("show-nonce-account")
.arg( .arg(
Arg::with_name("nonce_account_pubkey") Arg::with_name("nonce_account_pubkey")
.index(1) .index(1)
@@ -223,15 +234,8 @@ impl NonceSubCommands for App<'_, '_> {
pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> { pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap(); let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let new_authority = pubkey_of(matches, "new_authority").unwrap(); let new_authority = pubkey_of(matches, "new_authority").unwrap();
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) { let nonce_authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount { command: CliCommand::AuthorizeNonceAccount {
@@ -245,12 +249,14 @@ pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliComm
pub fn parse_nonce_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> { pub fn parse_nonce_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = keypair_of(matches, "nonce_account_keypair").unwrap(); let nonce_account = keypair_of(matches, "nonce_account_keypair").unwrap();
let seed = matches.value_of("seed").map(|s| s.to_string());
let lamports = required_lamports_from(matches, "amount", "unit")?; let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority = pubkey_of(matches, NONCE_AUTHORITY_ARG.name); let nonce_authority = pubkey_of(matches, NONCE_AUTHORITY_ARG.name);
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::CreateNonceAccount { command: CliCommand::CreateNonceAccount {
nonce_account: nonce_account.into(), nonce_account: nonce_account.into(),
seed,
nonce_authority, nonce_authority,
lamports, lamports,
}, },
@@ -269,15 +275,8 @@ pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr
pub fn parse_new_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> { pub fn parse_new_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap(); let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) { let nonce_authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::NewNonce { command: CliCommand::NewNonce {
@@ -307,15 +306,8 @@ pub fn parse_withdraw_from_nonce_account(
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap(); let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap(); let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?; let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) { let nonce_authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, None)?;
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount { command: CliCommand::WithdrawFromNonceAccount {
@@ -368,7 +360,7 @@ pub fn process_authorize_nonce_account(
let nonce_authority = nonce_authority let nonce_authority = nonce_authority
.map(|a| a.keypair()) .map(|a| a.keypair())
.unwrap_or(&config.keypair); .unwrap_or(&config.keypair);
let ix = nonce_authorize(nonce_account, &nonce_authority.pubkey(), new_authority); let ix = authorize_nonce_account(nonce_account, &nonce_authority.pubkey(), new_authority);
let mut tx = Transaction::new_signed_with_payer( let mut tx = Transaction::new_signed_with_payer(
vec![ix], vec![ix],
Some(&config.keypair.pubkey()), Some(&config.keypair.pubkey()),
@@ -390,24 +382,31 @@ pub fn process_create_nonce_account(
rpc_client: &RpcClient, rpc_client: &RpcClient,
config: &CliConfig, config: &CliConfig,
nonce_account: &Keypair, nonce_account: &Keypair,
seed: Option<String>,
nonce_authority: Option<Pubkey>, nonce_authority: Option<Pubkey>,
lamports: u64, lamports: u64,
) -> ProcessResult { ) -> ProcessResult {
let nonce_account_pubkey = nonce_account.pubkey(); let nonce_account_pubkey = nonce_account.pubkey();
let nonce_account_address = if let Some(seed) = seed.clone() {
create_address_with_seed(&nonce_account_pubkey, &seed, &system_program::id())?
} else {
nonce_account_pubkey
};
check_unique_pubkeys( check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()), (&config.keypair.pubkey(), "cli keypair".to_string()),
(&nonce_account_pubkey, "nonce_account_pubkey".to_string()), (&nonce_account_address, "nonce_account".to_string()),
)?; )?;
if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_pubkey) { if let Ok(nonce_account) = rpc_client.get_account(&nonce_account_address) {
let err_msg = if nonce_account.owner == system_program::id() let err_msg = if nonce_account.owner == system_program::id()
&& State::<NonceState>::state(&nonce_account).is_ok() && StateMut::<NonceState>::state(&nonce_account).is_ok()
{ {
format!("Nonce account {} already exists", nonce_account_pubkey) format!("Nonce account {} already exists", nonce_account_address)
} else { } else {
format!( format!(
"Account {} already exists and is not a nonce account", "Account {} already exists and is not a nonce account",
nonce_account_pubkey nonce_account_address
) )
}; };
return Err(CliError::BadParameter(err_msg).into()); return Err(CliError::BadParameter(err_msg).into());
@@ -423,17 +422,37 @@ pub fn process_create_nonce_account(
} }
let nonce_authority = nonce_authority.unwrap_or_else(|| config.keypair.pubkey()); let nonce_authority = nonce_authority.unwrap_or_else(|| config.keypair.pubkey());
let ixs = create_nonce_account(
let ixs = if let Some(seed) = seed {
create_nonce_account_with_seed(
&config.keypair.pubkey(), // from
&nonce_account_address, // to
&nonce_account_pubkey, // base
&seed, // seed
&nonce_authority,
lamports,
)
} else {
create_nonce_account(
&config.keypair.pubkey(), &config.keypair.pubkey(),
&nonce_account_pubkey, &nonce_account_pubkey,
&nonce_authority, &nonce_authority,
lamports, lamports,
); )
};
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let signers = if nonce_account_pubkey != config.keypair.pubkey() {
vec![&config.keypair, nonce_account] // both must sign if `from` and `to` differ
} else {
vec![&config.keypair] // when nonce_account == config.keypair and there's a seed, we only need one signature
};
let mut tx = Transaction::new_signed_with_payer( let mut tx = Transaction::new_signed_with_payer(
ixs, ixs,
Some(&config.keypair.pubkey()), Some(&config.keypair.pubkey()),
&[&config.keypair, nonce_account], &signers,
recent_blockhash, recent_blockhash,
); );
check_account_for_fee( check_account_for_fee(
@@ -442,8 +461,7 @@ pub fn process_create_nonce_account(
&fee_calculator, &fee_calculator,
&tx.message, &tx.message,
)?; )?;
let result = let result = rpc_client.send_and_confirm_transaction(&mut tx, &signers);
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, nonce_account]);
log_instruction_custom_error::<SystemError>(result) log_instruction_custom_error::<SystemError>(result)
} }
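How the --seed address is derived, pulled out of process_create_nonce_account into a self-contained sketch (the seed string is just an example): the derived address is computed from the base pubkey, the seed, and the owning program, so one base keypair can create any number of accounts without new keys, and the derived account itself never signs.

use solana_sdk::{
    pubkey::Pubkey,
    signature::{Keypair, KeypairUtil},
    system_instruction::create_address_with_seed,
    system_program,
};

fn main() {
    let base = Keypair::new();
    // Same derivation the function above performs when --seed is supplied.
    let derived: Pubkey =
        create_address_with_seed(&base.pubkey(), "nonce:0", &system_program::id())
            .expect("seed within length limits");
    println!("base {} -> derived {}", base.pubkey(), derived);
}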
@@ -488,7 +506,7 @@ pub fn process_new_nonce(
let nonce_authority = nonce_authority let nonce_authority = nonce_authority
.map(|a| a.keypair()) .map(|a| a.keypair())
.unwrap_or(&config.keypair); .unwrap_or(&config.keypair);
let ix = nonce_advance(&nonce_account, &nonce_authority.pubkey()); let ix = advance_nonce_account(&nonce_account, &nonce_authority.pubkey());
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new_signed_with_payer( let mut tx = Transaction::new_signed_with_payer(
vec![ix], vec![ix],
@@ -520,7 +538,7 @@ pub fn process_show_nonce_account(
)) ))
.into()); .into());
} }
let print_account = |hash: Option<Hash>| { let print_account = |data: Option<(Meta, Hash)>| {
println!( println!(
"balance: {}", "balance: {}",
build_balance_message(nonce_account.lamports, use_lamports_unit, true) build_balance_message(nonce_account.lamports, use_lamports_unit, true)
@@ -533,15 +551,21 @@ pub fn process_show_nonce_account(
true true
) )
); );
match hash { match data {
Some(hash) => println!("nonce: {}", hash), Some((meta, hash)) => {
None => println!("nonce: uninitialized"), println!("nonce: {}", hash);
println!("authority: {}", meta.nonce_authority);
}
None => {
println!("nonce: uninitialized");
println!("authority: uninitialized");
}
} }
Ok("".to_string()) Ok("".to_string())
}; };
match nonce_account.state() { match nonce_account.state() {
Ok(NonceState::Uninitialized) => print_account(None), Ok(NonceState::Uninitialized) => print_account(None),
Ok(NonceState::Initialized(_, hash)) => print_account(Some(hash)), Ok(NonceState::Initialized(meta, hash)) => print_account(Some((meta, hash))),
Err(err) => Err(CliError::RpcRequestError(format!( Err(err) => Err(CliError::RpcRequestError(format!(
"Account data could not be deserialized to nonce state: {:?}", "Account data could not be deserialized to nonce state: {:?}",
err err
@@ -563,7 +587,7 @@ pub fn process_withdraw_from_nonce_account(
let nonce_authority = nonce_authority let nonce_authority = nonce_authority
.map(|a| a.keypair()) .map(|a| a.keypair())
.unwrap_or(&config.keypair); .unwrap_or(&config.keypair);
let ix = nonce_withdraw( let ix = withdraw_nonce_account(
nonce_account, nonce_account,
&nonce_authority.pubkey(), &nonce_authority.pubkey(),
destination_account_pubkey, destination_account_pubkey,
@@ -672,6 +696,7 @@ mod tests {
CliCommandInfo { CliCommandInfo {
command: CliCommand::CreateNonceAccount { command: CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().into(), nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
seed: None,
nonce_authority: None, nonce_authority: None,
lamports: 50, lamports: 50,
}, },
@@ -694,6 +719,7 @@ mod tests {
CliCommandInfo { CliCommandInfo {
command: CliCommand::CreateNonceAccount { command: CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().into(), nonce_account: read_keypair_file(&keypair_file).unwrap().into(),
seed: None,
nonce_authority: Some( nonce_authority: Some(
read_keypair_file(&authority_keypair_file).unwrap().pubkey() read_keypair_file(&authority_keypair_file).unwrap().pubkey()
), ),
@@ -759,7 +785,7 @@ mod tests {
// Test ShowNonceAccount Subcommand // Test ShowNonceAccount Subcommand
let test_show_nonce_account = test_commands.clone().get_matches_from(vec![ let test_show_nonce_account = test_commands.clone().get_matches_from(vec![
"test", "test",
"show-nonce-account", "nonce-account",
&nonce_account_string, &nonce_account_string,
]); ]);
assert_eq!( assert_eq!(

cli/src/offline.rs (new file, 252 lines)
View File

@@ -0,0 +1,252 @@
use clap::{App, Arg, ArgMatches};
use solana_clap_utils::{
input_parsers::value_of,
input_validators::{is_hash, is_pubkey_sig},
ArgConstant,
};
use solana_client::rpc_client::RpcClient;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash};
pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
name: "blockhash",
long: "blockhash",
help: "Use the supplied blockhash",
};
pub const SIGN_ONLY_ARG: ArgConstant<'static> = ArgConstant {
name: "sign_only",
long: "sign-only",
help: "Sign the transaction offline",
};
pub const SIGNER_ARG: ArgConstant<'static> = ArgConstant {
name: "signer",
long: "signer",
help: "Provid a public-key/signature pair for the transaction",
};
#[derive(Clone, Debug, PartialEq)]
pub enum BlockhashQuery {
None(Hash, FeeCalculator),
FeeCalculator(Hash),
All,
}
impl BlockhashQuery {
pub fn new(blockhash: Option<Hash>, sign_only: bool) -> Self {
match blockhash {
Some(hash) if sign_only => Self::None(hash, FeeCalculator::default()),
Some(hash) if !sign_only => Self::FeeCalculator(hash),
None if !sign_only => Self::All,
_ => panic!("Cannot resolve blockhash"),
}
}
pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self {
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
BlockhashQuery::new(blockhash, sign_only)
}
pub fn get_blockhash_fee_calculator(
&self,
rpc_client: &RpcClient,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
let (hash, fee_calc) = match self {
BlockhashQuery::None(hash, fee_calc) => (Some(hash), Some(fee_calc)),
BlockhashQuery::FeeCalculator(hash) => (Some(hash), None),
BlockhashQuery::All => (None, None),
};
if None == fee_calc {
let (cluster_hash, fee_calc) = rpc_client.get_recent_blockhash()?;
Ok((*hash.unwrap_or(&cluster_hash), fee_calc))
} else {
Ok((*hash.unwrap(), fee_calc.unwrap().clone()))
}
}
}
impl Default for BlockhashQuery {
fn default() -> Self {
BlockhashQuery::All
}
}
fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(BLOCKHASH_ARG.name)
.long(BLOCKHASH_ARG.long)
.takes_value(true)
.value_name("BLOCKHASH")
.validator(is_hash)
.help(BLOCKHASH_ARG.help)
}
fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGN_ONLY_ARG.name)
.long(SIGN_ONLY_ARG.long)
.takes_value(false)
.requires(BLOCKHASH_ARG.name)
.help(SIGN_ONLY_ARG.help)
}
fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGNER_ARG.name)
.long(SIGNER_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY=BASE58_SIG")
.validator(is_pubkey_sig)
.requires(BLOCKHASH_ARG.name)
.multiple(true)
.help(SIGNER_ARG.help)
}
pub trait OfflineArgs {
fn offline_args(self) -> Self;
}
impl OfflineArgs for App<'_, '_> {
fn offline_args(self) -> Self {
self.arg(blockhash_arg())
.arg(sign_only_arg())
.arg(signer_arg())
}
}
#[cfg(test)]
mod tests {
use super::*;
use clap::App;
use serde_json::{self, json, Value};
use solana_client::{
rpc_request::RpcRequest,
rpc_response::{Response, RpcResponseContext},
};
use solana_sdk::{fee_calculator::FeeCalculator, hash::hash};
use std::collections::HashMap;
#[test]
fn test_blockhashspec_new_ok() {
let blockhash = hash(&[1u8]);
assert_eq!(
BlockhashQuery::new(Some(blockhash), true),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);
assert_eq!(
BlockhashQuery::new(Some(blockhash), false),
BlockhashQuery::FeeCalculator(blockhash),
);
assert_eq!(BlockhashQuery::new(None, false), BlockhashQuery::All,);
}
#[test]
#[should_panic]
fn test_blockhashspec_new_fail() {
BlockhashQuery::new(None, true);
}
#[test]
fn test_blockhashspec_new_from_matches_ok() {
let test_commands = App::new("blockhashspec_test").offline_args();
let blockhash = hash(&[1u8]);
let blockhash_string = blockhash.to_string();
let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::None(blockhash, FeeCalculator::default()),
);
let matches = test_commands.clone().get_matches_from(vec![
"blockhashspec_test",
"--blockhash",
&blockhash_string,
]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::FeeCalculator(blockhash),
);
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test"]);
assert_eq!(
BlockhashQuery::new_from_matches(&matches),
BlockhashQuery::All,
);
}
#[test]
#[should_panic]
fn test_blockhashspec_new_from_matches_fail() {
let test_commands = App::new("blockhashspec_test")
.arg(blockhash_arg())
// We can really only hit this case if the arg requirements
// are broken, so unset the requires() to recreate that condition
.arg(sign_only_arg().requires(""));
let matches = test_commands
.clone()
.get_matches_from(vec!["blockhashspec_test", "--sign-only"]);
BlockhashQuery::new_from_matches(&matches);
}
#[test]
fn test_blockhashspec_get_blockhash_fee_calc() {
let test_blockhash = hash(&[0u8]);
let rpc_blockhash = hash(&[1u8]);
let rpc_fee_calc = FeeCalculator::new(42, 42);
let get_recent_blockhash_response = json!(Response {
context: RpcResponseContext { slot: 1 },
value: json!((
Value::String(rpc_blockhash.to_string()),
serde_json::to_value(rpc_fee_calc.clone()).unwrap()
)),
});
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(rpc_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::FeeCalculator(test_blockhash)
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, rpc_fee_calc.clone()),
);
let mut mocks = HashMap::new();
mocks.insert(
RpcRequest::GetRecentBlockhash,
get_recent_blockhash_response.clone(),
);
let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks);
assert_eq!(
BlockhashQuery::None(test_blockhash, FeeCalculator::default())
.get_blockhash_fee_calculator(&rpc_client)
.unwrap(),
(test_blockhash, FeeCalculator::default()),
);
let rpc_client = RpcClient::new_mock("fails".to_string());
assert!(BlockhashQuery::All
.get_blockhash_fee_calculator(&rpc_client)
.is_err());
}
}
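The payoff of this type on the consuming side: a command processor holds a single BlockhashQuery instead of branching on sign-only/blockhash itself, and the fully offline variant resolves without any RPC round trip. A hedged sketch of a caller (paths as added by this change; the fee math uses FeeCalculator's public lamports_per_signature field and is illustrative):

use solana_cli::offline::BlockhashQuery;
use solana_client::rpc_client::RpcClient;

fn fee_for_signatures(
    query: &BlockhashQuery,
    rpc_client: &RpcClient,
    num_signatures: u64,
) -> Result<u64, Box<dyn std::error::Error>> {
    // None(_, _) returns its embedded values; FeeCalculator(_) and All fall
    // back to rpc_client.get_recent_blockhash() for whatever is missing.
    let (_blockhash, fee_calculator) = query.get_blockhash_fee_calculator(rpc_client)?;
    Ok(fee_calculator.lamports_per_signature * num_signatures)
}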

View File

@@ -1,11 +1,12 @@
use crate::{ use crate::{
cli::{ cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys, build_balance_message, check_account_for_fee, check_unique_pubkeys,
get_blockhash_fee_calculator, log_instruction_custom_error, nonce_authority_arg, log_instruction_custom_error, nonce_authority_arg, replace_signatures,
replace_signatures, required_lamports_from, return_signers, CliCommand, CliCommandInfo, required_lamports_from, return_signers, CliCommand, CliCommandInfo, CliConfig, CliError,
CliConfig, CliError, ProcessResult, SigningAuthority, ProcessResult, SigningAuthority,
}, },
nonce::{check_nonce_account, nonce_arg, NONCE_ARG, NONCE_AUTHORITY_ARG}, nonce::{check_nonce_account, nonce_arg, NONCE_ARG, NONCE_AUTHORITY_ARG},
offline::*,
}; };
use clap::{App, Arg, ArgMatches, SubCommand}; use clap::{App, Arg, ArgMatches, SubCommand};
use console::style; use console::style;
@@ -13,21 +14,19 @@ use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
use solana_client::rpc_client::RpcClient; use solana_client::rpc_client::RpcClient;
use solana_sdk::signature::{Keypair, Signature}; use solana_sdk::signature::{Keypair, Signature};
use solana_sdk::{ use solana_sdk::{
account_utils::State, account_utils::StateMut,
hash::Hash,
pubkey::Pubkey, pubkey::Pubkey,
signature::KeypairUtil, signature::KeypairUtil,
system_instruction::SystemError, system_instruction::{create_address_with_seed, SystemError},
sysvar::{ sysvar::{
stake_history::{self, StakeHistory}, stake_history::{self, StakeHistory},
Sysvar, Sysvar,
}, },
transaction::Transaction, transaction::Transaction,
}; };
use solana_stake_program::stake_state::Meta;
use solana_stake_program::{ use solana_stake_program::{
stake_instruction::{self, StakeError}, stake_instruction::{self, StakeError},
stake_state::{Authorized, Lockup, StakeAuthorize, StakeState}, stake_state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState},
}; };
use solana_vote_program::vote_state::VoteState; use solana_vote_program::vote_state::VoteState;
use std::ops::Deref; use std::ops::Deref;
@@ -105,6 +104,13 @@ impl StakeSubCommands for App<'_, '_> {
.validator(is_pubkey_or_keypair) .validator(is_pubkey_or_keypair)
.help("Identity of the custodian (can withdraw before lockup expires)") .help("Identity of the custodian (can withdraw before lockup expires)")
) )
.arg(
Arg::with_name("seed")
.long("seed")
.value_name("SEED STRING")
.takes_value(true)
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the STAKE ACCOUNT pubkey")
)
.arg( .arg(
Arg::with_name("lockup_epoch") Arg::with_name("lockup_epoch")
.long("lockup-epoch") .long("lockup-epoch")
@@ -166,29 +172,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("The vote account to which the stake will be delegated") .help("The vote account to which the stake will be delegated")
) )
.arg(stake_authority_arg()) .arg(stake_authority_arg())
.arg( .offline_args()
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(nonce_arg()) .arg(nonce_arg())
.arg(nonce_authority_arg()) .arg(nonce_authority_arg())
) )
@@ -214,29 +198,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("New authorized staker") .help("New authorized staker")
) )
.arg(stake_authority_arg()) .arg(stake_authority_arg())
.arg( .offline_args()
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(nonce_arg()) .arg(nonce_arg())
.arg(nonce_authority_arg()) .arg(nonce_authority_arg())
) )
@@ -262,29 +224,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("New authorized withdrawer") .help("New authorized withdrawer")
) )
.arg(withdraw_authority_arg()) .arg(withdraw_authority_arg())
.arg( .offline_args()
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(nonce_arg()) .arg(nonce_arg())
.arg(nonce_authority_arg()) .arg(nonce_authority_arg())
) )
@@ -300,29 +240,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("Stake account to be deactivated.") .help("Stake account to be deactivated.")
) )
.arg(stake_authority_arg()) .arg(stake_authority_arg())
.arg( .offline_args()
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(nonce_arg()) .arg(nonce_arg())
.arg(nonce_authority_arg()) .arg(nonce_authority_arg())
) )
@@ -367,30 +285,9 @@ impl StakeSubCommands for App<'_, '_> {
.arg(withdraw_authority_arg()) .arg(withdraw_authority_arg())
) )
.subcommand( .subcommand(
SubCommand::with_name("redeem-vote-credits") SubCommand::with_name("stake-account")
.about("Redeem credits in the stake account")
.arg(
Arg::with_name("stake_account_pubkey")
.index(1)
.value_name("STAKE ACCOUNT")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Address of the stake account in which to redeem credits")
)
.arg(
Arg::with_name("vote_account_pubkey")
.index(2)
.value_name("VOTE ACCOUNT")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("The vote account to which the stake is currently delegated.")
)
)
.subcommand(
SubCommand::with_name("show-stake-account")
.about("Show the contents of a stake account") .about("Show the contents of a stake account")
.alias("show-stake-account")
.arg( .arg(
Arg::with_name("stake_account_pubkey") Arg::with_name("stake_account_pubkey")
.index(1) .index(1)
@@ -408,8 +305,9 @@ impl StakeSubCommands for App<'_, '_> {
) )
) )
.subcommand( .subcommand(
SubCommand::with_name("show-stake-history") SubCommand::with_name("stake-history")
.about("Show the stake history") .about("Show the stake history")
.alias("show-stake-history")
.arg( .arg(
Arg::with_name("lamports") Arg::with_name("lamports")
.long("lamports") .long("lamports")
@@ -422,6 +320,7 @@ impl StakeSubCommands for App<'_, '_> {
pub fn parse_stake_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> { pub fn parse_stake_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account = keypair_of(matches, "stake_account").unwrap(); let stake_account = keypair_of(matches, "stake_account").unwrap();
let seed = matches.value_of("seed").map(|s| s.to_string());
let epoch = value_of(&matches, "lockup_epoch").unwrap_or(0); let epoch = value_of(&matches, "lockup_epoch").unwrap_or(0);
let unix_timestamp = unix_timestamp_from_rfc3339_datetime(&matches, "lockup_date").unwrap_or(0); let unix_timestamp = unix_timestamp_from_rfc3339_datetime(&matches, "lockup_date").unwrap_or(0);
let custodian = pubkey_of(matches, "custodian").unwrap_or_default(); let custodian = pubkey_of(matches, "custodian").unwrap_or_default();
@@ -432,6 +331,7 @@ pub fn parse_stake_create_account(matches: &ArgMatches<'_>) -> Result<CliCommand
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::CreateStakeAccount { command: CliCommand::CreateStakeAccount {
stake_account: stake_account.into(), stake_account: stake_account.into(),
seed,
staker, staker,
withdrawer, withdrawer,
lockup: Lockup { lockup: Lockup {
@@ -449,29 +349,15 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap(); let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let force = matches.is_present("force"); let force = matches.is_present("force");
let sign_only = matches.is_present("sign_only"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, "signer"); let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let blockhash = value_of(matches, "blockhash"); let blockhash_query = BlockhashQuery::new_from_matches(matches);
let require_keypair = signers.is_none(); let require_keypair = signers.is_none();
let nonce_account = pubkey_of(&matches, NONCE_ARG.name); let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) { let stake_authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, STAKE_AUTHORITY_ARG.name, signers.as_deref())?;
&matches, let nonce_authority =
STAKE_AUTHORITY_ARG.name, SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, signers.as_deref())?;
signers.as_deref(),
)?)
} else {
None
};
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::DelegateStake { command: CliCommand::DelegateStake {
@@ -481,7 +367,7 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
force, force,
sign_only, sign_only,
signers, signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
nonce_authority, nonce_authority,
}, },
@@ -499,28 +385,14 @@ pub fn parse_stake_authorize(
StakeAuthorize::Staker => STAKE_AUTHORITY_ARG.name, StakeAuthorize::Staker => STAKE_AUTHORITY_ARG.name,
StakeAuthorize::Withdrawer => WITHDRAW_AUTHORITY_ARG.name, StakeAuthorize::Withdrawer => WITHDRAW_AUTHORITY_ARG.name,
}; };
let sign_only = matches.is_present("sign_only"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, "signer"); let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let authority = if matches.is_present(authority_flag) { let authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, authority_flag, signers.as_deref())?;
&matches, let blockhash_query = BlockhashQuery::new_from_matches(matches);
authority_flag,
signers.as_deref(),
)?)
} else {
None
};
let blockhash = value_of(matches, "blockhash");
let nonce_account = pubkey_of(&matches, NONCE_ARG.name); let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) { let nonce_authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, signers.as_deref())?;
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::StakeAuthorize { command: CliCommand::StakeAuthorize {
@@ -530,7 +402,7 @@ pub fn parse_stake_authorize(
authority, authority,
sign_only, sign_only,
signers, signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
nonce_authority, nonce_authority,
}, },
@@ -538,41 +410,17 @@ pub fn parse_stake_authorize(
}) })
} }
pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
Ok(CliCommandInfo {
command: CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey),
require_keypair: true,
})
}
pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> { pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let sign_only = matches.is_present("sign_only"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, "signer"); let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let blockhash = value_of(matches, "blockhash"); let blockhash_query = BlockhashQuery::new_from_matches(matches);
let require_keypair = signers.is_none(); let require_keypair = signers.is_none();
let nonce_account = pubkey_of(&matches, NONCE_ARG.name); let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) { let stake_authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, STAKE_AUTHORITY_ARG.name, signers.as_deref())?;
&matches, let nonce_authority =
STAKE_AUTHORITY_ARG.name, SigningAuthority::new_from_matches(&matches, NONCE_AUTHORITY_ARG.name, signers.as_deref())?;
signers.as_deref(),
)?)
} else {
None
};
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::DeactivateStake { command: CliCommand::DeactivateStake {
@@ -580,7 +428,7 @@ pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliComma
stake_authority, stake_authority,
sign_only, sign_only,
signers, signers,
blockhash, blockhash_query,
nonce_account, nonce_account,
nonce_authority, nonce_authority,
}, },
@@ -592,15 +440,8 @@ pub fn parse_stake_withdraw_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap(); let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap(); let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?; let lamports = required_lamports_from(matches, "amount", "unit")?;
let withdraw_authority = if matches.is_present(WITHDRAW_AUTHORITY_ARG.name) { let withdraw_authority =
Some(SigningAuthority::new_from_matches( SigningAuthority::new_from_matches(&matches, WITHDRAW_AUTHORITY_ARG.name, None)?;
&matches,
WITHDRAW_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo { Ok(CliCommandInfo {
command: CliCommand::WithdrawStake { command: CliCommand::WithdrawStake {
@@ -637,24 +478,30 @@ pub fn process_create_stake_account(
    rpc_client: &RpcClient,
    config: &CliConfig,
    stake_account: &Keypair,
+   seed: &Option<String>,
    staker: &Option<Pubkey>,
    withdrawer: &Option<Pubkey>,
    lockup: &Lockup,
    lamports: u64,
) -> ProcessResult {
    let stake_account_pubkey = stake_account.pubkey();
+   let stake_account_address = if let Some(seed) = seed {
+       create_address_with_seed(&stake_account_pubkey, &seed, &solana_stake_program::id())?
+   } else {
+       stake_account_pubkey
+   };
    check_unique_pubkeys(
        (&config.keypair.pubkey(), "cli keypair".to_string()),
-       (&stake_account_pubkey, "stake_account_pubkey".to_string()),
+       (&stake_account_address, "stake_account".to_string()),
    )?;
-   if let Ok(stake_account) = rpc_client.get_account(&stake_account_pubkey) {
+   if let Ok(stake_account) = rpc_client.get_account(&stake_account_address) {
        let err_msg = if stake_account.owner == solana_stake_program::id() {
-           format!("Stake account {} already exists", stake_account_pubkey)
+           format!("Stake account {} already exists", stake_account_address)
        } else {
            format!(
                "Account {} already exists and is not a stake account",
-               stake_account_pubkey
+               stake_account_address
            )
        };
        return Err(CliError::BadParameter(err_msg).into());
@@ -676,18 +523,37 @@ pub fn process_create_stake_account(
        withdrawer: withdrawer.unwrap_or(config.keypair.pubkey()),
    };
-   let ixs = stake_instruction::create_account(
+   let ixs = if let Some(seed) = seed {
+       stake_instruction::create_account_with_seed(
+           &config.keypair.pubkey(), // from
+           &stake_account_address,   // to
+           &stake_account_pubkey,    // base
+           seed,                     // seed
+           &authorized,
+           lockup,
+           lamports,
+       )
+   } else {
+       stake_instruction::create_account(
            &config.keypair.pubkey(),
            &stake_account_pubkey,
            &authorized,
            lockup,
            lamports,
-   );
+       )
+   };
    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
+   let signers = if stake_account_pubkey != config.keypair.pubkey() {
+       vec![&config.keypair, stake_account] // both must sign if `from` and `to` differ
+   } else {
+       vec![&config.keypair] // when stake_account == config.keypair and there's a seed, we only need one signature
+   };
    let mut tx = Transaction::new_signed_with_payer(
        ixs,
        Some(&config.keypair.pubkey()),
-       &[&config.keypair, stake_account],
+       &signers,
        recent_blockhash,
    );
    check_account_for_fee(
@@ -696,8 +562,7 @@ pub fn process_create_stake_account(
        &fee_calculator,
        &tx.message,
    )?;
-   let result =
-       rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, stake_account]);
+   let result = rpc_client.send_and_confirm_transaction(&mut tx, &signers);
    log_instruction_custom_error::<SystemError>(result)
}
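For orientation, a minimal sketch (not part of the diff) of what the seed derivation above amounts to. The import path of create_address_with_seed is an assumption, since the file's use block is not shown in this diff; the owner argument mirrors the solana_stake_program::id() call visible in the hunk.

```rust
use solana_sdk::{pubkey::Pubkey, system_instruction::create_address_with_seed};

// Derive the same address the CLI checks and funds: a hash of
// (base pubkey, seed string, owner program id).
fn derived_stake_address(base: &Pubkey, seed: &str) -> Pubkey {
    create_address_with_seed(base, seed, &solana_stake_program::id())
        .expect("seed must not exceed the SDK's maximum seed length")
}
```

The derived account has no keypair of its own; only the base keypair (and the fee payer) need to sign, which is what the new `signers` logic above accounts for when the base and the payer are the same keypair.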
@@ -711,7 +576,7 @@ pub fn process_stake_authorize(
    authority: Option<&SigningAuthority>,
    sign_only: bool,
    signers: &Option<Vec<(Pubkey, Signature)>>,
-   blockhash: Option<Hash>,
+   blockhash_query: &BlockhashQuery,
    nonce_account: Option<Pubkey>,
    nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
@@ -721,7 +586,7 @@ pub fn process_stake_authorize(
    )?;
    let authority = authority.map(|a| a.keypair()).unwrap_or(&config.keypair);
    let (recent_blockhash, fee_calculator) =
-       get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
+       blockhash_query.get_blockhash_fee_calculator(rpc_client)?;
    let ixs = vec![stake_instruction::authorize(
        stake_account_pubkey, // stake account to update
        &authority.pubkey(),  // currently authorized
@@ -777,12 +642,12 @@ pub fn process_deactivate_stake_account(
    stake_authority: Option<&SigningAuthority>,
    sign_only: bool,
    signers: &Option<Vec<(Pubkey, Signature)>>,
-   blockhash: Option<Hash>,
+   blockhash_query: &BlockhashQuery,
    nonce_account: Option<Pubkey>,
    nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
    let (recent_blockhash, fee_calculator) =
-       get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
+       blockhash_query.get_blockhash_fee_calculator(rpc_client)?;
    let stake_authority = stake_authority
        .map(|a| a.keypair())
        .unwrap_or(&config.keypair);
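The BlockhashQuery type referenced throughout these hunks is defined elsewhere in the CLI crate. Based only on how it is constructed and queried in this diff, it plausibly has roughly the following shape; this is a hedged sketch, not the actual definition.

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash};

pub enum BlockhashQuery {
    // Fully offline: the caller supplied both the blockhash and the fee data,
    // so no RPC round trip is needed (e.g. --blockhash together with --sign-only).
    None(Hash, FeeCalculator),
    // The caller pinned a blockhash; fee data still comes from the cluster.
    FeeCalculator(Hash),
    // Nothing supplied: fetch both from the cluster.
    All,
}

impl Default for BlockhashQuery {
    fn default() -> Self {
        BlockhashQuery::All
    }
}

impl BlockhashQuery {
    pub fn get_blockhash_fee_calculator(
        &self,
        rpc_client: &RpcClient,
    ) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
        match self {
            Self::None(hash, fee_calculator) => Ok((*hash, fee_calculator.clone())),
            Self::FeeCalculator(hash) => {
                let (_, fee_calculator) = rpc_client.get_recent_blockhash()?;
                Ok((*hash, fee_calculator))
            }
            Self::All => Ok(rpc_client.get_recent_blockhash()?),
        }
    }
}
```

The tests further down exercise exactly these three cases: `--blockhash` with `--sign-only` maps to `None(blockhash, FeeCalculator::default())`, `--blockhash` alone maps to `FeeCalculator(blockhash)`, and omitting the flag maps to the default.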
@@ -867,65 +732,25 @@ pub fn process_withdraw_stake(
    log_instruction_custom_error::<StakeError>(result)
}

-pub fn process_redeem_vote_credits(
-    rpc_client: &RpcClient,
-    config: &CliConfig,
-    stake_account_pubkey: &Pubkey,
-    vote_account_pubkey: &Pubkey,
-) -> ProcessResult {
-    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
-    let ixs = vec![stake_instruction::redeem_vote_credits(
-        stake_account_pubkey,
-        vote_account_pubkey,
-    )];
-    let mut tx = Transaction::new_signed_with_payer(
-        ixs,
-        Some(&config.keypair.pubkey()),
-        &[&config.keypair],
-        recent_blockhash,
-    );
-    check_account_for_fee(
-        rpc_client,
-        &config.keypair.pubkey(),
-        &fee_calculator,
-        &tx.message,
-    )?;
-    let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
-    log_instruction_custom_error::<StakeError>(result)
-}
-
-pub fn process_show_stake_account(
-    rpc_client: &RpcClient,
-    _config: &CliConfig,
-    stake_account_pubkey: &Pubkey,
-    use_lamports_unit: bool,
-) -> ProcessResult {
-    let stake_account = rpc_client.get_account(stake_account_pubkey)?;
-    if stake_account.owner != solana_stake_program::id() {
-        return Err(CliError::RpcRequestError(format!(
-            "{:?} is not a stake account",
-            stake_account_pubkey
-        ))
-        .into());
-    }
+pub fn print_stake_state(stake_lamports: u64, stake_state: &StakeState, use_lamports_unit: bool) {
    fn show_authorized(authorized: &Authorized) {
        println!("authorized staker: {}", authorized.staker);
-       println!("authorized withdrawer: {}", authorized.staker);
+       println!("authorized withdrawer: {}", authorized.withdrawer);
    }
    fn show_lockup(lockup: &Lockup) {
        println!("lockup epoch: {}", lockup.epoch);
        println!("lockup custodian: {}", lockup.custodian);
    }
-   match stake_account.state() {
-       Ok(StakeState::Stake(
+   match stake_state {
+       StakeState::Stake(
            Meta {
                authorized, lockup, ..
            },
            stake,
-       )) => {
+       ) => {
            println!(
                "total stake: {}",
-               build_balance_message(stake_account.lamports, use_lamports_unit, true)
+               build_balance_message(stake_lamports, use_lamports_unit, true)
            );
            println!("credits observed: {}", stake.credits_observed);
            println!(
@@ -951,16 +776,40 @@ pub fn process_show_stake_account(
            }
            show_authorized(&authorized);
            show_lockup(&lockup);
-           Ok("".to_string())
        }
-       Ok(StakeState::RewardsPool) => Ok("Stake account is a rewards pool".to_string()),
-       Ok(StakeState::Uninitialized) => Ok("Stake account is uninitialized".to_string()),
-       Ok(StakeState::Initialized(Meta {
+       StakeState::RewardsPool => println!("stake account is a rewards pool"),
+       StakeState::Uninitialized => println!("stake account is uninitialized"),
+       StakeState::Initialized(Meta {
            authorized, lockup, ..
-       })) => {
-           println!("Stake account is undelegated");
+       }) => {
+           println!(
+               "total stake: {}",
+               build_balance_message(stake_lamports, use_lamports_unit, true)
+           );
+           println!("stake account is undelegated");
            show_authorized(&authorized);
            show_lockup(&lockup);
+       }
+   }
+}
+
+pub fn process_show_stake_account(
+    rpc_client: &RpcClient,
+    _config: &CliConfig,
+    stake_account_pubkey: &Pubkey,
+    use_lamports_unit: bool,
+) -> ProcessResult {
+    let stake_account = rpc_client.get_account(stake_account_pubkey)?;
+    if stake_account.owner != solana_stake_program::id() {
+        return Err(CliError::RpcRequestError(format!(
+            "{:?} is not a stake account",
+            stake_account_pubkey
+        ))
+        .into());
+    }
+   match stake_account.state() {
+       Ok(stake_state) => {
+           print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
            Ok("".to_string())
        }
        Err(err) => Err(CliError::RpcRequestError(format!(
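A hedged sketch of why the printing logic was split out of process_show_stake_account: any caller that already holds an Account can reuse print_stake_state directly. The import paths and the StateMut bound are assumptions here; stake.rs's own use block is not shown in this diff.

```rust
use solana_sdk::{account::Account, account_utils::StateMut};
use solana_stake_program::stake_state::StakeState;

fn print_if_stake_account(account: &Account, use_lamports_unit: bool) {
    // Deserialize the account data; non-stake data simply fails to decode.
    let state: Result<StakeState, _> = account.state();
    match state {
        Ok(stake_state) => print_stake_state(account.lamports, &stake_state, use_lamports_unit),
        Err(_) => println!("account data does not deserialize as a stake state"),
    }
}
```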
@@ -1014,7 +863,7 @@ pub fn process_delegate_stake(
    force: bool,
    sign_only: bool,
    signers: &Option<Vec<(Pubkey, Signature)>>,
-   blockhash: Option<Hash>,
+   blockhash_query: &BlockhashQuery,
    nonce_account: Option<Pubkey>,
    nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
@@ -1067,7 +916,7 @@ pub fn process_delegate_stake(
    }
    let (recent_blockhash, fee_calculator) =
-       get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
+       blockhash_query.get_blockhash_fee_calculator(rpc_client)?;
    let ixs = vec![stake_instruction::delegate_stake(
        stake_account_pubkey,
@@ -1119,7 +968,11 @@ pub fn process_delegate_stake(
mod tests {
    use super::*;
    use crate::cli::{app, parse_command};
-   use solana_sdk::signature::{read_keypair_file, write_keypair};
+   use solana_sdk::{
+       fee_calculator::FeeCalculator,
+       hash::Hash,
+       signature::{read_keypair_file, write_keypair},
+   };
    use tempfile::NamedTempFile;

    fn make_tmp_file() -> (String, NamedTempFile) {
@@ -1157,7 +1010,7 @@ mod tests {
                    authority: None,
                    sign_only: false,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::default(),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1183,7 +1036,7 @@ mod tests {
                    authority: Some(read_keypair_file(&authority_keypair_file).unwrap().into()),
                    sign_only: false,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::default(),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1191,11 +1044,15 @@ mod tests {
            }
        );
        // Test Authorize Subcommand w/ sign-only
+       let blockhash = Hash::default();
+       let blockhash_string = format!("{}", blockhash);
        let test_authorize = test_commands.clone().get_matches_from(vec![
            "test",
            &subcommand,
            &stake_account_string,
            &stake_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--sign-only",
        ]);
        assert_eq!(
@@ -1208,7 +1065,7 @@ mod tests {
                    authority: None,
                    sign_only: true,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1224,6 +1081,8 @@ mod tests {
            &subcommand,
            &stake_account_string,
            &stake_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--signer",
            &signer,
        ]);
@@ -1237,7 +1096,7 @@ mod tests {
                    authority: None,
                    sign_only: false,
                    signers: Some(vec![(keypair.pubkey(), sig)]),
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1253,6 +1112,8 @@ mod tests {
            &subcommand,
            &stake_account_string,
            &stake_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--signer",
            &signer,
            "--signer",
@@ -1268,7 +1129,7 @@ mod tests {
                    authority: None,
                    sign_only: false,
                    signers: Some(vec![(keypair.pubkey(), sig), (keypair2.pubkey(), sig2),]),
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1276,8 +1137,6 @@ mod tests {
            }
        );
        // Test Authorize Subcommand w/ blockhash
-       let blockhash = Hash::default();
-       let blockhash_string = format!("{}", blockhash);
        let test_authorize = test_commands.clone().get_matches_from(vec![
            "test",
            &subcommand,
@@ -1296,7 +1155,7 @@ mod tests {
                    authority: None,
                    sign_only: false,
                    signers: None,
-                   blockhash: Some(blockhash),
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1331,7 +1190,7 @@ mod tests {
                    authority: None,
                    sign_only: false,
                    signers: None,
-                   blockhash: Some(blockhash),
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: Some(nonce_account_pubkey),
                    nonce_authority: Some(nonce_authority_keypair.into()),
                },
@@ -1389,6 +1248,7 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::CreateStakeAccount {
                    stake_account: stake_account_keypair.into(),
+                   seed: None,
                    staker: Some(authorized),
                    withdrawer: Some(authorized),
                    lockup: Lockup {
@@ -1421,6 +1281,7 @@ mod tests {
            CliCommandInfo {
                command: CliCommand::CreateStakeAccount {
                    stake_account: stake_account_keypair.into(),
+                   seed: None,
                    staker: None,
                    withdrawer: None,
                    lockup: Lockup::default(),
@@ -1449,7 +1310,7 @@ mod tests {
                    force: false,
                    sign_only: false,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::default(),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1482,7 +1343,7 @@ mod tests {
                    force: false,
                    sign_only: false,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::default(),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1508,7 +1369,7 @@ mod tests {
                    force: true,
                    sign_only: false,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::default(),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1537,7 +1398,7 @@ mod tests {
                    force: false,
                    sign_only: false,
                    signers: None,
-                   blockhash: Some(blockhash),
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1550,6 +1411,8 @@ mod tests {
            "delegate-stake",
            &stake_account_string,
            &vote_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--sign-only",
        ]);
        assert_eq!(
@@ -1562,7 +1425,7 @@ mod tests {
                    force: false,
                    sign_only: true,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1579,6 +1442,8 @@ mod tests {
            "delegate-stake",
            &stake_account_string,
            &vote_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--signer",
            &signer1,
        ]);
@@ -1592,7 +1457,7 @@ mod tests {
                    force: false,
                    sign_only: false,
                    signers: Some(vec![(key1, sig1)]),
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1609,6 +1474,8 @@ mod tests {
            "delegate-stake",
            &stake_account_string,
            &vote_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--signer",
            &signer1,
            "--signer",
@@ -1624,7 +1491,7 @@ mod tests {
                    force: false,
                    sign_only: false,
                    signers: Some(vec![(key1, sig1), (key2, sig2)]),
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1698,7 +1565,7 @@ mod tests {
                    stake_authority: None,
                    sign_only: false,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::default(),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1726,7 +1593,7 @@ mod tests {
                    ),
                    sign_only: false,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::default(),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1752,7 +1619,7 @@ mod tests {
                    stake_authority: None,
                    sign_only: false,
                    signers: None,
-                   blockhash: Some(blockhash),
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1764,6 +1631,8 @@ mod tests {
            "test",
            "deactivate-stake",
            &stake_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--sign-only",
        ]);
        assert_eq!(
@@ -1774,7 +1643,7 @@ mod tests {
                    stake_authority: None,
                    sign_only: true,
                    signers: None,
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::None(blockhash, FeeCalculator::default()),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1790,6 +1659,8 @@ mod tests {
            "test",
            "deactivate-stake",
            &stake_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--signer",
            &signer1,
        ]);
@@ -1801,7 +1672,7 @@ mod tests {
                    stake_authority: None,
                    sign_only: false,
                    signers: Some(vec![(key1, sig1)]),
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },
@@ -1817,6 +1688,8 @@ mod tests {
            "test",
            "deactivate-stake",
            &stake_account_string,
+           "--blockhash",
+           &blockhash_string,
            "--signer",
            &signer1,
            "--signer",
@@ -1830,7 +1703,7 @@ mod tests {
                    stake_authority: None,
                    sign_only: false,
                    signers: Some(vec![(key1, sig1), (key2, sig2)]),
-                   blockhash: None,
+                   blockhash_query: BlockhashQuery::FeeCalculator(blockhash),
                    nonce_account: None,
                    nonce_authority: None,
                },


@@ -7,7 +7,7 @@ use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::rpc_client::RpcClient;
use solana_sdk::signature::Keypair;
use solana_sdk::{
-   account_utils::State, message::Message, pubkey::Pubkey, signature::KeypairUtil,
+   account_utils::StateMut, message::Message, pubkey::Pubkey, signature::KeypairUtil,
    system_instruction::SystemError, transaction::Transaction,
};
use solana_storage_program::storage_instruction::{self, StorageAccountType};
@@ -81,8 +81,9 @@ impl StorageSubCommands for App<'_, '_> {
                ),
            )
            .subcommand(
-               SubCommand::with_name("show-storage-account")
+               SubCommand::with_name("storage-account")
                    .about("Show the contents of a storage account")
+                   .alias("show-storage-account")
                    .arg(
                        Arg::with_name("storage_account_pubkey")
                            .index(1)


@@ -3,8 +3,8 @@ use crate::{
    display::println_name_value,
};
use bincode::deserialize;
-use clap::{App, Arg, ArgMatches, SubCommand};
-use reqwest::Client;
+use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
+use reqwest::blocking::Client;
use serde_derive::{Deserialize, Serialize};
use serde_json::{Map, Value};
@@ -151,6 +151,7 @@ impl ValidatorInfoSubCommands for App<'_, '_> {
    self.subcommand(
        SubCommand::with_name("validator-info")
            .about("Publish/get Validator info on Solana")
+           .setting(AppSettings::SubcommandRequiredElseHelp)
            .subcommand(
                SubCommand::with_name("publish")
                    .about("Publish Validator info on Solana")
