Compare commits

...

414 Commits

Author SHA1 Message Date
08569c81e9 getVoteAccounts RPC API no longer returns "idle" vote accounts (#7339) (#7341)
automerge
2019-12-06 18:13:46 -08:00
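To illustrate the RPC method affected by the entry above, here is a minimal sketch of a getVoteAccounts request; the localhost:8899 endpoint is a placeholder and the exact response fields depend on the release in question.

```bash
# Hypothetical local RPC endpoint; adjust host/port for your node.
curl -s http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc": "2.0", "id": 1, "method": "getVoteAccounts"}
'
# Per the commit above, vote accounts the node classifies as "idle" are no
# longer included in the returned lists.
```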
3ba89f8363 Add more pool tokens (#7338) (#7340)
automerge

(cherry picked from commit 8a908a6864)
2019-12-06 18:04:26 -07:00
9161dbc08e Fix typo (#7336) (#7337)
(cherry picked from commit 2d6ed7142f)
2019-12-06 16:50:33 -07:00
a1b2fa295a cli: Confirm recovered pubkeys (#7316) (#7321)
automerge
2019-12-06 13:34:32 -08:00
4f33eaa9dd Increase signature confirmation timeout to fix wallet sanity (#7283) (#7332)
automerge
2019-12-06 13:29:33 -08:00
3718bab078 Add spare validator accounts (#7330)
automerge
2019-12-06 11:55:24 -08:00
dfc48705a4 more genesis (#7291) 2019-12-06 10:50:26 -07:00
f59115b503 bs58 (#7252) 2019-12-06 10:49:14 -07:00
cac467118e Update name 2019-12-06 10:15:51 -07:00
d0718075a7 Add pools (#7324) 2019-12-06 09:27:00 -07:00
ad55cc79b3 Add verify of keypair (#7301) (#7322)
automerge
2019-12-06 08:26:33 -08:00
5111cc10ca Add ChainFlow ValidatorInfo 2019-12-06 09:23:20 -07:00
a1736606dc Fail fast if account paths cannot be canonicalized (#7300) (#7315)
automerge
2019-12-05 19:45:39 -08:00
bae659b9c7 Add docs for using a paper wallet with solana cli (#7311) (#7317)
automerge
2019-12-05 19:38:18 -08:00
c480c2225d Add RockX ValidatorInfo (#7310) (#7312)
automerge
2019-12-05 18:55:29 -08:00
52771c472e Add ChorusOne ValidatorInfo (#7306) (#7308)
automerge
2019-12-05 15:31:12 -08:00
5ce21827c8 Only serialize rooted append vecs (#7281) (#7307)
automerge
2019-12-05 15:02:55 -08:00
a2c4a70fbf Canonicalize paths before symlink-ing when generating snapshots (#7294) (#7299)
automerge
2019-12-05 12:34:42 -08:00
d6e5f78834 custodian signs withdraw (#7286) (#7290)
automerge
2019-12-04 21:57:47 -08:00
74eb408460 vote update node_id (#7253) (#7285)
automerge
2019-12-04 18:24:00 -08:00
a4c6576ba4 Import validators (#7282) (#7284)
automerge
2019-12-04 18:02:00 -08:00
1fcc391a8d Fix typo, grammar, and formatting in Paper Wallet documentation (#7268) (#7271)
automerge
2019-12-04 13:19:16 -08:00
2970f960a4 Sanitize whitespace in seed phrase input (#7260) (#7267)
automerge
2019-12-04 12:32:28 -08:00
d06bea7fb2 genesis validators (#7235) (#7256)
* genesis validators (#7235)

* genesis validators

* slp1 nodes get 500SOL

* no commission

* clippy
2019-12-04 11:34:21 -08:00
45a57e8513 Use wrappable code snippet for paper wallet installation (#7261) (#7262)
automerge
2019-12-04 09:33:49 -08:00
3622e513aa make tx fee's burn percent in proper range (#7226) (#7228)
automerge
2019-12-04 03:11:25 -08:00
c4e1faa853 genesis config hashmaps (#7107) (#7255)
automerge
2019-12-03 23:44:24 -08:00
905428bee6 Allow generation of longer seed phrases with keygen (#7210) (#7249)
automerge
2019-12-03 21:56:06 -08:00
9596e7772c commission as percent (#7239) (#7251)
automerge
2019-12-03 21:42:01 -08:00
5294fe6292 Remove extra installation options for paper wallet (#7245) (#7247)
automerge
2019-12-03 20:12:57 -08:00
571cf53827 Add Paper Wallet Installation page to sidebar (#7242) (#7243) 2019-12-03 22:13:20 -05:00
35ae76532a Use procedural macro to generate static public keys (bp #7219) (#7241)
automerge
2019-12-03 19:07:19 -08:00
57dce86d5e Update paper wallet documentation (#7223) (#7237)
automerge
2019-12-03 17:32:55 -08:00
797cb01bb8 enforce proper range for rent burn_percent (#7217) (#7224)
automerge
2019-12-03 11:59:16 -08:00
9eded7a227 Prevent passphrase mistakes with confirmation prompt (#7207) (#7211)
(cherry picked from commit b874441a47)
2019-12-03 11:48:13 -07:00
a8d32103d1 Ensure IpEchoServerMessage is not fragmented (#7214) (#7215)
automerge
2019-12-02 23:00:56 -08:00
49d4925856 Fix typo (#7202) (#7205)
automerge
2019-12-02 19:26:42 -08:00
f5fad5b43d Correctly parse ip echo server response and fix broken test (#7196) (#7200)
automerge
2019-12-02 18:11:10 -08:00
4c40f9dbc9 Drop default signature fee by 10x (#7192) (#7193)
automerge
2019-12-02 14:17:37 -08:00
17db734783 Improve error handling when the user mixes up gossip (8001) and RPC (8899) ports (#7158) (#7184)
automerge
2019-12-02 11:52:57 -08:00
6ce9f97254 More conservative purge_zero_lamport_accounts purge logic (#7157) (#7190)
automerge
2019-12-02 11:46:46 -08:00
1688dd6b5c Add Paper Wallet documentation to the book (#7147) (#7161)
automerge
2019-11-26 21:11:18 -08:00
07ffcab857 Update cargo.toml file versions to 0.21.1 (#7156) 2019-11-26 19:11:07 -05:00
de6cf6b7e3 solana-keygen: Support pubkey recovery directly from seed phrase (#7149) (#7150)
automerge
2019-11-26 13:16:48 -08:00
32cf04c77d Ensure beta/stable testnets use public IPs 2019-11-26 11:23:44 -07:00
96df4c772f Add getBlockTime rpc api (#7130) (#7140)
automerge
2019-11-26 00:10:59 -08:00
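A minimal sketch of calling the new getBlockTime method introduced above; the endpoint and slot number are placeholders, and the null-on-unknown behavior is an assumption based on later documentation of the method.

```bash
# Ask for the estimated production time (Unix timestamp) of a given slot.
curl -s http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc": "2.0", "id": 1, "method": "getBlockTime", "params": [5000]}
'
# Returns a timestamp, or null when the node cannot estimate one.
```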
640c2f88bd mut 2019-11-25 22:49:39 -07:00
82f78a5610 keygen: Support not writing keypairs to disk (#7136) (#7138)
* keygen: Add flag to prevent new from writing keypair to disk

* check_for_overwrite bails, do it before prompts

(cherry picked from commit 506ff5809e)
2019-11-25 22:46:46 -07:00
cf8f8afbc6 Add offline signing support to CLI (#7104) (#7137)
automerge
2019-11-25 21:45:37 -08:00
e6bc92f6c9 Stop open measurement before logging it 2019-11-25 22:20:54 -07:00
eaa3e87eb0 Support passphrases in keygen (#7134)
* Support passphrases in keygen

* remove short

* Update solana_keygen calls
2019-11-25 21:33:15 -07:00
9b3a1a99e5 Update backport labels 2019-11-25 21:24:41 -07:00
76a68c26c9 Track a Bank's parent slot independently from parent bank (#7131) 2019-11-25 15:34:51 -08:00
ef64f00cbb Revert "Revert "add genesis stake placeholders (#6969)" (#7109)" (#7124)
This reverts commit 702f7cc51d.
2019-11-25 15:11:55 -08:00
acbe89a159 shrink stakes (#7122) 2019-11-25 13:14:32 -08:00
0f66e5e49b Add getConfirmedBlock test to rpc (#7120)
automerge
2019-11-25 11:08:03 -08:00
686aa3a150 Bump chrono from 0.4.9 to 0.4.10 (#7113)
automerge
2019-11-25 10:01:46 -08:00
d8bc828839 Colo: Refactor remote command dispatch for create and delete (#7092)
* Colo: Dump escaping mess in remote script templates

* Colo: Rename script templates so shellcheck can get 'em

* shellcheck and nits

* Brace all of the things

* Consistent heredoc tags

* Use bash built-in square bracketing consistently

* simplify logic
2019-11-25 10:32:17 -07:00
094c391cd7 Bump itertools from 0.8.1 to 0.8.2 (#7111)
Bumps [itertools](https://github.com/bluss/rust-itertools) from 0.8.1 to 0.8.2.
- [Release notes](https://github.com/bluss/rust-itertools/releases)
- [Commits](https://github.com/bluss/rust-itertools/commits/v0.8.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-25 10:22:47 -07:00
c8491724b4 Bump num-traits from 0.2.9 to 0.2.10 (#7096)
Bumps [num-traits](https://github.com/rust-num/num-traits) from 0.2.9 to 0.2.10.
- [Release notes](https://github.com/rust-num/num-traits/releases)
- [Changelog](https://github.com/rust-num/num-traits/blob/master/RELEASES.md)
- [Commits](https://github.com/rust-num/num-traits/compare/num-traits-0.2.9...num-traits-0.2.10)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-25 10:22:10 -07:00
d5beb8a9e4 cli: Add --confirmed option to a couple commands, also add --no-header (#7112)
* Add --confirmed option to get-slot, get-epoch-info, get-transaction-count

* Add --no-header option
2019-11-24 17:34:18 -07:00
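As a usage illustration for the CLI entry above; the flag and subcommand names are taken from the commit message, everything else is assumed.

```bash
# Request values at a confirmed commitment level rather than the default;
# the exact commitment mapping is not spelled out in this log.
solana get-slot --confirmed
solana get-epoch-info --confirmed
solana get-transaction-count --confirmed
# The same change also adds a --no-header option (the commit does not say
# which subcommands accept it).
```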
702f7cc51d Revert "add genesis stake placeholders (#6969)" (#7109)
* Revert "add genesis stake placeholders (#6969)"

This reverts commit 8a879faac7.

* fixup! Revert "add genesis stake placeholders (#6969)"

* fixup! fixup! Revert "add genesis stake placeholders (#6969)"

* fixup! fixup! fixup! Revert "add genesis stake placeholders (#6969)"

* fixup! fixup! fixup! fixup! Revert "add genesis stake placeholders (#6969)"

* fmt
2019-11-23 23:15:21 -07:00
b8cd0a1bc0 Allow secure keypair input for solana-archiver and solana cli tools (#7106)
* Add seed phrase keypair recover to archiver

* Add seed phrase keypair to cli with ASK keyword

* cli main tweaks
2019-11-23 11:55:43 -05:00
7f87ac4b65 Improve coverage.sh's environment awareness (#7101)
* Improve coverage.sh's environment awareness

* Move version check into ci/rust-version.sh

* Embrace bashism
2019-11-23 14:53:39 +09:00
306fbd8bd8 install: Drop unneeded sha2 dependency (#7108)
* Poll for updates slower

* Drop sha2 dependency
2019-11-22 21:58:26 -07:00
3e0b272a20 Remove edge channel hardcode 2019-11-22 20:34:49 -07:00
6c89226ccf Purge zero lamport accounts on snapshot ingestion (#7010)
Snapshots do not load the original index, so they must
purge zero lamport accounts again.
2019-11-22 18:22:28 -08:00
f040987c9f Move date oracle to config program (#7105)
automerge
2019-11-22 15:10:53 -08:00
2a42ddbcbf Don't panic if pubkeys are missing from Budget transaction (#7102) 2019-11-22 14:34:50 -07:00
8bb68c4e6a Really remove mentions of 'genesis_block' (#7099) 2019-11-23 05:58:20 +09:00
4485b978c1 Clean up accounts hash internal state api (#7090) 2019-11-22 08:56:00 -08:00
68bad56e7d Streamline multinode-demo/ restart logic (#7094)
* bootstrap-leader.sh will now restart the node automatically by default
* Streamline validator restart
2019-11-22 09:44:16 -07:00
ef55c15537 Remove unused --poll-for-new-genesis-config feature (#7093)
automerge
2019-11-22 08:12:08 -08:00
ce8d37984d Allow secure keypair input for solana-validator cli (#7080)
* Allow secure keypair input for solana-validator cli

* feedback

* Add --skip-mnemonic-validation

* Update --identity to --identity-keypair

* Use struct instead of tuple

* Fix dependencies

* cargo fmt

* Add basic tests

* Use `seed phrase` instead of `mnemonic`

* Update passphrase prompt
2019-11-22 10:20:40 -05:00
c8166aed97 Correctly indicate genesis activation_epoch (#7091)
* Correctly indicate genesis activation_epoch

* Drop the '(Genesis)'
2019-11-22 15:35:02 +09:00
0bd41f98ed Avoid jemalloc in windows build (#7089)
automerge
2019-11-21 18:39:29 -08:00
d8ead57fbb Use bs58 strings to declare IDs rather then raw bytes (#7082) 2019-11-21 16:34:40 -08:00
d9e7a5fcbe Use fork weight instead of individual bank weight for fork selection. (#7079)
* Fix weight calculation

* Fix tests

* fork weight

* wait until nodes are in the leader schedule

* enable sanity

* fewer long tests
2019-11-21 15:47:08 -08:00
c965a110f2 Use unbounded channel (#7081) 2019-11-21 14:23:40 -07:00
8a879faac7 add genesis stake placeholders (#6969)
* add investor stake placeholders

fixups

fixups

review comments, fixups

make more data-looky for easier management

rent may be zero

rework with more tables, derived keys

fixups

rebase-fix

fixups

fixups

* genesis is now too big to boot in 10 seconds
2019-11-21 12:05:31 -08:00
a2a9f1e331 Truncate new keypair files (#7078)
automerge
2019-11-21 10:02:04 -08:00
15d7568038 Bump cbindgen from 0.9.1 to 0.10.0 (#7044)
Bumps [cbindgen](https://github.com/eqrion/cbindgen) from 0.9.1 to 0.10.0.
- [Release notes](https://github.com/eqrion/cbindgen/releases)
- [Changelog](https://github.com/eqrion/cbindgen/blob/v0.10.0/CHANGES)
- [Commits](https://github.com/eqrion/cbindgen/compare/v0.9.1...v0.10.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-21 10:58:04 -07:00
8cbc450192 Create genesis.tar.bz2 in solana-genesis (#7039)
* Use clap_utils

* Create genesis.tar.bz2 in solana-genesis

* Remove shell-based genesis.tar.bz2 generation

* Make Option=>Result conv more rusty

* stop using solana_logger

* Simplify by just using vec!

* clean up abit
2019-11-21 10:57:27 -07:00
79199711b8 Add gpu resource usage tracking (#7075) 2019-11-21 08:33:02 -08:00
2c1b8fdd39 Add another test for bank state hashes (#7073)
automerge
2019-11-20 23:03:42 -08:00
d9024db68d Fix publish of move program (#7072) 2019-11-20 20:33:49 -08:00
96dd044f8e Allow vest's terminator to recapture tokens (#7071)
* Allow vest's terminator to recapture tokens

* Less code

* Add a VestAll instruction

The terminator may decide it's impractical to maintain a vest
contract and want to make all tokens immediately redeemable.
2019-11-20 19:33:17 -07:00
e66b29943b datapoint for best fork weight and slot in replay (#7066) 2019-11-20 17:26:52 -08:00
100b9dd12a Fix num nodes metrics (#7068)
* Fix num nodes metric

* Fix node count metrics
2019-11-20 17:00:31 -08:00
3415db9739 Merge api/program into single units (#7061) 2019-11-20 16:32:19 -08:00
186bf7ae32 Plumb --gossip-host arg 2019-11-20 16:57:24 -07:00
97ca6858b7 Write transaction status and fee into persistent store (#7030)
* Pass blocktree into execute_batch, if persist_transaction_status

* Add validator arg to enable persistent transaction status store

* Pass blocktree into banking_stage, if persist_transaction_status

* Add validator params to bash scripts

* Expose actual transaction statuses outside Bank; add tests

* Fix benches

* Offload transaction status writes to a separate thread

* Enable persistent transaction status along with rpc service

* nudge

* Review comments
2019-11-20 16:43:10 -07:00
ee6b11d36d Remove ability to deploy custom programs (#7070)
automerge
2019-11-20 15:37:42 -08:00
f58fef60fb Fix program copy 2019-11-20 15:56:00 -07:00
a76eb64bbb Copy all programs when starting a network (#7069) 2019-11-20 14:37:15 -08:00
8590326b50 Book: Add proposal for durable transaction nonces (#6725)
automerge
2019-11-20 14:33:02 -08:00
b0271394cd Clean up --gossip-port argument (#7067)
--gossip-port now specifies exactly that: the gossip port to use. The new
--gossip-host argument can be used to specify the DNS name/IP address for
gossip if --entrypoint is not supplied (when --entrypoint is supplied, the
gossip address is automatically set to the node's IP address as observed by
the entrypoint).
2019-11-20 15:21:34 -07:00
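A hedged sketch of the two modes described above; --gossip-host and --gossip-port follow the commit text, while the identity/ledger flags, hostnames, and addresses are placeholders that may not match this exact release.

```bash
# With an entrypoint: the advertised gossip IP is learned from the entrypoint,
# so only the port needs to be pinned.
solana-validator \
  --identity ~/validator-keypair.json \
  --ledger ~/validator-ledger \
  --entrypoint entrypoint.example.com:8001 \
  --gossip-port 8001

# Without an entrypoint: state the gossip host explicitly.
solana-validator \
  --identity ~/validator-keypair.json \
  --ledger ~/validator-ledger \
  --gossip-host 203.0.113.10 \
  --gossip-port 8001
```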
c39633f968 nit: Circular dependency error is hard to read (#7065) 2019-11-20 13:13:22 -08:00
1fef74b00c Fix solana-keygen new --force ... (#7064)
automerge
2019-11-20 12:46:16 -08:00
9f6a2e51b2 add credit-debit rent handling (#6947)
* add credit-debit rent handling

* add tests

* charge rent for validator account for fee credit

* rent is stored per tx instead of account
2019-11-21 01:57:02 +05:30
b150da837a Use epoch as the gossip purge timeout for staked nodes. (#7005)
automerge
2019-11-20 11:25:18 -08:00
ba9aaee7cd Update config.rs (#7045)
automerge
2019-11-20 11:10:46 -08:00
3aa67969f9 Add perf module to stable-perf CI (#7060) 2019-11-20 10:59:56 -08:00
d4f336db40 Fix unpin argument (#7057)
automerge
2019-11-20 10:22:26 -08:00
d184d3a732 Merge native programs parts into one unit (#7047) 2019-11-20 10:12:43 -08:00
42da1ce4e2 Fix bank hash not changing when no internal state has changed (#7052)
* Fix bank hash not changing when no internal state has changed

* Fix unnecessary call to hash_internal_state

* Add blockhash into the bank_hash

* Add blockhash into the bank_hash and update tests

* Refactor accounts_db slot_hashes

* More clarity in comments

* Add clippy suggestion

* Grammar

* Fix compile after clippy made me break it

* Schooled by clippy
2019-11-19 20:19:43 -08:00
d2ed921bc6 Cleanup nightly warnings (#7055) 2019-11-19 20:15:37 -08:00
d32a072190 Use ticks_per_slot to calculate maximum grace ticks (#7024)
* Use ticks_per_slot to calculate maximum grace ticks

* fix test

* fix votable candidate ordering

* fixes to pick_best_fork() and a unit test

* fixes
2019-11-19 17:55:42 -08:00
95c137158f Fix gce.sh info (#7054)
automerge
2019-11-19 17:49:25 -08:00
7151b92239 Don't create keypair files with r+go (#7051) 2019-11-19 18:26:21 -07:00
716caeb17c Use camelCase (#7050)
automerge
2019-11-19 14:55:32 -08:00
f8e4bdd23d --bootstrap-storage-pubkey is now optional (#7049)
automerge
2019-11-19 14:35:56 -08:00
55dfd03007 wrong calculation (#7028)
The matcher takes 2 B tokens as profit because the price difference amounts to (7-6)*2B = 2B
2019-11-19 14:47:29 -07:00
854fc8d552 Add getConfirmedBlock to json-rpc docs (#7046) 2019-11-19 14:00:15 -07:00
f2badf2c5d Fix a bug where gossip loops forever while splitting messages (#7032)
* Fix a bug where gossip loops forever while splitting messages

* Get rid of while loop

* Minor clean up and rename
2019-11-19 11:51:51 -08:00
ea656b1a3f Add parent slot to getConfirmedBlock (#7038)
* Add parent slot to getConfirmedBlock

* Fix bad text-replace

* Use camelCase in getConfirmedBlock
2019-11-19 09:39:55 -07:00
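A sketch of the getConfirmedBlock call this entry extends; the endpoint and slot are placeholders, and the field name parentSlot follows the camelCase convention adopted in the same change.

```bash
curl -s http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc": "2.0", "id": 1, "method": "getConfirmedBlock", "params": [430]}
'
# The response object now also carries a camelCase "parentSlot" field next to
# the blockhash and transaction details.
```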
5b7bd24f0a Remove duplicated args (#7036) 2019-11-19 09:10:54 -07:00
2d7c7b0982 Fix missed rebase on net.sh (#7037) 2019-11-19 10:22:30 -05:00
b958bf9086 Fix confirmation metrics (#7035) 2019-11-19 09:51:50 -05:00
43144cfe8b Make banks that fail threshold check resettable (#7027) 2019-11-19 02:36:30 -08:00
11d2d2eccd Fix progress map losing banks and recomputing stats (#7026)
* Fix progress map missing banks

* Fix confirmations

* Fix test

* Initialize progress with frozen banks at startup
2019-11-19 02:36:00 -08:00
e22f89853f Consider CI_TAG= to be the same as unset CI_TAG 2019-11-18 23:43:38 -07:00
7ccc029f77 Make solana ping take optional lamports argument (#7029)
* Make solana ping take optional lamports argument

* Use clap's default_value
2019-11-19 14:50:09 +09:00
0eb78e461d Relax requirement that the entrypoint node runs the RPC service (#7019) 2019-11-18 21:43:14 -07:00
3615209ce7 don't allow assignment to sysvar program (#7017)
automerge
2019-11-18 19:39:29 -08:00
6bfe0fca1f Add a version field to shreds (#7023)
* Add a version field to shreds

* Clippy

* Fix Chacha Golden

* Fix shredder bench compile

* Fix blocktree bench compile
2019-11-18 18:05:02 -08:00
bfa2535ea1 Add non-fungible token program (#7007)
* Add non-fungible token program

* Remove issuer and id from state

* Boot NftInstruction and NftState

* Rename NFT to Ownable

Maybe this should be "Owned" to avoid confusion with an Ownable trait?

* Rename directory

* Delete unreachable branch

* Don't use copy_from_slice - need an error, not a panic.

* Rename contract_pubkey to account_pubkey
2019-11-18 18:09:42 -07:00
6ec918fabb Update Move support to accommodate Libra's changes to compiler behavior (#6993) 2019-11-18 16:47:01 -08:00
cbf7c0080b fix split instruction doc (#7022) 2019-11-18 15:31:17 -08:00
a6196901de Generate net-shaper configuration from stdin, or randomly (#7021) 2019-11-18 14:47:07 -08:00
c09469fa3a Rename verify_instruction() to verify_account_changes() (#7020) 2019-11-18 15:01:14 -07:00
3acd84d9c0 Allow creating a vote program ix where the withdrawer is also the "to" account (#6992)
automerge
2019-11-18 12:43:47 -08:00
c902fd0303 skip sysvars while assessing rent (#7015)
* skip sysvars while assessing rent
2019-11-19 01:31:27 +05:30
955aaef2e6 Fixes to net-shaper and net.sh (#7002)
* Fixes to net-shaper and net.sh

* fixes to default filters and cleanup
2019-11-18 11:33:33 -08:00
e0a2bb9d86 Legitimately map transactions to statuses in blocktree (#7011)
* Refactor rocksdb TransactionStatus to store/return struct; hook up map_transactions_to_statuses

* Cleanup use statements
2019-11-18 09:12:42 -07:00
3bc8d78801 Add ConfirmedBlock struct, and rework Blocktree apis to include block… (#7004)
* Add RpcConfirmedBlock struct, and rework Blocktree apis to include blockhash info and dummy tx statuses

* Remove unused lifetime
2019-11-17 20:17:15 -07:00
b66c03667c Log for threshold failure (#7008) 2019-11-17 17:10:16 -08:00
6e04a646ba Gossip entrypoint is now option of spy not solana-gossip (#7006) 2019-11-17 11:36:24 -05:00
086e5da8d0 feat: add TransactionStatus column family and test (#6958) 2019-11-17 11:26:01 -05:00
c1b06817a2 Add non-dev value for slots_per_epoch and use that as default (#6984)
When --dev flag is not passed.
2019-11-16 20:53:54 -08:00
c3926e6af0 |solana-gossip spy| no longer requires an entrypoint (#6999) 2019-11-16 14:16:28 -07:00
70322d1ff8 Add error logging to dead slots (#7000) 2019-11-16 02:54:51 -08:00
7c32640a9b Set index and set data should write into shred data (#6995) 2019-11-16 02:41:59 -08:00
5ad09afc15 Improve run.sh for better developer experience (#6945)
* run.sh: Create genesis file for ad-hoc validators

* run.sh: Prefer release under NDEBUG

* run.sh: Add sanity test for run.sh

* run.sh: Conditionally re-gen drone and faucet keys

* Make shellcheck happy

* Address code review comments

* Clean up a bit
2019-11-16 15:56:29 +09:00
5d8c1a303e fix: update run.sh arguments to solana-genesis (#6996) 2019-11-15 23:22:21 -05:00
24b254459b Fix dev mode arg in run.sh (#6997) 2019-11-15 23:16:42 -05:00
30089841f6 Use correct faucet arg in run.sh (#6994)
automerge
2019-11-15 18:33:08 -08:00
0bee05b849 Pull TdS transaction fees to 0 2019-11-15 15:51:37 -07:00
afd9ae9999 Allow withdraws to the authorized withdrawer (#6989) 2019-11-15 17:16:24 -05:00
5ab70c4e97 genesis: rename mint account to faucet account and make it optional (#6990) 2019-11-15 14:50:26 -07:00
cab2232aba Fix System Stats script (#6985)
automerge
2019-11-15 13:25:40 -08:00
946e937549 Create development vs softlaunch environment hooks into net scripts (#6974) 2019-11-15 15:18:45 -05:00
0ca943f49b RecyclerCache for shred_sigverify (#6986)
automerge
2019-11-15 12:16:56 -08:00
b2db0b97fc Add show-gossip command (#6982) 2019-11-15 13:15:34 -07:00
d565ec7968 Fixes to net-shaper, and net.sh option to start/stop shaper (#6981)
* Fixes to net-shaper, and net.sh option to start/stop shaper

* fix shellcheck

* more shellchecks
2019-11-15 12:10:48 -08:00
36e3ccfc68 Remove pinned memory (#6976) 2019-11-15 10:58:25 -08:00
892ca196f1 Improve error message when unable to read a file (#6978) 2019-11-15 10:39:05 -07:00
59413b3124 Fix rules for fork selection (#6906)
automerge
2019-11-15 08:36:33 -08:00
e1643c91c4 Pull a fixed and working version of shellcheck docker image (#6975) 2019-11-15 10:55:25 -05:00
3ce6248f8c Add CPU and RAM usage to Metrics (#6968)
* Add CPU usage to Metrics

* Add RAM usage and rename to system-stats

* Shellcheck

* Remove SC exception

* Address review comments
2019-11-14 20:36:34 -08:00
006c39380a Display 'none' instead of 0.0.0.0 (#6973) 2019-11-14 20:24:35 -07:00
22f2247f46 Cargo.lock 2019-11-14 16:59:30 -07:00
852a2146ab Add Blocktree api to get transactions by slot (#6966)
* Add blocktree method to get confirmed-block txs

* Clean up use statements

* Add test, and fmt

* Plumb new blocktree method into getConfirmedBlock
2019-11-14 16:34:39 -07:00
99b42f210c Remove unused sha2 dep (#6964)
automerge
2019-11-14 14:01:11 -08:00
ae3c9033c1 Stop running testsuites when only the book is modified (#6956) 2019-11-14 14:36:08 -07:00
03f7f0d18c Rename getBlock to getConfirmedBlock; remove getBlocksSince (#6961)
automerge
2019-11-14 13:14:42 -08:00
79d7090867 Remove obsolete references to Blob (#6957)
* Remove the name "blob" from archivers

* Remove the name "blob" from broadcast

* Remove the name "blob" from Cluster Info

* Remove the name "blob" from Repair

* Remove the name "blob" from a bunch more places

* Remove the name "blob" from tests and book
2019-11-14 11:49:31 -08:00
e7f63cd336 Upgrade to rust 1.39.0 (#6939)
* Upgrade to rust 1.39.0

* 1.39.0 clippy
2019-11-14 12:27:01 -07:00
f108f483b7 Remove Blobs and switch to Packets (#6937)
* Remove Blobs and switch to Packets

* Fix some gossip messages not respecting MTU size

* Failure to serialize is not fatal

* Add log macros

* Remove unused extern

* Apparently macro use is required

* Explicitly scope macro

* Fix test compile
2019-11-14 10:24:53 -08:00
d6cbb02c92 Bump rocksdb from 0.12.4 to 0.13.0 (#6952)
automerge
2019-11-14 09:59:54 -08:00
42af8b199f feat: add tests for invalid/failure cases (#6951) 2019-11-14 11:41:26 -05:00
dbbd9663b2 Consolidate error messaging into result detail (#6950) 2019-11-14 11:18:38 -05:00
f4846b6fe4 Update rent.rs 2019-11-14 08:55:09 -07:00
a28a34f61c Clean up DB names in automation (#6949) 2019-11-14 10:20:10 -05:00
96d47c51a1 Tighten up AWS testcases (#6948) 2019-11-14 10:17:50 -05:00
f27c11ccd8 Add Azure testnet to automation (#6911)
* Add Azure testnet to automation
2019-11-14 09:14:53 -05:00
43e2301e2c Fix roots overrunning broadcast (#6884)
* Add trusted pathway for insert_shreds to avoid checks
2019-11-14 00:32:07 -08:00
7b05b3dbb3 rent collector improvements (#6888)
* avoid account copying + pre-empt rent

* adding support for base rent
2019-11-14 10:56:49 +05:30
c96b8c8d68 Script to run net-shaper on remote nodes (#6938)
* Script to run net-shaper on remote nodes

* fixes
2019-11-13 20:31:44 -08:00
4fc767b3f6 Move version! from core:: to clap_utils:: (#6944)
* Move version! from core to clap-utils

* Completely move version! from core:: to clap_utils::

* rustfmt

* Do remaining transition after rebase
2019-11-14 13:10:38 +09:00
cc96848b01 Remove unneeded prepare_batch() assert (#6941)
automerge
2019-11-13 17:08:21 -08:00
6009801c5f More granular timings in shred generation (#6900) 2019-11-13 16:30:12 -08:00
f116cdeed9 Add validator catchup command (#6922) 2019-11-13 15:58:14 -07:00
5f38fa379c Tool to partition network and induce packet drops/delays (#6933)
* Tool to partition network and induce packet drops/delays

* clippy fixes

* review comments
2019-11-13 13:59:55 -08:00
e2fb9ac829 feat: remove unwraps from client code, fixes #6915 (#6927) 2019-11-13 14:41:54 -07:00
f83254d760 Update Iftop command in testnet automation (#6908)
* Update iftop command
2019-11-13 14:41:42 -05:00
ee5cc733a1 Log blocktree and snapshot open times (#6930)
automerge
2019-11-13 11:20:39 -08:00
18a17cfbbf Implement Display trait (#6929) 2019-11-13 11:44:07 -07:00
a3a830e1ab Delete Service trait (#6921) 2019-11-13 11:12:09 -07:00
4b1e9ada18 Fix busted failure messaging for slack app uploading (#6928)
* Add informative failure message

* Correctly expand variable names inside failed command string
2019-11-13 13:04:14 -05:00
9026339d35 Restore is_frozen() asserts (#6925) 2019-11-13 10:40:51 -07:00
0be13a6295 Silence cargo install error in bpf script (#6926)
automerge
2019-11-13 08:57:12 -08:00
fcc2874591 Remove/address some TODOs (#6923) 2019-11-13 09:43:15 -07:00
9246bee12b feat: default 8gb hard memory limit for redis (#6913) 2019-11-13 11:09:20 -05:00
30a08f4282 Cleanup ledger macros (#6916)
automerge
2019-11-13 07:14:09 -08:00
e5c5f34f9a Make solana-validator check vote account at start (#6790)
* Make solana-validator check vote account at start

* Don't abort tests...

* Fix test breakage

* Remove extra semicolon

* Attempt to fix cluster-tests

* rustfmt

* Change behavior of vote_account ephemeral pubkeys

* save

* clean up

* clean up

* rustfmt && clippy

* Reorder for simpler diff

* Fix rebase...

* Fix message a bit

* Still more rebase fixes....

* Fix yet more

* Use find_map over filter_map & next and revert message

* More through error checks

* rustfmt & clippy

* Revert

* Revert core/src/validator.rs

* Cleanup

* Cleanup

* Cleanup

* Rebase fix

* Make clippy & rustfmt happy

* save

* Clean up

* Show rpc error detail

* Check node lamports only after pubkey matching

* rustfmt
2019-11-13 16:48:55 +09:00
361eab1bf7 Remove unused dependencies (#6917)
automerge
2019-11-12 22:00:29 -08:00
2fd2140f64 🍢banking-bench/, genesis-programs/ and local-cluster/ (#6920)
* git mv genesis_programs genesis-programs

* git mv local_cluster local-cluster

* git mv banking_bench banking-bench
2019-11-12 22:20:48 -07:00
86faa3f995 Properly type RpcClient::get_version() (#6919) 2019-11-12 22:01:04 -07:00
81acd94153 Cleanup local cluster (#6897)
* Boot integration tests from unit test build

* Move bench-tps and bench-exchange integration tests out of local_cluster

* Fix build
2019-11-12 20:30:35 -07:00
48987bed67 Bump num-traits from 0.2.8 to 0.2.9 (#6914)
Bumps [num-traits](https://github.com/rust-num/num-traits) from 0.2.8 to 0.2.9.
- [Release notes](https://github.com/rust-num/num-traits/releases)
- [Changelog](https://github.com/rust-num/num-traits/blob/master/RELEASES.md)
- [Commits](https://github.com/rust-num/num-traits/compare/num-traits-0.2.8...num-traits-0.2.9)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-12 20:27:30 -07:00
4405e8a15b Automatically run dot to generate PDFs or PNGs (#6912) 2019-11-12 20:27:15 -07:00
24cb4798bc Map all private IP to public IP for log-analyzer (#6907)
* Map all private IP to public IP for log-analyzer

* fixes

* shellcheck fixes
2019-11-12 15:48:46 -08:00
986e9e268e Revive the parallel bank client from v0.16 (#6903) 2019-11-12 15:26:21 -07:00
71bf8c5f85 Keygen grind fix and improve --ignore-case (#6901)
* keygen: grind --ignore-case was not honored

* keygen: Improve grind --ignore-case ergonomics

Don't silently require the user to know their search term needs to be lowercase

* fmt
2019-11-12 14:24:37 -07:00
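For context on the grind fix above, a hypothetical invocation is shown below; the --starts-with flag comes from a neighboring commit (#6647) and the exact argument syntax varied between releases, so treat this purely as an illustration.

```bash
# Grind for a vanity pubkey; with the fix, --ignore-case is actually honored,
# so a mixed-case search term no longer has to be lowercased by hand.
solana-keygen grind --starts-with Sol --ignore-case
```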
5a629ff387 Bump num_cpus from 1.11.0 to 1.11.1 (#6905)
Bumps [num_cpus](https://github.com/seanmonstar/num_cpus) from 1.11.0 to 1.11.1.
- [Release notes](https://github.com/seanmonstar/num_cpus/releases)
- [Changelog](https://github.com/seanmonstar/num_cpus/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/num_cpus/compare/v1.11.0...v1.11.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-12 14:24:05 -07:00
148a58865e Make creating new snapshot.tar.bz2 truly-atomic (#6902) 2019-11-12 14:21:56 -07:00
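The entry above refers to the classic write-to-a-temporary-file-then-rename technique; the sketch below shows that general pattern only (the paths and archive contents are made up, and this is not the project's actual script).

```bash
#!/usr/bin/env bash
# Build the archive under a temporary name, then atomically move it into place
# so readers never observe a partially written snapshot.tar.bz2.
set -euo pipefail
tmp="$(mktemp snapshot.tar.bz2.tmp.XXXXXX)"
tar -cjf "$tmp" -C ledger snapshots
mv -f "$tmp" snapshot.tar.bz2   # rename within one filesystem is atomic
```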
2523fa73cf Use release as default (#6896) 2019-11-12 13:39:12 -07:00
6d76c34291 Handle dead slots when loading ledger (#6887) 2019-11-12 13:38:26 -07:00
3faeb7fa79 Rename solana-netutil to solana-net-utils for consistency (#6895)
* sed -i -e 's/netutil/net_utils/g' $(git grep --files-with-matches netutil :**.rs)

* sed -i -e 's/netutil/net-utils/g' $(git grep --files-with-matches netutil)

* git mv netutil/ net-utils

* Tweak a bit

* Fix rustfmt & clippy
2019-11-12 13:37:13 -07:00
bb00904fc8 add rent reserve for bootstrap stakes (#6876)
* genesis investor stakes

* assert rent is sufficient for these bootstrappers
2019-11-12 12:33:40 -08:00
73e3fc7c4f Add packet loss analyzer to testnet automation (#6715)
* Add packet loss analyzer to testnet automation
2019-11-12 14:51:36 -05:00
5903339c17 feat: return bank/block info with block-related results (#6716) 2019-11-12 14:49:41 -05:00
2688ae614c Add public IP address option to automation (#6899)
* Add public IP address option to automation

* Make public IP use the default behavior
2019-11-12 13:55:19 -05:00
5670cafda4 Fix caching data shreds as coding shreds (#6877) 2019-11-12 10:29:58 -08:00
4bc8fd3267 Add --no-genesis-fetch flag (#6893) 2019-11-12 10:42:04 -07:00
bb2fa9957a Increase default AWS instance size to match GCE and Azure (#6773) 2019-11-12 12:27:59 -05:00
c6b108ef4f Don't panic in sdk/ when genesis fails to load (#6892) 2019-11-12 10:24:49 -07:00
bb158a9b48 Add provider specific self destruct timeouts (#6894) 2019-11-12 12:21:24 -05:00
c2fdbde68f forks graph can now optionally display all validator votes (#6885) 2019-11-12 10:13:16 -07:00
7e82450d7b Serialize transaction in proper wire format instead of json (#6889) 2019-11-12 10:45:10 -05:00
188dbdb068 Ignore symlinked logdir in repo root (#6891) 2019-11-12 10:36:53 -05:00
25866f3652 print command now supports multiple slots and decodes system/vote instructions (#6878) 2019-11-11 23:22:20 -07:00
c7e2057d2d Install xargo if a new version is available (#6882)
automerge
2019-11-11 20:32:07 -08:00
d84f367317 Extract duplicate clap helpers into clap-utils (#6812) 2019-11-12 09:42:08 +09:00
95d6586dd7 Remove debug datapoint that isn't being plotted (#6873) 2019-11-11 14:25:25 -08:00
e8e13fdeeb Insert coding shreds to blocktree only if needed in future (#6836)
* Insert coding shreds to blocktree only if needed in future

* fixes
2019-11-11 13:12:55 -08:00
816b2d7ff8 Tune repair to be less aggressive (#6868) 2019-11-11 13:12:22 -08:00
91cfa0aac9 Upgrade xargo if old (#6869) 2019-11-11 12:58:24 -08:00
4be646c695 discover() by gossip sockaddr instead of just by gossip ip address (#6865) 2019-11-11 12:42:58 -07:00
a23c6177d5 Use reusable provider-specific testnet keypairs (#6866)
* Use reusable provider-specific testnet keypairs

* shellcheck
2019-11-11 12:08:22 -07:00
cc6e1ea200 Stub out getBlocksSince and getBlock methods (#6853)
* Add getBlocksSince rpc method, and initial stub of getBlock method

* Return test transactions from getBlock method

* clippy

* Add comment on get_block method
2019-11-11 13:18:34 -05:00
596d30661a Echo failed command to results app (#6859) 2019-11-11 09:37:11 -07:00
b971eeca4b Add ryoqun to ssh authorized keys (#6860) 2019-11-11 17:12:24 +09:00
cfab36cb1d Include channel and commit info in the version of pre-release builds (#6819) 2019-11-10 22:39:13 -07:00
5835b3b8eb Increase timeout when confirming airdrop for max commitment (#6858)
* Increase timeout when confirming airdrop for max commitment

* Add commitment to airdrop rpc trace

* Flip commitment check
2019-11-10 12:20:52 -05:00
62eea636b0 Update jsonrpc-api.md 2019-11-09 19:46:04 -05:00
b14e61ff79 Filter any net/log* directory from rsync (#6857) 2019-11-09 13:38:17 -08:00
59adc25c23 Implement non-GPU mode testcase for colo (#6856) 2019-11-09 09:38:06 -07:00
86ead6a65c Update book toc for readonly accounts (#6854) 2019-11-09 08:25:24 -07:00
fbfbafa3d4 Update readonly accounts docs (#6801) 2019-11-09 07:35:37 -07:00
1ddf90ed08 Compress contact_info_trace() output to improve CI log rendering (#6852) 2019-11-09 01:12:18 -07:00
0fbd508c5f Only check the entrypoint's RPC address (#6851) 2019-11-09 00:56:31 -07:00
24a7b0ce74 Add print-genesis-hash command (#6849) 2019-11-08 23:17:48 -07:00
68eafb3f30 Ensure config dir exists 2019-11-08 22:18:21 -07:00
2649f6bdd6 Avoid excessive log/ relinking 2019-11-08 21:57:50 -07:00
9807f47d4e Rename genesis block to genesis config (#6816) 2019-11-08 23:56:57 -05:00
63425bed10 Move move tests into its own job (#6847) 2019-11-08 20:40:03 -07:00
02058ea699 Reject blocks with invalid last ticks in replay stage (#6833)
* Reject blocks with invalid last ticks in replay stage

* slot_full
2019-11-08 20:21:54 -05:00
91be35731c Fix freeze and register_tick race (#6799)
* Fix freeze and register_tick race

* Add test
2019-11-08 17:21:17 -08:00
d1daeb44e6 Remove custom stack_size() (#6844) 2019-11-08 17:11:07 -07:00
efdfc5c327 Remove TODOs (#6843) 2019-11-08 16:43:18 -07:00
9c00ad9ff2 Remove some low-hanging TODOs (#6839) 2019-11-08 16:41:36 -07:00
151adab739 earlyoom now works on reboots (#6841) 2019-11-08 16:40:38 -07:00
162b1bdef7 Add more tests (#6834)
automerge
2019-11-08 15:07:11 -08:00
da425cc225 Don't insert coding shreds into blocktree on leader (#6831) 2019-11-08 13:54:23 -08:00
346213da4c Check for LD_DW at the end of a program (#6821) 2019-11-08 13:30:44 -08:00
8babecd890 Remove todo from account (#6827) 2019-11-08 13:30:21 -08:00
2855c55ac1 Move loader does not need genesis auth key (#6818) 2019-11-08 11:52:56 -08:00
bb9649e18d Replaced todo with issue (#6823) 2019-11-08 11:48:07 -08:00
2f7d0e7884 TODO already covered by issue (#6828) 2019-11-08 11:45:17 -08:00
dfc4d7cb50 Remove unsupported test (#6820) 2019-11-08 11:37:47 -08:00
b800642fa4 Add new fork log message for when the node is leader for consistency (#6808) 2019-11-08 12:30:25 -07:00
5b6c590057 run.sh logs validators to stderr (#6817) 2019-11-08 11:30:19 -08:00
66a0f54097 Replay should respect order of register_ticks with respect to blockhashes (#6805) 2019-11-08 12:29:41 -07:00
f8e64aad5b ci/shellcheck.sh now only audits files that git knows about (#6815) 2019-11-08 10:25:59 -07:00
cd5ec8cd35 Fix blind keyed_account indexing in BPF and Move loader (#6810) 2019-11-08 09:19:19 -08:00
75fd13de5d Prevent ci/nits.sh from incorrectly nitting on ci/nits. (#6814) 2019-11-08 09:40:25 -07:00
807af8670e Clean up net logs (#6813) 2019-11-08 10:25:17 -05:00
5bd05fba09 require to account signature (#6658)
* require to signature

* fixing invocation to create_account

* fix create_account references

* address review comment

* whacking bugs in tests

* fixing stake program tests
2019-11-08 15:57:35 +05:30
f7b6e777bf Revert "Clean up net/log symlinks (#6794)" (#6809)
This reverts commit 68353b7e57.
2019-11-07 22:15:45 -07:00
68353b7e57 Clean up net/log symlinks (#6794) 2019-11-07 23:45:19 -05:00
8e81bc1b49 Fix pinning (#6604)
Remove Deref implementations and add more pass-throughs to the PinnedVec
wrapper.
Warm recyclers
set_pinnable
2019-11-07 19:48:33 -08:00
80a89b5e6d Revert "Revert "Add inflation to epoch phases (#6787)" (#6802)" (#6806)
automerge
2019-11-07 18:33:14 -08:00
b64b54f48f unfork dalek ed25519 (#6776) 2019-11-07 17:08:10 -08:00
20a52f153b Fix iftop not being stopped correctly (#6803)
automerge
2019-11-07 17:03:14 -08:00
d89271528e Revert "Add inflation to epoch phases (#6787)" (#6802)
automerge
2019-11-07 16:43:09 -08:00
ccac35fc01 Increase FEC ratio to 32:32 (#6800)
automerge
2019-11-07 16:38:06 -08:00
23e232b496 Avoid : in default log filename (#6796) 2019-11-07 15:36:29 -07:00
ddcf906a88 Add docs for FEC rate calculation (#6788)
automerge
2019-11-07 12:44:40 -08:00
09e8124017 Tool to reconfigure netem on testnet (#6781)
automerge
2019-11-07 11:14:33 -08:00
67d1e2903c Upgrade Repair to be more intelligent and aggressive (#6789)
* Upgrade Repair to be more intelligent and aggressive

* Fix u64 casts

* Fix missing bracket

* Add 1 second delay to test to allow repair to kick in
2019-11-07 11:08:09 -08:00
a9c4cd6cbe Add inflation to epoch phases (#6787) 2019-11-07 10:53:04 -08:00
180bc1784e Book: Add blockhash to terminology (#6711)
automerge
2019-11-07 10:46:04 -08:00
f984feda42 Use get_slot_with_commitment (#6791) 2019-11-07 10:41:58 -07:00
56fc15f44d Fix units on dead slots graph 2019-11-07 08:26:13 -07:00
e0d9f7d1d4 Fix genesis arg names in run.sh (#6785) 2019-11-06 23:27:10 -05:00
87ba66b6d0 Add net/ support for reusable identity keypairs (#6783) 2019-11-06 21:14:05 -07:00
e420800aeb Update terminology for block height and genesis block (#6782) 2019-11-06 23:09:03 -05:00
a684984f8b feat: add confirm_transaction, add rpc client test (#6778) 2019-11-06 22:08:03 -05:00
079682fbdc Add ping cli option to use CommitmentLevel::Max, instead of CommitmentLevel::Recent (#6775) 2019-11-06 18:54:17 -07:00
2491719f36 Fix windows build (#6774) 2019-11-06 16:07:28 -07:00
65de227520 Don't panic on packet data (#6769) 2019-11-06 14:32:37 -08:00
29f3b198cf Update snapshot verification proposal (#6764)
automerge
2019-11-06 13:48:28 -08:00
0ace79939b Add reference tick to data shreds (#6772)
* Add reference tick to data shreds

* fix tests
2019-11-06 13:27:58 -08:00
b3a75a60a4 Use rooted bank by default in rpc bank selection (#6759)
* Name anonymous parameters for clarity

* Add CommitmentConfig to select bank for rpc

* Add commitment information to jsonrpc docs

* Update send_and_confirm retries as per commitment defaults

* Pass CommitmentConfig into client requests; also various 'use' cleanup

* Use _with_commitment methods to speed local_cluster tests

* Pass CommitmentConfig into Archiver in order to enable quick confirmations in local_cluster tests

* Restore solana ping speed

* Increase wallet-sanity timeout to account for longer confirmation time
2019-11-06 14:15:00 -07:00
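To show what the commitment plumbing above looks like from a client's point of view, a sketch follows; the pubkey, endpoint, and the "max"/"recent" level names are assumptions drawn from the CommitmentLevel values mentioned elsewhere in this log.

```bash
# Read a balance from the most-rooted ("max") bank instead of the most recent one.
curl -s http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
  {"jsonrpc": "2.0", "id": 1, "method": "getBalance",
   "params": ["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", {"commitment": "max"}]}
'
```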
5e8668799c Fewer recyclers. (#6770)
automerge
2019-11-06 12:35:51 -08:00
8fa6935c9d Validators now log to a file by default (use -o -/--log - for stderr) (#6768)
automerge
2019-11-06 11:47:34 -08:00
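A small usage note for the logging change above; the --log flag name is taken from the commit message, and the other flags a validator needs are omitted here.

```bash
# Keep logging on stderr (the previous behavior); remaining validator flags omitted.
solana-validator --log -
# Or name the log file explicitly (the new default writes to a file).
solana-validator --log ~/validator.log
```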
a1fe6265fd use pubkeys in genesis (#6750) 2019-11-06 11:18:25 -08:00
67f636545a Refactor sigverify to stage for signing shreds on the GPU (#6635)
automerge
2019-11-06 10:52:30 -08:00
ec50c20400 Add time in net/logs path (#6701) 2019-11-06 10:43:12 -08:00
18f146ace5 validator/: Restructure main() to fully parse cli arguments first (#6765) 2019-11-06 11:34:31 -07:00
a91bf296d7 Add some addition packages to DC installer scripts (#6755)
* Add 'cmake' to default DC node installer

* Add 'sysstat' to default DC node installer

For 'iostat'

* Add 'perf' to default DC node installer

* Add 'iftop' to default DC node installer
2019-11-06 09:48:45 -07:00
bb8985d76c [Security] Bump spin from 0.5.0 to 0.5.2 (#6621)
Bumps [spin](https://github.com/mvdnes/spin-rs) from 0.5.0 to 0.5.2. **This update includes security fixes.**
- [Release notes](https://github.com/mvdnes/spin-rs/releases)
- [Commits](https://github.com/mvdnes/spin-rs/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-06 08:31:25 -07:00
7ff2a44a63 Make last shred for an interrupted slot signed + typed (#6760) 2019-11-06 08:25:17 -07:00
b5074d8577 Enable JSON RPC request/response logging by default (#6758) 2019-11-06 08:23:13 -07:00
5c1abaf43c Bump cc from 1.0.46 to 1.0.47 (#6741)
Bumps [cc](https://github.com/alexcrichton/cc-rs) from 1.0.46 to 1.0.47.
- [Release notes](https://github.com/alexcrichton/cc-rs/releases)
- [Commits](https://github.com/alexcrichton/cc-rs/compare/1.0.46...1.0.47)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-06 08:23:00 -07:00
dc3988eff8 CLI changes required for to account signing (#6678)
* CLI changes draft

* use tempfile

* remove un-necessary error handling

* use keypair instead of pubkey
2019-11-06 20:17:34 +05:30
24102a7435 Allow voting on empty banks (#6719)
* Allow votes on empty banks

* Remove making first bank is_delta true, no longer necessary for idling

* Remove votable from ledger tool
2019-11-06 01:02:26 -08:00
9614d17024 Limit deserialization of data coming off the wire (#6751)
* Limit deserialization of data coming off the wire

* Feedback and cleanup
2019-11-06 00:07:57 -08:00
8e3be6413e Cargo.lock 2019-11-05 20:02:09 -07:00
09e648f957 ledger-tool/: Include full validator voting history in fork-graph (#6756) 2019-11-05 19:40:00 -07:00
0c2bf022fa Apply netem packet rules to only UDP traffic (#6754) 2019-11-05 18:34:04 -08:00
1c5d2a85cf Fix substitution of private IP with public IP in iftop logs (#6748)
automerge
2019-11-05 15:08:35 -08:00
8993b15248 Integrated use of netem with testnet scripts (#6746)
automerge
2019-11-05 15:04:06 -08:00
8f91b5aab3 Add threshold to repairman for same slot (#6728) 2019-11-05 12:48:45 -08:00
46391397b8 Bump indicatif from 0.12.0 to 0.13.0 (#6736)
Bumps [indicatif](https://github.com/mitsuhiko/indicatif) from 0.12.0 to 0.13.0.
- [Release notes](https://github.com/mitsuhiko/indicatif/releases)
- [Commits](https://github.com/mitsuhiko/indicatif/compare/0.12.0...0.13.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-05 13:25:28 -07:00
85c9a231c1 Include the affected slot in blocktree error metrics (#6734) 2019-11-05 13:25:21 -07:00
c312d4fba0 Calculate proofs collected and don't encrypt if there are none (#6698) 2019-11-05 11:38:50 -08:00
7203036e3e Adjust nofiles within Blocktree::open() for all ledger/ users (#6737)
automerge
2019-11-05 11:18:49 -08:00
b9d8e3e55a Only copy whats needed to verify an instruction after processing (#6669) 2019-11-05 10:57:32 -08:00
08973f9f05 Adjust default signature fee for base-10 lamports (#6738) 2019-11-05 11:21:45 -07:00
c6931dcb07 Remove credit-only account handling (#6726)
* Renaming
- credit-only/credit-debit to read-only/read-write
- debitable to writable

* Remove credit handling, making credit-only accounts read-only

* Update programs to remove deprecated credit-only account designation

* Use readonly and writable instead of underscored types
2019-11-05 09:38:35 -07:00
cea13e964c Add --graph-forks option (#6732) 2019-11-04 23:18:30 -07:00
d207a34736 remove duplicate signal handling (#6702) 2019-11-05 11:36:51 +05:30
fba1af6ea9 ledger-tool can now load a ledger snapshot (#6729) 2019-11-04 22:14:55 -07:00
b825d04597 Pull perf into a separate module. (#6718)
automerge
2019-11-04 20:13:43 -08:00
3133ee2401 Fix limited iftop output and failure to stop iftop (#6723)
* Fix limited iftop output and failure to stop iftop

* Shellcheck

* Ignore shellcheck
2019-11-04 18:12:07 -08:00
4d52f47f87 Move get_bank_forks() into ledger/ so its available for use by ledger-tool/ (#6720) 2019-11-04 19:10:06 -07:00
f54cfcdb8f Store and persists full stack of tower votes in gossip (#6695)
* vote array

wip

wip

wip

update

gossip index should match tower index

tests build

clippy

test index after expired vote

test

bank specific last vote sync time

* verify

* we are likely to see many more warnings about old votes now
2019-11-04 16:19:54 -08:00
57983980a7 Lower verify-batch-size to debug (#6722)
automerge
2019-11-04 16:00:59 -08:00
33f4aaf3fd Rename confidence to commitment (#6714) 2019-11-04 16:44:27 -07:00
c138d692b1 Show all ports for nodes in gossip table (#6717)
* Show all ports for nodes in gossip table

* review comments
2019-11-04 15:05:08 -08:00
fb12136975 Add genesis_accounts module (#6708) 2019-11-04 13:46:33 -07:00
efe260f12e sysvar trait (#6667)
* sysvar trait

* get the new guy in on it
2019-11-04 12:31:24 -08:00
b9b535c30f move system_instruction::transfer() to credit-debit (#6677)
* transfer no credit only

* use a credit-only transfer in the credit-only test
2019-11-04 12:30:59 -08:00
d085c8626f GCE: Add instances self-destruct (#6363)
automerge
2019-11-04 10:30:26 -08:00
5e3697807c Fail gracefully if AVX support is missing (#6705) 2019-11-04 11:03:39 -07:00
5416c114cf SDK: Add sysvar to expose recent block hashes to programs (#6663)
* SDK: Add sysvar to expose recent block hashes to programs

* Blockhashes is one word

* Missed one

* Avoid allocs on update

* unwrap_or_else

* Use iterators

* Add microbench

* Revert "unwrap_or_else"

This reverts commit a8f8c3bfbe.

* Revert "Avoid allocs on update"

This reverts commit 486f01790c.
2019-11-04 10:51:15 -07:00
a0127e63c6 pay subcommand now accepts a keypair file for convenience (#6703) 2019-11-04 09:36:49 -07:00
8b2327ed34 Remove unneeded lib.rs 2019-11-04 08:11:40 -07:00
3938142535 keygen: add dedicated solana-keygen grind command (#6697)
* Remove dead code

* Speed up vanity key grinding
2019-11-03 19:41:26 -08:00
66f76c8067 Bump console from 0.9.0 to 0.9.1 (#6700)
Bumps [console](https://github.com/mitsuhiko/console) from 0.9.0 to 0.9.1.
- [Release notes](https://github.com/mitsuhiko/console/releases)
- [Commits](https://github.com/mitsuhiko/console/compare/0.9.0...0.9.1)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-11-03 19:41:16 -08:00
568475e2db Fix incorrectly signed CrdsValues (#6696) 2019-11-03 10:07:51 -08:00
9ea398416e Sign shreds on the GPU (#6595)
* sign gpu shreds

* wip

* checks

* tests build

* test

* tests

* test

* nits

* sign cpu test

* write out the sigs in parallel

* clippy

* cpu test

* prepare secret for gpu

* woot!

* update

* bump perf libs
2019-11-02 06:23:14 -07:00
50a17fc00b Use Slot and Epoch type aliases instead of raw u64 (#6693)
automerge
2019-11-02 00:38:30 -07:00
f9a9b7f610 Better output layout for iftop logs (#6690)
automerge
2019-11-01 16:36:02 -07:00
a57f6b70da Fix swapped repair and forwards addrs (#6691)
automerge
2019-11-01 16:01:42 -07:00
bae83ba2b6 Compare iftop logs using log-analyzer (#6684)
* Compare iftop logs using log-analyzer

* fixes

* fix clippy errors
2019-11-01 14:48:23 -07:00
385b4ce959 Get rid of verified packets and use the Meta::discard flag (#6674)
* get rid of verified packets and use the disabled meta field everywhere
2019-11-01 14:23:03 -07:00
7b6e3a23be Add new pubkey to auth keys (#6687) 2019-11-01 14:44:10 -06:00
1cc8956f74 Get Azure provider working again (#6659)
* Wait for node creation before continuing

* Programatically set networking rules

* Add network security group to nodes upon creation

* shellcheck
2019-11-01 14:43:31 -06:00
e6c8bfd008 Add --use-move flag to cargo-install-all.sh and net/net.sh (#6670) 2019-11-01 07:53:30 -07:00
2d67962c2f Send repairman shreds to the repair socket (#6671) 2019-10-31 18:23:50 -07:00
2e30926ac3 New program to process iftop log output (#6668)
* New program to process iftop log output

* fixes

* fix shellcheck

* address review comments

* more review comments
2019-10-31 18:22:57 -07:00
d2c66c40c6 Have cargo-install-all.sh also look in program target dirs for so's (#6631) 2019-10-31 14:40:54 -07:00
a4d48df30a Add assertion when filling blocktree slot with ticks (#6664)
automerge
2019-10-31 14:15:07 -07:00
c52830980a Rework get_slot_meta (#6642)
* Assert slotmeta is not orphan

* Clean up get_slot_meta functionality

* Add test
2019-10-31 14:03:41 -07:00
e8e5ddc55d Verify number of hashes for each block of entries (#6262)
* Verify number of hashes for each block of entries

* Fix blocktree processor tick check

* Rebase once more
2019-10-31 16:38:50 -04:00
111942a47d document clock (#6662) 2019-10-31 13:26:55 -07:00
bc88180058 stake split (#6402)
* stake split

* stake split
2019-10-31 11:07:27 -07:00
3a616de47b Implementation of AWS support in automation (#6602)
* Implementation of AWS support in automation

* Add 10 node testcase

* Add cleanup for ec2 provider and single zone testcase
2019-10-31 12:00:10 -06:00
9d65e6f183 Fix check in should_insert_data_shred (#6649) 2019-10-30 23:37:25 -07:00
328a6a866e Fix code comment (#6640)
automerge
2019-10-30 22:21:34 -07:00
5264fded00 Avoid alloc due to vector pushes (#6632) 2019-10-30 21:55:17 -07:00
83d5115a02 Add --starts-with for vanity key grinding (#6647) 2019-10-30 20:47:42 -07:00
0559212df7 log bench (#6643) 2019-10-30 19:51:44 -07:00
f131255066 Add ~/.cargo/bin to PATH (#6641) 2019-10-30 19:41:24 -07:00
59f3dc3b6b Fix PohRecorder Metrics (#6644)
* Update Poh Recorder Dashboard

* Update PohRecorder logging
2019-10-30 18:55:29 -07:00
6454bfe754 Rework get_index_meta (#6636) 2019-10-30 16:48:59 -07:00
7bb224f54a Install ag on nodes (#6634)
automerge
2019-10-30 16:43:16 -07:00
fa12a5f70b kill rent calculator (#6625) 2019-10-30 16:25:12 -07:00
d2d78a073f Remove lingering references to base-2 SOLs (#6629)
automerge
2019-10-30 14:59:44 -07:00
6d403f2d85 Remove stray println 2019-10-30 14:44:26 -07:00
8032141311 Add --no-multi-client (#6624) 2019-10-30 14:43:30 -07:00
38491c8c4b Reduce verify-batch-size log (#6623) 2019-10-30 13:41:11 -07:00
627664b785 Re-enable tests (#6615)
automerge
2019-10-29 21:34:20 -07:00
dfa1c7493c Ignore flaky move test (#6616)
automerge
2019-10-29 21:21:35 -07:00
801337a422 Refactor Weighted Shuffle (#6614)
automerge
2019-10-29 21:02:11 -07:00
4ec95043d7 Update sol:lamport ratio to base-10 (#6611)
* Update sol:lamport ratio

* Update various SOL quantities in bash scripts
2019-10-29 20:03:48 -06:00
b4dc1a7263 Remove move feature (#6605)
automerge
2019-10-29 17:14:07 -07:00
ef3aa2731c Fix Weighted Best calculation (#6606)
automerge
2019-10-29 17:04:11 -07:00
e738019c48 Add Ramp TPS table 2019-10-29 16:18:58 -07:00
a5ef78f709 Expand CF's (#6528) 2019-10-29 16:18:03 -07:00
4156cea704 Fixup running-validator docs (#6607)
* Fixup validator docs

* Remove $
2019-10-29 17:13:20 -06:00
a587d05098 fix re delegate (#6603) 2019-10-29 14:42:45 -07:00
489dc657c6 Update libra to new fork (#6523)
* Update to new libra branch

* Use core and association addresses
2019-10-29 10:39:10 -07:00
029a2837e4 Bump jsonrpc-http-server from 14.0.1 to 14.0.3 (#6597)
Bumps [jsonrpc-http-server](https://github.com/paritytech/jsonrpc) from 14.0.1 to 14.0.3.
- [Release notes](https://github.com/paritytech/jsonrpc/releases)
- [Commits](https://github.com/paritytech/jsonrpc/compare/v14.0.1...v14.0.3)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-10-29 10:30:06 -07:00
618ecfd1c6 Bump base64 from 0.10.1 to 0.11.0 (#6596)
Bumps [base64](https://github.com/marshallpierce/rust-base64) from 0.10.1 to 0.11.0.
- [Release notes](https://github.com/marshallpierce/rust-base64/releases)
- [Changelog](https://github.com/marshallpierce/rust-base64/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/marshallpierce/rust-base64/compare/v0.10.1...v0.11.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-10-29 10:29:58 -07:00
83174b919c Remove unstable default-run directive (#6599)
automerge
2019-10-29 10:28:48 -07:00
d952b38f93 Ensure nofiles is not capped at 1024 on a node reboot 2019-10-28 23:21:34 -07:00
1e2ab89b47 Ensure redis-server is started on a reboot 2019-10-28 20:58:46 -07:00
34a9619806 SigVerify stage for shreds. (#6563) 2019-10-28 16:07:51 -07:00
9ee65009cd Implement allowing validator boot failure into automation (#6589)
* Pass allow boot failures through create AND start

* Extend sleep timeout to all nodes

* Add 100 node testcase

* Reduce consistent sleep
2019-10-28 16:43:40 -06:00
85ccba366a Run localnet in development mode (#6587) 2019-10-28 15:35:17 -07:00
579a02529d Fix unnecessarily copying shreds in broadcast stage (#6588)
* Optimize coalesce_shreds to not explicitly clone

* Remove Coalesce Shreds altogether

* fn no longer needs clippy exception
2019-10-28 14:58:27 -07:00
b04c8c1c1a Demote blocktree metrics log level (#6590)
automerge
2019-10-28 14:46:43 -07:00
243fa6cf63 Shred gpu sigverify (#6520)
Implement APIs for verifying shred signatures on the GPU.
2019-10-28 10:29:38 -07:00
30c0a7d069 Bump serde from 1.0.101 to 1.0.102 (#6581)
automerge
2019-10-28 09:19:39 -07:00
71b4e765c8 Bump itertools from 0.8.0 to 0.8.1 (#6583)
Bumps [itertools](https://github.com/bluss/rust-itertools) from 0.8.0 to 0.8.1.
- [Release notes](https://github.com/bluss/rust-itertools/releases)
- [Commits](https://github.com/bluss/rust-itertools/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-10-28 08:26:15 -07:00
73dd5aa2d1 Bump serde_derive from 1.0.101 to 1.0.102 (#6582)
Bumps [serde_derive](https://github.com/serde-rs/serde) from 1.0.101 to 1.0.102.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.101...v1.0.102)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-10-28 08:24:13 -07:00
96e209db49 Colo: Don't fail without a message (#6558) 2019-10-28 09:20:49 -06:00
0c14ca58c7 Invoke on-reboot from cloud startup script to avoid racing with cron (#6579)
automerge
2019-10-27 10:56:16 -07:00
f3c0aa154a -a is optional 2019-10-26 22:48:24 -07:00
6efaaa9d7a Blocktree metrics (#6527)
* Add metrics for blocktree performance
* Plumb metrics through window service
2019-10-26 16:15:59 -07:00
08238e8307 Add proposal for tick verification in slots (#6512)
* Add proposal for tick verification in slots
2019-10-26 16:14:30 -07:00
e1b35f9847 Fix race in blocktree.insert_shreds (#6550)
* Add guard for blocktree insert_shreds

* Add test
2019-10-26 04:09:58 -07:00
e174af7838 Use iftop to collect network bandwidth usage (#6560)
* Use iftop to collect network bandwidth usage

* fix shellcheck

* more shellchecks

* review comments
2019-10-26 00:06:46 -07:00
be74801236 Add NET_NUM_xyz variables 2019-10-25 23:00:14 -07:00
68acfd36d0 Bootstrap leader's stake is now authorized to the bootstrap leader's identity key (#6571) 2019-10-25 22:58:35 -07:00
c9cea2152b optimize verify_instruction (#6539) 2019-10-25 21:47:16 -07:00
e966c96644 Disable sigverify on blockstreamer node
This node gets overloaded at high TPS trying to manage both a validator
and the block explorer. Reduce its workload by turning off sigverify,
which doesn't really matter since this node doesn't even vote.
2019-10-25 21:33:08 -07:00
73c31d873e Update Cargo.toml versions from 0.20.0 to 0.21.0 (#6568) 2019-10-25 17:40:49 -06:00
a2a9d54985 Increase node start stagger (#6566) 2019-10-25 17:35:29 -06:00
539 changed files with 38113 additions and 15560 deletions

.gitignore (1 line changed)

@@ -15,6 +15,7 @@
# log files
*.log
log-*.txt
log-*/
# intellij files
/.idea/

.mergify.yml

@@ -19,46 +19,6 @@ pull_request_rules:
label:
add:
- automerge
- name: v0.16 backport
conditions:
- base=master
- label=v0.16
actions:
backport:
branches:
- v0.16
- name: v0.17 backport
conditions:
- base=master
- label=v0.17
actions:
backport:
branches:
- v0.17
- name: v0.18 backport
conditions:
- base=master
- label=v0.18
actions:
backport:
branches:
- v0.18
- name: v0.19 backport
conditions:
- base=master
- label=v0.19
actions:
backport:
branches:
- v0.19
- name: v0.20 backport
conditions:
- base=master
- label=v0.20
actions:
backport:
branches:
- v0.20
- name: v0.21 backport
conditions:
- base=master
@@ -75,3 +35,11 @@ pull_request_rules:
backport:
branches:
- v0.22
- name: v0.23 backport
conditions:
- base=master
- label=v0.23
actions:
backport:
branches:
- v0.23

Cargo.lock (generated, 2166 lines changed): file diff suppressed because it is too large.

Cargo.toml

@@ -3,56 +3,49 @@ members = [
"bench-exchange",
"bench-streamer",
"bench-tps",
"banking_bench",
"banking-bench",
"chacha-sys",
"client",
"core",
"drone",
"perf",
"validator",
"genesis",
"genesis_programs",
"genesis-programs",
"gossip",
"install",
"keygen",
"ledger",
"ledger-tool",
"local_cluster",
"local-cluster",
"logger",
"log-analyzer",
"merkle-tree",
"measure",
"metrics",
"programs/bpf_loader_api",
"programs/bpf_loader_program",
"programs/budget_api",
"programs/budget_program",
"programs/btc_spv_program",
"programs/btc_spv_api",
"net-shaper",
"programs/bpf_loader",
"programs/budget",
"programs/btc_spv",
"programs/btc_spv_bin",
"programs/config_api",
"programs/config_program",
"programs/config",
"programs/config_tests",
"programs/exchange_api",
"programs/exchange_program",
"programs/failure_program",
"programs/move_loader_api",
"programs/move_loader_program",
"programs/librapay_api",
"programs/noop_program",
"programs/stake_api",
"programs/stake_program",
"programs/exchange",
"programs/failure",
"programs/noop",
"programs/ownable_api",
"programs/stake",
"programs/stake_tests",
"programs/storage_api",
"programs/storage_program",
"programs/vest_api",
"programs/vest_program",
"programs/vote_api",
"programs/vote_program",
"programs/storage",
"programs/storage_tests",
"programs/vest",
"programs/vote",
"archiver",
"runtime",
"sdk",
"sdk-c",
"upload-perf",
"netutil",
"net-utils",
"fixed-buf",
"vote-signer",
"cli",
@ -61,4 +54,6 @@ members = [
exclude = [
"programs/bpf",
"programs/move_loader",
"programs/librapay_api",
]

View File

@ -78,7 +78,7 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt
```
If your rustc version is lower than 1.38.0, please update it:
If your rustc version is lower than 1.39.0, please update it:
```bash
$ rustup update

View File

@ -2,17 +2,18 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "0.20.0"
version = "0.21.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.0"
solana-core = { path = "../core", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-metrics = { path = "../metrics", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }

View File

@ -1,31 +1,30 @@
use clap::{crate_description, crate_name, crate_version, App, Arg};
use clap::{crate_description, crate_name, App, Arg};
use console::style;
use solana_core::archiver::Archiver;
use solana_core::cluster_info::{Node, VALIDATOR_PORT_RANGE};
use solana_core::contact_info::ContactInfo;
use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil};
use std::net::SocketAddr;
use std::path::PathBuf;
use std::process::exit;
use std::sync::Arc;
// Return an error if a keypair file cannot be parsed.
fn is_keypair(string: String) -> Result<(), String> {
read_keypair_file(&string)
.map(|_| ())
.map_err(|err| format!("{:?}", err))
}
use solana_clap_utils::{
input_validators::is_keypair,
keypair::{
self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
SKIP_SEED_PHRASE_VALIDATION_ARG,
},
};
use solana_core::{
archiver::Archiver,
cluster_info::{Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
};
use solana_sdk::{commitment_config::CommitmentConfig, signature::KeypairUtil};
use std::{net::SocketAddr, path::PathBuf, process::exit, sync::Arc};
fn main() {
solana_logger::setup();
let matches = App::new(crate_name!())
.about(crate_description!())
.version(crate_version!())
.version(solana_clap_utils::version!())
.arg(
Arg::with_name("identity")
Arg::with_name("identity_keypair")
.short("i")
.long("identity")
.long("identity-keypair")
.value_name("PATH")
.takes_value(true)
.validator(is_keypair)
@ -38,7 +37,7 @@ fn main() {
.value_name("HOST:PORT")
.takes_value(true)
.required(true)
.validator(solana_netutil::is_host_port)
.validator(solana_net_utils::is_host_port)
.help("Rendezvous with the cluster at this entry point"),
)
.arg(
@ -56,58 +55,80 @@ fn main() {
.long("storage-keypair")
.value_name("PATH")
.takes_value(true)
.required(true)
.validator(is_keypair)
.help("File containing the storage account keypair"),
)
.arg(
Arg::with_name(ASK_SEED_PHRASE_ARG.name)
.long(ASK_SEED_PHRASE_ARG.long)
.value_name("KEYPAIR NAME")
.multiple(true)
.takes_value(true)
.possible_values(&["identity-keypair", "storage-keypair"])
.help(ASK_SEED_PHRASE_ARG.help),
)
.arg(
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
.requires(ASK_SEED_PHRASE_ARG.name)
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
)
.get_matches();
let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap());
let keypair = if let Some(identity) = matches.value_of("identity") {
read_keypair_file(identity).unwrap_or_else(|err| {
eprintln!("{}: Unable to open keypair file: {}", err, identity);
let identity_keypair = keypair_input(&matches, "identity_keypair")
.unwrap_or_else(|err| {
eprintln!("Identity keypair input failed: {}", err);
exit(1);
})
} else {
Keypair::new()
};
let storage_keypair = if let Some(storage_keypair) = matches.value_of("storage_keypair") {
read_keypair_file(storage_keypair).unwrap_or_else(|err| {
eprintln!("{}: Unable to open keypair file: {}", err, storage_keypair);
exit(1);
})
} else {
Keypair::new()
};
.keypair;
let KeypairWithSource {
keypair: storage_keypair,
source: storage_keypair_source,
} = keypair_input(&matches, "storage_keypair").unwrap_or_else(|err| {
eprintln!("Storage keypair input failed: {}", err);
exit(1);
});
if storage_keypair_source == keypair::Source::Generated {
clap::Error::with_description(
"The `storage-keypair` argument was not found",
clap::ErrorKind::ArgumentNotFound,
)
.exit();
}
let entrypoint_addr = matches
.value_of("entrypoint")
.map(|entrypoint| {
solana_netutil::parse_host_port(entrypoint).expect("failed to parse entrypoint address")
solana_net_utils::parse_host_port(entrypoint)
.expect("failed to parse entrypoint address")
})
.unwrap();
let gossip_addr = {
let ip = solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap();
let ip = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap();
let mut addr = SocketAddr::new(ip, 0);
addr.set_ip(solana_netutil::get_public_ip_addr(&entrypoint_addr).unwrap());
addr.set_ip(solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap());
addr
};
let node =
Node::new_archiver_with_external_ip(&keypair.pubkey(), &gossip_addr, VALIDATOR_PORT_RANGE);
let node = Node::new_archiver_with_external_ip(
&identity_keypair.pubkey(),
&gossip_addr,
VALIDATOR_PORT_RANGE,
);
println!(
"{} version {} (branch={}, commit={})",
style(crate_name!()).bold(),
crate_version!(),
solana_clap_utils::version!(),
option_env!("CI_BRANCH").unwrap_or("unknown"),
option_env!("CI_COMMIT").unwrap_or("unknown")
);
solana_metrics::set_host_id(keypair.pubkey().to_string());
solana_metrics::set_host_id(identity_keypair.pubkey().to_string());
println!(
"replicating the data with keypair={:?} gossip_addr={:?}",
keypair.pubkey(),
"replicating the data with identity_keypair={:?} gossip_addr={:?}",
identity_keypair.pubkey(),
gossip_addr
);
@ -116,8 +137,9 @@ fn main() {
&ledger_path,
node,
entrypoint_info,
Arc::new(keypair),
Arc::new(identity_keypair),
Arc::new(storage_keypair),
CommitmentConfig::recent(),
)
.unwrap();

20
banking-bench/Cargo.toml Normal file
View File

@ -0,0 +1,20 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.21.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.21.1" }
solana-ledger = { path = "../ledger", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-measure = { path = "../measure", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
rand = "0.6.5"
crossbeam-channel = "0.3"

View File

@ -1,7 +1,3 @@
#[macro_use]
extern crate solana_ledger;
extern crate crossbeam_channel;
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
@ -9,13 +5,12 @@ use rayon::prelude::*;
use solana_core::banking_stage::{create_test_recorder, BankingStage};
use solana_core::cluster_info::ClusterInfo;
use solana_core::cluster_info::Node;
use solana_core::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::PohRecorder;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_core::service::Service;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::blocktree::{get_tmp_ledger_path, Blocktree};
use solana_ledger::{blocktree::Blocktree, get_tmp_ledger_path};
use solana_measure::measure::Measure;
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
@ -25,7 +20,6 @@ use solana_sdk::signature::Signature;
use solana_sdk::system_transaction;
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use std::iter;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex, RwLock};
@ -103,21 +97,21 @@ fn main() {
const PACKETS_PER_BATCH: usize = 192;
let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
let mint_total = 1_000_000_000_000;
let GenesisBlockInfo {
genesis_block,
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_block(mint_total);
} = create_genesis_config(mint_total);
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let bank0 = Bank::new(&genesis_block);
let bank0 = Bank::new(&genesis_config);
let mut bank_forks = BankForks::new(0, bank0);
let mut bank = bank_forks.working_bank();
info!("threads: {} txs: {}", num_threads, txes);
let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_block.hash());
let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());
// fund all the accounts
transactions.iter().for_each(|tx| {
@ -125,7 +119,7 @@ fn main() {
&mint_keypair,
&tx.message.account_keys[0],
mint_total / txes as u64,
genesis_block.hash(),
genesis_config.hash(),
);
let x = bank.process_transaction(&fund);
x.unwrap();
@ -142,13 +136,7 @@ fn main() {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH)
.into_iter()
.map(|x| {
let len = x.packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
@ -163,6 +151,7 @@ fn main() {
&poh_recorder,
verified_receiver,
vote_receiver,
None,
);
poh_recorder.lock().unwrap().set_bank(&bank);
@ -209,7 +198,7 @@ fn main() {
index,
);
for xv in v {
sent += xv.0.packets.len();
sent += xv.packets.len();
}
verified_sender.send(v.to_vec()).unwrap();
}
@ -288,13 +277,7 @@ fn main() {
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
tx.signatures[0] = Signature::new(&sig[0..64]);
}
verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH)
.into_iter()
.map(|x| {
let len = x.packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
}
start += chunk_len;

View File

@ -1,20 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.20.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.20.0" }
solana-ledger = { path = "../ledger", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-measure = { path = "../measure", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
rand = "0.6.5"
crossbeam-channel = "0.3"

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.20.0"
version = "0.21.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -13,27 +13,29 @@ bincode = "1.2.0"
bs58 = "0.3.0"
clap = "2.32.0"
env_logger = "0.7.1"
itertools = "0.8.0"
itertools = "0.8.2"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.6.5"
rayon = "1.2.0"
serde = "1.0.101"
serde_derive = "1.0.101"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
# solana-runtime = { path = "../solana/runtime"}
solana-core = { path = "../core", version = "0.20.0" }
solana-genesis = { path = "../genesis", version = "0.20.0" }
solana-client = { path = "../client", version = "0.20.0" }
solana-drone = { path = "../drone", version = "0.20.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-metrics = { path = "../metrics", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-genesis = { path = "../genesis", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
untrusted = "0.7.0"
ws = "0.9.1"
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.21.1" }

View File

@ -360,7 +360,7 @@ The Matcher would initiate the following last swap:
- Row 1, To: Investor 1 trades 2 A token to 12 B tokens
- Row 1, From: Investor 2 trades 2 A token from 12 B tokens
- Matcher takes 4 B tokens as profit
- Matcher takes 2 B tokens as profit
Table becomes:

View File

@ -8,31 +8,35 @@ use rayon::prelude::*;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_drone::drone::request_airdrop_transaction;
use solana_exchange_api::exchange_instruction;
use solana_exchange_api::exchange_state::*;
use solana_exchange_api::id;
use solana_exchange_program::{exchange_instruction, exchange_state::*, id};
use solana_genesis::Base64Account;
use solana_metrics::datapoint_info;
use solana_sdk::client::Client;
use solana_sdk::client::SyncClient;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::{duration_as_ms, duration_as_s};
use solana_sdk::transaction::Transaction;
use solana_sdk::{system_instruction, system_program};
use std::cmp;
use std::collections::{HashMap, VecDeque};
use std::fs::File;
use std::io::prelude::*;
use std::mem;
use std::net::SocketAddr;
use std::path::Path;
use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, RwLock};
use std::thread::{sleep, Builder};
use std::time::{Duration, Instant};
use solana_sdk::{
client::{Client, SyncClient},
commitment_config::CommitmentConfig,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
timing::{duration_as_ms, duration_as_s},
transaction::Transaction,
{system_instruction, system_program},
};
use std::{
cmp,
collections::{HashMap, VecDeque},
fs::File,
io::prelude::*,
mem,
net::SocketAddr,
path::Path,
process::exit,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
mpsc::{channel, Receiver, Sender},
Arc, RwLock,
},
thread::{sleep, Builder},
time::{Duration, Instant},
};
// TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
// Assume 4MB network buffers, and 512 byte packets
@ -174,19 +178,28 @@ where
info!("Generating {:?} account keys", total_keys);
let mut account_keypairs = generate_keypairs(total_keys);
let src_pubkeys: Vec<_> = account_keypairs
let src_keypairs: Vec<_> = account_keypairs
.drain(0..accounts_in_groups)
.map(|keypair| keypair)
.collect();
let src_pubkeys: Vec<Pubkey> = src_keypairs
.iter()
.map(|keypair| keypair.pubkey())
.collect();
let profit_pubkeys: Vec<_> = account_keypairs
let profit_keypairs: Vec<_> = account_keypairs
.drain(0..accounts_in_groups)
.map(|keypair| keypair)
.collect();
let profit_pubkeys: Vec<Pubkey> = profit_keypairs
.iter()
.map(|keypair| keypair.pubkey())
.collect();
info!("Create {:?} source token accounts", src_pubkeys.len());
create_token_accounts(client, &trader_signers, &src_pubkeys);
create_token_accounts(client, &trader_signers, &src_keypairs);
info!("Create {:?} profit token accounts", profit_pubkeys.len());
create_token_accounts(client, &swapper_signers, &profit_pubkeys);
create_token_accounts(client, &swapper_signers, &profit_keypairs);
// Collect the max transaction rate and total tx count seen (single node only)
let sample_stats = Arc::new(RwLock::new(Vec::new()));
@ -380,7 +393,10 @@ fn swapper<T>(
let mut tries = 0;
let mut trade_index = 0;
while client
.get_balance(&trade_infos[trade_index].trade_account)
.get_balance_with_commitment(
&trade_infos[trade_index].trade_account,
CommitmentConfig::recent(),
)
.unwrap_or(0)
== 0
{
@ -434,7 +450,7 @@ fn swapper<T>(
account_group = (account_group + 1) % account_groups as usize;
let (blockhash, _fee_calculator) = client
.get_recent_blockhash()
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
let to_swap_txs: Vec<_> = to_swap
.par_iter()
@ -557,27 +573,39 @@ fn trader<T>(
trade_account: trade.pubkey(),
order_info,
});
trades.push((signer, trade.pubkey(), side, src));
trades.push((signer, trade, side, src));
}
account_group = (account_group + 1) % account_groups as usize;
let (blockhash, _fee_calculator) = client
.get_recent_blockhash()
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
trades.chunks(chunk_size).for_each(|chunk| {
let trades_txs: Vec<_> = chunk
.par_iter()
.map(|(signer, trade, side, src)| {
let s: &Keypair = &signer;
let owner = &signer.pubkey();
.map(|(owner, trade, side, src)| {
let owner_pubkey = &owner.pubkey();
let trade_pubkey = &trade.pubkey();
let space = mem::size_of::<ExchangeState>() as u64;
Transaction::new_signed_instructions(
&[s],
&[owner.as_ref(), trade],
vec![
system_instruction::create_account(owner, trade, 1, space, &id()),
system_instruction::create_account(
owner_pubkey,
trade_pubkey,
1,
space,
&id(),
),
exchange_instruction::trade_request(
owner, trade, *side, pair, tokens, price, src,
owner_pubkey,
trade_pubkey,
*side,
pair,
tokens,
price,
src,
),
],
blockhash,
@ -638,7 +666,9 @@ where
T: SyncClient + ?Sized,
{
for s in &tx.signatures {
if let Ok(Some(r)) = sync_client.get_signature_status(s) {
if let Ok(Some(r)) =
sync_client.get_signature_status_with_commitment(s, CommitmentConfig::recent())
{
match r {
Ok(_) => {
return true;
@ -659,12 +689,15 @@ fn verify_funding_transfer<T: SyncClient + ?Sized>(
) -> bool {
if verify_transaction(client, tx) {
for a in &tx.message().account_keys[1..] {
if client.get_balance(a).unwrap_or(0) >= amount {
if client
.get_balance_with_commitment(a, CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{
return true;
}
}
}
false
}
@ -742,8 +775,9 @@ pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>],
to_fund_txs.len(),
);
let (blockhash, _fee_calculator) =
client.get_recent_blockhash().expect("blockhash");
let (blockhash, _fee_calculator) = client
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("blockhash");
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
@ -780,27 +814,37 @@ pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>],
});
funded.append(&mut new_funded);
funded.retain(|(k, b)| {
client.get_balance(&k.pubkey()).unwrap_or(0) > lamports && *b > lamports
client
.get_balance_with_commitment(&k.pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
> lamports
&& *b > lamports
});
debug!(" Funded: {} left: {}", funded.len(), notfunded.len());
}
}
pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], accounts: &[Pubkey]) {
let mut notfunded: Vec<(&Arc<Keypair>, &Pubkey)> = signers.iter().zip(accounts).collect();
pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], accounts: &[Keypair]) {
let mut notfunded: Vec<(&Arc<Keypair>, &Keypair)> = signers.iter().zip(accounts).collect();
while !notfunded.is_empty() {
notfunded.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
let mut to_create_txs: Vec<_> = chunk
.par_iter()
.map(|(signer, new)| {
let owner_pubkey = &signer.pubkey();
.map(|(from_keypair, new_keypair)| {
let owner_pubkey = &from_keypair.pubkey();
let space = mem::size_of::<ExchangeState>() as u64;
let create_ix =
system_instruction::create_account(owner_pubkey, new, 1, space, &id());
let request_ix = exchange_instruction::account_request(owner_pubkey, new);
let create_ix = system_instruction::create_account(
owner_pubkey,
&new_keypair.pubkey(),
1,
space,
&id(),
);
let request_ix =
exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
(
signer,
(from_keypair, new_keypair),
Transaction::new_unsigned_instructions(vec![create_ix, request_ix]),
)
})
@ -819,12 +863,13 @@ pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], acco
let mut retries = 0;
while !to_create_txs.is_empty() {
let (blockhash, _fee_calculator) = client
.get_recent_blockhash()
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
to_create_txs.par_iter_mut().for_each(|(k, tx)| {
let kp: &Keypair = k;
tx.sign(&[kp], blockhash);
});
to_create_txs
.par_iter_mut()
.for_each(|((from_keypair, to_keypair), tx)| {
tx.sign(&[from_keypair.as_ref(), to_keypair], blockhash);
});
to_create_txs.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
@ -861,9 +906,13 @@ pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], acco
}
});
let mut new_notfunded: Vec<(&Arc<Keypair>, &Pubkey)> = vec![];
let mut new_notfunded: Vec<(&Arc<Keypair>, &Keypair)> = vec![];
for f in &notfunded {
if client.get_balance(&f.1).unwrap_or(0) == 0 {
if client
.get_balance_with_commitment(&f.1.pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
== 0
{
new_notfunded.push(*f)
}
}
@ -920,7 +969,7 @@ fn generate_keypairs(num: u64) -> Vec<Keypair> {
}
pub fn airdrop_lamports(client: &dyn Client, drone_addr: &SocketAddr, id: &Keypair, amount: u64) {
let balance = client.get_balance(&id.pubkey());
let balance = client.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent());
let balance = balance.unwrap_or(0);
if balance >= amount {
return;
@ -938,19 +987,26 @@ pub fn airdrop_lamports(client: &dyn Client, drone_addr: &SocketAddr, id: &Keypa
let mut tries = 0;
loop {
let (blockhash, _fee_calculator) = client
.get_recent_blockhash()
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.expect("Failed to get blockhash");
match request_airdrop_transaction(&drone_addr, &id.pubkey(), amount_to_drop, blockhash) {
Ok(transaction) => {
let signature = client.async_send_transaction(transaction).unwrap();
for _ in 0..30 {
if let Ok(Some(_)) = client.get_signature_status(&signature) {
if let Ok(Some(_)) = client.get_signature_status_with_commitment(
&signature,
CommitmentConfig::recent(),
) {
break;
}
sleep(Duration::from_millis(100));
}
if client.get_balance(&id.pubkey()).unwrap_or(0) >= amount {
if client
.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{
break;
}
}

View File

@ -1,4 +1,4 @@
use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, ArgMatches};
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
use solana_core::gen_keys::GenKeys;
use solana_drone::drone::DRONE_PORT;
use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil};
@ -44,10 +44,10 @@ impl Default for Config {
}
}
pub fn build_args<'a, 'b>() -> App<'a, 'b> {
pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
App::new(crate_name!())
.about(crate_description!())
.version(crate_version!())
.version(version)
.arg(
Arg::with_name("entrypoint")
.short("n")
@ -166,13 +166,15 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
let mut args = Config::default();
args.entrypoint_addr = solana_netutil::parse_host_port(matches.value_of("entrypoint").unwrap())
.unwrap_or_else(|e| {
eprintln!("failed to parse entrypoint address: {}", e);
exit(1)
});
args.entrypoint_addr = solana_net_utils::parse_host_port(
matches.value_of("entrypoint").unwrap(),
)
.unwrap_or_else(|e| {
eprintln!("failed to parse entrypoint address: {}", e);
exit(1)
});
args.drone_addr = solana_netutil::parse_host_port(matches.value_of("drone").unwrap())
args.drone_addr = solana_net_utils::parse_host_port(matches.value_of("drone").unwrap())
.unwrap_or_else(|e| {
eprintln!("failed to parse drone address: {}", e);
exit(1)

View File

@ -11,7 +11,7 @@ fn main() {
solana_logger::setup();
solana_metrics::set_panic_hook("bench-exchange");
let matches = cli::build_args().get_matches();
let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
let cli_config = cli::extract_args(&matches);
let cli::Config {

View File

@ -1,7 +1,7 @@
use itertools::EitherOrBoth::{Both, Left, Right};
use itertools::Itertools;
use log::*;
use solana_exchange_api::exchange_state::*;
use solana_exchange_program::exchange_state::*;
use solana_sdk::pubkey::Pubkey;
use std::cmp::Ordering;
use std::collections::BinaryHeap;

View File

@ -1,13 +1,15 @@
use crate::local_cluster::{ClusterConfig, LocalCluster};
use log::*;
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_core::validator::ValidatorConfig;
use solana_drone::drone::run_local_drone;
use solana_exchange_api::exchange_processor::process_instruction;
use solana_exchange_api::id;
use solana_exchange_program::exchange_processor::process_instruction;
use solana_exchange_program::id;
use solana_exchange_program::solana_exchange_program;
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::genesis_config::create_genesis_config;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::process::exit;
use std::sync::mpsc::channel;
@ -81,8 +83,8 @@ fn test_exchange_local_cluster() {
#[test]
fn test_exchange_bank_client() {
solana_logger::setup();
let (genesis_block, identity) = create_genesis_block(100_000_000_000_000);
let mut bank = Bank::new(&genesis_block);
let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
let mut bank = Bank::new(&genesis_config);
bank.add_instruction_processor(id(), process_instruction);
let clients = vec![BankClient::new(bank)];

View File

@ -2,13 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.20.0"
version = "0.21.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-core = { path = "../core", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }

View File

@ -1,6 +1,5 @@
use clap::{crate_description, crate_name, crate_version, App, Arg};
use solana_core::packet::PacketsRecycler;
use solana_core::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
use clap::{crate_description, crate_name, App, Arg};
use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
use solana_core::result::Result;
use solana_core::streamer::{receiver, PacketReceiver};
use std::cmp::max;
@ -29,7 +28,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let mut num = 0;
for p in &msgs.packets {
let a = p.meta.addr();
assert!(p.meta.size < BLOB_SIZE);
assert!(p.meta.size < PACKET_DATA_SIZE);
send.send_to(&p.data[..p.meta.size], &a).unwrap();
num += 1;
}
@ -54,7 +53,7 @@ fn main() -> Result<()> {
let matches = App::new(crate_name!())
.about(crate_description!())
.version(crate_version!())
.version(solana_clap_utils::version!())
.arg(
Arg::with_name("num-recv-sockets")
.long("num-recv-sockets")
@ -77,7 +76,7 @@ fn main() -> Result<()> {
let mut read_threads = Vec::new();
let recycler = PacketsRecycler::default();
for _ in 0..num_sockets {
let read = solana_netutil::bind_to(port, false).unwrap();
let read = solana_net_utils::bind_to(port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
addr = read.local_addr().unwrap();

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.20.0"
version = "0.21.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -12,27 +12,28 @@ bincode = "1.2.0"
clap = "2.33.0"
log = "0.4.8"
rayon = "1.2.0"
serde = "1.0.101"
serde_derive = "1.0.101"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-core = { path = "../core", version = "0.20.0" }
solana-genesis = { path = "../genesis", version = "0.20.0" }
solana-client = { path = "../client", version = "0.20.0" }
solana-drone = { path = "../drone", version = "0.20.0" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.20.0", optional = true }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-metrics = { path = "../metrics", version = "0.20.0" }
solana-measure = { path = "../measure", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-genesis = { path = "../genesis", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.21.1", optional = true }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-measure = { path = "../measure", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.21.1", optional = true }
[dev-dependencies]
serial_test = "0.2.0"
serial_test_derive = "0.2.0"
solana-local-cluster = { path = "../local-cluster", version = "0.21.1" }
[features]
move = ["solana-core/move", "solana-librapay-api", "solana-move-loader-program", "solana-move-loader-api"]
move = ["solana-librapay-api", "solana-move-loader-program"]

View File

@ -1,5 +1,3 @@
use solana_metrics;
use crate::cli::Config;
use log::*;
use rayon::prelude::*;
@ -7,18 +5,19 @@ use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_core::gen_keys::GenKeys;
use solana_drone::drone::request_airdrop_transaction;
#[cfg(feature = "move")]
use solana_librapay_api::{create_genesis, upload_mint_program, upload_payment_program};
use solana_librapay_api::{create_genesis, upload_mint_script, upload_payment_script};
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_metrics::{self, datapoint_debug};
use solana_sdk::{
client::Client,
clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
system_instruction, system_transaction,
timing::{duration_as_ms, duration_as_s, timestamp},
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
transaction::Transaction,
};
use std::{
@ -55,7 +54,7 @@ type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
loop {
match client.get_recent_blockhash() {
match client.get_recent_blockhash_with_commitment(CommitmentConfig::recent()) {
Ok((blockhash, fee_calculator)) => return (blockhash, fee_calculator),
Err(err) => {
info!("Couldn't get recent blockhash: {:?}", err);
@ -158,12 +157,13 @@ where
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
let mut blockhash = Hash::default();
let mut blockhash_time = Instant::now();
let mut blockhash_time;
while start.elapsed() < duration {
// ping-pong between source and destination accounts for each loop iteration
// this seems to be faster than trying to determine the balance of individual
// accounts
let len = tx_count as usize;
blockhash_time = Instant::now();
if let Ok((new_blockhash, _fee_calculator)) = client.get_new_blockhash(&blockhash) {
blockhash = new_blockhash;
} else {
@ -173,13 +173,19 @@ where
sleep(Duration::from_millis(100));
continue;
}
info!(
"Took {} ms for new blockhash",
duration_as_ms(&blockhash_time.elapsed())
datapoint_debug!(
"bench-tps-get_blockhash",
("duration", duration_as_us(&blockhash_time.elapsed()), i64)
);
blockhash_time = Instant::now();
let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
datapoint_debug!(
"bench-tps-get_balance",
("duration", duration_as_us(&blockhash_time.elapsed()), i64)
);
generate_txs(
&shared_txs,
&blockhash,
@ -367,7 +373,7 @@ fn generate_txs(
);
datapoint_debug!(
"bench-tps-generate_txs",
("duration", duration_as_ms(&duration), i64)
("duration", duration_as_us(&duration), i64)
);
let sz = transactions.len() / threads;
@ -432,7 +438,7 @@ fn do_tx_transfers<T: Client>(
);
datapoint_debug!(
"bench-tps-do_tx_transfers",
("duration", duration_as_ms(&transfer_start.elapsed()), i64),
("duration", duration_as_us(&transfer_start.elapsed()), i64),
("count", tx_len, i64)
);
}
@ -444,7 +450,11 @@ fn do_tx_transfers<T: Client>(
fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
for a in &tx.message().account_keys[1..] {
if client.get_balance(a).unwrap_or(0) >= amount {
if client
.get_balance_with_commitment(a, CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{
return true;
}
}
@ -659,10 +669,12 @@ pub fn airdrop_lamports<T: Client>(
}
};
let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| {
info!("airdrop error {}", e);
starting_balance
});
let current_balance = client
.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent())
.unwrap_or_else(|e| {
info!("airdrop error {}", e);
starting_balance
});
info!("current balance {}...", current_balance);
metrics_submit_lamport_balance(current_balance);
@ -777,62 +789,62 @@ fn fund_move_keys<T: Client>(
total: u64,
libra_pay_program_id: &Pubkey,
libra_mint_program_id: &Pubkey,
libra_mint_key: &Keypair,
libra_genesis_key: &Keypair,
) {
let (mut blockhash, _fee_calculator) = get_recent_blockhash(client);
info!("creating the libra funding account..");
let libra_funding_key = Keypair::new();
let tx = librapay_transaction::create_account(
funding_key,
&libra_funding_key.pubkey(),
1,
blockhash,
);
client.send_message(&[funding_key], tx.message).unwrap();
let tx = librapay_transaction::create_account(funding_key, &libra_funding_key, 1, blockhash);
client
.send_message(&[funding_key, &libra_funding_key], tx.message)
.unwrap();
info!("minting to funding keypair");
let tx = librapay_transaction::mint_tokens(
&libra_mint_program_id,
funding_key,
libra_mint_key,
libra_genesis_key,
&libra_funding_key.pubkey(),
total,
blockhash,
);
client
.send_message(&[funding_key, libra_mint_key], tx.message)
.send_message(&[funding_key, libra_genesis_key], tx.message)
.unwrap();
info!("creating {} move accounts...", keypairs.len());
let create_len = 8;
let total_len = keypairs.len();
let create_len = 5;
let mut funding_time = Measure::start("funding_time");
for (i, keys) in keypairs.chunks(create_len).enumerate() {
if client.get_balance(&keys[0].pubkey()).unwrap_or(0) > 0 {
if client
.get_balance_with_commitment(&keys[0].pubkey(), CommitmentConfig::recent())
.unwrap_or(0)
> 0
{
// already created these accounts.
break;
}
let pubkeys: Vec<_> = keys.iter().map(|k| k.pubkey()).collect();
let tx = librapay_transaction::create_accounts(funding_key, &pubkeys, 1, blockhash);
let keypairs: Vec<_> = keys.iter().map(|k| k).collect();
let tx = librapay_transaction::create_accounts(funding_key, &keypairs, 1, blockhash);
let ser_size = bincode::serialized_size(&tx).unwrap();
client.send_message(&[funding_key], tx.message).unwrap();
let mut keys = vec![funding_key];
keys.extend(&keypairs);
client.send_message(&keys, tx.message).unwrap();
if i % 10 == 0 {
info!(
"size: {} created {} accounts of {}",
ser_size,
"created {} accounts of {} (size {})",
i,
(keypairs.len() / create_len),
total_len / create_len,
ser_size,
);
}
}
funding_time.stop();
info!("funding accounts {}ms", funding_time.as_ms());
const NUM_FUNDING_KEYS: usize = 4;
const NUM_FUNDING_KEYS: usize = 10;
let funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
let pubkey_amounts: Vec<_> = funding_keys
.iter()
@ -846,7 +858,9 @@ fn fund_move_keys<T: Client>(
client.send_message(&[funding_key], tx.message).unwrap();
let mut balance = 0;
for _ in 0..20 {
if let Ok(balance_) = client.get_balance(&funding_keys[0].pubkey()) {
if let Ok(balance_) = client
.get_balance_with_commitment(&funding_keys[0].pubkey(), CommitmentConfig::recent())
{
if balance_ > 0 {
balance = balance_;
break;
@ -855,19 +869,21 @@ fn fund_move_keys<T: Client>(
sleep(Duration::from_millis(100));
}
assert!(balance > 0);
info!("funded multiple funding accounts.. {:?}", balance);
info!(
"funded multiple funding accounts with {:?} lanports",
balance
);
let libra_funding_keys: Vec<_> = (0..NUM_FUNDING_KEYS).map(|_| Keypair::new()).collect();
for (i, key) in libra_funding_keys.iter().enumerate() {
let tx =
librapay_transaction::create_account(&funding_keys[i], &key.pubkey(), 1, blockhash);
let tx = librapay_transaction::create_account(&funding_keys[i], &key, 1, blockhash);
client
.send_message(&[&funding_keys[i]], tx.message)
.send_message(&[&funding_keys[i], &key], tx.message)
.unwrap();
let tx = librapay_transaction::transfer(
libra_pay_program_id,
&libra_mint_key.pubkey(),
&libra_genesis_key.pubkey(),
&funding_keys[i],
&libra_funding_key,
&key.pubkey(),
@ -887,7 +903,7 @@ fn fund_move_keys<T: Client>(
for (j, key) in keys.iter().enumerate() {
let tx = librapay_transaction::transfer(
libra_pay_program_id,
&libra_mint_key.pubkey(),
&libra_genesis_key.pubkey(),
&funding_keys[j],
&libra_funding_keys[j],
&key.pubkey(),
@ -900,7 +916,6 @@ fn fund_move_keys<T: Client>(
.expect("create_account in generate_and_fund_keypairs");
}
info!("sent... checking balance {}", i);
for (j, key) in keys.iter().enumerate() {
let mut times = 0;
loop {
@ -918,11 +933,16 @@ fn fund_move_keys<T: Client>(
}
}
info!("funded: {} of {}", i, keypairs.len() / NUM_FUNDING_KEYS);
info!(
"funded group {} of {}",
i + 1,
keypairs.len() / NUM_FUNDING_KEYS
);
blockhash = get_recent_blockhash(client).0;
}
info!("done funding keys..");
funding_time.stop();
info!("done funding keys, took {} ms", funding_time.as_ms());
}
pub fn generate_and_fund_keypairs<T: Client>(
@ -972,8 +992,8 @@ pub fn generate_and_fund_keypairs<T: Client>(
{
if use_move {
let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
let libra_mint_program_id = upload_mint_program(&funding_key, client);
let libra_pay_program_id = upload_payment_program(&funding_key, client);
let libra_mint_program_id = upload_mint_script(&funding_key, client);
let libra_pay_program_id = upload_payment_script(&funding_key, client);
// Generate another set of keypairs for move accounts.
// Still fund the solana ones which will be used for fees.
@ -1024,7 +1044,7 @@ mod tests {
use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::genesis_config::create_genesis_config;
#[test]
fn test_switch_directions() {
@ -1043,8 +1063,8 @@ mod tests {
#[test]
fn test_bench_tps_bank_client() {
let (genesis_block, id) = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let clients = vec![BankClient::new(bank)];
let mut config = Config::default();
@ -1061,8 +1081,8 @@ mod tests {
#[test]
fn test_bench_tps_fund_keys() {
let (genesis_block, id) = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let tx_count = 10;
let lamports = 20;
@ -1071,16 +1091,21 @@ mod tests {
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
for kp in &keypairs {
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
assert_eq!(
client
.get_balance_with_commitment(&kp.pubkey(), CommitmentConfig::recent())
.unwrap(),
lamports
);
}
}
#[test]
fn test_bench_tps_fund_keys_with_fees() {
let (mut genesis_block, id) = create_genesis_block(10_000);
let (mut genesis_config, id) = create_genesis_config(10_000);
let fee_calculator = FeeCalculator::new(11, 0);
genesis_block.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_block);
genesis_config.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let tx_count = 10;
let lamports = 20;
@ -1089,7 +1114,7 @@ mod tests {
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
let max_fee = client
.get_recent_blockhash()
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.unwrap()
.1
.max_lamports_per_signature;

View File

@ -1,4 +1,4 @@
use clap::{crate_description, crate_name, crate_version, App, Arg, ArgMatches};
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
use solana_drone::drone::DRONE_PORT;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::{read_keypair_file, Keypair, KeypairUtil};
@ -21,6 +21,7 @@ pub struct Config {
pub write_to_client_file: bool,
pub read_from_client_file: bool,
pub target_lamports_per_signature: u64,
pub multi_client: bool,
pub use_move: bool,
pub num_lamports_per_account: u64,
}
@ -41,6 +42,7 @@ impl Default for Config {
write_to_client_file: false,
read_from_client_file: false,
target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
multi_client: true,
use_move: false,
num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
}
@ -48,9 +50,9 @@ impl Default for Config {
}
/// Defines and builds the CLI args for a run of the benchmark
pub fn build_args<'a, 'b>() -> App<'a, 'b> {
pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
App::new(crate_name!()).about(crate_description!())
.version(crate_version!())
.version(version)
.arg(
Arg::with_name("entrypoint")
.short("n")
@ -108,6 +110,11 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
.long("use-move")
.help("Use Move language transactions to perform transfers."),
)
.arg(
Arg::with_name("no-multi-client")
.long("no-multi-client")
.help("Disable multi-client support, only transact with the entrypoint."),
)
.arg(
Arg::with_name("tx_count")
.long("tx_count")
@ -167,14 +174,14 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
let mut args = Config::default();
if let Some(addr) = matches.value_of("entrypoint") {
args.entrypoint_addr = solana_netutil::parse_host_port(addr).unwrap_or_else(|e| {
args.entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse entrypoint address: {}", e);
exit(1)
});
}
if let Some(addr) = matches.value_of("drone") {
args.drone_addr = solana_netutil::parse_host_port(addr).unwrap_or_else(|e| {
args.drone_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
eprintln!("failed to parse drone address: {}", e);
exit(1)
});
@ -229,6 +236,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
}
args.use_move = matches.is_present("use-move");
args.multi_client = !matches.is_present("no-multi-client");
if let Some(v) = matches.value_of("num_lamports_per_account") {
args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");

View File

@ -1,7 +1,7 @@
use log::*;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
use solana_bench_tps::cli;
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::{Keypair, KeypairUtil};
@ -15,7 +15,7 @@ fn main() {
solana_logger::setup_with_filter("solana=info");
solana_metrics::set_panic_hook("bench-tps");
let matches = cli::build_args().get_matches();
let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
let cli_config = cli::extract_args(&matches);
let cli::Config {
@ -29,6 +29,7 @@ fn main() {
read_from_client_file,
target_lamports_per_signature,
use_move,
multi_client,
num_lamports_per_account,
..
} = &cli_config;
@ -70,15 +71,19 @@ fn main() {
exit(1);
});
let (client, num_clients) = get_multi_client(&nodes);
if nodes.len() < num_clients {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
num_nodes
);
exit(1);
}
let client = if *multi_client {
let (client, num_clients) = get_multi_client(&nodes);
if nodes.len() < num_clients {
eprintln!(
"Error: Insufficient nodes discovered. Expecting {} or more",
num_nodes
);
exit(1);
}
client
} else {
get_client(&nodes)
};
let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
let path = Path::new(&client_ids_and_stake_file);

View File

@ -1,4 +1,3 @@
use crate::local_cluster::{ClusterConfig, LocalCluster};
use serial_test_derive::serial;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs};
use solana_bench_tps::cli::Config;
@ -6,15 +5,16 @@ use solana_client::thin_client::create_client;
use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
use solana_core::validator::ValidatorConfig;
use solana_drone::drone::run_local_drone;
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
#[cfg(feature = "move")]
use solana_move_loader_program;
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::mpsc::channel;
use std::time::Duration;
fn test_bench_tps_local_cluster(config: Config) {
#[cfg(feature = "move")]
let native_instruction_processors = vec![solana_move_loader_program!()];
let native_instruction_processors = vec![solana_move_loader_program()];
#[cfg(not(feature = "move"))]
let native_instruction_processors = vec![];
@ -57,8 +57,10 @@ fn test_bench_tps_local_cluster(config: Config) {
)
.unwrap();
let total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
assert!(total > 100);
let _total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
#[cfg(not(debug_assertions))]
assert!(_total > 100);
}
#[test]
@ -76,7 +78,7 @@ fn test_bench_tps_local_cluster_solana() {
fn test_bench_tps_local_cluster_move() {
let mut config = Config::default();
config.tx_count = 100;
config.duration = Duration::from_secs(20);
config.duration = Duration::from_secs(10);
config.use_move = true;
test_bench_tps_local_cluster(config);

View File

@ -7,7 +7,7 @@
| TVU | |
| | |
| .-------. .------------. .----+---. .---------. |
.------------. | | Blob | | Retransmit | | Replay | | Storage | |
.------------. | | Shred | | Retransmit | | Replay | | Storage | |
| Upstream +----->| Fetch +-->| Stage +-->| Stage +-->| Stage | |
| Validators | | | Stage | | | | | | | |
`------------` | `-------` `----+-------` `----+---` `---------` |

View File

@ -1,30 +1,30 @@
.--------------------------------------.
| Validator |
| |
.--------. | .-------------------. |
| |---->| | |
| Client | | | JSON RPC Service | |
| |<----| | |
`----+---` | `-------------------` |
| | ^ |
| | | .----------------. | .------------------.
| | | | Gossip Service |<----------| Validators |
| | | `----------------` | | |
| | | ^ | | |
| | | | | | .------------. |
| | .---+---. .----+---. .-----------. | | | | |
| | | Bank |<-+ Replay | | BlobFetch |<------+ Upstream | |
| | | Forks | | Stage | | Stage | | | | Validators | |
| | `-------` `--------` `--+--------` | | | | |
| | ^ ^ | | | `------------` |
| | | | v | | |
| | | .--+--------. | | |
| | | | Blocktree | | | |
| | | `-----------` | | .------------. |
| | | ^ | | | | |
| | | | | | | Downstream | |
| | .--+--. .-------+---. | | | Validators | |
`-------->| TPU +---->| Broadcast +--------------->| | |
| `-----` | Stage | | | `------------` |
| `-----------` | `------------------`
`--------------------------------------`
.---------------------------------------.
| Validator |
| |
.--------. | .-------------------. |
| |---->| | |
| Client | | | JSON RPC Service | |
| |<----| | |
`----+---` | `-------------------` |
| | ^ |
| | | .----------------. | .------------------.
| | | | Gossip Service |<-----------| Validators |
| | | `----------------` | | |
| | | ^ | | |
| | | | | | .------------. |
| | .---+---. .----+---. .------------. | | | | |
| | | Bank |<-+ Replay | | ShredFetch |<------+ Upstream | |
| | | Forks | | Stage | | Stage | | | | Validators | |
| | `-------` `--------` `--+---------` | | | | |
| | ^ ^ | | | `------------` |
| | | | v | | |
| | | .--+--------. | | |
| | | | Blocktree | | | |
| | | `-----------` | | .------------. |
| | | ^ | | | | |
| | | | | | | Downstream | |
| | .--+--. .-------+---. | | | Validators | |
`-------->| TPU +---->| Broadcast +---------------->| | |
| `-----` | Stage | | | `------------` |
| `-----------` | `------------------`
`---------------------------------------`

View File

@ -36,6 +36,10 @@
* [Troubleshooting](running-validator/validator-troubleshoot.md)
* [FAQ](running-validator/validator-faq.md)
* [Running an Archiver](running-archiver.md)
* [Paper Wallet](paper-wallet/README.md)
* [Installation](paper-wallet/installation.md)
* [Creating and Using a Seed Phrase](paper-wallet/keypair.md)
* [Paper Wallet Usage](paper-wallet/usage.md)
* [API Reference](api-reference/README.md)
* [Transaction](api-reference/transaction-api.md)
* [Instruction](api-reference/instruction-api.md)
@ -55,6 +59,7 @@
* [Inter-chain Transaction Verification](proposals/interchain-transaction-verification.md)
* [Snapshot Verification](proposals/snapshot-verification.md)
* [Bankless Leader](proposals/bankless-leader.md)
* [Durable Transaction Nonces](proposals/durable-tx-nonces.md)
* [Implemented Design Proposals](implemented-proposals/README.md)
* [Blocktree](implemented-proposals/blocktree.md)
* [Cluster Software Installation and Updates](implemented-proposals/installer.md)
@ -79,6 +84,5 @@
* [Reliable Vote Transmission](implemented-proposals/reliable-vote-transmission.md)
* [Repair Service](implemented-proposals/repair-service.md)
* [Testing Programs](implemented-proposals/testing-programs.md)
* [Credit-only Accounts](implemented-proposals/credit-only-credit-debit-accounts.md)
* [Credit-only Accounts](implemented-proposals/readonly-accounts.md)
* [Embedding the Move Language](implemented-proposals/embedding-move.md)

View File

@ -24,5 +24,5 @@ The stream will output a series of JSON objects:
* `s`, the slot height, as unsigned 64-bit integer
* `h`, the tick height, as unsigned 64-bit integer
* `l`, the slot leader id, as base-58 encoded string
* `id`, the block id, as base-58 encoded string
* `hash`, the [blockhash](terminology.md#blockhash), as base-58 encoded string
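For illustration only — a minimal sketch (assuming the serde crate with its derive feature, plus serde_json) of deserializing one such stream object; it covers just the fields listed above, and the sample values are made up:

```rust
use serde::Deserialize;

// Illustrative only; unknown/extra fields in real stream entries are ignored.
#[derive(Debug, Deserialize)]
struct BlockStreamEntry {
    s: u64,       // slot height
    h: u64,       // tick height
    l: String,    // slot leader id, base-58
    hash: String, // blockhash, base-58
}

fn main() {
    // Hypothetical sample line; real values come from the stream.
    let line = r#"{"s":5,"h":320,"l":"ExampleLeaderPubkey111","hash":"ExampleBlockhash111"}"#;
    let entry: BlockStreamEntry = serde_json::from_str(line).unwrap();
    println!("{:?}", entry);
}
```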

View File

@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
## Usage
### solana-cli
```text
solana-cli 0.20.0
solana-cli 0.21.1
Blockchain, Rebuilt for Scale
USAGE:
@ -200,7 +200,7 @@ SUBCOMMANDS:
claim-storage-reward Redeem storage reward credits
cluster-version Get the version of the cluster entrypoint
confirm Confirm transaction by signature
create-archiver-storage-account Create an archiver storage account
create-archiver-storage-account Create an archiver storage account
create-stake-account Create a stake account
create-validator-storage-account Create a validator storage account
create-vote-account Create a vote account
@ -210,7 +210,7 @@ SUBCOMMANDS:
fees Display current cluster fees
get Get cli config settings
get-epoch-info Get information about the current epoch
get-genesis-blockhash Get the genesis blockhash
get-genesis-hash Get the genesis hash
get-slot Get current slot
get-transaction-count Get current transaction count
help Prints this message or the help of the given subcommand(s)
@ -236,7 +236,7 @@ SUBCOMMANDS:
#### solana-address
```text
solana-address
solana-address
Get your public key
USAGE:
@ -254,7 +254,7 @@ OPTIONS:
#### solana-airdrop
```text
solana-airdrop
solana-airdrop
Request lamports
USAGE:
@ -278,7 +278,7 @@ ARGS:
#### solana-balance
```text
solana-balance
solana-balance
Get your balance
USAGE:
@ -300,7 +300,7 @@ ARGS:
#### solana-cancel
```text
solana-cancel
solana-cancel
Cancel a transfer
USAGE:
@ -321,7 +321,7 @@ ARGS:
#### solana-claim-storage-reward
```text
solana-claim-storage-reward
solana-claim-storage-reward
Redeem storage reward credits
USAGE:
@ -343,7 +343,7 @@ ARGS:
#### solana-cluster-version
```text
solana-cluster-version
solana-cluster-version
Get the version of the cluster entrypoint
USAGE:
@ -361,7 +361,7 @@ OPTIONS:
#### solana-confirm
```text
solana-confirm
solana-confirm
Confirm transaction by signature
USAGE:
@ -382,7 +382,7 @@ ARGS:
#### solana-create-archiver-storage-account
```text
solana-create-archiver-storage-account
solana-create-archiver-storage-account
Create an archiver storage account
USAGE:
@ -398,13 +398,13 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json
ARGS:
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
```
#### solana-create-stake-account
```text
solana-create-stake-account
solana-create-stake-account
Create a stake account
USAGE:
@ -432,7 +432,7 @@ ARGS:
#### solana-create-validator-storage-account
```text
solana-create-validator-storage-account
solana-create-validator-storage-account
Create a validator storage account
USAGE:
@ -448,13 +448,13 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json
ARGS:
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
```
#### solana-create-vote-account
```text
solana-create-vote-account
solana-create-vote-account
Create a vote account
USAGE:
@ -467,7 +467,7 @@ FLAGS:
OPTIONS:
--authorized-voter <PUBKEY> Public key of the authorized voter (defaults to vote account)
--authorized-withdrawer <PUBKEY> Public key of the authorized withdrawer (defaults to cli config pubkey)
--commission <NUM> The commission taken on reward redemption (0-255), default: 0
--commission <NUM> The commission taken on reward redemption (0-100), default: 0
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
@ -480,7 +480,7 @@ ARGS:
#### solana-deactivate-stake
```text
solana-deactivate-stake
solana-deactivate-stake
Deactivate the delegated stake from the stake account
USAGE:
@ -501,7 +501,7 @@ ARGS:
#### solana-delegate-stake
```text
solana-delegate-stake
solana-delegate-stake
Delegate stake to a vote account
USAGE:
@ -523,7 +523,7 @@ ARGS:
#### solana-deploy
```text
solana-deploy
solana-deploy
Deploy a program
USAGE:
@ -544,7 +544,7 @@ ARGS:
#### solana-fees
```text
solana-fees
solana-fees
Display current cluster fees
USAGE:
@ -562,7 +562,7 @@ OPTIONS:
#### solana-get
```text
solana-get
solana-get
Get cli config settings
USAGE:
@ -583,7 +583,7 @@ ARGS:
#### solana-get-epoch-info
```text
solana-get-epoch-info
solana-get-epoch-info
Get information about the current epoch
USAGE:
@ -599,13 +599,13 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json
```
#### solana-get-genesis-blockhash
#### solana-get-genesis-hash
```text
solana-get-genesis-blockhash
Get the genesis blockhash
solana-get-genesis-hash
Get the genesis hash
USAGE:
solana get-genesis-blockhash [OPTIONS]
solana get-genesis-hash [OPTIONS]
FLAGS:
-h, --help Prints help information
@ -619,7 +619,7 @@ OPTIONS:
#### solana-get-slot
```text
solana-get-slot
solana-get-slot
Get current slot
USAGE:
@ -637,7 +637,7 @@ OPTIONS:
#### solana-get-transaction-count
```text
solana-get-transaction-count
solana-get-transaction-count
Get current transaction count
USAGE:
@ -655,7 +655,7 @@ OPTIONS:
#### solana-help
```text
solana-help
solana-help
Prints this message or the help of the given subcommand(s)
USAGE:
@ -667,14 +667,14 @@ ARGS:
#### solana-pay
```text
solana-pay
solana-pay
Send a payment
USAGE:
solana pay [FLAGS] [OPTIONS] <PUBKEY> <AMOUNT> [--] [UNIT]
FLAGS:
--cancelable
--cancelable
-h, --help Prints help information
-V, --version Prints version information
@ -695,7 +695,7 @@ ARGS:
#### solana-ping
```text
solana-ping
solana-ping
Submit transactions sequentially
USAGE:
@ -716,7 +716,7 @@ OPTIONS:
#### solana-redeem-vote-credits
```text
solana-redeem-vote-credits
solana-redeem-vote-credits
Redeem credits in the stake account
USAGE:
@ -738,7 +738,7 @@ ARGS:
#### solana-send-signature
```text
solana-send-signature
solana-send-signature
Send a signature to authorize a transfer
USAGE:
@ -760,7 +760,7 @@ ARGS:
#### solana-send-timestamp
```text
solana-send-timestamp
solana-send-timestamp
Send a timestamp to unlock a transfer
USAGE:
@ -783,7 +783,7 @@ ARGS:
#### solana-set
```text
solana-set
solana-set
Set a cli config setting
USAGE:
@ -801,7 +801,7 @@ OPTIONS:
#### solana-show-account
```text
solana-show-account
solana-show-account
Show the contents of an account
USAGE:
@ -824,7 +824,7 @@ ARGS:
#### solana-show-stake-account
```text
solana-show-stake-account
solana-show-stake-account
Show the contents of a stake account
USAGE:
@ -846,7 +846,7 @@ ARGS:
#### solana-show-storage-account
```text
solana-show-storage-account
solana-show-storage-account
Show the contents of a storage account
USAGE:
@ -867,7 +867,7 @@ ARGS:
#### solana-show-validators
```text
solana-show-validators
solana-show-validators
Show information about the current validators
USAGE:
@ -886,7 +886,7 @@ OPTIONS:
#### solana-show-vote-account
```text
solana-show-vote-account
solana-show-vote-account
Show the contents of a vote account
USAGE:
@ -908,7 +908,7 @@ ARGS:
#### solana-stake-authorize-staker
```text
solana-stake-authorize-staker
solana-stake-authorize-staker
Authorize a new stake signing keypair for the given stake account
USAGE:
@ -930,7 +930,7 @@ ARGS:
#### solana-stake-authorize-withdrawer
```text
solana-stake-authorize-withdrawer
solana-stake-authorize-withdrawer
Authorize a new withdraw signing keypair for the given stake account
USAGE:
@ -952,7 +952,7 @@ ARGS:
#### solana-uptime
```text
solana-uptime
solana-uptime
Show the uptime of a validator, based on epoch voting history
USAGE:
@ -975,7 +975,7 @@ ARGS:
#### solana-validator-info
```text
solana-validator-info
solana-validator-info
Publish/get Validator info on Solana
USAGE:
@ -998,7 +998,7 @@ SUBCOMMANDS:
#### solana-vote-authorize-voter
```text
solana-vote-authorize-voter
solana-vote-authorize-voter
Authorize a new vote signing keypair for the given vote account
USAGE:
@ -1020,7 +1020,7 @@ ARGS:
#### solana-vote-authorize-withdrawer
```text
solana-vote-authorize-withdrawer
solana-vote-authorize-withdrawer
Authorize a new withdraw signing keypair for the given vote account
USAGE:
@ -1042,7 +1042,7 @@ ARGS:
#### solana-withdraw-stake
```text
solana-withdraw-stake
solana-withdraw-stake
Withdraw the unstaked lamports from the stake account
USAGE:
@ -1063,4 +1063,3 @@ ARGS:
<AMOUNT> The amount to withdraw from the stake account (default unit SOL)
<UNIT> Specify unit to use for request [possible values: SOL, lamports]
```

View File

@ -17,11 +17,13 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [confirmTransaction](jsonrpc-api.md#confirmtransaction)
* [getAccountInfo](jsonrpc-api.md#getaccountinfo)
* [getBalance](jsonrpc-api.md#getbalance)
* [getBlockConfidence](jsonrpc-api.md#getblockconfidence)
* [getBlockCommitment](jsonrpc-api.md#getblockcommitment)
* [getBlockTime](jsonrpc-api.md#getblocktime)
* [getClusterNodes](jsonrpc-api.md#getclusternodes)
* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getGenesisBlockhash](jsonrpc-api.md#getgenesisblockhash)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
* [getNumBlocksSinceSignatureConfirmation](jsonrpc-api.md#getnumblockssincesignatureconfirmation)
@ -78,6 +80,31 @@ Requests can be sent in batches by sending an array of JSON-RPC request objects
* Signature: An Ed25519 signature of a chunk of data.
* Transaction: A Solana instruction signed by a client key-pair.
## Configuring State Commitment
Solana nodes choose which bank state to query based on a commitment requirement
set by the client. Clients may specify either:
* `{"commitment":"max"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations
* `{"commitment":"recent"}` - the node will query its most recent bank state
The commitment parameter should be included as the last element in the `params` array:
```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri",{"commitment":"max"}]}' 192.168.1.88:8899
```
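For comparison, a client that only needs the node's most recent (possibly unrooted) bank state would pass `"recent"` instead. This sketch simply reuses the `getBalance` method and pubkey from the example above, and assumes a local node as the endpoint:

```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri",{"commitment":"recent"}]}' http://localhost:8899
```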
#### Default:
If commitment configuration is not provided, the node will default to `"commitment":"max"`
Only methods that query bank state accept the commitment parameter. They are indicated in the API Reference below.
#### RpcResponse Structure
Many methods that take a commitment parameter return an RpcResponse JSON object comprised of two parts:
* `context` : An RpcResponseContext JSON structure including a `slot` field at which the operation was evaluated.
* `value` : The value returned by the operation itself.
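For instance, the `getBalance` response shown later in this reference has exactly this shape; the slot and balance values below are illustrative:

```bash
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":0},"id":1}
```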
## JSON RPC API Reference
### confirmTransaction
@ -87,10 +114,11 @@ Returns a transaction receipt
#### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
* `boolean` - Transaction status, true if Transaction is confirmed
* `RpcResponse<boolean>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed
#### Example:
@ -99,7 +127,7 @@ Returns a transaction receipt
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"confirmTransaction", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":true,"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":true},"id":1}
```
### getAccountInfo
@ -109,11 +137,13 @@ Returns all information associated with the account of provided Pubkey
#### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
The result field will be a JSON object with the following sub fields:
The result value will be an RpcResponse JSON object containing an AccountInfo JSON object.
* `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
@ -126,7 +156,7 @@ The result field will be a JSON object with the following sub fields:
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.1,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
```
### getBalance
@ -136,10 +166,11 @@ Returns the balance of the account of provided Pubkey
#### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
* `integer` - quantity, as a signed 64-bit integer
* `RpcResponse<integer>` - RpcResponse JSON object with `value` field set to quantity, as a signed 64-bit integer
#### Example:
@ -148,12 +179,12 @@ Returns the balance of the account of provided Pubkey
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":0,"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":0},"id":1}
```
### getBlockConfidence
### getBlockCommitment
Returns confidence for particular block
Returns commitment for particular block
#### Parameters:
@ -163,20 +194,45 @@ Returns confidence for particular block
The result field will be an array with two fields:
* Confidence
* Commitment
* `null` - Unknown block
* `object` - BankConfidence
* `array` - confidence, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
* `object` - BlockCommitment
* `array` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
* `integer` - total active stake, in lamports, of the current epoch
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockConfidence","params":[5]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockCommitment","params":[5]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":[{"confidence":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32]},42],"id":1}
{"jsonrpc":"2.0","result":[{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32]},42],"id":1}
```
### getBlockTime
Returns the estimated production time of a block. Validators report their UTC
time to the ledger on a regular interval. A block's time is calculated as an
offset from the median value of the most recent validator time report.
#### Parameters:
* `u64` - block, identified by Slot
#### Results:
* `null` - block has not yet been produced
* `i64` - estimated production time, as Unix timestamp (seconds since the Unix epoch)
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockTime","params":[5]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":1574721591,"id":1}
```
### getClusterNodes
@ -206,13 +262,46 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
```
### getConfirmedBlock
Returns identity and transaction information about a confirmed block in the ledger
#### Parameters:
* `integer` - slot, as u64 integer
#### Results:
The result field will be an object with the following fields:
* `blockhash` - the blockhash of this block
* `previousBlockhash` - the blockhash of this block's parent
* `parentSlot` - the slot index of this block's parent
* `transactions` - an array of tuples containing:
* [Transaction](transaction-api.md) object, in JSON format
* Transaction status object, containing:
* `status` - Transaction status:
* `"Ok": null` - Transaction was successful
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
* `fee` - fee this transaction was charged, as u64 integer
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":{"blockhash":[165,245,120,183,32,205,89,222,249,114,229,49,250,231,149,122,156,232,181,83,238,194,157,153,7,213,180,54,177,6,25,101],"parentSlot":429,"previousBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166],"transactions":[[{"message":{"accountKeys":[[5],[219,181,202,40,52,148,34,136,186,59,137,160,250,225,234,17,244,160,88,116,24,176,30,227,68,11,199,38,141,68,131,228],[233,48,179,56,91,40,254,206,53,48,196,176,119,248,158,109,121,77,11,69,108,160,128,27,228,122,146,249,53,184,68,87],[6,167,213,23,25,47,10,175,198,242,101,227,251,119,204,122,218,130,197,41,208,190,59,19,110,45,0,85,32,0,0,0],[6,167,213,23,24,199,116,201,40,86,99,152,105,29,94,182,139,94,184,163,155,75,109,92,115,85,91,33,0,0,0,0],[7,97,72,29,53,116,116,187,124,77,118,36,235,211,189,179,216,53,94,115,209,16,67,252,13,163,83,128,0,0,0,0]],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[[1],{"accounts":[[3],1,2,3],"data":[[52],2,0,0,0,1,0,0,0,0,0,0,0,173,1,0,0,0,0,0,0,86,55,9,248,142,238,135,114,103,83,247,124,67,68,163,233,55,41,59,129,64,50,110,221,234,234,27,213,205,193,219,50],"program_id_index":4}],"recentBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166]},"signatures":[[2],[119,9,95,108,35,95,7,1,69,101,65,45,5,204,61,114,172,88,123,238,32,201,135,229,57,50,13,21,106,216,129,183,238,43,37,101,148,81,56,232,88,136,80,65,46,189,39,106,94,13,238,54,186,48,118,186,0,62,121,122,172,171,66,5],[78,40,77,250,10,93,6,157,48,173,100,40,251,9,7,218,7,184,43,169,76,240,254,34,235,48,41,175,119,126,75,107,106,248,45,161,119,48,174,213,57,69,111,225,245,60,148,73,124,82,53,6,203,126,120,180,111,169,89,64,29,23,237,13]]},{"fee":100000,"status":{"Ok":null}}]]},"id":1}
```
### getEpochInfo
Returns information about the current epoch
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -234,7 +323,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### getEpochSchedule
Returns epoch schedule information from this cluster's genesis block
Returns epoch schedule information from this cluster's genesis config
#### Parameters:
@ -260,9 +349,9 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"first_normal_epoch":8,"first_normal_slot":8160,"leader_schedule_slot_offset":8192,"slots_per_epoch":8192,"warmup":true},"id":1}
```
### getGenesisBlockhash
### getGenesisHash
Returns the genesis block hash
Returns the genesis hash
#### Parameters:
@ -276,7 +365,7 @@ None
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getGenesisBlockhash"}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getGenesisHash"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1}
@ -288,7 +377,7 @@ Returns the leader schedule for the current epoch
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -311,6 +400,7 @@ Returns minimum balance required to make account rent exempt.
#### Parameters:
* `integer` - account data length, as unsigned integer
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -333,6 +423,7 @@ Returns the current number of blocks since signature has been confirmed.
#### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -355,6 +446,7 @@ Returns all accounts owned by the provided program Pubkey
#### Parameters:
* `string` - Pubkey of program, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -382,12 +474,13 @@ Returns a recent block hash from the ledger, and a fee schedule that can be used
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
An array consisting of
An RpcResponse containing an array consisting of a string blockhash and FeeCalculator JSON object.
* `RpcResponse<array>` - RpcResponse JSON object with `value` field set to an array including:
* `string` - a Hash as base-58 encoded string
* `FeeCalculator object` - the fee schedule for this block hash
@ -398,7 +491,7 @@ An array consisting of
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC",{"lamportsPerSignature": 0}],"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC",{"lamportsPerSignature": 0}]},"id":1}
```
### getSignatureStatus
@ -408,6 +501,7 @@ Returns the status of a given signature. This method is similar to [confirmTrans
#### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -432,7 +526,7 @@ Returns the current slot the node is processing
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -454,7 +548,7 @@ Returns the current slot leader
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -476,7 +570,7 @@ Returns the current storage segment size in terms of slots
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -542,7 +636,7 @@ Returns the current Transaction count from the ledger
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -564,7 +658,7 @@ Returns the current total supply in Lamports
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -609,7 +703,7 @@ Returns the account info and associated stake for all the voting accounts in the
#### Parameters:
None
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
@ -619,7 +713,7 @@ The result field will be a JSON object of `current` and `delinquent` accounts, e
* `nodePubkey` - Node public key, as base-58 encoded string
* `activatedStake` - the stake, in lamports, delegated to this vote account and active in this epoch
* `epochVoteAccount` - bool, whether the vote account is staked for this epoch
* `commission`, an 8-bit integer used as a fraction \(commission/MAX\_U8\) for rewards payout
* `commission`, percentage (0-100) of rewards payout owed to the vote account
* `lastVote` - Most recent slot voted on by this vote account
#### Example:
@ -640,6 +734,7 @@ Requests an airdrop of lamports to a Pubkey
* `string` - Pubkey of account to receive lamports, as base-58 encoded string
* `integer` - lamports, as a signed 64-bit integer
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) (used for retrieving blockhash and verifying airdrop success)
#### Results:
@ -729,7 +824,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.1,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
### accountUnsubscribe
@ -787,7 +882,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
* `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.21.1,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
```
### programUnsubscribe

View File

@ -14,7 +14,7 @@
* **num\_credit\_only\_signed\_accounts:** The last
`num_credit_only_signed_accounts` signatures refer to signing
`num_readonly_signed_accounts` signatures refer to signing
credit only accounts. Credit only accounts can be used concurrently
@ -24,7 +24,7 @@
* **num\_credit\_only\_unsigned\_accounts:** The last
`num_credit_only_unsigned_accounts` public keys in `account_keys` refer
`num_readonly_unsigned_accounts` public keys in `account_keys` refer
to non-signing credit only accounts
@ -60,4 +60,3 @@ A `Transaction` is signed by using an ed25519 keypair to sign the serialization
## Transaction Serialization
`Transaction`s \(and their `message`s\) are serialized and deserialized using the [bincode](https://crates.io/crates/bincode) crate with a non-standard vector serialization that uses only one byte for the length if it can be encoded in 7 bits, 2 bytes if it fits in 14 bits, or 3 bytes if it requires 15 or 16 bits. The vector serialization is defined by Solana's [short-vec](https://github.com/solana-labs/solana/blob/master/sdk/src/short_vec.rs).
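As a rough illustration only (the authoritative bit layout lives in the short-vec source linked above; the byte order shown here, least-significant 7-bit group first with the high bit as a continuation flag, is an assumption of this sketch), a vector length would encode as:

```text
length     5  -> 0x05              (fits in 7 bits:  1 byte)
length   200  -> 0xC8 0x01         (fits in 14 bits: 2 bytes)
length 40000  -> 0xC0 0xB8 0x02    (needs 16 bits:   3 bytes)
```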

View File

@ -4,7 +4,7 @@ A Solana cluster is a set of validators working together to serve client transac
## Creating a Cluster
Before starting any validators, one first needs to create a _genesis block_. The block contains entries referencing two public keys, a _mint_ and a _bootstrap leader_. The validator holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis block. The second validator then contacts the bootstrap leader to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster.
Before starting any validators, one first needs to create a _genesis config_. The config references two public keys, a _mint_ and a _bootstrap leader_. The validator holding the bootstrap leader's private key is responsible for appending the first entries to the ledger. It initializes its internal state with the mint's account. That account will hold the number of native tokens defined by the genesis config. The second validator then contacts the bootstrap leader to register as a _validator_ or _archiver_. Additional validators then register with any registered member of the cluster.
A validator receives all entries from the leader and submits votes confirming those entries are valid. After voting, the validator is expected to store those entries until archiver nodes submit proofs that they have stored copies of it. Once the validator observes a sufficient number of copies exist, it deletes its copy.
@ -38,4 +38,3 @@ Solana rotates leaders at fixed intervals, called _slots_. Each leader may only
Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions.
A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique _data plane fanout_; learn more in the [data plane fanout](https://github.com/solana-labs/solana/tree/aacead62c0eb052068172eba6b53fc85874d6d54/book/src/data-plane-fanout.md) section.

View File

@ -12,7 +12,7 @@ Nodes take turns being leader and generating the PoH that encodes state changes.
2. Leader filters valid transactions.
3. Leader executes valid transactions updating its state.
4. Leader packages transactions into entries based off its current PoH slot.
5. Leader transmits the entries to validator nodes \(in signed blobs\) 1. The PoH stream includes ticks; empty entries that indicate liveness of
5. Leader transmits the entries to validator nodes \(in signed shreds\) 1. The PoH stream includes ticks; empty entries that indicate liveness of
the leader and the passage of time on the cluster.

View File

@ -40,7 +40,7 @@ After observing the cluster for a sufficient amount of time, the leader schedule
## Leader Schedule Generation at Genesis
The genesis block declares the first leader for the first epoch. This leader ends up scheduled for the first two epochs because the leader schedule is also generated at slot 0 for the next epoch. The length of the first two epochs can be specified in the genesis block as well. The minimum length of the first epochs must be greater than or equal to the maximum rollback depth as defined in [Tower BFT](../implemented-proposals/tower-bft.md).
The genesis config declares the first leader for the first epoch. This leader ends up scheduled for the first two epochs because the leader schedule is also generated at slot 0 for the next epoch. The length of the first two epochs can be specified in the genesis config as well. The minimum length of the first epochs must be greater than or equal to the maximum rollback depth as defined in [Tower BFT](../implemented-proposals/tower-bft.md).
## Leader Schedule Generation Algorithm
@ -73,7 +73,7 @@ The seed that is selected is predictable but unbiasable. There is no grinding at
A leader can bias the active set by censoring validator votes. Two possible ways exist for leaders to censor the active set:
* Ignore votes from validators
* Ignore votes from validators
* Refuse to vote for blocks with votes from validators
To reduce the likelihood of censorship, the active set is calculated at the leader schedule offset boundary over an _active set sampling duration_. The active set sampling duration is long enough such that votes will have been collected by multiple leaders.
@ -95,4 +95,3 @@ The lifetime of a leader schedule is called an _epoch_. The epoch is split into
A leader transmits entries during its slot. After `T` ticks, all the validators switch to the next scheduled leader. Validators must ignore entries sent outside a leader's assigned slot.
All `T` ticks must be observed by the next leader for it to build its own entries on. If entries are not observed \(leader is down\) or entries are invalid \(leader is buggy or malicious\), the next leader must produce ticks to fill the previous leader's slot. Note that the next leader should do repair requests in parallel, and postpone sending ticks until it is confident other validators also failed to observe the previous leader's entries. If a leader incorrectly builds on its own ticks, the leader following it must replace all its ticks.

View File

@ -16,7 +16,7 @@ The total stake allocated to a Vote account can be calculated by the sum of all
## Vote and Stake accounts
The rewards process is split into two on-chain programs. The Vote program solves the problem of making stakes slashable. The Stake account acts as custodian of the rewards pool, and provides passive delegation. The Stake program is responsible for paying out each staker once the staker proves to the Stake program that its delegate has participated in validating the ledger.
The rewards process is split into two on-chain programs. The Vote program solves the problem of making stakes slashable. The Stake program acts as custodian of the rewards pool and provides for passive delegation. The Stake program is responsible for paying rewards to staker and voter when shown that a staker's delegate has participated in validating the ledger.
### VoteState
@ -76,7 +76,7 @@ StakeState::Stake is the current delegation preference of the **staker** and con
* `deactivated` - the epoch at which this stake was de-activated, some cool down epochs are required before the account
```text
is fully deactivated, and the stake available for withdrawal
is fully deactivated, and the stake available for withdrawal
```
* `authorized_staker` - the pubkey of the entity that must sign delegation, activation, and deactivation transactions
@ -94,7 +94,7 @@ The Stakes and the RewardsPool are accounts that are owned by the same `Stake` p
### StakeInstruction::DelegateStake
The Stake account is moved from Ininitialized to StakeState::Stake form. This is how stakers choose their initial delegate validator node and activate their stake account lamports. If the stake account is already StakeState::Stake \(i.e. already activated\), the stake is re-delegated The transaction must be signed by the stake's `authorized_staker`.
The Stake account is moved from Initialized to StakeState::Stake form. This is how stakers choose their initial delegate validator node and activate their stake account lamports. The transaction must be signed by the stake's `authorized_staker`. If the stake account is already StakeState::Stake \(i.e. already activated\), the stake is re-delegated. Stakes may be re-delegated at any time, and updated stakes are reflected immediately, but only one re-delegation is permitted per epoch.
* `account[0]` - RW - The StakeState::Stake instance. `StakeState::Stake::credits_observed` is initialized to `VoteState::credits`, `StakeState::Stake::voter_pubkey` is initialized to `account[1]`. If this is the initial delegation of stake, `StakeState::Stake::stake` is initialized to the account's balance in lamports, `StakeState::Stake::activated` is initialized to the current Bank epoch, and `StakeState::Stake::deactivated` is initialized to std::u64::MAX
* `account[1]` - R - The VoteState instance.
@ -132,7 +132,7 @@ stake_state.credits_observed = vote_state.credits;
### StakeInstruction::Deactivate
A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cool down.
A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cool down.
The transaction must be signed by the stake's `authorized_staker`.
* `account[0]` - RW - The StakeState::Stake instance that is deactivating.
@ -228,5 +228,4 @@ Only lamports in excess of effective+activating stake may be withdrawn at any ti
### Lock-up
Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as a slot height, i.e. the minimum slot height that must be reached by the network before the stake account balance is available for withdrawal, except to a specified custodian. This information is gathered when the stake account is created.
Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state.

View File

@ -36,6 +36,59 @@ Finally, the following diagram shows a two layer cluster with a Fanout of 2.
Currently, configuration is set when the cluster is launched. In the future, these parameters may be hosted on-chain, allowing modification on the fly as the cluster sizes change.
## Calculating the required FEC rate
Turbine relies on retransmission of packets between validators. Due to
retransmission, any network-wide packet loss is compounded, and the
probability of a packet failing to reach its destination increases
with each hop. The FEC rate needs to take into account the network-wide
packet loss and the propagation depth.
A shred group is the set of data and coding packets that can be used
to reconstruct each other. Each shred group has a chance of failure,
based on the likelihood that the number of failed packets exceeds
the FEC rate. If a validator fails to reconstruct the shred group,
then the block cannot be reconstructed, and the validator has to rely
on repair to fix up the block.
The probability of the shred group failing can be computed using the
binomial distribution. If the FEC rate is `16:4`, then the group size
is 20, and more than 4 of the shreds must fail for the group to fail.
This is equal to the sum of the probabilities of 5 or more of the 20
trials failing.
Probability of a block succeeding in turbine:
* Probability of packet failure: `P = 1 - (1 - network_packet_loss_rate)^2`
* FEC rate: `K:M`
* Number of trials: `N = K + M`
* Shred group failure rate: `S = SUM of i=M+1 -> N for binomial(prob_failure = P, trials = N, failures = i)`
* Shreds per block: `G`
* Block success rate: `B = (1 - S) ^ (G / N) `
* The binomial probability of exactly `i` failures out of `N` trials, each failing with probability `P`, is `(N choose i) * P^i * (1 - P)^(N-i)`
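Putting these pieces together (under the reading that a group fails when more than `M` of its `N` shreds are lost, which matches the worked numbers below), the group failure rate and block success rate can be written as:

```latex
S = \sum_{i=M+1}^{N} \binom{N}{i} P^{i} (1-P)^{N-i}, \qquad B = (1 - S)^{G/N}
```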
For example:
* Network packet loss rate is 15%.
* 50kpts network generates 6400 shreds per second.
* The FEC rate increases the total shreds per block by the FEC ratio.
With a FEC rate: `16:4`
* `G = 8000`
* `P = 1 - 0.85 * 0.85 = 1 - 0.7225 = 0.2775`
* `S = SUM of i=5 -> 20 for binomial(prob_failure = 0.2775, trials = 20, failures = i) = 0.689414`
* `B = (1 - 0.689) ^ (8000 / 20) = 10^-203`
With FEC rate of `16:16`
* `G = 12800`
* `S = SUM of i=17 -> 32 for binomial(prob_failure = 0.2775, trials = 32, failures = i) = 0.002132`
* `B = (1 - 0.002132) ^ (12800 / 32) = 0.42583`
With FEC rate of `32:32`
* `G = 12800`
* `S = SUM of i=33 -> 64 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.000048`
* `B = (1 - 0.000048) ^ (12800 / 64) = 0.99045`
## Neighborhoods
The following diagram shows how two neighborhoods in different layers interact. To cripple a neighborhood, enough nodes \(erasure codes +1\) from the neighborhood above need to fail. Since each neighborhood receives shreds from multiple nodes in a neighborhood in the upper layer, we'd need a big network failure in the upper layers to end up with incomplete data.

View File

@ -27,16 +27,17 @@ $ git checkout $TAG
### Configuration Setup
Ensure important programs such as the vote program are built before any nodes are started
Ensure important programs such as the vote program are built before any nodes are started. Note that we are using the release build here for good performance.
If you want the debug build, use just `cargo build` and omit the `NDEBUG=1` part of the command.
```bash
$ cargo build
$ cargo build --release
```
The network is initialized with a genesis ledger generated by running the following script.
```bash
$ ./multinode-demo/setup.sh
$ NDEBUG=1 ./multinode-demo/setup.sh
```
### Drone
@ -46,7 +47,7 @@ In order for the validators and clients to work, we'll need to spin up a drone t
Start the drone with:
```bash
$ ./multinode-demo/drone.sh
$ NDEBUG=1 ./multinode-demo/drone.sh
```
### Singlenode Testnet
@ -56,7 +57,7 @@ Before you start a validator, make sure you know the IP address of the machine y
Now start the bootstrap leader in a separate shell:
```bash
$ ./multinode-demo/bootstrap-leader.sh
$ NDEBUG=1 ./multinode-demo/bootstrap-leader.sh
```
Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to receive transactions. The leader will request some tokens from the drone if it doesn't have any. The drone does not need to be running for subsequent leader starts.
@ -66,15 +67,15 @@ Wait a few seconds for the server to initialize. It will print "leader ready..."
To run a multinode testnet, after starting a leader node, spin up some additional validators in separate shells:
```bash
$ ./multinode-demo/validator-x.sh
$ NDEBUG=1 ./multinode-demo/validator-x.sh
```
To run a performance-enhanced validator on Linux, [CUDA 10.0](https://developer.nvidia.com/cuda-downloads) must be installed on your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/bootstrap-leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh
$ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/bootstrap-leader.sh
$ NDEBUG=1 SOLANA_CUDA=1 ./multinode-demo/validator.sh
```
### Testnet Client Demo
@ -84,7 +85,7 @@ Now that your singlenode or multinode testnet is up and running let's send it so
In a separate shell start the client:
```bash
$ ./multinode-demo/bench-tps.sh # runs against localhost by default
$ NDEBUG=1 ./multinode-demo/bench-tps.sh # runs against localhost by default
```
What just happened? The client demo spins up several threads to send 500,000 transactions to the testnet as quickly as it can. The client then pings the testnet periodically to see how many transactions it processed in that time. Take note that the demo intentionally floods the network with UDP packets, such that the network will almost certainly drop a bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client demo completes after it has convinced itself the testnet won't process any additional transactions. You should see several TPS measurements printed to the screen. In the multinode variation, you'll see TPS measurements for each validator node as well.
@ -125,7 +126,7 @@ This will dump all the threads stack traces into gdb.txt
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.
```bash
$ ./multinode-demo/bench-tps.sh --entrypoint testnet.solana.com:8001 --drone testnet.solana.com:9900 --duration 60 --tx_count 50
$ NDEBUG=1 ./multinode-demo/bench-tps.sh --entrypoint testnet.solana.com:8001 --drone testnet.solana.com:9900 --duration 60 --tx_count 50
```
You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)

View File

@ -1,8 +1,8 @@
# Commitment
The commitment metric aims to give clients a measure of the network confirmation
and stake levels on a particular block. Clients can then use this information to
derive their own measures of confidence.
and stake levels on a particular block. Clients can then use this information to
derive their own measures of commitment.
# Calculation RPC

View File

@ -1,105 +0,0 @@
# Credit-only Accounts
This design covers the handling of credit-only and credit-debit accounts in the [runtime](../validator/runtime.md). Accounts already distinguish themselves as credit-only or credit-debit based on the program ID specified by the transaction's instruction. Programs must treat accounts that are not owned by them as credit-only.
To identify credit-only accounts by program id would require the account to be fetched and loaded from disk. This operation is expensive, and while it is occurring, the runtime would have to reject any transactions referencing the same account.
The proposal introduces a `num_readonly_accounts` field to the transaction structure, and removes the `program_ids` dedicated vector for program accounts.
This design doesn't change the runtime transaction processing rules. Programs still can't write or spend accounts that they do not own, but it allows the runtime to optimistically take the correct lock for each account specified in the transaction before loading the accounts from storage.
Accounts selected as credit-debit by the transaction can still be treated as credit-only by the instructions.
## Runtime handling
credit-only accounts have the following properties:
* Can be deposited into: Deposits can be implemented as a simple `atomic_add`.
* read-only access to account data.
Instructions that debit or modify the credit-only account data will fail.
## Account Lock Optimizations
The Accounts module keeps track of current locked accounts in the runtime, which separates credit-only accounts from the credit-debit accounts. The credit-only accounts can be cached in memory and shared between all the threads executing transactions.
The current runtime can't predict whether an account is credit-only or credit-debit when the transaction account keys are locked at the start of the transaction processing pipeline. Accounts referenced by the transaction have not been loaded from the disk yet.
An ideal design would cache the credit-only accounts while they are referenced by any transaction moving through the runtime, and release the cache when the last transaction exits the runtime.
## Credit-only accounts and read-only account data
Credit-only account data can be treated as read-only. Credit-debit account data is treated as read-write.
## Transaction changes
To enable the possibility of caching accounts only while they are in the runtime, the Transaction structure should be changed in the following way:
* `program_ids: Vec<Pubkey>` - This vector is removed. Program keys can be placed at the end of the `account_keys` vector within the `num_readonly_accounts` number set to the number of programs.
* `num_readonly_accounts: u8` - The number of keys from the **end** of the transaction's `account_keys` array that is credit-only.
The following possible accounts are present in an transaction:
* paying account
* RW accounts
* R accounts
* Program IDs
The paying account must be credit-debit, and program IDs must be credit-only. The first account in the `account_keys` array is always the account that pays for the transaction fee, therefore it cannot be credit-only. For these reasons the credit-only accounts are all grouped together at the end of the `account_keys` vector. Counting credit-only accounts from the end allow for the default `0` value to still be functionally correct, since a transaction will succeed with all credit-debit accounts.
Since accounts can only appear once in the transaction's `account_keys` array, an account can only be credit-only or credit-debit in a single transaction, not both. The runtime treats a transaction as one atomic unit of execution. If any instruction needs credit-debit access to an account, a copy needs to be made. The write lock is held for the entire time the transaction is being processed by the runtime.
## Starvation
Read locks for credit-only accounts can keep the runtime from executing transactions requesting a write lock to a credit-debit account.
When a request for a write lock is made while a read lock is open, the transaction requesting the write lock should be cached. Upon closing the read lock, the pending transactions can be pushed through the runtime.
While a pending write transaction exists, any additional read lock requests for that account should fail. It follows that any other write lock requests will also fail. Currently, clients must retransmit when a transaction fails because of a pending transaction. This approach would mimic that behavior as closely as possible while preventing write starvation.
## Program execution with credit-only accounts
Before handing off the accounts to program execution, the runtime can mark each account in each instruction as a credit-only account. The credit-only accounts can be passed as references without an extra copy. The transaction will abort on a write to credit-only.
An alternative is to detect writes to credit-only accounts and fail the transactions before commit.
## Alternative design
This design attempts to cache a credit-only account after loading without the use of a transaction-specified credit-only accounts list. Instead, the credit-only accounts are held in a reference-counted table inside the runtime as the transactions are processed.
1. Transaction accounts are locked.
a. If the account is present in the credit-only' table, the TX does not fail.
```text
The pending state for this TX is marked NeedReadLock.
```
2. Transaction accounts are loaded.
a. Transaction accounts that are credit-only increase their reference
```text
count in the `credit-only` table.
```
b. Transaction accounts that need a write lock and are present in the
```text
`credit-only` table fail.
```
3. Transaction accounts are unlocked.
a. Decrement the `credit-only` lock table reference count; remove if its 0
b. Remove from the `lock` set if the account is not in the `credit-only`
```text
table.
```
The downside with this approach is that if the `lock` set mutex is released between lock and load to allow better pipelining of transactions, a request for a credit-only account may fail. Therefore, this approach is not suitable for treating programs as credit-only accounts.
Holding the accounts lock mutex while fetching the account from disk would potentially have a significant performance hit on the runtime. Fetching from disk is expected to be slow, but can be parallelized between multiple disks.

View File

@ -20,7 +20,7 @@ Slot leaders and validators use a PoH Recorder for both estimating slot height a
### PoH Recorder when Validating
The PoH Recorder acts as a simple VDF when validating. It tells the validator when it needs to switch to the slot leader role. Every time the validator votes on a fork, it should use the fork's latest block id to re-seed the VDF. Re-seeding solves two problems. First, it synchronizes its VDF to the leader's, allowing it to more accurately determine when its leader slot begins. Second, if the previous leader goes down, all wallclock time is accounted for in the next leader's PoH stream. For example, if one block is missing when the leader starts, the block it produces should have a PoH duration of two blocks. The longer duration ensures the following leader isn't attempting to snip all the transactions from the previous leader's slot.
The PoH Recorder acts as a simple VDF when validating. It tells the validator when it needs to switch to the slot leader role. Every time the validator votes on a fork, it should use the fork's latest [blockhash](terminology.md#blockhash) to re-seed the VDF. Re-seeding solves two problems. First, it synchronizes its VDF to the leader's, allowing it to more accurately determine when its leader slot begins. Second, if the previous leader goes down, all wallclock time is accounted for in the next leader's PoH stream. For example, if one block is missing when the leader starts, the block it produces should have a PoH duration of two blocks. The longer duration ensures the following leader isn't attempting to snip all the transactions from the previous leader's slot.
### PoH Recorder when Leading

View File

@ -0,0 +1,23 @@
# Read-Only Accounts
This design covers the handling of readonly and writable accounts in the [runtime](../validator/runtime.md). Multiple transactions that modify the same account must be processed serially so that they are always replayed in the same order. Otherwise, this could introduce non-determinism to the ledger. Some transactions, however, only need to read, and not modify, the data in particular accounts. Multiple transactions that only read the same account can be processed in parallel, since replay order does not matter, providing a performance benefit.
In order to identify readonly accounts, the transaction MessageHeader structure contains `num_readonly_signed_accounts` and `num_readonly_unsigned_accounts`. Instruction `program_ids` are included in the account vector as readonly, unsigned accounts, since executable accounts likewise cannot be modified during instruction processing.
## Runtime handling
Runtime transaction processing rules need to be updated slightly. Programs still can't write or spend accounts that they do not own. But new runtime rules ensure that readonly accounts cannot be modified, even by the programs that own them.
Readonly accounts have the following property:
* Read-only access to all account fields, including lamports (cannot be credited or debited), and account data
Instructions that credit, debit, or modify the readonly account will fail.
## Account Lock Optimizations
The Accounts module keeps track of current locked accounts in the runtime, which separates readonly accounts from the writable accounts. The default account lock gives an account the "writable" designation, and can only be accessed by one processing thread at one time. Readonly accounts are locked by a separate mechanism, allowing for parallel reads.
Although not yet implemented, readonly accounts could be cached in memory and shared between all the threads executing transactions. An ideal design would hold this cache while a readonly account is referenced by any transaction moving through the runtime, and release the cache when the last transaction exits the runtime.
Readonly accounts could also be passed into the processor as references, saving an extra copy.

View File

@ -30,7 +30,7 @@ Gossip is designed for efficient propagation of state. Messages that are sent th
## Performance
1. Worst case propagation time to the next leader is Log\(N\) hops with a base depending on the fanout. With our current default fanout of 6, it is about 6 hops to 20k nodes.
2. The leader should receive 20k validation votes aggregated by gossip-push into 64kb blobs. Which would reduce the number of packets for 20k network to 80 blobs.
2. The leader should receive 20k validation votes aggregated by gossip-push into MTU-sized shreds, which would reduce the number of packets for a 20k-node network to 80 shreds.
3. Each validator's votes are replicated across the entire network. To maintain a queue of 5 previous votes, the Crds table would grow by 25 megabytes `(20,000 nodes * 256 bytes * 5)`.
## Two step implementation rollout
@ -44,7 +44,7 @@ Initially the network can perform reliably with just 1 vote transmitted and main
3. Fanout of 6.
4. Worst case 256kb memory overhead per node.
5. Worst case 4 hops to propagate to every node.
6. Leader should receive the entire validator vote set in 4 push message blobs.
6. Leader should receive the entire validator vote set in 4 push message shreds.
### Sub 20k network
@ -55,5 +55,5 @@ Everything above plus the following:
3. Increase fanout to 20.
4. Worst case 25mb memory overhead per node.
5. Sub 4 hops worst case to deliver to the entire network.
6. 80 blobs received by the leader for all the validator messages.
6. 80 shreds received by the leader for all the validator messages.

View File

@ -4,7 +4,7 @@ Transactions currently include a fee field that indicates the maximum fee field
## Congestion-driven fees
Each validator uses _signatures per slot_ \(SPS\) to estimate network congestion and _SPS target_ to estimate the desired processing capacity of the cluster. The validator learns the SPS target from the genesis block, whereas it calculates SPS from recently processed transactions. The genesis block also defines a target `lamports_per_signature`, which is the fee to charge per signature when the cluster is operating at _SPS target_.
Each validator uses _signatures per slot_ \(SPS\) to estimate network congestion and _SPS target_ to estimate the desired processing capacity of the cluster. The validator learns the SPS target from the genesis config, whereas it calculates SPS from recently processed transactions. The genesis config also defines a target `lamports_per_signature`, which is the fee to charge per signature when the cluster is operating at _SPS target_.
## Calculating fees
@ -28,4 +28,3 @@ Future parameters might include:
### Hijacking the SPS Target
A group of validators can centralize the cluster if they can convince it to raise the SPS Target above a point where the rest of the validators can keep up. Raising the target will cause fees to drop, presumably creating more demand and therefore higher TPS. If the validator doesn't have hardware that can process that many transactions that fast, its confirmation votes will eventually get so long that the cluster will be forced to boot it.

View File

@ -34,5 +34,4 @@ A cluster is a set of computers that work together and can be viewed from the ou
## What are SOLs?
A SOL is the name of Solana's native token, which can be passed to nodes in a Solana cluster in exchange for running an on-chain program or validating its output. The system may perform micropayments of fractional SOLs and a SOL may be split as many as 34 times. The fractional sol is called a _lamport_. It is named in honor of Solana's biggest technical influence, [Leslie Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value of approximately 0.0000000000582 sol \(2^-34\).
A SOL is the name of Solana's native token, which can be passed to nodes in a Solana cluster in exchange for running an on-chain program or validating its output. The system may perform micropayments of fractional SOLs, which are called _lamports_. They are named in honor of Solana's biggest technical influence, [Leslie Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value of 0.000000001 SOL.

View File

@ -0,0 +1,24 @@
# Paper Wallet
This document describes how to create and use a paper wallet with the Solana CLI
tools.
{% hint style="info" %}
We do not intend to advise on how to *securely* create or manage paper wallets.
Please research the security concerns carefully.
{% endhint %}
## Overview
Solana provides a key generation tool to derive keys from BIP39 compliant seed
phrases. Solana CLI commands for running a validator and staking tokens all
support keypair input via seed phrases.
To learn more about the BIP39 standard, visit the Bitcoin BIPs Github repository
[here](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki).
{% page-ref page="installation.md" %}
{% page-ref page="keypair.md" %}
{% page-ref page="usage.md" %}

View File

@ -0,0 +1,51 @@
# Installation Guide
Follow this guide to set up Solana's key generation tool, `solana-keygen`.
{% hint style="warning" %}
After installation, ensure your version is `0.21.1` or higher by running `solana-keygen -V`
{% endhint %}
## Download
First, download the latest release tarball from GitHub.
1. Setup download url
```bash
solana_downloads=https://github.com/solana-labs/solana/releases/latest/download
```
2. Specify the download file based on your machine
**MacOS**
```bash
solana_release=solana-release-x86_64-apple-darwin.tar.bz2
```
**Linux**
```bash
solana_release=solana-release-x86_64-unknown-linux-gnu.tar.bz2
```
3. Download
```bash
curl -L -sSf -o solana-release.tar.bz2 $solana_downloads/$solana_release
```
## Extract
Next, extract the tarball
```bash
tar xf solana-release.tar.bz2
```
## Add to "PATH"
Now add the tool to your PATH environment variable with the following command
```bash
export PATH="$(pwd)/solana-release/bin:${PATH}"
```
## Check
Finally, verify that `solana-keygen` works by running:
```bash
solana-keygen -V
```

View File

@ -0,0 +1,70 @@
# Creating a Paper Wallet
Using the `solana-keygen` tool, it is possible to generate new seed phrases as
well as derive a keypair from an existing seed phrase and (optional) passphrase.
The seed phrase and passphrase can be used together as a paper wallet. As long
as you keep your seed phrase and passphrase stored safely, you can use them to
access your account.
{% hint style="info" %}
For more information about how seed phrases work, review this
[Bitcoin Wiki page](https://en.bitcoin.it/wiki/Seed_phrase).
{% endhint %}
## Seed Phrase Generation
Generating a new keypair can be done using the `solana-keygen new` command. The
command will generate a random seed phrase, ask you to enter an optional
passphrase, and then will display the derived public key and the generated seed
phrase for your paper wallet.
```bash
solana-keygen new --no-outfile
```
{% hint style="warning" %}
If the `--no-outfile` flag is **omitted**, the default behavior is to write the
keypair to `~/.config/solana/id.json`
{% endhint %}
{% hint style="info" %}
For added security, increase the seed phrase word count using the `--word-count`
argument
{% endhint %}
For full usage details run:
```bash
solana-keygen new --help
```
## Public Key Derivation
Public keys can be derived from a seed phrase and a passphrase if you choose to
use one. This is useful for using an offline-generated seed phrase to
derive a valid public key. The `solana-keygen pubkey` command will walk you
through entering your seed phrase and a passphrase if you chose to use one.
```bash
solana-keygen pubkey ASK
```
{% hint style="info" %}
Note that you could potentially use different passphrases for the same seed
phrase. Each unique passphrase will yield a different keypair.
{% endhint %}
The `solana-keygen` tool assumes the use of the BIP39 standard English word
list. If you chose to deviate from the word list or used a different language
for your seed phrase, you can still derive a valid public key but will need to
explicitly skip seed phrase validation.
```bash
solana-keygen pubkey ASK --skip-seed-phrase-validation
```
For full usage details run:
```bash
solana-keygen pubkey --help
```
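For the curious, the derivation these commands perform can be approximated in Rust. This sketch assumes the `tiny-bip39` crate (imported as `bip39`) and a recent `solana-sdk`; it is illustrative rather than the exact `solana-keygen` code path:
```rust
// Illustrative only: assumes the `tiny-bip39` crate (as `bip39`) and a recent `solana-sdk`.
use bip39::{Language, Mnemonic, Seed};
use solana_sdk::signature::{keypair_from_seed, Signer};

fn main() {
    // The seed phrase and optional passphrase that make up the paper wallet.
    let phrase = "legal winner thank year wave sausage worth useful legal winner thank yellow";
    let passphrase = ""; // empty if no passphrase was used

    // BIP39: mnemonic + passphrase -> 64-byte seed.
    let mnemonic = Mnemonic::from_phrase(phrase, Language::English).expect("valid seed phrase");
    let seed = Seed::new(&mnemonic, passphrase);

    // Derive the ed25519 keypair and print its public key, as `solana-keygen pubkey` would.
    let keypair = keypair_from_seed(seed.as_bytes()).expect("seed of sufficient length");
    println!("{}", keypair.pubkey());
}
```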

View File

@ -0,0 +1,73 @@
# Paper Wallet Usage
Solana commands can be run without ever saving a keypair to disk on a machine.
If avoiding writing a private key to disk is a security concern of yours, you've
come to the right place.
{% hint style="warning" %}
Even using this secure input method, it's still possible that a private key gets
written to disk by unencrypted memory swaps. It is the user's responsibility to
protect against this scenario.
{% endhint %}
## Running a Validator
In order to run a validator, you will need to specify an "identity keypair"
which will be used to fund all of the vote transactions signed by your validator.
Rather than specifying a path with `--identity-keypair <PATH>` you can use the
`--ask-seed-phrase` option.
```bash
solana-validator --ask-seed-phrase identity-keypair --ledger ...
[identity-keypair] seed phrase: 🔒
[identity-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```
The `--ask-seed-phrase` option accepts multiple keypairs. If you wish to use this
input method for your voting keypair as well you can do the following:
```bash
solana-validator --ask-seed-phrase identity-keypair voting-keypair --ledger ...
[identity-keypair] seed phrase: 🔒
[identity-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
[voting-keypair] seed phrase: 🔒
[voting-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```
Refer to the following page for a comprehensive guide on running a validator:
{% page-ref page="../running-validator/README.md" %}
## Delegating Stake
Solana CLI tooling supports secure keypair input for stake delegation. To do so,
first create a stake account with some SOL. Use the special `ASK` keyword to
trigger a seed phrase input prompt for the stake account and use
`--ask-seed-phrase keypair` to securely input the funding keypair.
```bash
solana create-stake-account ASK 1 SOL --ask-seed-phrase keypair
[stake_account] seed phrase: 🔒
[stake_account] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
[keypair] seed phrase: 🔒
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```
Then, to delegate that stake to a validator, use `--ask-seed-phrase keypair` to
securely input the funding keypair.
```bash
solana delegate-stake --ask-seed-phrase keypair <STAKE_ACCOUNT_PUBKEY> <VOTE_ACCOUNT_PUBKEY>
[keypair] seed phrase: 🔒
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```
Refer to the following page for a comprehensive guide on delegating stake:
{% page-ref page="../running-validator/validator-stake.md" %}
---
{% page-ref page="../api-reference/cli.md" %}

View File

@ -63,7 +63,7 @@ fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
}
```
where `process_instruction()` is built into Solana's runtime and responsible for routing the given instruction to the `token` program via the instruction's `program_id` field. Before invoking `pay()`, the runtime must also ensure that `acme` didn't modify any accounts owned by `token`. It does this by calling `runtime::verify_instruction()` and then afterward updating all the `pre_*` variables to tentatively commit `acme`'s account modifications. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme`. It should call `verify_instruction()` again, but this time with the `token` program ID. Lastly, after `pay_and_launch_missiles()` completes, the runtime must call `verify_instruction()` one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume `pay_and_launch_missiles()` as whole made no invalid account changes, and therefore commit all account modifications.
where `process_instruction()` is built into Solana's runtime and responsible for routing the given instruction to the `token` program via the instruction's `program_id` field. Before invoking `pay()`, the runtime must also ensure that `acme` didn't modify any accounts owned by `token`. It does this by calling `runtime::verify_account_changes()` and then afterward updating all the `pre_*` variables to tentatively commit `acme`'s account modifications. After `pay()` completes, the runtime must again ensure that `token` didn't modify any accounts owned by `acme`. It should call `verify_account_changes()` again, but this time with the `token` program ID. Lastly, after `pay_and_launch_missiles()` completes, the runtime must call `verify_account_changes()` one more time, where it normally would, but using all updated `pre_*` variables. If executing `pay_and_launch_missiles()` up to `pay()` made no invalid account changes, `pay()` made no invalid changes, and executing from `pay()` until `pay_and_launch_missiles()` returns made no invalid changes, then the runtime can transitively assume `pay_and_launch_missiles()` as a whole made no invalid account changes, and therefore commit all account modifications.
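A schematic of the policy such a check enforces might look as follows; the types are simplified stand-ins for illustration, not the runtime's actual structures:
```rust
/// Simplified stand-in for an account; not the runtime's actual type.
#[derive(Clone, PartialEq)]
struct Account {
    lamports: u64,
    data: Vec<u8>,
    owner: [u8; 32], // pubkey of the owning program
}

/// Sketch of the policy enforced between a pre-execution snapshot and the
/// post-execution state of one account, from the perspective of `program_id`.
fn verify_account_changes(
    program_id: &[u8; 32],
    pre: &Account,
    post: &Account,
) -> Result<(), &'static str> {
    let is_owner = pre.owner == *program_id;
    // Only the owning program may modify account data.
    if !is_owner && pre.data != post.data {
        return Err("data modified by a program that does not own the account");
    }
    // Only the owning program may debit the account; anyone may credit it.
    if !is_owner && post.lamports < pre.lamports {
        return Err("account debited by a program that does not own it");
    }
    Ok(())
}
```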
### Setting `KeyedAccount.is_signer`

View File

@ -0,0 +1,123 @@
# Durable Transaction Nonces
## Problem
To prevent replay, Solana transactions contain a nonce field populated with a
"recent" blockhash value. A transaction containing a blockhash that is too old
(~2min as of this writing) is rejected by the network as invalid. Unfortunately
certain use cases, such as custodial services, require more time to produce a
signature for the transaction. A mechanism is needed to enable these potentially
offline network participants.
## Requirements
1) The transaction's signature needs to cover the nonce value
2) The nonce must not be reusable, even in the case of signing key disclosure
## A Contract-based Solution
Here we describe a contract-based solution to the problem, whereby a client can
"stash" a nonce value for future use in a transaction's `recent_blockhash`
field. This approach is akin to the Compare and Swap atomic instruction,
implemented by some CPU ISAs.
When making use of a durable nonce, the client must first query its value from
account data. A transaction is now constructed in the normal way, but with the
following additional requirements:
1) The durable nonce value is used in the `recent_blockhash` field
2) A `Nonce` instruction is issued (first?)
3) The appropriate transaction flag is set, signaling that the usual
hash age check should be skipped and the previous requirements enforced. This
may be unnecessary, see [Runtime Support](#runtime-support) below
### Contract Mechanics
TODO: svgbob this into a flowchart
```text
Start
Create Account
state = Uninitialized
NonceInstruction
if state == Uninitialized
if account.balance < rent_exempt
error InsufficientFunds
state = Initialized
elif state != Initialized
error BadState
if sysvar.recent_blockhashes.is_empty()
error EmptyRecentBlockhashes
if !sysvar.recent_blockhashes.contains(stored_nonce)
error NotReady
stored_hash = sysvar.recent_blockhashes[0]
success
WithdrawInstruction(to, lamports)
if state == Uninitialized
if !signers.contains(owner)
error MissingRequiredSignatures
elif state == Initialized
if !sysvar.recent_blockhashes.contains(stored_nonce)
error NotReady
if lamports != account.balance && lamports + rent_exempt > account.balance
error InsufficientFunds
account.balance -= lamports
to.balance += lamports
success
```
A client wishing to use this feature starts by creating a nonce account and
depositing sufficient lamports as to make it rent-exempt. The resultant account
will be in the `Uninitialized` state with no stored hash and thus unusable.
The `Nonce` instruction is used to request that a new nonce be stored for the
calling account. The first `Nonce` instruction run on a newly created account
will drive the account's state to `Initialized`. As such, a `Nonce` instruction
MUST be issued before the account can be used.
To discard a `NonceAccount`, the client should issue a `Withdraw` instruction
which withdraws all lamports, leaving a zero balance and making the account
eligible for deletion.
`Nonce` and `Withdraw` instructions each will only succeed if the stored
blockhash is no longer resident in `sysvar.recent_blockhashes`.
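As a rough Rust sketch of the state machine described above (`Hash` is a stand-in for the blockhash type, and the account plumbing, `BadState`, and `NotReady` checks from the flow above are omitted for brevity):
```rust
type Hash = [u8; 32]; // stand-in for the blockhash type

/// State stored in a nonce account's data, per the flow above.
#[derive(Clone, Copy, PartialEq)]
enum NonceState {
    Uninitialized,
    Initialized(Hash), // the stored durable nonce value
}

/// Skeleton of the `Nonce` instruction handler: a rent-exempt, Uninitialized account
/// becomes Initialized, and a fresh hash from the recent_blockhashes sysvar is stored.
fn process_nonce(
    state: &mut NonceState,
    balance: u64,
    rent_exempt_minimum: u64,
    recent_blockhashes: &[Hash],
) -> Result<(), &'static str> {
    if *state == NonceState::Uninitialized {
        if balance < rent_exempt_minimum {
            return Err("InsufficientFunds");
        }
    }
    let newest = *recent_blockhashes.first().ok_or("EmptyRecentBlockhashes")?;
    *state = NonceState::Initialized(newest);
    Ok(())
}
```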
### Runtime Support
The contract alone is not sufficient for implementing this feature. To enforce
an extant `recent_blockhash` on the transaction and prevent fee theft via
failed transaction replay, runtime modifications are necessary.
Any transaction failing the usual `check_hash_age` validation will be tested
for a Durable Transaction Nonce. The specifics of this test are undecided; some
options:
1) Require that the `Nonce` instruction be the first in the transaction
* + No ABI changes
* + Fast and simple
* - Sets a precedent that may lead to incompatible instruction combinations
2) Blind search for a `Nonce` instruction over all instructions in the
transaction
* + No ABI changes
* - Potentially slow
3) [2], but guarded by a transaction flag
* - ABI changes
* - Wire size increase
* + We'll probably end up with some sort of flags eventually anyway
Current prototyping will use [1]. If it is determined that a Durable Transaction
Nonce is in use, the runtime will take the following actions to validate the
transaction:
1) The `NonceAccount` specified in the `Nonce` instruction is loaded.
2) The `NonceState` is deserialized from the `NonceAccount`'s data field and
confirmed to be in the `Initialized` state.
3) The nonce value stored in the `NonceAccount` is tested to match against the
one specified in the transaction's `recent_blockhash` field.
If all three of the above checks succeed, the transaction is allowed to continue
validation.
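A minimal sketch of that fallback check, assuming option [1] and simplified types (loading and deserializing the nonce account, steps 1 and 2, are elided):
```rust
type Hash = [u8; 32]; // stand-in for the blockhash type

/// Same NonceState shape as in the contract sketch above.
enum NonceState {
    Uninitialized,
    Initialized(Hash),
}

/// Runtime fallback when `check_hash_age` fails: accept the transaction only if its
/// recent_blockhash matches the value stored in the referenced, initialized nonce account.
fn check_durable_nonce(nonce_account_state: &NonceState, tx_recent_blockhash: &Hash) -> bool {
    match nonce_account_state {
        // Step 3: the stored nonce must match the transaction's recent_blockhash field.
        NonceState::Initialized(stored) => stored == tx_recent_blockhash,
        NonceState::Uninitialized => false,
    }
}
```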
### Open Questions
* Should this feature be restricted in the number of uses per transaction?

View File

@ -32,7 +32,7 @@ Collecting rent on an as-needed basis \(i.e. whenever accounts were loaded/acces
* accounts loaded as "credit only" for a transaction could very reasonably be expected to have rent due,
but would not be debitable during any such transaction
but would not be writable during any such transaction
* a mechanism to "beat the bushes" \(i.e. go find accounts that need to pay rent\) is desirable,

View File

@ -6,4 +6,4 @@ Snapshot verification of the account states is implemented, but the bank hash of
## Solution
Use the simple payment verification (SPV) solution to verify the vote transactions which are on-chain voting for the bank hash value.
While a validator is processing transactions to catch up to the cluster from the snapshot, use incoming vote transactions and the commitment calculator to confirm that the cluster is indeed building on the snapshotted bank hash. Once a threshold commitment level is reached, accept the snapshot as valid and start voting.

View File

@ -0,0 +1,69 @@
# Tick Verification
This design describes the criteria and validation of ticks in a slot. It also describes
error handling and slashing conditions encompassing how the system handles
transmissions that do not meet these requirements.
# Slot structure
Each slot must contain an expected `ticks_per_slot` number of ticks. The last
shred in a slot must contain only the entirety of the last tick, and nothing
else. The leader must also mark this shred containing the last tick with the
`LAST_SHRED_IN_SLOT` flag. Between ticks, there must be `hashes_per_tick`
number of hashes.
# Handling bad transmissions
Malicious transmissions `T` are handled in two ways:
1) If a leader can generate some erroneous transmission `T` and also some
alternate transmission `T'` for the same slot without violating any slashing
rules for duplicate transmissions (for instance if `T'` is a subset of `T`),
then the cluster must handle the possibility of both transmissions being live.
This means we cannot mark the erroneous transmission `T` as dead because
the cluster may have reached consensus on `T'`. These cases necessitate a
slashing proof to punish this bad behavior.
2) Otherwise, we can simply mark the slot as dead and not playable. A slashing
proof may or may not be necessary depending on feasibility.
# Blocktree receiving shreds
When blocktree receives a new shred `s`, there are two cases:
1) If `s` is marked as `LAST_SHRED_IN_SLOT`, then check whether there exists a shred
`s'` in blocktree for that slot where `s'.index > s.index`. If so, together `s`
and `s'` constitute a slashing proof.
2) Blocktree has already received a shred `s'` marked as `LAST_SHRED_IN_SLOT`
with index `i`. If `s.index > i`, then together `s` and `s'` constitute a
slashing proof. In this case, blocktree will also not insert `s`.
3) Duplicate shreds for the same index are ignored. Non-duplicate shreds for
the same index are a slashable condition. Details for this case are covered
in the `Leader Duplicate Block Slashing` section.
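A simplified sketch of the checks in cases 1 and 2, with invented types standing in for blocktree's shred metadata:
```rust
/// Minimal stand-in for a data shred's metadata; not Blocktree's actual types.
struct ShredMeta {
    index: u64,
    is_last_in_slot: bool,
}

/// Outcome of checking a new shred against a slot's existing shreds, per the cases above.
enum InsertOutcome {
    Ok,
    /// The two conflicting shreds together form a slashing proof against the leader.
    SlashingProof { existing_index: u64, new_index: u64 },
}

fn check_new_shred(existing: &[ShredMeta], new: &ShredMeta) -> InsertOutcome {
    // Case 1: the new shred claims to be last, but a higher index already exists.
    if new.is_last_in_slot {
        if let Some(s) = existing.iter().find(|s| s.index > new.index) {
            return InsertOutcome::SlashingProof { existing_index: s.index, new_index: new.index };
        }
    }
    // Case 2: an existing shred is marked last, but the new shred's index is higher.
    if let Some(last) = existing.iter().find(|s| s.is_last_in_slot) {
        if new.index > last.index {
            return InsertOutcome::SlashingProof { existing_index: last.index, new_index: new.index };
        }
    }
    InsertOutcome::Ok
}
```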
# Replaying and validating ticks
1) Replay stage replays entries from blocktree, keeping track of the number of
ticks it has seen per slot, and verifying there are `hashes_per_tick` number of
hashes between ticks. After the tick from the slot's last shred has been played,
replay stage then checks the total number of ticks.
Failure scenario 1: If ever there are two consecutive ticks between which the
number of hashes is `!= hashes_per_tick`, mark this slot as dead.
Failure scenario 2: If the number of ticks != `ticks_per_slot`, mark slot as
dead.
Failure scenario 3: If the number of ticks reaches `ticks_per_slot`, but we still
haven't seen the `LAST_SHRED_IN_SLOT`, mark this slot as dead.
2) When ReplayStage reaches a shred marked as the last shred, it checks if this
last shred is a tick.
Failure scenario: If the signed shred with the `LAST_SHRED_IN_SLOT` flag cannot
be deserialized into a tick (either fails to deserialize or deserializes into
an entry), mark this slot as dead.
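The failure scenarios above can be sketched as a single validation pass; the types and names here are illustrative, not ReplayStage's actual code:
```rust
/// Minimal view of a replayed entry for this sketch.
struct Entry {
    num_hashes: u64,
    is_tick: bool,
}

/// Apply failure scenarios 1-3 to a slot's entries.
/// Returns Ok(()) if the ticks are well formed, otherwise the reason to mark the slot dead.
fn validate_slot_ticks(
    entries: &[Entry],
    saw_last_shred: bool, // whether the LAST_SHRED_IN_SLOT shred has been received
    ticks_per_slot: u64,
    hashes_per_tick: u64,
) -> Result<(), &'static str> {
    let mut tick_count = 0;
    let mut hashes_since_tick = 0;
    for entry in entries {
        hashes_since_tick += entry.num_hashes;
        if entry.is_tick {
            // Scenario 1: wrong number of hashes between consecutive ticks.
            if hashes_since_tick != hashes_per_tick {
                return Err("bad hashes_per_tick: mark slot dead");
            }
            hashes_since_tick = 0;
            tick_count += 1;
        }
    }
    // Scenario 2: slot ended with the wrong number of ticks.
    if saw_last_shred && tick_count != ticks_per_slot {
        return Err("bad ticks_per_slot: mark slot dead");
    }
    // Scenario 3: all ticks seen but the last shred never arrived.
    if tick_count >= ticks_per_slot && !saw_last_shred {
        return Err("ticks complete without LAST_SHRED_IN_SLOT: mark slot dead");
    }
    Ok(())
}
```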

View File

@ -26,7 +26,7 @@ We unwrap the many abstraction layers and build a single pipeline that can toggl
* TPU moves to new socket-free crate called solana-tpu.
* TPU's BankingStage absorbs ReplayStage
* TVU goes away
* New RepairStage absorbs Blob Fetch Stage and repair requests
* New RepairStage absorbs Shred Fetch Stage and repair requests
* JSON RPC Service is optional - used for debugging. It should instead be part
of a separate `solana-blockstreamer` executable.

View File

@ -29,7 +29,7 @@ Before starting an archiver node, sanity check that the cluster is accessible to
Fetch the current transaction count over JSON RPC:
```bash
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
```
Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.
@ -47,13 +47,13 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
#### Linux and mac OS
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
```
Alternatively build the `solana-install` program from source and run the following command to obtain the same result:
```bash
$ solana-install init
solana-install init
```
#### Windows
@ -71,9 +71,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
```bash
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```
#### mac OS
@ -81,9 +81,9 @@ $ export PATH=$PWD/bin:$PATH
Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:
```bash
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
tar jxf solana-release-x86_64-apple-darwin.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```
#### Windows
@ -95,7 +95,7 @@ Download the binaries by navigating to [https://github.com/solana-labs/solana/re
Try running following command to join the gossip network and view all the other nodes in the cluster:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip --entrypoint testnet.solana.com:8001 spy
# Press ^C to exit
```
@ -104,8 +104,8 @@ Now configure the keypairs for your archiver by running:
Navigate to the solana install location and open a cmd prompt
```bash
$ solana-keygen new -o archiver-keypair.json
$ solana-keygen new -o storage-keypair.json
solana-keygen new -o archiver-keypair.json
solana-keygen new -o storage-keypair.json
```
Use solana-keygen to show the public keys for each of the keypairs, they will be needed in the next step:
@ -114,23 +114,23 @@ Use solana-keygen to show the public keys for each of the keypairs, they will be
```bash
# The archiver's identity
$ solana-keygen pubkey archiver-keypair.json
$ solana-keygen pubkey storage-keypair.json
solana-keygen pubkey archiver-keypair.json
solana-keygen pubkey storage-keypair.json
```
* Linux and mac OS
```bash
$ export ARCHIVER_IDENTITY=$(solana-keygen pubkey archiver-keypair.json)
export ARCHIVER_IDENTITY=$(solana-keygen pubkey archiver-keypair.json)
$ export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
```
Then set up the storage accounts for your archiver by running:
```bash
$ solana --keypair archiver-keypair.json airdrop 100000 lamports
$ solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
solana --keypair archiver-keypair.json airdrop 100000 lamports
solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
```
Note: Every time the testnet restarts, run the steps to set up the archiver accounts again.
@ -138,7 +138,7 @@ Note: Every time the testnet restarts, run the steps to setup the archiver accou
To start the archiver:
```bash
$ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
solana-archiver --entrypoint testnet.solana.com:8001 --identity-keypair archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
```
## Verify Archiver Setup
@ -146,12 +146,11 @@ $ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypa
From another console, confirm the IP address and **identity pubkey** of your archiver are visible in the gossip network by running:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip --entrypoint testnet.solana.com:8001 spy
```
Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:
```bash
$ solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
```

View File

@ -7,13 +7,13 @@ You can publish your validator information to the chain to be publicly visible t
Run the solana CLI to populate a validator info account:
```bash
$ solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
```
For details about optional fields for VALIDATOR\_INFO\_ARGS:
```bash
$ solana validator-info publish --help
solana validator-info publish --help
```
## Keybase
@ -33,4 +33,3 @@ Including a Keybase username allows client applications \(like the Solana Networ
3. Add or update your `solana validator-info` with your Keybase username. The
CLI will verify the `validator-<PUBKEY>` file

View File

@ -5,70 +5,65 @@
The **identity pubkey** for your validator can also be found by running:
```bash
$ solana-keygen pubkey ~/validator-keypair.json
solana-keygen pubkey ~/validator-keypair.json
```
From another console, confirm the IP address and **identity pubkey** of your validator are visible in the gossip network by running:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip --entrypoint testnet.solana.com:8001 spy
```
## Monitoring Catch Up
It may take some time to catch up with the cluster after your validator boots.
Use the `catchup` command to monitor your validator through this process:
```bash
solana catchup ~/validator-keypair.json
```
Until your validator has caught up, it will not be able to vote successfully and
stake cannot be delegated to it.
Also if you find the cluster's slot advancing faster than yours, you will likely
never catch up. This typically implies some kind of networking issue between
your validator and the rest of the cluster.
## Check Your Balance
Your account balance should decrease by the transaction fee amount as your
validator submits votes, and increase after serving as the leader. Pass the
`--lamports` arg to observe in finer detail:
```bash
solana balance --lamports
```
## Check Vote Activity
The vote pubkey for the validator can be found by running:
The `solana show-vote-account` command displays the recent voting activity from your validator:
```bash
$ solana-keygen pubkey ~/validator-vote-keypair.json
solana show-vote-account ~/validator-vote-keypair.json
```
Provide the **vote pubkey** to the `solana show-vote-account` command to view the recent voting activity from your validator:
```bash
$ solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
```
## Check Your Balance
Your account balance should decrease by the transaction fee amount as your validator submits votes, and increase after serving as the leader. Pass the `--lamports` are to observe in finer detail:
```bash
$ solana balance --lamports
```
## Check Slot Number
After your validator boots, it may take some time to catch up with the cluster. Use the `get-slot` command to view the current slot that the cluster is processing:
```bash
$ solana get-slot
```
The current slot that your validator is processing can then been seen with:
```bash
$ solana --url http://127.0.0.1:8899 get-slot
```
Until your validator has caught up, it will not be able to vote successfully and stake cannot be delegated to it.
Also if you find the cluster's slot advancing faster than yours, you will likely never catch up. This typically implies some kind of networking issue between your validator and the rest of the cluster.
## Get Cluster Info
There are several useful JSON-RPC endpoints for monitoring your validator on the cluster, as well as the health of the cluster:
```bash
# Similar to solana-gossip, you should see your validator in the list of cluster nodes
$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
# If your validator is properly voting, it should appear in the list of `current` vote accounts. If staked, `stake` should be > 0
$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
# Returns the current leader schedule
$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
# Returns info about the current epoch. slotIndex should progress on subsequent calls.
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://testnet.solana.com:8899
```
## Validator Metrics
Metrics are available for local monitoring of your validator.
@ -76,9 +71,9 @@ Metrics are available for local monitoring of your validator.
Docker must be installed and the current user added to the docker group. Then download `solana-metrics.tar.bz2` from the Github Release and run
```bash
$ tar jxf solana-metrics.tar.bz2
$ cd solana-metrics/
$ ./start.sh
tar jxf solana-metrics.tar.bz2
cd solana-metrics/
./start.sh
```
A local InfluxDB and Grafana instance is now running on your machine. Define `SOLANA_METRICS_CONFIG` in your environment as described at the end of the `start.sh` output and restart your validator.
@ -92,6 +87,5 @@ Log messages emitted by your validator include a timestamp. When sharing logs wi
To make it easier to compare logs between different sources we request that everybody use Pacific Time on their validator nodes. In Linux this can be accomplished by running:
```bash
$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
```

View File

@ -5,13 +5,13 @@
The `solana-install` tool can be used to easily install and upgrade the validator software on Linux x86\_64 and mac OS systems.
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
```
Alternatively build the `solana-install` program from source and run the following command to obtain the same result:
```bash
$ solana-install init
solana-install init
```
After a successful install, `solana-install update` may be used to easily update the cluster software to a newer version at any time.
@ -25,9 +25,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:
```bash
$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```
### mac OS
@ -35,9 +35,9 @@ $ export PATH=$PWD/bin:$PATH
Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:
```bash
$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
tar jxf solana-release-x86_64-apple-darwin.tar.bz2
cd solana-release/
export PATH=$PWD/bin:$PATH
```
## Build From Source
@ -45,7 +45,6 @@ $ export PATH=$PWD/bin:$PATH
If you are unable to use the prebuilt binaries or prefer to build it yourself from source, navigate to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), and download the **Source Code** archive. Extract the code and build the binaries with:
```bash
$ ./scripts/cargo-install-all.sh .
$ export PATH=$PWD/bin:$PATH
./scripts/cargo-install-all.sh .
export PATH=$PWD/bin:$PATH
```

View File

@ -7,28 +7,28 @@ Adding stake can be accomplished by using the `solana` CLI
First create a stake account keypair with `solana-keygen`:
```bash
$ solana-keygen new -o ~/validator-config/stake-keypair.json
solana-keygen new -o ~/validator-stake-keypair.json
```
and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 42 lamports:
and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 4242 lamports:
```bash
$ solana create-stake-account ~/validator-config/stake-keypair.json 42 lamports
$ solana delegate-stake ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
solana create-stake-account ~/validator-stake-keypair.json 4242 lamports
solana delegate-stake ~/validator-stake-keypair.json ~/validator-vote-keypair.json
```
Note that stakes need to warm up, and warmup increments are applied at Epoch boundaries, so it can take an hour or more for the change to fully take effect.
Stakes can be re-delegated to another node at any time with the same command:
Stakes can be re-delegated to another node at any time with the same command, but only one re-delegation is permitted per epoch:
```bash
$ solana delegate-stake ~/validator-config/stake-keypair.json ~/some-other-validator-vote-keypair.json
solana delegate-stake ~/validator-stake-keypair.json ~/some-other-validator-vote-keypair.json
```
Assuming the node is voting, now you're up and running and generating validator rewards. You'll want to periodically redeem/claim your rewards:
```bash
$ solana redeem-vote-credits ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
solana redeem-vote-credits ~/validator-stake-keypair.json ~/validator-vote-keypair.json
```
The rewards lamports earned are split between your stake account and the vote account according to the commission rate set in the vote account. Rewards can only be earned while the validator is up and running. Further, once staked, the validator becomes an important part of the network. In order to safely remove a validator from the network, first deactivate its stake.
@ -36,7 +36,7 @@ The rewards lamports earned are split between your stake account and the vote ac
Stake can be deactivated by running:
```bash
$ solana deactivate-stake ~/validator-config/stake-keypair.json
solana deactivate-stake ~/validator-stake-keypair.json
```
The stake will cool down, deactivating over time. While cooling down, your stake will continue to earn rewards. Only after stake cooldown is it safe to turn off your validator or withdraw it from the network. Cooldown may take several epochs to complete, depending on active stake and the size of your stake.
@ -44,4 +44,3 @@ The stake will cool down, deactivate over time. While cooling down, your stake w
Note that a stake account may only be used once, so after deactivation, use the cli's `withdraw-stake` command to recover the previously staked lamports.
Be sure to redeem your credits before withdrawing all your lamports. Once the account is fully withdrawn, the account is destroyed.

View File

@ -7,7 +7,7 @@ Before attaching a validator node, sanity check that the cluster is accessible t
Fetch the current transaction count over JSON RPC:
```bash
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
```
Inspect the network explorer at [https://explorer.solana.com/](https://explorer.solana.com/) for activity.
@ -19,16 +19,16 @@ View the [metrics dashboard](https://metrics.solana.com:3000/d/testnet-beta/test
Sanity check that you are able to interact with the cluster by receiving a small airdrop of lamports from the testnet drone:
```bash
$ solana set --url http://testnet.solana.com:8899
$ solana get
$ solana airdrop 123 lamports
$ solana balance --lamports
solana set --url http://testnet.solana.com:8899
solana get
solana airdrop 123 lamports
solana balance --lamports
```
Also try running following command to join the gossip network and view all the other nodes in the cluster:
```bash
$ solana-gossip --entrypoint testnet.solana.com:8001 spy
solana-gossip --entrypoint testnet.solana.com:8001 spy
# Press ^C to exit
```
@ -37,7 +37,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
Create an identity keypair for your validator by running:
```bash
$ solana-keygen new -o ~/validator-keypair.json
solana-keygen new -o ~/validator-keypair.json
```
### Wallet Configuration
@ -45,30 +45,30 @@ $ solana-keygen new -o ~/validator-keypair.json
You can set solana configuration to use your validator keypair for all following commands:
```bash
$ solana set --keypair ~/validator-keypair.json
solana set --keypair ~/validator-keypair.json
```
**All following solana commands assume you have set the `--keypair` config to your validator identity keypair.** If you haven't, you will need to add the `--keypair` argument to each command, like:
```bash
$ solana --keypair ~/validator-keypair.json airdrop 1000 lamports
solana --keypair ~/validator-keypair.json airdrop 10
```
\(You can always override the set configuration by explicitly passing the `--keypair` argument with a command.\)
### Validator Start
Airdrop yourself some lamports to get started:
Airdrop yourself some SOL to get started:
```bash
$ solana airdrop 1000 lamports
solana airdrop 10
```
Your validator will need a vote account. Create it now with the following commands:
```bash
$ solana-keygen new -o ~/validator-vote-keypair.json
$ solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json 1 lamports
solana-keygen new -o ~/validator-vote-keypair.json
solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json
```
Then use one of the following commands, depending on your installation choice, to start the node:
@ -76,19 +76,19 @@ Then use one of the following commands, depending on your installation choice, t
If this is a `solana-install`-installation:
```bash
$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
solana-validator --identity-keypair ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
```
Alternatively, the `solana-install run` command can be used to run the validator node while periodically checking for and applying software updates:
```bash
$ solana-install run solana-validator -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
solana-install run solana-validator -- --identity-keypair ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
```
If you built from source:
```bash
$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity-keypair ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
```
### Enabling CUDA
@ -98,7 +98,7 @@ If your machine has a GPU with CUDA installed \(Linux-only currently\), include
Or if you built from source, define the SOLANA\_CUDA flag in your environment _before_ running any of the previously mentioned commands
```bash
$ export SOLANA_CUDA=1
export SOLANA_CUDA=1
```
When your validator is started look for the following log message to indicate that CUDA is enabled: `"[<timestamp> solana::validator] CUDA is enabled"`
@ -110,4 +110,3 @@ By default the validator will dynamically select available network ports in the
### Limiting ledger size to conserve disk space
By default the validator will retain the full ledger. To conserve disk space, start the validator with the `--limit-ledger-size` flag, which will instruct the validator to retain only the last couple of hours of ledger.

View File

@ -15,7 +15,7 @@ Prior to mainnet, the testnets may be running different versions of solana softw
You can submit a JSON-RPC request to see the specific version of the cluster.
```bash
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
{"jsonrpc":"2.0","result":{"solana-core":"0.18.0-pre1"},"id":1}
```
@ -28,17 +28,17 @@ This guide is written in the context of testnet.solana.com, our most stable clus
If you are bootstrapping with `solana-install`, you can specify the release tag or named channel to install to match your desired testnet.
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - 0.18.0
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - 0.18.0
```
```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - beta
curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - beta
```
Similarly, you can add this argument to the `solana-install` command if you've built the program from source:
```bash
$ solana-install init 0.18.0
solana-install init 0.18.0
```
If you are downloading pre-compiled binaries or building from source, simply choose the release matching your desired testnet.
@ -48,14 +48,14 @@ If you are downloading pre-compiled binaries or building from source, simply cho
The Solana CLI tool points at testnet.solana.com by default. Include a `--url` argument to point at a different testnet. For instance:
```bash
$ solana --url http://beta.testnet.solana.com:8899 balance
solana --url http://beta.testnet.solana.com:8899 balance
```
The solana cli includes `get` and `set` configuration commands to automatically set the `--url` argument for future cli commands. For example:
```bash
$ solana set --url http://beta.testnet.solana.com:8899
$ solana balance # Same result as command above
solana set --url http://beta.testnet.solana.com:8899
solana balance # Same result as command above
```
\(You can always override the set configuration by explicitly passing the `--url` argument with a command.\)
@ -63,12 +63,11 @@ $ solana balance # Same result as command above
Solana-gossip and solana-validator commands already require an explicit `--entrypoint` argument. Simply replace testnet.solana.com in the examples with an alternate url to interact with a different testnet. For example:
```bash
$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
solana-validator --identity-keypair ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
```
You can also submit JSON-RPC requests to a different testnet, like:
```bash
$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
```

View File

@ -18,13 +18,13 @@ The result of interpreting all programs on the ledger at a given [tick height](t
A contiguous set of [entries](terminology.md#entry) on the ledger covered by a [vote](terminology.md#ledger-vote). A [leader](terminology.md#leader) produces at most one block per [slot](terminology.md#slot).
## blockhash
A preimage resistant [hash](terminology.md#hash) of the [ledger](terminology.md#ledger) at a given [block height](terminology.md#block-height). Taken from the last [entry id](terminology.md#entry-id) in the slot
## block height
The number of [blocks](terminology.md#block) beneath the current block. The first block after the [genesis block](terminology.md#genesis-block) has height zero.
## block id
The [entry id](terminology.md#entry-id) of the last entry in a [block](terminology.md#block).
The number of [blocks](terminology.md#block) beneath the current block. The first block after the [genesis block](terminology.md#genesis-block) has height one.
## bootstrap leader
@ -72,7 +72,13 @@ An entry on the [ledger](terminology.md#ledger) either a [tick](terminology.md#t
## entry id
A globally unique identifier that is also a proof that the [entry](terminology.md#entry) was generated after a duration of time, all [transactions](terminology.md#transaction) included in the entry, and all previous entries on the [ledger](terminology.md#ledger). See [Proof of History](terminology.md#proof-of-history).
A preimage resistant [hash](terminology.md#hash) over the final contents of an entry, which acts as the [entry's](terminology.md#entry) globally unique identifier. The hash serves as evidence of:
* The entry being generated after a duration of time
* The specified [transactions](terminology.md#transaction) are those included in the entry
* The entry's position with respect to other entries in the [ledger](terminology.md#ledger)
See [Proof of History](terminology.md#proof-of-history).
## epoch
@ -84,7 +90,7 @@ A proof which has the same format as a storage proof, but the sha state is actua
## fee account
The fee account in the transaction is the account pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Credit-Debit in the transaction since paying for the transaction reduces the account balance.
The fee account in the transaction is the account that pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Read-Write (writable) in the transaction since paying for the transaction reduces the account balance.
## finality
@ -96,7 +102,11 @@ A [ledger](terminology.md#ledger) derived from common entries but then diverged.
## genesis block
The configuration file that prepares the [ledger](terminology.md#ledger) for the first [block](terminology.md#block).
The first [block](terminology.md#block) in the chain.
## genesis config
The configuration file that prepares the [ledger](terminology.md#ledger) for the [genesis block](terminology.md#genesis-block).
## hash
@ -112,11 +122,7 @@ A [public key](terminology.md#public-key) and corresponding [private key](termin
## lamport
A fractional [native token](terminology.md#native-token) with the value of approximately 0.0000000000582 [sol](terminology.md#sol) \(2^-34\).
## loader
A [program](terminology.md#program) with the ability to interpret the binary encoding of other on-chain programs.
A fractional [native token](terminology.md#native-token) with the value of 0.000000001 [sol](terminology.md#sol).
## leader
@ -142,6 +148,10 @@ A [hash](terminology.md#hash) of the [validator's state](terminology.md#bank-sta
A type of [client](terminology.md#client) that can verify it's pointing to a valid [cluster](terminology.md#cluster). It performs more ledger verification than a [thin client](terminology.md#thin-client) and less than a [validator](terminology.md#validator).
## loader
A [program](terminology.md#program) with the ability to interpret the binary encoding of other on-chain programs.
## lockout
The duration of time for which a [validator](terminology.md#validator) is unable to [vote](terminology.md#ledger-vote) on another [fork](terminology.md#fork).
@ -293,4 +303,3 @@ A reward tally for validators. A vote credit is awarded to a validator in its vo
## warmup period
Some number of epochs after stake has been delegated while it progressively becomes effective. During this period, the stake is considered to be "activating". More info about: [warmup and cooldown](cluster/stake-delegation-and-rewards.md#stake-warmup-cooldown-withdrawal)

View File

@ -1,12 +1,12 @@
# Anatomy of a Transaction
Transactions encode lists of instructions that are executed sequentially, and only committed if all the instructions complete successfully. All account updates are reverted upon the failure of a transaction. Each transaction details the accounts used, including which must sign and which are credit only, a recent blockhash, the instructions, and any signatures.
Transactions encode lists of instructions that are executed sequentially, and only committed if all the instructions complete successfully. All account updates are reverted upon the failure of a transaction. Each transaction details the accounts used, including which must sign and which are read only, a recent blockhash, the instructions, and any signatures.
## Accounts and Signatures
Each transaction explicitly lists all account public keys referenced by the transaction's instructions. A subset of those public keys are each accompanied by a transaction signature. Those signatures signal on-chain programs that the account holder has authorized the transaction. Typically, the program uses the authorization to permit debiting the account or modifying its data.
The transaction also marks some accounts as _credit-only accounts_. The runtime permits credit-only accounts to be credited concurrently. If a program attempts to debit a credit-only account or modify its account data, the transaction is rejected by the runtime.
The transaction also marks some accounts as _read-only accounts_. The runtime permits read-only accounts to be read concurrently. If a program attempts to modify a read-only account, the transaction is rejected by the runtime.
## Recent Blockhash
@ -15,4 +15,3 @@ A Transaction includes a recent blockhash to prevent duplication and to give tra
## Instructions
Each instruction specifies a single program account \(which must be marked executable\), a subset of the transaction's accounts that should be passed to the program, and a data byte array instruction that is passed to the program. The program interprets the data array and operates on the accounts specified by the instructions. The program can return successfully, or with an error code. An error return causes the entire transaction to fail immediately.
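As a schematic summary of the layout described above (simplified stand-in types for illustration, not the SDK's actual wire format):
```rust
type Pubkey = [u8; 32];
type Signature = [u8; 64];
type Hash = [u8; 32];

/// One instruction: a program to run, the accounts it may touch, and opaque data.
struct Instruction {
    program_id_index: u8,     // index into `account_keys` of the (executable) program account
    account_indexes: Vec<u8>, // subset of `account_keys` passed to the program
    data: Vec<u8>,            // opaque byte array interpreted by the program
}

/// Simplified picture of a transaction.
struct Transaction {
    signatures: Vec<Signature>,     // one per required signer, in account order
    account_keys: Vec<Pubkey>,      // signer accounts first; read-only accounts marked as such
    recent_blockhash: Hash,         // bounds the transaction's lifetime and prevents duplication
    instructions: Vec<Instruction>, // executed sequentially; any failure reverts them all
}
```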

View File

@ -2,7 +2,7 @@
## The Runtime
The runtime is a concurrent transaction processor. Transactions specify their data dependencies upfront and dynamic memory allocation is explicit. By separating program code from the state it operates on, the runtime is able to choreograph concurrent access. Transactions accessing only credit-only accounts are executed in parallel whereas transactions accessing writable accounts are serialized. The runtime interacts with the program through an entrypoint with a well-defined interface. The data stored in an account is an opaque type, an array of bytes. The program has full control over its contents.
The runtime is a concurrent transaction processor. Transactions specify their data dependencies upfront and dynamic memory allocation is explicit. By separating program code from the state it operates on, the runtime is able to choreograph concurrent access. Transactions accessing only read-only accounts are executed in parallel whereas transactions accessing writable accounts are serialized. The runtime interacts with the program through an entrypoint with a well-defined interface. The data stored in an account is an opaque type, an array of bytes. The program has full control over its contents.
The transaction structure specifies a list of public keys and signatures for those keys and a sequential list of instructions that will operate over the states associated with the account keys. For the transaction to be committed all the instructions must execute successfully; if any abort the whole transaction fails to commit.
@ -28,7 +28,7 @@ The runtime enforces the following rules:
1. Only the _owner_ program may modify the contents of an account. This means that upon assignment the data vector is guaranteed to be zero.
2. Total balances on all the accounts is equal before and after execution of a transaction.
3. After the transaction is executed, balances of credit-only accounts must be greater than or equal to the balances before the transaction.
3. After the transaction is executed, balances of read-only accounts must be equal to the balances before the transaction.
4. All instructions in the transaction executed atomically. If one fails, all account modifications are discarded.
Execution of the program involves mapping the program's public key to an entrypoint which takes a pointer to the transaction, and an array of loaded accounts.
@ -62,4 +62,3 @@ To pass messages between programs, the receiving program must accept the message
* [Continuations and Signals for long running Transactions](https://github.com/solana-labs/solana/issues/1485)

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.20.0"
version = "0.21.1"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -9,4 +9,4 @@ license = "Apache-2.0"
edition = "2018"
[build-dependencies]
cc = "1.0.46"
cc = "1.0.47"

View File

@ -8,6 +8,8 @@
# ./affects-files.sh .rs -- also matches foo.rs.bar
# ./affects-files.sh ^snap/ -- anything under the snap/ subdirectory
# ./affects-files.sh snap/ -- also matches foo/snap/
# Any pattern starting with the ! character will be negated:
# ./affects-files.sh !^book/ -- anything *not* under the book/ subdirectory
#
set -e
cd "$(dirname "$0")"/..
@ -18,11 +20,19 @@ if [[ -n $CI_PULL_REQUEST ]]; then
IFS=':' read -ra files <<< "$affectedFiles"
for pattern in "$@"; do
for file in "${files[@]}"; do
if [[ $file =~ $pattern ]]; then
exit 0
fi
done
if [[ ${pattern:0:1} = "!" ]]; then
for file in "${files[@]}"; do
if [[ ! $file =~ ${pattern:1} ]]; then
exit 0
fi
done
else
for file in "${files[@]}"; do
if [[ $file =~ $pattern ]]; then
exit 0
fi
done
fi
done
exit 1

View File

@ -7,7 +7,7 @@ steps:
timeout_in_minutes: 5
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 35
timeout_in_minutes: 20
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
@ -17,18 +17,21 @@ steps:
- "queue=cuda"
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 60
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 40
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
name: "move"
timeout_in_minutes: 20
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
name: "local-cluster"
timeout_in_minutes: 40
timeout_in_minutes: 30
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 40
timeout_in_minutes: 30
- wait
- trigger: "solana-secondary"
branches: "!pull/*"

View File

@ -1,4 +1,4 @@
FROM solanalabs/rust:1.38.0
FROM solanalabs/rust:1.39.0
ARG date
RUN set -x \


@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.38.0
FROM rust:1.39.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0


@ -75,6 +75,7 @@ source multinode-demo/common.sh
nodes=(
"multinode-demo/drone.sh"
"multinode-demo/bootstrap-leader.sh \
--no-restart \
--init-complete-file init-complete-node1.log \
--dynamic-port-range 8000-8050"
"multinode-demo/validator.sh \
@ -85,17 +86,19 @@ nodes=(
--rpc-port 18899"
)
for i in $(seq 1 $extraNodes); do
portStart=$((8100 + i * 50))
portEnd=$((portStart + 49))
nodes+=(
"multinode-demo/validator.sh \
--no-restart \
--dynamic-port-range $portStart-$portEnd
--label dyn$i \
--init-complete-file init-complete-node$((2 + i)).log"
)
done
if [[ extraNodes -gt 0 ]]; then
for i in $(seq 1 $extraNodes); do
portStart=$((8100 + i * 50))
portEnd=$((portStart + 49))
nodes+=(
"multinode-demo/validator.sh \
--no-restart \
--dynamic-port-range $portStart-$portEnd
--label dyn$i \
--init-complete-file init-complete-node$((2 + i)).log"
)
done
fi
numNodes=$((2 + extraNodes))
pids=()
@ -153,7 +156,7 @@ startNodes() {
addLogs=true
fi
initCompleteFiles=()
maybeExpectedGenesisBlockhash=
maybeExpectedGenesisHash=
for i in $(seq 0 $((${#nodes[@]} - 1))); do
declare cmd=${nodes[$i]}
@ -162,7 +165,7 @@ startNodes() {
rm -f "$initCompleteFile"
initCompleteFiles+=("$initCompleteFile")
fi
startNode "$i" "$cmd $maybeExpectedGenesisBlockhash"
startNode "$i" "$cmd $maybeExpectedGenesisHash"
if $addLogs; then
logs+=("$(getNodeLogFile "$i" "$cmd")")
fi
@ -176,9 +179,9 @@ startNodes() {
(
set -x
$solana_cli --keypair config/bootstrap-leader/identity-keypair.json \
--url http://127.0.0.1:8899 get-genesis-blockhash
) | tee genesis-blockhash.log
maybeExpectedGenesisBlockhash="--expected-genesis-blockhash $(tail -n1 genesis-blockhash.log)"
--url http://127.0.0.1:8899 get-genesis-hash
) | tee genesis-hash.log
maybeExpectedGenesisHash="--expected-genesis-hash $(tail -n1 genesis-hash.log)"
fi
done
@ -312,7 +315,7 @@ flag_error() {
if ! $skipSetup; then
clear_config_dir "$SOLANA_CONFIG_DIR"
multinode-demo/setup.sh
multinode-demo/setup.sh --hashes-per-tick sleep
else
verifyLedger
fi
@ -323,8 +326,8 @@ while [[ $iteration -le $iterations ]]; do
(
set -x
client_keypair=/tmp/client-id.json-$$
$solana_keygen new -f -o $client_keypair || exit $?
$solana_gossip spy --num-nodes-exactly $numNodes || exit $?
$solana_keygen new --no-passphrase -fso $client_keypair || exit $?
$solana_gossip spy -n 127.0.0.1:8001 --num-nodes-exactly $numNodes || exit $?
rm -rf $client_keypair
) || flag_error


@ -21,15 +21,13 @@ declare print_free_tree=(
'core/src'
'drone/src'
'metrics/src'
'netutil/src'
'net-utils/src'
'runtime/src'
'sdk/bpf/rust/rust-utils'
'sdk/src'
'programs/bpf/rust'
'programs/stake_api/src'
'programs/stake_program/src'
'programs/vote_api/src'
'programs/vote_program/src'
'programs/stake/src'
'programs/vote/src'
)
if _ git --no-pager grep -n --max-depth=0 "${prints[@]/#/-e }" -- "${print_free_tree[@]}"; then
@ -50,11 +48,13 @@ fi
# marking up the code
#
# Ref: https://github.com/solana-labs/solana/issues/6474
#
# shellcheck disable=1001
declare useGithubIssueInsteadOf=(
'XXX'
'TBD'
'FIXME'
#'TODO' # TODO: Uncomment this line to disable TODOs
X\XX
T\BD
F\IXME
#T\ODO # TODO: Disable TODOs once all other TODOs are purged
)
if _ git --no-pager grep -n --max-depth=0 "${useGithubIssueInsteadOf[@]/#/-e }" -- '*.rs' '*.sh' '*.md'; then
@ -63,6 +63,6 @@ fi
# TODO: Remove this `git grep` once TODOs are banned above
# (this command is only used to highlight the current offenders)
_ git --no-pager grep -n --max-depth=0 "-e TODO" -- '*.rs' '*.sh' || true
_ git --no-pager grep -n --max-depth=0 "-e TODO" -- '*.rs' '*.sh' '*.md' || true
echo "^^^ +++"
# END TODO


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python2.7
#
# This script figures the order in which workspace crates must be published to
# crates.io. Along the way it also ensures there are no circular dependencies
@ -47,7 +47,7 @@ def get_packages():
while dependency_graph:
if max_iterations == 0:
# One day be more helpful and find the actual cycle for the user...
sys.exit('Error: Circular dependency suspected between these packages: {}\n'.format(' '.join(dependency_graph.keys())))
sys.exit('Error: Circular dependency suspected between these packages: {}\n'.format('\n '.join(dependency_graph.keys())))
max_iterations -= 1
for package, dependencies in dependency_graph.items():


@ -67,8 +67,11 @@ echo --- Creating tarball
echo "target: $TARGET"
) > solana-release/version.yml
# Make CHANNEL available to include in the software version information
export CHANNEL
source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" solana-release
scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
# Reduce the Windows archive size until
# https://github.com/appveyor/ci/issues/2997 is fixed

ci/run-sanity.sh Executable file

@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
rm -f config/run/init-completed
timeout 15 ./run.sh &
pid=$!
attempts=20
while [[ ! -f config/run/init-completed ]]; do
sleep 1
if ((--attempts == 0)); then
echo "Error: validator failed to boot"
exit 1
fi
done
curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"validatorExit"}' http://localhost:8899
wait $pid


@ -13,8 +13,18 @@
# $ source ci/rust-version.sh
#
stable_version=1.38.0
nightly_version=2019-10-03
if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.39.0
fi
if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2019-11-13
fi
export rust_stable="$stable_version"
export rust_stable_docker_image=solanalabs/rust:"$stable_version"


@ -6,13 +6,7 @@ set -e
cd "$(dirname "$0")/.."
(
set -x
find . -name "*.sh" \
-not -regex ".*/ci/semver_bash/.*" \
-not -regex ".*/.cargo/.*" \
-not -regex ".*/node_modules/.*" \
-not -regex ".*/target/.*" \
-print0 \
| xargs -0 \
ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
git ls-files -- '*.sh' ':(exclude)ci/semver_bash' \
| xargs ci/docker-run.sh koalaman/shellcheck@sha256:fe24ab9a9b6b62d3adb162f4a80e006b6a63cae8c6ffafbae45772bab85e7294 --color=always --external-sources --shell=bash
)
echo --- ok


@ -68,7 +68,7 @@ _ cargo +$rust_nightly bench --manifest-path programs/bpf/Cargo.toml ${V:+--verb
-- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE"
# Run banking bench. Doesn't require nightly, but use since it is already built.
_ cargo +$rust_nightly run --release --manifest-path banking_bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
_ cargo +$rust_nightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE"
# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some
# reason

ci/test-move.sh Symbolic link

@ -0,0 +1 @@
test-stable.sh


@ -10,6 +10,18 @@ annotate() {
}
}
# Run the appropriate test based on entrypoint
testName=$(basename "$0" .sh)
# Skip if only the book has been modified
ci/affects-files.sh \
\!^book/ \
|| {
annotate --style info \
"Skipped $testName as only book files were modified"
exit 0
}
source ci/rust-version.sh stable
export RUST_BACKTRACE=1
@ -26,19 +38,13 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105
# Run the appropriate test based on entrypoint
testName=$(basename "$0" .sh)
echo "Executing $testName"
case $testName in
test-stable)
echo "Executing $testName"
_ cargo +"$rust_stable" build --tests --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path local_cluster/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)
echo "Executing $testName"
ci/affects-files.sh \
.rs$ \
Cargo.lock$ \
@ -53,7 +59,7 @@ test-stable-perf)
^sdk/ \
|| {
annotate --style info \
"Skipped test-stable-perf as no relevant files were modified"
"Skipped $testName as no relevant files were modified"
exit 0
}
@ -80,10 +86,30 @@ test-stable-perf)
fi
_ cargo +"$rust_stable" build --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
;;
test-move)
ci/affects-files.sh \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable.sh \
^ci/test-move.sh \
^programs/move_loader \
^programs/librapay_api \
^logger/ \
^runtime/ \
^sdk/ \
|| {
annotate --style info \
"Skipped $testName as no relevant files were modified"
exit 0
}
_ cargo +"$rust_stable" test --manifest-path programs/move_loader/Cargo.toml ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path programs/librapay_api/Cargo.toml ${V:+--verbose} -- --nocapture
exit 0
;;
test-local-cluster)
echo "Executing $testName"
_ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
_ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture
exit 0
@ -93,9 +119,11 @@ test-local-cluster)
;;
esac
echo --- ci/localnet-sanity.sh
export CARGO_TOOLCHAIN=+"$rust_stable"
(
set -x
export CARGO_TOOLCHAIN=+"$rust_stable"
echo --- ci/localnet-sanity.sh
ci/localnet-sanity.sh -x
echo --- ci/run-sanity.sh
ci/run-sanity.sh -x
)


@ -28,7 +28,6 @@ maybeDisableAirdrops=
maybeInternalNodesStakeLamports=
maybeInternalNodesLamports=
maybeExternalPrimordialAccountsFile=
maybeLamports=
maybeSlotsPerEpoch=
maybeTargetLamportsPerSignature=
maybeSlotsPerEpoch=
@ -75,17 +74,17 @@ Deploys a CD testnet
-S - Stop network software without tearing down nodes.
-f - Discard validator nodes that didn't boot up successfully
--no-airdrop
- If set, disables airdrops. Nodes must be funded in genesis block when airdrops are disabled.
- If set, disables airdrops. Nodes must be funded in genesis config when airdrops are disabled.
--internal-nodes-stake-lamports NUM_LAMPORTS
- Amount to stake internal nodes.
--internal-nodes-lamports NUM_LAMPORTS
- Amount to fund internal nodes in genesis block
- Amount to fund internal nodes in genesis config
--external-accounts-file FILE_PATH
- Path to external Primordial Accounts file, if it exists.
--hashes-per-tick NUM_HASHES|sleep|auto
- Override the default --hashes-per-tick for the cluster
--lamports NUM_LAMPORTS
- Specify the number of lamports to mint (default 100000000000000)
- Specify the number of lamports to mint (default 500000000000000000)
--skip-deploy-update
- If set, will skip software update deployment
--skip-remote-log-retrieval
@ -114,9 +113,6 @@ while [[ -n $1 ]]; do
elif [[ $1 = --slots-per-epoch ]]; then
maybeSlotsPerEpoch="$1 $2"
shift 2
elif [[ $1 = --lamports ]]; then
maybeLamports="$1 $2"
shift 2
elif [[ $1 = --target-lamports-per-signature ]]; then
maybeTargetLamportsPerSignature="$1 $2"
shift 2
@ -262,6 +258,11 @@ trap shutdown EXIT INT
set -x
# Fetch reusable testnet keypairs
if [[ ! -d net/keypairs ]]; then
git clone git@github.com:solana-labs/testnet-keypairs.git net/keypairs
fi
# Build a string to pass zone opts to $cloudProvider.sh: "-z zone1 -z zone2 ..."
zone_args=()
for val in "${zone[@]}"; do
@ -288,11 +289,16 @@ if ! $skipCreate; then
echo "--- $cloudProvider.sh create"
create_args=(
-p "$netName"
-a "$bootstrapValidatorAddress"
-c "$clientNodeCount"
-n "$additionalValidatorCount"
--dedicated
--self-destruct-hours 0
)
if [[ -n $bootstrapValidatorAddress ]]; then
create_args+=(-a "$bootstrapValidatorAddress")
fi
# shellcheck disable=SC2206
create_args+=(${zone_args[@]})
@ -402,7 +408,6 @@ if ! $skipStart; then
$maybeInternalNodesStakeLamports
$maybeInternalNodesLamports
$maybeExternalPrimordialAccountsFile
$maybeLamports
$maybeSlotsPerEpoch
$maybeTargetLamportsPerSignature
$maybeNoSnapshot


@ -216,7 +216,7 @@ maybe_deploy_software() {
(
echo "--- net.sh restart"
set -x
time net/net.sh restart --skip-setup -t "$CHANNEL_OR_TAG" --skip-ledger-verify "$arg"
time net/net.sh restart --skip-setup -t "$CHANNEL_OR_TAG" --skip-poh-verify "$arg"
) || ok=false
if ! $ok; then
net/net.sh logs
@ -246,7 +246,7 @@ sanity() {
(
set -x
NO_INSTALL_CHECK=1 \
ci/testnet-sanity.sh beta-testnet-solana-com gce us-west1-b
ci/testnet-sanity.sh beta-testnet-solana-com gce -P us-west1-b
maybe_deploy_software --deploy-if-newer
)
;;
@ -260,7 +260,7 @@ sanity() {
testnet)
(
set -x
ci/testnet-sanity.sh testnet-solana-com gce us-west1-b
ci/testnet-sanity.sh testnet-solana-com gce -P us-west1-b
)
;;
testnet-perf)
@ -386,7 +386,7 @@ deploy() {
(
echo "--- net.sh update"
set -x
time net/net.sh update -t edge --platform linux --platform osx --platform windows
time net/net.sh update -t "$CHANNEL_OR_TAG" --platform linux --platform osx --platform windows
)
;;
testnet-perf)
@ -479,7 +479,7 @@ deploy() {
fi
if [[ -z $INTERNAL_NODES_STAKE_LAMPORTS ]]; then
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports 17179869184" # 1 SOL
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports 1000000000" # 1 SOL
elif [[ $INTERNAL_NODES_STAKE_LAMPORTS == skip ]]; then
maybeInternalNodesStakeLamports=""
else
@ -487,7 +487,7 @@ deploy() {
fi
if [[ -z $INTERNAL_NODES_LAMPORTS ]]; then
maybeInternalNodesLamports="--internal-nodes-lamports 34359738368" # 2 SOL
maybeInternalNodesLamports="--internal-nodes-lamports 2000000000" # 2 SOL
elif [[ $INTERNAL_NODES_LAMPORTS == skip ]]; then
maybeInternalNodesLamports=""
else
@ -506,14 +506,6 @@ deploy() {
maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
fi
if [[ -z $LAMPORTS ]]; then
maybeLamports="--lamports 8589934592000000000"
elif [[ $LAMPORTS == skip ]]; then
maybeLamports=""
else
maybeLamports="--lamports ${LAMPORTS}"
fi
if [[ -z $ADDITIONAL_DISK_SIZE_GB ]]; then
maybeAdditionalDisk="--validator-additional-disk-size-gb 32000"
elif [[ $ADDITIONAL_DISK_SIZE_GB == skip ]]; then
@ -522,7 +514,6 @@ deploy() {
maybeAdditionalDisk="--validator-additional-disk-size-gb ${ADDITIONAL_DISK_SIZE_GB}"
fi
# Multiple V100 GPUs are available in us-west1, us-central1 and europe-west4
# shellcheck disable=SC2068
# shellcheck disable=SC2086
@ -545,8 +536,7 @@ deploy() {
${maybeInternalNodesStakeLamports} \
${maybeInternalNodesLamports} \
${maybeExternalAccountsFile} \
${maybeLamports} \
--target-lamports-per-signature 1 \
--target-lamports-per-signature 0 \
--slots-per-epoch 4096 \
${maybeAdditionalDisk}
)

clap-utils/Cargo.toml Normal file

@ -0,0 +1,20 @@
[package]
name = "solana-clap-utils"
version = "0.21.1"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
semver = "0.9.0"
solana-sdk = { path = "../sdk", version = "0.21.1" }
tiny-bip39 = "0.6.2"
url = "2.1.0"
[lib]
name = "solana_clap_utils"


@ -1,9 +1,11 @@
use crate::keypair::{keypair_from_seed_phrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG};
use clap::ArgMatches;
use solana_sdk::{
native_token::sol_to_lamports,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, KeypairUtil},
signature::{read_keypair_file, Keypair, KeypairUtil, Signature},
};
use std::str::FromStr;
// Return parsed values from matches at `name`
pub fn values_of<T>(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<T>>
@ -32,7 +34,12 @@ where
// Return the keypair for an argument with filename `name` or None if not present.
pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
if let Some(value) = matches.value_of(name) {
read_keypair_file(value).ok()
if value == ASK_KEYWORD {
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(name, skip_validation, true).ok()
} else {
read_keypair_file(value).ok()
}
} else {
None
}
@ -44,6 +51,20 @@ pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey()))
}
// Return pubkey/signature pairs for a string of the form pubkey=signature
pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubkey, Signature)>> {
matches.values_of(name).map(|values| {
values
.map(|pubkey_signer_string| {
let mut signer = pubkey_signer_string.split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect()
})
}
pub fn amount_of(matches: &ArgMatches<'_>, name: &str, unit: &str) -> Option<u64> {
if matches.value_of(unit) == Some("lamports") {
value_of(matches, name)
@ -125,14 +146,17 @@ mod tests {
let matches = app()
.clone()
.get_matches_from(vec!["test", "--single", &outfile]);
assert_eq!(keypair_of(&matches, "single"), Some(keypair));
assert_eq!(keypair_of(&matches, "multiple"), None);
assert_eq!(
keypair_of(&matches, "single").unwrap().pubkey(),
keypair.pubkey()
);
assert!(keypair_of(&matches, "multiple").is_none());
let matches =
app()
.clone()
.get_matches_from(vec!["test", "--single", "random_keypair_file.json"]);
assert_eq!(keypair_of(&matches, "single"), None);
assert!(keypair_of(&matches, "single").is_none());
fs::remove_file(&outfile).unwrap();
}
@ -163,4 +187,25 @@ mod tests {
fs::remove_file(&outfile).unwrap();
}
#[test]
fn test_pubkeys_sigs_of() {
let key1 = Pubkey::new_rand();
let key2 = Pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let sig2 = Keypair::new().sign_message(&[1u8]);
let signer1 = format!("{}={}", key1, sig1);
let signer2 = format!("{}={}", key2, sig2);
let matches = app().clone().get_matches_from(vec![
"test",
"--multiple",
&signer1,
"--multiple",
&signer2,
]);
assert_eq!(
pubkeys_sigs_of(&matches, "multiple"),
Some(vec![(key1, sig1), (key2, sig2)])
);
}
}


@ -0,0 +1,120 @@
use crate::keypair::ASK_KEYWORD;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{read_keypair_file, Signature};
use std::str::FromStr;
// Return an error if a pubkey cannot be parsed.
pub fn is_pubkey(string: String) -> Result<(), String> {
match string.parse::<Pubkey>() {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
}
}
// Return an error if a hash cannot be parsed.
pub fn is_hash(string: String) -> Result<(), String> {
match string.parse::<Hash>() {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
}
}
// Return an error if a keypair file cannot be parsed.
pub fn is_keypair(string: String) -> Result<(), String> {
read_keypair_file(&string)
.map(|_| ())
.map_err(|err| format!("{:?}", err))
}
// Return an error if a keypair file cannot be parsed
pub fn is_keypair_or_ask_keyword(string: String) -> Result<(), String> {
if string.as_str() == ASK_KEYWORD {
return Ok(());
}
read_keypair_file(&string)
.map(|_| ())
.map_err(|err| format!("{:?}", err))
}
// Return an error if string cannot be parsed as pubkey string or keypair file location
pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
}
// Return an error if string cannot be parsed as pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
let mut signer = string.split('=');
match Pubkey::from_str(
signer
.next()
.ok_or_else(|| "Malformed signer string".to_string())?,
) {
Ok(_) => {
match Signature::from_str(
signer
.next()
.ok_or_else(|| "Malformed signer string".to_string())?,
) {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
}
}
Err(err) => Err(format!("{:?}", err)),
}
}
// Return an error if a url cannot be parsed.
pub fn is_url(string: String) -> Result<(), String> {
match url::Url::parse(&string) {
Ok(url) => {
if url.has_host() {
Ok(())
} else {
Err("no host provided".to_string())
}
}
Err(err) => Err(format!("{:?}", err)),
}
}
pub fn is_semver(semver: &str) -> Result<(), String> {
match semver::Version::parse(&semver) {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
}
}
pub fn is_release_channel(channel: &str) -> Result<(), String> {
match channel {
"edge" | "beta" | "stable" => Ok(()),
_ => Err(format!("Invalid release channel {}", channel)),
}
}
pub fn is_port(port: String) -> Result<(), String> {
port.parse::<u16>()
.map(|_| ())
.map_err(|e| format!("{:?}", e))
}
pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
percentage
.parse::<u8>()
.map_err(|e| {
format!(
"Unable to parse input percentage, provided: {}, err: {:?}",
percentage, e
)
})
.and_then(|v| {
if v > 100 {
Err(format!(
"Percentage must be in range of 0 to 100, provided: {}",
v
))
} else {
Ok(())
}
})
}

clap-utils/src/keypair.rs Normal file

@ -0,0 +1,167 @@
use crate::ArgConstant;
use bip39::{Language, Mnemonic, Seed};
use clap::values_t;
use rpassword::prompt_password_stderr;
use solana_sdk::{
pubkey::Pubkey,
signature::{
keypair_from_seed, keypair_from_seed_phrase_and_passphrase, read_keypair_file, Keypair,
KeypairUtil,
},
};
use std::{
error,
io::{stdin, stdout, Write},
process::exit,
};
// Keyword used to indicate that the user should be asked for a keypair seed phrase
pub const ASK_KEYWORD: &str = "ASK";
pub const ASK_SEED_PHRASE_ARG: ArgConstant<'static> = ArgConstant {
long: "ask-seed-phrase",
name: "ask_seed_phrase",
help: "Securely recover a keypair using a seed phrase and optional passphrase",
};
pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {
long: "skip-seed-phrase-validation",
name: "skip_seed_phrase_validation",
help: "Skip validation of seed phrases. Use this if your phrase does not use the BIP39 official English word list",
};
#[derive(Debug, PartialEq)]
pub enum Source {
File,
Generated,
SeedPhrase,
}
pub struct KeypairWithSource {
pub keypair: Keypair,
pub source: Source,
}
impl KeypairWithSource {
fn new(keypair: Keypair, source: Source) -> Self {
Self { keypair, source }
}
}
/// Prompts user for a passphrase and then asks for confirmation to check for mistakes
pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
let passphrase = prompt_password_stderr(&prompt)?;
if !passphrase.is_empty() {
let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?;
if confirmed != passphrase {
return Err("Passphrases did not match".into());
}
}
Ok(passphrase)
}
/// Reads user input from stdin to retrieve a seed phrase and passphrase for keypair derivation
/// Optionally skips validation of seed phrase
/// Optionally confirms recovered public key
pub fn keypair_from_seed_phrase(
keypair_name: &str,
skip_validation: bool,
confirm_pubkey: bool,
) -> Result<Keypair, Box<dyn error::Error>> {
let seed_phrase = prompt_password_stderr(&format!("[{}] seed phrase: ", keypair_name))?;
let seed_phrase = seed_phrase.trim();
let passphrase_prompt = format!(
"[{}] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue: ",
keypair_name,
);
let keypair = if skip_validation {
let passphrase = prompt_passphrase(&passphrase_prompt)?;
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
} else {
let sanitized = sanitize_seed_phrase(seed_phrase);
let mnemonic = Mnemonic::from_phrase(sanitized, Language::English)?;
let passphrase = prompt_passphrase(&passphrase_prompt)?;
let seed = Seed::new(&mnemonic, &passphrase);
keypair_from_seed(seed.as_bytes())?
};
if confirm_pubkey {
let pubkey = Pubkey::new(keypair.public.as_ref());
print!("Recovered pubkey `{:?}`. Continue? (y/n): ", pubkey);
let _ignored = stdout().flush();
let mut input = String::new();
stdin().read_line(&mut input).expect("Unexpected input");
if input.to_lowercase().trim() != "y" {
println!("Exiting");
exit(1);
}
}
Ok(keypair)
}
/// Checks CLI arguments to determine whether a keypair should be:
/// - inputted securely via stdin,
/// - read in from a file,
/// - or newly generated
pub fn keypair_input(
matches: &clap::ArgMatches,
keypair_name: &str,
) -> Result<KeypairWithSource, Box<dyn error::Error>> {
let ask_seed_phrase_matches =
values_t!(matches.values_of(ASK_SEED_PHRASE_ARG.name), String).unwrap_or_default();
let keypair_match_name = keypair_name.replace('-', "_");
if ask_seed_phrase_matches
.iter()
.any(|s| s.as_str() == keypair_name)
{
if matches.value_of(keypair_match_name).is_some() {
clap::Error::with_description(
&format!(
"`--{} {}` cannot be used with `{} <PATH>`",
ASK_SEED_PHRASE_ARG.long, keypair_name, keypair_name
),
clap::ErrorKind::ArgumentConflict,
)
.exit();
}
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
keypair_from_seed_phrase(keypair_name, skip_validation, true)
.map(|keypair| KeypairWithSource::new(keypair, Source::SeedPhrase))
} else if let Some(keypair_file) = matches.value_of(keypair_match_name) {
read_keypair_file(keypair_file).map(|keypair| KeypairWithSource::new(keypair, Source::File))
} else {
Ok(KeypairWithSource::new(Keypair::new(), Source::Generated))
}
}
fn sanitize_seed_phrase(seed_phrase: &str) -> String {
seed_phrase
.split_whitespace()
.collect::<Vec<&str>>()
.join(" ")
}
#[cfg(test)]
mod tests {
use super::*;
use clap::ArgMatches;
#[test]
fn test_keypair_input() {
let arg_matches = ArgMatches::default();
let KeypairWithSource { source, .. } = keypair_input(&arg_matches, "").unwrap();
assert_eq!(source, Source::Generated);
}
#[test]
fn test_sanitize_seed_phrase() {
let seed_phrase = " Mary had\ta\u{2009}little \n\t lamb";
assert_eq!(
"Mary had a little lamb".to_owned(),
sanitize_seed_phrase(seed_phrase)
);
}
}

clap-utils/src/lib.rs Normal file

@ -0,0 +1,28 @@
#[macro_export]
macro_rules! version {
() => {
&*format!(
"{}{}",
env!("CARGO_PKG_VERSION"),
if option_env!("CI_TAG").unwrap_or("").is_empty() {
format!(
" [channel={} commit={}]",
option_env!("CHANNEL").unwrap_or("unknown"),
option_env!("CI_COMMIT").unwrap_or("unknown"),
)
} else {
"".to_string()
},
)
};
}
pub struct ArgConstant<'a> {
pub long: &'a str,
pub name: &'a str,
pub help: &'a str,
}
pub mod input_parsers;
pub mod input_validators;
pub mod keypair;


@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.20.0"
version = "0.21.1"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -11,38 +11,41 @@ homepage = "https://solana.com/"
[dependencies]
bincode = "1.2.0"
bs58 = "0.3.0"
chrono = { version = "0.4.9", features = ["serde"] }
chrono = { version = "0.4.10", features = ["serde"] }
clap = "2.33.0"
criterion-stats = "0.3.0"
ctrlc = { version = "3.1.3", features = ["termination"] }
console = "0.9.0"
console = "0.9.1"
dirs = "2.0.2"
lazy_static = "1.4.0"
log = "0.4.8"
indicatif = "0.13.0"
num-traits = "0.2"
pretty-hex = "0.1.1"
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }
serde = "1.0.101"
serde_derive = "1.0.101"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
solana-client = { path = "../client", version = "0.20.0" }
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
solana-drone = { path = "../drone", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.20.0" }
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-config-program = { path = "../programs/config", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-stake-program = { path = "../programs/stake", version = "0.21.1" }
solana-storage-program = { path = "../programs/storage", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-vote-signer = { path = "../vote-signer", version = "0.21.1" }
url = "2.1.0"
[dev-dependencies]
solana-core = { path = "../core", version = "0.20.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
solana-core = { path = "../core", version = "0.21.1" }
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
tempfile = "3.1.0"
[[bin]]
name = "solana"

File diff suppressed because it is too large


@ -7,16 +7,21 @@ use crate::{
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use serde_json::Value;
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::{rpc_client::RpcClient, rpc_request::RpcVoteAccountInfo};
use solana_sdk::{
clock,
clock::{self, Slot},
commitment_config::CommitmentConfig,
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
system_transaction,
};
use std::{
collections::VecDeque,
net::SocketAddr,
thread::sleep,
time::{Duration, Instant},
};
@ -31,20 +36,70 @@ pub trait ClusterQuerySubCommands {
impl ClusterQuerySubCommands for App<'_, '_> {
fn cluster_query_subcommands(self) -> Self {
self.subcommand(
SubCommand::with_name("catchup")
.about("Wait for a validator to catch up to the cluster")
.arg(
Arg::with_name("node_pubkey")
.index(1)
.takes_value(true)
.value_name("PUBKEY")
.validator(is_pubkey_or_keypair)
.required(true)
.help("Identity pubkey of the validator"),
),
)
.subcommand(
SubCommand::with_name("cluster-version")
.about("Get the version of the cluster entrypoint"),
)
.subcommand(SubCommand::with_name("fees").about("Display current cluster fees"))
.subcommand(SubCommand::with_name("get-block-time")
.about("Get estimated production time of a block")
.arg(
Arg::with_name("slot")
.index(1)
.takes_value(true)
.value_name("SLOT")
.required(true)
.help("Slot number of the block to query")
)
)
.subcommand(
SubCommand::with_name("get-epoch-info")
.about("Get information about the current epoch"),
.about("Get information about the current epoch")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return information at maximum-lockout commitment level",
),
),
)
.subcommand(
SubCommand::with_name("get-genesis-blockhash").about("Get the genesis blockhash"),
SubCommand::with_name("get-genesis-hash").about("Get the genesis hash"),
)
.subcommand(SubCommand::with_name("get-slot").about("Get current slot"))
.subcommand(
SubCommand::with_name("get-transaction-count").about("Get current transaction count"),
SubCommand::with_name("get-slot").about("Get current slot")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return slot at maximum-lockout commitment level",
),
),
)
.subcommand(
SubCommand::with_name("get-transaction-count").about("Get current transaction count")
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Return count at maximum-lockout commitment level",
),
),
)
.subcommand(
SubCommand::with_name("ping")
@ -66,16 +121,36 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.takes_value(true)
.help("Stop after submitting count transactions"),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.value_name("NUMBER")
.takes_value(true)
.default_value("1")
.help("Number of lamports to transfer for each transaction"),
)
.arg(
Arg::with_name("timeout")
.short("t")
.long("timeout")
.value_name("SECONDS")
.takes_value(true)
.default_value("10")
.default_value("15")
.help("Wait up to timeout seconds for transaction confirmation"),
)
.arg(
Arg::with_name("confirmed")
.long("confirmed")
.takes_value(false)
.help(
"Wait until the transaction is confirmed at maximum-lockout commitment level",
),
),
)
.subcommand(
SubCommand::with_name("show-gossip")
.about("Show the current gossip network nodes"),
)
.subcommand(
SubCommand::with_name("show-validators")
.about("Show information about the current validators")
@ -89,7 +164,16 @@ impl ClusterQuerySubCommands for App<'_, '_> {
}
}
pub fn parse_catchup(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap();
Ok(CliCommandInfo {
command: CliCommand::Catchup { node_pubkey },
require_keypair: false,
})
}
pub fn parse_cluster_ping(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let lamports = value_t_or_exit!(matches, "lamports", u64);
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
let count = if matches.is_present("count") {
Some(value_t_or_exit!(matches, "count", u64))
@ -97,16 +181,67 @@ pub fn parse_cluster_ping(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Cl
None
};
let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
Ok(CliCommandInfo {
command: CliCommand::Ping {
lamports,
interval,
count,
timeout,
commitment_config,
},
require_keypair: true,
})
}
pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let slot = value_t_or_exit!(matches, "slot", u64);
Ok(CliCommandInfo {
command: CliCommand::GetBlockTime { slot },
require_keypair: false,
})
}
pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
Ok(CliCommandInfo {
command: CliCommand::GetEpochInfo { commitment_config },
require_keypair: false,
})
}
pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
Ok(CliCommandInfo {
command: CliCommand::GetSlot { commitment_config },
require_keypair: false,
})
}
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let commitment_config = if matches.is_present("confirmed") {
CommitmentConfig::default()
} else {
CommitmentConfig::recent()
};
Ok(CliCommandInfo {
command: CliCommand::GetTransactionCount { commitment_config },
require_keypair: false,
})
}
pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let use_lamports_unit = matches.is_present("lamports");
@ -116,21 +251,76 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
})
}
pub fn process_cluster_version(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult {
let remote_version: Value = serde_json::from_str(&rpc_client.get_version()?)?;
println!(
"{} {}",
style("Cluster versions from:").bold(),
config.json_rpc_url
);
if let Some(versions) = remote_version.as_object() {
for (key, value) in versions.iter() {
if let Some(value_string) = value.as_str() {
println_name_value(&format!("* {}:", key), &value_string);
}
/// Creates a new progress bar for processing that will take an unknown amount of time
fn new_spinner_progress_bar() -> ProgressBar {
let progress_bar = ProgressBar::new(42);
progress_bar
.set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
progress_bar.enable_steady_tick(100);
progress_bar
}
pub fn process_catchup(rpc_client: &RpcClient, node_pubkey: &Pubkey) -> ProcessResult {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
let rpc_addr = cluster_nodes
.iter()
.find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
.ok_or_else(|| format!("Contact information not found for {}", node_pubkey))?
.rpc
.ok_or_else(|| format!("RPC service not found for {}", node_pubkey))?;
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
let node_client = RpcClient::new_socket(rpc_addr);
let mut previous_rpc_slot = std::u64::MAX;
let mut previous_slot_distance = 0;
let sleep_interval = 5;
loop {
let rpc_slot = rpc_client.get_slot_with_commitment(CommitmentConfig::recent())?;
let node_slot = node_client.get_slot_with_commitment(CommitmentConfig::recent())?;
if node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
progress_bar.finish_and_clear();
return Ok(format!(
"{} has caught up (us:{} them:{})",
node_pubkey, node_slot, rpc_slot,
));
}
let slot_distance = rpc_slot as i64 - node_slot as i64;
progress_bar.set_message(&format!(
"Validator is {} slots away (us:{} them:{}){}",
slot_distance,
node_slot,
rpc_slot,
if previous_rpc_slot == std::u64::MAX {
"".to_string()
} else {
let slots_per_second =
(previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
format!(
" and {} at {:.1} slots/second",
if slots_per_second < 0.0 {
"falling behind"
} else {
"gaining"
},
slots_per_second,
)
}
));
sleep(Duration::from_secs(sleep_interval as u64));
previous_rpc_slot = rpc_slot;
previous_slot_distance = slot_distance;
}
Ok("".to_string())
}
pub fn process_cluster_version(rpc_client: &RpcClient) -> ProcessResult {
let remote_version = rpc_client.get_version()?;
Ok(remote_version.solana_core)
}
pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
@ -142,8 +332,16 @@ pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
))
}
pub fn process_get_epoch_info(rpc_client: &RpcClient) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info()?;
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
let timestamp = rpc_client.get_block_time(slot)?;
Ok(timestamp.to_string())
}
pub fn process_get_epoch_info(
rpc_client: &RpcClient,
commitment_config: &CommitmentConfig,
) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
println!();
println_name_value("Current epoch:", &epoch_info.epoch.to_string());
println_name_value("Current slot:", &epoch_info.absolute_slot.to_string());
@ -171,27 +369,36 @@ pub fn process_get_epoch_info(rpc_client: &RpcClient) -> ProcessResult {
Ok("".to_string())
}
pub fn process_get_genesis_blockhash(rpc_client: &RpcClient) -> ProcessResult {
let genesis_blockhash = rpc_client.get_genesis_blockhash()?;
Ok(genesis_blockhash.to_string())
pub fn process_get_genesis_hash(rpc_client: &RpcClient) -> ProcessResult {
let genesis_hash = rpc_client.get_genesis_hash()?;
Ok(genesis_hash.to_string())
}
pub fn process_get_slot(rpc_client: &RpcClient) -> ProcessResult {
let slot = rpc_client.get_slot()?;
pub fn process_get_slot(
rpc_client: &RpcClient,
commitment_config: &CommitmentConfig,
) -> ProcessResult {
let slot = rpc_client.get_slot_with_commitment(commitment_config.clone())?;
Ok(slot.to_string())
}
pub fn process_get_transaction_count(rpc_client: &RpcClient) -> ProcessResult {
let transaction_count = rpc_client.get_transaction_count()?;
pub fn process_get_transaction_count(
rpc_client: &RpcClient,
commitment_config: &CommitmentConfig,
) -> ProcessResult {
let transaction_count =
rpc_client.get_transaction_count_with_commitment(commitment_config.clone())?;
Ok(transaction_count.to_string())
}
pub fn process_ping(
rpc_client: &RpcClient,
config: &CliConfig,
lamports: u64,
interval: &Duration,
count: &Option<u64>,
timeout: &Duration,
commitment_config: &CommitmentConfig,
) -> ProcessResult {
let to = Keypair::new().pubkey();
@ -214,14 +421,18 @@ pub fn process_ping(
let (recent_blockhash, fee_calculator) = rpc_client.get_new_blockhash(&last_blockhash)?;
last_blockhash = recent_blockhash;
let transaction = system_transaction::transfer(&config.keypair, &to, 1, recent_blockhash);
let transaction =
system_transaction::transfer(&config.keypair, &to, lamports, recent_blockhash);
check_account_for_fee(rpc_client, config, &fee_calculator, &transaction.message)?;
match rpc_client.send_transaction(&transaction) {
Ok(signature) => {
let transaction_sent = Instant::now();
loop {
let signature_status = rpc_client.get_signature_status(&signature)?;
let signature_status = rpc_client.get_signature_status_with_commitment(
&signature,
commitment_config.clone(),
)?;
let elapsed_time = Instant::now().duration_since(transaction_sent);
if let Some(transaction_status) = signature_status {
match transaction_status {
@ -229,8 +440,8 @@ pub fn process_ping(
let elapsed_time_millis = elapsed_time.as_millis() as u64;
confirmation_time.push_back(elapsed_time_millis);
println!(
"{}1 lamport transferred: seq={:<3} time={:>4}ms signature={}",
CHECK_MARK, seq, elapsed_time_millis, signature
"{}{} lamport(s) transferred: seq={:<3} time={:>4}ms signature={}",
CHECK_MARK, lamports, seq, elapsed_time_millis, signature
);
confirmed_count += 1;
}
@ -255,8 +466,7 @@ pub fn process_ping(
// Sleep for half a slot
if signal_receiver
.recv_timeout(Duration::from_millis(
500 * solana_sdk::clock::DEFAULT_TICKS_PER_SLOT
/ solana_sdk::clock::DEFAULT_TICKS_PER_SECOND,
500 * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
))
.is_ok()
{
@ -302,6 +512,42 @@ pub fn process_ping(
Ok("".to_string())
}
pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
fn format_port(addr: Option<SocketAddr>) -> String {
addr.map(|addr| addr.port().to_string())
.unwrap_or_else(|| "none".to_string())
}
let s: Vec<_> = cluster_nodes
.iter()
.map(|node| {
format!(
"{:15} | {:44} | {:6} | {:5} | {:5}",
node.gossip
.map(|addr| addr.ip().to_string())
.unwrap_or_else(|| "none".to_string()),
node.pubkey,
format_port(node.gossip),
format_port(node.tpu),
format_port(node.rpc),
)
})
.collect();
Ok(format!(
"IP Address | Node identifier \
| Gossip | TPU | RPC\n\
----------------+----------------------------------------------+\
--------+-------+-------\n\
{}\n\
Nodes: {}",
s.join("\n"),
s.len(),
))
}
pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult {
let vote_accounts = rpc_client.get_vote_accounts()?;
let total_active_stake = vote_accounts
@ -370,7 +616,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
}
}
println!(
"{} {:<44} {:<44} {:>3} ({:>4.1}%) {:>10} {:>11} {:>11}",
"{} {:<44} {:<44} {:>3}% {:>10} {:>11} {:>11}",
if delinquent {
WARNING.to_string()
} else {
@ -379,7 +625,6 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
vote_account.node_pubkey,
vote_account.vote_pubkey,
vote_account.commission,
f64::from(vote_account.commission) * 100.0 / f64::from(std::u8::MAX),
non_zero_or_dash(vote_account.last_vote),
non_zero_or_dash(vote_account.root_slot),
if vote_account.activated_stake > 0 {
@ -433,24 +678,40 @@ mod tests {
}
);
let slot = 100;
let test_get_block_time = test_commands.clone().get_matches_from(vec![
"test",
"get-block-time",
&slot.to_string(),
]);
assert_eq!(
parse_command(&test_get_block_time).unwrap(),
CliCommandInfo {
command: CliCommand::GetBlockTime { slot },
require_keypair: false
}
);
let test_get_epoch_info = test_commands
.clone()
.get_matches_from(vec!["test", "get-epoch-info"]);
assert_eq!(
parse_command(&test_get_epoch_info).unwrap(),
CliCommandInfo {
command: CliCommand::GetEpochInfo,
command: CliCommand::GetEpochInfo {
commitment_config: CommitmentConfig::recent(),
},
require_keypair: false
}
);
let test_get_genesis_blockhash = test_commands
let test_get_genesis_hash = test_commands
.clone()
.get_matches_from(vec!["test", "get-genesis-blockhash"]);
.get_matches_from(vec!["test", "get-genesis-hash"]);
assert_eq!(
parse_command(&test_get_genesis_blockhash).unwrap(),
parse_command(&test_get_genesis_hash).unwrap(),
CliCommandInfo {
command: CliCommand::GetGenesisBlockhash,
command: CliCommand::GetGenesisHash,
require_keypair: false
}
);
@ -461,7 +722,9 @@ mod tests {
assert_eq!(
parse_command(&test_get_slot).unwrap(),
CliCommandInfo {
command: CliCommand::GetSlot,
command: CliCommand::GetSlot {
commitment_config: CommitmentConfig::recent(),
},
require_keypair: false
}
);
@ -472,25 +735,36 @@ mod tests {
assert_eq!(
parse_command(&test_transaction_count).unwrap(),
CliCommandInfo {
command: CliCommand::GetTransactionCount,
command: CliCommand::GetTransactionCount {
commitment_config: CommitmentConfig::recent(),
},
require_keypair: false
}
);
let test_ping = test_commands
.clone()
.get_matches_from(vec!["test", "ping", "-i", "1", "-c", "2", "-t", "3"]);
let test_ping = test_commands.clone().get_matches_from(vec![
"test",
"ping",
"-i",
"1",
"-c",
"2",
"-t",
"3",
"--confirmed",
]);
assert_eq!(
parse_command(&test_ping).unwrap(),
CliCommandInfo {
command: CliCommand::Ping {
lamports: 1,
interval: Duration::from_secs(1),
count: Some(2),
timeout: Duration::from_secs(3),
commitment_config: CommitmentConfig::default(),
},
require_keypair: true
}
);
}
// TODO: Add process tests
}


@ -16,14 +16,14 @@ lazy_static! {
#[derive(Serialize, Deserialize, Default, Debug, PartialEq)]
pub struct Config {
pub url: String,
pub keypair: String,
pub keypair_path: String,
}
impl Config {
pub fn new(url: &str, keypair: &str) -> Self {
pub fn new(url: &str, keypair_path: &str) -> Self {
Self {
url: url.to_string(),
keypair: keypair.to_string(),
keypair_path: keypair_path.to_string(),
}
}


@ -1,4 +1,5 @@
use console::style;
use solana_sdk::transaction::Transaction;
// Pretty print a "name value"
pub fn println_name_value(name: &str, value: &str) {
@ -22,3 +23,14 @@ pub fn println_name_value_or(name: &str, value: &str, default_value: &str) {
println!("{} {}", style(name).bold(), style(value));
};
}
pub fn println_signers(tx: &Transaction) {
println!();
println!("Blockhash: {}", tx.message.recent_blockhash);
println!("Signers (Pubkey=Signature):");
tx.signatures
.iter()
.zip(tx.message.account_keys.clone())
.for_each(|(signature, pubkey)| println!(" {:?}={:?}", pubkey, signature));
println!();
}


@ -1,36 +0,0 @@
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::read_keypair_file;
// Return an error if a pubkey cannot be parsed.
pub fn is_pubkey(string: String) -> Result<(), String> {
match string.parse::<Pubkey>() {
Ok(_) => Ok(()),
Err(err) => Err(format!("{:?}", err)),
}
}
// Return an error if a keypair file cannot be parsed.
pub fn is_keypair(string: String) -> Result<(), String> {
read_keypair_file(&string)
.map(|_| ())
.map_err(|err| format!("{:?}", err))
}
// Return an error if string cannot be parsed as pubkey string or keypair file location
pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
}
// Return an error if a url cannot be parsed.
pub fn is_url(string: String) -> Result<(), String> {
match url::Url::parse(&string) {
Ok(url) => {
if url.has_host() {
Ok(())
} else {
Err("no host provided".to_string())
}
}
Err(err) => Err(format!("{:?}", err)),
}
}


@ -5,8 +5,6 @@ pub mod cli;
pub mod cluster_query;
pub mod config;
pub mod display;
pub mod input_parsers;
pub mod input_validators;
pub mod stake;
pub mod storage;
pub mod validator_info;


@ -1,34 +1,45 @@
use clap::{crate_description, crate_name, crate_version, Arg, ArgGroup, ArgMatches, SubCommand};
use clap::{crate_description, crate_name, Arg, ArgGroup, ArgMatches, SubCommand};
use console::style;
use solana_clap_utils::{
input_validators::is_url,
keypair::{
self, keypair_input, KeypairWithSource, ASK_SEED_PHRASE_ARG,
SKIP_SEED_PHRASE_VALIDATION_ARG,
},
};
use solana_cli::{
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliError},
config::{self, Config},
display::{println_name_value, println_name_value_or},
input_validators::is_url,
};
use solana_sdk::signature::read_keypair_file;
use std::error;
fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
let parse_args = match matches.subcommand() {
("get", Some(subcommand_matches)) => {
if let Some(config_file) = matches.value_of("config_file") {
let default_cli_config = CliConfig::default();
let config = Config::load(config_file).unwrap_or_default();
if let Some(field) = subcommand_matches.value_of("specific_setting") {
let (value, default_value) = match field {
"url" => (config.url, default_cli_config.json_rpc_url),
"keypair" => (config.keypair, default_cli_config.keypair_path.unwrap()),
"url" => (config.url, CliConfig::default_json_rpc_url()),
"keypair" => (config.keypair_path, CliConfig::default_keypair_path()),
_ => unreachable!(),
};
println_name_value_or(&format!("* {}:", field), &value, &default_value);
} else {
println_name_value("Wallet Config:", config_file);
println_name_value_or("* url:", &config.url, &default_cli_config.json_rpc_url);
println_name_value_or(
"* url:",
&config.url,
&CliConfig::default_json_rpc_url(),
);
println_name_value_or(
"* keypair:",
&config.keypair,
&default_cli_config.keypair_path.unwrap(),
&config.keypair_path,
&CliConfig::default_keypair_path(),
);
}
} else {
@ -46,12 +57,12 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
config.url = url.to_string();
}
if let Some(keypair) = subcommand_matches.value_of("keypair") {
config.keypair = keypair.to_string();
config.keypair_path = keypair.to_string();
}
config.save(config_file)?;
println_name_value("Wallet Config Updated:", config_file);
println_name_value("* url:", &config.url);
println_name_value("* keypair:", &config.keypair);
println_name_value("* keypair:", &config.keypair_path);
} else {
println!(
"{} Either provide the `--config` arg or ensure home directory exists to use the default config location",
@ -86,101 +97,138 @@ pub fn parse_args(matches: &ArgMatches<'_>) -> Result<CliConfig, Box<dyn error::
} = parse_command(&matches)?;
let (keypair, keypair_path) = if require_keypair {
let keypair_path = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap().to_string()
} else if config.keypair != "" {
config.keypair
} else {
let default = CliConfig::default();
let maybe_keypair_path = default.keypair_path.unwrap();
if !std::path::Path::new(&maybe_keypair_path).exists() {
return Err(CliError::KeypairFileNotFound(
"Generate a new keypair with `solana-keygen new`".to_string(),
)
.into());
let KeypairWithSource { keypair, source } = keypair_input(&matches, "keypair")?;
match source {
keypair::Source::File => (
keypair,
Some(matches.value_of("keypair").unwrap().to_string()),
),
keypair::Source::SeedPhrase => (keypair, None),
keypair::Source::Generated => {
let keypair_path = if config.keypair_path != "" {
config.keypair_path
} else {
let default_keypair_path = CliConfig::default_keypair_path();
if !std::path::Path::new(&default_keypair_path).exists() {
return Err(CliError::KeypairFileNotFound(
"Generate a new keypair with `solana-keygen new`".to_string(),
)
.into());
}
default_keypair_path
};
let keypair = read_keypair_file(&keypair_path).or_else(|err| {
Err(CliError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, keypair_path
)))
})?;
(keypair, Some(keypair_path))
}
maybe_keypair_path
};
let keypair = read_keypair_file(&keypair_path).or_else(|err| {
Err(CliError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, keypair_path
)))
})?;
(keypair, Some(keypair_path.to_string()))
}
} else {
let default = CliConfig::default();
(default.keypair, None)
};
let print_header = !matches.is_present("no_header");
Ok(CliConfig {
command,
json_rpc_url,
keypair,
keypair_path,
rpc_client: None,
print_header,
})
}
fn main() -> Result<(), Box<dyn error::Error>> {
solana_logger::setup();
let matches = app(crate_name!(), crate_description!(), crate_version!())
.arg({
let arg = Arg::with_name("config_file")
.short("C")
.long("config")
.value_name("PATH")
.takes_value(true)
.global(true)
.help("Configuration file to use");
if let Some(ref config_file) = *config::CONFIG_FILE {
arg.default_value(&config_file)
} else {
arg
}
})
.arg(
Arg::with_name("json_rpc_url")
.short("u")
.long("url")
.value_name("URL")
.takes_value(true)
.global(true)
.validator(is_url)
.help("JSON RPC URL for the solana cluster"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.global(true)
.takes_value(true)
.help("/path/to/id.json"),
)
.subcommand(
SubCommand::with_name("get")
.about("Get cli config settings")
.arg(
Arg::with_name("specific_setting")
.index(1)
.value_name("CONFIG_FIELD")
.takes_value(true)
.possible_values(&["url", "keypair"])
.help("Return a specific config setting"),
),
)
.subcommand(
SubCommand::with_name("set")
.about("Set a cli config setting")
.group(
ArgGroup::with_name("config_settings")
.args(&["json_rpc_url", "keypair"])
.multiple(true)
.required(true),
),
)
.get_matches();
let matches = app(
crate_name!(),
crate_description!(),
solana_clap_utils::version!(),
)
.arg({
let arg = Arg::with_name("config_file")
.short("C")
.long("config")
.value_name("PATH")
.takes_value(true)
.global(true)
.help("Configuration file to use");
if let Some(ref config_file) = *config::CONFIG_FILE {
arg.default_value(&config_file)
} else {
arg
}
})
.arg(
Arg::with_name("json_rpc_url")
.short("u")
.long("url")
.value_name("URL")
.takes_value(true)
.global(true)
.validator(is_url)
.help("JSON RPC URL for the solana cluster"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.global(true)
.takes_value(true)
.help("/path/to/id.json"),
)
.arg(
Arg::with_name("no_header")
.long("no-header")
.global(true)
.help("Disable information header"),
)
.arg(
Arg::with_name(ASK_SEED_PHRASE_ARG.name)
.long(ASK_SEED_PHRASE_ARG.long)
.value_name("KEYPAIR NAME")
.global(true)
.takes_value(true)
.possible_values(&["keypair"])
.help(ASK_SEED_PHRASE_ARG.help),
)
.arg(
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
.global(true)
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
)
.subcommand(
SubCommand::with_name("get")
.about("Get cli config settings")
.arg(
Arg::with_name("specific_setting")
.index(1)
.value_name("CONFIG_FIELD")
.takes_value(true)
.possible_values(&["url", "keypair"])
.help("Return a specific config setting"),
),
)
.subcommand(
SubCommand::with_name("set")
.about("Set a cli config setting")
.group(
ArgGroup::with_name("config_settings")
.args(&["json_rpc_url", "keypair"])
.multiple(true)
.required(true),
),
)
.get_matches();
if parse_settings(&matches)? {
let config = parse_args(&matches)?;


@ -1,28 +1,31 @@
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult,
},
input_parsers::*,
input_validators::*,
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
get_blockhash_fee_calculator, log_instruction_custom_error, replace_signatures, return_signers,
CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use console::style;
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::rpc_client::RpcClient;
use solana_sdk::signature::{Keypair, Signature};
use solana_sdk::{
account_utils::State,
hash::Hash,
pubkey::Pubkey,
signature::KeypairUtil,
system_instruction::SystemError,
sysvar::stake_history::{self, StakeHistory},
sysvar::{
stake_history::{self, StakeHistory},
Sysvar,
},
transaction::Transaction,
};
use solana_stake_api::{
use solana_stake_program::stake_state::Meta;
use solana_stake_program::{
stake_instruction::{self, StakeError},
stake_state::{Authorized, Lockup, StakeAuthorize, StakeState},
};
use solana_vote_api::vote_state::VoteState;
use solana_vote_program::vote_state::VoteState;
use std::ops::Deref;
pub trait StakeSubCommands {
@ -35,13 +38,13 @@ impl StakeSubCommands for App<'_, '_> {
SubCommand::with_name("create-stake-account")
.about("Create a stake account")
.arg(
Arg::with_name("stake_account_pubkey")
Arg::with_name("stake_account")
.index(1)
.value_name("STAKE ACCOUNT")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Address of the stake account to fund (pubkey or keypair)")
.validator(is_keypair_or_ask_keyword)
.help("Keypair of the stake account to fund")
)
.arg(
Arg::with_name("amount")
@ -119,6 +122,29 @@ impl StakeSubCommands for App<'_, '_> {
.validator(is_pubkey_or_keypair)
.help("The vote account to which the stake will be delegated")
)
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
),
)
.subcommand(
SubCommand::with_name("stake-authorize-staker")
@ -175,6 +201,29 @@ impl StakeSubCommands for App<'_, '_> {
.required(true)
.help("Stake account to be deactivated.")
)
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
),
)
.subcommand(
SubCommand::with_name("withdraw-stake")
@ -269,8 +318,8 @@ impl StakeSubCommands for App<'_, '_> {
}
pub fn parse_stake_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let slot = value_of(&matches, "lockup").unwrap_or(0);
let stake_account = keypair_of(matches, "stake_account").unwrap();
let epoch = value_of(&matches, "lockup").unwrap_or(0);
let custodian = pubkey_of(matches, "custodian").unwrap_or_default();
let staker = pubkey_of(matches, "authorized_staker");
let withdrawer = pubkey_of(matches, "authorized_withdrawer");
@ -278,10 +327,10 @@ pub fn parse_stake_create_account(matches: &ArgMatches<'_>) -> Result<CliCommand
Ok(CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account_pubkey,
stake_account: stake_account.into(),
staker,
withdrawer,
lockup: Lockup { custodian, slot },
lockup: Lockup { custodian, epoch },
lamports,
},
require_keypair: true,
@ -292,10 +341,20 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let force = matches.is_present("force");
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
Ok(CliCommandInfo {
command: CliCommand::DelegateStake(stake_account_pubkey, vote_account_pubkey, force),
require_keypair: true,
command: CliCommand::DelegateStake {
stake_account_pubkey,
vote_account_pubkey,
force,
sign_only,
signers,
blockhash,
},
require_keypair: !sign_only,
})
}
@ -327,9 +386,17 @@ pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result<CliCommandI
pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
Ok(CliCommandInfo {
command: CliCommand::DeactivateStake(stake_account_pubkey),
require_keypair: true,
command: CliCommand::DeactivateStake {
stake_account_pubkey,
sign_only,
signers,
blockhash,
},
require_keypair: !sign_only,
})
}
@ -371,15 +438,16 @@ pub fn parse_show_stake_history(matches: &ArgMatches<'_>) -> Result<CliCommandIn
pub fn process_create_stake_account(
rpc_client: &RpcClient,
config: &CliConfig,
stake_account_pubkey: &Pubkey,
stake_account: &Keypair,
staker: &Option<Pubkey>,
withdrawer: &Option<Pubkey>,
lockup: &Lockup,
lamports: u64,
) -> ProcessResult {
let stake_account_pubkey = stake_account.pubkey();
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
(stake_account_pubkey, "stake_account_pubkey".to_string()),
(&stake_account_pubkey, "stake_account_pubkey".to_string()),
)?;
if rpc_client.get_account(&stake_account_pubkey).is_ok() {
@ -409,7 +477,7 @@ pub fn process_create_stake_account(
let ixs = stake_instruction::create_stake_account_with_lockup(
&config.keypair.pubkey(),
stake_account_pubkey,
&stake_account_pubkey,
&authorized,
lockup,
lamports,
@ -418,11 +486,12 @@ pub fn process_create_stake_account(
let mut tx = Transaction::new_signed_with_payer(
ixs,
Some(&config.keypair.pubkey()),
&[&config.keypair],
&[&config.keypair, stake_account],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
let result =
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, stake_account]);
log_instruction_custom_error::<SystemError>(result)
}
@ -460,8 +529,12 @@ pub fn process_deactivate_stake_account(
rpc_client: &RpcClient,
config: &CliConfig,
stake_account_pubkey: &Pubkey,
sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let (recent_blockhash, fee_calculator) =
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
let ixs = vec![stake_instruction::deactivate_stake(
stake_account_pubkey,
&config.keypair.pubkey(),
@ -472,9 +545,16 @@ pub fn process_deactivate_stake_account(
&[&config.keypair],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
if let Some(signers) = signers {
replace_signatures(&mut tx, &signers)?;
}
if sign_only {
return_signers(&tx)
} else {
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
}
pub fn process_withdraw_stake(
@ -533,7 +613,7 @@ pub fn process_show_stake_account(
use_lamports_unit: bool,
) -> ProcessResult {
let stake_account = rpc_client.get_account(stake_account_pubkey)?;
if stake_account.owner != solana_stake_api::id() {
if stake_account.owner != solana_stake_program::id() {
return Err(CliError::RpcRequestError(
format!("{:?} is not a stake account", stake_account_pubkey).to_string(),
)
@ -544,11 +624,16 @@ pub fn process_show_stake_account(
println!("authorized withdrawer: {}", authorized.staker);
}
fn show_lockup(lockup: &Lockup) {
println!("lockup slot: {}", lockup.slot);
println!("lockup epoch: {}", lockup.epoch);
println!("lockup custodian: {}", lockup.custodian);
}
match stake_account.state() {
Ok(StakeState::Stake(authorized, lockup, stake)) => {
Ok(StakeState::Stake(
Meta {
authorized, lockup, ..
},
stake,
)) => {
println!(
"total stake: {}",
build_balance_message(stake_account.lamports, use_lamports_unit, true)
@ -556,19 +641,23 @@ pub fn process_show_stake_account(
println!("credits observed: {}", stake.credits_observed);
println!(
"delegated stake: {}",
build_balance_message(stake.stake, use_lamports_unit, true)
build_balance_message(stake.delegation.stake, use_lamports_unit, true)
);
if stake.voter_pubkey != Pubkey::default() {
println!("delegated voter pubkey: {}", stake.voter_pubkey);
if stake.delegation.voter_pubkey != Pubkey::default() {
println!("delegated voter pubkey: {}", stake.delegation.voter_pubkey);
}
println!(
"stake activates starting from epoch: {}",
stake.activation_epoch
if stake.delegation.activation_epoch < std::u64::MAX {
stake.delegation.activation_epoch
} else {
0
}
);
if stake.deactivation_epoch < std::u64::MAX {
if stake.delegation.deactivation_epoch < std::u64::MAX {
println!(
"stake deactivates starting from epoch: {}",
stake.deactivation_epoch
stake.delegation.deactivation_epoch
);
}
show_authorized(&authorized);
@ -577,7 +666,9 @@ pub fn process_show_stake_account(
}
Ok(StakeState::RewardsPool) => Ok("Stake account is a rewards pool".to_string()),
Ok(StakeState::Uninitialized) => Ok("Stake account is uninitialized".to_string()),
Ok(StakeState::Initialized(authorized, lockup)) => {
Ok(StakeState::Initialized(Meta {
authorized, lockup, ..
})) => {
println!("Stake account is undelegated");
show_authorized(&authorized);
show_lockup(&lockup);
@ -630,6 +721,9 @@ pub fn process_delegate_stake(
stake_account_pubkey: &Pubkey,
vote_account_pubkey: &Pubkey,
force: bool,
sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
) -> ProcessResult {
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
@ -676,7 +770,8 @@ pub fn process_delegate_stake(
}
}
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let (recent_blockhash, fee_calculator) =
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
let ixs = vec![stake_instruction::delegate_stake(
stake_account_pubkey,
@ -690,45 +785,70 @@ pub fn process_delegate_stake(
&[&config.keypair],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
if let Some(signers) = signers {
replace_signatures(&mut tx, &signers)?;
}
if sign_only {
return_signers(&tx)
} else {
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cli::{app, parse_command};
use solana_sdk::signature::write_keypair;
use tempfile::NamedTempFile;
fn make_tmp_file() -> (String, NamedTempFile) {
let tmp_file = NamedTempFile::new().unwrap();
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
}
#[test]
fn test_parse_command() {
let test_commands = app("test", "desc", "version");
let pubkey = Pubkey::new_rand();
let pubkey_string = format!("{}", pubkey);
let (keypair_file, mut tmp_file) = make_tmp_file();
let stake_account_keypair = Keypair::new();
write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
let stake_account_pubkey = stake_account_keypair.pubkey();
let stake_account_string = stake_account_pubkey.to_string();
let test_authorize_staker = test_commands.clone().get_matches_from(vec![
"test",
"stake-authorize-staker",
&pubkey_string,
&pubkey_string,
&stake_account_string,
&stake_account_string,
]);
assert_eq!(
parse_command(&test_authorize_staker).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize(pubkey, pubkey, StakeAuthorize::Staker),
command: CliCommand::StakeAuthorize(
stake_account_pubkey,
stake_account_pubkey,
StakeAuthorize::Staker
),
require_keypair: true
}
);
let test_authorize_withdrawer = test_commands.clone().get_matches_from(vec![
"test",
"stake-authorize-withdrawer",
&pubkey_string,
&pubkey_string,
&stake_account_string,
&stake_account_string,
]);
assert_eq!(
parse_command(&test_authorize_withdrawer).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize(pubkey, pubkey, StakeAuthorize::Withdrawer),
command: CliCommand::StakeAuthorize(
stake_account_pubkey,
stake_account_pubkey,
StakeAuthorize::Withdrawer
),
require_keypair: true
}
);
@ -741,7 +861,7 @@ mod tests {
let test_create_stake_account = test_commands.clone().get_matches_from(vec![
"test",
"create-stake-account",
&pubkey_string,
&keypair_file,
"50",
"--authorized-staker",
&authorized_string,
@ -757,11 +877,11 @@ mod tests {
parse_command(&test_create_stake_account).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account_pubkey: pubkey,
stake_account: stake_account_keypair.into(),
staker: Some(authorized),
withdrawer: Some(authorized),
lockup: Lockup {
slot: 43,
epoch: 43,
custodian,
},
lamports: 50
@ -769,24 +889,29 @@ mod tests {
require_keypair: true
}
);
let (keypair_file, mut tmp_file) = make_tmp_file();
let stake_account_keypair = Keypair::new();
write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
let stake_account_pubkey = stake_account_keypair.pubkey();
let stake_account_string = stake_account_pubkey.to_string();
let test_create_stake_account2 = test_commands.clone().get_matches_from(vec![
"test",
"create-stake-account",
&pubkey_string,
&keypair_file,
"50",
"lamports",
]);
assert_eq!(
parse_command(&test_create_stake_account2).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStakeAccount {
stake_account_pubkey: pubkey,
stake_account: stake_account_keypair.into(),
staker: None,
withdrawer: None,
lockup: Lockup {
slot: 0,
custodian: Pubkey::default(),
},
lockup: Lockup::default(),
lamports: 50
},
require_keypair: true
@ -794,18 +919,25 @@ mod tests {
);
// Test DelegateStake Subcommand
let stake_pubkey = Pubkey::new_rand();
let stake_pubkey_string = stake_pubkey.to_string();
let vote_account_pubkey = Pubkey::new_rand();
let vote_account_string = vote_account_pubkey.to_string();
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
"delegate-stake",
&stake_pubkey_string,
&pubkey_string,
&stake_account_string,
&vote_account_string,
]);
assert_eq!(
parse_command(&test_delegate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake(stake_pubkey, pubkey, false),
command: CliCommand::DelegateStake {
stake_account_pubkey,
vote_account_pubkey,
force: false,
sign_only: false,
signers: None,
blockhash: None
},
require_keypair: true
}
);
@ -814,13 +946,124 @@ mod tests {
"test",
"delegate-stake",
"--force",
&stake_pubkey_string,
&pubkey_string,
&stake_account_string,
&vote_account_string,
]);
assert_eq!(
parse_command(&test_delegate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake(stake_pubkey, pubkey, true),
command: CliCommand::DelegateStake {
stake_account_pubkey,
vote_account_pubkey,
force: true,
sign_only: false,
signers: None,
blockhash: None
},
require_keypair: true
}
);
// Test Delegate Subcommand w/ Blockhash
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
"delegate-stake",
&stake_account_string,
&vote_account_string,
"--blockhash",
&blockhash_string,
]);
assert_eq!(
parse_command(&test_delegate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
vote_account_pubkey,
force: false,
sign_only: false,
signers: None,
blockhash: Some(blockhash)
},
require_keypair: true
}
);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
"delegate-stake",
&stake_account_string,
&vote_account_string,
"--sign-only",
]);
assert_eq!(
parse_command(&test_delegate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
vote_account_pubkey,
force: false,
sign_only: true,
signers: None,
blockhash: None
},
require_keypair: false
}
);
// Test Delegate Subcommand w/ signer
let key1 = Pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
"delegate-stake",
&stake_account_string,
&vote_account_string,
"--signer",
&signer1,
]);
assert_eq!(
parse_command(&test_delegate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
vote_account_pubkey,
force: false,
sign_only: false,
signers: Some(vec![(key1, sig1)]),
blockhash: None
},
require_keypair: true
}
);
// Test Delegate Subcommand w/ signers
let key2 = Pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2);
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
"test",
"delegate-stake",
&stake_account_string,
&vote_account_string,
"--signer",
&signer1,
"--signer",
&signer2,
]);
assert_eq!(
parse_command(&test_delegate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DelegateStake {
stake_account_pubkey,
vote_account_pubkey,
force: false,
sign_only: false,
signers: Some(vec![(key1, sig1), (key2, sig2)]),
blockhash: None
},
require_keypair: true
}
);
@ -829,8 +1072,8 @@ mod tests {
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-stake",
&stake_pubkey_string,
&pubkey_string,
&stake_account_string,
&stake_account_string,
"42",
"lamports",
]);
@ -838,7 +1081,7 @@ mod tests {
assert_eq!(
parse_command(&test_withdraw_stake).unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawStake(stake_pubkey, pubkey, 42),
command: CliCommand::WithdrawStake(stake_account_pubkey, stake_account_pubkey, 42),
require_keypair: true
}
);
@ -847,15 +1090,111 @@ mod tests {
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
"test",
"deactivate-stake",
&stake_pubkey_string,
&stake_account_string,
]);
assert_eq!(
parse_command(&test_deactivate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake(stake_pubkey),
command: CliCommand::DeactivateStake {
stake_account_pubkey,
sign_only: false,
signers: None,
blockhash: None
},
require_keypair: true
}
);
// Test Deactivate Subcommand w/ Blockhash
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
"test",
"deactivate-stake",
&stake_account_string,
"--blockhash",
&blockhash_string,
]);
assert_eq!(
parse_command(&test_deactivate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
sign_only: false,
signers: None,
blockhash: Some(blockhash)
},
require_keypair: true
}
);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
"test",
"deactivate-stake",
&stake_account_string,
"--sign-only",
]);
assert_eq!(
parse_command(&test_deactivate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
sign_only: true,
signers: None,
blockhash: None
},
require_keypair: false
}
);
// Test Deactivate Subcommand w/ signer
let key1 = Pubkey::new_rand();
let sig1 = Keypair::new().sign_message(&[0u8]);
let signer1 = format!("{}={}", key1, sig1);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
"test",
"deactivate-stake",
&stake_account_string,
"--signer",
&signer1,
]);
assert_eq!(
parse_command(&test_deactivate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
sign_only: false,
signers: Some(vec![(key1, sig1)]),
blockhash: None
},
require_keypair: true
}
);
// Test Deactivate Subcommand w/ signers
let key2 = Pubkey::new_rand();
let sig2 = Keypair::new().sign_message(&[0u8]);
let signer2 = format!("{}={}", key2, sig2);
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
"test",
"deactivate-stake",
&stake_account_string,
"--signer",
&signer1,
"--signer",
&signer2,
]);
assert_eq!(
parse_command(&test_deactivate_stake).unwrap(),
CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
sign_only: false,
signers: Some(vec![(key1, sig1), (key2, sig2)]),
blockhash: None
},
require_keypair: true
}
);
}
// TODO: Add process tests
}
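
For context, a hypothetical standalone parser (not the solana_clap_utils implementation, and assuming Pubkey and Signature both implement FromStr for their base58 text forms) illustrating the PUBKEY=BASE58_SIG format accepted by the new --signer flag:

use solana_sdk::{pubkey::Pubkey, signature::Signature};
use std::str::FromStr;

fn parse_signer_pair(s: &str) -> Result<(Pubkey, Signature), String> {
    // Split on the first '=': left is the signer's pubkey, right is its signature.
    let mut parts = s.splitn(2, '=');
    let pubkey_str = parts.next().ok_or_else(|| "missing pubkey".to_string())?;
    let sig_str = parts.next().ok_or_else(|| "missing signature".to_string())?;
    let pubkey = Pubkey::from_str(pubkey_str).map_err(|_| format!("bad pubkey: {}", pubkey_str))?;
    let sig = Signature::from_str(sig_str).map_err(|_| format!("bad signature: {}", sig_str))?;
    Ok((pubkey, sig))
}

A pair built the way the tests above build one, format!("{}={}", key1, sig1), should round-trip through such a parser.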


@ -1,18 +1,16 @@
use crate::{
cli::{
check_account_for_fee, check_unique_pubkeys, log_instruction_custom_error, CliCommand,
CliCommandInfo, CliConfig, CliError, ProcessResult,
},
input_parsers::*,
input_validators::*,
use crate::cli::{
check_account_for_fee, check_unique_pubkeys, log_instruction_custom_error, CliCommand,
CliCommandInfo, CliConfig, CliError, ProcessResult,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::rpc_client::RpcClient;
use solana_sdk::signature::Keypair;
use solana_sdk::{
account_utils::State, message::Message, pubkey::Pubkey, signature::KeypairUtil,
system_instruction::SystemError, transaction::Transaction,
};
use solana_storage_api::storage_instruction::{self, StorageAccountType};
use solana_storage_program::storage_instruction::{self, StorageAccountType};
pub trait StorageSubCommands {
fn storage_subcommands(self) -> Self;
@ -32,12 +30,12 @@ impl StorageSubCommands for App<'_, '_> {
.validator(is_pubkey_or_keypair),
)
.arg(
Arg::with_name("storage_account_pubkey")
Arg::with_name("storage_account")
.index(2)
.value_name("STORAGE ACCOUNT PUBKEY")
.value_name("STORAGE ACCOUNT")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair),
.validator(is_keypair_or_ask_keyword),
),
)
.subcommand(
@ -52,12 +50,12 @@ impl StorageSubCommands for App<'_, '_> {
.validator(is_pubkey_or_keypair),
)
.arg(
Arg::with_name("storage_account_pubkey")
Arg::with_name("storage_account")
.index(2)
.value_name("STORAGE ACCOUNT PUBKEY")
.value_name("STORAGE ACCOUNT")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair),
.validator(is_keypair_or_ask_keyword),
),
)
.subcommand(
@ -102,11 +100,11 @@ pub fn parse_storage_create_archiver_account(
matches: &ArgMatches<'_>,
) -> Result<CliCommandInfo, CliError> {
let account_owner = pubkey_of(matches, "storage_account_owner").unwrap();
let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap();
let storage_account = keypair_of(matches, "storage_account").unwrap();
Ok(CliCommandInfo {
command: CliCommand::CreateStorageAccount {
account_owner,
storage_account_pubkey,
storage_account: storage_account.into(),
account_type: StorageAccountType::Archiver,
},
require_keypair: true,
@ -117,11 +115,11 @@ pub fn parse_storage_create_validator_account(
matches: &ArgMatches<'_>,
) -> Result<CliCommandInfo, CliError> {
let account_owner = pubkey_of(matches, "storage_account_owner").unwrap();
let storage_account_pubkey = pubkey_of(matches, "storage_account_pubkey").unwrap();
let storage_account = keypair_of(matches, "storage_account").unwrap();
Ok(CliCommandInfo {
command: CliCommand::CreateStorageAccount {
account_owner,
storage_account_pubkey,
storage_account: storage_account.into(),
account_type: StorageAccountType::Validator,
},
require_keypair: true,
@ -154,9 +152,10 @@ pub fn process_create_storage_account(
rpc_client: &RpcClient,
config: &CliConfig,
account_owner: &Pubkey,
storage_account_pubkey: &Pubkey,
storage_account: &Keypair,
account_type: StorageAccountType,
) -> ProcessResult {
let storage_account_pubkey = storage_account.pubkey();
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
(
@ -168,13 +167,18 @@ pub fn process_create_storage_account(
let ixs = storage_instruction::create_storage_account(
&config.keypair.pubkey(),
&account_owner,
storage_account_pubkey,
&storage_account_pubkey,
1,
account_type,
);
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], ixs, recent_blockhash);
let mut tx = Transaction::new_signed_instructions(
&[&config.keypair, &storage_account],
ixs,
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
let result =
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, &storage_account]);
log_instruction_custom_error::<SystemError>(result)
}
@ -204,14 +208,14 @@ pub fn process_show_storage_account(
) -> ProcessResult {
let account = rpc_client.get_account(storage_account_pubkey)?;
if account.owner != solana_storage_api::id() {
if account.owner != solana_storage_program::id() {
return Err(CliError::RpcRequestError(
format!("{:?} is not a storage account", storage_account_pubkey).to_string(),
)
.into());
}
use solana_storage_api::storage_contract::StorageContract;
use solana_storage_program::storage_contract::StorageContract;
let storage_contract: StorageContract = account.state().map_err(|err| {
CliError::RpcRequestError(
format!("Unable to deserialize storage account: {:?}", err).to_string(),
@ -226,45 +230,60 @@ pub fn process_show_storage_account(
mod tests {
use super::*;
use crate::cli::{app, parse_command};
use solana_sdk::signature::write_keypair;
use tempfile::NamedTempFile;
fn make_tmp_file() -> (String, NamedTempFile) {
let tmp_file = NamedTempFile::new().unwrap();
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
}
#[test]
fn test_parse_command() {
let test_commands = app("test", "desc", "version");
let pubkey = Pubkey::new_rand();
let pubkey_string = pubkey.to_string();
let storage_account_pubkey = Pubkey::new_rand();
let storage_account_string = storage_account_pubkey.to_string();
let (keypair_file, mut tmp_file) = make_tmp_file();
let storage_account_keypair = Keypair::new();
write_keypair(&storage_account_keypair, tmp_file.as_file_mut()).unwrap();
let test_create_archiver_storage_account = test_commands.clone().get_matches_from(vec![
"test",
"create-archiver-storage-account",
&pubkey_string,
&storage_account_string,
&keypair_file,
]);
assert_eq!(
parse_command(&test_create_archiver_storage_account).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStorageAccount {
account_owner: pubkey,
storage_account_pubkey,
storage_account: storage_account_keypair.into(),
account_type: StorageAccountType::Archiver,
},
require_keypair: true
}
);
let (keypair_file, mut tmp_file) = make_tmp_file();
let storage_account_keypair = Keypair::new();
write_keypair(&storage_account_keypair, tmp_file.as_file_mut()).unwrap();
let storage_account_pubkey = storage_account_keypair.pubkey();
let storage_account_string = storage_account_pubkey.to_string();
let test_create_validator_storage_account = test_commands.clone().get_matches_from(vec![
"test",
"create-validator-storage-account",
&pubkey_string,
&storage_account_string,
&keypair_file,
]);
assert_eq!(
parse_command(&test_create_validator_storage_account).unwrap(),
CliCommandInfo {
command: CliCommand::CreateStorageAccount {
account_owner: pubkey,
storage_account_pubkey,
storage_account: storage_account_keypair.into(),
account_type: StorageAccountType::Validator,
},
require_keypair: true
@ -288,5 +307,4 @@ mod tests {
}
);
}
// TODO: Add process tests
}
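
For context, a sketch of the temp-file keypair pattern the new tests rely on (tempfile crate assumed), which keeps the NamedTempFile alive so its path can be handed to a keypair-taking CLI argument:

use solana_sdk::signature::{write_keypair, Keypair};
use tempfile::NamedTempFile;

fn make_tmp_keypair_file() -> (String, Keypair, NamedTempFile) {
    let mut tmp_file = NamedTempFile::new().unwrap();
    let keypair = Keypair::new();
    // Persist the keypair so its path can stand in for an on-disk id.json.
    write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
    let path = tmp_file.path().to_str().unwrap().to_string();
    // Returning the NamedTempFile prevents the file from being deleted early.
    (path, keypair, tmp_file)
}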


@ -1,37 +1,35 @@
use crate::{
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
display::println_name_value,
input_parsers::pubkey_of,
input_validators::{is_pubkey, is_url},
};
use bincode::deserialize;
use clap::{App, Arg, ArgMatches, SubCommand};
use reqwest::Client;
use serde_derive::{Deserialize, Serialize};
use serde_json::{Map, Value};
use solana_clap_utils::{
input_parsers::pubkey_of,
input_validators::{is_pubkey, is_url},
};
use solana_client::rpc_client::RpcClient;
use solana_config_api::{config_instruction, get_config_data, ConfigKeys, ConfigState};
use solana_sdk::account::Account;
use solana_sdk::message::Message;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::transaction::Transaction;
use solana_config_program::{config_instruction, get_config_data, ConfigKeys, ConfigState};
use solana_sdk::{
account::Account,
commitment_config::CommitmentConfig,
message::Message,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
transaction::Transaction,
};
use std::error;
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
pub const MAX_VALIDATOR_INFO: u64 = 576;
// Config account key: Va1idator1nfo111111111111111111111111111111
pub const REGISTER_CONFIG_KEY: [u8; 32] = [
7, 81, 151, 1, 116, 72, 242, 172, 93, 194, 60, 158, 188, 122, 199, 140, 10, 39, 37, 122, 198,
20, 69, 141, 224, 164, 241, 111, 128, 0, 0, 0,
];
solana_sdk::solana_name_id!(
REGISTER_CONFIG_KEY,
"Va1idator1nfo111111111111111111111111111111"
);
solana_sdk::declare_id!("Va1idator1nfo111111111111111111111111111111");
#[derive(Debug, Deserialize, PartialEq, Serialize, Default)]
pub struct ValidatorInfo {
@ -130,7 +128,7 @@ fn parse_validator_info(
pubkey: &Pubkey,
account: &Account,
) -> Result<(Pubkey, Map<String, serde_json::value::Value>), Box<dyn error::Error>> {
if account.owner != solana_config_api::id() {
if account.owner != solana_config_program::id() {
return Err(format!("{} is not a validator info account", pubkey).into());
}
let key_list: ConfigKeys = deserialize(&account.data)?;
@ -274,7 +272,7 @@ pub fn process_set_validator_info(
info: validator_string,
};
// Check for existing validator-info account
let all_config = rpc_client.get_program_accounts(&solana_config_api::id())?;
let all_config = rpc_client.get_program_accounts(&solana_config_program::id())?;
let existing_account = all_config
.iter()
.filter(|(_, account)| {
@ -297,7 +295,9 @@ pub fn process_set_validator_info(
};
// Check existence of validator-info account
let balance = rpc_client.poll_get_balance(&info_pubkey).unwrap_or(0);
let balance = rpc_client
.poll_get_balance_with_commitment(&info_pubkey, CommitmentConfig::default())
.unwrap_or(0);
let keys = vec![(id(), false), (config.keypair.pubkey(), true)];
let (message, signers): (Message, Vec<&Keypair>) = if balance == 0 {
@ -362,7 +362,7 @@ pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>
rpc_client.get_account(&validator_info_pubkey)?,
)]
} else {
let all_config = rpc_client.get_program_accounts(&solana_config_api::id())?;
let all_config = rpc_client.get_program_accounts(&solana_config_program::id())?;
all_config
.into_iter()
.filter(|(_, validator_info_account)| {
@ -485,7 +485,7 @@ mod tests {
parse_validator_info(
&Pubkey::default(),
&Account {
owner: solana_config_api::id(),
owner: solana_config_program::id(),
data,
..Account::default()
}
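
For context, a minimal sketch (helper name hypothetical, assuming a reachable RpcClient) of the existence check the validator-info flow now performs with an explicit commitment level; a zero or unavailable balance is treated as "the info account does not exist yet", matching the unwrap_or(0) above:

use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey};

fn info_account_exists(rpc_client: &RpcClient, info_pubkey: &Pubkey) -> bool {
    rpc_client
        .poll_get_balance_with_commitment(info_pubkey, CommitmentConfig::default())
        .unwrap_or(0)
        > 0
}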


@ -1,19 +1,16 @@
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult,
},
input_parsers::*,
input_validators::*,
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
use solana_client::rpc_client::RpcClient;
use solana_sdk::signature::Keypair;
use solana_sdk::{
account::Account, pubkey::Pubkey, signature::KeypairUtil, system_instruction::SystemError,
transaction::Transaction,
};
use solana_vote_api::{
use solana_vote_program::{
vote_instruction::{self, VoteError},
vote_state::{VoteAuthorize, VoteInit, VoteState},
};
@ -28,13 +25,13 @@ impl VoteSubCommands for App<'_, '_> {
SubCommand::with_name("create-vote-account")
.about("Create a vote account")
.arg(
Arg::with_name("vote_account_pubkey")
Arg::with_name("vote_account")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.value_name("VOTE ACCOUNT KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Vote account address to fund"),
.validator(is_keypair_or_ask_keyword)
.help("Vote account keypair to fund"),
)
.arg(
Arg::with_name("node_pubkey")
@ -50,7 +47,7 @@ impl VoteSubCommands for App<'_, '_> {
.long("commission")
.value_name("NUM")
.takes_value(true)
.help("The commission taken on reward redemption (0-255), default: 0"),
.help("The commission taken on reward redemption (0-100), default: 0"),
)
.arg(
Arg::with_name("authorized_voter")
@ -161,7 +158,7 @@ impl VoteSubCommands for App<'_, '_> {
}
pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let vote_account = keypair_of(matches, "vote_account").unwrap();
let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap();
let commission = value_of(&matches, "commission").unwrap_or(0);
let authorized_voter = pubkey_of(matches, "authorized_voter");
@ -169,7 +166,7 @@ pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandI
Ok(CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account_pubkey,
vote_account: vote_account.into(),
node_pubkey,
authorized_voter,
authorized_withdrawer,
@ -231,19 +228,20 @@ pub fn parse_vote_uptime_command(matches: &ArgMatches<'_>) -> Result<CliCommandI
pub fn process_create_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
vote_account_pubkey: &Pubkey,
vote_account: &Keypair,
node_pubkey: &Pubkey,
authorized_voter: &Option<Pubkey>,
authorized_withdrawer: &Option<Pubkey>,
commission: u8,
) -> ProcessResult {
let vote_account_pubkey = vote_account.pubkey();
check_unique_pubkeys(
(vote_account_pubkey, "vote_account_pubkey".to_string()),
(&vote_account_pubkey, "vote_account_pubkey".to_string()),
(&node_pubkey, "node_pubkey".to_string()),
)?;
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
(vote_account_pubkey, "vote_account_pubkey".to_string()),
(&vote_account_pubkey, "vote_account_pubkey".to_string()),
)?;
let required_balance =
rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?;
@ -254,20 +252,24 @@ pub fn process_create_vote_account(
};
let vote_init = VoteInit {
node_pubkey: *node_pubkey,
authorized_voter: authorized_voter.unwrap_or(*vote_account_pubkey),
authorized_voter: authorized_voter.unwrap_or(vote_account_pubkey),
authorized_withdrawer: authorized_withdrawer.unwrap_or(config.keypair.pubkey()),
commission,
};
let ixs = vote_instruction::create_account(
&config.keypair.pubkey(),
vote_account_pubkey,
&vote_account_pubkey,
&vote_init,
lamports,
);
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], ixs, recent_blockhash);
let mut tx = Transaction::new_signed_instructions(
&[&config.keypair, vote_account],
ixs,
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, vote_account]);
log_instruction_custom_error::<SystemError>(result)
}
@ -307,7 +309,7 @@ fn get_vote_account(
) -> Result<(Account, VoteState), Box<dyn std::error::Error>> {
let vote_account = rpc_client.get_account(vote_account_pubkey)?;
if vote_account.owner != solana_vote_api::id() {
if vote_account.owner != solana_vote_program::id() {
return Err(CliError::RpcRequestError(
format!("{:?} is not a vote account", vote_account_pubkey).to_string(),
)
@ -343,10 +345,7 @@ pub fn process_show_vote_account(
vote_state.authorized_withdrawer
);
println!("credits: {}", vote_state.credits());
println!(
"commission: {}%",
f64::from(vote_state.commission) / f64::from(std::u32::MAX)
);
println!("commission: {}%", vote_state.commission);
println!(
"root slot: {}",
match vote_state.root_slot {
@ -430,11 +429,19 @@ pub fn process_uptime(
mod tests {
use super::*;
use crate::cli::{app, parse_command};
use solana_sdk::signature::write_keypair;
use tempfile::NamedTempFile;
fn make_tmp_file() -> (String, NamedTempFile) {
let tmp_file = NamedTempFile::new().unwrap();
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
}
#[test]
fn test_parse_command() {
let test_commands = app("test", "desc", "version");
let pubkey = Pubkey::new_rand();
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let pubkey_string = pubkey.to_string();
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
@ -451,13 +458,16 @@ mod tests {
}
);
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
// Test CreateVoteAccount SubCommand
let node_pubkey = Pubkey::new_rand();
let node_pubkey_string = format!("{}", node_pubkey);
let test_create_vote_account = test_commands.clone().get_matches_from(vec![
"test",
"create-vote-account",
&pubkey_string,
&keypair_file,
&node_pubkey_string,
"--commission",
"10",
@ -466,7 +476,7 @@ mod tests {
parse_command(&test_create_vote_account).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account_pubkey: pubkey,
vote_account: keypair.into(),
node_pubkey,
authorized_voter: None,
authorized_withdrawer: None,
@ -475,17 +485,22 @@ mod tests {
require_keypair: true
}
);
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
let test_create_vote_account2 = test_commands.clone().get_matches_from(vec![
"test",
"create-vote-account",
&pubkey_string,
&keypair_file,
&node_pubkey_string,
]);
assert_eq!(
parse_command(&test_create_vote_account2).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account_pubkey: pubkey,
vote_account: keypair.into(),
node_pubkey,
authorized_voter: None,
authorized_withdrawer: None,
@ -494,12 +509,17 @@ mod tests {
require_keypair: true
}
);
// test init with an authed voter
let authed = Pubkey::new_rand();
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
let test_create_vote_account3 = test_commands.clone().get_matches_from(vec![
"test",
"create-vote-account",
&pubkey_string,
&keypair_file,
&node_pubkey_string,
"--authorized-voter",
&authed.to_string(),
@ -508,7 +528,7 @@ mod tests {
parse_command(&test_create_vote_account3).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account_pubkey: pubkey,
vote_account: keypair.into(),
node_pubkey,
authorized_voter: Some(authed),
authorized_withdrawer: None,
@ -517,11 +537,15 @@ mod tests {
require_keypair: true
}
);
let (keypair_file, mut tmp_file) = make_tmp_file();
let keypair = Keypair::new();
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
// test init with an authed withdrawer
let test_create_vote_account4 = test_commands.clone().get_matches_from(vec![
"test",
"create-vote-account",
&pubkey_string,
&keypair_file,
&node_pubkey_string,
"--authorized-withdrawer",
&authed.to_string(),
@ -530,7 +554,7 @@ mod tests {
parse_command(&test_create_vote_account4).unwrap(),
CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account_pubkey: pubkey,
vote_account: keypair.into(),
node_pubkey,
authorized_voter: None,
authorized_withdrawer: Some(authed),
@ -562,5 +586,4 @@ mod tests {
}
);
}
// TODO: Add process tests
}
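
For context, a minimal sketch (helper name hypothetical) of how the defaults in this diff resolve when --authorized-voter and --authorized-withdrawer are omitted: the new vote account is authorized to vote for itself, and the fee-paying CLI keypair becomes the withdrawer.

use solana_sdk::{
    pubkey::Pubkey,
    signature::{Keypair, KeypairUtil},
};
use solana_vote_program::vote_state::VoteInit;

fn default_vote_init(cli_keypair: &Keypair, vote_account: &Keypair, node_pubkey: Pubkey) -> VoteInit {
    VoteInit {
        node_pubkey,
        authorized_voter: vote_account.pubkey(),
        authorized_withdrawer: cli_keypair.pubkey(),
        commission: 0, // commission is now a percentage in the 0-100 range
    }
}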


@ -1,14 +1,16 @@
use serde_json::{json, Value};
use serde_json::Value;
use solana_cli::cli::{process_command, CliCommand, CliConfig};
use solana_client::rpc_client::RpcClient;
use solana_client::rpc_request::RpcRequest;
use solana_core::validator::new_validator_for_tests;
use solana_drone::drone::run_local_drone;
use solana_sdk::bpf_loader;
use std::fs::{remove_dir_all, File};
use std::io::Read;
use std::path::PathBuf;
use std::sync::mpsc::channel;
use solana_sdk::{bpf_loader, pubkey::Pubkey};
use std::{
fs::{remove_dir_all, File},
io::Read,
path::PathBuf,
str::FromStr,
sync::mpsc::channel,
};
#[test]
fn test_cli_deploy_program() {
@ -56,35 +58,17 @@ fn test_cli_deploy_program() {
.unwrap()
.as_str()
.unwrap();
let params = json!([program_id_str]);
let account_info = rpc_client
.retry_make_rpc_request(&RpcRequest::GetAccountInfo, Some(params), 0)
.unwrap();
let account_info_obj = account_info.as_object().unwrap();
assert_eq!(
account_info_obj.get("lamports").unwrap().as_u64().unwrap(),
minimum_balance_for_rent_exemption
);
let owner_array = account_info.get("owner").unwrap();
assert_eq!(owner_array, &json!(bpf_loader::id()));
assert_eq!(
account_info_obj
.get("executable")
.unwrap()
.as_bool()
.unwrap(),
true
);
let program_id = Pubkey::from_str(&program_id_str).unwrap();
let account = rpc_client.get_account(&program_id).unwrap();
assert_eq!(account.lamports, minimum_balance_for_rent_exemption);
assert_eq!(account.owner, bpf_loader::id());
assert_eq!(account.executable, true);
let mut file = File::open(pathbuf.to_str().unwrap().to_string()).unwrap();
let mut elf = Vec::new();
file.read_to_end(&mut elf).unwrap();
assert_eq!(
account_info_obj.get("data").unwrap().as_array().unwrap(),
&elf
);
assert_eq!(account.data, elf);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();

Some files were not shown because too many files have changed in this diff.