Compare commits

...

308 Commits

Author SHA1 Message Date
d18dc94209 Update testnet book source to release 0.17.0 (#5339) 2019-07-29 18:53:00 -06:00
8242fd19eb Move coverage back to the default queue (#5318) (#5320)
(cherry picked from commit 506b305959)
2019-07-28 23:17:45 -07:00
469e91cd8d Add --use_move mode to bench-tps (#5311) (#5316)
automerge
2019-07-28 14:15:57 -07:00
4889c2a29c Add move mode to bench-tps (bp #5250) (#5310)
automerge
2019-07-27 17:51:12 -07:00
3c6115c94a Pull all libra crates from crates.io (bp #5306) (#5307)
automerge
2019-07-27 15:48:41 -07:00
70b15317a9 Move credit-only and Move proposals to the implemented section of the book (#5308) (#5309)
automerge
2019-07-27 15:41:18 -07:00
a834e9ae10 Add libray_api (bp #5304) (#5305)
automerge
2019-07-27 13:30:59 -07:00
7796e87814 Revert "Default log level to RUST_LOG=solana=info (#5296)" (#5302)
This reverts commit c63a38ae57.
2019-07-27 07:46:45 -07:00
64c770275b Integrate Move VM into main build (#5229)
* Integrate Move VM into top-level build

* Switch to protoc-free libra
2019-07-27 06:59:46 -06:00
855f7ff352 Move Move deps from a branch to a tag (#5300) 2019-07-26 23:51:42 -06:00
b59a99111c Bump url from 1.7.2 to 2.0.0 (#5247)
* Bump url from 1.7.2 to 2.0.0

Bumps [url](https://github.com/servo/rust-url) from 1.7.2 to 2.0.0.
- [Release notes](https://github.com/servo/rust-url/releases)
- [Commits](https://github.com/servo/rust-url/compare/v1.7.2...v2.0.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>

* Adapt to url 2.0.0
2019-07-26 21:46:38 -07:00
252257fe66 Rewrite multinode-demo/replicator.sh to avoid fullnode.sh (#5299) 2019-07-26 19:00:34 -07:00
e2c9d87d91 Move verify to finalize (#5297)
automerge
2019-07-26 17:51:07 -07:00
9d34b80ed6 Upgrade to all the latest packages our existing Cargo.toml will allow (#5298)
automerge
2019-07-26 17:21:01 -07:00
c63a38ae57 Default log level to RUST_LOG=solana=info (#5296) 2019-07-26 16:29:16 -07:00
20da2604f8 storage-keypair argument should not be required (#5295)
automerge
2019-07-26 15:18:55 -07:00
33de2cad6d Replace TokenPair in exchange (#5292)
* simplify token pair representation, rename to AssetPair for forward compat.

* update bench exchange TokenPair use
2019-07-26 14:31:08 -06:00
aef7bae60d Let grace ticks roll over into multiple leader slots (#5268)
* Let grace ticks roll over into multiple leader slots

* address review comments
2019-07-26 11:33:51 -07:00
54ac7ed1ea Voting/storage keypair can now be provided by the user (#5288) 2019-07-26 11:05:02 -07:00
0180246680 Clean up argument parsing (#5290)
automerge
2019-07-26 10:37:03 -07:00
dab7de7496 Add confidence cache to BankForks (#5066)
* Add confidence cache to BankForks

* Include stake-weighted lockouts in cache

* Add cache test

* Move confidence cache updates to handle_votable_bank

* Prune confidence cache on prune_non_root()

* Spin thread to process aggregate_stake_lockouts

* Add long-running thread for stake_weighted_lockouts computation
2019-07-26 11:27:57 -06:00
feaf29792f Error cleanly on show vote/stake/storage account pubkey mismatch (#5289)
Also deverb vote/stake account variables
2019-07-26 09:34:12 -07:00
5f09aa36b3 Drop code supporting no leader rotation (#5286) 2019-07-26 09:26:27 -07:00
d6c74f438a Delete vestigial --vote-account argument (#5287)
automerge
2019-07-26 08:42:48 -07:00
349ebec629 Bump serde from 1.0.94 to 1.0.97 (#5285)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.94 to 1.0.97.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.94...v1.0.97)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-26 07:22:25 -06:00
f4554be72c add vote withdraw (#5284) 2019-07-25 23:20:47 -07:00
8537da19bb groom accounts_db (#5283) 2019-07-25 22:59:28 -07:00
d1eff5d607 Merge create-stake-account into delegate-stake (#5280) 2019-07-25 16:53:43 -07:00
19e4f70244 Change default behavior to remove a prerelease tag if present instead of a minor version bump 2019-07-25 16:33:24 -07:00
a233a1c822 Fix poh recorder not flushing virtual ticks immediately (#5277)
* Fix poh not flushing virtual ticks immediately

* Fix test_would_be_leader_soon
2019-07-25 11:08:44 -07:00
27bc0a22dd Add support for invoking and publishing Move modules (#5278) 2019-07-25 09:30:24 -07:00
7ee8383e02 Bump serde_derive from 1.0.94 to 1.0.97 (#5279)
Bumps [serde_derive](https://github.com/serde-rs/serde) from 1.0.94 to 1.0.97.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.94...v1.0.97)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-25 06:47:24 -06:00
bab0f6be1e Store Move account data in a deterministic order (#5276) 2019-07-24 21:43:14 -07:00
535df0026d Fixes for Blocktree space amplification and slot deletion (#5266)
* Fixes for Blocktree space amplification and slot deletion
2019-07-24 17:28:08 -07:00
3bd35dd7cc Remove usage of api.testnet.solana.com (#5274) 2019-07-24 17:06:26 -07:00
39d29fab82 Exchange update cont. (#5272)
* Trade -> Order for keyedAcct indices

* rename deserialize_trade -> deserialize_order

* rename do_order_cancel params

* rename vars *_trade -> *_order
2019-07-24 17:49:10 -06:00
fbfe1a59a6 bump timeout 2019-07-24 13:53:08 -07:00
77c79effc1 Update github token 2019-07-24 13:50:49 -07:00
83540087c3 Switch to forked libra packages (#5270)
* Switch to forked libra packages

* Don't Cargo.lock lib crates
2019-07-24 14:21:22 -06:00
937816e67b Post warning if window service isn't receiving any data (#5269)
automerge
2019-07-24 12:46:10 -07:00
c3a941086d Remove more unwraps (#5267)
automerge
2019-07-24 12:30:43 -07:00
1046c5e32c Adjust log levels (#5265)
automerge
2019-07-24 10:15:49 -07:00
baac8d2590 Upgrade libra (#5264)
automerge
2019-07-24 09:56:29 -07:00
610a02c518 Bump jsonrpc-ws-server from 12.0.0 to 12.1.0 (#5261)
Bumps [jsonrpc-ws-server](https://github.com/paritytech/jsonrpc) from 12.0.0 to 12.1.0.
- [Release notes](https://github.com/paritytech/jsonrpc/releases)
- [Commits](https://github.com/paritytech/jsonrpc/compare/v12.0.0...v12.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-24 10:52:44 -06:00
444bd7a702 Bump semver from 0.7.0 to 0.9.0 (#5260)
Bumps [semver](https://github.com/steveklabnik/semver) from 0.7.0 to 0.9.0.
- [Release notes](https://github.com/steveklabnik/semver/releases)
- [Commits](https://github.com/steveklabnik/semver/compare/v0.7.0...v0.9.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-24 10:52:20 -06:00
7afc61e0b9 Cap Move program's execution (#5259) 2019-07-24 08:06:03 -07:00
d4d9bec2a9 NDEBUG=1 2019-07-24 07:28:16 -07:00
d647a4ec57 Bump publish-crate.sh timeout 2019-07-23 22:12:34 -07:00
536b4c1a25 Export genesis creation function (#5252) 2019-07-23 21:34:17 -07:00
547a7a345f Add logs to indicate when the leader changes (#5253) 2019-07-23 22:19:20 -04:00
26e380e53c Sort bench-tps keypairs (#5254)
automerge
2019-07-23 17:46:33 -07:00
8a12ed029c make accounts_db own the directory paths (#5230)
* change paths to something accounts_db (the singleton) owns, fixes SIGILL

* fail deserialize if paths don't work
serialize paths, too

* test that paths are populated from a bank snapshot
2019-07-23 13:47:48 -07:00
b41e8333b1 Add support to install a specific Solana version directly from the Github releases (#5248) 2019-07-23 12:51:10 -07:00
8f646e21d7 Bump cc from 1.0.37 to 1.0.38 (#5245)
Bumps [cc](https://github.com/alexcrichton/cc-rs) from 1.0.37 to 1.0.38.
- [Release notes](https://github.com/alexcrichton/cc-rs/releases)
- [Commits](https://github.com/alexcrichton/cc-rs/compare/1.0.37...1.0.38)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-23 10:51:50 -07:00
5608af0246 Cleanup and fix Move account invoking (#5244)
* Remove deps, generate genesis, cleanup

* Fix tests, specify sender in ix data

* nits
2019-07-23 08:54:34 -06:00
17b9ea3e3b Update buildkite-secondary.yml 2019-07-23 07:33:13 -07:00
88d4d1db7a Update buildkite.yml 2019-07-23 00:20:05 -07:00
cab4c88c71 Bump timeouts 2019-07-22 23:52:30 -07:00
4ec5a899f5 Check longer 2019-07-22 23:41:42 -07:00
c2f74330ef Drop ring crate (#5242) 2019-07-22 23:11:40 -07:00
2c8e0bcf87 Introduce --config-dir to avoid shipping clear-config.sh (#5241) 2019-07-22 22:59:02 -07:00
4966ab528e validator.sh: Add --reset-ledger option (#5235)
* Add --recreate-ledger option

* --reset-ledger
2019-07-22 22:20:54 -07:00
5f81a67298 Add --no-deploy option to allow restarting nodes without a software update (#5182) 2019-07-22 21:38:26 -07:00
a0ccdccff1 Call book/build.sh from docker (#5237)
* Call book/build.sh from docker

* debug

* Revert "debug"

This reverts commit 32986b73b7.
2019-07-22 21:37:43 -07:00
735c7c9841 Add manual publish for book and create book-beta (#5112) 2019-07-22 17:45:00 -06:00
3a69459645 Surface validator pubkey in metrics (#5227) 2019-07-22 16:08:21 -07:00
21cef2fe21 Do not attempt to create solana user multiple times (#5228)
* Do not attempt to create solana user multiple times
2019-07-22 16:13:08 -06:00
038c6ea0a7 Bump solana_libra to v0.0.0.1 (#5225)
automerge
2019-07-22 14:17:06 -07:00
81f4fd56c7 Log the repairee pubkey when unable to serve them (#5222)
automerge
2019-07-22 14:13:29 -07:00
264a3d7dde Increase ticks_per_slot for banking benchmark (#5221) 2019-07-22 13:57:28 -07:00
43bf176fab more granular check for memoffset ignore in audit (#5219)
* more granular check for memoffset ignore in audit

* debugggin

* debugggin

* debugggin

* debugggin

* debugggin
2019-07-22 13:36:27 -07:00
baec17fdf4 Fix some nightly warnings (#5218) 2019-07-22 12:51:02 -07:00
186b514ebb Embed Move (#5150)
automerge
2019-07-22 12:01:52 -07:00
2d42c1e33e add root to terminology (#5209)
* add root to terminology

* review feedback
2019-07-22 09:36:20 -07:00
9cef522eee Bump walkdir from 2.2.8 to 2.2.9 (#5204)
Bumps [walkdir](https://github.com/BurntSushi/walkdir) from 2.2.8 to 2.2.9.
- [Release notes](https://github.com/BurntSushi/walkdir/releases)
- [Commits](https://github.com/BurntSushi/walkdir/compare/2.2.8...2.2.9)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-22 09:35:46 -07:00
a6302acfd5 Bump jsonrpc-http-server from 12.0.0 to 12.1.0 (#5211)
Bumps [jsonrpc-http-server](https://github.com/paritytech/jsonrpc) from 12.0.0 to 12.1.0.
- [Release notes](https://github.com/paritytech/jsonrpc/releases)
- [Commits](https://github.com/paritytech/jsonrpc/compare/v12.0.0...v12.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-22 10:34:27 -06:00
ac72265c6b Request a uniform timezone 2019-07-22 09:25:36 -07:00
09da6b4b48 Encourage setting an RPC port 2019-07-22 08:23:36 -07:00
0d8f5379a0 Add time units 2019-07-22 08:22:21 -07:00
02c7b89a8f Update location of TdS external accounts file url 2019-07-21 17:38:15 -07:00
90ae33c200 Update incoming_webhook 2019-07-21 11:27:19 -07:00
55c879ce2d Update S3 key 2019-07-21 11:18:17 -07:00
1b5a332239 Adjustments for appveyor server 2019-07-21 09:21:28 -07:00
595017499e accounts_index: RwLock per-account (#5198)
* accounts_index: RwLock per-account

Lots of lock contention on the accounts_index lock,
only take write-lock on accounts_index if we need to insert/remove an
account.
For updates, take a read-lock and then write-lock on the individual
account.

* Remove unneeded enumerate and add comments.
2019-07-20 17:58:39 -07:00
9b1471acae Upgrade to Rust 1.36.0 (#5206)
* Upgrade to Rust 1.36.0

* Move test-checks.sh back to stable

* update nightly version to 2019-07-19

* use both nightly and stable for checks
2019-07-20 18:53:16 -06:00
b766ac0899 rent (#5205) 2019-07-20 16:28:17 -07:00
e6b525a614 disable audit until crossbeam epoch release (#5208) 2019-07-20 15:50:26 -07:00
a07b17b9b5 Drop older slots in the ledger (#5188)
* Add facility to delete blocktree columns in range

* Add ledger cleanup service

* Add local_cluster test
2019-07-20 13:13:55 -07:00
9d2940d487 Show wallet commands for better log debugging 2019-07-19 20:21:51 -07:00
6969ece2dd Ensure CI_OS_NAME is set for appveyor server 2019-07-19 20:06:32 -07:00
48fc35884c Add Transaction Documentation (#5115) 2019-07-19 16:42:50 -06:00
0958905df8 Bump reqwest from 0.9.18 to 0.9.19 (#5201)
Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.9.18 to 0.9.19.
- [Release notes](https://github.com/seanmonstar/reqwest/releases)
- [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md)
- [Commits](https://github.com/seanmonstar/reqwest/compare/v0.9.18...v0.9.19)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-19 16:13:34 -06:00
c95cda51c9 Fix internal node lamport funding and staking allocation logic (#5192)
* Plumb node funding from genesis

* Cleanup naming convention

*  Fix balance vs stake yml file logic

* Lamps not Stakes
2019-07-19 12:51:38 -06:00
3f54c0f1a6 Update struct order arbitrarily to match rpc output (#5197) 2019-07-19 11:45:04 -06:00
4684faa5e8 Bump jsonrpc-core from 12.0.0 to 12.1.0 (#5143)
Bumps [jsonrpc-core](https://github.com/paritytech/jsonrpc) from 12.0.0 to 12.1.0.
- [Release notes](https://github.com/paritytech/jsonrpc/releases)
- [Commits](https://github.com/paritytech/jsonrpc/compare/v12.0.0...v12.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-19 09:59:56 -07:00
111d0eb89b runtime: Add bench for accounts::hash_internal_state (#5157)
* runtime: Add bench for accounts::hash_internal_state

* fixup! cargo fmt

* fixup! cargo clippy

* fixup! Use a more representative number of accounts

* fixup! More descriptive name for accounts creation helper
2019-07-19 10:32:29 -06:00
8b69998379 Lower recovery messages (#5181) 2019-07-19 09:20:14 -07:00
a21251dfea Fix up signal handling 2019-07-19 08:35:22 -07:00
06cd7c1020 Disable restart 2019-07-19 08:35:22 -07:00
782846f295 Document fetch-perf-libs.sh when building with CUDA 2019-07-19 08:35:22 -07:00
19e131d710 Bump jsonrpc-derive from 12.0.0 to 12.1.0 (#5193)
Bumps [jsonrpc-derive](https://github.com/paritytech/jsonrpc) from 12.0.0 to 12.1.0.
- [Release notes](https://github.com/paritytech/jsonrpc/releases)
- [Commits](https://github.com/paritytech/jsonrpc/compare/v12.0.0...v12.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-19 07:34:48 -07:00
9fd34cd985 Bump untrusted from 0.6.2 to 0.7.0 (#5194)
Bumps [untrusted](https://github.com/briansmith/untrusted) from 0.6.2 to 0.7.0.
- [Release notes](https://github.com/briansmith/untrusted/releases)
- [Commits](https://github.com/briansmith/untrusted/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-19 07:34:04 -07:00
adfb8ff2a1 Add getEpochInfo() and getLeaderSchedule() RPC methods (#5189)
* Add getLeaderSchedule() RPC method

* Add getEpochInfo() RPC method

* Add JSON RPC docs
2019-07-19 07:31:18 -07:00
83aa609540 Bump winreg from 0.6.0 to 0.6.1 (#5149)
Bumps [winreg](https://github.com/gentoo90/winreg-rs) from 0.6.0 to 0.6.1.
- [Release notes](https://github.com/gentoo90/winreg-rs/releases)
- [Commits](https://github.com/gentoo90/winreg-rs/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-18 21:24:40 -07:00
1e1cb7c57c Select stable rust version (#5180) 2019-07-18 21:19:07 -07:00
cdbd1b908a Ensure validator process is kill when stdout/stderr are redirected (#5179) 2019-07-18 21:18:48 -07:00
a12e7a2e33 Separate build and deploy steps in net/net.sh (#5184)
So one can keep the network up while a new experiment is building
2019-07-18 18:59:47 -07:00
25080f1a33 fix book typos (#5185) 2019-07-18 17:24:22 -07:00
afa05acb32 more replay_stage grooming (#5163) 2019-07-18 14:54:27 -07:00
d47caf2af8 add information to panic (#5177) 2019-07-18 14:41:32 -07:00
a3a91ba222 Fix misleading variable name (#5176)
automerge
2019-07-18 14:07:32 -07:00
751b54b60b Skip sleeping in replay stage if a bank was recently processed (#5161)
* Skip sleeping in replay stage if a bank was recently processed

* Remove return
2019-07-18 12:04:53 -07:00
488dd0e563 Keybase: s/id/username (#5165) 2019-07-18 12:16:13 -06:00
b58558ea4e net/: startnode/stopnode now works for blockstreamer/replicator nodes (#5146)
* startnode/stopnode now works for blockstreamer/replicator nodes

* Plumb --skip-ledger-verify through net/
2019-07-17 19:26:23 -07:00
6ad9dc18d8 Add ability to prune ledger (#5128)
* Add utility to prune the ledger

* Add tests

* Fix clippy

* Fix off by one

* Rework to force delete every column

* Minor fixup
2019-07-17 14:42:29 -07:00
027ebb6670 no more OUT_DIR (#5139)
* no more OUT_DIR

* no more OUT_DIR

* more information about failure
2019-07-17 14:27:58 -07:00
0ffd91df27 groom poh_recorder (#5127)
* groom poh_recorder

* fixup

* nits

* slot() from the outside means "the slot the recorder is working on"

* remove redundant check

* review comments, put next_tick back in the "is reset" check

* remove redundant check
2019-07-17 14:10:15 -07:00
10d85f8366 Add weighted shuffle support for values up to u64::MAX (#5151)
automerge
2019-07-17 12:44:28 -07:00
7aad427511 Bump libloading from 0.5.1 to 0.5.2 (#4950)
Bumps [libloading](https://github.com/nagisa/rust_libloading) from 0.5.1 to 0.5.2.
- [Release notes](https://github.com/nagisa/rust_libloading/releases)
- [Commits](https://github.com/nagisa/rust_libloading/compare/0.5.1...0.5.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-17 09:29:45 -06:00
bbd0455418 Bump log from 0.4.6 to 0.4.7 (#5144)
Bumps [log](https://github.com/rust-lang/log) from 0.4.6 to 0.4.7.
- [Release notes](https://github.com/rust-lang/log/releases)
- [Changelog](https://github.com/rust-lang-nursery/log/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/log/compare/0.4.6...0.4.7)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-17 09:25:57 -06:00
5174b3bc3f use precalculated max_tick_height (#5134) 2019-07-17 00:19:38 -07:00
f88c72c41e stress tweaks (#5140) 2019-07-16 22:04:40 -07:00
9f678cc32a Show stake pubkey 2019-07-16 20:10:15 -07:00
57036fbcc1 Check harder on crates.io for recently published crates (#5136) 2019-07-16 19:09:49 -07:00
349e5001d6 clear-config.sh now works with a secondary disk (#5135) 2019-07-16 19:09:14 -07:00
94db9cd412 Reduce banking_stage bench copy-paste code and fix programs bench (#4926) 2019-07-16 18:28:18 -07:00
b505a0df22 Throw more threads at hash_internal_state (#5023) 2019-07-16 16:58:30 -07:00
acf096c5f7 Add cross-program invocation proposal (#4922)
automerge
2019-07-16 16:36:05 -07:00
e8583f5cfe Bump tokio from 0.1.21 to 0.1.22 (#4935)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 0.1.21 to 0.1.22.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-16 16:48:11 -06:00
5825b967d2 Check for valid pid before kill in node stop script (#5126) 2019-07-16 14:31:15 -07:00
bf5bce50a4 Fix stake pruning test (#5124) 2019-07-16 13:20:03 -04:00
77ea8b9b3e Add LoaderInstruction::InvokeMain (#5116)
* Remove unreachable, untested runtime check

* tx_data -> ix_data

* Add LoaderInstruction::InvokeMain

* Add test and allow loaders to be registered statically.

* Fix clippy error
2019-07-16 10:45:32 -06:00
176cec6215 Update Rust-BPF Sysroot (#5122) 2019-07-16 07:42:22 -08:00
5ab4975c44 Improve validator-info CLI (#5121)
* Fix index OOB panic

* Handle 'get' empty validator-info responses properly

* Improve 'get' argument flow

* Improve arg help text

* Improve 'publish' argument flow

* Update book doc
2019-07-16 09:22:55 -06:00
7e60ee39d9 Add missing dash 2019-07-16 07:27:35 -07:00
3ea2933e2d It's 2019 2019-07-15 20:58:21 -07:00
fe87c05423 fix transaction_count (#5110)
* fix transaction_count

* add sig count to bank hash
2019-07-15 13:42:59 -07:00
6b86f85916 Add C API (#5072) 2019-07-15 13:17:17 -06:00
04649de6a6 Boot remote native loads, take 2 (#5106)
* Drop dependencies on remote native loads

* Remove remote native loads
2019-07-15 13:16:09 -06:00
92d78451b1 Update expected keybase-pubkey location (#5104)
automerge
2019-07-15 09:28:06 -07:00
0c87928132 Keybase pubkey file instructions and verification for validators (#5090)
* Document publishing a pubkey on keybase

* Verify keybase-pubkey
2019-07-14 23:48:50 -06:00
db7e78bf99 Add node zone and count to ENV (#5100)
* Add node zone and count to ENV
2019-07-14 22:40:18 -06:00
adecd4cfdc Pull testnet vars up to buildkite env (#5098) 2019-07-14 20:27:49 -06:00
40faaef9da Revert "Logging (#5017)" (#5096)
This reverts commit b50a3bae72.
2019-07-14 18:48:15 -07:00
9b54528c8e Fix some nightly warnings (#5093)
ONCE_INIT => Once::new
Box<Error> => Box<dyn Error>
2019-07-14 13:37:55 -07:00
440d006ec1 Plumb --no-snapshot in from CI (#5077)
* Plumb --no-snapshot in from CI
2019-07-14 13:17:30 -06:00
6c49b10784 Purge remaining uses of Locktower (#5076)
automerge
2019-07-13 00:24:15 -07:00
c858d1dbb3 Bump tempfile from 3.0.8 to 3.1.0 (#4882)
Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.0.8 to 3.1.0.
- [Release notes](https://github.com/Stebalien/tempfile/releases)
- [Changelog](https://github.com/Stebalien/tempfile/blob/master/NEWS)
- [Commits](https://github.com/Stebalien/tempfile/compare/v3.0.8...v3.1.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-12 23:57:35 -07:00
741a0a8a4e Correctly decode update manifest (#5086)
automerge
2019-07-12 22:55:55 -07:00
16b6576839 use precalculated max_tick_height (#5084) 2019-07-12 22:25:48 -07:00
6accf21229 Add epoch voting history to show-vote-account (#5080) (#5085)
automerge
2019-07-12 22:01:12 -07:00
d2b21ce8d0 Stop trying to publish crates that are unpublishable 2019-07-12 21:53:09 -07:00
b01990d480 Avoid trying to republish crates already on crates.io 2019-07-12 21:43:16 -07:00
d7fdfb7e21 Give publish-crate more time 2019-07-12 20:28:10 -07:00
19fe468dbc Add design proposal to embed Libra's Move (#5067)
* Add design proposal to embed Libra's Move

* Apply review feedback

* Pipeline VM -> the runtime
* defines -> define
2019-07-12 21:12:55 -06:00
259a5130a8 whoops (#5083) 2019-07-12 19:08:51 -07:00
0d27515d09 tmp dirs target to farf (#5079) 2019-07-12 18:28:42 -07:00
1c966aac25 Facility to generate a blocktree prune list using ledger tool (#5041)
automerge
2019-07-12 16:58:13 -07:00
d2b6c2e0ce syscall work, rename syscall to sysvar, rename current to clock (#5074)
* syscall work, rename syscall to sysvar, rename current to clock

* missed one

* nit
2019-07-12 16:38:15 -07:00
7aecb87bce Add a version field to blobs (#5057) 2019-07-12 13:43:19 -07:00
4a02914b30 Add pub key authorized list 2019-07-12 12:34:17 -07:00
7c12ecbe81 Fix unnecessary computation (#5055) 2019-07-12 11:30:37 -07:00
f093377805 apt-get update before installing certbot (#5054)
* apt-get update before installing certbot
2019-07-12 11:50:40 -06:00
5ac173d208 Enable GPUs and secondary disks for TdS net, pull external account file (#5031)
* Enable V100 GPUs over 3 regions for TdS cluster

* Turn on secondary config-local drive for tds net

* Enable long args bypass for GPU machine details

* bypass quoted long arg

* Pull external account file from wget

* typo

* Symlink config-local instead of changing the path variables

* Fix link path
2019-07-12 09:38:47 -06:00
9f58318fc5 Add --no-snapshot to disable booting a validator from a snapshot (#5050)
automerge
2019-07-11 21:03:17 -07:00
ebcdc06dc3 Restore ledger-tool print and json commands (#5048)
* Restore ledger-tool print and  json commands

* Remove obsolete read_ledger()
2019-07-11 20:33:36 -07:00
22315d88e7 Fix credit only commit_credits race (#5028)
* Fix credit only drain race

* Refactor commit credits for tests

* Fix tests to use commit_credits_unsafe
2019-07-11 18:46:49 -07:00
0a36a78133 Fix replicator segment selection (#5046) 2019-07-11 18:31:41 -07:00
a25446f045 Pull in more Rust-BPF compatible built-ins (#5043) 2019-07-11 15:16:30 -08:00
2860d2fe27 Pull in support for Rust-BPF stack argument passing (#5038) 2019-07-11 14:27:18 -08:00
e4861f52e0 Add support for additional disks for config-local (#5030)
* Add support for additional disks for config-local

* Restore wrongly deleted lines

* Shellcheck

* add args in the right place dummy

* Fix nits

* typo

* var naming cleanup

* Add stub function for remaining cloud providers
2019-07-11 16:23:32 -06:00
5698d48dc8 merkle-tree: Make instantiation a little less painful (#5037)
automerge
2019-07-11 15:15:08 -07:00
5b95685e12 Add rewards to is_syscall_id() (#5035) 2019-07-11 13:47:22 -08:00
4c90898f0b Dynamic erasure set configuration (#5018)
* Use local erasure session to create/broadcast coding blobs

* Individual session for each recovery (as the config might be different)

* address review comments

* new constructors for session and coding generator

* unit test for dynamic erasure config
2019-07-11 13:58:33 -07:00
a191f3fd90 add node_pubkey to vote warning (#5033) 2019-07-11 13:12:26 -07:00
b2c776eabc Fix getProgramAccounts RPC (#5024)
* Use scan_accounts to load accounts by program_id

* Add bank test

* Use get_program_accounts in RPC
2019-07-11 12:58:28 -06:00
2c8d6f87e6 Add validator-info CLI (#4970)
* Add validator-info CLI

* Add GetProgramAccounts method to solana-client

* Update validator-info args, and add get subcommand

* Update ValidatorInfo lengths

* Add account filter for get --all

* Update testnet participation doc to reflect validator-info

* Flesh out tests

* Review comments
2019-07-11 12:38:52 -06:00
08f6de0acd Plumb scan_accounts into accounts_db, adding load from storage (#5029) 2019-07-11 12:16:02 -06:00
bd92f37553 Terminology (#4995)
* update exchange program: tradeOrder->Order, tradeRequest->OrderRequest, tradeCancel->OrderCancel

* Update bench-exchange: tradeOrder -> Order

* update bench exchange Readme
2019-07-10 23:22:33 -06:00
2abbc89dcd add accounts_index_scan_accounts (#5020) 2019-07-10 22:06:32 -07:00
8cad992170 reduce replicode in accounts, fix cast to i64 (#5025) 2019-07-10 21:22:58 -07:00
41d0db078e Wait for bootstrap leader to initialize before starting other validators (#5027) 2019-07-10 21:03:48 -07:00
8781aebe06 Pass SOLANA_METRICS_CONFIG along to oom-monitor.sh (#5021) 2019-07-10 20:11:55 -07:00
727c15ef8a start from random point in fork stores (#5010) 2019-07-10 18:44:49 -07:00
e4926e4110 Set exit when replicator run exits (#5016) 2019-07-10 16:27:18 -07:00
b50a3bae72 Logging (#5017)
* Add logging to replay_stage

* locktower logging
2019-07-10 15:52:31 -07:00
35ec7a5156 Decouple turns from segments in PoRep (#5004)
* Decouple Segments from Turns in Storage

* Get replicator local cluster tests running in a reasonable amount of time

* Fix unused imports

* Document new RPC APIs

* Check for exit while polling
2019-07-10 13:33:29 -07:00
a383ea532f Implement new Index Column (#4827)
* Implement new Index Column

* Correct slicing of blobs

* Mark coding blobs as coding when they're recovered

* Prevent broadcast stages from mixing coding and data blobs in blocktree

* Mark recovered blobs as present in the index

* Fix indexing error in recovery

* Fix broken tests, and some bug fixes

* increase min stack size for coverage runs
2019-07-10 11:08:17 -07:00
b1a678b2db Document getSlotsPerSegment in rpc api doc (#5005)
* Document getSlotsPerSegment in rpc api doc
2019-07-10 10:05:11 -07:00
e563a4dda3 Rename tds-testnet to tds (#5008) 2019-07-10 10:26:24 -06:00
dbe533385e Improve signature checks in config_api (#5001)
automerge
2019-07-10 01:00:49 -07:00
f537482c86 remove set_leader from cluster_info (#4998) 2019-07-09 22:06:47 -07:00
aebd70ddce Move letsencrypt arg to create_args 2019-07-09 21:27:12 -07:00
7d80cfb17a Include --letsencrypt ($1) 2019-07-09 20:54:11 -07:00
b8e7736af2 Move SLOTS_PER_SEGMENT to genesis (#4992)
automerge
2019-07-09 16:48:40 -07:00
32b55e6703 Fund solana-install deployments from the mint keypair to avoid airdrops (#4997) 2019-07-09 16:45:28 -07:00
0a949677f0 net/ plumbing to manage LetsEncrypt TLS certificates (#4985)
automerge
2019-07-09 15:45:46 -07:00
f777a1a74c groom replay_stage and poh_recorder (#4961)
* groom replay_stage and poh_recorder

* fixup

* fixup

* don't freeze() parent, need to review bank_forks and maybe vote...
2019-07-09 15:36:30 -07:00
d111223085 Fix always passing in remote filename, even if no accounts file (#4993)
* Fix always passing in remote filename, even if no accounts file

* typo
2019-07-09 16:07:31 -06:00
1ca7e9f67b Add testnet-tds support to testnet manager (#4762)
* Add testnet-tds support to testnet scripts
2019-07-09 14:39:55 -06:00
bc8f435d45 Shell script nits (#4982) 2019-07-09 12:09:13 -08:00
5e221bf219 Make config_api more robust (#4980)
* Make config_api more robust

* Add test and update store instruction
2019-07-09 13:37:18 -06:00
fc58b3e8c3 Fix typos 2019-07-09 09:35:52 -06:00
1033f52877 Add pubkey (#4971) 2019-07-09 00:54:22 -07:00
4771177f9d Update LLVM to v0.0.11 (#4976) 2019-07-08 23:22:49 -08:00
50c6b5d62d Work around missing lib on linux (Issue #4972) (#4975) 2019-07-08 22:24:57 -08:00
f9a2254688 Split out Rust BPF no-std stuff (#4968) 2019-07-08 20:28:05 -08:00
49250f62aa make commit_credits one trip through the rwlock (#4969) 2019-07-08 20:46:21 -07:00
22ef3c7c54 Blob verify (#4951)
* Ensure signable data is not out of range

* Add a broadcast stage that puts bad sizes in blobs

* Resign blob after modifying size

* Remove assertions that fail when size != meta.size
2019-07-08 18:21:52 -07:00
417e8d5064 fix blocktree_processor test_process_entries_stress (#4967) 2019-07-08 18:11:58 -07:00
1feb9bea21 Harden Merkle Tree against second pre-image attacks (#4925)
* merkle-tree: Harden against second pre-image attacks

* core/chacha: Bump test golden hash
2019-07-08 19:00:06 -06:00
563c42b829 Consistent message format (#4965) 2019-07-08 16:07:45 -08:00
841e5e326c Program mutable lamports (#4964) 2019-07-08 15:52:25 -08:00
281deae102 Update config program to accommodate multiple signers (#4946)
* Update config program to accommodate multiple signers

* Update install CLI

* Remove account_type u32; add handling for unsigned keys in list

* ConfigKeys doc
2019-07-08 18:33:56 -05:00
c5ba2e0883 bank_forks test stability (#4959)
automerge
2019-07-08 15:55:49 -07:00
eb4edd75e6 make bank commit_credits non public (#4944)
* make bank commit_credits non pub

* track down create() failure

* move bank_client to process_transaction(), which commits credits
2019-07-08 15:37:54 -07:00
bb6bcd79c0 Handle replicator errors without panicking (#4955)
* Handle replicator errors without panicking

* Unwelcome println
2019-07-08 12:43:35 -07:00
ef7022d638 Refactor replicators to not block on startup (#4932)
* Refactor replicators to not block on startup

* Ignore setup failure
2019-07-08 10:17:25 -07:00
2aac094f63 Ensure blobs are deserializable without unwrapping (#4948)
* Return result from deserializing blobs in blocktree instead of assuming deserialization will succeed

* Mark bad deserialization as dead fork

* Add test for corrupted blobs in blocktree and replay_stage
2019-07-07 14:37:12 -07:00
fc180f4cbf Halve stake of malicious validator (#4937) 2019-07-05 15:45:39 -07:00
e26a0bf840 Bump env_logger from 0.6.1 to 0.6.2 (#4879)
Bumps [env_logger](https://github.com/sebasmagri/env_logger) from 0.6.1 to 0.6.2.
- [Release notes](https://github.com/sebasmagri/env_logger/releases)
- [Changelog](https://github.com/sebasmagri/env_logger/blob/master/CHANGELOG.md)
- [Commits](https://github.com/sebasmagri/env_logger/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-05 14:11:10 -06:00
3557975c1f install: more little window fixes (#4930)
* Only add .exe extension if no extension was given

* Switch to ctrlc crate for freebie Windows ^C handling
2019-07-03 17:45:08 -07:00
b4aebbd991 Increment InfluxDB to 1.7 (#4931) 2019-07-03 17:44:49 -07:00
db13b52e6a Bump serde_json from 1.0.39 to 1.0.40 (#4881)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.39 to 1.0.40.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.39...v1.0.40)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-03 16:15:56 -06:00
f1f6537837 Reduce default commission from 100% to 50% (#4929) 2019-07-03 14:18:08 -07:00
2ec5d2c7f5 start local_cluster nodes from genesis blocks (#4928) 2019-07-03 14:03:52 -07:00
42e5623e26 Update rust-bpf-sysroot to v0.5 (#4920) 2019-07-03 11:20:21 -08:00
ab9f2adc69 [Security] Bump smallvec from 0.6.9 to 0.6.10 (#4921)
Bumps [smallvec](https://github.com/servo/rust-smallvec) from 0.6.9 to 0.6.10. **This update includes security fixes.**
- [Release notes](https://github.com/servo/rust-smallvec/releases)
- [Commits](https://github.com/servo/rust-smallvec/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-07-03 12:12:48 -07:00
f551b34725 Fix issue in polling for transaction signatures (#4923)
- Specifically if multiple confirmation for the signature is requested
2019-07-02 20:56:10 -07:00
55b8ff72d0 Enable parallel tests to reduce test time (#4919)
* Add crate to serialize some tests

* Ignore unused attribute warning

* Enable parallel run in CI

* Try to fix lograte tests

* Fix interdependent counter tests
2019-07-02 17:35:03 -07:00
bf319ab06d Convert syscall accounts to credit only accounts (#4915) 2019-07-02 15:17:28 -07:00
12ef0c25b5 change vote commission to u8 (from u32) (#4887)
automerge
2019-07-02 14:18:11 -07:00
8620d0a3b2 Add curl retries 2019-07-02 08:37:18 -07:00
933ae51fcc Add .exe extension before checking for a program file on windows (#4902) 2019-07-02 08:04:27 -07:00
c1201e54fa Avoid signal-hook crate on windows (#4900) 2019-07-01 22:52:55 -07:00
3615445a12 Broadcast run for injecting fake blobs in turbine (#4889)
* Broadcast run for injecting fake blobs in turbine

* address review comments

* new local cluster test that uses fake blob broadcast

* added a test to make sure tvu_peers ordering is guaranteed
2019-07-01 17:54:03 -07:00
091999a17e fix Instruction and CompiledInstruction field names (#4895)
* s/program_ids_index/program_id for Instruction

* s/program_ids_index/program_id_index for CompiledInstruction
2019-07-01 18:34:22 -06:00
417066ad30 Fix bench-tps funding math; make generate_keypairs() and fund_keys() algorithms consistent (#4841)
* Fix funding math; make generate_keypairs and fund_keys consistent

* Add test, and fix inconsistencies it exposes

* De-pow math, and use assert_eq in tests for better failure msgs
2019-07-01 18:32:03 -06:00
2abe051a1f run command now kills child process on SIGTERM to cleanly exit (#4896)
automerge
2019-07-01 17:10:14 -07:00
65adce65fa Always send pull responses to the origin addr (#4894) 2019-07-01 16:49:05 -07:00
0c8f187993 remove syscall tick height (#4891) 2019-07-01 16:21:51 -07:00
cbd2938035 update book with stake stuff (#4893) 2019-07-01 15:16:41 -07:00
0999225794 Try to gracefully terminal child process before using SIGKILL (#4890) 2019-07-01 14:08:30 -07:00
38b44f2496 Reduce slot duration and consecutive leader slots (#4838)
* change consecutive leader slots to 4

* reduce polling frequency for transaction signature confirmation

* adjust wait time for transaction signature confirmation

* fix nominal test

* fix flakiness in wallet pay test
2019-07-01 13:21:00 -07:00
c1953dca8f Cleanup some of banking stage (#4878)
* Add committable transactions that cause errors like InstructionErrors back to retryable list on MaxHeightReached

* Remove unnecessary logic

* Add comments/renaming for clarity
2019-07-01 12:14:40 -07:00
19ea5fe0c0 Rework fullnode.sh to recover better from genesis block resets (#4884) 2019-07-01 11:54:00 -07:00
d7ed3b8024 Add RPC api to return program accounts (#4876)
automerge
2019-06-29 09:59:07 -07:00
a89589a1d5 Add Measure abstraction over measuring time intervals (#4851)
Allows one to swap in different implementations. This provides
the normal Insant::now() -> .elapsed() path.
2019-06-29 15:34:49 +02:00
41bda18046 Disable Enter prompt when stdin is not a tty (#4874) 2019-06-28 17:43:43 -07:00
0c832f4668 Don't prompt the user to update their PATH if --no-modify-path was supplied (#4872) 2019-06-28 16:45:01 -07:00
75b494d4a3 Lower warn to info, fetch from validator root instead of root + 1 (#4870)
* Lower warn to info, fetch from validator root instead of root + 1

* b/c -> because
2019-06-28 16:17:20 -07:00
f0191a98ab Bump serde from 1.0.93 to 1.0.94 (#4864)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.93 to 1.0.94.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.93...v1.0.94)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-28 15:07:16 -07:00
76413cbfac Ensure validator process is killed when fullnode.sh is killed (#4869)
automerge
2019-06-28 14:24:44 -07:00
0fa1af5d47 Cleanup num_threads() and batch_limit numbers (#4852) 2019-06-28 10:55:24 +02:00
af1c70f032 book: Add simple payment and state verification proposal (#4200)
automerge
2019-06-27 17:08:10 -07:00
278614fc7c Impl credit-only accounts in Budget (#4862) 2019-06-27 19:22:21 -04:00
baca35ef4d book: Make build a little less annoying (#4861)
automerge
2019-06-27 15:20:37 -07:00
66552d7047 Credit-Only Accounts: Cache account balance for thread-safe load/store (#4691)
* Implement CreditOnlyLocks

* Update credit-only atomic on account load

* Update credit-only atomic after bank.freeze_lock; store credits if all credit-only lock references are dropped

* Commit credit-only credits on bank freeze

* Update core to CreditAccountLocks

* Impl credit-only in System Transfer

* Rework CreditAccountLocks, test, and fix bugs

* Review comments: Pass CreditAccountLocks by reference; Tighten up insert block

* Only store credits on completed slot

* Check balance in bench_exchange funding to ensure commit_credits has completed

* Add is_debitable info to KeyedAccount meta to pass into programs

* Reinstate CreditOnlyLocks check on lock_account

* Rework CreditAccountLocks to remove strong_count usage

* Add multi-threaded credit-only locks test

* Improve RwLocks usage

* Review comments: panic if bad things happen; tighter code

* Assert lock_accounts race does not happen

* Revert panic if bad things happen; not a bad thing
2019-06-27 17:25:10 -04:00
979df17328 Bump serde_derive from 1.0.93 to 1.0.94 (#4856)
Bumps [serde_derive](https://github.com/serde-rs/serde) from 1.0.93 to 1.0.94.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.93...v1.0.94)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-27 13:18:19 -07:00
6cec61dcfc Re-enable tests (#4848) 2019-06-27 12:09:14 -07:00
a9b044f0ab Fix banking_stage benchmark sends (#4850)
Only one big batch was being sent so only 1
thread active at a time in the benchmark.
2019-06-27 10:37:33 +02:00
fbea9d8621 Page-pin packet memory for cuda (#4250)
* Page-pin packet memory for cuda

Bring back recyclers and pin offset buffers

* Add packet recycler to streamer

* Add set_pinnable to sigverify vecs to pin them

* Add packets reset test

* Add test for recycler and reduce the gc lock critical section
* Add comments/tests to cuda_runtime

* Add recycler to recv_blobs path.

* Add trace/names for debug and PacketsRecycler to bench-streamer

* Predict realloc and unpin beforehand.

* Add helper to reserve and pin

* Cap buffered packets length

* Call cuda wrapper functions
2019-06-27 09:32:32 +02:00
44a572416d Save snapshots followed by accounts to avoid stale account data (#4847)
* save snapshots before account stores

* update comment
2019-06-26 23:19:55 -07:00
97c97db97e Fix early exit clearing all buffered packets (#4810) 2019-06-26 22:39:50 -07:00
b8ae025f90 rsync of ledger/ and state.tgz now works on both macOS and Linux (#4845)
automerge
2019-06-26 22:10:24 -07:00
27221e28f6 Use default pubkey for solana-install sanity check 2019-06-26 21:49:22 -07:00
9a52b01171 Change to crossbeam channel in banking_threads VerifiedReceiver (#4822)
* Add crossbeam channel instead of channel in banking_stage
2019-06-26 18:42:27 -07:00
8cea650535 Handle NaN and inifinite point values (#4839) 2019-06-26 18:33:52 -07:00
531679eeaf Bump generic-array from 0.13.0 to 0.13.1 (#4801)
Bumps [generic-array](https://github.com/fizyk20/generic-array) from 0.13.0 to 0.13.1.
- [Release notes](https://github.com/fizyk20/generic-array/releases)
- [Changelog](https://github.com/fizyk20/generic-array/blob/master/CHANGELOG.md)
- [Commits](https://github.com/fizyk20/generic-array/commits)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-26 17:47:12 -06:00
850f77ab3b Minor refactor of duplicated reward claim logic (#4835)
automerge
2019-06-26 15:01:45 -07:00
4a10fd3272 Upload all artifacts 2019-06-26 14:37:18 -07:00
9e2eb9e4f9 Set CI_REPO_SLUG correctly for the solana-secondary pipeline 2019-06-26 14:37:18 -07:00
8120b57f17 Setup reward pools in genesis (#4831)
automerge
2019-06-26 13:51:17 -07:00
f651c0922a Airdrop more token in wallet sanity due to fee (#4830)
automerge
2019-06-26 13:32:58 -07:00
8d2ec20201 Tidied up intro paragraph (#4819)
I tidied up the intro paragraph!
2019-06-26 12:12:25 -07:00
dce1f80aac Made tiny change to second paragraph (#4820)
Replaced 'it's' with 'Solana'
2019-06-26 12:12:11 -07:00
df1c473341 Add storage point tracking and tie in storage rewards to economics (#4824)
* Add storage point tracking and tie in storage rewards to epochs and economics

* Prevent validators from updating their validations for a segment

* Fix test

* Retain syscall scoping for readability

* Update Credits to own epoch tracking
2019-06-26 10:40:03 -07:00
8a64e1ddc3 add fee burning (#4818) 2019-06-26 10:13:21 -07:00
eb47538a82 Bump chrono from 0.4.6 to 0.4.7 (#4812)
Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.6 to 0.4.7.
- [Release notes](https://github.com/chronotope/chrono/releases)
- [Changelog](https://github.com/chronotope/chrono/blob/master/CHANGELOG.md)
- [Commits](https://github.com/chronotope/chrono/compare/v0.4.6...v0.4.7)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-26 07:58:41 -06:00
861d6468ca Stake weighted pruning for the gossip network (#4769)
* Stake weighted pruning

* Fix compile error

* Fix clippy errors

* Add helper for creating a connected staked network

* Bug fixes and test groundwork

* Small refactor

* Anatoly's feedback and tests

* Doc updates

* @rob-solana's feedback

* Fix test bug and add log trace

* @rob-solana's feedback
2019-06-26 00:30:16 -07:00
d6737b8cc9 Set epoch schedule in set_root in leader schedule cache (#4821) 2019-06-26 00:19:48 -07:00
30592f2b12 Integration tests for stake API (#4811)
* more tests for rewards redemption

* break circular deps

* code review
2019-06-25 23:00:35 -07:00
1f950781c2 Use temp path for append_vec tests (#4765)
* Use temp path for serialize test

* set account path
2019-06-25 16:11:57 -07:00
f20ba423ca Merklize PoH TX mixin hash (#4644) 2019-06-25 14:44:27 -06:00
c5e6ebb496 Create snapshots sparsely (#4815) 2019-06-25 12:10:17 -07:00
9e7f618cff Set proper count value for account stores (#4797)
* set count values for store accounts

* Use AppendVecId type
2019-06-25 07:21:45 -07:00
74a06e4230 Update thinclient to resend the same tx until its blockhash expires (#4807) 2019-06-24 16:46:34 -07:00
70f93cc126 remove mining_pool from stake_state (#4804) 2019-06-24 16:01:02 -07:00
3f8ff23125 Forward transactions to the leader for next Nth slot (#4806)
* review comments
2019-06-24 15:56:50 -07:00
29611fb61d tower consensus naming (#4598)
s/locktower/tower/g
2019-06-24 13:41:23 -07:00
407b1d3e6f Bump console from 0.7.5 to 0.7.7 (#4798)
Bumps [console](https://github.com/mitsuhiko/console) from 0.7.5 to 0.7.7.
- [Release notes](https://github.com/mitsuhiko/console/releases)
- [Commits](https://github.com/mitsuhiko/console/compare/0.7.5...0.7.7)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-24 05:40:15 -07:00
206e62271b Ignore flaky test_two_unbalanced_stakes (#4794)
automerge
2019-06-23 20:55:43 -07:00
4e78354ab6 Bump serde_derive from 1.0.92 to 1.0.93 (#4790)
Bumps [serde_derive](https://github.com/serde-rs/serde) from 1.0.92 to 1.0.93.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.92...v1.0.93)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-23 20:23:41 -07:00
1561d9c8d4 Remove --storage-mining-pool-lamports (#4792) 2019-06-23 20:19:53 -07:00
0e1480b84e Bump serde from 1.0.92 to 1.0.93 (#4791)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.92 to 1.0.93.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.92...v1.0.93)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
2019-06-23 17:23:41 -07:00
fd6047d1c5 Add validation tip 2019-06-23 09:10:39 -07:00
b0467be393 Add quotes 2019-06-23 09:09:12 -07:00
1b0b095813 Setup v0.1[78] backport label 2019-06-23 09:04:43 -07:00
bd43724dfc Don't set automerge label the PR has status failures 2019-06-22 20:52:32 -07:00
11992946a4 Add storage reward pools (#4779) 2019-06-22 17:18:35 -07:00
0cc8a841ab set automerge label on mergify backport PRs (#4788) 2019-06-22 09:00:00 -07:00
23b6b85bf0 Prevent Travis/Appveyor from trying to build mergify branches (#4786) 2019-06-22 08:42:27 -07:00
96b56fa6f7 Update authorized public key (#4783) 2019-06-22 08:33:39 -07:00
405ca1bcb2 Add instructions and processor for stake deactivation (#4781)
automerge
2019-06-21 23:45:03 -07:00
c6316bb24b Initial mergify config 2019-06-21 22:50:17 -07:00
b7f169e06e Program instruction to withdraw un-staked lamports from stake account (#4780) 2019-06-21 22:28:34 -07:00
e4b466874c Remove storage-mining-pool-keypair arg 2019-06-21 21:38:03 -07:00
9911942dbd Increment cargo.toml files to v0.17.0 2019-06-22 04:35:25 +00:00
395 changed files with 19681 additions and 6767 deletions

View File

@ -4,7 +4,7 @@ version: '{build}'
branches:
only:
- master
- /v[0-9.]+/
- /^v[0-9.]+/
cache:
- '%USERPROFILE%\.cargo'
@ -16,7 +16,7 @@ build_script:
notifications:
- provider: Slack
incoming_webhook:
secure: 6HTXVh+FBz29LGJb+taFOo9dqoADfo9xyAszeyXZF5Ub9t5NERytKAR35B2wb+uIOOCBF8+JhmH4437Cgf/ti4IqvURzW1QReXK7eQhn1EI=
secure: 6HnLbeS6/Iv7JSMrrHQ7V9OSIjH/3KFzvZiinNWgQqEN0e9A6zaE4MwEXUYDWbcvVJiQneWit6dswY8Scoms2rS1PWEN5N6sjgLgyzroptc=
channel: ci-status
on_build_success: false
on_build_failure: true
@ -25,16 +25,16 @@ notifications:
deploy:
- provider: S3
access_key_id:
secure: ptvqM/yvgeTeA12XOzybH1KYNh95AdfEvqoH9mvP2ic=
secure: G6uzyGqbkMCXS2+sCeBCT/+s/11AHLWXCuGayfKcMEE=
secret_access_key:
secure: IkrgBlz5hdxvwcJdMXyyHUrpWhKa6fXLOD/8rm/rjKqYCdrba9B8V1nLZVrzXGGy
secure: Lc+aVrbcPSXoDV7h2J7gqKT+HX0n3eEzp3JIrSP2pcKxbAikGnCtOogCiHO9/er2
bucket: release.solana.com
region: us-west-1
set_public: true
- provider: GitHub
auth_token:
secure: vQ3jMl5LQrit6+TQONA3ZgQjZ/Ej62BN2ReVb2NSOwjITHMu1131hjc3dOrMEZL6
secure: JdggY+mrznklWDcV0yvetHhD9eRcNdc627q6NcZdZAJsDidYcGgZ/tgYJiXb9D1A
draft: false
prerelease: false
on:

View File

@ -33,3 +33,10 @@ source ci/env.sh
kill -9 "$victim" || true
done
)
# HACK: These are in our docker images, need to be removed from CARGO_HOME
# because we try to cache downloads across builds with CARGO_HOME
# cargo lacks a facility for "system" tooling, always tries CARGO_HOME first
cargo uninstall cargo-audit || true
cargo uninstall svgbob_cli || true
cargo uninstall mdbook || true

1
.gitignore vendored
View File

@ -23,3 +23,4 @@ log-*.txt
# intellij files
/.idea/
/solana.iml
/.vscode/

45
.mergify.yml Normal file
View File

@ -0,0 +1,45 @@
# Validate your changes with:
#
# $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
#
# https://doc.mergify.io/
pull_request_rules:
- name: remove outdated reviews
conditions:
- base=master
actions:
dismiss_reviews:
changes_requested: true
- name: set automerge label on mergify backport PRs
conditions:
- author=mergify[bot]
- head~=^mergify/bp/
- "#status-failure=0"
actions:
label:
add:
- automerge
- name: v0.16 backport
conditions:
- base=master
- label=v0.16
actions:
backport:
branches:
- v0.16
- name: v0.17 backport
conditions:
- base=master
- label=v0.17
actions:
backport:
branches:
- v0.17
- name: v0.18 backport
conditions:
- base=master
- label=v0.18
actions:
backport:
branches:
- v0.18

View File

@ -4,7 +4,7 @@ os:
language: rust
cache: cargo
rust:
- 1.35.0
- 1.36.0
install:
- source ci/rust-version.sh
@ -17,7 +17,7 @@ script:
branches:
only:
- master
- /v.*/
- /^v\d+\.\d+(\.\d+)?(-\S*)?$/
notifications:
slack:

3692
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -3,6 +3,7 @@ members = [
"bench-exchange",
"bench-streamer",
"bench-tps",
"sdk-c",
"chacha-sys",
"client",
"core",
@ -16,6 +17,7 @@ members = [
"ledger-tool",
"logger",
"merkle-tree",
"measure",
"metrics",
"netutil",
"programs/bpf",
@ -28,9 +30,13 @@ members = [
"programs/exchange_api",
"programs/exchange_program",
"programs/failure_program",
"programs/move_loader_api",
"programs/move_loader_program",
"programs/librapay_api",
"programs/noop_program",
"programs/stake_api",
"programs/stake_program",
"programs/stake_tests",
"programs/storage_api",
"programs/storage_program",
"programs/token_api",
@ -41,7 +47,11 @@ members = [
"runtime",
"sdk",
"upload-perf",
"validator-info",
"vote-signer",
"wallet",
]
exclude = ["programs/bpf/rust/noop"]
exclude = [
"programs/bpf/rust/noop",
]

View File

@ -127,12 +127,9 @@ Remote Testnets
We maintain several testnets:
* `testnet` - public stable testnet accessible via testnet.solana.com, with an https proxy for web apps at api.testnet.solana.com. Runs 24/7
* `testnet` - public stable testnet accessible via testnet.solana.com. Runs 24/7
* `testnet-beta` - public beta channel testnet accessible via beta.testnet.solana.com. Runs 24/7
* `testnet-edge` - public edge channel testnet accessible via edge.testnet.solana.com. Runs 24/7
* `testnet-perf` - permissioned stable testnet running a 24/7 soak test
* `testnet-beta-perf` - permissioned beta channel testnet running a multi-hour soak test weekday mornings
* `testnet-edge-perf` - permissioned edge channel testnet running a multi-hour soak test weekday mornings
## Deploy process

View File

@ -61,7 +61,7 @@ There are three release channels that map to branches as follows:
## Release Steps
### Advance the Channels
### Creating a new branch from master
#### Create the new branch
1. Pick your branch point for release on master.
@ -84,6 +84,12 @@ There are three release channels that map to branches as follows:
At this point, `ci/channel-info.sh` should show your freshly cut release branch as
"BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
### Update documentation
Document the new recommended version by updating
```export SOLANA_RELEASE=[new scheduled TESTNET_TAG value]```
in book/src/testnet-participation.md on the release (beta) branch.
### Make the Release
We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.
@ -106,6 +112,25 @@ We use [github's Releases UI](https://github.com/solana-labs/solana/releases) fo
1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the
release branch.
### Publish updated Book
We maintain three copies of the "book" as official documentation:
1) "Book" is the documentation for the latest official release. This should get manually updated whenever a new release is made. It is published here:
https://solana-labs.github.io/book/
2) "Book-edge" tracks the tip of the master branch and updates automatically.
https://solana-labs.github.io/book-edge/
3) "Book-beta" tracks the tip of the beta branch and updates automatically.
https://solana-labs.github.io/book-beta/
To manually trigger an update of the "Book", create a new job of the manual-update-book pipeline.
Set the tag of the latest release as the PUBLISH_BOOK_TAG environment variable.
```bash
PUBLISH_BOOK_TAG=v0.16.6
```
https://buildkite.com/solana-labs/manual-update-book
### Update software on testnet.solana.com
The testnet running on testnet.solana.com is set to use a fixed release tag
@ -145,12 +170,6 @@ TESTNET_TAG=[same value as used in TESTNET_TAG in the schedules]
TESTNET_OP=create-and-start
```
#### Update documentation
Document the new recommended version by updating
```export SOLANA_RELEASE=[new scheduled TESTNET_TAG value]```
in book/src/testnet-participation.md for both edge and beta channel branches.
### Alert the community
Notify Discord users on #validator-support that a new release for

View File

@ -1,3 +1,4 @@
/target/
/config/
/config-local/
/farf/

View File

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.16.0"
version = "0.17.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -12,29 +12,29 @@ publish = false
bincode = "1.1.4"
bs58 = "0.2.0"
clap = "2.32.0"
env_logger = "0.6.0"
env_logger = "0.6.2"
itertools = "0.8.0"
log = "0.4.6"
log = "0.4.7"
num-derive = "0.2"
num-traits = "0.2"
rand = "0.6.5"
rayon = "1.1.0"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
serde = "1.0.97"
serde_derive = "1.0.97"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
# solana-runtime = { path = "../solana/runtime"}
solana = { path = "../core", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.0" }
solana-drone = { path = "../drone", version = "0.16.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.16.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-metrics = { path = "../metrics", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-runtime = { path = "../runtime", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
untrusted = "0.6.2"
solana = { path = "../core", version = "0.17.0" }
solana-client = { path = "../client", version = "0.17.0" }
solana-drone = { path = "../drone", version = "0.17.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.17.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.17.0" }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-metrics = { path = "../metrics", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana-runtime = { path = "../runtime", version = "0.17.0" }
solana-sdk = { path = "../sdk", version = "0.17.0" }
untrusted = "0.7.0"
ws = "0.8.1"
[features]

View File

@ -6,10 +6,10 @@ learn how to start and interact with the exchange.
### Table of Contents
[Overview](#Overview)<br>
[Premiss](#Premiss)<br>
[Premise](#Premise)<br>
[Exchange startup](#Exchange-startup)<br>
[Trade requests](#Trade-requests)<br>
[Trade cancellations](#Trade-cancellations)<br>
[Order Requests](#Trade-requests)<br>
[Order Cancellations](#Trade-cancellations)<br>
[Trade swap](#Trade-swap)<br>
[Exchange program operations](#Exchange-program-operations)<br>
[Quotes and OHLCV](#Quotes-and-OHLCV)<br>
@ -22,9 +22,9 @@ An exchange is a marketplace where one asset can be traded for another. This
demo demonstrates one way to host an exchange on the Solana blockchain by
emulating a currency exchange.
The assets are virtual tokens held by investors who may post trade requests to
The assets are virtual tokens held by investors who may post order requests to
the exchange. A Swapper monitors the exchange and posts swap requests for
matching trade orders. All the transactions can execute concurrently.
matching orders. All the transactions can execute concurrently.
## Premise
@ -59,43 +59,43 @@ matching trade orders. All the transactions can execute concurrently.
ratios are represented as fixed point numbers. The fixed point scaler is
defined in
[exchange_state.rs](https://github.com/solana-labs/solana/blob/c2fdd1362a029dcf89c8907c562d2079d977df11/programs/exchange_api/src/exchange_state.rs#L7)
- Trade request
- Order request
- A Solana transaction executed by the exchange requesting the trade of one
type of token for another. Trade requests are made up of the token pair,
type of token for another. Order requests are made up of the token pair,
the direction of the trade, quantity of the primary token, the price ratio,
and the two token accounts to be credited/deducted. An example trade
request looks like "T AB 5 2" which reads "Exchange 5 A tokens to B tokens
at a price ratio of 1:2" A fulfilled trade would result in 5 A tokens
deducted and 10 B tokens credited to the trade initiator's token accounts.
Successful trade requests result in a trade order.
- Trade order
- The result of a successful trade request. Trade orders are stored in
accounts owned by the submitter of the trade request. They can only be
Successful order requests result in an order.
- Order
- The result of a successful order request. Orders are stored in
accounts owned by the submitter of the order request. They can only be
canceled by their owner but can be used by anyone in a trade swap. They
contain the same information as the trade request.
contain the same information as the order request.
- Price spread
- The difference between the two matching trade orders. The spread is the
- The difference between the two matching orders. The spread is the
profit of the Swapper initiating the swap request.
- Swap requirements
- Policies that result in a successful trade swap.
- Swap request
- A request to exchange tokens between to trade orders
- A request to exchange tokens between two orders
- Trade swap
- A successful trade. A swap consists of two matching trade orders that meet
- A successful trade. A swap consists of two matching orders that meet
swap requirements. A trade swap may not wholly satisfy one or both of the
trade orders in which case the trade orders are adjusted appropriately. As
orders in which case the orders are adjusted appropriately. As
long as the swap requirements are met there will be an exchange of tokens
between accounts. Any price spread is deposited into the Swapper's profit
account. All trade swaps are recorded in a new account for posterity.
- Investor
- Individual investors who hold a number of tokens and wish to trade them on
the exchange. Investors operate as Solana thin clients who own a set of
accounts containing tokens and/or trade requests. Investors post
accounts containing tokens and/or order requests. Investors post
transactions to the exchange in order to request tokens and post or cancel
trade requests.
order requests.
- Swapper
- An agent who facilitates trading between investors. Swappers operate as
Solana thin clients who monitor all the trade orders looking for a trade
Solana thin clients who monitor all the orders looking for a trade
match. Once found, the Swapper issues a swap request to the exchange.
Swappers are the engine of the exchange and are rewarded for their efforts by
accumulating the price spreads of the swaps they initiate. Swappers also
@ -123,7 +123,7 @@ the investors that trades submitted after that point will be analyzed. <!--This
is not ideal, and instead investors should be able to submit trades at any time,
and the Swapper could come and go without missing a trade. One way to achieve
this is for the Swapper to read the current state of all accounts looking for all
open trade orders.-->
open orders.-->
Investors will initially query the exchange to discover their current balance
for each type of token. If the investor does not already have an account for
@ -181,19 +181,19 @@ pub enum ExchangeInstruction {
}
```
## Trade requests
## Order Requests
When an investor decides to exchange a token of one type for another, they
submit a transaction to the Solana Blockchain containing a trade request, which,
if successful, is turned into a trade order. Trade orders do not expire but are
cancellable. <!-- Trade orders should have a timestamp to enable trade
expiration --> When a trade order is created, tokens are deducted from a token
account and the trade order acts as an escrow. The tokens are held until the
trade order is fulfilled or canceled. If the direction is `To`, then the number
submit a transaction to the Solana Blockchain containing an order request, which,
if successful, is turned into an order. Orders do not expire but are
cancellable. <!-- Orders should have a timestamp to enable trade
expiration --> When an order is created, tokens are deducted from a token
account and the order acts as an escrow. The tokens are held until the
order is fulfilled or canceled. If the direction is `To`, then the number
of `tokens` are deducted from the primary account, if `From` then `tokens`
multiplied by `price` are deducted from the secondary account. Trade orders are
multiplied by `price` are deducted from the secondary account. Orders are
no longer valid when the number of `tokens` goes to zero, at which point they
can no longer be used. <!-- Could support refilling trade orders, so trade order
can no longer be used. <!-- Could support refilling orders, so order
accounts are refilled rather than accumulating -->
```rust
@ -205,7 +205,7 @@ pub enum Direction {
From,
}
pub struct TradeRequestInfo {
pub struct OrderRequestInfo {
/// Direction of trade
pub direction: Direction,
@ -224,7 +224,7 @@ pub struct TradeRequestInfo {
}
pub enum ExchangeInstruction {
/// Trade request
/// Order request
/// key 0 - Signer
/// key 1 - Account in which to record the swap
/// key 2 - Token account associated with this trade
@ -233,7 +233,7 @@ pub enum ExchangeInstruction {
/// Trade accounts are populated with this structure
pub struct TradeOrderInfo {
/// Owner of the trade order
/// Owner of the order
pub owner: Pubkey,
/// Direction of the exchange
pub direction: Direction,
@ -252,7 +252,7 @@ pub struct TradeOrderInfo {
}
```
## Trade cancellations
## Order cancellations
An investor may cancel a trade at anytime, but only trades they own. If the
cancellation is successful, any tokens held in escrow are returned to the
@ -260,9 +260,9 @@ account from which they came.
```rust
pub enum ExchangeInstruction {
/// Trade cancellation
/// Order cancellation
/// key 0 - Signer
/// key 1 -Trade order to cancel
/// key 1 - Order to cancel
TradeCancellation,
}
```
@ -270,14 +270,14 @@ pub enum ExchangeInstruction {
## Trade swaps
The Swapper is monitoring the accounts assigned to the exchange program and
building a trade-order table. The trade order table is used to identify
matching trade orders which could be fulfilled. When a match is found the
building a trade-order table. The order table is used to identify
matching orders which could be fulfilled. When a match is found the
Swapper should issue a swap request. Swap requests may not satisfy the entirety
of either order, but the exchange will greedily fulfill it. Any leftover tokens
in either account will keep the trade order valid for further swap requests in
in either account will keep the order valid for further swap requests in
the future.
Matching trade orders are defined by the following swap requirements:
Matching orders are defined by the following swap requirements:
- Opposite polarity (one `To` and one `From`)
- Operate on the same token pair
@ -379,8 +379,8 @@ pub enum ExchangeInstruction {
/// Trade swap request
/// key 0 - Signer
/// key 1 - Account in which to record the swap
/// key 2 - 'To' trade order
/// key 3 - `From` trade order
/// key 2 - 'To' order
/// key 3 - `From` order
/// key 4 - Token account associated with the To Trade
/// key 5 - Token account associated with From trade
/// key 6 - Token account in which to deposit the Swappers profit from the swap.
@ -391,9 +391,9 @@ pub enum ExchangeInstruction {
pub struct TradeSwapInfo {
/// Pair swapped
pub pair: TokenPair,
/// `To` trade order
/// `To` order
pub to_trade_order: Pubkey,
/// `From` trade order
/// `From` order
pub from_trade_order: Pubkey,
/// Number of primary tokens exchanged
pub primary_tokens: u64,
@ -424,22 +424,22 @@ pub enum ExchangeInstruction {
/// the exchange has a limitless number of tokens it can transfer.
TransferRequest(Token, u64),
/// Trade request
/// order request
/// key 0 - Signer
/// key 1 - Account in which to record the swap
/// key 2 - Token account associated with this trade
TradeRequest(TradeRequestInfo),
/// Trade cancellation
/// Order cancellation
/// key 0 - Signer
/// key 1 -Trade order to cancel
/// key 1 - order to cancel
TradeCancellation,
/// Trade swap request
/// key 0 - Signer
/// key 1 - Account in which to record the swap
/// key 2 - 'To' trade order
/// key 3 - `From` trade order
/// key 2 - `To` order
/// key 3 - `From` order
/// key 4 - Token account associated with the To Trade
/// key 5 - Token account associated with From trade
/// key 6 - Token account in which to deposit the Swappers profit from the swap.
@ -478,6 +478,3 @@ To also see the cluster messages:
```bash
$ RUST_LOG=solana_bench_exchange=info,solana=info cargo test --release -- --nocapture test_exchange_local_cluster
```

View File

@ -332,7 +332,7 @@ fn do_tx_transfers<T>(
struct TradeInfo {
trade_account: Pubkey,
order_info: TradeOrderInfo,
order_info: OrderInfo,
}
#[allow(clippy::too_many_arguments)]
fn swapper<T>(
@ -509,7 +509,7 @@ fn trader<T>(
T: Client,
{
// TODO Hard coded for now
let pair = TokenPair::AB;
let pair = AssetPair::default();
let tokens = 1;
let price = 1000;
let mut account_group: usize = 0;
@ -538,7 +538,7 @@ fn trader<T>(
} else {
Direction::To
};
let order_info = TradeOrderInfo {
let order_info = OrderInfo {
/// Owner of the trade order
owner: Pubkey::default(), // don't care
direction,
@ -646,6 +646,20 @@ where
false
}
/// Returns `true` if any account key in `tx` after the first one (index 0 is
/// skipped — presumably the funding source; confirm against the caller) holds
/// a balance of at least `amount` lamports. Balance lookup failures are
/// treated as a balance of 0.
fn verify_funding_transfer<T: SyncClient + ?Sized>(
    client: &T,
    tx: &Transaction,
    amount: u64,
) -> bool {
    tx.message().account_keys[1..]
        .iter()
        .any(|key| client.get_balance(key).unwrap_or(0) >= amount)
}
pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
let total = lamports * (dests.len() as u64 + 1);
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
@ -703,6 +717,7 @@ pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamp
.collect();
let mut retries = 0;
let amount = chunk[0].1[0].1;
while !to_fund_txs.is_empty() {
let receivers = to_fund_txs
.iter()
@ -731,7 +746,7 @@ pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamp
let mut waits = 0;
loop {
sleep(Duration::from_millis(200));
to_fund_txs.retain(|(_, tx)| !verify_transfer(client, &tx));
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
if to_fund_txs.is_empty() {
break;
}

View File

@ -10,7 +10,7 @@ use std::{error, fmt};
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ToOrder {
pub pubkey: Pubkey,
pub info: TradeOrderInfo,
pub info: OrderInfo,
}
impl Ord for ToOrder {
@ -26,7 +26,7 @@ impl PartialOrd for ToOrder {
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FromOrder {
pub pubkey: Pubkey,
pub info: TradeOrderInfo,
pub info: OrderInfo,
}
impl Ord for FromOrder {
@ -95,11 +95,7 @@ impl OrderBook {
// pub fn cancel(&mut self, pubkey: Pubkey) -> Result<(), Box<dyn error::Error>> {
// Ok(())
// }
pub fn push(
&mut self,
pubkey: Pubkey,
info: TradeOrderInfo,
) -> Result<(), Box<dyn error::Error>> {
pub fn push(&mut self, pubkey: Pubkey, info: OrderInfo) -> Result<(), Box<dyn error::Error>> {
check_trade(info.direction, info.tokens, info.price)?;
match info.direction {
Direction::To => {

View File

@ -1 +1,2 @@
/target/
/farf/

View File

@ -2,16 +2,16 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.16.0"
version = "0.17.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana = { path = "../core", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana = { path = "../core", version = "0.17.0" }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
[features]
cuda = ["solana/cuda"]

View File

@ -1,4 +1,5 @@
use clap::{crate_description, crate_name, crate_version, App, Arg};
use solana::packet::PacketsRecycler;
use solana::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
@ -16,7 +17,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut msgs = Packets::default();
msgs.packets.resize(10, Packet::default());
for w in &mut msgs.packets {
for w in msgs.packets.iter_mut() {
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
}
@ -74,6 +75,7 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
let recycler = PacketsRecycler::default();
for _ in 0..num_sockets {
let read = solana_netutil::bind_to(port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@ -83,7 +85,13 @@ fn main() -> Result<()> {
let (s_reader, r_reader) = channel();
read_channels.push(r_reader);
read_threads.push(receiver(Arc::new(read), &exit, s_reader));
read_threads.push(receiver(
Arc::new(read),
&exit,
s_reader,
recycler.clone(),
"bench-streamer-test",
));
}
let t_producer1 = producer(&addr, exit.clone());

View File

@ -1,3 +1,4 @@
/target/
/config/
/config-local/
/farf/

View File

@ -2,27 +2,30 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.16.0"
version = "0.17.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
bincode = "1.1.4"
clap = "2.33.0"
log = "0.4.6"
log = "0.4.7"
rayon = "1.1.0"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
serde = "1.0.97"
serde_derive = "1.0.97"
serde_json = "1.0.40"
serde_yaml = "0.8.9"
solana = { path = "../core", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.0" }
solana-drone = { path = "../drone", version = "0.16.0" }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-metrics = { path = "../metrics", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-runtime = { path = "../runtime", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana = { path = "../core", version = "0.17.0" }
solana-client = { path = "../client", version = "0.17.0" }
solana-drone = { path = "../drone", version = "0.17.0" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.17.0" }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-metrics = { path = "../metrics", version = "0.17.0" }
solana-measure = { path = "../measure", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana-runtime = { path = "../runtime", version = "0.17.0" }
solana-sdk = { path = "../sdk", version = "0.17.0" }
[features]
cuda = ["solana/cuda"]

View File

@ -1,13 +1,16 @@
use solana_metrics;
use bincode;
use log::*;
use rayon::prelude::*;
use solana::gen_keys::GenKeys;
use solana_client::perf_utils::{sample_txs, SampleStats};
use solana_drone::drone::request_airdrop_transaction;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_sdk::client::Client;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
@ -24,6 +27,8 @@ use std::thread::Builder;
use std::time::Duration;
use std::time::Instant;
use solana_librapay_api::librapay_transaction;
pub const MAX_SPENDS_PER_TX: u64 = 4;
pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 128;
@ -43,6 +48,7 @@ pub struct Config {
pub duration: Duration,
pub tx_count: usize,
pub sustained: bool,
pub use_move: bool,
}
impl Default for Config {
@ -54,6 +60,7 @@ impl Default for Config {
duration: Duration::new(std::u64::MAX, 0),
tx_count: 500_000,
sustained: false,
use_move: false,
}
}
}
@ -63,6 +70,8 @@ pub fn do_bench_tps<T>(
config: Config,
gen_keypairs: Vec<Keypair>,
keypair0_balance: u64,
program_id: &Pubkey,
libra_mint_id: &Pubkey,
) -> u64
where
T: 'static + Client + Send + Sync,
@ -73,6 +82,7 @@ where
thread_batch_sleep_ms,
duration,
tx_count,
use_move,
sustained,
} = config;
@ -165,6 +175,9 @@ where
&keypairs[len..],
threads,
reclaim_lamports_back_to_source_account,
use_move,
&program_id,
&libra_mint_id,
);
// In sustained mode overlap the transfers with generation
// this has higher average performance but lower peak performance
@ -228,6 +241,9 @@ fn generate_txs(
dest: &[Keypair],
threads: usize,
reclaim: bool,
use_move: bool,
libra_pay_program_id: &Pubkey,
libra_mint_id: &Pubkey,
) {
let tx_count = source.len();
println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
@ -241,10 +257,25 @@ fn generate_txs(
let transactions: Vec<_> = pairs
.par_iter()
.map(|(id, keypair)| {
(
system_transaction::create_user_account(id, &keypair.pubkey(), 1, *blockhash),
timestamp(),
)
if use_move {
(
librapay_transaction::transfer(
libra_pay_program_id,
libra_mint_id,
&id,
&id,
&keypair.pubkey(),
1,
*blockhash,
),
timestamp(),
)
} else {
(
system_transaction::create_user_account(id, &keypair.pubkey(), 1, *blockhash),
timestamp(),
)
}
})
.collect();
@ -346,10 +377,12 @@ pub fn fund_keys<T: Client>(
source: &Keypair,
dests: &[Keypair],
total: u64,
lamports_per_signature: u64,
max_fee: u64,
mut extra: u64,
) {
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
println!("funding keys {}", dests.len());
while !notfunded.is_empty() {
@ -362,7 +395,8 @@ pub fn fund_keys<T: Client>(
break;
}
let start = notfunded.len() - max_units as usize;
let per_unit = (f.1 - max_units * lamports_per_signature) / max_units;
let fees = if extra > 0 { max_fee } else { 0 };
let per_unit = (f.1 - lamports_per_account - fees) / max_units;
let moves: Vec<_> = notfunded[start..]
.iter()
.map(|k| (k.pubkey(), per_unit))
@ -374,6 +408,7 @@ pub fn fund_keys<T: Client>(
if !moves.is_empty() {
to_fund.push((f.0, moves));
}
extra -= 1;
}
// try to transfer a "few" at a time with recent blockhash
@ -388,13 +423,10 @@ pub fn fund_keys<T: Client>(
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
(
k.clone(),
Transaction::new_unsigned_instructions(system_instruction::transfer_many(
&k.pubkey(),
&m,
)),
)
let tx = Transaction::new_unsigned_instructions(
system_instruction::transfer_many(&k.pubkey(), &m),
);
(k.clone(), tx)
})
.collect();
@ -582,28 +614,166 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
}
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> Vec<Keypair> {
pub fn generate_keypairs(
seed_keypair: &Keypair,
count: u64,
use_move: bool,
) -> (Vec<Keypair>, u64) {
let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
let mut rnd = GenKeys::new(seed);
let mut total_keys = 1;
let mut total_keys = 0;
let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded
let mut delta = 1;
while total_keys < count {
total_keys *= MAX_SPENDS_PER_TX;
extra += delta;
delta *= MAX_SPENDS_PER_TX;
total_keys += delta;
}
rnd.gen_n_keypairs(total_keys)
if use_move {
// Move funding is a naive loop that doesn't
// need aligned number of keys.
(rnd.gen_n_keypairs(count), extra)
} else {
(rnd.gen_n_keypairs(total_keys), extra)
}
}
/// Creates and funds Move/Libra accounts for every keypair in `keypairs`:
///   1. creates a dedicated libra funding account,
///   2. mints `total` tokens to it,
///   3. creates on-chain accounts for all keypairs in batches of 8,
///   4. transfers an equal share (`total / keypairs.len()`) to each keypair,
///      then blocks until every transfer is confirmed and its balance is
///      observable.
///
/// Panics (via `unwrap`/`expect`) on any RPC or transaction failure — this is
/// bench setup code, not production error handling.
///
/// NOTE(review): divides by `keypairs.len()` below, so this assumes
/// `keypairs` is non-empty — TODO confirm all callers guarantee that.
fn fund_move_keys<T: Client>(
    client: &T,
    funding_key: &Keypair,
    keypairs: &[Keypair],
    total: u64,
    libra_pay_program_id: &Pubkey,
    libra_mint_program_id: &Pubkey,
    libra_mint_key: &Keypair,
) {
    let (mut blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();

    info!("creating the libra funding account..");
    let libra_funding_key = Keypair::new();
    let tx = librapay_transaction::create_account(
        funding_key,
        &libra_funding_key.pubkey(),
        1,
        blockhash,
    );
    let sig = client
        .async_send_transaction(tx)
        .expect("create_account in generate_and_fund_keypairs");
    client.poll_for_signature(&sig).unwrap();

    info!("minting to funding keypair");
    // Mint the entire `total` into the freshly created funding account.
    let tx = librapay_transaction::mint_tokens(
        &libra_mint_program_id,
        funding_key,
        libra_mint_key,
        &libra_funding_key.pubkey(),
        total,
        blockhash,
    );
    // NOTE(review): expect message is copy-pasted from the create_account
    // path above; it is misleading on this mint send.
    let sig = client
        .async_send_transaction(tx)
        .expect("create_account in generate_and_fund_keypairs");
    client.poll_for_signature(&sig).unwrap();

    info!("creating move accounts.. {}", keypairs.len());
    // Create accounts in fixed-size batches; each batch is sent and then
    // polled to completion before the next one goes out.
    let create_len = 8;
    let mut funding_time = Measure::start("funding_time");
    for (i, keys) in keypairs.chunks(create_len).enumerate() {
        // NOTE(review): this Measure is labeled "poll" but times the send.
        let mut tx_send = Measure::start("poll");
        let pubkeys: Vec<_> = keys.iter().map(|k| k.pubkey()).collect();
        let tx = librapay_transaction::create_accounts(funding_key, &pubkeys, 1, blockhash);
        let ser_size = bincode::serialized_size(&tx).unwrap();
        let sig = client
            .async_send_transaction(tx)
            .expect("create_account in generate_and_fund_keypairs");
        tx_send.stop();
        let mut poll = Measure::start("poll");
        client.poll_for_signature(&sig).unwrap();
        poll.stop();
        if i % 10 == 0 {
            // Refresh the blockhash periodically so later batches don't use
            // an expired one.
            blockhash = client.get_recent_blockhash().unwrap().0;
            info!(
                "size: {} created {} accounts of {} sig: {}us send: {}us",
                ser_size,
                i,
                (keypairs.len() / create_len),
                poll.as_us(),
                tx_send.as_us()
            );
        }
    }
    funding_time.stop();
    info!("funding accounts {}ms", funding_time.as_ms());

    // Fan the minted total out evenly; sends are fire-and-forget here, with
    // confirmation deferred to the loop below.
    let mut sigs = vec![];
    let tx_count = keypairs.len();
    let amount = total / (tx_count as u64);
    // NOTE(review): `[..tx_count]` is the whole slice (tx_count == len).
    for (i, key) in keypairs[..tx_count].iter().enumerate() {
        let tx = librapay_transaction::transfer(
            libra_pay_program_id,
            &libra_mint_key.pubkey(),
            funding_key,
            &libra_funding_key,
            &key.pubkey(),
            amount,
            blockhash,
        );
        let sig = client
            .async_send_transaction(tx.clone())
            .expect("create_account in generate_and_fund_keypairs");
        sigs.push((sig, key));
        if i % 50 == 0 {
            blockhash = client.get_recent_blockhash().unwrap().0;
        }
    }

    // Wait for every transfer: first for the signature to confirm, then for
    // the libra balance to actually reflect `amount`, retrying once a second.
    for (i, (sig, key)) in sigs.iter().enumerate() {
        let mut times = 0;
        loop {
            match client.poll_for_signature(&sig) {
                Ok(_) => {
                    break;
                }
                Err(e) => {
                    info!("e :{:?} waiting times: {} sig: {}", e, times, sig);
                    times += 1;
                    sleep(Duration::from_secs(1));
                }
            }
        }
        times = 0;
        loop {
            let balance = librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
            if amount != balance {
                info!("i: {} balance: {} times: {}", i, balance, times);
                times += 1;
                sleep(Duration::from_secs(1));
            } else {
                break;
            }
        }
        if i % 10 == 0 {
            info!("funding {} of {}", i, tx_count);
        }
    }
    info!("done..");
}
pub fn generate_and_fund_keypairs<T: Client>(
client: &T,
drone_addr: Option<SocketAddr>,
funding_pubkey: &Keypair,
funding_key: &Keypair,
tx_count: usize,
lamports_per_account: u64,
libra_keys: Option<(&Pubkey, &Pubkey, &Arc<Keypair>)>,
) -> Result<(Vec<Keypair>, u64)> {
info!("Creating {} keypairs...", tx_count * 2);
let mut keypairs = generate_keypairs(funding_pubkey, tx_count as u64 * 2);
let (mut keypairs, extra) =
generate_keypairs(funding_key, tx_count as u64 * 2, libra_keys.is_some());
info!("Get lamports...");
// Sample the first keypair, see if it has lamports, if so then resume.
@ -613,21 +783,34 @@ pub fn generate_and_fund_keypairs<T: Client>(
.unwrap_or(0);
if lamports_per_account > last_keypair_balance {
let (_, fee_calculator) = client.get_recent_blockhash().unwrap();
let extra =
let (_blockhash, fee_calculator) = client.get_recent_blockhash().unwrap();
let account_desired_balance =
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
let total = extra * (keypairs.len() as u64);
if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total)?;
let extra_fees = extra * fee_calculator.max_lamports_per_signature;
let total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client, &drone_addr.unwrap(), funding_key, total)?;
}
if let Some((libra_pay_program_id, libra_mint_program_id, libra_mint_key)) = libra_keys {
fund_move_keys(
client,
funding_key,
&keypairs,
total,
libra_pay_program_id,
libra_mint_program_id,
libra_mint_key,
);
} else {
fund_keys(
client,
funding_key,
&keypairs,
total,
fee_calculator.max_lamports_per_signature,
extra,
);
}
info!("adding more lamports {}", extra);
fund_keys(
client,
funding_pubkey,
&keypairs,
total,
fee_calculator.max_lamports_per_signature,
);
}
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
@ -644,9 +827,11 @@ mod tests {
use solana::validator::ValidatorConfig;
use solana_client::thin_client::create_client;
use solana_drone::drone::run_local_drone;
use solana_librapay_api::{upload_mint_program, upload_payment_program};
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::client::SyncClient;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_block::create_genesis_block;
use std::sync::mpsc::channel;
@ -665,47 +850,93 @@ mod tests {
assert_eq!(should_switch_directions(20, 101), false);
}
#[test]
fn test_bench_tps_local_cluster() {
fn test_bench_tps_local_cluster(config: Config) {
solana_logger::setup();
const NUM_NODES: usize = 1;
let cluster = LocalCluster::new(&ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 2_000_000,
cluster_lamports: 200_000_000,
validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
..ClusterConfig::default()
});
let drone_keypair = Keypair::new();
cluster.transfer(&cluster.funding_keypair, &drone_keypair.pubkey(), 1_000_000);
let (addr_sender, addr_receiver) = channel();
run_local_drone(drone_keypair, addr_sender, None);
let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
let mut config = Config::default();
config.tx_count = 100;
config.duration = Duration::from_secs(5);
cluster.transfer(
&cluster.funding_keypair,
&drone_keypair.pubkey(),
100_000_000,
);
let client = create_client(
(cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
FULLNODE_PORT_RANGE,
);
let (libra_mint_id, libra_pay_program_id) = if config.use_move {
let libra_mint_id = upload_mint_program(&drone_keypair, &client);
let libra_pay_program_id = upload_payment_program(&drone_keypair, &client);
(libra_mint_id, libra_pay_program_id)
} else {
(Pubkey::default(), Pubkey::default())
};
let (addr_sender, addr_receiver) = channel();
run_local_drone(drone_keypair, addr_sender, None);
let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
let lamports_per_account = 100;
let libra_keys = if config.use_move {
Some((
&libra_pay_program_id,
&libra_mint_id,
&cluster.libra_mint_keypair,
))
} else {
None
};
let (keypairs, _keypair_balance) = generate_and_fund_keypairs(
&client,
Some(drone_addr),
&config.id,
config.tx_count,
lamports_per_account,
libra_keys,
)
.unwrap();
let total = do_bench_tps(vec![client], config, keypairs, 0);
let total = do_bench_tps(
vec![client],
config,
keypairs,
0,
&libra_pay_program_id,
&cluster.libra_mint_keypair.pubkey(),
);
assert!(total > 100);
}
/// Runs the local-cluster bench with the default (non-Move) transfer path;
/// `use_move` stays at its `Default` value of `false`.
#[test]
fn test_bench_tps_local_cluster_solana() {
    let config = Config {
        tx_count: 100,
        duration: Duration::from_secs(10),
        ..Config::default()
    };
    test_bench_tps_local_cluster(config);
}
/// Runs the local-cluster bench with Move transactions enabled.
/// Marked `#[ignore]` so it does not run in the default test pass.
#[test]
#[ignore]
fn test_bench_tps_local_cluster_move() {
    let config = Config {
        tx_count: 100,
        duration: Duration::from_secs(10),
        use_move: true,
        ..Config::default()
    };
    test_bench_tps_local_cluster(config);
}
#[test]
fn test_bench_tps_bank_client() {
let (genesis_block, id) = create_genesis_block(10_000);
@ -718,9 +949,17 @@ mod tests {
config.duration = Duration::from_secs(5);
let (keypairs, _keypair_balance) =
generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20).unwrap();
generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20, None)
.unwrap();
do_bench_tps(clients, config, keypairs, 0);
do_bench_tps(
clients,
config,
keypairs,
0,
&Pubkey::default(),
&Pubkey::default(),
);
}
#[test]
@ -732,10 +971,36 @@ mod tests {
let lamports = 20;
let (keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports).unwrap();
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, None).unwrap();
for kp in &keypairs {
assert!(client.get_balance(&kp.pubkey()).unwrap() >= lamports);
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
}
}
/// Funds keypairs against a bank with a non-zero fee schedule and checks
/// that each account ends up with `lamports` plus one max signature fee.
#[test]
fn test_bench_tps_fund_keys_with_fees() {
    let (mut genesis_block, id) = create_genesis_block(10_000);
    genesis_block.fee_calculator = FeeCalculator::new(11);
    let client = BankClient::new(Bank::new(&genesis_block));

    let tx_count = 10;
    let lamports = 20;
    let (keypairs, _keypair_balance) =
        generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, None).unwrap();

    let max_fee = client
        .get_recent_blockhash()
        .unwrap()
        .1
        .max_lamports_per_signature;
    for kp in keypairs.iter() {
        assert_eq!(
            client.get_balance(&kp.pubkey()).unwrap(),
            lamports + max_fee
        );
    }
}
}

View File

@ -22,6 +22,7 @@ pub struct Config {
pub write_to_client_file: bool,
pub read_from_client_file: bool,
pub target_lamports_per_signature: u64,
pub use_move: bool,
}
impl Default for Config {
@ -40,6 +41,7 @@ impl Default for Config {
write_to_client_file: false,
read_from_client_file: false,
target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
use_move: false,
}
}
}
@ -100,6 +102,11 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
.long("sustained")
.help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
)
.arg(
Arg::with_name("use-move")
.long("use-move")
.help("Use Move language transactions to perform transfers."),
)
.arg(
Arg::with_name("tx_count")
.long("tx_count")
@ -211,5 +218,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
}
args.use_move = matches.is_present("use-move");
args
}

View File

@ -6,7 +6,8 @@ use crate::bench::{
};
use solana::gossip_service::{discover_cluster, get_multi_client};
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::Keypair;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::collections::HashMap;
use std::fs::File;
use std::io::prelude::*;
@ -37,10 +38,11 @@ fn main() {
write_to_client_file,
read_from_client_file,
target_lamports_per_signature,
use_move,
} = cli_config;
if write_to_client_file {
let keypairs = generate_keypairs(&id, tx_count as u64 * 2);
let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2, use_move);
let num_accounts = keypairs.len() as u64;
let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
@ -91,6 +93,10 @@ fn main() {
keypairs.push(Keypair::from_bytes(&bytes).unwrap());
last_balance = balance;
});
// Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
// This prevents the amount of storage needed for bench-tps accounts from creeping up
// across multiple runs.
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
(keypairs, last_balance)
} else {
generate_and_fund_keypairs(
@ -99,6 +105,7 @@ fn main() {
&id,
tx_count,
NUM_LAMPORTS_PER_ACCOUNT,
None,
)
.unwrap_or_else(|e| {
eprintln!("Error could not fund keys: {:?}", e);
@ -113,7 +120,15 @@ fn main() {
duration,
tx_count,
sustained,
use_move,
};
do_bench_tps(vec![client], config, keypairs, keypair_balance);
do_bench_tps(
vec![client],
config,
keypairs,
keypair_balance,
&Pubkey::new_rand(),
&Pubkey::new_rand(),
);
}

View File

@ -0,0 +1,18 @@
+------------+
| Bank-Merkle|
+------------+
^ ^
/ \
+-----------------+ +-------------+
| Bank-Diff-Merkle| | Block-Merkle|
+-----------------+ +-------------+
^ ^
/ \
+------+ +--------------------------+
| Hash | | Previous Bank-Diff-Merkle|
+------+ +--------------------------+
^ ^
/ \
+---------------+ +---------------+
| Hash(Account1)| | Hash(Account2)|
+---------------+ +---------------+

View File

@ -0,0 +1,19 @@
+---------------+
| Block-Merkle |
+---------------+
^ ^
/ \
+-------------+ +-------------+
| Entry-Merkle| | Entry-Merkle|
+-------------+ +-------------+
^ ^
/ \
+-------+ +-------+
| Hash | | Hash |
+-------+ +-------+
^ ^ ^ ^
/ | | \
+-----------------+ +-----------------+ +-----------------+ +---+
| Hash(T1, status)| | Hash(T2, status)| | Hash(T3, status)| | 0 |
+-----------------+ +-----------------+ +-----------------+ +---+

View File

@ -3,4 +3,4 @@ set -e
cd "$(dirname "$0")"
make -j"$(nproc)"
make -j"$(nproc)" test

View File

@ -4,11 +4,14 @@ MD_SRCS=$(wildcard src/*.md)
SVG_IMGS=$(BOB_SRCS:art/%.bob=src/img/%.svg) $(MSC_SRCS:art/%.msc=src/img/%.svg)
all: html/index.html
TARGET=html/index.html
TEST_STAMP=src/tests.ok
test: src/tests.ok
all: $(TARGET)
open: all
test: $(TEST_STAMP)
open: $(TEST_STAMP)
mdbook build --open
watch: $(SVG_IMGS)
@ -26,11 +29,11 @@ src/%.md: %.md
@mkdir -p $(@D)
@cp $< $@
src/tests.ok: $(SVG_IMGS) $(MD_SRCS)
$(TEST_STAMP): $(TARGET)
mdbook test
touch $@
html/index.html: src/tests.ok
$(TARGET): $(SVG_IMGS) $(MD_SRCS)
mdbook build
clean:

View File

@ -30,8 +30,12 @@
- [Blocktree](blocktree.md)
- [Gossip Service](gossip.md)
- [The Runtime](runtime.md)
- [Anatomy of a Transaction](transaction.md)
- [API Reference](api-reference.md)
- [Transaction](transaction-api.md)
- [Instruction](instruction-api.md)
- [Blockstreamer](blockstreamer.md)
- [JSON RPC API](jsonrpc-api.md)
- [JavaScript API](javascript-api.md)
@ -55,18 +59,21 @@
- [Economic Design MVP](ed_mvp.md)
- [References](ed_references.md)
- [Cluster Test Framework](cluster-test-framework.md)
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
- [Validator](validator-proposal.md)
- [Simple Payment and State Verification](simple-payment-and-state-verification.md)
- [Cross-Program Invocation](cross-program-invocation.md)
- [Implemented Design Proposals](implemented-proposals.md)
- [Blocktree](blocktree.md)
- [Cluster Software Installation and Updates](installer.md)
- [Deterministic Transaction Fees](transaction-fees.md)
- [Fork Selection](fork-selection.md)
- [Tower BFT](tower-bft.md)
- [Leader-to-Leader Transition](leader-leader-transition.md)
- [Leader-to-Validator Transition](leader-validator-transition.md)
- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
- [Persistent Account Storage](persistent-account-storage.md)
- [Reliable Vote Transmission](reliable-vote-transmission.md)
- [Repair Service](repair-service.md)
- [Testing Programs](testing-programs.md)
- [Testing Programs](testing-programs.md)
- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
- [Embedding the Move Language](embedding-move.md)

View File

@ -4,7 +4,7 @@ A validator votes on a PoH hash for two purposes. First, the vote indicates it
believes the ledger is valid up until that point in time. Second, since many
valid forks may exist at a given height, the vote also indicates exclusive
support for the fork. This document describes only the former. The latter is
described in [fork selection](fork-selection.md).
described in [Tower BFT](tower-bft.md).
## Current Design
@ -50,12 +50,11 @@ log the time since the NewBlock transaction was submitted.
### Finality and Payouts
Locktower is the proposed [fork selection](fork-selection.md) algorithm. It
proposes that payment to miners be postponed until the *stack* of validator
votes reaches a certain depth, at which point rollback is not economically
feasible. The vote program may therefore implement locktower. Vote instructions
would need to reference a global locktower account so that it can track
cross-block state.
[Tower BFT](tower-bft.md) is the proposed fork selection algorithm. It proposes
that payment to miners be postponed until the *stack* of validator votes reaches
a certain depth, at which point rollback is not economically feasible. The vote
program may therefore implement Tower BFT. Vote instructions would need to
reference a global Tower account so that it can track cross-block state.
## Challenges

View File

@ -0,0 +1,111 @@
# Cross-Program Invocation
## Problem
In today's implementation a client can create a transaction that modifies two
accounts, each owned by a separate on-chain program:
```rust,ignore
let message = Message::new(vec![
token_instruction::pay(&alice_pubkey),
acme_instruction::launch_missiles(&bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```
The current implementation does not, however, allow the `acme` program to
conveniently invoke `token` instructions on the client's behalf:
```rust,ignore
let message = Message::new(vec![
acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```
Currently, there is no way to create instruction `pay_and_launch_missiles` that executes
`token_instruction::pay` from the `acme` program. The workaround is to extend the
`acme` program with the implementation of the `token` program, and create `token`
accounts with `ACME_PROGRAM_ID`, which the `acme` program is permitted to modify.
With that workaround, `acme` can modify token-like accounts created by the `acme`
program, but not token accounts created by the `token` program.
## Proposed Solution
The goal of this design is to modify Solana's runtime such that an on-chain
program can invoke an instruction from another program.
Given two on-chain programs `token` and `acme`, each implementing instructions
`pay()` and `launch_missiles()` respectively, we would ideally like to implement
the `acme` module with a call to a function defined in the `token` module:
```rust,ignore
use token;
fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
...
}
fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
token::pay(&keyed_accounts[1..])?;
launch_missiles(keyed_accounts)?;
}
```
The above code would require that the `token` crate be dynamically linked,
so that a custom linker could intercept calls and validate accesses to
`keyed_accounts`. That is, even though the client intends to modify both
`token` and `acme` accounts, only `token` program is permitted to modify
the `token` account, and only the `acme` program is permitted to modify
the `acme` account.
Backing off from that ideal cross-program call, a slightly more
verbose solution is to expose token's existing `process_instruction()`
entrypoint to the acme program:
```rust,ignore
use token_instruction;
fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
...
}
fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
let alice_pubkey = keyed_accounts[1].key;
let instruction = token_instruction::pay(&alice_pubkey);
process_instruction(&instruction)?;
launch_missiles(keyed_accounts)?;
}
```
where `process_instruction()` is built into Solana's runtime and responsible
for routing the given instruction to the `token` program via the instruction's
`program_id` field. Before invoking `pay()`, the runtime must also ensure that
`acme` didn't modify any accounts owned by `token`. It does this by calling
`runtime::verify_instruction()` and then afterward updating all the `pre_*`
variables to tentatively commit `acme`'s account modifications. After `pay()`
completes, the runtime must again ensure that `token` didn't modify any
accounts owned by `acme`. It should call `verify_instruction()` again, but this
time with the `token` program ID. Lastly, after `pay_and_launch_missiles()`
completes, the runtime must call `verify_instruction()` one more time, where it
normally would, but using all updated `pre_*` variables. If executing
`pay_and_launch_missiles()` up to `pay()` made no invalid account changes,
`pay()` made no invalid changes, and executing from `pay()` until
`pay_and_launch_missiles()` returns made no invalid changes, then the runtime
can transitively assume `pay_and_launch_missiles()` as whole made no invalid
account changes, and therefore commit all account modifications.
### Setting `KeyedAccount.is_signer`
When `process_instruction()` is invoked, the runtime must create a new
`KeyedAccounts` parameter using the signatures from the *original* transaction
data. Since the `token` program is immutable and existed on-chain prior to the
`acme` program, the runtime can safely treat the transaction signature as a
signature of a transaction with a `token` instruction. When the runtime sees
the given instruction references `alice_pubkey`, it looks up the key in the
transaction to see if that key corresponds to a transaction signature. In this
case it does and so sets `KeyedAccount.is_signer`, thereby authorizing the
`token` program to modify Alice's account.

View File

@ -0,0 +1,66 @@
# Embedding the Move Language
## Problem
Solana enables developers to write on-chain programs in general purpose
programming languages such as C or Rust, but those programs contain
Solana-specific mechanisms. For example, there isn't another chain that asks
developers to create a Rust module with a `process_instruction(KeyedAccounts)`
function. Whenever practical, Solana should offer dApp developers more portable
options.
Until just recently, no popular blockchain offered a language that could expose
the value of Solana's massively parallel [runtime](runtime.md). Solidity
contracts, for example, do not separate references to shared data from contract
code, and therefore need to be executed serially to ensure deterministic
behavior. In practice we see that the most aggressively optimized EVM-based
blockchains all seem to peak out around 1,200 TPS - a small fraction of what
Solana can do. The Libra project, on the other hand, designed an on-chain
programming language called Move that is more suitable for parallel execution.
Like Solana's runtime, Move programs depend on accounts for all shared state.
The biggest design difference between Solana's runtime and Libra's Move VM is
how they manage safe invocations between modules. Solana took an operating
systems approach and Libra took the domain-specific language approach. In the
runtime, a module must trap back into the runtime to ensure the caller's module
did not write to data owned by the callee. Likewise, when the callee completes,
it must again trap back to the runtime to ensure the callee did not write to
data owned by the caller. Move, on the other hand, includes an advanced type
system that allows these checks to be run by its bytecode verifier. Because
Move bytecode can be verified, the cost of verification is paid just once, at
the time the module is loaded on-chain. In the runtime, the cost is paid each
time a transaction crosses between modules. The difference is similar in spirit
to the difference between a dynamically-typed language like Python versus a
statically-typed language like Java. Solana's runtime allows dApps to be
written in general purpose programming languages, but that comes with the cost
of runtime checks when jumping between programs.
This proposal attempts to define a way to embed the Move VM such that:
* cross-module invocations within Move do not require the runtime's
cross-program runtime checks
* Move programs can leverage functionality in other Solana programs and vice
versa
* Solana's runtime parallelism is exposed to batches of Move and non-Move
transactions
## Proposed Solution
### Move VM as a Solana loader
The Move VM shall be embedded as a Solana loader under the identifier
`MOVE_PROGRAM_ID`, so that Move modules can be marked as `executable` with the
VM as its `owner`. This will allow modules to load module dependencies, as well
as allow for parallel execution of Move scripts.
All data accounts owned by Move modules must set their owners to the loader,
`MOVE_PROGRAM_ID`. Since Move modules encapsulate their account data in the
same way Solana programs encapsulate theirs, the Move module owner should be
embedded in the account data. The runtime will grant write access to the Move
VM, and Move grants access to the module accounts.
### Interacting with Solana programs
To invoke instructions in non-Move programs, Solana would need to extend the
Move VM with a `process_instruction()` system call. It would work the same as
`process_instruction()` Rust BPF programs.

View File

@ -55,7 +55,7 @@ Validators can ignore forks at other points (e.g. from the wrong leader), or
slash the leader responsible for the fork.
Validators vote based on a greedy choice to maximize their reward described in
[forks selection](fork-selection.md).
[Tower BFT](tower-bft.md).
### Validator's View

View File

@ -22,7 +22,7 @@ gossip endpoint (a socket address).
Records shared over gossip are arbitrary, but signed and versioned (with a
timestamp) as needed to make sense to the node receiving them. If a node
recieves two records from the same source, it it updates its own copy with the
receives two records from the same source, it updates its own copy with the
record with the most recent timestamp.
## Gossip Service Interface
@ -34,8 +34,8 @@ Nodes send push messages to `PUSH_FANOUT` push peers.
Upon receiving a push message, a node examines the message for:
1. Duplication: if the message has been seen before, the node responds with
`PushMessagePrune` and drops the message
1. Duplication: if the message has been seen before, the node drops the message
and may respond with `PushMessagePrune` if forwarded from a low staked node
2. New data: if the message is new to the node
* Stores the new information with an updated version in its cluster info and
@ -51,7 +51,7 @@ Upon receiving a push message, a node examines the message for:
A node selects its push peers at random from the active set of known peers.
The node keeps this selection for a relatively long time. When a prune message
is received, the node drops the push peer that sent the prune. Prune is an
indication that there is another, faster path to that node than direct push.
indication that there is another, higher stake weighted path to that node than direct push.
The set of push peers is kept fresh by rotating a new node into the set every
`PUSH_MSG_TIMEOUT/2` milliseconds.

View File

@ -153,7 +153,7 @@ FLAGS:
OPTIONS:
-d, --data_dir <PATH> Directory to store install data [default: /Users/mvines/Library/Application Support/solana]
-u, --url <URL> JSON RPC URL for the solana cluster [default: https://api.testnet.solana.com/]
-u, --url <URL> JSON RPC URL for the solana cluster [default: http://testnet.solana.com:8899]
-p, --pubkey <PUBKEY> Public key of the update manifest [default: 9XX329sPuskWhH4DQh6k16c87dHKhXLBZTL3Gxmve8Gp]
```

View File

@ -0,0 +1,25 @@
# Instructions
For the purposes of building a [Transaction](transaction.md), a more
verbose instruction format is used:
* **Instruction:**
* **program_id:** The pubkey of the on-chain program that executes the
instruction
* **accounts:** An ordered list of accounts that should be passed to
the program processing the instruction, including metadata detailing
if an account is a signer of the transaction and if it is a credit
only account.
* **data:** A byte array that is passed to the program executing the
instruction
A more compact form is actually included in a `Transaction`:
* **CompiledInstruction:**
* **program_id_index:** The index of the `program_id` in the
`account_keys` list
* **accounts:** An ordered list of indices into `account_keys`
specifying the accounts that should be passed to the program
processing the instruction.
* **data:** A byte array that is passed to the program executing the
instruction

View File

@ -1,13 +1,13 @@
# What is Solana?
Solana is the name of an open source project that is implementing a new,
Solana is an open source project implementing a new,
high-performance, permissionless blockchain. Solana is also the name of a
company headquartered in San Francisco that maintains the open source project.
# About this Book
This book describes the Solana open source project, a blockchain built from the
ground up for scale. The book covers why it's useful, how to use it, how it
ground up for scale. The book covers why Solana is useful, how to use it, how it
works, and why it will continue to work long after the company Solana closes
its doors. The goal of the Solana architecture is to demonstrate there exists a
set of software algorithms that when used in combination to implement a

View File

@ -25,9 +25,15 @@ Methods
* [getAccountInfo](#getaccountinfo)
* [getBalance](#getbalance)
* [getClusterNodes](#getclusternodes)
* [getEpochInfo](#getepochinfo)
* [getLeaderSchedule](#getleaderschedule)
* [getProgramAccounts](#getprogramaccounts)
* [getRecentBlockhash](#getrecentblockhash)
* [getSignatureStatus](#getsignaturestatus)
* [getSlotLeader](#getslotleader)
* [getSlotsPerSegment](#getslotspersegment)
* [getStorageTurn](#getstorageturn)
* [getStorageTurnRate](#getstorageturnrate)
* [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
* [getTransactionCount](#gettransactioncount)
* [getTotalSupply](#gettotalsupply)
@ -96,6 +102,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
{"jsonrpc":"2.0","result":true,"id":1}
```
---
### getAccountInfo
Returns all information associated with the account of provided Pubkey
##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
##### Results:
The result field will be a JSON object with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```
---
### getBalance
@ -142,28 +174,73 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
---
### getAccountInfo
Returns all information associated with the account of provided Pubkey
### getEpochInfo
Returns information about the current epoch
##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string
None
##### Results:
The result field will be a JSON object with the following sub fields:
The result field will be an object with the following fields:
* `epoch`, the current epoch
* `slotIndex`, the current slot relative to the start of the current epoch
* `slotsInEpoch`, the number of slots in this epoch
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1}
```
---
### getLeaderSchedule
Returns the leader schedule for the current epoch
##### Parameters:
None
##### Results:
The result field will be an array of leader public keys (as base-58 encoded
strings) for each slot in the current epoch
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":[...],"id":1}
```
---
### getProgramAccounts
Returns all accounts owned by the provided program Pubkey
##### Parameters:
* `string` - Pubkey of program, as base-58 encoded string
##### Results:
The result field will be an array of arrays. Each sub array will contain:
* `string` - the account Pubkey as base-58 encoded string
and a JSON object, with the following sub fields:
* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all zeros
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]]]},"id":1}
```
---
@ -234,7 +311,67 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
```
-----
----
### getSlotsPerSegment
Returns the current storage segment size in terms of slots
##### Parameters:
None
##### Results:
* `u64` - Number of slots in a storage segment
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"1024","id":1}
```
----
### getStorageTurn
Returns the current storage turn's blockhash and slot
##### Parameters:
None
##### Results:
An array consisting of
* `string` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
* `u64` - the current storage turn slot
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "2048"],"id":1}
```
----
### getStorageTurnRate
Returns the current storage turn rate in terms of slots per turn
##### Parameters:
None
##### Results:
* `u64` - Number of slots in storage turn
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"1024","id":1}
```
----
### getNumBlocksSinceSignatureConfirmation
Returns the current number of blocks since signature has been confirmed.
@ -402,7 +539,7 @@ for a given account public key changes
##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
---

View File

@ -96,7 +96,7 @@ ends up scheduled for the first two epochs because the leader schedule is also
generated at slot 0 for the next epoch. The length of the first two epochs can
be specified in the genesis block as well. The minimum length of the first
epochs must be greater than or equal to the maximum rollback depth as defined in
[fork selection](fork-selection.md).
[Tower BFT](tower-bft.md).
## Leader Schedule Generation Algorithm

View File

@ -74,7 +74,7 @@ The program should have a list of slots which are valid storage mining slots.
This list should be maintained by keeping track of slots which are rooted slots in which a significant
portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT
number of slots would be added to this set. The program should check that the slot is in this set. The set can
be maintained by receiving a AdvertiseStorageRecentBlockHash and checking with its bank/locktower state.
be maintained by receiving an AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state.
The program should do a signature verify check on the signature, public key from the transaction submitter and the message of
the previous storage epoch PoH value.

View File

@ -60,7 +60,7 @@ The read is satisfied by pointing to a memory-mapped location in the
## Root Forks
The [fork selection algorithm](fork-selection.md) eventually selects a fork as a
[Tower BFT](tower-bft.md) eventually selects a fork as a
root fork and the fork is squashed. A squashed/root fork cannot be rolled back.
When a fork is squashed, all accounts in its parents not already present in the

View File

@ -0,0 +1,172 @@
# Simple Payment and State Verification
It is often useful to allow low resourced clients to participate in a Solana
cluster. Be this participation economic or contract execution, verification
that a client's activity has been accepted by the network is typically
expensive. This proposal lays out a mechanism for such clients to confirm that
their actions have been committed to the ledger state with minimal resource
expenditure and third-party trust.
## A Naive Approach
Validators store the signatures of recently confirmed transactions for a short
period of time to ensure that they are not processed more than once. Validators
provide a JSON RPC endpoint, which clients can use to query the cluster if a
transaction has been recently processed. Validators also provide a PubSub
notification, whereby a client registers to be notified when a given signature
is observed by the validator. While these two mechanisms allow a client to
verify a payment, they are not a proof and rely on completely trusting a
fullnode.
We will describe a way to minimize this trust using Merkle Proofs to anchor the
fullnode's response in the ledger, allowing the client to confirm on their own
that a sufficient number of their preferred validators have confirmed a
transaction. Requiring multiple validator attestations further reduces trust in
the fullnode, as it increases both the technical and economic difficulty of
compromising several other network participants.
## Light Clients
A 'light client' is a cluster participant that does not itself run a fullnode.
This light client would provide a level of security greater than trusting a
remote fullnode, without requiring the light client to spend a lot of resources
verifying the ledger.
Rather than providing transaction signatures directly to a light client, the
fullnode instead generates a Merkle Proof from the transaction of interest to
the root of a Merkle Tree of all transactions in the including block. This Merkle
Root is stored in a ledger entry which is voted on by validators, providing it
consensus legitimacy. The additional level of security for a light client depends
on an initial canonical set of validators the light client considers to be the
stakeholders of the cluster. As that set is changed, the client can update its
internal set of known validators with [receipts](#receipts). This may become
challenging with a large number of delegated stakes.
Fullnodes themselves may want to use light client APIs for performance reasons.
For example, during the initial launch of a fullnode, the fullnode may use a
cluster provided checkpoint of the state and verify it with a receipt.
## Receipts
A receipt is a minimal proof that: a transaction has been included in a block,
that the block has been voted on by the client's preferred set of validators and
that the votes have reached the desired confirmation depth.
The receipts for both state and payments start with a Merkle Path from the
value into a Bank-Merkle that has been voted on and included in the ledger. A
chain of PoH Entries containing subsequent validator votes, deriving from the
Bank-Merkle, is the confirmation proof.
Clients can examine this ledger data and compute the finality using Solana's fork
selection rules.
### Payment Merkle Path
A payment receipt is a data structure that contains a Merkle Path from a
transaction to the required set of validator votes.
An Entry-Merkle is a Merkle Root including all transactions in the entry, sorted
by signature.
<img alt="Block Merkle Diagram" src="img/spv-block-merkle.svg" class="center"/>
A Block-Merkle is a Merkle root of all the Entry-Merkles sequenced in the block.
Transaction status is necessary for the receipt because the state receipt is
constructed for the block. Two transactions over the same state can appear in
the block, and therefore, there is no way to infer from just the state whether a
transaction that is committed to the ledger has succeeded or failed in modifying
the intended state. It may not be necessary to encode the full status code, but
a single status bit to indicate the transaction's success.
### State Merkle Path
A state receipt provides a confirmation that a specific state is committed at the
end of the block. Inter-block state transitions do not generate a receipt.
For example:
* A sends 5 Lamports to B
* B spends 5 Lamports
* C sends 5 Lamports to A
At the end of the block, A and B are in the exact same starting state, and any
state receipt would point to the same value for A or B.
The Bank-Merkle is computed from the Merkle Tree of the new state changes, along
with the Previous Bank-Merkle, and the Block-Merkle.
<img alt="Bank Merkle Diagram" src="img/spv-bank-merkle.svg" class="center"/>
A state receipt contains only the state changes occurring in the block. A direct
Merkle Path to the current Bank-Merkle guarantees the state value at that bank
hash, but it cannot be used to generate a “current” receipt to the latest state
if the state modification occurred in some previous block. There is no guarantee
that the path provided by the validator is the latest one available out of all
the previous Bank-Merkles.
Clients that want to query the chain for a receipt of the "latest" state would
need to create a transaction that would update the Merkle Path for that account,
such as a credit of 0 Lamports.
### Validator Votes
Leaders should coalesce the validator votes by stake weight into a single entry.
This will reduce the number of entries necessary to create a receipt.
### Chain of Entries
A receipt has a PoH link from the payment or state Merkle Path root to a list of
consecutive validation votes.
It contains the following:
* State -> Bank-Merkle
or
* Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Merkle
And a vector of PoH entries:
* Validator vote entries
* Ticks
* Light entries
```rust,ignore
/// This Entry definition skips over the transactions and only contains the
/// hash of the transactions used to modify PoH.
LightEntry {
/// The number of hashes since the previous Entry ID.
pub num_hashes: u64,
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
hash: Hash,
/// The Merkle Root of the transactions encoded into the Entry.
entry_hash: Hash,
}
```
The light entries are reconstructed from Entries and simply show the entry Merkle
Root that was mixed in to the PoH hash, instead of the full transaction set.
Clients do not need the starting vote state. The [fork selection](book/src/fork-selection.md) algorithm is
defined such that only votes that appear after the transaction provide finality
for the transaction, and finality is independent of the starting state.
### Verification
A light client that is aware of the supermajority set validators can verify a
receipt by following the Merkle Path to the PoH chain. The Bank-Merkle is the
Merkle Root and will appear in votes included in an Entry. The light client can
simulate [fork selection](book/src/fork-selection.md) for the consecutive votes
and verify that the receipt is confirmed at the desired lockout threshold.
### Synthetic State
Synthetic state should be computed into the Bank-Merkle along with the bank
generated state.
For example:
* Epoch validator accounts and their stakes and weights.
* Computed fee rates
These values should have an entry in the Bank-Merkle. They should live under
known accounts, and therefore have an exact address in the Merkle Path.

View File

@ -11,7 +11,7 @@ of getting its stake slashed. The economics are covered in [staking
rewards](staking-rewards.md). This chapter, on the other hand, describes the
underlying mechanics of its implementation.
## Basic Besign
## Basic Design
The general idea is that the validator owns a Vote account. The Vote account
tracks validator votes, counts validator generated credits, and provides any
@ -20,7 +20,7 @@ stakes delegated to it and has no staking weight.
A separate Stake account (created by a staker) names a Vote account to which the
stake is delegated. Rewards generated are proportional to the amount of
lamports staked. The Stake account is owned by the staker only. Lamports
lamports staked. The Stake account is owned by the staker only. Some portion of the lamports
stored in this account are the stake.
## Passive Delegation
@ -31,7 +31,7 @@ the Vote account or submitting votes to the account.
The total stake allocated to a Vote account can be calculated by the sum of
all the Stake accounts that have the Vote account pubkey as the
`StakeState::Delegate::voter_pubkey`.
`StakeState::Stake::voter_pubkey`.
## Vote and Stake accounts
@ -46,15 +46,15 @@ program that its delegate has participated in validating the ledger.
VoteState is the current state of all the votes the validator has submitted to
the network. VoteState contains the following state information:
* votes - The submitted votes data structure.
* `votes` - The submitted votes data structure.
* credits - The total number of rewards this vote program has generated over its
* `credits` - The total number of rewards this vote program has generated over its
lifetime.
* root\_slot - The last slot to reach the full lockout commitment necessary for
* `root_slot` - The last slot to reach the full lockout commitment necessary for
rewards.
* commission - The commission taken by this VoteState for any rewards claimed by
* `commission` - The commission taken by this VoteState for any rewards claimed by
staker's Stake accounts. This is the percentage ceiling of the reward.
* Account::lamports - The accumulated lamports from the commission. These do not
@ -71,13 +71,17 @@ count as stakes.
### VoteInstruction::AuthorizeVoteSigner(Pubkey)
* `account[0]` - RW - The VoteState
`VoteState::authorized_vote_signer` is set to to `Pubkey`, instruction must by
signed by Pubkey
`VoteState::authorized_vote_signer` is set to `Pubkey`, the transaction must be
signed by the Vote account's current `authorized_vote_signer`. <br>
`VoteInstruction::AuthorizeVoter` allows a staker to choose a signing service
for its votes. That service is responsible for ensuring the vote won't cause
the staker to be slashed.
### VoteInstruction::Vote(Vec<Vote>)
* `account[0]` - RW - The VoteState
`VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules see [Fork Selection](fork-selection.md)
`VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules see [Tower BFT](tower-bft.md)
* `account[1]` - RO - A list of some N most recent slots and their hashes for the vote to be verified against.
@ -85,14 +89,16 @@ count as stakes.
### StakeState
A StakeState takes one of two forms, StakeState::Delegate and StakeState::MiningPool.
A StakeState takes one of three forms, StakeState::Uninitialized, StakeState::Stake and StakeState::RewardsPool.
### StakeState::Delegate
### StakeState::Stake
StakeState is the current delegation preference of the **staker**. StakeState
StakeState::Stake is the current delegation preference of the **staker** and
contains the following state information:
* Account::lamports - The staked lamports.
* Account::lamports - The lamports available for staking.
* `stake` - the staked amount (subject to warm up and cool down) for generating rewards, always less than or equal to Account::lamports
* `voter_pubkey` - The pubkey of the VoteState instance the lamports are
delegated to.
@ -100,56 +106,53 @@ delegated to.
* `credits_observed` - The total credits claimed over the lifetime of the
program.
### StakeState::MiningPool
* `activated` - the epoch at which this stake was activated/delegated. The full stake will be counted after warm up.
There are two approaches to the mining pool. The bank could allow the
StakeState program to bypass the token balance check, or a program representing
the mining pool could run on the network. To avoid a single network wide lock,
the pool can be split into several mining pools. This design focuses on using
StakeState::MiningPool instances as the cluster wide mining pools.
* `deactivated` - the epoch at which this stake will be completely de-activated, which is `cool down` epochs after StakeInstruction::Deactivate is issued.
* 256 StakeState::MiningPool are initialized, each with 1/256 number of mining pool
tokens stored as `Account::lamports`.
### StakeState::RewardsPool
The stakes and the MiningPool are accounts that are owned by the same `Stake`
program.
To avoid a single network wide lock or contention in redemption, 256 RewardsPools are part of genesis under pre-determined keys, each with std::u64::MAX credits to be able to satisfy redemptions according to point value.
### StakeInstruction::Initialize
The Stakes and the RewardsPool are accounts that are owned by the same `Stake` program.
* `account[0]` - RW - The StakeState::Delegate instance.
`StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
`StakeState::Delegate::voter_pubkey` is initialized to `account[1]`
### StakeInstruction::DelegateStake(u64)
The Stake account is moved from Uninitialized to StakeState::Stake form. This is
how stakers choose their initial delegate validator node and activate their
stake account lamports.
* `account[0]` - RW - The StakeState::Stake instance. <br>
`StakeState::Stake::credits_observed` is initialized to `VoteState::credits`,<br>
`StakeState::Stake::voter_pubkey` is initialized to `account[1]`,<br>
`StakeState::Stake::stake` is initialized to the u64 passed as an argument above,<br>
`StakeState::Stake::activated` is initialized to current Bank epoch, and<br>
`StakeState::Stake::deactivated` is initialized to std::u64::MAX
* `account[1]` - R - The VoteState instance.
* `account[2]` - R - syscall::current account, carries information about current Bank epoch
### StakeInstruction::RedeemVoteCredits
The Staker or the owner of the Stake account sends a transaction with this
The staker or the owner of the Stake account sends a transaction with this
instruction to claim rewards.
The Vote account and the Stake account pair maintain a lifetime counter
of total rewards generated and claimed. When claiming rewards, the total lamports
deposited into the Stake account and as validator commission is proportional to
`VoteState::credits - StakeState::credits_observed`.
The Vote account and the Stake account pair maintain a lifetime counter of total
rewards generated and claimed. Rewards are paid according to a point value
supplied by the Bank from inflation. A `point` is one credit * one staked
lamport, rewards paid are proportional to the number of lamports staked.
* `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the
reward.
* `account[1]` - RW - The StakeState::Delegate instance that is redeeming votes
credits.
* `account[2]` - R - The VoteState instance, must be the same as
`StakeState::voter_pubkey`
* `account[0]` - RW - The StakeState::Stake instance that is redeeming rewards.
* `account[1]` - R - The VoteState instance, must be the same as `StakeState::voter_pubkey`
* `account[2]` - RW - The StakeState::RewardsPool instance that will fulfill the request (picked at random).
* `account[3]` - R - syscall::rewards account from the Bank that carries point value.
Reward is paid out for the difference between `VoteState::credits` to
`StakeState::Delgate.credits_observed`, and `credits_observed` is updated to
`VoteState::credits`. The commission is deposited into the Vote account token
`StakeState::Stake::credits_observed`, multiplied by `syscall::rewards::Rewards::validator_point_value`.
`StakeState::Stake::credits_observed` is updated to `VoteState::credits`. The commission is deposited into the Vote account token
balance, and the reward is deposited to the Stake account token balance.
The total lamports paid is a percentage-rate of the lamports staked muiltplied by
the ratio of rewards being redeemed to rewards that could have been generated
during the rate period.
Any random MiningPool can be used to redeem the credits.
```rust,ignore
let credits_to_claim = vote_state.credits - stake_state.credits_observed;
@ -157,24 +160,26 @@ stake_state.credits_observed = vote_state.credits;
```
`credits_to_claim` is used to compute the reward and commission, and
`StakeState::Delegate::credits_observed` is updated to the latest
`StakeState::Stake::credits_observed` is updated to the latest
`VoteState::credits` value.
## Collecting network fees into the MiningPool
### StakeInstruction::Deactivate
A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cool down.
At the end of the block, before the bank is frozen, but after it processed all
the transactions for the block, a virtual instruction is executed to collect
the transaction fees.
* `account[0]` - RW - The StakeState::Stake instance that is deactivating, the transaction must be signed by this key.
* `account[1]` - R - syscall::current account from the Bank that carries current epoch
* A portion of the fees are deposited into the leader's account.
* A portion of the fees are deposited into the smallest StakeState::MiningPool
account.
StakeState::Stake::deactivated is set to the current epoch + cool down. The account's stake will ramp down to zero by
that epoch, and Account::lamports will be available for withdrawal.
## Authorizing a Vote Signer
`VoteInstruction::AuthorizeVoter` allows a staker to choose a signing service
for its votes. That service is responsible for ensuring the vote won't cause
the staker to be slashed.
### StakeInstruction::Withdraw(u64)
Lamports build up over time in a Stake account and any excess over activated stake can be withdrawn.
* `account[0]` - RW - The StakeState::Stake from which to withdraw, the transaction must be signed by this key.
* `account[1]` - RW - Account that should be credited with the withdrawn lamports.
* `account[2]` - R - syscall::current account from the Bank that carries current epoch, to calculate stake.
## Benefits of the design
@ -187,9 +192,6 @@ the staker to be slashed.
* Commission for the work is deposited when a reward is claimed by the delegated
stake.
This proposal would benefit from the `read-only` accounts proposal to allow for
many rewards to be claimed concurrently.
## Example Callflow
<img alt="Passive Staking Callflow" src="img/passive-staking-callflow.svg" class="center"/>

View File

@ -91,6 +91,10 @@ History](#proof-of-history).
The time, i.e. number of [slots](#slot), for which a [leader
schedule](#leader-schedule) is valid.
#### finality
When nodes representing 2/3rd of the stake have a common [root](#root).
#### fork
A [ledger](#ledger) derived from common entries but then diverged.
@ -213,6 +217,15 @@ The public key of a [keypair](#keypair).
Storage mining client, stores some part of the ledger enumerated in blocks and
submits storage proofs to the chain. Not a full-node.
#### root
A [block](#block) or [slot](#slot) that has reached maximum [lockout](#lockout)
on a validator. The root is the highest block that is an ancestor of all active
forks on a validator. All ancestor blocks of a root are also transitively a
root. Blocks that are not an ancestor and not a descendant of the root are
excluded from consideration for consensus and can be discarded.
#### runtime
The component of a [fullnode](#fullnode) responsible for [program](#program)

View File

@ -74,8 +74,7 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
software on Linux x86_64 and mac OS systems.
```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.17.0/install/solana-install-init.sh | sh -s
```
Alternatively build the `solana-install` program from source and run the
@ -122,8 +121,11 @@ $ ./scripts/cargo-install-all.sh .
$ export PATH=$PWD/bin:$PATH
```
If building for CUDA, include the `cuda` feature flag as well:
If building for CUDA (Linux only), fetch the perf-libs first, then include the
`cuda` feature flag when building:
```bash
$ ./fetch-perf-libs.sh
$ source ./target/perf-libs/env.sh
$ ./scripts/cargo-install-all.sh . cuda
$ export PATH=$PWD/bin:$PATH
```
@ -152,21 +154,18 @@ choice, to start the node:
If this is a `solana-install`-installation:
```bash
$ clear-config.sh
$ validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
$ validator.sh --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
```
Alternatively, the `solana-install run` command can be used to run the validator
node while periodically checking for and applying software updates:
```bash
$ clear-config.sh
$ solana-install run validator.sh -- --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
$ solana-install run validator.sh -- --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
```
If you built from source:
```bash
$ USE_INSTALL=1 ./multinode-demo/clear-config.sh
$ USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
```
#### Enabling CUDA
@ -240,3 +239,46 @@ A local InfluxDB and Grafana instance is now running on your machine. Define
`start.sh` output and restart your validator.
Metrics should now be streaming and visible from your local Grafana dashboard.
#### Timezone For Log Messages
Log messages emitted by your validator include a timestamp. When sharing logs
with others to help triage issues, that timestamp can cause confusion as it does
not contain timezone information.
To make it easier to compare logs between different sources we request that
everybody use Pacific Time on their validator nodes. In Linux this can be
accomplished by running:
```bash
$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
```
#### Publishing Validator Info
You can publish your validator information to the chain to be publicly visible
to other users.
Run the solana-validator-info CLI to populate a validator-info account:
```bash
$ solana-validator-info publish ~/validator-keypair.json <VALIDATOR_NAME> <VALIDATOR_INFO_ARGS>
```
Optional fields for VALIDATOR_INFO_ARGS:
* Website
* Keybase Username
* Details
##### Keybase
Including a Keybase username allows client applications (like the Solana Network
Explorer) to automatically pull in your validator public profile, including
cryptographic proofs, brand identity, etc. To connect your validator pubkey with
Keybase:
1. Join https://keybase.io/ and complete the profile for your validator
2. Add your validator **identity pubkey** to Keybase:
* Create an empty file on your local computer called `validator-<PUBKEY>`
* In Keybase, navigate to the Files section, and upload your pubkey file to
a `solana` subdirectory in your public folder: `/keybase/public/<KEYBASE_USERNAME>/solana`
* To check your pubkey, ensure you can successfully browse to
`https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<PUBKEY>`
3. Add or update your `solana-validator-info` with your Keybase username. The
CLI will verify the `validator-<PUBKEY>` file

View File

@ -53,8 +53,7 @@ software.
##### Linux and mac OS
```bash
$ export SOLANA_RELEASE=v0.16.0 # skip this line to install the latest release
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.17.0/install/solana-install-init.sh | sh -s
```
Alternatively build the `solana-install` program from source and run the

View File

@ -1,7 +1,7 @@
# Fork Selection
# Tower BFT
This design describes a *Fork Selection* algorithm. It addresses the following
problems:
This design describes Solana's *Tower BFT* algorithm. It addresses the
following problems:
* Some forks may not end up accepted by the super-majority of the cluster, and
voters need to recover from voting on such forks.

View File

@ -0,0 +1,48 @@
# The Transaction
### Components of a `Transaction`
* **Transaction:**
* **message:** Defines the transaction
* **header:** Details the account types of and signatures required by
the transaction
* **num_required_signatures:** The total number of signatures
required to make the transaction valid.
* **num_credit_only_signed_accounts:** The last
`num_credit_only_signed_accounts` signatures refer to signing
credit only accounts. Credit only accounts can be used concurrently
by multiple parallel transactions, but their balance may only be
increased, and their account data is read-only.
* **num_credit_only_unsigned_accounts:** The last
`num_credit_only_unsigned_accounts` pubkeys in `account_keys` refer
to non-signing credit only accounts
* **account_keys:** List of pubkeys used by the transaction, including
by the instructions and for signatures. The first
`num_required_signatures` pubkeys must sign the transaction.
* **recent_blockhash:** The ID of a recent ledger entry. Validators will
reject transactions with a `recent_blockhash` that is too old.
* **instructions:** A list of [instructions](instruction.md) that are
run sequentially and committed in one atomic transaction if all
succeed.
* **signatures:** A list of signatures applied to the transaction. The
list is always of length `num_required_signatures`, and the signature
at index `i` corresponds to the pubkey at index `i` in `account_keys`.
The list is initialized with empty signatures (i.e. zeros), and
populated as signatures are added.
### Transaction Signing
A `Transaction` is signed by using an ed25519 keypair to sign the
serialization of the `message`. The resulting signature is placed at the
index of `signatures` matching the index of the keypair's pubkey in
`account_keys`.
### Transaction Serialization
`Transaction`s (and their `message`s) are serialized and deserialized
using the [bincode](https://crates.io/crates/bincode) crate with a
non-standard vector serialization that uses only one byte for the length
if it can be encoded in 7 bits, 2 bytes if it fits in 14 bits, or 3
bytes if it requires 15 or 16 bits. The vector serialization is defined
by Solana's
[short-vec](https://github.com/solana-labs/solana/blob/master/sdk/src/short_vec.rs).

43
book/src/transaction.md Normal file
View File

@ -0,0 +1,43 @@
# Anatomy of a Transaction
Transactions encode lists of instructions that are executed
sequentially, and only committed if all the instructions complete
successfully. All account states are reverted upon the failure of a
transaction. Each Transaction details the accounts used, including which
must sign and which are credit only, a recent blockhash, the
instructions, and any signatures.
## Accounts and Signatures
Each transaction explicitly lists all accounts that it needs access to.
This includes accounts that are transferring tokens, accounts whose user
data is being modified, and the program accounts that are being called
by the instructions. Each account that is not an executable program can
be marked as requiring a signature and/or as credit only. All accounts
marked as signers must have a valid signature in the transaction's list
of signatures before the transaction is considered valid. Any accounts
marked as credit only may only have their token value increased, and
their user data is read only. Accounts are locked by the runtime,
ensuring that they are not modified by a concurrent program while the
transaction is running. Credit only accounts can safely be shared, so
the runtime will allow multiple concurrent credit only locks on an
account.
## Recent Blockhash
A Transaction includes a recent blockhash to prevent duplication and to
give transactions lifetimes. Any transaction that is completely
identical to a previous one is rejected, so adding a newer blockhash
allows multiple transactions to repeat the exact same action.
Transactions also have lifetimes that are defined by the blockhash, as
any transaction whose blockhash is too old will be rejected.
## Instructions
Each instruction specifies a single program account (which must be
marked executable), a subset of the transaction's accounts that should
be passed to the program, and a data byte array instruction that is
passed to the program. The program interprets the data array and
operates on the accounts specified by the instructions. The program can
return successfully, or with an error code. An error return causes the
entire transaction to fail immediately.

View File

@ -1 +1,2 @@
/target/
/farf/

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.16.0"
version = "0.17.0"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -9,4 +9,4 @@ license = "Apache-2.0"
edition = "2018"
[build-dependencies]
cc = "1.0.37"
cc = "1.0.38"

View File

@ -1,16 +1,16 @@
steps:
- command: "sdk/docker-solana/build.sh"
timeout_in_minutes: 20
timeout_in_minutes: 40
name: "publish docker"
- command: "ci/publish-crate.sh"
timeout_in_minutes: 40
timeout_in_minutes: 90
name: "publish crate"
branches: "!master"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- command: "ci/publish-tarball.sh"
timeout_in_minutes: 25
timeout_in_minutes: 45
name: "publish tarball"
- command: "ci/publish-book.sh"
timeout_in_minutes: 15

View File

@ -4,7 +4,7 @@ steps:
timeout_in_minutes: 5
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 15
timeout_in_minutes: 35
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf"

View File

@ -1,6 +1,10 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.35.0
FROM rust:1.36.0
# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
ENV PROTOC_ZIP protoc-$PROTOC_VERSION-linux-x86_64.zip
RUN set -x \
&& apt update \
@ -20,6 +24,8 @@ RUN set -x \
mscgen \
rsync \
sudo \
golang \
unzip \
\
&& rm -rf /var/lib/apt/lists/* \
&& rustup component add rustfmt \
@ -28,4 +34,8 @@ RUN set -x \
&& cargo install svgbob_cli \
&& cargo install mdbook \
&& rustc --version \
&& cargo --version
&& cargo --version \
&& curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
&& unzip -o $PROTOC_ZIP -d /usr/local bin/protoc \
&& unzip -o $PROTOC_ZIP -d /usr/local include/* \
&& rm -f $PROTOC_ZIP

View File

@ -33,9 +33,15 @@ if [[ -n $CI ]]; then
export CI_PULL_REQUEST=
fi
export CI_OS_NAME=linux
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
# The solana-secondary pipeline should use the slug of the pipeline that
# triggered it
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG
else
export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
fi
# TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into
# the solana-secondary builder
# the solana-secondary pipeline
if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
export CI_TAG=$TRIGGERED_BUILDKITE_TAG
else
@ -53,7 +59,7 @@ if [[ -n $CI ]]; then
fi
if [[ $CI_LINUX = True ]]; then
export CI_OS_NAME=linux
elif [[ $CI_WINDOWS = True ]]; then
else
export CI_OS_NAME=windows
fi
export CI_REPO_SLUG=$APPVEYOR_REPO_NAME

View File

@ -5,7 +5,6 @@ skipSetup=false
iterations=1
restartInterval=never
rollingRestart=false
maybeNoLeaderRotation=
extraNodes=0
walletRpcPort=:8899
@ -54,9 +53,6 @@ while getopts "ch?i:k:brxR" opt; do
k)
restartInterval=$OPTARG
;;
b)
maybeNoLeaderRotation="--stake 0"
;;
x)
extraNodes=$((extraNodes + 1))
;;
@ -82,7 +78,6 @@ nodes=(
--no-restart \
--init-complete-file init-complete-node1.log"
"multinode-demo/validator.sh \
$maybeNoLeaderRotation \
--enable-rpc-exit \
--no-restart \
--init-complete-file init-complete-node2.log \
@ -94,8 +89,7 @@ for i in $(seq 1 $extraNodes); do
"multinode-demo/validator.sh \
--no-restart \
--label dyn$i \
--init-complete-file init-complete-node$((2 + i)).log \
$maybeNoLeaderRotation"
--init-complete-file init-complete-node$((2 + i)).log"
)
done
numNodes=$((2 + extraNodes))
@ -125,21 +119,26 @@ startNode() {
echo "log: $log"
}
waitForNodeToInit() {
declare initCompleteFile=$1
while [[ ! -r $initCompleteFile ]]; do
if [[ $SECONDS -ge 240 ]]; then
echo "^^^ +++"
echo "Error: $initCompleteFile not found in $SECONDS seconds"
exit 1
fi
echo "Waiting for $initCompleteFile ($SECONDS)..."
sleep 2
done
echo "Found $initCompleteFile"
}
initCompleteFiles=()
waitForAllNodesToInit() {
echo "--- ${#initCompleteFiles[@]} nodes booting"
SECONDS=
for initCompleteFile in "${initCompleteFiles[@]}"; do
while [[ ! -r $initCompleteFile ]]; do
if [[ $SECONDS -ge 240 ]]; then
echo "^^^ +++"
echo "Error: $initCompleteFile not found in $SECONDS seconds"
exit 1
fi
echo "Waiting for $initCompleteFile ($SECONDS)..."
sleep 2
done
echo "Found $initCompleteFile"
waitForNodeToInit "$initCompleteFile"
done
echo "All nodes finished booting in $SECONDS seconds"
}
@ -162,6 +161,13 @@ startNodes() {
if $addLogs; then
logs+=("$(getNodeLogFile "$i" "$cmd")")
fi
# 1 == bootstrap leader, wait until it boots before starting
# other validators
if [[ "$i" -eq 1 ]]; then
SECONDS=
waitForNodeToInit "$initCompleteFile"
fi
done
waitForAllNodesToInit
@ -300,7 +306,6 @@ else
fi
startNodes
lastTransactionCount=
enforceTransactionCountAdvance=true
while [[ $iteration -le $iterations ]]; do
echo "--- Node count ($iteration)"
(
@ -336,36 +341,20 @@ while [[ $iteration -le $iterations ]]; do
transactionCount=$(sed -e 's/{"jsonrpc":"2.0","result":\([0-9]*\),"id":1}/\1/' log-transactionCount.txt)
if [[ -n $lastTransactionCount ]]; then
echo "--- Transaction count check: $lastTransactionCount < $transactionCount"
if $enforceTransactionCountAdvance; then
if [[ $lastTransactionCount -ge $transactionCount ]]; then
echo "Error: Transaction count is not advancing"
echo "* lastTransactionCount: $lastTransactionCount"
echo "* transactionCount: $transactionCount"
flag_error
fi
else
echo "enforceTransactionCountAdvance=false"
if [[ $lastTransactionCount -ge $transactionCount ]]; then
echo "Error: Transaction count is not advancing"
echo "* lastTransactionCount: $lastTransactionCount"
echo "* transactionCount: $transactionCount"
flag_error
fi
enforceTransactionCountAdvance=true
fi
lastTransactionCount=$transactionCount
echo "--- Wallet sanity ($iteration)"
flag_error_if_no_leader_rotation() {
# TODO: Stop ignoring wallet sanity failures when leader rotation is enabled
# once https://github.com/solana-labs/solana/issues/2474 is fixed
if [[ -n $maybeNoLeaderRotation ]]; then
flag_error
else
# Wallet error occurred (and was ignored) so transactionCount may not
# advance on the next iteration
enforceTransactionCountAdvance=false
fi
}
(
set -x
timeout 60s scripts/wallet-sanity.sh --url http://127.0.0.1"$walletRpcPort"
) || flag_error_if_no_leader_rotation
) || flag_error
iteration=$((iteration + 1))

View File

@ -46,16 +46,22 @@ if _ git --no-pager grep -n 'Default::default()' -- '*.rs'; then
fi
# Let's keep a .gitignore for every crate, ensure it's got
# /target/ in it
# /target/ and /farf/ in it
declare gitignores_ok=true
for i in $(git --no-pager ls-files \*/Cargo.toml ); do
dir=$(dirname "$i")
if [[ ! -f $dir/.gitignore ]]; then
echo 'error: nits.sh .gitnore missing for crate '"$dir" >&2
gitignores_ok=false
elif ! grep -q -e '^/target/$' "$dir"/.gitignore; then
else
if ! grep -q -e '^/target/$' "$dir"/.gitignore; then
echo 'error: nits.sh "/target/" apparently missing from '"$dir"'/.gitignore' >&2
gitignores_ok=false
fi
if ! grep -q -e '^/farf/$' "$dir"/.gitignore ; then
echo 'error: nits.sh "/farf/" apparently missing from '"$dir"'/.gitignore' >&2
gitignores_ok=false
fi
fi
done
"$gitignores_ok"

View File

@ -2,8 +2,50 @@
set -e
cd "$(dirname "$0")/.."
BOOK="book"
book/build.sh
source ci/rust-version.sh stable
eval "$(ci/channel-info.sh)"
if [[ -n $PUBLISH_BOOK_TAG ]]; then
CURRENT_TAG="$(git describe --tags)"
COMMIT_TO_PUBLISH="$(git rev-list -n 1 "${PUBLISH_BOOK_TAG}")"
# book is manually published at a specified release tag
if [[ $PUBLISH_BOOK_TAG != "$CURRENT_TAG" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
build:
message: "$BUILDKITE_MESSAGE"
commit: "$COMMIT_TO_PUBLISH"
env:
PUBLISH_BOOK_TAG: "$PUBLISH_BOOK_TAG"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
repo=git@github.com:solana-labs/book.git
else
# book-edge and book-beta are published automatically on the tip of the branch
case $CHANNEL in
edge)
repo=git@github.com:solana-labs/book-edge.git
;;
beta)
repo=git@github.com:solana-labs/book-beta.git
;;
*)
echo "--- publish skipped"
exit 0
;;
esac
BOOK=$CHANNEL
fi
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "book/build.sh"
echo --- create book repo
(
@ -16,22 +58,7 @@ echo --- create book repo
git commit -m "${CI_COMMIT:-local}"
)
eval "$(ci/channel-info.sh)"
# Only publish the book from the edge and beta channels for now.
case $CHANNEL in
edge)
repo=git@github.com:solana-labs/book-edge.git
;;
beta)
repo=git@github.com:solana-labs/book.git
;;
*)
echo "--- publish skipped"
exit 0
;;
esac
echo "--- publish $CHANNEL"
echo "--- publish $BOOK"
cd book/html/
git remote add origin $repo
git fetch origin master

View File

@ -2,6 +2,7 @@
set -e
cd "$(dirname "$0")/.."
source ci/semver_bash/semver.sh
source ci/rust-version.sh stable
# shellcheck disable=SC2086
is_crate_version_uploaded() {
@ -25,35 +26,56 @@ expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
exit 1
}
cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
Cargo_tomls=$(ci/order-crates-for-publishing.py)
for Cargo_toml in $Cargo_tomls; do
echo "-- $Cargo_toml"
echo "--- $Cargo_toml"
grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || {
echo "Error: $Cargo_toml version is not $expectedCrateVersion"
exit 1
}
crate_name=$(grep -m 1 '^name = ' "$Cargo_toml" | cut -f 3 -d ' ' | tr -d \")
if grep -q "^publish = false" "$Cargo_toml"; then
echo "$crate_name is is marked as unpublishable"
continue
fi
if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
echo "${crate_name} version ${expectedCrateVersion} is already on crates.io"
continue
fi
(
set -x
crate=$(dirname "$Cargo_toml")
# TODO: the rocksdb package does not build with the stock rust docker image,
# so use the solana rust docker image until this is resolved upstream
source ci/rust-version.sh
cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues
# shellcheck disable=SC2086
crate_name=$(grep -m 1 '^name = ' $Cargo_toml | cut -f 3 -d ' ' | tr -d \")
numRetries=30
for ((i = 1 ; i <= numRetries ; i++)); do
echo "Attempt ${i} of ${numRetries}"
# shellcheck disable=SC2086
if [[ $(is_crate_version_uploaded $crate_name $expectedCrateVersion) = True ]] ; then
echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io"
break
if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io REST API"
really_uploaded=0
(
set -x
rm -rf crate-test
cargo +"$rust_stable" init crate-test
cd crate-test/
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
echo "[workspace]" >> Cargo.toml
cargo +"$rust_stable" check
) && really_uploaded=1
if ((really_uploaded)); then
break;
fi
echo "${crate_name} not yet available for download from crates.io"
fi
echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds."
sleep 2

View File

@ -49,7 +49,8 @@ windows)
TARGET=x86_64-pc-windows-msvc
;;
*)
TARGET=unknown-unknown-unknown
echo CI_OS_NAME unset
exit 1
;;
esac
@ -70,6 +71,12 @@ echo --- Creating tarball
source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" solana-release
# Reduce the archive size until
# https://github.com/appveyor/ci/issues/2997 is fixed
if [[ -n $APPVEYOR ]]; then
rm -f solana-release/bin/solana-validator.exe solana-release/bin/solana-bench-exchange.exe
fi
if $PERF_LIBS; then
rm -rf target/perf-libs
./fetch-perf-libs.sh
@ -94,22 +101,13 @@ echo --- Creating tarball
set -e
cd "$(dirname "$0")"/..
export USE_INSTALL=1
export REQUIRE_CONFIG_DIR=1
exec multinode-demo/validator.sh "$@"
EOF
chmod +x solana-release/bin/validator.sh
# Add a wrapper script for clear-config.sh
# TODO: Remove multinode/... from tarball
cat > solana-release/bin/clear-config.sh <<'EOF'
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"/..
export USE_INSTALL=1
exec multinode-demo/clear-config.sh "$@"
EOF
chmod +x solana-release/bin/clear-config.sh
tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
tar cvf solana-release-$TARGET.tar solana-release
bzip2 solana-release-$TARGET.tar
cp solana-release/bin/solana-install-init solana-install-init-$TARGET
)
@ -120,16 +118,16 @@ if [[ "$CI_OS_NAME" = linux ]]; then
MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
fi
echo --- Saving build artifacts
source ci/upload-ci-artifact.sh
upload-ci-artifact solana-release-$TARGET.tar.bz2
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo Skipped due to DO_NOT_PUBLISH_TAR
exit 0
fi
for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
upload-ci-artifact "$file"
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
continue
fi
if [[ -n $BUILDKITE ]]; then
echo --- AWS S3 Store: "$file"
(

View File

@ -13,8 +13,8 @@
# $ source ci/rust-version.sh
#
stable_version=1.35.0
nightly_version=2019-06-20
stable_version=1.36.0
nightly_version=2019-07-19
export rust_stable="$stable_version"
export rust_stable_docker_image=solanalabs/rust:"$stable_version"

View File

@ -12,16 +12,25 @@ export RUSTFLAGS="-D warnings"
do_bpf_check() {
_ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_nightly" clippy --all -- --version
_ cargo +"$rust_nightly" test --all
_ cargo +"$rust_nightly" clippy --version
_ cargo +"$rust_nightly" clippy --all -- --deny=warnings
_ cargo +"$rust_stable" audit
}
(
(
cd sdk/bpf/rust/rust-no-std
do_bpf_check
)
(
cd sdk/bpf/rust/rust-utils
do_bpf_check
)
(
cd sdk/bpf/rust/rust-test
do_bpf_check
)
for project in programs/bpf/rust/*/ ; do
(
cd "$project"
@ -31,9 +40,10 @@ do_bpf_check() {
)
_ cargo +"$rust_stable" fmt --all -- --check
_ cargo +"$rust_stable" clippy --all -- --version
_ cargo +"$rust_stable" clippy --version
_ cargo +"$rust_stable" clippy --all -- --deny=warnings
_ cargo +"$rust_stable" audit
_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0011 # https://github.com/solana-labs/solana/issues/5207
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ book/build.sh

View File

@ -33,7 +33,7 @@ test-stable)
echo "Executing $testName"
_ cargo +"$rust_stable" build --all ${V:+--verbose}
_ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
_ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture
;;
test-stable-perf)
echo "Executing $testName"
@ -77,7 +77,7 @@ test-stable-perf)
# Run root package library tests
_ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
_ cargo +"$rust_stable" test --manifest-path=core/Cargo.toml ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
_ cargo +"$rust_stable" test --manifest-path=core/Cargo.toml ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture
;;
*)
echo "Error: Unknown test: $testName"

View File

@ -24,6 +24,14 @@ blockstreamer=false
deployUpdateManifest=true
fetchLogs=true
maybeHashesPerTick=
maybeDisableAirdrops=
maybeInternalNodesStakeLamports=
maybeInternalNodesLamports=
maybeExternalPrimordialAccountsFile=
maybeLamports=
maybeLetsEncrypt=
maybeFullnodeAdditionalDiskSize=
maybeNoSnapshot=
usage() {
exitcode=0
@ -62,11 +70,28 @@ Deploys a CD testnet
-s - Skip start. Nodes will still be created or configured, but network software will not be started.
-S - Stop network software without tearing down nodes.
-f - Discard validator nodes that didn't bootup successfully
-w - Skip time-consuming "bells and whistles" that are
unnecessary for a high-node count demo testnet
--no-airdrop
- If set, disables airdrops. Nodes must be funded in genesis block when airdrops are disabled.
--internal-nodes-stake-lamports NUM_LAMPORTS
- Amount to stake internal nodes.
--internal-nodes-lamports NUM_LAMPORTS
- Amount to fund internal nodes in genesis block
--external-accounts-file FILE_PATH
- Path to external Primordial Accounts file, if it exists.
--hashes-per-tick NUM_HASHES|sleep|auto
- Override the default --hashes-per-tick for the cluster
--lamports NUM_LAMPORTS
- Specify the number of lamports to mint (default 100000000000000)
--skip-deploy-update
- If set, will skip software update deployment
--skip-remote-log-retrieval
- If set, will not fetch logs from remote nodes
--letsencrypt [dns name]
- Attempt to generate a TLS certificate using this DNS name
--fullnode-additional-disk-size-gb [number]
- Size of additional disk in GB for all fullnodes
--no-snapshot
- If set, disables booting validators from a snapshot
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics
@ -82,6 +107,39 @@ while [[ -n $1 ]]; do
if [[ $1 = --hashes-per-tick ]]; then
maybeHashesPerTick="$1 $2"
shift 2
elif [[ $1 = --lamports ]]; then
maybeLamports="$1 $2"
shift 2
elif [[ $1 = --no-airdrop ]]; then
maybeDisableAirdrops="$1"
shift 1
elif [[ $1 = --internal-nodes-stake-lamports ]]; then
maybeInternalNodesStakeLamports="$1 $2"
shift 2
elif [[ $1 = --internal-nodes-lamports ]]; then
maybeInternalNodesLamports="$1 $2"
shift 2
elif [[ $1 = --external-accounts-file ]]; then
maybeExternalPrimordialAccountsFile="$1 $2"
shift 2
elif [[ $1 = --skip-deploy-update ]]; then
deployUpdateManifest=false
shift 1
elif [[ $1 = --skip-remote-log-retrieval ]]; then
fetchLogs=false
shift 1
elif [[ $1 = --letsencrypt ]]; then
maybeLetsEncrypt="$1 $2"
shift 2
elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
maybeFullnodeAdditionalDiskSize="$1 $2"
shift 2
elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
shortArgs+=("$1")
shift
elif [[ $1 = --no-snapshot ]]; then
maybeNoSnapshot="$1"
shift 1
else
usage "Unknown long option: $1"
fi
@ -228,6 +286,11 @@ if ! $skipCreate; then
# shellcheck disable=SC2206
create_args+=(${zone_args[@]})
if [[ -n $maybeLetsEncrypt ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeLetsEncrypt
create_args+=($maybeLetsEncrypt)
fi
if $blockstreamer; then
create_args+=(-u)
fi
@ -256,6 +319,11 @@ if ! $skipCreate; then
create_args+=(-f)
fi
if [[ -n $maybeFullnodeAdditionalDiskSize ]]; then
# shellcheck disable=SC2206 # Do not want to quote
create_args+=($maybeFullnodeAdditionalDiskSize)
fi
time net/"$cloudProvider".sh create "${create_args[@]}"
else
echo "--- $cloudProvider.sh config"
@ -318,7 +386,6 @@ if ! $skipStart; then
# shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick
args+=($maybeHashesPerTick)
fi
if $reuseLedger; then
args+=(-r)
fi
@ -334,7 +401,32 @@ if ! $skipStart; then
args+=(--deploy-update windows)
fi
# shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables
if [[ -n $maybeDisableAirdrops ]]; then
# shellcheck disable=SC2206
args+=($maybeDisableAirdrops)
fi
if [[ -n $maybeInternalNodesStakeLamports ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeInternalNodesStakeLamports
args+=($maybeInternalNodesStakeLamports)
fi
if [[ -n $maybeInternalNodesLamports ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeInternalNodesLamports
args+=($maybeInternalNodesLamports)
fi
if [[ -n $maybeExternalPrimordialAccountsFile ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeExternalPrimordialAccountsFile
args+=($maybeExternalPrimordialAccountsFile)
fi
if [[ -n $maybeLamports ]]; then
# shellcheck disable=SC2206 # Do not want to quote $maybeLamports
args+=($maybeLamports)
fi
if [[ -n $maybeNoSnapshot ]]; then
# shellcheck disable=SC2206
args+=($maybeNoSnapshot)
fi
time net/net.sh "${args[@]}"
) || ok=false

View File

@ -44,6 +44,8 @@ steps:
value: "testnet-beta-perf"
- label: "testnet-demo"
value: "testnet-demo"
- label: "tds"
value: "tds"
- select: "Operation"
key: "testnet-operation"
default: "sanity-or-restart"
@ -153,6 +155,10 @@ testnet-demo)
: "${GCE_NODE_COUNT:=150}"
: "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
;;
tds)
CHANNEL_OR_TAG=beta
CHANNEL_BRANCH=$BETA_CHANNEL
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
@ -287,6 +293,14 @@ sanity() {
$ok
)
;;
tds)
(
set -x
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-sanity.sh tds-solana-com gce "${GCE_ZONES[0]}" -f
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
@ -321,7 +335,8 @@ deploy() {
(
set -x
ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-a eipalloc-0ccd4f2239886fa94 --letsencrypt edge.testnet.solana.com \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
@ -347,7 +362,8 @@ deploy() {
set -x
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-a eipalloc-0f286cf8a0771ce35 --letsencrypt beta.testnet.solana.com \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
@ -378,7 +394,8 @@ deploy() {
# shellcheck disable=SC2068
ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f -a eipalloc-0fa502bf95f6f18b2 \
-t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f \
-a eipalloc-0fa502bf95f6f18b2 --letsencrypt testnet.solana.com \
${skipCreate:+-e} \
${maybeSkipStart:+-s} \
${maybeStop:+-S} \
@ -424,7 +441,9 @@ deploy() {
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f -w \
-t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f \
--skip-deploy-update \
--skip-remote-log-retrieval \
-a demo-testnet-solana-com \
${skipCreate:+-e} \
${maybeSkipStart:+-s} \
@ -436,7 +455,9 @@ deploy() {
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \
-t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x -w \
-t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x \
--skip-deploy-update \
--skip-remote-log-retrieval \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
@ -444,6 +465,125 @@ deploy() {
fi
)
;;
tds)
(
set -x
# Allow cluster configuration to be overridden from env vars
if [[ -z $TDS_ZONES ]]; then
TDS_ZONES="us-west1-a,us-central1-a,europe-west4-a"
fi
GCE_CLOUD_ZONES=(); while read -r -d, ; do GCE_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TDS_ZONES},"
if [[ -z $TDS_NODE_COUNT ]]; then
TDS_NODE_COUNT="3"
fi
if [[ -z $TDS_CLIENT_COUNT ]]; then
TDS_CLIENT_COUNT="1"
fi
if [[ -z $ENABLE_GPU ]]; then
maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
elif [[ $ENABLE_GPU == skip ]]; then
maybeGpu=()
else
maybeGpu=(-G "${ENABLE_GPU}")
fi
if [[ -z $HASHES_PER_TICK ]]; then
maybeHashesPerTick="--hashes-per-tick auto"
elif [[ $HASHES_PER_TICK == skip ]]; then
maybeHashesPerTick=""
else
maybeHashesPerTick="--hashes-per-tick ${HASHES_PER_TICK}"
fi
if [[ -z $DISABLE_AIRDROPS ]]; then
DISABLE_AIRDROPS="true"
fi
if [[ $DISABLE_AIRDROPS == true ]] ; then
maybeDisableAirdrops="--no-airdrop"
else
maybeDisableAirdrops=""
fi
if [[ -z $INTERNAL_NODES_STAKE_LAMPORTS ]]; then
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports 1000000000000"
elif [[ $INTERNAL_NODES_STAKE_LAMPORTS == skip ]]; then
maybeInternalNodesStakeLamports=""
else
maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports ${INTERNAL_NODES_STAKE_LAMPORTS}"
fi
if [[ -z $INTERNAL_NODES_LAMPORTS ]]; then
maybeInternalNodesLamports="--internal-nodes-lamports 2000000000000"
elif [[ $INTERNAL_NODES_LAMPORTS == skip ]]; then
maybeInternalNodesLamports=""
else
maybeInternalNodesLamports="--internal-nodes-lamports ${INTERNAL_NODES_LAMPORTS}"
fi
EXTERNAL_ACCOUNTS_FILE=/tmp/validator.yml
if [[ -z $EXTERNAL_ACCOUNTS_FILE_URL ]]; then
EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/validators/all.yml
wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
elif [[ $EXTERNAL_ACCOUNTS_FILE_URL == skip ]]; then
maybeExternalAccountsFile=""
else
wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
fi
if [[ -z $LAMPORTS ]]; then
maybeLamports="--lamports 8589934592000000000"
elif [[ $LAMPORTS == skip ]]; then
maybeLamports=""
else
maybeLamports="--lamports ${LAMPORTS}"
fi
if [[ -z $ADDITIONAL_DISK_SIZE_GB ]]; then
maybeAdditionalDisk="--fullnode-additional-disk-size-gb 32000"
elif [[ $ADDITIONAL_DISK_SIZE_GB == skip ]]; then
maybeAdditionalDisk=""
else
maybeAdditionalDisk="--fullnode-additional-disk-size-gb ${ADDITIONAL_DISK_SIZE_GB}"
fi
# Multiple V100 GPUs are available in us-west1, us-central1 and europe-west4
# shellcheck disable=SC2068
# shellcheck disable=SC2086
NO_LEDGER_VERIFY=1 \
NO_VALIDATOR_SANITY=1 \
ci/testnet-deploy.sh -p tds-solana-com -C gce \
"${maybeGpu[@]}" \
-d pd-ssd \
${GCE_CLOUD_ZONES[@]/#/-z } \
-t "$CHANNEL_OR_TAG" \
-n ${TDS_NODE_COUNT} \
-c ${TDS_CLIENT_COUNT} \
-P -u \
-a tds-solana-com --letsencrypt tds.solana.com \
${maybeHashesPerTick} \
${skipCreate:+-e} \
${skipStart:+-s} \
${maybeStop:+-S} \
${maybeDelete:+-D} \
${maybeDisableAirdrops} \
${maybeInternalNodesStakeLamports} \
${maybeInternalNodesLamports} \
${maybeExternalAccountsFile} \
${maybeLamports} \
${maybeAdditionalDisk} \
--skip-deploy-update \
--no-snapshot
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1

1
client/.gitignore vendored
View File

@ -1 +1,2 @@
/target/
/farf/

View File

@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.16.0"
version = "0.17.0"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -11,18 +11,18 @@ edition = "2018"
[dependencies]
bincode = "1.1.4"
bs58 = "0.2.0"
jsonrpc-core = "12.0.0"
log = "0.4.2"
jsonrpc-core = "12.1.0"
log = "0.4.7"
rand = "0.6.5"
rayon = "1.1.0"
reqwest = "0.9.18"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
reqwest = "0.9.19"
serde = "1.0.97"
serde_derive = "1.0.97"
serde_json = "1.0.40"
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana-sdk = { path = "../sdk", version = "0.17.0" }
[dev-dependencies]
jsonrpc-core = "12.0.0"
jsonrpc-http-server = "12.0.0"
solana-logger = { path = "../logger", version = "0.16.0" }
jsonrpc-core = "12.1.0"
jsonrpc-http-server = "12.1.0"
solana-logger = { path = "../logger", version = "0.17.0" }

View File

@ -274,6 +274,39 @@ impl RpcClient {
self.get_account(pubkey).map(|account| account.lamports)
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> io::Result<Vec<(Pubkey, Account)>> {
let params = json!([format!("{}", pubkey)]);
let response = self
.client
.send(&RpcRequest::GetProgramAccounts, Some(params), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("AccountNotFound: pubkey={}: {}", pubkey, err),
)
})?;
let accounts: Vec<(String, Account)> =
serde_json::from_value::<Vec<(String, Account)>>(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
)
})?;
let mut pubkey_accounts: Vec<(Pubkey, Account)> = Vec::new();
for (string, account) in accounts.into_iter() {
let pubkey = string.parse().map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetProgramAccounts parse failure: {:?}", err),
)
})?;
pubkey_accounts.push((pubkey, account));
}
Ok(pubkey_accounts)
}
/// Request the transaction count. If the response packet is dropped by the network,
/// this method will try again 5 times.
pub fn get_transaction_count(&self) -> io::Result<u64> {
@ -443,7 +476,7 @@ impl RpcClient {
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> io::Result<()> {
) -> io::Result<usize> {
let mut now = Instant::now();
let mut confirmed_blocks = 0;
loop {
@ -452,8 +485,11 @@ impl RpcClient {
Ok(count) => {
if confirmed_blocks != count {
info!(
"signature {} confirmed {} out of {}",
signature, count, min_confirmed_blocks
"signature {} confirmed {} out of {} after {} ms",
signature,
count,
min_confirmed_blocks,
now.elapsed().as_millis()
);
now = Instant::now();
confirmed_blocks = count;
@ -467,12 +503,23 @@ impl RpcClient {
}
};
if now.elapsed().as_secs() > 15 {
// TODO: Return a better error.
return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
info!(
"signature {} confirmed {} out of {} failed after {} ms",
signature,
confirmed_blocks,
min_confirmed_blocks,
now.elapsed().as_millis()
);
if confirmed_blocks > 0 {
return Ok(confirmed_blocks);
} else {
// TODO: Return a better error.
return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
}
}
sleep(Duration::from_millis(250));
sleep(Duration::from_secs(1));
}
Ok(())
Ok(confirmed_blocks)
}
pub fn get_num_blocks_since_signature_confirmation(

View File

@ -10,13 +10,15 @@ pub enum RpcRequest {
GetBalance,
GetClusterNodes,
GetNumBlocksSinceSignatureConfirmation,
GetProgramAccounts,
GetRecentBlockhash,
GetSignatureStatus,
GetSlot,
GetSlotLeader,
GetEpochVoteAccounts,
GetStorageBlockhash,
GetStorageSlot,
GetStorageTurn,
GetStorageTurnRate,
GetSlotsPerSegment,
GetStoragePubkeysForSlot,
GetTransactionCount,
RegisterNode,
@ -38,13 +40,15 @@ impl RpcRequest {
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation"
}
RpcRequest::GetProgramAccounts => "getProgramAccounts",
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
RpcRequest::GetSignatureStatus => "getSignatureStatus",
RpcRequest::GetSlot => "getSlot",
RpcRequest::GetSlotLeader => "getSlotLeader",
RpcRequest::GetEpochVoteAccounts => "getEpochVoteAccounts",
RpcRequest::GetStorageBlockhash => "getStorageBlockhash",
RpcRequest::GetStorageSlot => "getStorageSlot",
RpcRequest::GetStorageTurn => "getStorageTurn",
RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
RpcRequest::GetSlotsPerSegment => "getSlotsPerSegment",
RpcRequest::GetStoragePubkeysForSlot => "getStoragePubkeysForSlot",
RpcRequest::GetTransactionCount => "getTransactionCount",
RpcRequest::RegisterNode => "registerNode",

View File

@ -16,7 +16,7 @@ use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::system_instruction;
use solana_sdk::timing::duration_as_ms;
use solana_sdk::timing::{duration_as_ms, MAX_PROCESSING_AGE};
use solana_sdk::transaction::{self, Transaction};
use solana_sdk::transport::Result as TransportResult;
use std::io;
@ -203,20 +203,39 @@ impl ThinClient {
keypairs: &[&Keypair],
transaction: &mut Transaction,
tries: usize,
min_confirmed_blocks: usize,
pending_confirmations: usize,
) -> io::Result<Signature> {
for x in 0..tries {
let now = Instant::now();
let mut buf = vec![0; serialized_size(&transaction).unwrap() as usize];
let mut wr = std::io::Cursor::new(&mut buf[..]);
let mut num_confirmed = 0;
let mut wait_time = MAX_PROCESSING_AGE;
serialize_into(&mut wr, &transaction)
.expect("serialize Transaction in pub fn transfer_signed");
self.transactions_socket
.send_to(&buf[..], &self.transactions_addr())?;
if self
.poll_for_signature_confirmation(&transaction.signatures[0], min_confirmed_blocks)
.is_ok()
{
return Ok(transaction.signatures[0]);
// resend the same transaction until the transaction has no chance of succeeding
while now.elapsed().as_secs() < wait_time as u64 {
if num_confirmed == 0 {
// Send the transaction if there has been no confirmation (e.g. the first time)
self.transactions_socket
.send_to(&buf[..], &self.transactions_addr())?;
}
if let Ok(confirmed_blocks) = self.poll_for_signature_confirmation(
&transaction.signatures[0],
pending_confirmations,
) {
num_confirmed = confirmed_blocks;
if confirmed_blocks >= pending_confirmations {
return Ok(transaction.signatures[0]);
}
// Since network has seen the transaction, wait longer to receive
// all pending confirmations. Resending the transaction could result into
// extra transaction fees
wait_time = wait_time.max(
MAX_PROCESSING_AGE * pending_confirmations.saturating_sub(num_confirmed),
);
}
}
info!(
"{} tries failed transfer to {}",
@ -378,7 +397,7 @@ impl SyncClient for ThinClient {
&self,
signature: &Signature,
min_confirmed_blocks: usize,
) -> TransportResult<()> {
) -> TransportResult<usize> {
Ok(self
.rpc_client()
.poll_for_signature_confirmation(signature, min_confirmed_blocks)?)

1
core/.gitignore vendored
View File

@ -1 +1,2 @@
/target/
/farf/

View File

@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.16.0"
version = "0.17.0"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -21,55 +21,61 @@ kvstore = ["solana-kvstore"]
bincode = "1.1.4"
bs58 = "0.2.0"
byteorder = "1.3.2"
chrono = { version = "0.4.0", features = ["serde"] }
chrono = { version = "0.4.7", features = ["serde"] }
core_affinity = "0.5.9"
crc = { version = "1.8.1", optional = true }
crossbeam-channel = "0.3"
hashbrown = "0.2.0"
indexmap = "1.0"
itertools = "0.8.0"
jsonrpc-core = "12.0.0"
jsonrpc-derive = "12.0.0"
jsonrpc-http-server = "12.0.0"
jsonrpc-core = "12.1.0"
jsonrpc-derive = "12.1.0"
jsonrpc-http-server = "12.1.0"
jsonrpc-pubsub = "12.0.0"
jsonrpc-ws-server = "12.0.0"
jsonrpc-ws-server = "12.1.0"
libc = "0.2.58"
log = "0.4.2"
log = "0.4.7"
memmap = { version = "0.7.0", optional = true }
nix = "0.14.1"
num-traits = "0.2"
rand = "0.6.5"
rand_chacha = "0.1.1"
rayon = "1.1.0"
reqwest = "0.9.18"
reqwest = "0.9.19"
rocksdb = "0.11.0"
serde = "1.0.92"
serde_derive = "1.0.92"
serde_json = "1.0.39"
solana-budget-api = { path = "../programs/budget_api", version = "0.16.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.16.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.16.0" }
solana-client = { path = "../client", version = "0.16.0" }
solana-config-program = { path = "../programs/config_program", version = "0.16.0" }
solana-drone = { path = "../drone", version = "0.16.0" }
serde = "1.0.97"
serde_derive = "1.0.97"
serde_json = "1.0.40"
solana-budget-api = { path = "../programs/budget_api", version = "0.17.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.17.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.17.0" }
solana-client = { path = "../client", version = "0.17.0" }
solana-config-program = { path = "../programs/config_program", version = "0.17.0" }
solana-drone = { path = "../drone", version = "0.17.0" }
solana-ed25519-dalek = "0.2.0"
solana-exchange-program = { path = "../programs/exchange_program", version = "0.16.0" }
solana-kvstore = { path = "../kvstore", version = "0.16.0", optional = true }
solana-logger = { path = "../logger", version = "0.16.0" }
solana-metrics = { path = "../metrics", version = "0.16.0" }
solana-netutil = { path = "../netutil", version = "0.16.0" }
solana-runtime = { path = "../runtime", version = "0.16.0" }
solana-sdk = { path = "../sdk", version = "0.16.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.16.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.16.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.16.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.16.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.16.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.16.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.16.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.17.0" }
solana-kvstore = { path = "../kvstore", version = "0.17.0", optional = true }
solana-logger = { path = "../logger", version = "0.17.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.17.0" }
solana-metrics = { path = "../metrics", version = "0.17.0" }
solana-measure = { path = "../measure", version = "0.17.0" }
solana-netutil = { path = "../netutil", version = "0.17.0" }
solana-runtime = { path = "../runtime", version = "0.17.0" }
solana-sdk = { path = "../sdk", version = "0.17.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.17.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.17.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.17.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.17.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.17.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.17.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.17.0" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.17.0" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.17.0" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.17.0" }
sys-info = "0.5.7"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
untrusted = "0.7.0"
# reed-solomon-erasure's simd_c feature fails to build for x86_64-pc-windows-msvc, use pure-rust
[target.'cfg(windows)'.dependencies]
@ -80,6 +86,8 @@ reed-solomon-erasure = "3.1.1"
[dev-dependencies]
hex-literal = "0.2.0"
matches = "0.1.6"
serial_test = "0.2.0"
serial_test_derive = "0.2.0"
[[bench]]
name = "banking_stage"

View File

@ -4,6 +4,7 @@ extern crate test;
#[macro_use]
extern crate solana;
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
@ -17,16 +18,18 @@ use solana::poh_recorder::WorkingBankEntries;
use solana::service::Service;
use solana::test_tx::test_tx;
use solana_runtime::bank::Bank;
use solana_sdk::hash::hash;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::KeypairUtil;
use solana_sdk::signature::Signature;
use solana_sdk::system_instruction;
use solana_sdk::system_transaction;
use solana_sdk::timing::{
duration_as_us, timestamp, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES,
};
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use std::iter;
use std::sync::atomic::Ordering;
use std::sync::mpsc::{channel, Receiver};
use std::sync::mpsc::Receiver;
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use test::Bencher;
@ -76,8 +79,12 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
// This tests the performance of buffering packets.
// If the packet buffers are copied, performance will be poor.
bencher.iter(move || {
let _ignored =
BankingStage::consume_buffered_packets(&my_pubkey, &poh_recorder, &mut packets);
let _ignored = BankingStage::consume_buffered_packets(
&my_pubkey,
&poh_recorder,
&mut packets,
10_000,
);
});
exit.store(true, Ordering::Relaxed);
@ -86,13 +93,52 @@ fn bench_consume_buffered(bencher: &mut Bencher) {
let _unused = Blocktree::destroy(&ledger_path);
}
#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
(0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new_rand();
new.message.account_keys[1] = Pubkey::new_rand();
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect()
}
fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
let progs = 4;
(0..txes)
.into_iter()
.map(|_| {
let mut instructions = vec![];
let from_key = Keypair::new();
for _ in 1..progs {
let to_key = Pubkey::new_rand();
instructions.push(system_instruction::transfer(&from_key.pubkey(), &to_key, 1));
}
let mut new = Transaction::new_unsigned_instructions(instructions);
new.sign(&[&from_key], hash);
new
})
.collect()
}
enum TransactionType {
Accounts,
Programs,
}
fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
solana_logger::setup();
let num_threads = BankingStage::num_threads() as usize;
// a multiple of packet chunk 2X duplicates to avoid races
const CHUNKS: usize = 32;
let txes = 192 * num_threads * CHUNKS;
// a multiple of packet chunk duplicates to avoid races
const CHUNKS: usize = 8;
const PACKETS_PER_BATCH: usize = 192;
let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
let mint_total = 1_000_000_000_000;
let GenesisBlockInfo {
mut genesis_block,
@ -104,25 +150,17 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
// during the benchmark
genesis_block.ticks_per_slot = 10_000;
let (verified_sender, verified_receiver) = channel();
let (vote_sender, vote_receiver) = channel();
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let bank = Arc::new(Bank::new(&genesis_block));
let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(&mint_keypair, &to_pubkey, 1, genesis_block.hash());
trace!("txs: {}", txes);
let transactions: Vec<_> = (0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new(&from[0..32]);
new.message.account_keys[1] = Pubkey::new(&to[0..32]);
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect();
debug!("threads: {} txs: {}", num_threads, txes);
let transactions = match tx_type {
TransactionType::Accounts => make_accounts_txs(txes, &mint_keypair, genesis_block.hash()),
TransactionType::Programs => make_programs_txs(txes, genesis_block.hash()),
};
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = system_transaction::transfer(
@ -146,7 +184,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 192)
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH)
.into_iter()
.map(|x| {
let len = x.packets.len();
@ -182,12 +220,13 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let now = Instant::now();
let mut sent = 0;
for v in verified[start..start + chunk_len].chunks(verified.len() / num_threads) {
trace!(
"sending... {}..{} {}",
for v in verified[start..start + chunk_len].chunks(chunk_len / num_threads) {
debug!(
"sending... {}..{} {} v.len: {}",
start,
start + chunk_len,
timestamp()
timestamp(),
v.len(),
);
for xv in v {
sent += xv.0.packets.len();
@ -197,7 +236,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
check_txs(&signal_receiver2, txes / CHUNKS);
// This signature clear may not actually clear the signatures
// in this chunk, but since we rotate between 32 chunks then
// in this chunk, but since we rotate between CHUNKS then
// we should clear them by the time we come around again to re-use that chunk.
bank.clear_signatures();
trace!(
@ -217,124 +256,11 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
}
#[bench]
#[ignore]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
let progs = 4;
let num_threads = BankingStage::num_threads() as usize;
// a multiple of packet chunk 2X duplicates to avoid races
let txes = 96 * 100 * num_threads * 2;
let mint_total = 1_000_000_000_000;
let GenesisBlockInfo {
genesis_block,
mint_keypair,
..
} = create_genesis_block(mint_total);
let (verified_sender, verified_receiver) = channel();
let (vote_sender, vote_receiver) = channel();
let bank = Arc::new(Bank::new(&genesis_block));
let to_pubkey = Pubkey::new_rand();
let dummy = system_transaction::transfer(&mint_keypair, &to_pubkey, 1, genesis_block.hash());
let transactions: Vec<_> = (0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
new.message.account_keys[0] = Pubkey::new(&from[0..32]);
new.message.account_keys[1] = Pubkey::new(&to[0..32]);
let prog = new.message.instructions[0].clone();
for i in 1..progs {
//generate programs that spend to random keys
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let to_key = Pubkey::new(&to[0..32]);
new.message.account_keys.push(to_key);
assert_eq!(new.message.account_keys.len(), i + 2);
new.message.instructions.push(prog.clone());
assert_eq!(new.message.instructions.len(), i + 1);
new.message.instructions[i].accounts[1] = 1 + i as u8;
assert_eq!(new.key(i, 1), Some(&to_key));
assert_eq!(
new.message.account_keys[new.message.instructions[i].accounts[1] as usize],
to_key
);
}
assert_eq!(new.message.instructions.len(), progs);
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect();
transactions.iter().for_each(|tx| {
let fund = system_transaction::transfer(
&mint_keypair,
&tx.message.account_keys[0],
mint_total / txes as u64,
genesis_block.hash(),
);
bank.process_transaction(&fund).unwrap();
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
.into_iter()
.map(|x| {
let len = x.packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let ledger_path = get_tmp_ledger_path!();
{
let blocktree = Arc::new(
Blocktree::open(&ledger_path).expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blocktree);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(RwLock::new(cluster_info));
let _banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
vote_receiver,
);
poh_recorder.lock().unwrap().set_bank(&bank);
let mut id = genesis_block.hash();
for _ in 0..(MAX_RECENT_BLOCKHASHES * DEFAULT_TICKS_PER_SLOT as usize) {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
let half_len = verified.len() / 2;
let mut start = 0;
let signal_receiver = Arc::new(signal_receiver);
let signal_receiver2 = signal_receiver.clone();
bencher.iter(move || {
// make sure the transactions are still valid
bank.register_tick(&genesis_block.hash());
for v in verified[start..start + half_len].chunks(verified.len() / num_threads) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver2, txes / 2);
bank.clear_signatures();
start += half_len;
start %= verified.len();
});
drop(vote_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blocktree::destroy(&ledger_path).unwrap();
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
bench_banking(bencher, TransactionType::Accounts);
}
#[bench]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
bench_banking(bencher, TransactionType::Programs);
}

View File

@ -3,6 +3,7 @@
extern crate test;
use solana::packet::to_packets;
use solana::recycler::Recycler;
use solana::sigverify;
use solana::test_tx::test_tx;
use test::Bencher;
@ -14,8 +15,10 @@ fn bench_sigverify(bencher: &mut Bencher) {
// generate packet vector
let batches = to_packets(&vec![tx; 128]);
let recycler = Recycler::default();
let recycler_out = Recycler::default();
// verify packets
bencher.iter(|| {
let _ans = sigverify::ed25519_verify(&batches);
let _ans = sigverify::ed25519_verify(&batches, &recycler, &recycler_out);
})
}

View File

@ -3,6 +3,7 @@
extern crate solana;
extern crate test;
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use solana::packet::to_packets_chunked;
@ -21,7 +22,7 @@ use test::Bencher;
fn bench_sigverify_stage(bencher: &mut Bencher) {
solana_logger::setup();
let (packet_s, packet_r) = channel();
let (verified_s, verified_r) = channel();
let (verified_s, verified_r) = unbounded();
let sigverify_disabled = false;
let stage = SigVerifyStage::new(packet_r, sigverify_disabled, verified_s);

View File

@ -20,6 +20,39 @@ pub struct BankForks {
root: u64,
slots: HashSet<u64>,
snapshot_path: Option<String>,
confidence: HashMap<u64, Confidence>,
}
#[derive(Debug, Default, PartialEq)]
pub struct Confidence {
fork_stakes: u64,
epoch_stakes: u64,
lockouts: u64,
stake_weighted_lockouts: u128,
}
impl Confidence {
pub fn new(fork_stakes: u64, epoch_stakes: u64, lockouts: u64) -> Self {
Self {
fork_stakes,
epoch_stakes,
lockouts,
stake_weighted_lockouts: 0,
}
}
pub fn new_with_stake_weighted(
fork_stakes: u64,
epoch_stakes: u64,
lockouts: u64,
stake_weighted_lockouts: u128,
) -> Self {
Self {
fork_stakes,
epoch_stakes,
lockouts,
stake_weighted_lockouts,
}
}
}
impl Index<u64> for BankForks {
@ -40,6 +73,7 @@ impl BankForks {
root: 0,
slots: HashSet::new(),
snapshot_path: None,
confidence: HashMap::new(),
}
}
@ -104,15 +138,17 @@ impl BankForks {
working_bank,
slots: HashSet::new(),
snapshot_path: None,
confidence: HashMap::new(),
}
}
pub fn insert(&mut self, bank: Bank) {
pub fn insert(&mut self, bank: Bank) -> Arc<Bank> {
let bank = Arc::new(bank);
let prev = self.banks.insert(bank.slot(), bank.clone());
assert!(prev.is_none());
self.working_bank = bank.clone();
bank
}
// TODO: really want to kill this...
@ -160,6 +196,8 @@ impl BankForks {
let descendants = self.descendants();
self.banks
.retain(|slot, _| descendants[&root].contains(slot));
self.confidence
.retain(|slot, _| slot == &root || descendants[&root].contains(slot));
if self.snapshot_path.is_some() {
let diff: HashSet<_> = slots.symmetric_difference(&self.slots).collect();
trace!("prune non root {} - {:?}", root, diff);
@ -174,6 +212,41 @@ impl BankForks {
self.slots = slots.clone();
}
pub fn cache_fork_confidence(
&mut self,
fork: u64,
fork_stakes: u64,
epoch_stakes: u64,
lockouts: u64,
) {
self.confidence
.entry(fork)
.and_modify(|entry| {
entry.fork_stakes = fork_stakes;
entry.epoch_stakes = epoch_stakes;
entry.lockouts = lockouts;
})
.or_insert_with(|| Confidence::new(fork_stakes, epoch_stakes, lockouts));
}
pub fn cache_stake_weighted_lockouts(&mut self, fork: u64, stake_weighted_lockouts: u128) {
self.confidence
.entry(fork)
.and_modify(|entry| {
entry.stake_weighted_lockouts = stake_weighted_lockouts;
})
.or_insert(Confidence {
fork_stakes: 0,
epoch_stakes: 0,
lockouts: 0,
stake_weighted_lockouts,
});
}
pub fn get_fork_confidence(&self, fork: u64) -> Option<&Confidence> {
self.confidence.get(&fork)
}
fn get_io_error(error: &str) -> Error {
warn!("BankForks error: {:?}", error);
Error::new(ErrorKind::Other, error)
@ -329,8 +402,9 @@ impl BankForks {
names.sort();
let mut bank_maps = vec![];
let status_cache_rc = StatusCacheRc::default();
let id = (names[names.len() - 1] + 1) as usize;
let mut bank0 =
Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc);
Bank::create_with_genesis(&genesis_block, account_paths.clone(), &status_cache_rc, id);
bank0.freeze();
let bank_root = BankForks::load_snapshots(
&names,
@ -354,6 +428,7 @@ impl BankForks {
root,
slots,
snapshot_path: snapshot_path.clone(),
confidence: HashMap::new(),
})
}
}
@ -437,10 +512,59 @@ mod tests {
assert_eq!(bank_forks.active_banks(), vec![1]);
}
#[test]
fn test_bank_forks_confidence_cache() {
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(10_000);
let bank = Bank::new(&genesis_block);
let fork = bank.slot();
let mut bank_forks = BankForks::new(0, bank);
assert!(bank_forks.confidence.get(&fork).is_none());
bank_forks.cache_fork_confidence(fork, 11, 12, 13);
assert_eq!(
bank_forks.confidence.get(&fork).unwrap(),
&Confidence {
fork_stakes: 11,
epoch_stakes: 12,
lockouts: 13,
stake_weighted_lockouts: 0,
}
);
// Ensure that {fork_stakes, epoch_stakes, lockouts} and stake_weighted_lockouts
// can be updated separately
bank_forks.cache_stake_weighted_lockouts(fork, 20);
assert_eq!(
bank_forks.confidence.get(&fork).unwrap(),
&Confidence {
fork_stakes: 11,
epoch_stakes: 12,
lockouts: 13,
stake_weighted_lockouts: 20,
}
);
bank_forks.cache_fork_confidence(fork, 21, 22, 23);
assert_eq!(
bank_forks
.confidence
.get(&fork)
.unwrap()
.stake_weighted_lockouts,
20,
);
}
struct TempPaths {
pub paths: String,
}
impl TempPaths {
fn remove_all(&self) {
let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
paths.iter().for_each(|p| {
let _ignored = remove_dir_all(p);
});
}
}
#[macro_export]
macro_rules! tmp_bank_accounts_name {
() => {
@ -457,10 +581,7 @@ mod tests {
impl Drop for TempPaths {
fn drop(&mut self) {
let paths: Vec<String> = self.paths.split(',').map(|s| s.to_string()).collect();
paths.iter().for_each(|p| {
let _ignored = remove_dir_all(p);
});
self.remove_all()
}
}
@ -469,7 +590,7 @@ mod tests {
}
fn get_tmp_snapshots_path() -> TempPaths {
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let path = format!("{}/snapshots", out_dir);
TempPaths {
paths: path.to_string(),
@ -478,7 +599,7 @@ mod tests {
fn get_tmp_bank_accounts_path(paths: &str) -> TempPaths {
let vpaths = get_paths_vec(paths);
let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let vpaths: Vec<_> = vpaths
.iter()
.map(|path| format!("{}/{}", out_dir, path))
@ -520,6 +641,8 @@ mod tests {
mint_keypair,
..
} = create_genesis_block(10_000);
path.remove_all();
spath.remove_all();
for index in 0..10 {
let bank0 = Bank::new_with_paths(&genesis_block, Some(path.paths.clone()));
bank0.freeze();

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -5,6 +5,7 @@ use bincode::{deserialize, serialize};
use serde::de::DeserializeOwned;
use serde::Serialize;
use solana_sdk::timing::Slot;
use std::borrow::Borrow;
use std::collections::HashMap;
use std::marker::PhantomData;
@ -39,6 +40,10 @@ pub mod columns {
#[derive(Debug)]
/// The root column
pub struct Root;
#[derive(Debug)]
/// The index column
pub struct Index;
}
pub trait Backend: Sized + Send + Sync {
@ -82,6 +87,8 @@ where
fn key(index: Self::Index) -> B::OwnedKey;
fn index(key: &B::Key) -> Self::Index;
fn slot(index: Self::Index) -> Slot;
fn as_index(slot: Slot) -> Self::Index;
}
pub trait DbCursor<B>
@ -405,6 +412,31 @@ where
Ok(iter.map(|(key, value)| (C::index(&key), value)))
}
pub fn delete_slot(
&self,
batch: &mut WriteBatch<B>,
from: Option<Slot>,
to: Option<Slot>,
) -> Result<bool>
where
C::Index: PartialOrd + Copy,
{
let mut end = true;
let iter = self.iter(from.map(C::as_index))?;
for (index, _) in iter {
if let Some(to) = to {
if C::slot(index) > to {
end = false;
break;
}
};
if let Err(e) = batch.delete::<C>(index) {
error!("Error: {:?} while adding delete to batch {:?}", e, C::NAME)
}
}
Ok(end)
}
#[inline]
pub fn handle(&self) -> B::ColumnFamily {
self.backend.cf_handle(C::NAME).clone()

View File

@ -100,6 +100,25 @@ impl Column<Kvs> for cf::Data {
}
}
impl Column<Kvs> for cf::Index {
const NAME: &'static str = super::INDEX_CF;
type Index = u64;
fn key(slot: u64) -> Key {
let mut key = Key::default();
BigEndian::write_u64(&mut key.0[8..16], slot);
key
}
fn index(key: &Key) -> u64 {
BigEndian::read_u64(&key.0[8..16])
}
}
impl TypedColumn<Kvs> for cf::Index {
type Type = crate::blocktree::meta::Index;
}
impl Column<Kvs> for cf::DeadSlots {
const NAME: &'static str = super::DEAD_SLOTS;
type Index = u64;

View File

@ -1,6 +1,6 @@
use crate::erasure::{NUM_CODING, NUM_DATA};
use crate::erasure::ErasureConfig;
use solana_metrics::datapoint;
use std::borrow::Borrow;
use std::{collections::BTreeSet, ops::RangeBounds};
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
// The Meta column family
@ -27,6 +27,118 @@ pub struct SlotMeta {
pub is_connected: bool,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Index recording presence/absence of blobs
pub struct Index {
pub slot: u64,
data: DataIndex,
coding: CodingIndex,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
pub struct DataIndex {
/// Map representing presence/absence of data blobs
index: BTreeSet<u64>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Erasure coding information
pub struct CodingIndex {
/// Map from set index, to hashmap from blob index to presence bool
index: BTreeSet<u64>,
}
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// Erasure coding information
pub struct ErasureMeta {
/// Which erasure set in the slot this is
pub set_index: u64,
/// Size of shards in this erasure set
pub size: usize,
/// Erasure configuration for this erasure set
config: ErasureConfig,
}
#[derive(Debug, PartialEq)]
pub enum ErasureMetaStatus {
CanRecover,
DataFull,
StillNeed(usize),
}
impl Index {
pub(in crate::blocktree) fn new(slot: u64) -> Self {
Index {
slot,
data: DataIndex::default(),
coding: CodingIndex::default(),
}
}
pub fn data(&self) -> &DataIndex {
&self.data
}
pub fn coding(&self) -> &CodingIndex {
&self.coding
}
pub fn data_mut(&mut self) -> &mut DataIndex {
&mut self.data
}
pub fn coding_mut(&mut self) -> &mut CodingIndex {
&mut self.coding
}
}
/// TODO: Mark: Change this when coding
impl CodingIndex {
pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}
pub fn is_present(&self, index: u64) -> bool {
self.index.contains(&index)
}
pub fn set_present(&mut self, index: u64, presence: bool) {
if presence {
self.index.insert(index);
} else {
self.index.remove(&index);
}
}
pub fn set_many_present(&mut self, presence: impl IntoIterator<Item = (u64, bool)>) {
for (idx, present) in presence.into_iter() {
self.set_present(idx, present);
}
}
}
impl DataIndex {
pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}
pub fn is_present(&self, index: u64) -> bool {
self.index.contains(&index)
}
pub fn set_present(&mut self, index: u64, presence: bool) {
if presence {
self.index.insert(index);
} else {
self.index.remove(&index);
}
}
pub fn set_many_present(&mut self, presence: impl IntoIterator<Item = (u64, bool)>) {
for (idx, present) in presence.into_iter() {
self.set_present(idx, present);
}
}
}
impl SlotMeta {
pub fn is_full(&self) -> bool {
// last_index is std::u64::MAX when it has no information about how
@ -72,62 +184,37 @@ impl SlotMeta {
}
}
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, Eq, PartialEq)]
/// Erasure coding information
pub struct ErasureMeta {
/// Which erasure set in the slot this is
pub set_index: u64,
/// Size of shards in this erasure set
pub size: usize,
/// Bitfield representing presence/absence of data blobs
data: u64,
/// Bitfield representing presence/absence of coding blobs
coding: u64,
}
#[derive(Debug, PartialEq)]
pub enum ErasureMetaStatus {
CanRecover,
DataFull,
StillNeed(usize),
}
impl ErasureMeta {
pub fn new(set_index: u64) -> ErasureMeta {
pub fn new(set_index: u64, config: &ErasureConfig) -> ErasureMeta {
ErasureMeta {
set_index,
size: 0,
data: 0,
coding: 0,
config: *config,
}
}
pub fn status(&self) -> ErasureMetaStatus {
let (data_missing, coding_missing) =
(NUM_DATA - self.num_data(), NUM_CODING - self.num_coding());
if data_missing > 0 && data_missing + coding_missing <= NUM_CODING {
assert!(self.size != 0);
ErasureMetaStatus::CanRecover
pub fn status(&self, index: &Index) -> ErasureMetaStatus {
use ErasureMetaStatus::*;
let start_idx = self.start_index();
let (data_end_idx, coding_end_idx) = self.end_indexes();
let num_coding = index.coding().present_in_bounds(start_idx..coding_end_idx);
let num_data = index.data().present_in_bounds(start_idx..data_end_idx);
let (data_missing, coding_missing) = (
self.config.num_data() - num_data,
self.config.num_coding() - num_coding,
);
let total_missing = data_missing + coding_missing;
if data_missing > 0 && total_missing <= self.config.num_coding() {
CanRecover
} else if data_missing == 0 {
ErasureMetaStatus::DataFull
DataFull
} else {
ErasureMetaStatus::StillNeed(data_missing + coding_missing - NUM_CODING)
}
}
pub fn num_coding(&self) -> usize {
self.coding.count_ones() as usize
}
pub fn num_data(&self) -> usize {
self.data.count_ones() as usize
}
pub fn is_coding_present(&self, index: u64) -> bool {
if let Some(position) = self.data_index_in_set(index) {
self.coding & (1 << position) != 0
} else {
false
StillNeed(total_missing - self.config.num_coding())
}
}
@ -139,207 +226,78 @@ impl ErasureMeta {
self.size
}
pub fn set_coding_present(&mut self, index: u64, present: bool) {
if let Some(position) = self.data_index_in_set(index) {
if present {
self.coding |= 1 << position;
} else {
self.coding &= !(1 << position);
}
}
}
pub fn is_data_present(&self, index: u64) -> bool {
if let Some(position) = self.data_index_in_set(index) {
self.data & (1 << position) != 0
} else {
false
}
}
pub fn set_data_present(&mut self, index: u64, present: bool) {
if let Some(position) = self.data_index_in_set(index) {
if present {
self.data |= 1 << position;
} else {
self.data &= !(1 << position);
}
}
}
pub fn set_data_multi<I, Idx>(&mut self, indexes: I, present: bool)
where
I: IntoIterator<Item = Idx>,
Idx: Borrow<u64>,
{
for index in indexes.into_iter() {
self.set_data_present(*index.borrow(), present);
}
}
pub fn set_coding_multi<I, Idx>(&mut self, indexes: I, present: bool)
where
I: IntoIterator<Item = Idx>,
Idx: Borrow<u64>,
{
for index in indexes.into_iter() {
self.set_coding_present(*index.borrow(), present);
}
}
pub fn set_index_for(index: u64) -> u64 {
index / NUM_DATA as u64
}
pub fn data_index_in_set(&self, index: u64) -> Option<u64> {
let set_index = Self::set_index_for(index);
if set_index == self.set_index {
Some(index - self.start_index())
} else {
None
}
}
pub fn coding_index_in_set(&self, index: u64) -> Option<u64> {
self.data_index_in_set(index).map(|i| i + NUM_DATA as u64)
pub fn set_index_for(index: u64, num_data: usize) -> u64 {
index / num_data as u64
}
pub fn start_index(&self) -> u64 {
self.set_index * NUM_DATA as u64
self.set_index * self.config.num_data() as u64
}
/// returns a tuple of (data_end, coding_end)
pub fn end_indexes(&self) -> (u64, u64) {
let start = self.start_index();
(start + NUM_DATA as u64, start + NUM_CODING as u64)
(
start + self.config.num_data() as u64,
start + self.config.num_coding() as u64,
)
}
}
#[test]
fn test_meta_indexes() {
use rand::{thread_rng, Rng};
// to avoid casts everywhere
const NUM_DATA: u64 = crate::erasure::NUM_DATA as u64;
let mut rng = thread_rng();
for _ in 0..100 {
let set_index = rng.gen_range(0, 1_000);
let blob_index = (set_index * NUM_DATA) + rng.gen_range(0, NUM_DATA);
assert_eq!(set_index, ErasureMeta::set_index_for(blob_index));
let e_meta = ErasureMeta::new(set_index);
assert_eq!(e_meta.start_index(), set_index * NUM_DATA);
let (data_end_idx, coding_end_idx) = e_meta.end_indexes();
assert_eq!(data_end_idx, (set_index + 1) * NUM_DATA);
assert_eq!(coding_end_idx, set_index * NUM_DATA + NUM_CODING as u64);
}
let mut e_meta = ErasureMeta::new(0);
assert_eq!(e_meta.data_index_in_set(0), Some(0));
assert_eq!(e_meta.data_index_in_set(NUM_DATA / 2), Some(NUM_DATA / 2));
assert_eq!(e_meta.data_index_in_set(NUM_DATA - 1), Some(NUM_DATA - 1));
assert_eq!(e_meta.data_index_in_set(NUM_DATA), None);
assert_eq!(e_meta.data_index_in_set(std::u64::MAX), None);
e_meta.set_index = 1;
assert_eq!(e_meta.data_index_in_set(0), None);
assert_eq!(e_meta.data_index_in_set(NUM_DATA - 1), None);
assert_eq!(e_meta.data_index_in_set(NUM_DATA), Some(0));
assert_eq!(
e_meta.data_index_in_set(NUM_DATA * 2 - 1),
Some(NUM_DATA - 1)
);
assert_eq!(e_meta.data_index_in_set(std::u64::MAX), None);
}
#[test]
fn test_meta_coding_present() {
let mut e_meta = ErasureMeta::default();
e_meta.set_coding_multi(0..NUM_CODING as u64, true);
for i in 0..NUM_CODING as u64 {
assert_eq!(e_meta.is_coding_present(i), true);
}
for i in NUM_CODING as u64..NUM_DATA as u64 {
assert_eq!(e_meta.is_coding_present(i), false);
}
e_meta.set_index = ErasureMeta::set_index_for((NUM_DATA * 17) as u64);
let start_idx = e_meta.start_index();
e_meta.set_coding_multi(start_idx..start_idx + NUM_CODING as u64, true);
for i in start_idx..start_idx + NUM_CODING as u64 {
e_meta.set_coding_present(i, true);
assert_eq!(e_meta.is_coding_present(i), true);
}
for i in start_idx + NUM_CODING as u64..start_idx + NUM_DATA as u64 {
assert_eq!(e_meta.is_coding_present(i), false);
}
}
#[test]
fn test_erasure_meta_status() {
#[cfg(test)]
mod test {
use super::*;
use rand::{seq::SliceRandom, thread_rng};
// Local constansts just used to avoid repetitive casts
const N_DATA: u64 = crate::erasure::NUM_DATA as u64;
const N_CODING: u64 = crate::erasure::NUM_CODING as u64;
use std::iter::repeat;
let mut e_meta = ErasureMeta::default();
let mut rng = thread_rng();
let data_indexes: Vec<u64> = (0..N_DATA).collect();
let coding_indexes: Vec<u64> = (0..N_CODING).collect();
#[test]
fn test_erasure_meta_status() {
use ErasureMetaStatus::*;
assert_eq!(e_meta.status(), ErasureMetaStatus::StillNeed(NUM_DATA));
let set_index = 0;
let erasure_config = ErasureConfig::default();
e_meta.set_data_multi(0..N_DATA, true);
let mut e_meta = ErasureMeta::new(set_index, &erasure_config);
let mut rng = thread_rng();
let mut index = Index::new(0);
e_meta.size = 1;
assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);
let data_indexes = 0..erasure_config.num_data() as u64;
let coding_indexes = 0..erasure_config.num_coding() as u64;
e_meta.size = 1;
e_meta.set_coding_multi(0..N_CODING, true);
assert_eq!(e_meta.status(&index), StillNeed(erasure_config.num_data()));
assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);
index
.data_mut()
.set_many_present(data_indexes.clone().zip(repeat(true)));
for &idx in data_indexes.choose_multiple(&mut rng, NUM_CODING) {
e_meta.set_data_present(idx, false);
assert_eq!(e_meta.status(&index), DataFull);
assert_eq!(e_meta.status(), ErasureMetaStatus::CanRecover);
}
index
.coding_mut()
.set_many_present(coding_indexes.clone().zip(repeat(true)));
e_meta.set_data_multi(0..N_DATA, true);
for &idx in data_indexes
.clone()
.collect::<Vec<_>>()
.choose_multiple(&mut rng, erasure_config.num_data())
{
index.data_mut().set_present(idx, false);
for &idx in coding_indexes.choose_multiple(&mut rng, NUM_CODING) {
e_meta.set_coding_present(idx, false);
assert_eq!(e_meta.status(&index), CanRecover);
}
assert_eq!(e_meta.status(), ErasureMetaStatus::DataFull);
}
}
#[test]
fn test_meta_data_present() {
let mut e_meta = ErasureMeta::default();
e_meta.set_data_multi(0..NUM_DATA as u64, true);
for i in 0..NUM_DATA as u64 {
assert_eq!(e_meta.is_data_present(i), true);
}
for i in NUM_DATA as u64..2 * NUM_DATA as u64 {
assert_eq!(e_meta.is_data_present(i), false);
}
e_meta.set_index = ErasureMeta::set_index_for((NUM_DATA * 23) as u64);
let start_idx = e_meta.start_index();
e_meta.set_data_multi(start_idx..start_idx + NUM_DATA as u64, true);
for i in start_idx..start_idx + NUM_DATA as u64 {
assert_eq!(e_meta.is_data_present(i), true);
}
for i in start_idx - NUM_DATA as u64..start_idx {
assert_eq!(e_meta.is_data_present(i), false);
index
.data_mut()
.set_many_present(data_indexes.zip(repeat(true)));
for &idx in coding_indexes
.collect::<Vec<_>>()
.choose_multiple(&mut rng, erasure_config.num_coding())
{
index.coding_mut().set_present(idx, false);
assert_eq!(e_meta.status(&index), DataFull);
}
}
}

View File

@ -2,6 +2,7 @@ use crate::blocktree::db::columns as cf;
use crate::blocktree::db::{Backend, Column, DbCursor, IWriteBatch, TypedColumn};
use crate::blocktree::BlocktreeError;
use crate::result::{Error, Result};
use solana_sdk::timing::Slot;
use byteorder::{BigEndian, ByteOrder};
@ -15,7 +16,8 @@ use std::path::Path;
// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: usize = 512 * 1024 * 1024;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
const MIN_WRITE_BUFFER_SIZE: u64 = 64 * 1024; // 64KB
#[derive(Debug)]
pub struct Rocks(rocksdb::DB);
@ -30,7 +32,9 @@ impl Backend for Rocks {
type Error = rocksdb::Error;
fn open(path: &Path) -> Result<Rocks> {
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta};
use crate::blocktree::db::columns::{
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, SlotMeta,
};
fs::create_dir_all(&path)?;
@ -38,14 +42,22 @@ impl Backend for Rocks {
let db_options = get_db_options();
// Column family names
let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
let data_cf_descriptor = ColumnFamilyDescriptor::new(Data::NAME, get_cf_options());
let dead_slots_cf_descriptor = ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options());
let erasure_cf_descriptor = ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options());
let meta_cf_descriptor =
ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(SlotMeta::NAME));
let data_cf_descriptor =
ColumnFamilyDescriptor::new(Data::NAME, get_cf_options(Data::NAME));
let dead_slots_cf_descriptor =
ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(DeadSlots::NAME));
let erasure_cf_descriptor =
ColumnFamilyDescriptor::new(Coding::NAME, get_cf_options(Coding::NAME));
let erasure_meta_cf_descriptor =
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
let orphans_cf_descriptor = ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options());
let root_cf_descriptor = ColumnFamilyDescriptor::new(Root::NAME, get_cf_options());
ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(ErasureMeta::NAME));
let orphans_cf_descriptor =
ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(Orphans::NAME));
let root_cf_descriptor =
ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(Root::NAME));
let index_cf_descriptor =
ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(Index::NAME));
let cfs = vec![
meta_cf_descriptor,
@ -55,6 +67,7 @@ impl Backend for Rocks {
erasure_meta_cf_descriptor,
orphans_cf_descriptor,
root_cf_descriptor,
index_cf_descriptor,
];
// Open the database
@ -64,13 +77,16 @@ impl Backend for Rocks {
}
fn columns(&self) -> Vec<&'static str> {
use crate::blocktree::db::columns::{Coding, Data, DeadSlots, ErasureMeta, Orphans, Root, SlotMeta};
use crate::blocktree::db::columns::{
Coding, Data, DeadSlots, ErasureMeta, Index, Orphans, Root, SlotMeta,
};
vec![
Coding::NAME,
ErasureMeta::NAME,
DeadSlots::NAME,
Data::NAME,
Index::NAME,
Orphans::NAME,
Root::NAME,
SlotMeta::NAME,
@ -144,6 +160,14 @@ impl Column<Rocks> for cf::Coding {
fn index(key: &[u8]) -> (u64, u64) {
cf::Data::index(key)
}
fn slot(index: Self::Index) -> Slot {
index.0
}
fn as_index(slot: Slot) -> Self::Index {
(slot, 0)
}
}
impl Column<Rocks> for cf::Data {
@ -162,6 +186,41 @@ impl Column<Rocks> for cf::Data {
let index = BigEndian::read_u64(&key[8..16]);
(slot, index)
}
fn slot(index: Self::Index) -> Slot {
index.0
}
fn as_index(slot: Slot) -> Self::Index {
(slot, 0)
}
}
impl Column<Rocks> for cf::Index {
const NAME: &'static str = super::INDEX_CF;
type Index = u64;
fn key(slot: u64) -> Vec<u8> {
let mut key = vec![0; 8];
BigEndian::write_u64(&mut key[..], slot);
key
}
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
fn slot(index: Self::Index) -> Slot {
index
}
fn as_index(slot: Slot) -> Self::Index {
slot
}
}
impl TypedColumn<Rocks> for cf::Index {
type Type = crate::blocktree::meta::Index;
}
impl Column<Rocks> for cf::DeadSlots {
@ -177,6 +236,14 @@ impl Column<Rocks> for cf::DeadSlots {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
fn slot(index: Self::Index) -> Slot {
index
}
fn as_index(slot: Slot) -> Self::Index {
slot
}
}
impl TypedColumn<Rocks> for cf::DeadSlots {
@ -196,6 +263,14 @@ impl Column<Rocks> for cf::Orphans {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
fn slot(index: Self::Index) -> Slot {
index
}
fn as_index(slot: Slot) -> Self::Index {
slot
}
}
impl TypedColumn<Rocks> for cf::Orphans {
@ -215,6 +290,14 @@ impl Column<Rocks> for cf::Root {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
fn slot(index: Self::Index) -> Slot {
index
}
fn as_index(slot: Slot) -> Self::Index {
slot
}
}
impl TypedColumn<Rocks> for cf::Root {
@ -234,6 +317,14 @@ impl Column<Rocks> for cf::SlotMeta {
fn index(key: &[u8]) -> u64 {
BigEndian::read_u64(&key[..8])
}
fn slot(index: Self::Index) -> Slot {
index
}
fn as_index(slot: Slot) -> Self::Index {
slot
}
}
impl TypedColumn<Rocks> for cf::SlotMeta {
@ -257,6 +348,14 @@ impl Column<Rocks> for cf::ErasureMeta {
BigEndian::write_u64(&mut key[8..], set_index);
key
}
fn slot(index: Self::Index) -> Slot {
index.0
}
fn as_index(slot: Slot) -> Self::Index {
(slot, 0)
}
}
impl TypedColumn<Rocks> for cf::ErasureMeta {
@ -307,11 +406,27 @@ impl std::convert::From<rocksdb::Error> for Error {
}
}
fn get_cf_options() -> Options {
fn get_cf_options(name: &'static str) -> Options {
use crate::blocktree::db::columns::{Coding, Data};
let mut options = Options::default();
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
match name {
Coding::NAME | Data::NAME => {
// 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM
options.set_max_write_buffer_number(8);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
}
_ => {
// We want smaller CFs to flush faster. This results in more WAL files but lowers
// overall WAL space utilization and increases flush frequency
options.set_write_buffer_size(MIN_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MIN_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MIN_WRITE_BUFFER_SIZE);
options.set_level_zero_file_num_compaction_trigger(1);
}
}
options
}
@ -322,8 +437,5 @@ fn get_db_options() -> Options {
options.increase_parallelism(TOTAL_THREADS);
options.set_max_background_flushes(4);
options.set_max_background_compactions(4);
options.set_max_write_buffer_number(32);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE as u64);
options
}

View File

@ -142,6 +142,7 @@ pub fn process_blocktree(
genesis_block: &GenesisBlock,
blocktree: &Blocktree,
account_paths: Option<String>,
verify_ledger: bool,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
let now = Instant::now();
info!("processing ledger...");
@ -166,7 +167,8 @@ pub fn process_blocktree(
blocktree.set_roots(&[0]).expect("Couldn't set first root");
let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0);
let leader_schedule_cache =
LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), &pending_slots[0].2);
let mut fork_info = vec![];
let mut last_status_report = Instant::now();
@ -204,7 +206,7 @@ pub fn process_blocktree(
}
if !entries.is_empty() {
if !entries.verify(&last_entry_hash) {
if verify_ledger && !entries.verify(&last_entry_hash) {
warn!(
"Ledger proof of history failed at slot: {}, entry: {}",
slot, entry_height
@ -225,7 +227,7 @@ pub fn process_blocktree(
if blocktree.is_root(slot) {
root = slot;
leader_schedule_cache.set_root(slot);
leader_schedule_cache.set_root(&bank);
bank.squash();
pending_slots.clear();
fork_info.clear();
@ -304,6 +306,7 @@ pub mod tests {
use crate::genesis_utils::{
create_genesis_block, create_genesis_block_with_leader, GenesisBlockInfo,
};
use rand::{thread_rng, Rng};
use solana_runtime::epoch_schedule::EpochSchedule;
use solana_sdk::hash::Hash;
use solana_sdk::instruction::InstructionError;
@ -373,7 +376,7 @@ pub mod tests {
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);
let (mut _bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(
@ -432,7 +435,7 @@ pub mod tests {
blocktree.set_roots(&[4, 1, 0]).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root
@ -506,7 +509,7 @@ pub mod tests {
blocktree.set_roots(&[0, 1]).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 2); // There are two forks
assert_eq!(
@ -587,7 +590,7 @@ pub mod tests {
// Check that we can properly restart the ledger / leader scheduler doesn't fail
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1); // There is one fork
assert_eq!(
@ -723,7 +726,7 @@ pub mod tests {
.unwrap();
let entry_height = genesis_block.ticks_per_slot + entries.len() as u64;
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(bank_forks.root(), 0);
@ -754,7 +757,7 @@ pub mod tests {
let blocktree = Blocktree::open(&ledger_path).unwrap();
let (bank_forks, bank_forks_info, _) =
process_blocktree(&genesis_block, &blocktree, None).unwrap();
process_blocktree(&genesis_block, &blocktree, None, true).unwrap();
assert_eq!(bank_forks_info.len(), 1);
assert_eq!(
@ -1264,7 +1267,7 @@ pub mod tests {
} = create_genesis_block(1_000_000_000);
let mut bank = Bank::new(&genesis_block);
const NUM_TRANSFERS: usize = 100;
const NUM_TRANSFERS: usize = 128;
let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect();
// give everybody one lamport
@ -1275,6 +1278,7 @@ pub mod tests {
let mut i = 0;
let mut hash = bank.last_blockhash();
let mut root: Option<Arc<Bank>> = None;
loop {
let entries: Vec<_> = (0..NUM_TRANSFERS)
.map(|i| {
@ -1320,9 +1324,19 @@ pub mod tests {
)
.expect("process ticks failed");
let parent = Arc::new(bank);
if i % 16 == 0 {
root.map(|old_root| old_root.squash());
root = Some(parent.clone());
}
i += 1;
bank = Bank::new_from_parent(&Arc::new(bank), &Pubkey::default(), i as u64);
bank.squash();
bank = Bank::new_from_parent(
&parent,
&Pubkey::default(),
parent.slot() + thread_rng().gen_range(1, 3),
);
}
}

View File

@ -1,9 +1,11 @@
//! A stage to broadcast data from a leader node to validators
use self::broadcast_bad_blob_sizes::BroadcastBadBlobSizes;
use self::broadcast_fake_blobs_run::BroadcastFakeBlobsRun;
use self::fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun;
use self::standard_broadcast_run::StandardBroadcastRun;
use crate::blocktree::Blocktree;
use crate::cluster_info::{ClusterInfo, ClusterInfoError};
use crate::erasure::CodingGenerator;
use crate::erasure::{CodingGenerator, ErasureConfig};
use crate::poh_recorder::WorkingBankEntries;
use crate::result::{Error, Result};
use crate::service::Service;
@ -20,6 +22,8 @@ use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Instant;
mod broadcast_bad_blob_sizes;
mod broadcast_fake_blobs_run;
mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;
@ -35,6 +39,8 @@ pub enum BroadcastStageReturnType {
pub enum BroadcastStageType {
Standard,
FailEntryVerification,
BroadcastFakeBlobs,
BroadcastBadBlobSizes,
}
impl BroadcastStageType {
@ -45,6 +51,7 @@ impl BroadcastStageType {
receiver: Receiver<WorkingBankEntries>,
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
erasure_config: &ErasureConfig,
) -> BroadcastStage {
match self {
BroadcastStageType::Standard => BroadcastStage::new(
@ -54,6 +61,7 @@ impl BroadcastStageType {
exit_sender,
blocktree,
StandardBroadcastRun::new(),
erasure_config,
),
BroadcastStageType::FailEntryVerification => BroadcastStage::new(
@ -63,6 +71,27 @@ impl BroadcastStageType {
exit_sender,
blocktree,
FailEntryVerificationBroadcastRun::new(),
erasure_config,
),
BroadcastStageType::BroadcastFakeBlobs => BroadcastStage::new(
sock,
cluster_info,
receiver,
exit_sender,
blocktree,
BroadcastFakeBlobsRun::new(0),
erasure_config,
),
BroadcastStageType::BroadcastBadBlobSizes => BroadcastStage::new(
sock,
cluster_info,
receiver,
exit_sender,
blocktree,
BroadcastBadBlobSizes::new(),
erasure_config,
),
}
}
@ -114,8 +143,9 @@ impl BroadcastStage {
receiver: &Receiver<WorkingBankEntries>,
blocktree: &Arc<Blocktree>,
mut broadcast_stage_run: impl BroadcastRun,
erasure_config: &ErasureConfig,
) -> BroadcastStageReturnType {
let coding_generator = CodingGenerator::default();
let coding_generator = CodingGenerator::new_from_config(erasure_config);
let mut broadcast = Broadcast {
coding_generator,
@ -167,9 +197,11 @@ impl BroadcastStage {
exit_sender: &Arc<AtomicBool>,
blocktree: &Arc<Blocktree>,
broadcast_stage_run: impl BroadcastRun + Send + 'static,
erasure_config: &ErasureConfig,
) -> Self {
let blocktree = blocktree.clone();
let exit_sender = exit_sender.clone();
let erasure_config = *erasure_config;
let thread_hdl = Builder::new()
.name("solana-broadcaster".to_string())
.spawn(move || {
@ -180,6 +212,7 @@ impl BroadcastStage {
&receiver,
&blocktree,
broadcast_stage_run,
&erasure_config,
)
})
.unwrap();
@ -253,6 +286,7 @@ mod test {
&exit_sender,
&blocktree,
StandardBroadcastRun::new(),
&ErasureConfig::default(),
);
MockBroadcastStage {

View File

@ -0,0 +1,84 @@
use super::*;
use crate::packet::BLOB_HEADER_SIZE;
use solana_sdk::hash::Hash;
use solana_sdk::signature::Signable;
pub(super) struct BroadcastBadBlobSizes {}
impl BroadcastBadBlobSizes {
pub(super) fn new() -> Self {
Self {}
}
}
impl BroadcastRun for BroadcastBadBlobSizes {
fn run(
&mut self,
broadcast: &mut Broadcast,
cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>,
sock: &UdpSocket,
blocktree: &Arc<Blocktree>,
) -> Result<()> {
// 1) Pull entries from banking stage
let mut receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
let bank = receive_results.bank.clone();
let last_tick = receive_results.last_tick;
// 2) Convert entries to blobs + generate coding blobs. Set a garbage PoH on the last entry
// in the slot to make verification fail on validators
if last_tick == bank.max_tick_height() {
let mut last_entry = receive_results
.ventries
.last_mut()
.unwrap()
.last_mut()
.unwrap();
last_entry.0.hash = Hash::default();
}
let keypair = &cluster_info.read().unwrap().keypair.clone();
let latest_blob_index = blocktree
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0);
let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
receive_results.ventries,
&broadcast.thread_pool,
latest_blob_index,
last_tick,
&bank,
&keypair,
&mut broadcast.coding_generator,
);
for b in data_blobs.iter().chain(coding_blobs.iter()) {
let mut w_b = b.write().unwrap();
let real_size = w_b.meta.size;
// corrupt the size in the header
w_b.set_size(std::usize::MAX - BLOB_HEADER_SIZE);
// resign the blob
w_b.sign(&keypair);
// don't corrupt the size in the meta so that broadcast will still work
w_b.meta.size = real_size;
}
blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;
// 3) Start broadcast step
let bank_epoch = bank.get_stakers_epoch(bank.slot());
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
// Broadcast data + erasures
cluster_info.read().unwrap().broadcast(
sock,
data_blobs.iter().chain(coding_blobs.iter()),
stakes.as_ref(),
)?;
Ok(())
}
}

View File

@ -0,0 +1,167 @@
use super::*;
use crate::entry::Entry;
use solana_sdk::hash::Hash;
pub(super) struct BroadcastFakeBlobsRun {
last_blockhash: Hash,
partition: usize,
}
impl BroadcastFakeBlobsRun {
pub(super) fn new(partition: usize) -> Self {
Self {
last_blockhash: Hash::default(),
partition,
}
}
}
impl BroadcastRun for BroadcastFakeBlobsRun {
fn run(
&mut self,
broadcast: &mut Broadcast,
cluster_info: &Arc<RwLock<ClusterInfo>>,
receiver: &Receiver<WorkingBankEntries>,
sock: &UdpSocket,
blocktree: &Arc<Blocktree>,
) -> Result<()> {
// 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_blobs(receiver)?;
let bank = receive_results.bank.clone();
let last_tick = receive_results.last_tick;
let keypair = &cluster_info.read().unwrap().keypair.clone();
let latest_blob_index = blocktree
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0);
let (data_blobs, coding_blobs) = broadcast_utils::entries_to_blobs(
receive_results.ventries,
&broadcast.thread_pool,
latest_blob_index,
last_tick,
&bank,
&keypair,
&mut broadcast.coding_generator,
);
// If the last blockhash is default, a new block is being created
// So grab the last blockhash from the parent bank
if self.last_blockhash == Hash::default() {
self.last_blockhash = bank.parent().unwrap().last_blockhash();
}
let fake_ventries: Vec<_> = (0..receive_results.num_entries)
.map(|_| vec![(Entry::new(&self.last_blockhash, 0, vec![]), 0)])
.collect();
let (fake_data_blobs, fake_coding_blobs) = broadcast_utils::entries_to_blobs(
fake_ventries,
&broadcast.thread_pool,
latest_blob_index,
last_tick,
&bank,
&keypair,
&mut broadcast.coding_generator,
);
// If it's the last tick, reset the last block hash to default
// this will cause next run to grab last bank's blockhash
if last_tick == bank.max_tick_height() {
self.last_blockhash = Hash::default();
}
blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;
// Set the forwarded flag to true, so that the blobs won't be forwarded to peers
data_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));
coding_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));
fake_data_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));
fake_coding_blobs
.iter()
.for_each(|blob| blob.write().unwrap().set_forwarded(true));
// 3) Start broadcast step
let peers = cluster_info.read().unwrap().tvu_peers();
peers.iter().enumerate().for_each(|(i, peer)| {
if i <= self.partition {
// Send fake blobs to the first N peers
fake_data_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
fake_coding_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
} else {
data_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
coding_blobs.iter().for_each(|b| {
let blob = b.read().unwrap();
sock.send_to(&blob.data[..blob.meta.size], &peer.tvu)
.unwrap();
});
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::pubkey::Pubkey;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
#[test]
fn test_tvu_peers_ordering() {
let mut cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)),
8080,
)));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)),
8080,
)));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 3)),
8080,
)));
cluster.insert_info(ContactInfo::new_with_socketaddr(&SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(192, 168, 1, 4)),
8080,
)));
let tvu_peers1 = cluster.tvu_peers();
(0..5).for_each(|_| {
cluster
.tvu_peers()
.iter()
.zip(tvu_peers1.iter())
.for_each(|(v1, v2)| {
assert_eq!(v1, v2);
});
});
}
}

View File

@ -52,7 +52,8 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
&mut broadcast.coding_generator,
);
blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;
blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;
// 3) Start broadcast step
let bank_epoch = bank.get_stakers_epoch(bank.slot());

View File

@ -82,7 +82,9 @@ impl BroadcastRun for StandardBroadcastRun {
&mut broadcast.coding_generator,
);
blocktree.write_shared_blobs(data_blobs.iter().chain(coding_blobs.iter()))?;
blocktree.write_shared_blobs(data_blobs.iter())?;
blocktree.put_shared_coding_blobs(coding_blobs.iter())?;
let to_blobs_elapsed = to_blobs_start.elapsed();
// 3) Start broadcast step

View File

@ -1,5 +1,4 @@
use crate::blocktree::Blocktree;
use solana_storage_api::SLOTS_PER_SEGMENT;
use std::fs::File;
use std::io;
use std::io::{BufWriter, Write};
@ -14,6 +13,7 @@ pub const CHACHA_KEY_SIZE: usize = 32;
pub fn chacha_cbc_encrypt_ledger(
blocktree: &Arc<Blocktree>,
slice: u64,
slots_per_segment: u64,
out_path: &Path,
ivec: &mut [u8; CHACHA_BLOCK_SIZE],
) -> io::Result<usize> {
@ -28,7 +28,7 @@ pub fn chacha_cbc_encrypt_ledger(
let mut entry = slice;
loop {
match blocktree.read_blobs_bytes(0, SLOTS_PER_SEGMENT - total_entries, &mut buffer, entry) {
match blocktree.read_blobs_bytes(0, slots_per_segment - total_entries, &mut buffer, entry) {
Ok((num_entries, entry_len)) => {
debug!(
"chacha: encrypting slice: {} num_entries: {} entry_len: {}",
@ -113,10 +113,11 @@ mod tests {
let ledger_dir = "chacha_test_encrypt_file";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
let slots_per_segment = 32;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let out_path = Path::new("test_chacha_encrypt_file_output.txt.enc");
let entries = make_tiny_deterministic_test_entries(32);
let entries = make_tiny_deterministic_test_entries(slots_per_segment);
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.unwrap();
@ -125,7 +126,8 @@ mod tests {
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
);
chacha_cbc_encrypt_ledger(&blocktree, 0, out_path, &mut key).unwrap();
chacha_cbc_encrypt_ledger(&blocktree, 0, slots_per_segment as u64, out_path, &mut key)
.unwrap();
let mut out_file = File::open(out_path).unwrap();
let mut buf = vec![];
let size = out_file.read_to_end(&mut buf).unwrap();
@ -133,7 +135,7 @@ mod tests {
hasher.hash(&buf[..size]);
// golden needs to be updated if blob stuff changes....
let golden: Hash = "E2HZjSC6VgH4nmEiTbMDATTeBcFjwSYz7QYvU7doGNhD"
let golden: Hash = "7hgFLHveuv9zvHpp6qpco9AHAJKyczdgxiktEMkeghDQ"
.parse()
.unwrap();

View File

@ -7,7 +7,6 @@ use crate::sigverify::{
chacha_cbc_encrypt_many_sample, chacha_end_sha_state, chacha_init_sha_state,
};
use solana_sdk::hash::Hash;
use solana_storage_api::SLOTS_PER_SEGMENT;
use std::io;
use std::mem::size_of;
use std::sync::Arc;
@ -19,6 +18,7 @@ use std::sync::Arc;
pub fn chacha_cbc_encrypt_file_many_keys(
blocktree: &Arc<Blocktree>,
segment: u64,
slots_per_segment: u64,
ivecs: &mut [u8],
samples: &[u64],
) -> io::Result<Vec<Hash>> {
@ -46,7 +46,7 @@ pub fn chacha_cbc_encrypt_file_many_keys(
chacha_init_sha_state(int_sha_states.as_mut_ptr(), num_keys as u32);
}
loop {
match blocktree.read_blobs_bytes(entry, SLOTS_PER_SEGMENT - total_entries, &mut buffer, 0) {
match blocktree.read_blobs_bytes(entry, slots_per_segment - total_entries, &mut buffer, 0) {
Ok((num_entries, entry_len)) => {
debug!(
"chacha_cuda: encrypting segment: {} num_entries: {} entry_len: {}",
@ -76,9 +76,9 @@ pub fn chacha_cbc_encrypt_file_many_keys(
entry += num_entries;
debug!(
"total entries: {} entry: {} segment: {} entries_per_segment: {}",
total_entries, entry, segment, SLOTS_PER_SEGMENT
total_entries, entry, segment, slots_per_segment
);
if (entry - segment) >= SLOTS_PER_SEGMENT {
if (entry - segment) >= slots_per_segment {
break;
}
}
@ -113,6 +113,7 @@ mod tests {
use crate::entry::make_tiny_test_entries;
use crate::replicator::sample_file;
use solana_sdk::hash::Hash;
use solana_sdk::timing::DEFAULT_SLOTS_PER_SEGMENT;
use std::fs::{remove_dir_all, remove_file};
use std::path::Path;
use std::sync::Arc;
@ -121,7 +122,8 @@ mod tests {
fn test_encrypt_file_many_keys_single() {
solana_logger::setup();
let entries = make_tiny_test_entries(32);
let slots_per_segment = 32;
let entries = make_tiny_test_entries(slots_per_segment);
let ledger_dir = "test_encrypt_file_many_keys_single";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
@ -140,12 +142,25 @@ mod tests {
);
let mut cpu_iv = ivecs.clone();
chacha_cbc_encrypt_ledger(&blocktree, 0, out_path, &mut cpu_iv).unwrap();
chacha_cbc_encrypt_ledger(
&blocktree,
0,
slots_per_segment as u64,
out_path,
&mut cpu_iv,
)
.unwrap();
let ref_hash = sample_file(&out_path, &samples).unwrap();
let hashes =
chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut ivecs, &samples).unwrap();
let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
0,
slots_per_segment as u64,
&mut ivecs,
&samples,
)
.unwrap();
assert_eq!(hashes[0], ref_hash);
@ -178,7 +193,14 @@ mod tests {
);
ivec[0] = i;
ivecs.extend(ivec.clone().iter());
chacha_cbc_encrypt_ledger(&blocktree.clone(), 0, out_path, &mut ivec).unwrap();
chacha_cbc_encrypt_ledger(
&blocktree.clone(),
0,
DEFAULT_SLOTS_PER_SEGMENT,
out_path,
&mut ivec,
)
.unwrap();
ref_hashes.push(sample_file(&out_path, &samples).unwrap());
info!(
@ -189,8 +211,14 @@ mod tests {
);
}
let hashes =
chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut ivecs, &samples).unwrap();
let hashes = chacha_cbc_encrypt_file_many_keys(
&blocktree,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut ivecs,
&samples,
)
.unwrap();
assert_eq!(hashes, ref_hashes);
@ -205,6 +233,13 @@ mod tests {
let ledger_path = get_tmp_ledger_path(ledger_dir);
let samples = [0];
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
assert!(chacha_cbc_encrypt_file_many_keys(&blocktree, 0, &mut keys, &samples,).is_err());
assert!(chacha_cbc_encrypt_file_many_keys(
&blocktree,
0,
DEFAULT_SLOTS_PER_SEGMENT,
&mut keys,
&samples,
)
.is_err());
}
}

View File

@ -47,7 +47,7 @@ use solana_sdk::transaction::Transaction;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::cmp::min;
use std::collections::{BTreeSet, HashMap};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fmt;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
@ -78,9 +78,6 @@ pub struct ClusterInfo {
pub gossip: CrdsGossip,
/// set the keypair that will be used to sign crds values generated. It is unset only in tests.
pub(crate) keypair: Arc<Keypair>,
// TODO: remove gossip_leader_pubkey once all usage of `set_leader()` and `leader_data()` is
// purged
gossip_leader_pubkey: Pubkey,
/// The network entrypoint
entrypoint: Option<ContactInfo>,
}
@ -181,7 +178,6 @@ impl ClusterInfo {
let mut me = Self {
gossip: CrdsGossip::default(),
keypair,
gossip_leader_pubkey: Pubkey::default(),
entrypoint: None,
};
let id = contact_info.id;
@ -206,7 +202,8 @@ impl ClusterInfo {
let mut entry = CrdsValue::ContactInfo(my_data);
entry.sign(&self.keypair);
self.gossip.refresh_push_active_set(stakes);
self.gossip.process_push_message(vec![entry], now);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
}
// TODO kill insert_info, only used by tests
@ -236,15 +233,6 @@ impl ClusterInfo {
self.lookup(&self.id()).cloned().unwrap()
}
// Deprecated: don't use leader_data().
pub fn leader_data(&self) -> Option<&ContactInfo> {
let leader_pubkey = self.gossip_leader_pubkey;
if leader_pubkey == Pubkey::default() {
return None;
}
self.lookup(&leader_pubkey)
}
pub fn contact_info_trace(&self) -> String {
let now = timestamp();
let mut spy_nodes = 0;
@ -301,22 +289,12 @@ impl ClusterInfo {
)
}
/// Record the id of the current leader for use by `leader_tpu_via_blobs()`
pub fn set_leader(&mut self, leader_pubkey: &Pubkey) {
if *leader_pubkey != self.gossip_leader_pubkey {
warn!(
"{}: LEADER_UPDATE TO {} from {}",
self.gossip.id, leader_pubkey, self.gossip_leader_pubkey,
);
self.gossip_leader_pubkey = *leader_pubkey;
}
}
pub fn push_epoch_slots(&mut self, id: Pubkey, root: u64, slots: BTreeSet<u64>) {
let now = timestamp();
let mut entry = CrdsValue::EpochSlots(EpochSlots::new(id, root, slots, now));
entry.sign(&self.keypair);
self.gossip.process_push_message(vec![entry], now);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
}
pub fn push_vote(&mut self, vote: Transaction) {
@ -324,7 +302,8 @@ impl ClusterInfo {
let vote = Vote::new(&self.id(), vote, now);
let mut entry = CrdsValue::Vote(vote);
entry.sign(&self.keypair);
self.gossip.process_push_message(vec![entry], now);
self.gossip
.process_push_message(&self.id(), vec![entry], now);
}
/// Get votes in the crds
@ -748,7 +727,7 @@ impl ClusterInfo {
/// retransmit messages to a list of nodes
/// # Remarks
/// We need to avoid having obj locked while doing any io, such as the `send_to`
/// We need to avoid having obj locked while doing a io, such as the `send_to`
pub fn retransmit_to(
obj: &Arc<RwLock<Self>>,
peers: &[ContactInfo],
@ -1071,12 +1050,13 @@ impl ClusterInfo {
fn handle_blob(
obj: &Arc<RwLock<Self>>,
blocktree: Option<&Arc<Blocktree>>,
stakes: &HashMap<Pubkey, u64>,
blob: &Blob,
) -> Vec<SharedBlob> {
deserialize(&blob.data[..blob.meta.size])
.into_iter()
.flat_map(|request| {
ClusterInfo::handle_protocol(obj, &blob.meta.addr(), blocktree, request)
ClusterInfo::handle_protocol(obj, &blob.meta.addr(), blocktree, stakes, request)
})
.collect()
}
@ -1092,7 +1072,7 @@ impl ClusterInfo {
if caller.contact_info().is_none() {
return vec![];
}
let mut from = caller.contact_info().cloned().unwrap();
let from = caller.contact_info().unwrap();
if from.id == self_id {
warn!(
"PullRequest ignored, I'm talking to myself: me={} remoteme={}",
@ -1110,16 +1090,12 @@ impl ClusterInfo {
let len = data.len();
trace!("get updates since response {}", len);
let rsp = Protocol::PullResponse(self_id, data);
// The remote node may not know its public IP:PORT. Record what it looks like to us.
// This may or may not be correct for everybody, but it's better than leaving the remote with
// an unspecified address in our table
if from.gossip.ip().is_unspecified() {
inc_new_counter_debug!("cluster_info-window-request-updates-unspec-gossip", 1);
from.gossip = *from_addr;
}
// The remote node may not know its public IP:PORT. Instead of responding to the caller's
// gossip addr, respond to the origin addr.
inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
to_shared_blob(rsp, from.gossip).ok().into_iter().collect()
to_shared_blob(rsp, *from_addr).ok().into_iter().collect()
}
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
let len = data.len();
let now = Instant::now();
@ -1134,40 +1110,52 @@ impl ClusterInfo {
report_time_spent("ReceiveUpdates", &now.elapsed(), &format!(" len: {}", len));
}
fn handle_push_message(
me: &Arc<RwLock<Self>>,
from: &Pubkey,
data: Vec<CrdsValue>,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<SharedBlob> {
let self_id = me.read().unwrap().gossip.id;
inc_new_counter_debug!("cluster_info-push_message", 1, 0, 1000);
let prunes: Vec<_> = me
let updated: Vec<_> =
me.write()
.unwrap()
.gossip
.process_push_message(from, data, timestamp());
let updated_labels: Vec<_> = updated.into_iter().map(|u| u.value.label()).collect();
let prunes_map: HashMap<Pubkey, HashSet<Pubkey>> = me
.write()
.unwrap()
.gossip
.process_push_message(data, timestamp());
.prune_received_cache(updated_labels, stakes);
if !prunes.is_empty() {
inc_new_counter_debug!("cluster_info-push_message-prunes", prunes.len());
let ci = me.read().unwrap().lookup(from).cloned();
let pushes: Vec<_> = me.write().unwrap().new_push_requests();
inc_new_counter_debug!("cluster_info-push_message-pushes", pushes.len());
let mut rsp: Vec<_> = ci
.and_then(|ci| {
let mut rsp: Vec<_> = prunes_map
.into_iter()
.map(|(from, prune_set)| {
inc_new_counter_debug!("cluster_info-push_message-prunes", prune_set.len());
me.read().unwrap().lookup(&from).cloned().and_then(|ci| {
let mut prune_msg = PruneData {
pubkey: self_id,
prunes,
prunes: prune_set.into_iter().collect(),
signature: Signature::default(),
destination: *from,
destination: from,
wallclock: timestamp(),
};
prune_msg.sign(&me.read().unwrap().keypair);
let rsp = Protocol::PruneMessage(self_id, prune_msg);
to_shared_blob(rsp, ci.gossip).ok()
})
.into_iter()
.collect();
})
.flatten()
.collect();
if !rsp.is_empty() {
let pushes: Vec<_> = me.write().unwrap().new_push_requests();
inc_new_counter_debug!("cluster_info-push_message-pushes", pushes.len());
let mut blobs: Vec<_> = pushes
.into_iter()
.filter_map(|(remote_gossip_addr, req)| {
@ -1269,6 +1257,7 @@ impl ClusterInfo {
me: &Arc<RwLock<Self>>,
from_addr: &SocketAddr,
blocktree: Option<&Arc<Blocktree>>,
stakes: &HashMap<Pubkey, u64>,
request: Protocol,
) -> Vec<SharedBlob> {
match request {
@ -1300,7 +1289,7 @@ impl ClusterInfo {
}
ret
});
Self::handle_push_message(me, &from, data)
Self::handle_push_message(me, &from, data, stakes)
}
Protocol::PruneMessage(from, data) => {
if data.verify() {
@ -1335,6 +1324,7 @@ impl ClusterInfo {
fn run_listen(
obj: &Arc<RwLock<Self>>,
blocktree: Option<&Arc<Blocktree>>,
bank_forks: Option<&Arc<RwLock<BankForks>>>,
requests_receiver: &BlobReceiver,
response_sender: &BlobSender,
) -> Result<()> {
@ -1345,8 +1335,16 @@ impl ClusterInfo {
reqs.append(&mut more);
}
let mut resps = Vec::new();
let stakes: HashMap<_, _> = match bank_forks {
Some(ref bank_forks) => {
staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
}
None => HashMap::new(),
};
for req in reqs {
let mut resp = Self::handle_blob(obj, blocktree, &req.read().unwrap());
let mut resp = Self::handle_blob(obj, blocktree, &stakes, &req.read().unwrap());
resps.append(&mut resp);
}
response_sender.send(resps)?;
@ -1355,6 +1353,7 @@ impl ClusterInfo {
pub fn listen(
me: Arc<RwLock<Self>>,
blocktree: Option<Arc<Blocktree>>,
bank_forks: Option<Arc<RwLock<BankForks>>>,
requests_receiver: BlobReceiver,
response_sender: BlobSender,
exit: &Arc<AtomicBool>,
@ -1366,6 +1365,7 @@ impl ClusterInfo {
let e = Self::run_listen(
&me,
blocktree.as_ref(),
bank_forks.as_ref(),
&requests_receiver,
&response_sender,
);
@ -1652,6 +1652,7 @@ mod tests {
use crate::blocktree::tests::make_many_slot_entries;
use crate::blocktree::Blocktree;
use crate::crds_value::CrdsValueLabel;
use crate::erasure::ErasureConfig;
use crate::packet::BLOB_HEADER_SIZE;
use crate::repair_service::RepairType;
use crate::result::Error;
@ -1816,6 +1817,7 @@ mod tests {
w_blob.set_size(data_size);
w_blob.set_index(1);
w_blob.set_slot(2);
w_blob.set_erasure_config(&ErasureConfig::default());
w_blob.meta.size = data_size + BLOB_HEADER_SIZE;
}
@ -1860,6 +1862,7 @@ mod tests {
blob.set_size(data_size);
blob.set_index(i);
blob.set_slot(2);
blob.set_erasure_config(&ErasureConfig::default());
blob.meta.size = data_size + BLOB_HEADER_SIZE;
blob
})
@ -1925,17 +1928,6 @@ mod tests {
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
#[test]
fn test_default_leader() {
solana_logger::setup();
let contact_info = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
let network_entry_point =
ContactInfo::new_gossip_entry_point(&socketaddr!("127.0.0.1:1239"));
cluster_info.insert_info(network_entry_point);
assert!(cluster_info.leader_data().is_none());
}
fn assert_in_range(x: u16, range: (u16, u16)) {
assert!(x >= range.0);
assert!(x < range.1);
@ -2019,12 +2011,9 @@ mod tests {
//create new cluster info, leader, and peer
let keypair = Keypair::new();
let peer_keypair = Keypair::new();
let leader_keypair = Keypair::new();
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
let leader = ContactInfo::new_localhost(&leader_keypair.pubkey(), 0);
let peer = ContactInfo::new_localhost(&peer_keypair.pubkey(), 0);
let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
cluster_info.set_leader(&leader.id);
cluster_info.insert_info(peer.clone());
cluster_info.gossip.refresh_push_active_set(&HashMap::new());
//check that all types of gossip messages are signed correctly

View File

@ -234,6 +234,7 @@ impl ClusterInfoRepairListener {
let _ = Self::serve_repairs_to_repairee(
my_pubkey,
repairee_pubkey,
my_root,
blocktree,
&repairee_epoch_slots,
@ -249,8 +250,10 @@ impl ClusterInfoRepairListener {
Ok(())
}
#[allow(clippy::too_many_arguments)]
fn serve_repairs_to_repairee(
my_pubkey: &Pubkey,
repairee_pubkey: &Pubkey,
my_root: u64,
blocktree: &Blocktree,
repairee_epoch_slots: &EpochSlots,
@ -260,14 +263,16 @@ impl ClusterInfoRepairListener {
num_slots_to_repair: usize,
epoch_schedule: &EpochSchedule,
) -> Result<()> {
let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root + 1);
let slot_iter = blocktree.rooted_slot_iterator(repairee_epoch_slots.root);
if slot_iter.is_err() {
warn!("Root for repairee is on different fork OR replay_stage hasn't marked this slot as root yet");
info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
my_root, repairee_epoch_slots.root, repairee_pubkey,
);
return Ok(());
}
let slot_iter = slot_iter?;
let mut slot_iter = slot_iter?;
let mut total_data_blobs_sent = 0;
let mut total_coding_blobs_sent = 0;
@ -276,6 +281,10 @@ impl ClusterInfoRepairListener {
epoch_schedule.get_stakers_epoch(repairee_epoch_slots.root);
let max_confirmed_repairee_slot =
epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch);
// Skip the first slot in the iterator because we know it's the root slot which the repairee
// already has
slot_iter.next();
for (slot, slot_meta) in slot_iter {
if slot > my_root
|| num_slots_repaired >= num_slots_to_repair
@ -650,6 +659,7 @@ mod tests {
for repairman_pubkey in &eligible_repairmen {
ClusterInfoRepairListener::serve_repairs_to_repairee(
&repairman_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&repairee_epoch_slots,
@ -719,6 +729,7 @@ mod tests {
ClusterInfoRepairListener::serve_repairs_to_repairee(
&my_pubkey,
&mock_repairee.id,
total_slots - 1,
&blocktree,
&repairee_epoch_slots,
@ -740,6 +751,7 @@ mod tests {
EpochSlots::new(mock_repairee.id, stakers_slot_offset, repairee_slots, 1);
ClusterInfoRepairListener::serve_repairs_to_repairee(
&my_pubkey,
&mock_repairee.id,
total_slots - 1,
&blocktree,
&repairee_epoch_slots,

View File

@ -4,9 +4,9 @@ use crate::result::Result;
use crate::service::Service;
use crate::sigverify_stage::VerifiedPackets;
use crate::{packet, sigverify};
use crossbeam_channel::Sender as CrossbeamSender;
use solana_metrics::inc_new_counter_debug;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::Sender;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{self, sleep, Builder, JoinHandle};
use std::time::Duration;
@ -20,7 +20,7 @@ impl ClusterInfoVoteListener {
exit: &Arc<AtomicBool>,
cluster_info: Arc<RwLock<ClusterInfo>>,
sigverify_disabled: bool,
sender: Sender<VerifiedPackets>,
sender: CrossbeamSender<VerifiedPackets>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> Self {
let exit = exit.clone();
@ -45,7 +45,7 @@ impl ClusterInfoVoteListener {
exit: Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
sigverify_disabled: bool,
sender: &Sender<VerifiedPackets>,
sender: &CrossbeamSender<VerifiedPackets>,
poh_recorder: Arc<Mutex<PohRecorder>>,
) -> Result<()> {
let mut last_ts = 0;
@ -54,7 +54,7 @@ impl ClusterInfoVoteListener {
return Ok(());
}
let (votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
if poh_recorder.lock().unwrap().bank().is_some() {
if poh_recorder.lock().unwrap().has_bank() {
last_ts = new_ts;
inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len());
let msgs = packet::to_packets(&votes);
@ -85,7 +85,7 @@ impl Service for ClusterInfoVoteListener {
#[cfg(test)]
mod tests {
use crate::locktower::MAX_RECENT_VOTES;
use crate::consensus::MAX_RECENT_VOTES;
use crate::packet;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};

View File

@ -4,10 +4,10 @@ use crate::blocktree::Blocktree;
/// All tests must start from an entry point and a funding keypair and
/// discover the rest of the network.
use crate::cluster_info::FULLNODE_PORT_RANGE;
use crate::consensus::VOTE_THRESHOLD_DEPTH;
use crate::contact_info::ContactInfo;
use crate::entry::{Entry, EntrySlice};
use crate::gossip_service::discover_cluster;
use crate::locktower::VOTE_THRESHOLD_DEPTH;
use hashbrown::HashSet;
use solana_client::thin_client::create_client;
use solana_runtime::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;

View File

@ -28,8 +28,17 @@ pub struct StakeLockout {
stake: u64,
}
impl StakeLockout {
pub fn lockout(&self) -> u64 {
self.lockout
}
pub fn stake(&self) -> u64 {
self.stake
}
}
#[derive(Default)]
pub struct Locktower {
pub struct Tower {
epoch_stakes: EpochStakes,
threshold_depth: usize,
threshold_size: f64,
@ -68,7 +77,7 @@ impl EpochStakes {
}
}
impl Locktower {
impl Tower {
pub fn new_from_forks(bank_forks: &BankForks, my_pubkey: &Pubkey) -> Self {
let mut frozen_banks: Vec<_> = bank_forks.frozen_banks().values().cloned().collect();
frozen_banks.sort_by_key(|b| (b.parents().len(), b.slot()));
@ -80,7 +89,7 @@ impl Locktower {
}
};
let mut locktower = Self {
let mut tower = Self {
epoch_stakes,
threshold_depth: VOTE_THRESHOLD_DEPTH,
threshold_size: VOTE_THRESHOLD_SIZE,
@ -88,10 +97,9 @@ impl Locktower {
recent_votes: VecDeque::default(),
};
let bank = locktower.find_heaviest_bank(bank_forks).unwrap();
locktower.lockouts =
Self::initialize_lockouts_from_bank(&bank, locktower.epoch_stakes.epoch);
locktower
let bank = tower.find_heaviest_bank(bank_forks).unwrap();
tower.lockouts = Self::initialize_lockouts_from_bank(&bank, tower.epoch_stakes.epoch);
tower
}
pub fn new(epoch_stakes: EpochStakes, threshold_depth: usize, threshold_size: f64) -> Self {
Self {
@ -120,7 +128,7 @@ impl Locktower {
let vote_state = VoteState::from(&account);
if vote_state.is_none() {
datapoint_warn!(
"locktower_warn",
"tower_warn",
(
"warn",
format!("Unable to get vote_state from account {}", key),
@ -141,7 +149,7 @@ impl Locktower {
);
debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
datapoint_info!(
"locktower-observed",
"tower-observed",
(
"slot",
vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0),
@ -223,14 +231,14 @@ impl Locktower {
"epoch_stakes cannot move backwards"
);
info!(
"Locktower updated epoch bank slot: {} epoch: {}",
"Tower updated epoch bank slot: {} epoch: {}",
bank.slot(),
self.epoch_stakes.epoch
);
self.epoch_stakes =
EpochStakes::new_from_bank(bank, &self.epoch_stakes.delegate_pubkey);
datapoint_info!(
"locktower-epoch",
"tower-epoch",
("epoch", self.epoch_stakes.epoch, i64),
("self_staked", self.epoch_stakes.self_staked, i64),
("total_staked", self.epoch_stakes.total_staked, i64)
@ -256,7 +264,7 @@ impl Locktower {
.retain(|vote| slots.iter().any(|slot| vote.slot == *slot));
datapoint_info!(
"locktower-vote",
"tower-vote",
("latest", slot, i64),
("root", self.lockouts.root_slot.unwrap_or(0), i64)
);
@ -275,6 +283,10 @@ impl Locktower {
self.lockouts.root_slot
}
pub fn total_epoch_stakes(&self) -> u64 {
self.epoch_stakes.total_staked
}
pub fn calculate_weight(&self, stake_lockouts: &HashMap<u64, StakeLockout>) -> u128 {
let mut sum = 0u128;
let root_slot = self.lockouts.root_slot.unwrap_or(0);
@ -334,6 +346,27 @@ impl Locktower {
}
}
pub fn aggregate_stake_lockouts(
root: Option<u64>,
ancestors: &HashMap<u64, HashSet<u64>>,
stake_lockouts: HashMap<u64, StakeLockout>,
) -> HashMap<u64, u128> {
let mut stake_weighted_lockouts: HashMap<u64, u128> = HashMap::new();
for (fork, lockout) in stake_lockouts.iter() {
if root.is_none() || *fork >= root.unwrap() {
let mut slot_with_ancestors = vec![*fork];
slot_with_ancestors.extend(ancestors.get(&fork).unwrap_or(&HashSet::new()));
for slot in slot_with_ancestors {
if root.is_none() || slot >= root.unwrap() {
let entry = stake_weighted_lockouts.entry(slot).or_default();
*entry += u128::from(lockout.lockout) * u128::from(lockout.stake);
}
}
}
}
stake_weighted_lockouts
}
/// Update lockouts for all the ancestors
fn update_ancestor_lockouts(
stake_lockouts: &mut HashMap<u64, StakeLockout>,
@ -429,12 +462,13 @@ mod test {
fn test_collect_vote_lockouts_no_epoch_stakes() {
let accounts = gen_stakes(&[(1, &[0])]);
let epoch_stakes = EpochStakes::new_for_tests(2);
let locktower = Locktower::new(epoch_stakes, 0, 0.67);
let tower = Tower::new(epoch_stakes, 0, 0.67);
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
.into_iter()
.collect();
let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
let staked_lockouts = tower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
assert!(staked_lockouts.is_empty());
assert_eq!(tower.epoch_stakes.total_staked, 2);
}
#[test]
@ -442,13 +476,14 @@ mod test {
//two accounts voting for slot 0 with 1 token staked
let accounts = gen_stakes(&[(1, &[0]), (1, &[0])]);
let epoch_stakes = EpochStakes::new_from_stakes(0, &accounts);
let locktower = Locktower::new(epoch_stakes, 0, 0.67);
let tower = Tower::new(epoch_stakes, 0, 0.67);
let ancestors = vec![(1, vec![0].into_iter().collect()), (0, HashSet::new())]
.into_iter()
.collect();
let staked_lockouts = locktower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
let staked_lockouts = tower.collect_vote_lockouts(1, accounts.into_iter(), &ancestors);
assert_eq!(staked_lockouts[&0].stake, 2);
assert_eq!(staked_lockouts[&0].lockout, 2 + 2 + 4 + 4);
assert_eq!(tower.epoch_stakes.total_staked, 2);
}
#[test]
@ -457,14 +492,14 @@ mod test {
//two accounts voting for slot 0 with 1 token staked
let accounts = gen_stakes(&[(1, &votes), (1, &votes)]);
let epoch_stakes = EpochStakes::new_from_stakes(0, &accounts);
let mut locktower = Locktower::new(epoch_stakes, 0, 0.67);
let mut tower = Tower::new(epoch_stakes, 0, 0.67);
let mut ancestors = HashMap::new();
for i in 0..(MAX_LOCKOUT_HISTORY + 1) {
locktower.record_vote(i as u64, Hash::default());
tower.record_vote(i as u64, Hash::default());
ancestors.insert(i as u64, (0..i as u64).into_iter().collect());
}
assert_eq!(locktower.lockouts.root_slot, Some(0));
let staked_lockouts = locktower.collect_vote_lockouts(
assert_eq!(tower.lockouts.root_slot, Some(0));
let staked_lockouts = tower.collect_vote_lockouts(
MAX_LOCKOUT_HISTORY as u64,
accounts.into_iter(),
&ancestors,
@ -478,8 +513,8 @@ mod test {
#[test]
fn test_calculate_weight_skips_root() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
locktower.lockouts.root_slot = Some(1);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
tower.lockouts.root_slot = Some(1);
let stakes = vec![
(
0,
@ -498,12 +533,12 @@ mod test {
]
.into_iter()
.collect();
assert_eq!(locktower.calculate_weight(&stakes), 0u128);
assert_eq!(tower.calculate_weight(&stakes), 0u128);
}
#[test]
fn test_calculate_weight() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let stakes = vec![(
0,
StakeLockout {
@ -513,12 +548,12 @@ mod test {
)]
.into_iter()
.collect();
assert_eq!(locktower.calculate_weight(&stakes), 8u128);
assert_eq!(tower.calculate_weight(&stakes), 8u128);
}
#[test]
fn test_check_vote_threshold_without_votes() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
@ -528,12 +563,65 @@ mod test {
)]
.into_iter()
.collect();
assert!(locktower.check_vote_stake_threshold(0, &stakes));
assert!(tower.check_vote_stake_threshold(0, &stakes));
}
#[test]
fn test_aggregate_stake_lockouts() {
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
tower.lockouts.root_slot = Some(1);
let stakes = vec![
(
0,
StakeLockout {
stake: 1,
lockout: 32,
},
),
(
1,
StakeLockout {
stake: 1,
lockout: 24,
},
),
(
2,
StakeLockout {
stake: 1,
lockout: 16,
},
),
(
3,
StakeLockout {
stake: 1,
lockout: 8,
},
),
]
.into_iter()
.collect();
let ancestors = vec![
(0, HashSet::new()),
(1, vec![0].into_iter().collect()),
(2, vec![0, 1].into_iter().collect()),
(3, vec![0, 1, 2].into_iter().collect()),
]
.into_iter()
.collect();
let stake_weighted_lockouts =
Tower::aggregate_stake_lockouts(tower.root(), &ancestors, stakes);
assert!(stake_weighted_lockouts.get(&0).is_none());
assert_eq!(*stake_weighted_lockouts.get(&1).unwrap(), 8 + 16 + 24);
assert_eq!(*stake_weighted_lockouts.get(&2).unwrap(), 8 + 16);
assert_eq!(*stake_weighted_lockouts.get(&3).unwrap(), 8);
}
#[test]
fn test_is_slot_confirmed_not_enough_stake_failure() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
@ -543,19 +631,19 @@ mod test {
)]
.into_iter()
.collect();
assert!(!locktower.is_slot_confirmed(0, &stakes));
assert!(!tower.is_slot_confirmed(0, &stakes));
}
#[test]
fn test_is_slot_confirmed_unknown_slot() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = HashMap::new();
assert!(!locktower.is_slot_confirmed(0, &stakes));
assert!(!tower.is_slot_confirmed(0, &stakes));
}
#[test]
fn test_is_slot_confirmed_pass() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
@ -565,68 +653,68 @@ mod test {
)]
.into_iter()
.collect();
assert!(locktower.is_slot_confirmed(0, &stakes));
assert!(tower.is_slot_confirmed(0, &stakes));
}
#[test]
fn test_is_locked_out_empty() {
let locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let descendants = HashMap::new();
assert!(!locktower.is_locked_out(0, &descendants));
assert!(!tower.is_locked_out(0, &descendants));
}
#[test]
fn test_is_locked_out_root_slot_child_pass() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let descendants = vec![(0, vec![1].into_iter().collect())]
.into_iter()
.collect();
locktower.lockouts.root_slot = Some(0);
assert!(!locktower.is_locked_out(1, &descendants));
tower.lockouts.root_slot = Some(0);
assert!(!tower.is_locked_out(1, &descendants));
}
#[test]
fn test_is_locked_out_root_slot_sibling_fail() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let descendants = vec![(0, vec![1].into_iter().collect())]
.into_iter()
.collect();
locktower.lockouts.root_slot = Some(0);
assert!(locktower.is_locked_out(2, &descendants));
tower.lockouts.root_slot = Some(0);
assert!(tower.is_locked_out(2, &descendants));
}
#[test]
fn test_check_already_voted() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
locktower.record_vote(0, Hash::default());
assert!(locktower.has_voted(0));
assert!(!locktower.has_voted(1));
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
tower.record_vote(0, Hash::default());
assert!(tower.has_voted(0));
assert!(!tower.has_voted(1));
}
#[test]
fn test_is_locked_out_double_vote() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let descendants = vec![(0, vec![1].into_iter().collect()), (1, HashSet::new())]
.into_iter()
.collect();
locktower.record_vote(0, Hash::default());
locktower.record_vote(1, Hash::default());
assert!(locktower.is_locked_out(0, &descendants));
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(0, &descendants));
}
#[test]
fn test_is_locked_out_child() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let descendants = vec![(0, vec![1].into_iter().collect())]
.into_iter()
.collect();
locktower.record_vote(0, Hash::default());
assert!(!locktower.is_locked_out(1, &descendants));
tower.record_vote(0, Hash::default());
assert!(!tower.is_locked_out(1, &descendants));
}
#[test]
fn test_is_locked_out_sibling() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let descendants = vec![
(0, vec![1, 2].into_iter().collect()),
(1, HashSet::new()),
@ -634,30 +722,30 @@ mod test {
]
.into_iter()
.collect();
locktower.record_vote(0, Hash::default());
locktower.record_vote(1, Hash::default());
assert!(locktower.is_locked_out(2, &descendants));
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(tower.is_locked_out(2, &descendants));
}
#[test]
fn test_is_locked_out_last_vote_expired() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 0, 0.67);
let descendants = vec![(0, vec![1, 4].into_iter().collect()), (1, HashSet::new())]
.into_iter()
.collect();
locktower.record_vote(0, Hash::default());
locktower.record_vote(1, Hash::default());
assert!(!locktower.is_locked_out(4, &descendants));
locktower.record_vote(4, Hash::default());
assert_eq!(locktower.lockouts.votes[0].slot, 0);
assert_eq!(locktower.lockouts.votes[0].confirmation_count, 2);
assert_eq!(locktower.lockouts.votes[1].slot, 4);
assert_eq!(locktower.lockouts.votes[1].confirmation_count, 1);
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
assert!(!tower.is_locked_out(4, &descendants));
tower.record_vote(4, Hash::default());
assert_eq!(tower.lockouts.votes[0].slot, 0);
assert_eq!(tower.lockouts.votes[0].confirmation_count, 2);
assert_eq!(tower.lockouts.votes[1].slot, 4);
assert_eq!(tower.lockouts.votes[1].confirmation_count, 1);
}
#[test]
fn test_check_vote_threshold_below_threshold() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
@ -667,12 +755,12 @@ mod test {
)]
.into_iter()
.collect();
locktower.record_vote(0, Hash::default());
assert!(!locktower.check_vote_stake_threshold(1, &stakes));
tower.record_vote(0, Hash::default());
assert!(!tower.check_vote_stake_threshold(1, &stakes));
}
#[test]
fn test_check_vote_threshold_above_threshold() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
@ -682,13 +770,13 @@ mod test {
)]
.into_iter()
.collect();
locktower.record_vote(0, Hash::default());
assert!(locktower.check_vote_stake_threshold(1, &stakes));
tower.record_vote(0, Hash::default());
assert!(tower.check_vote_stake_threshold(1, &stakes));
}
#[test]
fn test_check_vote_threshold_above_threshold_after_pop() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = vec![(
0,
StakeLockout {
@ -698,18 +786,18 @@ mod test {
)]
.into_iter()
.collect();
locktower.record_vote(0, Hash::default());
locktower.record_vote(1, Hash::default());
locktower.record_vote(2, Hash::default());
assert!(locktower.check_vote_stake_threshold(6, &stakes));
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes));
}
#[test]
fn test_check_vote_threshold_above_threshold_no_stake() {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let stakes = HashMap::new();
locktower.record_vote(0, Hash::default());
assert!(!locktower.check_vote_stake_threshold(1, &stakes));
tower.record_vote(0, Hash::default());
assert!(!tower.check_vote_stake_threshold(1, &stakes));
}
#[test]
@ -724,7 +812,7 @@ mod test {
ancestors.insert(2, set);
let set: HashSet<u64> = vec![0u64].into_iter().collect();
ancestors.insert(1, set);
Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
assert_eq!(stake_lockouts[&0].lockout, 2);
assert_eq!(stake_lockouts[&1].lockout, 2);
assert_eq!(stake_lockouts[&2].lockout, 2);
@ -742,12 +830,12 @@ mod test {
slot: 2,
confirmation_count: 1,
};
Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
let vote = Lockout {
slot: 1,
confirmation_count: 2,
};
Locktower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
Tower::update_ancestor_lockouts(&mut stake_lockouts, &vote, &ancestors);
assert_eq!(stake_lockouts[&0].lockout, 2 + 4);
assert_eq!(stake_lockouts[&1].lockout, 2 + 4);
assert_eq!(stake_lockouts[&2].lockout, 2);
@ -760,7 +848,7 @@ mod test {
account.lamports = 1;
let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].into_iter().cloned().collect();
Locktower::update_ancestor_stakes(&mut stake_lockouts, 2, account.lamports, &ancestors);
Tower::update_ancestor_stakes(&mut stake_lockouts, 2, account.lamports, &ancestors);
assert_eq!(stake_lockouts[&0].stake, 1);
assert_eq!(stake_lockouts[&1].stake, 1);
assert_eq!(stake_lockouts[&2].stake, 1);
@ -782,51 +870,48 @@ mod test {
let total_stake = 4;
let threshold_size = 0.67;
let threshold_stake = (f64::ceil(total_stake as f64 * threshold_size)) as u64;
let locktower_votes: Vec<u64> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
let tower_votes: Vec<u64> = (0..VOTE_THRESHOLD_DEPTH as u64).collect();
let accounts = gen_stakes(&[
(threshold_stake, &[(VOTE_THRESHOLD_DEPTH - 2) as u64]),
(total_stake - threshold_stake, &locktower_votes[..]),
(total_stake - threshold_stake, &tower_votes[..]),
]);
// Initialize locktower
// Initialize tower
let stakes: HashMap<_, _> = accounts.iter().map(|(pk, (s, _))| (*pk, *s)).collect();
let epoch_stakes = EpochStakes::new(0, stakes, &Pubkey::default());
let mut locktower = Locktower::new(epoch_stakes, VOTE_THRESHOLD_DEPTH, threshold_size);
let mut tower = Tower::new(epoch_stakes, VOTE_THRESHOLD_DEPTH, threshold_size);
// CASE 1: Record the first VOTE_THRESHOLD locktower votes for fork 2. We want to
// CASE 1: Record the first VOTE_THRESHOLD tower votes for fork 2. We want to
// evaluate a vote on slot VOTE_THRESHOLD_DEPTH. The nth most recent vote should be
// for slot 0, which is common to all account vote states, so we should pass the
// threshold check
let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64;
for vote in &locktower_votes {
locktower.record_vote(*vote, Hash::default());
for vote in &tower_votes {
tower.record_vote(*vote, Hash::default());
}
let stakes_lockouts = locktower.collect_vote_lockouts(
vote_to_evaluate,
accounts.clone().into_iter(),
&ancestors,
);
assert!(locktower.check_vote_stake_threshold(vote_to_evaluate, &stakes_lockouts));
let staked_lockouts =
tower.collect_vote_lockouts(vote_to_evaluate, accounts.clone().into_iter(), &ancestors);
assert!(tower.check_vote_stake_threshold(vote_to_evaluate, &staked_lockouts));
// CASE 2: Now we want to evaluate a vote for slot VOTE_THRESHOLD_DEPTH + 1. This slot
// will expire the vote in one of the vote accounts, so we should have insufficient
// stake to pass the threshold
let vote_to_evaluate = VOTE_THRESHOLD_DEPTH as u64 + 1;
let stakes_lockouts =
locktower.collect_vote_lockouts(vote_to_evaluate, accounts.into_iter(), &ancestors);
assert!(!locktower.check_vote_stake_threshold(vote_to_evaluate, &stakes_lockouts));
let staked_lockouts =
tower.collect_vote_lockouts(vote_to_evaluate, accounts.into_iter(), &ancestors);
assert!(!tower.check_vote_stake_threshold(vote_to_evaluate, &staked_lockouts));
}
fn vote_and_check_recent(num_votes: usize) {
let mut locktower = Locktower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let mut tower = Tower::new(EpochStakes::new_for_tests(2), 1, 0.67);
let start = num_votes.saturating_sub(MAX_RECENT_VOTES);
let expected: Vec<_> = (start..num_votes)
.map(|i| Vote::new(i as u64, Hash::default()))
.collect();
for i in 0..num_votes {
locktower.record_vote(i as u64, Hash::default());
tower.record_vote(i as u64, Hash::default());
}
assert_eq!(expected, locktower.recent_votes())
assert_eq!(expected, tower.recent_votes())
}
#[test]

View File

@ -147,7 +147,7 @@ impl ContactInfo {
}
#[cfg(test)]
fn new_with_pubkey_socketaddr(pubkey: &Pubkey, bind_addr: &SocketAddr) -> Self {
pub(crate) fn new_with_pubkey_socketaddr(pubkey: &Pubkey, bind_addr: &SocketAddr) -> Self {
fn next_port(addr: &SocketAddr, nxt: u16) -> SocketAddr {
let mut nxt_addr = *addr;
nxt_addr.set_port(addr.port() + nxt);

View File

@ -3,15 +3,16 @@
//! designed to run with a simulator or over a UDP network connection with messages up to a
//! packet::BLOB_DATA_SIZE size.
use crate::crds::Crds;
use crate::crds::{Crds, VersionedCrdsValue};
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_gossip_pull::CrdsGossipPull;
use crate::crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE};
use crate::crds_value::CrdsValue;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use solana_runtime::bloom::Bloom;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashMap;
use solana_sdk::signature::Signable;
use std::collections::{HashMap, HashSet};
///The min size for bloom filters
pub const CRDS_GOSSIP_BLOOM_SIZE: usize = 1000;
@ -39,25 +40,24 @@ impl CrdsGossip {
pub fn set_self(&mut self, id: &Pubkey) {
self.id = *id;
}
/// process a push message to the network
pub fn process_push_message(&mut self, values: Vec<CrdsValue>, now: u64) -> Vec<Pubkey> {
let labels: Vec<_> = values.iter().map(CrdsValue::label).collect();
let results: Vec<_> = values
pub fn process_push_message(
&mut self,
from: &Pubkey,
values: Vec<CrdsValue>,
now: u64,
) -> Vec<VersionedCrdsValue> {
values
.into_iter()
.map(|val| self.push.process_push_message(&mut self.crds, val, now))
.collect();
results
.into_iter()
.zip(labels)
.filter_map(|(r, d)| {
if r == Err(CrdsGossipError::PushMessagePrune) {
Some(d.pubkey())
} else if let Ok(Some(val)) = r {
.filter_map(|val| {
let res = self
.push
.process_push_message(&mut self.crds, from, val, now);
if let Ok(Some(val)) = res {
self.pull
.record_old_hash(val.value_hash, val.local_timestamp);
None
Some(val)
} else {
None
}
@ -65,6 +65,31 @@ impl CrdsGossip {
.collect()
}
/// remove redundant paths in the network
pub fn prune_received_cache(
&mut self,
labels: Vec<CrdsValueLabel>,
stakes: &HashMap<Pubkey, u64>,
) -> HashMap<Pubkey, HashSet<Pubkey>> {
let id = &self.id;
let crds = &self.crds;
let push = &mut self.push;
let versioned = labels
.into_iter()
.filter_map(|label| crds.lookup_versioned(&label));
let mut prune_map: HashMap<Pubkey, HashSet<_>> = HashMap::new();
for val in versioned {
let origin = val.value.pubkey();
let hash = val.value_hash;
let peers = push.prune_received_cache(id, &origin, hash, stakes);
for from in peers {
prune_map.entry(from).or_default().insert(origin);
}
}
prune_map
}
pub fn new_push_messages(&mut self, now: u64) -> (Pubkey, HashMap<Pubkey, Vec<CrdsValue>>) {
let push_messages = self.push.new_push_messages(&self.crds, now);
(self.id, push_messages)
@ -147,7 +172,7 @@ impl CrdsGossip {
}
if now > 5 * self.push.msg_timeout {
let min = now - 5 * self.push.msg_timeout;
self.push.purge_old_pushed_once_messages(min);
self.push.purge_old_received_cache(min);
}
if now > self.pull.crds_timeout {
let min = now - self.pull.crds_timeout;

View File

@ -2,7 +2,7 @@
pub enum CrdsGossipError {
NoPeers,
PushMessageTimeout,
PushMessagePrune,
PushMessageAlreadyReceived,
PushMessageOldVersion,
BadPruneDestination,
PruneMessageTimeout,

View File

@ -27,12 +27,13 @@ use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::cmp;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30;
pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 5000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
#[derive(Clone)]
pub struct CrdsGossipPush {
@ -42,7 +43,8 @@ pub struct CrdsGossipPush {
active_set: IndexMap<Pubkey, Bloom<Pubkey>>,
/// push message queue
push_messages: HashMap<CrdsValueLabel, Hash>,
pushed_once: HashMap<Hash, u64>,
/// cache that tracks which validators a message was received from
received_cache: HashMap<Hash, (u64, HashSet<Pubkey>)>,
pub num_active: usize,
pub push_fanout: usize,
pub msg_timeout: u64,
@ -55,7 +57,7 @@ impl Default for CrdsGossipPush {
max_bytes: BLOB_DATA_SIZE,
active_set: IndexMap::new(),
push_messages: HashMap::new(),
pushed_once: HashMap::new(),
received_cache: HashMap::new(),
num_active: CRDS_GOSSIP_NUM_ACTIVE,
push_fanout: CRDS_GOSSIP_PUSH_FANOUT,
msg_timeout: CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS,
@ -67,10 +69,69 @@ impl CrdsGossipPush {
pub fn num_pending(&self) -> usize {
self.push_messages.len()
}
fn prune_stake_threshold(self_stake: u64, origin_stake: u64) -> u64 {
let min_path_stake = self_stake.min(origin_stake);
((CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT * min_path_stake as f64).round() as u64).max(1)
}
pub fn prune_received_cache(
&mut self,
self_pubkey: &Pubkey,
origin: &Pubkey,
hash: Hash,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<Pubkey> {
let origin_stake = stakes.get(origin).unwrap_or(&0);
let self_stake = stakes.get(self_pubkey).unwrap_or(&0);
let cache = self.received_cache.get(&hash);
if cache.is_none() {
return Vec::new();
}
let peers = &cache.unwrap().1;
let peer_stake_total: u64 = peers.iter().map(|p| stakes.get(p).unwrap_or(&0)).sum();
let prune_stake_threshold = Self::prune_stake_threshold(*self_stake, *origin_stake);
if peer_stake_total < prune_stake_threshold {
return Vec::new();
}
let staked_peers: Vec<(Pubkey, u64)> = peers
.iter()
.filter_map(|p| stakes.get(p).map(|s| (*p, *s)))
.filter(|(_, s)| *s > 0)
.collect();
let mut seed = [0; 32];
seed[0..8].copy_from_slice(&thread_rng().next_u64().to_le_bytes());
let shuffle = weighted_shuffle(
staked_peers.iter().map(|(_, stake)| *stake).collect_vec(),
ChaChaRng::from_seed(seed),
);
let mut keep = HashSet::new();
let mut peer_stake_sum = 0;
for next in shuffle {
let (next_peer, next_stake) = staked_peers[next];
keep.insert(next_peer);
peer_stake_sum += next_stake;
if peer_stake_sum >= prune_stake_threshold {
break;
}
}
peers
.iter()
.filter(|p| !keep.contains(p))
.cloned()
.collect()
}
/// process a push message to the network
pub fn process_push_message(
&mut self,
crds: &mut Crds,
from: &Pubkey,
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
@ -81,18 +142,20 @@ impl CrdsGossipPush {
return Err(CrdsGossipError::PushMessageTimeout);
}
let label = value.label();
let new_value = crds.new_versioned(now, value);
let value_hash = new_value.value_hash;
if self.pushed_once.get(&value_hash).is_some() {
return Err(CrdsGossipError::PushMessagePrune);
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
received_set.insert(from.clone());
return Err(CrdsGossipError::PushMessageAlreadyReceived);
}
let old = crds.insert_versioned(new_value);
if old.is_err() {
return Err(CrdsGossipError::PushMessageOldVersion);
}
let mut received_set = HashSet::new();
received_set.insert(from.clone());
self.push_messages.insert(label, value_hash);
self.pushed_once.insert(value_hash, now);
self.received_cache.insert(value_hash, (now, received_set));
Ok(old.ok().and_then(|opt| opt))
}
@ -258,16 +321,17 @@ impl CrdsGossipPush {
self.push_messages.remove(&k);
}
}
/// purge old pushed_once messages
pub fn purge_old_pushed_once_messages(&mut self, min_time: u64) {
/// purge received push message cache
pub fn purge_old_received_cache(&mut self, min_time: u64) {
let old_msgs: Vec<Hash> = self
.pushed_once
.received_cache
.iter()
.filter_map(|(k, v)| if *v < min_time { Some(k) } else { None })
.filter_map(|(k, (rcvd_time, _))| if *rcvd_time < min_time { Some(k) } else { None })
.cloned()
.collect();
for k in old_msgs {
self.pushed_once.remove(&k);
self.received_cache.remove(&k);
}
}
}
@ -278,6 +342,55 @@ mod test {
use crate::contact_info::ContactInfo;
use solana_sdk::signature::Signable;
#[test]
fn test_prune() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut stakes = HashMap::new();
let self_id = Pubkey::new_rand();
let origin = Pubkey::new_rand();
stakes.insert(self_id, 100);
stakes.insert(origin, 100);
let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&origin, 0));
let label = value.label();
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let mut low_staked_set = HashSet::new();
low_staked_peers.for_each(|p| {
let _ = push.process_push_message(&mut crds, &p, value.clone(), 0);
low_staked_set.insert(p);
stakes.insert(p, 1);
});
let versioned = crds
.lookup_versioned(&label)
.expect("versioned value should exist");
let hash = versioned.value_hash;
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
assert!(
pruned.is_empty(),
"should not prune if min threshold has not been reached"
);
let high_staked_peer = Pubkey::new_rand();
let high_stake = CrdsGossipPush::prune_stake_threshold(100, 100) + 10;
stakes.insert(high_staked_peer, high_stake);
let _ = push.process_push_message(&mut crds, &high_staked_peer, value.clone(), 0);
let pruned = push.prune_received_cache(&self_id, &origin, hash, &stakes);
assert!(
pruned.len() < low_staked_set.len() + 1,
"should not prune all peers"
);
pruned.iter().for_each(|p| {
assert!(
low_staked_set.contains(p),
"only low staked peers should be pruned"
);
});
}
#[test]
fn test_process_push() {
let mut crds = Crds::default();
@ -286,15 +399,15 @@ mod test {
let label = value.label();
// push a new message
assert_eq!(
push.process_push_message(&mut crds, value.clone(), 0),
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Ok(None)
);
assert_eq!(crds.lookup(&label), Some(&value));
// push it again
assert_eq!(
push.process_push_message(&mut crds, value.clone(), 0),
Err(CrdsGossipError::PushMessagePrune)
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
);
}
#[test]
@ -306,13 +419,16 @@ mod test {
let value = CrdsValue::ContactInfo(ci.clone());
// push a new message
assert_eq!(push.process_push_message(&mut crds, value, 0), Ok(None));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Ok(None)
);
// push an old version
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, value, 0),
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageOldVersion)
);
}
@ -327,7 +443,7 @@ mod test {
ci.wallclock = timeout + 1;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, value, 0),
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageTimeout)
);
@ -335,7 +451,7 @@ mod test {
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, value, timeout + 1),
push.process_push_message(&mut crds, &Pubkey::default(), value, timeout + 1),
Err(CrdsGossipError::PushMessageTimeout)
);
}
@ -349,7 +465,7 @@ mod test {
// push a new message
assert_eq!(
push.process_push_message(&mut crds, value_old.clone(), 0),
push.process_push_message(&mut crds, &Pubkey::default(), value_old.clone(), 0),
Ok(None)
);
@ -357,7 +473,7 @@ mod test {
ci.wallclock = 1;
let value = CrdsValue::ContactInfo(ci.clone());
assert_eq!(
push.process_push_message(&mut crds, value, 0)
push.process_push_message(&mut crds, &Pubkey::default(), value, 0)
.unwrap()
.unwrap()
.value,
@ -433,7 +549,10 @@ mod test {
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let mut expected = HashMap::new();
expected.insert(peer.label().pubkey(), vec![new_msg.clone()]);
assert_eq!(push.process_push_message(&mut crds, new_msg, 0), Ok(None));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg, 0),
Ok(None)
);
assert_eq!(push.active_set.len(), 1);
assert_eq!(push.new_push_messages(&crds, 0), expected);
}
@ -447,7 +566,7 @@ mod test {
assert_eq!(crds.insert(peer_2.clone(), 0), Ok(None));
let peer_3 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
assert_eq!(
push.process_push_message(&mut crds, peer_3.clone(), 0),
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
Ok(None)
);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
@ -471,7 +590,7 @@ mod test {
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let expected = HashMap::new();
assert_eq!(
push.process_push_message(&mut crds, new_msg.clone(), 0),
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
Ok(None)
);
push.process_prune_msg(&peer.label().pubkey(), &[new_msg.label().pubkey()]);
@ -490,7 +609,7 @@ mod test {
let new_msg = CrdsValue::ContactInfo(ci.clone());
let expected = HashMap::new();
assert_eq!(
push.process_push_message(&mut crds, new_msg.clone(), 1),
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 1),
Ok(None)
);
push.purge_old_pending_push_messages(&crds, 0);
@ -498,7 +617,7 @@ mod test {
}
#[test]
fn test_purge_old_pushed_once_messages() {
fn test_purge_old_received_cache() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
@ -507,23 +626,23 @@ mod test {
let label = value.label();
// push a new message
assert_eq!(
push.process_push_message(&mut crds, value.clone(), 0),
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Ok(None)
);
assert_eq!(crds.lookup(&label), Some(&value));
// push it again
assert_eq!(
push.process_push_message(&mut crds, value.clone(), 0),
Err(CrdsGossipError::PushMessagePrune)
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageAlreadyReceived)
);
// purge the old pushed
push.purge_old_pushed_once_messages(1);
push.purge_old_received_cache(1);
// push it again
assert_eq!(
push.process_push_message(&mut crds, value.clone(), 0),
push.process_push_message(&mut crds, &Pubkey::default(), value.clone(), 0),
Err(CrdsGossipError::PushMessageOldVersion)
);
}

297
core/src/cuda_runtime.rs Normal file
View File

@ -0,0 +1,297 @@
// Module for cuda-related helper functions and wrappers.
//
// cudaHostRegister/cudaHostUnregister -
// apis for page-pinning memory. Cuda driver/hardware cannot overlap
// copies from host memory to GPU memory unless the memory is page-pinned and
// cannot be paged to disk. The cuda driver provides these interfaces to pin and unpin memory.
use crate::recycler::Reset;
#[cfg(feature = "cuda")]
use crate::sigverify::{cuda_host_register, cuda_host_unregister};
use std::ops::{Deref, DerefMut};
#[cfg(feature = "cuda")]
use std::mem::size_of;
#[cfg(feature = "cuda")]
use core::ffi::c_void;
#[cfg(feature = "cuda")]
use std::os::raw::c_int;
#[cfg(feature = "cuda")]
const CUDA_SUCCESS: c_int = 0;
/// Page-pin the backing memory of `_mem` with cudaHostRegister so the CUDA
/// driver can overlap host-to-device copies. Compiles to a no-op unless the
/// "cuda" feature is enabled (hence the `_`-prefixed parameter).
pub fn pin<T>(_mem: &mut Vec<T>) {
    #[cfg(feature = "cuda")]
    unsafe {
        // Register the whole allocation (capacity, not len) so growth within
        // the current capacity stays covered by the registration.
        let err = cuda_host_register(
            _mem.as_mut_ptr() as *mut c_void,
            _mem.capacity() * size_of::<T>(),
            0,
        );
        if err != CUDA_SUCCESS {
            // Pinning is best-effort: log and continue with pageable memory.
            error!(
                "cudaHostRegister error: {} ptr: {:?} bytes: {}",
                err,
                _mem.as_ptr(),
                _mem.capacity() * size_of::<T>()
            );
        }
    }
}
/// Unregister memory previously registered by `pin` via cudaHostUnregister.
/// `_mem` must be the same base pointer that was registered. Compiles to a
/// no-op unless the "cuda" feature is enabled.
pub fn unpin<T>(_mem: *mut T) {
    #[cfg(feature = "cuda")]
    unsafe {
        let err = cuda_host_unregister(_mem as *mut c_void);
        if err != CUDA_SUCCESS {
            // Best-effort: a failed unregister only leaks the pinning.
            error!("cudaHostUnregister returned: {} ptr: {:?}", err, _mem);
        }
    }
}
// A vector wrapper where the underlying memory can be
// page-pinned. Controlled by flags in case user only wants
// to pin in certain circumstances.
#[derive(Debug)]
pub struct PinnedVec<T> {
    x: Vec<T>,      // the wrapped vector
    pinned: bool,   // whether `x`'s current heap allocation is registered (pinned)
    pinnable: bool, // whether this instance is allowed to pin at all
}
impl Reset for PinnedVec<u8> {
    /// Truncate to empty for recycler reuse; capacity (and any pinning) is kept.
    fn reset(&mut self) {
        self.resize(0, 0u8);
    }
}
impl Reset for PinnedVec<u32> {
    /// Truncate to empty for recycler reuse; capacity (and any pinning) is kept.
    fn reset(&mut self) {
        self.resize(0, 0u32);
    }
}
impl<T: Clone> Default for PinnedVec<T> {
    /// An empty, unpinned, non-pinnable vector.
    fn default() -> Self {
        Self::from_vec(Vec::new())
    }
}
impl<T> Deref for PinnedVec<T> {
    type Target = Vec<T>;
    /// Expose the inner `Vec` read-only; all `Vec`/slice methods (indexing,
    /// `capacity`, `contains`, ...) are reachable through this deref.
    fn deref(&self) -> &Self::Target {
        &self.x
    }
}
impl<T> DerefMut for PinnedVec<T> {
    // NOTE(review): this hands out `&mut Vec<T>`, so callers can trigger a
    // reallocation that bypasses the unpin/re-pin bookkeeping in
    // `push()`/`resize()` — confirm callers only use it for
    // non-reallocating mutation.
    fn deref_mut(&mut self) -> &mut Vec<T> {
        &mut self.x
    }
}
/// Borrowing iterator over a `PinnedVec` (wraps the slice iterator).
pub struct PinnedIter<'a, T>(std::slice::Iter<'a, T>);

/// Mutably-borrowing iterator over a `PinnedVec` (wraps the slice iterator).
pub struct PinnedIterMut<'a, T>(std::slice::IterMut<'a, T>);
impl<'a, T> Iterator for PinnedIter<'a, T> {
    type Item = &'a T;
    /// Forward to the wrapped slice iterator.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}
impl<'a, T> Iterator for PinnedIterMut<'a, T> {
    type Item = &'a mut T;
    /// Forward to the wrapped slice iterator.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}
impl<'a, T> IntoIterator for &'a mut PinnedVec<T> {
    // NOTE(review): despite taking `&mut PinnedVec`, this yields *immutable*
    // `&T` items via `PinnedIter` — `PinnedIterMut`/`iter_mut()` look intended
    // here. Fixing it would change the public `Item` type for every
    // `for x in &mut vec` caller, so only flagging it.
    type Item = &'a T;
    type IntoIter = PinnedIter<'a, T>;
    fn into_iter(self) -> Self::IntoIter {
        // `self.iter()` resolves through `Deref` to the slice iterator:
        // the inherent `PinnedVec::iter` requires `T: Clone`, which is not
        // bounded in this impl.
        PinnedIter(self.iter())
    }
}
impl<'a, T> IntoIterator for &'a PinnedVec<T> {
    type Item = &'a T;
    type IntoIter = PinnedIter<'a, T>;
    fn into_iter(self) -> Self::IntoIter {
        // `self.iter()` resolves through `Deref` to `Vec`'s slice iterator
        // (not the inherent `PinnedVec::iter`, which requires `T: Clone` —
        // unbounded in this impl), so wrapping it in `PinnedIter` type-checks.
        PinnedIter(self.iter())
    }
}
impl<T: Clone> PinnedVec<T> {
    /// Ensure capacity for at least `size` elements and page-pin the buffer.
    /// Also marks the vector as pinnable from now on.
    pub fn reserve_and_pin(&mut self, size: usize) {
        if self.x.capacity() < size {
            if self.pinned {
                // The reserve below may reallocate; unregister the current
                // heap buffer first. Fix: this previously called
                // `unpin(&mut self.x)`, which coerced `&mut Vec<T>` to
                // `*mut Vec<T>` and so unregistered the address of the Vec
                // header itself rather than the data pointer registered by
                // `pin` (every other site — push/resize/check_ptr/Drop —
                // passes `as_mut_ptr()`).
                unpin(self.x.as_mut_ptr());
                self.pinned = false;
            }
            // NOTE(review): `Vec::reserve` reserves `size` *additional*
            // elements beyond len(), so this can over-allocate when the
            // vector is non-empty; harmless but possibly unintended.
            self.x.reserve(size);
        }
        self.set_pinnable();
        if !self.pinned {
            pin(&mut self.x);
            self.pinned = true;
        }
    }

    /// Allow this vector's buffer to be page-pinned by later operations.
    pub fn set_pinnable(&mut self) {
        self.pinnable = true;
    }

    /// Wrap an existing `Vec` without pinning it.
    pub fn from_vec(source: Vec<T>) -> Self {
        Self {
            x: source,
            pinned: false,
            pinnable: false,
        }
    }

    /// Create an empty, unpinned vector with the given capacity.
    pub fn with_capacity(capacity: usize) -> Self {
        let x = Vec::with_capacity(capacity);
        Self {
            x,
            pinned: false,
            pinnable: false,
        }
    }

    /// Borrowing iterator over the elements.
    pub fn iter(&self) -> PinnedIter<T> {
        PinnedIter(self.x.iter())
    }

    /// Mutably-borrowing iterator over the elements.
    pub fn iter_mut(&mut self) -> PinnedIterMut<T> {
        PinnedIterMut(self.x.iter_mut())
    }

    pub fn is_empty(&self) -> bool {
        self.x.is_empty()
    }

    pub fn len(&self) -> usize {
        self.x.len()
    }

    #[cfg(feature = "cuda")]
    pub fn as_ptr(&self) -> *const T {
        self.x.as_ptr()
    }

    #[cfg(feature = "cuda")]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.x.as_mut_ptr()
    }

    /// Append an element. If the push is about to reallocate a pinned buffer,
    /// unpin the old allocation first; `check_ptr` re-pins afterwards if the
    /// buffer moved or grew.
    pub fn push(&mut self, x: T) {
        let old_ptr = self.x.as_mut_ptr();
        let old_capacity = self.x.capacity();
        // Predict realloc and unpin
        if self.pinned && self.x.capacity() == self.x.len() {
            unpin(old_ptr);
            self.pinned = false;
        }
        self.x.push(x);
        self.check_ptr(old_ptr, old_capacity, "push");
    }

    /// Resize in place. If growth is about to reallocate a pinned buffer,
    /// unpin the old allocation first; `check_ptr` re-pins afterwards if the
    /// buffer moved or grew.
    pub fn resize(&mut self, size: usize, elem: T) {
        let old_ptr = self.x.as_mut_ptr();
        let old_capacity = self.x.capacity();
        // Predict realloc and unpin.
        if self.pinned && self.x.capacity() < size {
            unpin(old_ptr);
            self.pinned = false;
        }
        self.x.resize(size, elem);
        self.check_ptr(old_ptr, old_capacity, "resize");
    }

    /// After a potentially-reallocating operation, re-pin the (new) buffer if
    /// this vector is pinnable and the allocation actually moved or changed
    /// capacity. `_from` labels the caller for trace logging.
    fn check_ptr(&mut self, _old_ptr: *mut T, _old_capacity: usize, _from: &'static str) {
        #[cfg(feature = "cuda")]
        {
            if self.pinnable && (self.x.as_ptr() != _old_ptr || self.x.capacity() != _old_capacity)
            {
                if self.pinned {
                    // The old allocation was still registered; release it
                    // before registering the new one.
                    unpin(_old_ptr);
                }
                trace!(
                    "pinning from check_ptr old: {} size: {} from: {}",
                    _old_capacity,
                    self.x.capacity(),
                    _from
                );
                pin(&mut self.x);
                self.pinned = true;
            }
        }
    }
}
impl<T: Clone> Clone for PinnedVec<T> {
    /// Deep-copy the contents; the clone is pinned iff `self` was pinned,
    /// and inherits the `pinnable` flag.
    fn clone(&self) -> Self {
        let mut copied = self.x.clone();
        let mut copy_is_pinned = false;
        if self.pinned {
            pin(&mut copied);
            copy_is_pinned = true;
        }
        debug!(
            "clone PinnedVec: size: {} pinned?: {} pinnable?: {}",
            self.x.capacity(),
            self.pinned,
            self.pinnable
        );
        Self {
            x: copied,
            pinned: copy_is_pinned,
            pinnable: self.pinnable,
        }
    }
}
impl<T> Drop for PinnedVec<T> {
    /// Unregister the buffer before the inner `Vec` frees it, so the CUDA
    /// driver is never left holding a registration for freed memory.
    fn drop(&mut self) {
        if self.pinned {
            unpin(self.x.as_mut_ptr());
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercise construction, push/resize growth, indexing via Deref,
    /// and the borrowing iterator.
    #[test]
    fn test_pinned_vec() {
        let mut v = PinnedVec::with_capacity(10);
        v.set_pinnable();
        v.push(50);
        v.resize(2, 10);

        // Length and contents after push + resize.
        assert_eq!(v.len(), 2);
        assert_eq!(v.is_empty(), false);
        assert_eq!(v[0], 50);
        assert_eq!(v[1], 10);

        // The iterator walks elements in order and then ends.
        let mut it = v.iter();
        assert_eq!(*it.next().unwrap(), 50);
        assert_eq!(*it.next().unwrap(), 10);
        assert_eq!(it.next(), None);
    }
}

View File

@ -10,8 +10,9 @@ use chrono::prelude::Utc;
use rayon::prelude::*;
use rayon::ThreadPool;
use solana_budget_api::budget_instruction;
use solana_merkle_tree::MerkleTree;
use solana_metrics::inc_new_counter_warn;
use solana_sdk::hash::{Hash, Hasher};
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::transaction::Transaction;
use std::borrow::Borrow;
@ -172,13 +173,16 @@ impl Entry {
pub fn hash_transactions(transactions: &[Transaction]) -> Hash {
// a hash of a slice of transactions only needs to hash the signatures
let mut hasher = Hasher::default();
transactions.iter().for_each(|tx| {
if !tx.signatures.is_empty() {
hasher.hash(&tx.signatures[0].as_ref());
}
});
hasher.result()
let signatures: Vec<_> = transactions
.iter()
.flat_map(|tx| tx.signatures.iter())
.collect();
let merkle_tree = MerkleTree::new(&signatures);
if let Some(root_hash) = merkle_tree.get_root() {
*root_hash
} else {
Hash::default()
}
}
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains

View File

@ -55,6 +55,38 @@ pub const NUM_CODING: usize = 8;
/// Total number of blobs in an erasure set; includes data and coding blobs
pub const ERASURE_SET_SIZE: usize = NUM_DATA + NUM_CODING;
#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct ErasureConfig {
num_data: usize,
num_coding: usize,
}
impl Default for ErasureConfig {
fn default() -> ErasureConfig {
ErasureConfig {
num_data: NUM_DATA,
num_coding: NUM_CODING,
}
}
}
impl ErasureConfig {
pub fn new(num_data: usize, num_coding: usize) -> ErasureConfig {
ErasureConfig {
num_data,
num_coding,
}
}
pub fn num_data(self) -> usize {
self.num_data
}
pub fn num_coding(self) -> usize {
self.num_coding
}
}
type Result<T> = std::result::Result<T, reed_solomon_erasure::Error>;
/// Represents an erasure "session" with a particular configuration and number of data and coding
@ -77,6 +109,12 @@ impl Session {
Ok(Session(rs))
}
pub fn new_from_config(config: &ErasureConfig) -> Result<Session> {
let rs = ReedSolomon::new(config.num_data, config.num_coding)?;
Ok(Session(rs))
}
/// Create coding blocks by overwriting `parity`
pub fn encode(&self, data: &[&[u8]], parity: &mut [&mut [u8]]) -> Result<()> {
self.0.encode_sep(data, parity)?;
@ -136,25 +174,27 @@ impl Session {
let idx;
let first_byte;
if n < NUM_DATA {
if n < self.0.data_shard_count() {
let mut blob = Blob::new(&blocks[n]);
blob.meta.size = blob.data_size() as usize;
data_size = blob.data_size() as usize - BLOB_HEADER_SIZE;
data_size = blob.data_size() as usize;
idx = n as u64 + block_start_idx;
first_byte = blob.data[0];
blob.set_size(data_size);
recovered_data.push(blob);
} else {
let mut blob = Blob::default();
blob.data_mut()[..size].copy_from_slice(&blocks[n]);
blob.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + size].copy_from_slice(&blocks[n]);
blob.meta.size = size;
data_size = size;
idx = (n as u64 + block_start_idx) - NUM_DATA as u64;
idx = n as u64 + block_start_idx - NUM_DATA as u64;
first_byte = blob.data[0];
blob.set_slot(slot);
blob.set_index(idx);
blob.set_size(data_size);
blob.set_coding();
recovered_coding.push(blob);
}
@ -179,6 +219,13 @@ impl CodingGenerator {
}
}
pub fn new_from_config(config: &ErasureConfig) -> Self {
CodingGenerator {
leftover: Vec::with_capacity(config.num_data),
session: Arc::new(Session::new_from_config(config).unwrap()),
}
}
/// Yields next set of coding blobs, if any.
/// Must be called with consecutive data blobs within a slot.
///
@ -226,13 +273,16 @@ impl CodingGenerator {
let index = data_blob.index();
let slot = data_blob.slot();
let id = data_blob.id();
let version = data_blob.version();
let mut coding_blob = Blob::default();
coding_blob.set_index(index);
coding_blob.set_slot(slot);
coding_blob.set_id(&id);
coding_blob.set_version(version);
coding_blob.set_size(max_data_size);
coding_blob.set_coding();
coding_blob.set_erasure_config(&data_blob.erasure_config());
coding_blobs.push(coding_blob);
}
@ -240,7 +290,7 @@ impl CodingGenerator {
if {
let mut coding_ptrs: Vec<_> = coding_blobs
.iter_mut()
.map(|blob| &mut blob.data_mut()[..max_data_size])
.map(|blob| &mut blob.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + max_data_size])
.collect();
self.session.encode(&data_ptrs, coding_ptrs.as_mut_slice())
@ -427,7 +477,7 @@ pub mod test {
}
#[test]
fn test_erasure_generate_coding() {
fn test_generate_coding() {
solana_logger::setup();
// trivial case
@ -449,10 +499,10 @@ pub mod test {
assert_eq!(coding_blobs.len(), NUM_CODING);
for j in 0..NUM_CODING {
assert_eq!(
coding_blobs[j].read().unwrap().index(),
((i / NUM_DATA) * NUM_DATA + j) as u64
);
let coding_blob = coding_blobs[j].read().unwrap();
//assert_eq!(coding_blob.index(), (i * NUM_DATA + j % NUM_CODING) as u64);
assert!(coding_blob.is_coding());
}
test_toss_and_recover(
&coding_generator.session,
@ -654,6 +704,8 @@ pub mod test {
S: Borrow<SlotSpec>,
{
let mut coding_generator = CodingGenerator::default();
let keypair = Keypair::new();
let bytes = keypair.to_bytes();
specs.into_iter().map(move |spec| {
let spec = spec.borrow();
@ -666,14 +718,14 @@ pub mod test {
let set_index = erasure_spec.set_index as usize;
let start_index = set_index * NUM_DATA;
let mut blobs = generate_test_blobs(0, NUM_DATA);
index_blobs(
&blobs,
&Keypair::new().pubkey(),
start_index as u64,
slot,
0,
);
let mut blobs = generate_test_blobs(start_index, NUM_DATA);
let keypair = Keypair::from_bytes(&bytes).unwrap();
index_blobs(&blobs, &keypair.pubkey(), start_index as u64, slot, 0);
// Signing has to be deferred until all data/header fields are set correctly
blobs.iter().for_each(|blob| {
blob.write().unwrap().sign(&keypair);
});
let mut coding_blobs = coding_generator.next(&blobs);
@ -738,9 +790,9 @@ pub mod test {
.into_iter()
.map(|_| {
let mut blob = Blob::default();
blob.data_mut()[..data.len()].copy_from_slice(&data);
blob.set_size(data.len());
blob.sign(&Keypair::new());
blob.data_mut()[..].copy_from_slice(&data);
blob.set_size(BLOB_DATA_SIZE);
blob.set_erasure_config(&ErasureConfig::default());
Arc::new(RwLock::new(blob))
})
.collect();
@ -771,7 +823,7 @@ pub mod test {
if i < NUM_DATA {
&mut blob.data[..size]
} else {
&mut blob.data_mut()[..size]
&mut blob.data[BLOB_HEADER_SIZE..BLOB_HEADER_SIZE + size]
}
})
.collect();

Some files were not shown because too many files have changed in this diff Show More