Compare commits

...

947 Commits

SHA1 Message Date
36398bc3f3 Only check TRIGGERED_BUILDKITE_TAG 2019-01-07 19:53:52 -08:00
fa0e1ad356 Don't turn the build red if the channel cannot be determined (e.g., building a tag) 2019-01-07 19:53:39 -08:00
f56c5dacca Remove a metrics datapoint, as it was causing excessive logging (#2287) (#2291)
- The 100-node test was bringing down the InfluxDB server
2019-01-03 10:42:13 -08:00
80e0da132a Rename getConfirmation -> getConfirmationTime 2018-12-22 13:11:10 -08:00
f89debdfa6 Document getConfirmationTime 2018-12-22 13:11:10 -08:00
16f7e46fce Ignore error while enabling nvidia persistence mode (#2265) 2018-12-21 12:47:45 -08:00
3a039c8007 Load nvidia drivers on node startup (#2263) (#2264)
* Load nvidia drivers on node startup

* added new script to enable nvidia driver persistent mode

* remove set -ex
2018-12-21 11:58:06 -08:00
56d5324837 Use CUDA for testnet automation performance calculations (#2259) (#2261) 2018-12-21 05:14:08 -08:00
d3bf0fc707 Use SSD for testnet automation (#2257) (#2258) 2018-12-21 04:52:00 -08:00
f9d8a1d6c0 Rename finality to confirmation (#2250)
* Rename finality to confirmation

* fix cargo fmt errors
2018-12-20 16:03:56 -08:00
70559253ee Use newer votes to calculate confirmation time (#2247) 2018-12-20 16:03:56 -08:00
9c61abe468 Reduce ticks per block to increase voting frequency (#2242) 2018-12-20 16:03:56 -08:00
970954ac3b Stable dashboard can now actually come from the stable channel 2018-12-20 08:06:02 -08:00
39d821ead8 Select correct branch for {testnet,-perf} when using a stable channel tag 2018-12-19 17:47:18 -08:00
e2225d3b71 Add more Azure details 2018-12-19 16:31:28 -08:00
666af1e62d Debug broadcast (#2208)
* Add per cf rocksdb options, increase compaction and flush threads

* Change broadcast stage to bulk write blobs

* add db_ledger function specifically for broadcast

* fix broken tests

* fix benches
2018-12-19 16:11:47 -08:00
2fe3402362 Use SSD for perf testnet (#2227) 2018-12-19 16:11:26 -08:00
14a236198f nit: rename publish-solana-tar.sh to publish-tarball.sh 2018-12-19 14:26:25 -08:00
cc1b43b90a Retire GCP setup 2018-12-19 14:26:25 -08:00
9448f0ce52 Add more Azure CI documentation 2018-12-19 14:26:25 -08:00
59fdd8f6be Move wallet airdrop retries into fullnode start script 2018-12-19 13:49:04 -08:00
7b20318ee4 Run s3cmd in a container to avoid additional CI system dependencies 2018-12-19 13:09:24 -08:00
c3c955b02e Build/install native programs within cargo-install-all.sh 2018-12-19 11:53:08 -08:00
6e56e41461 Document how to create a new Azure CI machine 2018-12-18 23:35:09 -08:00
d74d5e0e44 nit: prevent shellcheck command from getting expanded by default 2018-12-18 18:44:20 -08:00
cac08171de nit: prevent book build from getting expanded by default 2018-12-18 18:44:20 -08:00
6f6c350781 Skip stable-perf if no .rs files are modified in a PR 2018-12-18 18:44:20 -08:00
506724fc93 Remove non-standard : anchors 2018-12-18 18:44:20 -08:00
b4fe70d3d8 Skip bench if no .rs files are modified in a PR 2018-12-18 18:09:59 -08:00
3efbffe4e3 Run coverage when test-coverage.sh is modified 2018-12-18 18:09:59 -08:00
cafa873f06 run tests in a single thread so local runs succeed 2018-12-18 17:38:44 -08:00
b4f4347d6e add some more tests (#2217) 2018-12-18 17:27:03 -08:00
5c866dd000 test drive new coverage stuff (#2216) 2018-12-18 16:44:27 -08:00
974249f2a5 Parallelize entry processing in replay stage in validators (#2212)
* Parallelize entry processing in replay stage in validators

- single-threaded entry processing is not utilizing CPU cores to the fullest

* fix tests and address review comments
2018-12-18 16:06:05 -08:00
a65022aed7 DbLedger doesn't need to be mut, doesn't need an RwLock (#2215)
* DbLedger doesn't need to be mut, doesn't need an RwLock

* fix erasure cases
2018-12-18 15:18:57 -08:00
b101f40c32 Initial revision 2018-12-18 14:27:37 -08:00
e8e6c70e19 Remove duplicate _ definitions 2018-12-18 14:25:10 -08:00
c8d27f6424 Drop _ to clean up CI logs, apply more -j 2018-12-18 14:11:15 -08:00
287e8cefda Keep gcno files around to prevent breaking CI builds with a warm target/ cache 2018-12-18 14:07:42 -08:00
db8f2d9f07 Make ulimit non-fatal to keep the ci-cuda machine happy 2018-12-18 14:02:43 -08:00
cd6736d70b Remove duplication between test-stable{,-perf}.sh 2018-12-18 14:02:43 -08:00
0d2e3788ba Justify each coverage flag, and other cleanup 2018-12-18 13:03:38 -08:00
c0dcf67ec8 Move book build into test-checks 2018-12-18 13:03:38 -08:00
bc52336a1b affected_files metadata is only available for PR builds 2018-12-18 13:03:38 -08:00
3bfb052b0a Overhaul coverage setup 2018-12-18 10:48:06 -08:00
c71d5a111e Extract grcov download script 2018-12-18 10:48:06 -08:00
437b62c4d9 Upgrade grcov 2018-12-18 10:48:06 -08:00
cbca0ae264 Remove dead code 2018-12-18 10:48:06 -08:00
e0cde7dfc5 Remove stale log section 2018-12-18 10:32:40 -08:00
e720070945 Flip && style 2018-12-18 09:56:43 -08:00
a8ab6f4caf Preserve stable as default, use +nightly to get nightly 2018-12-18 09:54:47 -08:00
b7b1884950 Pass BUILDKITE_COMMIT env var into containers 2018-12-18 08:53:39 -08:00
755064d3e2 Use |cargo +nightly| to avoid assuming nightly is default 2018-12-18 08:44:33 -08:00
24a984086e nightly is now 1.33 2018-12-18 08:44:33 -08:00
4b831d58b7 Don't fiddle with default rust, humans don't like that 2018-12-18 08:44:33 -08:00
62f36037ea Pass CI env var into containers 2018-12-18 00:47:41 -08:00
ffdc1814c6 Add counters for gossip verification failures (#2094) 2018-12-17 20:12:50 -08:00
29776c0283 Publish book only on content changes instead of on every commit 2018-12-17 16:42:22 -08:00
69d7384cc0 Enable leader rotation on edge testnet (#2204) 2018-12-17 16:04:25 -08:00
9720ac0019 Fix try_erasure() (#2185)
* Fix try_erasure bug

* Re-enable asserts in test_replicator_startup

* Add test for out of order process_blobs
2018-12-17 15:34:19 -08:00
fc56e1e517 Correct crate-type to match other native programs 2018-12-17 15:17:13 -08:00
0f4837980f Switch noop from println to solana_logger 2018-12-17 14:56:12 -08:00
9a6e27ac36 Accounts is too big, should be its own module (#2198)
Account module is too big; it should be in its own module.
2018-12-17 12:41:23 -08:00
07202205c4 Revert "ignore unstable tests"
This reverts commit bd7ef5d445071329a3b49b1f8be71b602226bbec.
2018-12-17 10:47:32 -08:00
dc56bbeec8 Ensure the full workspace is built for coverage 2018-12-17 10:47:32 -08:00
4be537c51a Temporarily disable nightly build until it can be fixed 2018-12-17 10:15:38 -08:00
66c568ba67 Add wallet sanity timeout 2018-12-17 09:58:34 -08:00
9ff8abaf29 Ensure port is not in use before selecting it 2018-12-17 09:31:31 -08:00
b7144560c9 Include port number when gossip bind_to fails 2018-12-17 09:31:31 -08:00
4be6d01dfb Move last ids (#2187)
* Break out last_ids into its own module
* Boot SignatureNotFound from BankError
* No longer return BankError from LastIds methods
* No longer piggyback on BankError for a LastIds signature status
* Drop all dependencies on the bank
* SignatureStatus -> Status and LastIds -> StatusDeque
* Unstable tests, issue 2193
2018-12-17 07:55:56 -08:00
aef84320e0 Double cache size for stable-perf 2018-12-16 23:05:44 -08:00
9a5195e79e Remove CARGO_TARGET_CACHE_NAME, use BUILDKITE_LABEL 2018-12-16 23:05:44 -08:00
cc111941bb Cargo.lock 2018-12-16 23:05:44 -08:00
74ee1e5087 Increase the number of files a node may have open at a time 2018-12-15 17:15:22 -08:00
e5d1bd6589 Drop public suffix on build names 2018-12-15 16:54:23 -08:00
6a0f7a5ceb Update command path 2018-12-15 16:54:23 -08:00
554cd03269 Update buildkite badge URL 2018-12-15 16:54:23 -08:00
9995194cf1 Regenerate secrets 2018-12-15 15:27:58 -08:00
1298ab1647 Use ejson to manage build secrets 2018-12-15 15:10:04 -08:00
b8ab3078fb Add pipeline upload script 2018-12-15 15:10:04 -08:00
50e8666a14 Add format-url.sh 2018-12-15 15:10:04 -08:00
0659971ecf Remove unused cargo dependencies 2018-12-14 23:55:56 -08:00
fd562cb9e2 Rust 2018 cleanup 2018-12-14 21:57:15 -08:00
aaa5cd4615 Remove stray keygen 2018-12-14 21:57:15 -08:00
3f835f8ee3 Use proper match condition for duration (#2182) 2018-12-14 21:18:41 -08:00
5bf9a20d42 fullnode-config no longer depends on src/ 2018-12-14 20:13:34 -08:00
eedc8c7812 Move src/netutil.rs into its own crate 2018-12-14 20:13:34 -08:00
f0d1ed0cc4 |cargo test --all| 2018-12-14 19:32:04 -08:00
8ba1aed5a3 Fix up tests 2018-12-14 19:32:04 -08:00
9ef5e51c0f Cleanup slot remnants in db_ledger (#2153)
* Cleanup slot remnants in db_ledger
2018-12-14 17:05:41 -08:00
fe5566d642 Local testnet info (#2174) 2018-12-14 15:55:58 -08:00
4a2933b0b6 Update README.md 2018-12-14 15:55:16 -08:00
8ee0e9632c Switch to using hashbrown version of HashMap and (#2158)
HashSet for improved performance and memory usage
2018-12-14 15:10:10 -08:00
8fcb7112ec Fetch a new last_id to prevent DuplicateSignature errors during AccountInUse retries 2018-12-14 13:33:31 -08:00
6ac466c0a4 Move src/logger.rs into logger/ crate to unify logging across the workspace 2018-12-14 13:10:43 -08:00
d45fcc4381 Move src/wallet.rs into wallet/ crate 2018-12-14 12:15:18 -08:00
a22e1199cf Add fork selection RFC (#2061)
RFC and simulation for fork generation.
2018-12-14 11:15:23 -08:00
79f12d6b55 Move EntryTree back to proposals 2018-12-14 12:12:34 -07:00
483f6702a6 Rewrite synchronization chapter (#2156)
* Rewrite synchronization chapter
* Add synchronization terminology
2018-12-14 11:06:53 -07:00
f6e3464ab9 bench-tps rebase 2018-12-14 09:38:46 -08:00
708876e9a7 Fix CI and related issues in bench-tps
Rename crate to `solana-bench-tps` in its Cargo.toml

Move crate

Add to ci/publish-crate.sh
2018-12-14 09:38:46 -08:00
29d04aa533 Move bench_tps to new crate in workspace
Separate CLI/clap related code, create a new `Config` struct to hold all
configuration/CLI args

Remove most code from `main.rs`

Add a little documentation
2018-12-14 09:38:46 -08:00
6fcccedb70 align tick entries' tick_height with actual number of ticks in bank (#2147) 2018-12-14 02:25:50 -08:00
60f3aeb4ef clippy fix 2018-12-13 23:40:26 -08:00
c1ad987b04 Run checks over all crates in the workspace 2018-12-13 23:40:26 -08:00
9d0b7c6b31 Remove bench_streamer feature 2018-12-13 22:25:27 -08:00
d489cb1a8b Desnake upload_ci_artifact for consistency 2018-12-13 22:25:27 -08:00
0fe6d61036 Move binaries from src/bin into their own crate 2018-12-13 22:25:27 -08:00
092edabd2d Add homepage field to all crates 2018-12-13 22:25:27 -08:00
1a68bce94c Rename fullnode.rs to main.rs 2018-12-13 22:25:27 -08:00
87fe3ade81 Add noop cuda feature entry 2018-12-13 20:08:24 -08:00
accabca618 Find solana-fullnode-cuda 2018-12-13 20:08:24 -08:00
091b21fae7 Vote every number of ticks (#2141)
* Vote every number of ticks

* address review comments

* fix for failing leader rotation tests

* remove check for vote failure from replay tests
(as votes will be cached and transmitted when leader is available)
2018-12-13 18:43:10 -08:00
85398c728a Disable assert in replicator startup test 2018-12-13 16:50:30 -08:00
7325b19aef Do not allocate for each metrics submission (#2146) 2018-12-13 16:40:00 -08:00
7cdbbfa88e Storage stage updates
* Remove logging init from storage program: saw a crash in a test
  indicating the logger being init'ed twice.
* Add entry_height mining proof to indicate which segment the result is
  for
* Add an interface to get storage miner pubkeys for a given entry_height
* Add an interface to get the current storage mining entry_height
* Set the tvu socket to 0.0.0.0:0 in replicator to stop getting entries
  after the desired ledger segment is downloaded.
* Use signature of PoH height to determine which block to download for
  replicator.
2018-12-13 11:30:12 -08:00
3ce3f1adc1 Move book dev instructions out of top-level readme 2018-12-13 11:17:11 -07:00
9880a86f80 remove prev_id, unused (#2150) 2018-12-13 09:24:38 -08:00
647e5d76b0 Move solana-fullnode into fullnode/ 2018-12-13 01:45:29 -08:00
7e4af9382e Move solana-upload-perf into upload-perf/ 2018-12-13 01:06:40 -08:00
282d4a3563 Move solana-keygen into keygen/ 2018-12-13 01:06:40 -08:00
cafeef33c3 Relocate all keypair generation into one location: sdk/src/signature.rs 2018-12-13 01:06:40 -08:00
4f48f1a850 add db_ledger genesis, rework to_blob(), to_blobs() (#2135) 2018-12-12 20:42:12 -08:00
a05a378db4 cleanup 2018-12-12 19:12:51 -08:00
245362db96 Make a dummy version of serving repairs from db_ledger 2018-12-12 19:12:51 -08:00
b1b190b80d Fix too many args in Tvu::new (#2114)
* Reduce args in Tvu::new to under 8

Now pass in sockets through the crate::tvu::Sockets struct

Move ClusterInfo.keypair to pub(crate) in order to remove redundant
signing keypair parameter

* remove commented code
2018-12-12 18:57:48 -08:00
3408ce89a7 add check_tick_height (#2144) 2018-12-12 18:52:11 -08:00
59a094cb77 Ensure bpf_c files exist to avoid accidental rebuilds as the tree changes 2018-12-12 17:30:41 -08:00
8782b14842 Cargo.lock 2018-12-12 17:14:50 -08:00
0f38b4b856 Remove unused dependencies 2018-12-12 17:14:50 -08:00
75f407e191 Provide entire elf to bpf_loader 2018-12-12 17:14:50 -08:00
4b07778609 Add bench_streamer feature to inhibit building solana-bench-streamer by default
This program is not currently used in any automation and is fairly slow
to build.  Disabling it by default will speed incremental builds.
2018-12-12 16:31:13 -08:00
9b81696a09 remove obsoleted TODO 2018-12-12 16:26:59 -08:00
80e19e0ad7 Encapsulate accounts of solana::bank::Accounts
Make the field private and expose an account_values() method that
returns the values iterator from the internal hashmap
2018-12-12 16:26:59 -08:00
962e8dca1d Fix markdown 2018-12-12 17:19:46 -07:00
8da4be1b34 Prefer the term 'cluster' over 'network'
Use 'network' for the networking stack. Examples:

* The network drops packets.
* The cluster rejects bad transactions.
* The Solana cluster runs on a gigabit network.
2018-12-12 17:19:46 -07:00
f2ef74d1a1 Consistent naming between ToC and chapters 2018-12-12 17:19:46 -07:00
546c92751b 80-char lines 2018-12-12 17:19:46 -07:00
ae903f190e Broadcast for slots (#2081)
* Insert blobs into db_ledger in broadcast stage to support leader to validator transitions

* Add transmitting real slots to broadcast stage

* Handle real slots instead of default slots in window

* Switch to dummy repair on slots and modify erasure to support leader rotation

* Shorten length of holding locks

* Remove logger from replicator test
2018-12-12 15:58:29 -08:00
bf33d9d703 Disable snap build until #2127 is resolved 2018-12-12 15:13:11 -08:00
3a89d80a61 Update name in TPU 2018-12-12 14:55:27 -07:00
fd45e83651 Add web wallet example 2018-12-12 14:55:27 -07:00
27e2fd9b06 Update README.md 2018-12-12 14:35:22 -07:00
9a49ace606 No longer reserve terms from the terminology chapter
We followed the precedent set by the Rust book here, but now that
proposals are integrated, each proposal can simply include its own
terminology section.
2018-12-12 14:12:07 -07:00
3413ecc2bd Change query used to find list of nodes in the network (#2124)
* Change query used to find list of nodes in the network

* include "All" option for host selection
2018-12-12 12:38:00 -08:00
ad8b095677 Capitalize acronyms in book 2018-12-12 12:15:20 -07:00
38c72070fb Update links 2018-12-12 12:11:12 -07:00
93fe1af1a8 Integrate EntryTree description into the TVU doc 2018-12-12 12:11:12 -07:00
504bf4ba84 Bring drone description into the present 2018-12-12 12:11:12 -07:00
9f9c5fcf10 Migrate all RFC content into the book 2018-12-12 12:11:12 -07:00
90a0237457 Cherrypick recent changes to gossip RFC
Delete the RFC since this is all implemented.

See: 02bfcd23a9
2018-12-12 11:55:07 -07:00
c83538a60c Add new proposal process
And move replication and enclave proposals there to get a feel
for how it'd look.
2018-12-12 11:04:57 -07:00
13d4e3f29f Replace the leader rotation chapter with the latest RFC
The content that was originally copied was split into multiple
RFCs, leaving the book copy to bitrot.
2018-12-12 10:48:58 -07:00
cefbb7c27d Fix shared object relocations with multiple static arrays (#2121) 2018-12-12 08:41:45 -08:00
fa98434096 Update variables in dashboard (#2117)
* Update variables in dashboard

* fix escaped strings for query
2018-12-12 06:06:33 -08:00
af3ca02e35 Switch testnet-edge from snap to tarball
Snap publishing has been failing all day, unclear why.  Potentially
revert this commit if/when resolved.
2018-12-11 23:34:41 -08:00
5c396c222a Clean up install-native-programs.sh usage 2018-12-11 23:29:05 -08:00
088bab61a4 Remove |cargo install| duplication 2018-12-11 23:29:05 -08:00
080d18b06e Only run publish-crate on release branches, clarify crate ordering 2018-12-11 23:29:05 -08:00
54fb4e370c Abort make if scripts/install.sh fails 2018-12-11 21:57:53 -08:00
17f1f40140 branch -> fork 2018-12-11 17:37:54 -07:00
b011ed6358 branch -> fork
Save your branches for git
2018-12-11 17:36:16 -07:00
acbc6335af Minor fixes 2018-12-11 17:33:43 -07:00
511c84760e Fix typos, rendering and old terms 2018-12-11 17:27:54 -07:00
6cbf82dbe0 Delete storage.md 2018-12-11 17:10:01 -07:00
896622de64 Delete empty page
Bring this back in after replication is fully integrated.
2018-12-11 17:09:44 -07:00
1a160a86fa Fix typo and curve corners 2018-12-11 17:07:43 -07:00
11abd3cf6e Update tictactoe.md 2018-12-11 17:03:49 -07:00
9552badb16 Reference tic-tac-toe README instead of copying it
Also expand a bit on how it works.
2018-12-11 16:01:35 -08:00
6fd41beccd Reference the JavaScript API docs more directly 2018-12-11 16:01:35 -08:00
c679dea1b7 Add instructions to build and run tic-tac-toe 2018-12-11 16:01:35 -08:00
4788a4f775 Correctly describe repair and retransmit peers (#2110) 2018-12-11 15:51:47 -08:00
9243bc58db Metrics for window repair (#2106)
* Metrics for window repair

- Also increase max repair length

* fix vote counters, and add repair window graph

* update per node graphs

* revert max repair length change
2018-12-11 15:43:41 -08:00
2238725d1c empty entries -> ticks 2018-12-11 15:26:39 -07:00
bffa9f914c Next leader needs to publish empties 2018-12-11 15:26:39 -07:00
eeb31074de Take 2 2018-12-11 15:26:39 -07:00
af22de2cfa Cleanup leader rotation RFC 2018-12-11 15:26:39 -07:00
1d3f05a9d4 Update validator vote count 2018-12-11 13:32:39 -08:00
935524f20c Fix eh frame relocation (#2109)
* Exclude .eh_frame
2018-12-11 12:14:41 -08:00
5847961fec Fix BPF loader messages (#2098) 2018-12-11 11:20:26 -08:00
40d7f5eff8 Bump libc from 0.2.44 to 0.2.45
Bumps [libc](https://github.com/rust-lang/libc) from 0.2.44 to 0.2.45.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.44...0.2.45)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-11 11:52:27 -07:00
c57dedb034 Add missing ld.lld wrapper needed for shared objects linking 2018-12-11 09:56:20 -08:00
b2d7b34082 Add |./net.sh update| command to live update all network nodes 2018-12-11 09:40:22 -08:00
4d67aca919 add genesis and read_ledger to db_ledger (#2097) 2018-12-11 09:14:23 -08:00
e3dfd7b1ab Allow BPF structure passing and returning (#2100)
* Add BPF struct passing and returning tests
2018-12-11 09:03:37 -08:00
166945a461 Bump serde from 1.0.81 to 1.0.82
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.81 to 1.0.82.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.81...v1.0.82)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-11 08:53:20 -08:00
46866be21d Bump serde_derive from 1.0.81 to 1.0.82
Bumps [serde_derive](https://github.com/serde-rs/serde) from 1.0.81 to 1.0.82.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.81...v1.0.82)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-11 09:21:11 -07:00
154e20484d Use hostname in database if env is set (#2101) 2018-12-10 22:59:38 -08:00
aeee25e703 add tick_height to Entry to be able to repair by period, chain forks of Entries, etc. (#2096) 2018-12-10 20:03:04 -08:00
b51bcb55db Fix broken dashboard counters (#2093) 2018-12-10 16:10:44 -08:00
b5784de33f Disable leader rotation for testnet-automation until it's ready 2018-12-10 15:23:11 -08:00
9556a9be17 Update the artwork 2018-12-10 15:26:43 -07:00
01c524ddd2 Revert changes to counter names 2018-12-10 15:26:43 -07:00
5e703dc70a Free up the term 'replicate' for exclusive use in replicator
Also, align Sockets field names with ContactInfo.
2018-12-10 15:26:43 -07:00
bc96bd3410 Fix peer count in edge dashboard (#2090)
Fixes #2075
2018-12-10 14:24:32 -08:00
094f0a8be3 Leader rotation flag plumbing 2018-12-10 14:07:59 -08:00
3d996bf080 Disable leader rotation on CI testnets until it's ready 2018-12-10 14:07:59 -08:00
4b05ee6811 Add hacky sleep 2018-12-10 14:05:00 -08:00
d7032aeb43 Add vote instruction debug log 2018-12-10 13:24:14 -08:00
4ea1c030bc Give bootstrap leader one more token 2018-12-10 13:24:14 -08:00
172e511e56 Use retry_transfer to test multiple times for replicator tokens
May fix failures in CI where replicator is trying to do an airdrop.
2018-12-10 12:19:00 -08:00
4481efd51e Merge pull request #2084 from CriesofCarrots/fix-wallet-accountinuse
Fix wallet accountinuse
2018-12-10 12:20:55 -07:00
337c2bfd29 Fix spelling 2018-12-10 09:31:17 -08:00
ffc82c027e Fix markdown rendering 2018-12-10 09:53:56 -07:00
e8fd5b4600 Correct keypair argument 2018-12-10 08:41:22 -08:00
67f8916aa8 Bump serde from 1.0.80 to 1.0.81
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.80 to 1.0.81.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.80...v1.0.81)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-10 08:38:52 -08:00
96e01f3a79 Bump itertools from 0.7.11 to 0.8.0
Bumps [itertools](https://github.com/bluss/rust-itertools) from 0.7.11 to 0.8.0.
- [Release notes](https://github.com/bluss/rust-itertools/releases)
- [Commits](https://github.com/bluss/rust-itertools/compare/0.7.11...0.8.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-10 08:58:26 -07:00
1e755f261f Bump serde_derive from 1.0.80 to 1.0.81
Bumps [serde_derive](https://github.com/serde-rs/serde) from 1.0.80 to 1.0.81.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.80...v1.0.81)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-12-10 08:56:45 -07:00
b2ddac610c Add option to skip setup during cluster start 2018-12-10 07:47:15 -08:00
ad05f64b13 crdt-vote-count metric is now named cluster_info-vote-count 2018-12-09 19:23:11 -08:00
9b472d36fc Add --path . to keep new cargo content 2018-12-09 18:09:03 -08:00
b54b0a1d25 Document that -P is now available for |config| 2018-12-09 15:25:27 -08:00
f5794de636 Clean up bootstrap leader terminology in comments and variable names 2018-12-09 15:25:27 -08:00
7ae9d9690b mkdir-p for the caller 2018-12-09 09:41:14 -08:00
db3cca7fbe Display wallet address before airdrop to help with debug on airdrop failures 2018-12-09 09:41:14 -08:00
b9743957fa Make directory to hold programs 2018-12-09 08:38:41 -08:00
0ef099421c cargo fmt 2018-12-08 23:19:55 -07:00
f1ae5b1795 Fix warnings 2018-12-08 23:19:55 -07:00
a8d6c75a24 cargo +nightly fix --features=bpf_c,cuda,erasure,chacha --edition-idioms 2018-12-08 23:19:55 -07:00
1c2394227e Enable Rust 2018 2018-12-08 23:19:55 -07:00
c49e2f8bbd cargo +nightly fix --features=bpf_c,cuda,erasure,chacha --edition 2018-12-08 23:19:55 -07:00
af403ba6fa Ignore broken chacha bench 2018-12-08 23:19:55 -07:00
ec5a8141eb cargo fix --edition 2018-12-08 23:19:55 -07:00
92584bd323 Only run the audit 2018-12-08 23:19:55 -07:00
586d9ee850 fix some nits (#2034)
rework maybe_cargo_install(), renamed to cargo_install_unless, updated to take a command to attempt
2018-12-08 19:14:19 -08:00
2de45a4da5 Update airdrop tokens to 3 for fullnode (#2051)
Filter out leader while computing the super majority stake
2018-12-08 16:54:42 -08:00
f5569e76db Relocate native programs to deps/ subdirectory of the current executable
This layout is `cargo build` compatible, no post-build file moves
required.
2018-12-08 16:31:01 -08:00
3a13ecba1f Upgrade to Rust 1.31.0 2018-12-08 11:45:59 -08:00
73b9ee9e84 Add solana_ prefix to native_loader program
This allows its logging to show up in the default RUST_LOG=solana=info
log setting
2018-12-08 11:04:45 -08:00
b1682558a6 Remove optional --identity argument to simplify command 2018-12-08 10:22:51 -08:00
0a7c07977d Follow-up to 872a3317b 2018-12-08 09:23:08 -08:00
0a83b17cdd Upgrade to Rust 1.31.0 (#2052)
* Upgrade to Rust 1.31.0
* Upgrade nightly
* Fix all clippy warnings
* Revert relaxed version check and update
2018-12-07 20:01:28 -07:00
2bad6584f6 Update solana-genesis arguments 2018-12-07 16:57:02 -08:00
872a3317b5 Fully switch to bootstrap-leader for command-line args 2018-12-07 16:57:02 -08:00
38901002b0 Accept an ip address in addition to domain name 2018-12-07 16:57:02 -08:00
1db6a882bb rsync of genesis ledger now works for non-snap deployments 2018-12-07 16:57:02 -08:00
571522e738 Update jsonrpc version 2018-12-07 17:47:54 -07:00
b5a80d3d49 Update ledger replication chapter (#2029)
* ledger block -> ledger segment

The book already defines a *block* to be a slight variation of
how block-based changes define it. It's the thing the cluster
confirms should be the next set of transactions on the ledger.

* Boot storage description from the book
2018-12-07 16:52:36 -07:00
3441d3399b Replicator rework
* Move more of the replicator logic into the replicator class
* Add support for the RPC interface to query the storage last_id value
  that the replicator would sign and use to pick a block.
* Fix replicator connecting to gossip and change test to exercise that
  scenario.
2018-12-07 15:20:36 -08:00
fa288ab197 Remove note about replicators mining on same identity
Replicators pick their own identity, validators sample from
those.
2018-12-07 14:41:53 -08:00
af11562627 Correct ledger path 2018-12-07 11:32:08 -08:00
286f08f095 Drop old validator name, use fullnode instead 2018-12-07 11:32:08 -08:00
92c3e26c7a Flip symlinks 2018-12-07 11:32:08 -08:00
6516c2532d Ensure native programs for the correct platform are installed 2018-12-07 11:32:08 -08:00
82a0cc9d27 Ensure destination is not present 2018-12-07 11:32:08 -08:00
fa58da2401 Explicitly specify build variant when installing native programs 2018-12-07 11:32:08 -08:00
1ddf93fd86 Strip cp -r arg 2018-12-07 10:43:36 -08:00
cba9c5619e Relax stable version check during the transition period between 1.30 and 1.31 2018-12-06 19:44:47 -08:00
70c149c7da Rename leader/validator to bootstrap-leader/fullnode
Only rsyncing the genesis ledger snuck in here as well
2018-12-06 19:44:47 -08:00
b34e197424 Add newline at end of file 2018-12-06 17:46:46 -08:00
f4b26247c0 Genesis only needs a keypair, not the entire fullnode::Config 2018-12-06 16:31:24 -08:00
8f0a1e32d5 Use consistent naming for the mint id file 2018-12-06 16:31:24 -08:00
c4b8f0cd2f bench-tps will now generate an ephemeral identity if not provided with one
Also simplify scripts as a result
2018-12-06 16:30:48 -08:00
aecb06cd2a Update versions in install-libssl-compatibility.sh (#2044) 2018-12-06 15:57:30 -08:00
e3c4f1f586 Move client keygen into client.sh 2018-12-06 14:49:26 -08:00
97b1156a7a Rename Ncp to GossipService
And BroadcastStage to BroadcastService since it's not included in the
TPU pipeline.
2018-12-06 15:48:19 -07:00
02bfcd23a9 review comments (#2033) 2018-12-06 12:53:57 -08:00
cc2f448d92 Add fullnode --no-leader-rotation flag 2018-12-06 11:30:19 -08:00
b45d07c8cb Remove non-common functions from common.sh 2018-12-06 10:15:14 -08:00
f0fe089013 Adapt testnet-deploy metric datapoint names to {,bootnode-}fullnode 2018-12-06 08:04:33 -08:00
a20c1b4547 Apply review feedback
And take a stab at clarifying some other sections too.
2018-12-06 08:44:01 -07:00
56ffb4385d Use gossip RFC to seed the NCP description
And format the gossip RFC for easy diffing.
2018-12-06 08:44:01 -07:00
db3c5f91b6 Update configure 2018-12-05 22:51:44 -08:00
17204b4696 Use 80-character lines for easy diffing 2018-12-05 22:10:55 -07:00
8a83c45bc6 Use the book conventions for easy migration 2018-12-05 22:10:55 -07:00
a6312ba98f Switch snap to bootstrap-fullnode/fullnode naming 2018-12-05 18:59:43 -08:00
4170f11958 More detail for the storage RFC protocol
And section numbers which can be referenced from github issues.
2018-12-05 17:40:46 -08:00
04a0652614 Generalize net/ from leader/validator to bootstrap-fullnode/fullnode 2018-12-05 17:11:16 -08:00
b880dafe28 Cleanup intro 2018-12-05 15:25:11 -08:00
36530fc7c6 Fix link 2018-12-05 15:41:32 -07:00
4fd4218178 update terminology before tearing into RFCs (#1995)
update terminology before tearing into RFCs
2018-12-05 14:35:41 -08:00
632425c7d7 Move native_loader under programs/native/ 2018-12-05 14:32:42 -08:00
ad3e36a7ab Bump rand from 0.5.5 to 0.6.1 (#1891)
* Bump rand from 0.5.5 to 0.6.1

Bumps [rand](https://github.com/rust-random/rand) from 0.5.5 to 0.6.1.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/commits)

Signed-off-by: dependabot[bot] <support@dependabot.com>

* Fix conflicts and deprecated usages

* Fix benches
2018-12-05 14:12:10 -08:00
a29b307554 Reorg programming model to be more top-down
First explain how a client interacts with existing programs and why
you'd do that. Next, mention that users can contribute their own programs.
Then explain how those programs can be written in any language.
Finally, mention persistent storage, which is only needed by
stateful programs.
2018-12-05 13:36:43 -08:00
1bcafca690 Find test_tx again 2018-12-05 13:29:29 -08:00
5d80edd969 Properly check for failure (can't rely on set -e here) 2018-12-05 13:26:06 -08:00
e21b6d9db3 ensure we'd actually have N hashes per tick (#2011) 2018-12-05 12:49:41 -08:00
9c30bddb88 Rocks db erasure decoding (#1900)
* Change erasure to consume new RocksDb window

* Change tests for erasure

* Remove erasure from window

* Integrate erasure decoding back into window

* Remove corrupted blobs from ledger

* Replace Erasure result with result module's Result
2018-12-05 12:47:19 -08:00
7336645501 Move programs into the executable location so native_loader can find them 2018-12-05 10:49:06 -08:00
59e6bd115e system_program must be a static lib as it allocates Account memory 2018-12-05 10:49:06 -08:00
8597701b0f Expand matching to include optional _program suffix 2018-12-05 10:49:06 -08:00
15aef079e3 Include builtin programs for ledger verification 2018-12-05 10:49:06 -08:00
42689d4842 cargo fmt 2018-12-05 10:49:06 -08:00
6e9b8e21ae Drop new-style Result return to avoid error-type wrangling
Plus a backtrace at the point of failure is always nice
2018-12-05 10:49:06 -08:00
424612ea9d Reduce |ulimit -n| on macOS to max supported amount 2018-12-05 10:49:06 -08:00
5afafd9146 Update list of crates to publish 2018-12-05 10:49:06 -08:00
affa76f81d Initialize logger 2018-12-05 10:49:06 -08:00
340d5d557a Add vote program to workspace 2018-12-05 10:49:06 -08:00
214ed3667c Move system_transaction out of src/ 2018-12-05 10:49:06 -08:00
122627dda2 Move loader_transaction out of src/ 2018-12-05 10:49:06 -08:00
7af95eadcc Move vote_transaction out of src/ 2018-12-05 10:49:06 -08:00
9ee858a00c Move budget_program out of src/ 2018-12-05 10:49:06 -08:00
27d456bf93 Move storage_program out of src/ 2018-12-05 10:49:06 -08:00
ea6e042a6f Move vote_program out of src/ 2018-12-05 10:49:06 -08:00
a594f56c02 Add token_program.rs to sdk/ 2018-12-05 10:49:06 -08:00
e6fa74fe69 Remove custom Error enum, just use ProgramError 2018-12-05 10:49:06 -08:00
f184d69c7a Add account userdata errors 2018-12-05 10:49:06 -08:00
228a5aa75d Remove stray comment 2018-12-05 10:49:06 -08:00
9a4f8199d6 Move system_program out of src/ 2018-12-05 10:49:06 -08:00
ae0be1e857 Remove bpf_loader.rs 2018-12-05 10:49:06 -08:00
d010cac8a5 Remove token_program.rs 2018-12-05 10:49:06 -08:00
63a758508a Add sdk native_loader.rs 2018-12-05 10:49:06 -08:00
bf2658cee0 Apply review feedback 2018-12-05 10:30:16 -08:00
6ecb00a1d8 Add account access rules 2018-12-05 10:30:16 -08:00
1990501786 Describe executable and owner account metadata 2018-12-05 10:30:16 -08:00
963de90b7f Apply review feedback 2018-12-05 10:30:16 -08:00
13c7c3b3a6 Rewrite programming model with developer focus
Previous version talked about concurrency, which is described
in detail in the Anatomy of a Fullnode chapter. App developers
probably don't care that their programs run in parallel with
other programs. From their perspective, there's no difference
between 10x parallelism and a 10x faster CPU.
2018-12-05 10:30:16 -08:00
e4049f3733 Ensure subshell failures are reported 2018-12-05 10:28:03 -08:00
3cefa59a14 Remove stray tabs 2018-12-05 08:11:55 -08:00
0cb5ae41c6 Enable BPF shared objects (#2012)
* Switch to BPF ELF shared objects (.so)
2018-12-04 22:03:32 -08:00
209040e80e Free up term "finality" to imply "economic finality" (#2002)
* leader finality -> confirmation

Free up term "finality" to imply "economic finality."

* Reorder chapters
2018-12-04 20:52:38 -07:00
2112c87e13 Initial vote signing service implementation (#1996)
* Initial vote signing service implementation

- Does not use enclave for secure signing

* fix clippy errors

* added some tests

* more tests

* Address review comments + more tests
2018-12-04 11:10:57 -08:00
da44b0f0f6 Move markdown book theme to its default directory
It was getting in the way of my "git grep".
2018-12-04 10:14:41 -08:00
c1c2f1f0a9 Cleanup ad-hoc rpc address formation
Lots of places where we are forming rpc addresses.
2018-12-03 18:13:55 -08:00
777a0a858e Move ProgramError into sdk/ 2018-12-03 13:50:00 -08:00
68e99c18c0 Remove duplicate SYSTEM_PROGRAM_ID 2018-12-03 13:50:00 -08:00
c99f93e40a Remove signature.rs indirection 2018-12-03 13:50:00 -08:00
969016b9e4 Integrate cleanup from book (#1991)
This is backwards. In the future, I'll make changes to the RFC
first. Once the design is implemented, it can be more of a copy-paste
into the book.
2018-12-03 11:53:03 -07:00
4ae58cc854 Change range of leader scheduler to match current broadcasts (#1920) 2018-12-03 00:10:43 -08:00
1fbbf13ec9 Dissuade DOCKER=1 usage 2018-12-02 23:15:43 -08:00
3f9dc08984 Use docker system includes that now exist 2018-12-02 23:04:00 -08:00
1ddf9960a6 Update to llvm 0.0.4 2018-12-02 21:30:57 -08:00
9f45c0eb03 Set OS correctly 2018-12-02 21:11:56 -08:00
67155861e5 generate.sh output 2018-12-02 21:11:56 -08:00
5111255942 Map native filesystem to same location within docker 2018-12-02 21:11:56 -08:00
b405deb55a Always use llvm-native's include, as llvm-docker has no include 2018-12-02 21:11:56 -08:00
9b5368d0ec fixes to rfcs (#1976) 2018-12-02 16:44:14 -07:00
f8aa806d77 Explain how ledger broadcasting works (#1960) 2018-12-02 16:43:40 -07:00
e98ef7306d Update LLVM (#1987)
Build for all targets, use bzip2
2018-12-02 14:33:07 -08:00
188904c318 Fix Docker paths after move (#1986) 2018-12-02 13:47:05 -08:00
9594293804 Write versions in .. 2018-12-02 12:17:44 -08:00
814801d321 Restore OS macro 2018-12-02 12:17:44 -08:00
0896511b14 Echo install.sh output properly 2018-12-02 12:17:44 -08:00
222b177745 Echo cxx instead of cc when building c++ source files 2018-12-02 12:17:44 -08:00
4189a30b13 Check for version.md instead of README.md 2018-12-02 11:28:19 -08:00
f6f0a5d448 Store version info in version.md instead of README.md 2018-12-02 10:12:16 -08:00
b21facab7b Add metrics for prune messages (#1981) 2018-12-01 14:05:40 -08:00
70312ed77f Package package.sh to avoid a special case 2018-12-01 12:37:57 -08:00
ee9255cb1d Avoid unnecessary llvm/ subdirectory 2018-12-01 12:37:57 -08:00
f045e19ddc Remove version info from llvm/criterion install directory 2018-12-01 12:37:57 -08:00
3f1bececdf Update location of bpf sdk 2018-12-01 12:37:57 -08:00
34c3a0cc1f Add signature verification to gossip (#1937) 2018-12-01 12:00:30 -08:00
8ef73eee51 Reject builds faster: if sanity checks fail don't bother with the rest 2018-12-01 11:43:29 -08:00
e52f3f34a4 Autoinstall dependencies in the SDK itself 2018-12-01 10:47:59 -08:00
27b617b340 Remove upstream LLVM install instructions as we now (temporarily) bundle a forked LLVM 2018-12-01 10:47:59 -08:00
21a73d81ee grooming 2018-12-01 10:47:59 -08:00
7c3e6e8e86 Move bpf-sdk to sdk/bpf 2018-12-01 10:47:59 -08:00
42dc18ddfc Avoid exiting when cmd is not found 2018-11-30 20:44:34 -08:00
801df72680 h4,h5 font size increased 2018-11-30 18:03:55 -08:00
c8f161d17f a custom mdbook theme implemented to improve book style and structure 2018-11-30 18:03:55 -08:00
549bfe7412 Vote signing JSON RPC service (#1965)
* Vote signing JSON RPC service

- barebone service that listens for RPC requests

* Daemon for vote signer service

* Add request APIs for JSON RPC

* Cleanup of cargo dependencies

* Fix compiler error
2018-11-30 15:07:08 -08:00
b00011a3f1 Use custom LLVM (#1971)
BPF SDK uses custom LLVM
2018-11-30 14:33:29 -08:00
3ca826a480 re-enable test_tpu_forwarder (#1964) 2018-11-30 13:52:37 -08:00
b8ebb4d609 Cleanup RFCs on branch generation and leader rotation (#1967)
* rework rfcs

* comments
2018-11-30 12:51:40 -08:00
5321b606c1 update gossip and entrytree RFCs (#1972) 2018-11-30 12:26:46 -08:00
a1ad74a986 Bump nix from 0.11.0 to 0.12.0
Bumps [nix](https://github.com/nix-rust/nix) from 0.11.0 to 0.12.0.
- [Release notes](https://github.com/nix-rust/nix/releases)
- [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md)
- [Commits](https://github.com/nix-rust/nix/compare/v0.11.0...v0.12.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-11-30 10:39:13 -07:00
29d95328ce Use non-zero exit on channel determination failure 2018-11-30 08:50:17 -08:00
b2eeccbcc2 Find channel-info.sh 2018-11-30 08:49:49 -08:00
bad0b55ab6 Expose which keys signed the Transaction in the SDK 2018-11-30 08:16:23 -08:00
0878bd53d9 Delete stub src/transaction.rs 2018-11-29 23:07:57 -08:00
de910e1169 Make test_pubkey_distribution faster
multi-thread pubkey histogram generation.
2018-11-29 17:37:37 -08:00
f2cf647508 add entry-tree-cache and gossip rfc (#1946) 2018-11-29 15:44:58 -08:00
9684737de7 Add wait before checking confirm again
Otherwise we can quickly check that we
have no signature 4 times in a row.
2018-11-29 15:32:58 -08:00
ecc87ab1aa Add an optional timeout to thin_client
Such that a negative test like test_transaction_count doesn't
have to wait num_retries * default_timeout.
2018-11-29 13:53:40 -08:00
3cc0dd0d1e stabilize testing with --test-threads=1 2018-11-29 12:54:42 -08:00
fa359c6fc4 Merge vote new and register transactions 2018-11-29 12:31:34 -08:00
5c71f2a439 Add ulimit check to stable test suite
cargo test needs a larger ulimit than the default as well.
2018-11-29 11:39:42 -08:00
8cc751d1cc Improve RPC service startup error messages with actual error
The error was always fixed to a message about ports, but that's not the only
error that can occur.
2018-11-29 11:39:42 -08:00
978fd6858f Move replicator_startup_test to integration test set
Sometimes fails when run multithreaded with other tests.
2018-11-29 11:39:42 -08:00
41689256c6 Ensure key[0] is signed 2018-11-29 10:26:46 -08:00
99445f475b Add leader rotation links
Avoid the term "leader selection" here. More precise terms are
"leader scheduling", "leader rotation", and "fork selection."
2018-11-28 18:08:05 -08:00
070d6a2faa Drop mention of CLI tooling
This is a "how does it work?" chapter, not "how do I do it?"
2018-11-28 18:08:05 -08:00
3de63570f6 Better formatting and lots of terminology links 2018-11-28 18:08:05 -08:00
8d1ac37734 More terms 2018-11-28 18:08:05 -08:00
36503ead70 Fix capitalization
And delete JSON RPC Service for now, since it currently has no
content.
2018-11-28 18:08:05 -08:00
f4d3b3f0d6 Merged synchronization, PoH and VDF sections 2018-11-28 18:08:05 -08:00
acee1f7c6c Merged synchronization, PoH and VDF sections 2018-11-28 18:08:05 -08:00
c242467fdf Expand cluster overview, integrate Avalanche chapter 2018-11-28 18:08:05 -08:00
47ae25eeb9 Fix link 2018-11-28 17:48:41 -07:00
ddc4e7ffa0 use fewer transactions for the public, "welcome to Solana" demo 2018-11-28 16:23:22 -08:00
6a2ffafdb9 Update docker-solana location for CI 2018-11-28 16:20:02 -08:00
0c091c1b24 Dockerized LLVM (#1914)
Optionally build with dockerized custom llvm
2018-11-28 14:41:53 -08:00
55993ef0ce RFC for rendezvous of vote signing service with validator node (#1947) 2018-11-28 14:19:57 -08:00
30a0820cbe Update README.md 2018-11-28 13:33:55 -08:00
194e3100a9 Additional checks in test_bank_checkpoint_zero_balance (#1943) 2018-11-28 12:40:34 -08:00
8ad4464d4b add tests for other "from" indexes signing (or not) 2018-11-28 07:56:04 -08:00
e7b0a736f5 verify signature is on the from account 2018-11-28 07:56:04 -08:00
fa4bdb4613 add --no-capture to get some logs from flaky tests 2018-11-27 23:24:20 -08:00
167eb01735 optimize bench-tps and rpc_request to work on crappy WSL boxes 2018-11-27 22:45:08 -08:00
8fb5d72b13 Make insufficient tokens message more helpful 2018-11-27 17:37:25 -08:00
83c0711760 Rename SolKeyedAccounts to SolKeyedAccount 2018-11-27 15:36:04 -08:00
8947c5a4aa Set account to default if the balance reaches 0 in a checkpoint bank (#1932)
Fixes: #1931
2018-11-27 14:17:29 -08:00
a7562c9be1 Extract execute_transaction() from the bank 2018-11-27 12:35:52 -07:00
08dc169f94 Hoist load_loaders()
This makes execute_transactions() stateless.
2018-11-27 12:35:52 -07:00
f549d8ac74 Hoist loading of loaders
This might cause a TPS boost in batched BPF transactions, since
now it'll only clone its account once per transaction instead of
once per instruction.
2018-11-27 12:35:52 -07:00
1ac7536286 Pass executable_accounts into with_subset() 2018-11-27 12:35:52 -07:00
ec0a56cb9c Tokens are unsigned 2018-11-27 10:14:37 -08:00
f0d24a68ee Configure -rpath to locate libcriterion 2018-11-26 21:16:42 -08:00
2c529f2118 Ancestor verification for vote signing (#1919) 2018-11-26 19:26:54 -08:00
af1d9345e0 De-dup ci book build 2018-11-26 18:38:57 -08:00
03ce45d93a Fix snap build 2018-11-26 18:38:48 -08:00
1695803248 added branch determination and enclave configuration section to enclave rfc (#1873)
* added branch determination and enclave configuration section to enclave rfc

* spelling and grammar
2018-11-26 17:57:38 -08:00
58e3dd4cb6 Avoid trying to install svgbob when already installed 2018-11-26 17:18:55 -08:00
c7f678688d Stub out log functions when building tests 2018-11-26 15:41:49 -08:00
7bf4c08f70 Add BPF C unittest framework 2018-11-26 12:25:29 -08:00
69beee5416 Install svgbob 2018-11-26 09:44:19 -08:00
2200a31331 Generate book images via Make 2018-11-26 09:44:19 -08:00
88e270723f Move markdown book out of src/ 2018-11-26 09:44:19 -08:00
a13e25f083 Ignore flaky test_tpu_forwarder 2018-11-26 09:27:21 -08:00
826ac80e62 Avoid subverting bool return value 2018-11-26 09:11:40 -08:00
4506584c48 Employ stdbool.h, add stub wchar.h 2018-11-26 09:11:40 -08:00
3d3a30e200 Fix mdbook test 2018-11-26 07:51:10 -08:00
76b83ac0f4 Move testnet demos into the book
Have git readme focus on fullnode development and the book focus on
users.
2018-11-26 07:51:10 -08:00
903a9bfd05 s/contract/program/ 2018-11-26 08:20:42 -07:00
655ee1a64b Fix typos 2018-11-26 08:20:42 -07:00
e0e6c3fdb2 Extract execute_instruction() to seed new runtime module
Fixes #1528
2018-11-26 08:20:42 -07:00
31f00974f2 Hoist the lookup of executable accounts 2018-11-26 08:20:42 -07:00
c3218bb9c2 Hoist tick_height 2018-11-26 08:20:42 -07:00
90fb6ed739 Bump itertools from 0.7.9 to 0.7.11
Bumps [itertools](https://github.com/bluss/rust-itertools) from 0.7.9 to 0.7.11.
- [Release notes](https://github.com/bluss/rust-itertools/releases)
- [Commits](https://github.com/bluss/rust-itertools/compare/0.7.9...0.7.11)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-11-26 08:19:20 -07:00
d2972024de Uppercase acronyms
Looks like there will be very little Rust code in the markdown book
so switching back to English capitalization conventions.
2018-11-25 22:58:07 -07:00
3f9ad1253d Re-enable fixed tests (#1907) 2018-11-25 20:51:55 -08:00
a556a54dc9 Use title in link 2018-11-25 20:29:45 -07:00
dc0a2ca656 Move disclaimer down a bit
Odd to see a disclaimer before knowing anything about what you're reading
2018-11-25 20:27:35 -07:00
e9f986e54d Boot comma 2018-11-25 20:22:46 -07:00
357d852382 Add title to markdown book 2018-11-25 20:19:45 -07:00
6e00c6790e Move testnet metrics dashboard management out of the Grafana UI 2018-11-25 16:10:25 -08:00
f36604357e Remove CUDA Snap references 2018-11-25 16:08:29 -08:00
c3fb9d5549 Cleanup book (#1904)
* Cleanup book

* Distinguish upstream from downstream validators
* Add BroadcastStage to Fullnode/Tpu diagrams
* First attempt to re-describe the runtime

* Reorg book

Push back details of the fullnode implementation
2018-11-25 16:58:38 -07:00
f5b5c54d7d Update condition for nosigverify (#1903) 2018-11-25 13:11:07 -08:00
9f0b06bb86 Filter out leader node while retransmitting blobs (#1894) 2018-11-24 20:33:49 -08:00
57a384d6a0 Rocks db window service (#1888)
* Add db_window module for windowing functions from RocksDb

* Replace window with db_window functions in window_service

* Fix tests

* Make note of change in db_window

* Create RocksDb ledger in bin/fullnode

* Make db_ledger functions generic

* Add db_ledger to bin/replicator
2018-11-24 19:32:33 -08:00
69802e141f Add the story of how this codebase came to be 2018-11-24 14:39:53 -07:00
6fc02b7424 Detect legacy programs upfront 2018-11-24 11:56:51 -07:00
30cdd85028 Implement the same interface in all builtin programs 2018-11-24 11:56:51 -07:00
871dd47019 Extract the part of execute_instruction that should only return a ProgramError
TODO: hoist load_executable_accounts() and then change
process_instruction() to return ProgramError.
2018-11-24 11:56:51 -07:00
37f8dd57e2 Extract ProgramError from BankError 2018-11-24 11:56:51 -07:00
f827bfd83f Remove instruction index parameter 2018-11-24 11:56:51 -07:00
b3af930153 Rename process_transaction to process_instruction 2018-11-24 11:56:51 -07:00
cd488b7d07 Hoist program static methods to top-level functions 2018-11-24 11:56:51 -07:00
e2373ff51a add nosigverify command line option to ease debug 2018-11-23 16:55:04 -08:00
b3d2c900cd Rename BudgetState to BudgetProgram 2018-11-23 13:25:17 -07:00
d5adec20a3 get_ip_addr: Fall back to loopback if no better option exists 2018-11-23 13:24:41 -05:00
942256a647 Add db_ledger benchmarks (#1875)
* Add db_ledger benchmarks

* ignore benches in CI, due to timeouts
2018-11-23 06:12:43 -08:00
ca39486d06 Bump libc from 0.2.43 to 0.2.44
Bumps [libc](https://github.com/rust-lang/libc) from 0.2.43 to 0.2.44.
- [Release notes](https://github.com/rust-lang/libc/releases)
- [Commits](https://github.com/rust-lang/libc/compare/0.2.43...0.2.44)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-11-22 12:32:38 -07:00
db632fcc2a Bump tokio from 0.1.11 to 0.1.13
Bumps [tokio](https://github.com/tokio-rs/tokio) from 0.1.11 to 0.1.13.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Changelog](https://github.com/tokio-rs/tokio/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-0.1.11...tokio-0.1.13)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-11-22 11:12:46 -07:00
a3321a5d80 Fix endianness in db_ledger to account for the default byte-comparator used by Rocksdb (#1885) 2018-11-22 01:35:19 -08:00
521de13571 Add maximum repair length to db_window (#1886)
* Add maximum repair length to db_window
2018-11-21 23:44:49 -08:00
e6f91269ec Use --no-tty with apt-key in Docker 2018-11-21 16:45:48 -08:00
3abf6a8a30 Reorg the markdown book to cater to app devs
First, talk about how a client interacts with Solana to do useful
things. Then describe how the fullnode you're talking to works and
why it's so very fast.  Last, why that fullnode you don't trust
does what you asked it to anyway.
2018-11-21 15:49:57 -08:00
8d7f380dfd Remove extra version check 2018-11-21 14:30:26 -08:00
59163e2dd9 Optimize some CI stuff (#1880)
* CI Optimizations
2018-11-21 12:16:16 -08:00
574021041d Calculate tag in README
Don't have people test-driving old code. Latest tag should be good.
2018-11-21 11:17:23 -07:00
872adf1031 Update README.md 2018-11-20 16:48:18 -08:00
5fc1167802 Update README to say cuda 10.0
Prebuilts fetched with fetch-perf-libs are built
with cuda 10 now.
2018-11-20 10:07:15 -07:00
c89a09e5d0 Fix build issue seen when launching gce instance (#1874) 2018-11-20 07:37:16 -08:00
d9dabdfc74 Rocks db window utils (#1851)
* Implement new ledger module based on RocksDb

* Add db_window module for windowing functions from RocksDb
2018-11-19 23:20:18 -08:00
6b910d1bd4 add tpu_forwarding, simplify ClusterInfo::new() from Result<Self> to Self 2018-11-19 20:45:49 -08:00
1c4f799845 alphabetize deps (#1872) 2018-11-19 20:13:09 -08:00
bbd9ea8c00 Delete settings.rs.foo 2018-11-19 13:39:08 -08:00
fc67a968e8 Use known keys in the unit test to avoid random false positives. 2018-11-19 13:41:24 -07:00
3d113611cc remove Result<> return from ClusterInfo::new() (#1869)
strip Result<> for ClusterInfo::new()
2018-11-19 11:25:14 -08:00
c1af48bd85 Rename program_id => owner 2018-11-18 16:24:13 -08:00
07667771ef Fix Gossip Pushes going to invalid addresses (#1858) 2018-11-17 19:57:28 -08:00
3822c29415 Route program_id to program entrypoint 2018-11-17 19:42:03 -08:00
ff386d6585 Add disclaimer to markdown book
copy-paste from readme
2018-11-17 19:56:08 -07:00
e3ddfd8dff Remove budget RFC
It describes the wallet CLI, not the Budget program. And all the
same content is now maintained in src/wallet.md.
2018-11-17 19:52:00 -07:00
f0c79fdbca Delete 0005-branches-tags-and-channels.md 2018-11-17 18:34:47 -08:00
88ddb31477 terminology cleanup: leader slots and voting rounds 2018-11-17 18:56:13 -07:00
077d1a41f1 Add to book 2018-11-17 18:56:13 -07:00
857ab8662e backticks and missing variable descriptions 2018-11-17 18:56:13 -07:00
a17f9bd0f4 Work towards adding leader rotation to the book 2018-11-17 18:56:13 -07:00
f4b9e93b11 Migrate storage RFC to book 2018-11-17 18:55:08 -07:00
2c11bf2e66 Various book cleanup
* Merge Leader and Validator diagrams
* New sdk-tools diagram
* Move terminology to just after introduction
* Purge use of LAMPORT as an acronym
* Add notes about persistent storage
2018-11-17 17:50:29 -08:00
0e33773e92 Copy release docs into RELEASE.md
Once the repo implements something proposed in an RFC, no need to acknowledge its existence.

@mvines, please update this if it's no longer accurate.
2018-11-17 18:48:53 -07:00
719e14b30a Add an explicit state of a reserved signature
An RPC client that fetches the signature status before the bank finishes
executing the corresponding Transaction should receive SignatureNotFound
instead of Confirmed
2018-11-17 16:40:23 -08:00
38883d1de4 Clarify comment 2018-11-17 16:40:23 -08:00
c6c8351fca Update env_logger requirement from 0.5.12 to 0.6.0
Updates the requirements on [env_logger](https://github.com/sebasmagri/env_logger) to permit the latest version.
- [Release notes](https://github.com/sebasmagri/env_logger/releases)
- [Commits](https://github.com/sebasmagri/env_logger/commits/v0.6.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-11-17 16:30:44 -08:00
043f50487a Document patch version updates after a release is made 2018-11-17 16:29:19 -08:00
3a2b91f1b7 Add Cargo.lock to avoid getting broken by random upstream changes 2018-11-17 15:54:21 -08:00
a76d11d486 Don't ignore Cargo.lock 2018-11-17 15:54:21 -08:00
d1f01b5209 Fix clippy lint 2018-11-17 15:54:21 -08:00
7a54dbf7d5 Restore clippy, and run clippy sooner 2018-11-17 15:54:21 -08:00
33a5d5fe93 Enable debug builds by default for better backtraces 2018-11-17 10:52:08 -08:00
201a4b7b2a Advance input pointer correctly 2018-11-17 10:30:21 -08:00
591a28d516 Avoid extra commit when publishing book 2018-11-17 10:17:52 -08:00
22d160a3c3 Install drone 2018-11-17 17:20:15 +00:00
903c82d7f1 Add timeouts 2018-11-17 09:09:25 -08:00
b2e0395f19 Bump release tarball build timeout (ahem rocksdb) 2018-11-17 08:12:03 -08:00
d96a6b42a5 Move drone into its own crate 2018-11-16 20:42:21 -08:00
cf95708c18 Set drone address to always be the initial network entry point (#1847)
* Set drone address to always be the initial network entry point, so that even when leaders rotate the client can still find the drone

* Extract drone address as a separate argument to bench-tps

* Add drone port to client.sh instead of setting it in bench-tps

* Add drone entrypoint to scripts

* Fix build error
2018-11-16 19:56:26 -08:00
7fe50d6402 Temporarily disable clippy 2018-11-16 19:55:33 -08:00
e1c7b99450 Accounts get kicked if no tokens 2018-11-16 18:53:37 -08:00
12ae7b9a6b Add test for tvu POH verification (#1844) 2018-11-16 15:48:10 -08:00
6ac5700f2e Move metrics into its own crate 2018-11-16 15:10:07 -08:00
a0dd8617be Remove airdrop from fullnode 2018-11-16 13:25:55 -08:00
1576072edb remove spurious eprintln!() 2018-11-16 10:21:58 -08:00
03d206a7ca Check for valid tvu, not tpu in broadcast (#1836) 2018-11-15 23:30:22 -08:00
c973de1d76 Decouple log and metrics rate (#1839)
Use separate env for log and metrics rate.

Set default log level to WARN if unset.
2018-11-15 22:27:16 -08:00
71336965a6 Limit targets to 4 in bench-tps
Transaction got bigger, so only 4 targets can fit in a
Transaction now.
2018-11-15 20:25:07 -08:00
e791d0f74d Drone now returns signed airdrop transactions 2018-11-15 17:13:13 -08:00
3543a9a49f Add check for missing signature with fee'ed transaction
And update fetch-perf-libs version
2018-11-15 16:23:13 -08:00
7dd198a99e Change signed_key to index into account_keys
If index is within the signed keys range.
2018-11-15 16:23:13 -08:00
e048116ab2 Remove signed_keys
Use first signatures.len() of account_keys for signing
2018-11-15 16:23:13 -08:00
cda9ad8565 Multiple signatures for transactions
A TX with multiple instructions may need
multiple signatures.

Fixes #1531
2018-11-15 16:23:13 -08:00
928f375683 Rocks db (#1792)
* Add rocksdb crate

* Implement new ledger module based on RocksDb
2018-11-15 15:53:31 -08:00
d3e521f70e accept other socket errors, ignore unless out of tries (#1835) 2018-11-15 15:49:37 -08:00
96e03eca14 Remove unused dependency 2018-11-15 15:13:50 -08:00
659dfbf51f cargo:rerun always triggers if file does not exist 2018-11-15 14:59:54 -08:00
a7ee428214 Fix build 2018-11-15 14:06:57 -08:00
a41254e18c Add scalable gossip library (#1546)
* Cluster Replicated Data Store

Separate the data storage and merge strategy from the network IO boundary.
Implement an eager push overlay for transporting recent messages.

Simulation shows fast convergence with 20k nodes.
2018-11-15 13:23:26 -08:00
4a3230904e Specify rpc port 2018-11-15 12:32:15 -08:00
c81a3f6ced Fix RPC address clashes on local multi-node testnet (#1821)
* Fix RPC address clashes on local multi-node testnet
2018-11-15 10:42:02 -08:00
a5412fc0cd Fix find port functions 2018-11-15 10:45:39 -07:00
83fc3c10cf Setup CUDA env for local builds 2018-11-15 08:00:52 -08:00
6b6c87e510 Run BPF tests in CI 2018-11-14 17:16:37 -08:00
267f9115ba Add drone RFC (#1754)
* Add stamps RFC

* Don't use the language 'load the program'

* Replace stamps RFC with new more general drone design

* Fix typo

* Describe potential techniques for getting recent last_ids
2018-11-14 15:19:34 -08:00
39c87fd103 Add BPF benchmarks 2018-11-14 12:06:06 -08:00
2ad2fdd235 Remove inline simple program to avoid maintenance burden 2018-11-14 10:39:22 -08:00
1fda4b77ef Expose tick_height to bpf programs 2018-11-14 10:33:27 -08:00
5a8938209b Expose tick_height to native programs 2018-11-14 10:33:27 -08:00
0bf2ff6138 Add convenience macro for native program entrypoint 2018-11-14 10:33:27 -08:00
e33f3a2562 Publish expected native program entrypoint in sdk/ 2018-11-14 10:33:27 -08:00
bba19ce667 Catch up to solana-genesis tokens argument name change 2018-11-14 09:55:33 -08:00
9bf2d1d7b4 Publish BPF SDK to a channel-specific URL to ease downstream pickup 2018-11-14 09:36:44 -08:00
9fe210c454 Add host information to db entries (#1778)
Add new field to each db entry identifying the host
that it originated from.
2018-11-13 21:54:15 -08:00
f99fae3c61 Use exact solana-rbpf version, not maintaining backward compatibility 2018-11-13 17:45:46 -08:00
860dcdb449 Stubs for some libc headers 2018-11-13 17:44:46 -08:00
70cebaf74a Add size_t/ssize_t/sol_memset/sol_strlen 2018-11-13 17:44:46 -08:00
317fe19da7 Fix INC_DIRS usage 2018-11-13 17:44:46 -08:00
e7b6c8b7e0 Accounts get kicked if no tokens 2018-11-13 17:23:13 -08:00
478ba75d6b Update featurized test 2018-11-13 17:19:10 -08:00
4e553ea095 test_replicate fails locally, ignore 2018-11-13 17:13:25 -08:00
0c46f15f94 test_rpc_new fails locally, ignore for now 2018-11-13 17:12:25 -08:00
7b92497d21 Update counters irrespective of logging level (#1799) 2018-11-13 16:55:14 -08:00
4668a798ca Fix Sagar and I crossing wires (#1810) 2018-11-13 15:18:54 -08:00
729d28d910 Add poh verification before processing entries
- Replicate stage now verifies entries delivered
  by the window
- Minor refactor of entries_from_blobs
2018-11-13 14:17:00 -08:00
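A toy illustration of the kind of PoH check the replicate stage can apply (using the sha2 crate's current Digest API and a simplified hashing rule; real entries also mix in their transactions): every entry id must be reachable by hashing forward from the previous id.
```rust
use sha2::{Digest, Sha256};

/// Simplified entry: a real one also mixes its transactions into the hash.
struct Entry {
    num_hashes: u64,
    id: [u8; 32],
}

/// Hash forward `num_hashes` times from `start`.
fn next_hash(start: [u8; 32], num_hashes: u64) -> [u8; 32] {
    let mut hash = start;
    for _ in 0..num_hashes {
        let digest = Sha256::digest(hash);
        hash.copy_from_slice(&digest);
    }
    hash
}

/// The check run on entries coming out of the window:
/// each entry id must chain from the previous one.
fn verify(start_hash: [u8; 32], entries: &[Entry]) -> bool {
    let mut prev = start_hash;
    for entry in entries {
        if next_hash(prev, entry.num_hashes) != entry.id {
            return false;
        }
        prev = entry.id;
    }
    true
}

fn main() {
    let genesis = [0u8; 32];
    let e1 = Entry { num_hashes: 3, id: next_hash(genesis, 3) };
    let e2 = Entry { num_hashes: 5, id: next_hash(e1.id, 5) };
    assert!(verify(genesis, &[e1, e2]));
}
```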
66e9d30fda Change testnet automation to use TAR instead of snap (#1809) 2018-11-13 13:33:15 -08:00
6335be803c Broadcast last tick before leader rotation (#1766)
* Broadcast last tick before leader rotation to everybody on network

* Add test

* Refactor broadcast
2018-11-13 02:21:37 -08:00
a77b1ff767 Revert "Migrate from ring to ed25519-dalek" (#1798)
* Revert "Migrate from ring to ed25519-dalek"

This reverts commit 7c610b216b.

* Fix test failures with revert
2018-11-12 22:34:43 -08:00
1f6ece233f Remove unused path 2018-11-12 22:24:56 -08:00
d53077bb3e Activate perf-libs compatible CUDA env 2018-11-12 22:24:56 -08:00
2b44d5fb6a Fix snap PR builds 2018-11-12 22:24:56 -08:00
10e1e0c125 Switch to perf-libs v0.11.0 for CUDA 10 support 2018-11-12 20:58:52 -08:00
017c281eaf Remove CUDA support from Snap 2018-11-12 20:31:16 -08:00
c5b1bc1128 Remove obsolete update-default-cuda.sh 2018-11-12 20:31:16 -08:00
dafdab1bbc Add clang dependency to docker images, update validation checks (#1794) 2018-11-12 19:36:36 -08:00
d0ebee5e3b Correct path to solana-perf-CUDA_HOME.txt 2018-11-12 19:17:54 -08:00
aa7c741ec0 Switch to perf-libs v0.10.6 2018-11-12 19:17:54 -08:00
9e7b9487b0 perf-libs now drives setting CUDA_HOME 2018-11-12 18:49:15 -08:00
c7a67b5a02 Add deploy command to test 2018-11-12 18:21:16 -07:00
0e749dad4c Use cluster_info to get rpc address 2018-11-12 18:21:16 -07:00
fa72160c95 add last_id to Entry, PohEntry (#1783)
add prev_id to Entry, PohEntry
2018-11-12 17:03:23 -08:00
851e012c6c Upgrade EC2 image to 18.04 with CUDA 9.2 and 10 2018-11-12 15:17:34 -08:00
7f76403d0a Clean ~/solana during network start to avoid tripping over leftover files 2018-11-12 15:09:14 -08:00
126f065cc9 Extract complex loop from execute_instruction 2018-11-12 14:47:23 -08:00
7ee4dec3f1 Upgrade GCE GPU image to 18.04 2018-11-12 12:18:50 -08:00
c07d09c011 Add net/scp.sh for easier file transfer to/from network nodes 2018-11-12 11:48:53 -08:00
4d98da44e3 Fix possibility of a vote error breaking ledger (#1768)
* Fix possibility of a vote error breaking ledger

* Add test
2018-11-12 11:40:32 -08:00
15c00ea2ef Improve comments 2018-11-12 10:59:01 -08:00
522876c808 Rename Account.program_id to Account.owner 2018-11-12 10:59:01 -08:00
7d05cc8c5d Add missing account fields 2018-11-12 10:59:01 -08:00
49f4be6a2b codemod --extensions rs loader_program_id loader 2018-11-12 10:59:01 -08:00
e702515312 Add basic C++ support 2018-11-12 09:08:40 -08:00
5fce8d2ce1 Don't ignore VoteProgram errors 2018-11-11 22:18:06 -07:00
2696b22348 Cleanup TVU diagram 2018-11-11 20:55:21 -08:00
5df4754579 Don't call instructions transactions 2018-11-11 20:07:15 -08:00
a00284c727 Remove userdata diff and make helper fn 2018-11-11 18:57:28 -07:00
3832602ec4 Move notifications after store_accounts 2018-11-11 18:57:28 -07:00
3466f139a4 set -e shuffling 2018-11-11 16:24:36 -08:00
def7d156f6 codemod --extensions sh '#!/usr/bin/env bash -e' '#!/usr/bin/env bash\nset -e' 2018-11-11 16:24:36 -08:00
33aab094ef codemod --extensions sh '#!/bin/bash' '#!/usr/bin/env bash' 2018-11-11 16:24:36 -08:00
cf6f344ccc Add CUDA_HOME env var to permit overriding the CUDA install location 2018-11-11 16:24:18 -08:00
b670b9bcde Regenerate identity files in CI 2018-11-11 09:22:52 -07:00
fea86b2955 No longer serialize as JSON-encoded pkcs8
That's supposed to be an ASCII format, but we're not making use
of it. We can switch back to that some day, but if we do, it shouldn't
be JSON-encoded.
2018-11-11 09:22:52 -07:00
7c610b216b Migrate from ring to ed25519-dalek
Why?

* Pure Rust, no BoringSSL (or OpenSSL) dependency
* Those avx2 benchmarks
* ring includes far more than what we need
* ring author won't add release tags: https://github.com/briansmith/ring#versioning--stability
2018-11-11 09:22:52 -07:00
bec34496f1 Generate id.json earlier 2018-11-10 18:05:55 -08:00
49014393e1 Be less fancy for bash 4.4 compat 2018-11-10 18:05:55 -08:00
818d03c835 Bump earlyoom version 2018-11-10 15:56:17 -08:00
cdf1a96e23 Revert "V1 Window/Ledger based on RocksDb (#1712)"
This reverts commit bfcdec95cb.
2018-11-09 20:25:53 -07:00
bfcdec95cb V1 Window/Ledger based on RocksDb (#1712)
* Add rocksdb

* Implement new ledger module based on RocksDb
2018-11-09 18:30:26 -08:00
fc55835932 Revert "Boot rpc_port"
This reverts commit 1984b6db06.
2018-11-09 17:52:10 -07:00
3772910bf2 Boot rpc_port 2018-11-09 17:52:10 -07:00
24379c14dc Fix clippy warnings 2018-11-09 17:52:10 -07:00
23846bcf1c Don't require a cluster to query for one's own pubkey 2018-11-09 17:52:10 -07:00
9dd0a6e6a7 Boot drone_addr and rpc_addr from config
WalletConfig is intended for the validated command-line input.
2018-11-09 17:52:10 -07:00
5ca473ac2d Don't get the network from parse_args 2018-11-09 17:52:10 -07:00
e1a551e8f2 Create target/ if it doesn't exist yet 2018-11-09 12:03:07 -08:00
0926702269 Fix grcov download on macos and upload gcda/gcdo files for debugging 2018-11-09 11:19:28 -07:00
0a85347a0d Upgrade Rust stable to 1.30.1
Fixes `cargo doc`
2018-11-09 07:46:51 -08:00
fb59f73c1a Link readme to book (#1750)
* Link readme to book
2018-11-09 07:27:03 -07:00
eaa8b9cb1e Publish book 2018-11-09 02:13:59 -07:00
b8261d7d83 Determine network version for tar and local deploys 2018-11-08 22:02:42 -08:00
f5827d4a83 Fix typo 2018-11-08 17:15:48 -07:00
b0f8a983c4 Add the solana-wallet documentation (#1744)
* Add the solana-wallet documentation

There doesn't seem to be a way to publish bin docs to crates.io.
Until there is, we can include CLI documentation in the appendix
of the markdown book.

* A command to generate all the usage docs

Usage:

$ scripts/wallet-help.sh >> src/wallet.md
2018-11-08 15:42:20 -07:00
56c77bf482 Add IntelliJ files to ignore 2018-11-08 13:00:00 -08:00
d831c5dcc9 remove dead poh code (#1747) 2018-11-08 12:55:23 -08:00
ce474eaf54 Better titles for tpu and tvu 2018-11-08 11:33:52 -07:00
0da1c06b15 Add disk to the hardware used by both Tpu and Tvu 2018-11-08 11:33:52 -07:00
01edc94a4b Move description of the Rust flavor of stages to service.rs 2018-11-08 11:33:52 -07:00
f96563c3f2 Add documentation for pipelining 2018-11-08 11:33:52 -07:00
30697f63f1 add support for slots in erasure (#1736) 2018-11-08 10:20:03 -08:00
433fcef70b Enclave RFC updates for PoH verification (#1739)
* Enclave RFC updates for PoH verification

* fix spelling error
2018-11-08 06:52:14 -08:00
34b5b3d9c5 Add TODO in logs section 2018-11-07 20:46:57 -08:00
ea8b19a40f Update testnet info 2018-11-07 20:43:51 -08:00
b0405db5a9 Assign static IPs to {edge,beta}.testnet.solana.com 2018-11-07 20:11:00 -08:00
f34f0af6b1 Install native programs in the correct location 2018-11-07 19:44:57 -08:00
51ed48941b Continue if docker0 is not present 2018-11-07 19:33:20 -08:00
22b6cbb4da Switch testnet to AWS 2018-11-07 18:57:08 -08:00
87ac549689 Work around AWS key management limitation 2018-11-07 18:48:27 -08:00
2a6046de8e Cleanup TVU code to look like its block diagram (#1737)
* Reorg TVU code to look like TVU diagram

And move channel creation into LedgerWriteStage so that it can
be used in the same way as all the other stages.

* Delete commented out code
2018-11-07 19:25:36 -07:00
25dd5145bb Switch to us-west-1a, us-west-1b is causing trouble 2018-11-07 18:23:28 -08:00
f8f11b7f50 Remove docker0 interface if present 2018-11-07 18:23:24 -08:00
82f914e0dc Work around AWS boot check weirdness 2018-11-07 15:46:04 -08:00
3b41eec199 Shuffle AWS regions 2018-11-07 15:00:55 -08:00
9359cc69d5 Invert gpu check 2018-11-07 14:44:40 -08:00
b02b636b36 Support local tarball deploys 2018-11-07 14:44:40 -08:00
a537154c28 Remove all cuda dependencies from release tarball beyond solana-fullnode-cuda 2018-11-07 14:44:40 -08:00
39e1bdeb71 Initial RFC for use of enclave for vote signing (#1734)
* Initial RFC for use of enclave for vote signing

* Fix grammar

* address review comments
2018-11-07 14:36:16 -08:00
43bd28cdfa Add loader_ prefix to LoaderTransaction methods 2018-11-07 15:06:38 -07:00
6c10458b5b leader slots in Blobs (#1732)
* add leader slot to Blobs
* remove get_X() methods in favor of X() methods for Blob
* add slot to get_scheduled_leader()
2018-11-07 13:18:14 -08:00
3ccbf81646 Update README.md 2018-11-07 13:04:14 -08:00
2e38cd98c0 Update README.md 2018-11-07 12:58:24 -08:00
7780d9bab8 Add ledger write and storage stage to TVU documentation 2018-11-07 12:07:12 -08:00
8feed96eac Update README.md 2018-11-07 11:19:37 -08:00
16d23292dc Improve error messages 2018-11-07 10:35:10 -08:00
812a8bcc6c Permit release tag tarballs 2018-11-07 10:33:58 -08:00
63807935cb Switch testnet/testnet-beta to tarball release 2018-11-07 10:30:02 -08:00
92a8b646df Fix tarball publishing for tags 2018-11-07 10:26:19 -08:00
d9f9e347ab Delete testnet-master, testnet-master-perf 2018-11-07 10:08:29 -08:00
2ef8ebe111 AWS AMIs are region specific 2018-11-07 10:05:58 -08:00
038a46b5ef Integrate the markdown book into the codebase
This implies that the book should describe exactly what is implemented,
and will not lead the way and eventually bitrot as the RFCs do.
2018-11-07 10:58:47 -07:00
3852ad3048 Make markdown docs more modular
No need to assume the book context.
2018-11-07 10:58:47 -07:00
1075a73902 Elf relocations (#1724)
Use relocatable BPF ELFs
2018-11-07 09:40:23 -08:00
863a0c3f8f s/edge/beta/ 2018-11-07 08:54:32 -08:00
f8673931b8 Increase boot timeout 2018-11-07 08:32:15 -08:00
dd4fb7aa90 Add AWS-based nets 2018-11-07 07:47:39 -08:00
2af5aad032 Switch testnet/testnet-perf to the latest beta or stable tag 2018-11-07 07:47:39 -08:00
9027141ff8 Publish release tarballs for tags 2018-11-07 07:47:39 -08:00
c4bc331663 Add support for using a release tar 2018-11-07 07:47:39 -08:00
8be7c13d2d Stub out architecture book (#1674)
* Stub out architecture documentation

* Add book HTML generation and book tests to CI

* Add heading

* Better table of contents

* Reference existing documentation

Move ASCII art from code comments into rendered SVG

* Attempt to fix CI

* Add lamport docs

And truncate lines to 80 characters

* Fix links

And reference shorter, newer description of PoH.

* Replace ASCII art with SVG

* Streamline for Pillbox

* Update path before optional install

* Use $CARGO_HOME instead of $HOME

* Delete code

Attempt to describe all data structures without code.

* Boot RPU from docs, add JsonRpcService

Also, use Rust naming conventions in the block diagrams to
minimize the jump from docs to code.

* Latest code uses tick_height

* Rename bob/ folder to art/

A home for any ASCII art

* Import JSON RPC API

* More mdbook docs

* Add Ncp

* Cleanup links

* Move pipelining description into fullnode description

* Move high-level transaction docs into top-level doc

* Delete unused files
2018-11-06 18:00:58 -07:00
d7ea66b6a1 RPC and Pubsub, bind to 0.0.0.0 2018-11-06 15:45:36 -07:00
371c69d425 Add ledger write stage counters (#1713) 2018-11-06 14:44:54 -08:00
c9c1564d26 Fetch v0.10.5 perf libs (#1727)
- includes SGX enclave for signing
2018-11-06 14:20:22 -08:00
cd18a1b7db t 2018-11-06 14:08:47 -08:00
6aac096c77 Add timeout to prevent a stuck ssh 2018-11-06 14:08:28 -08:00
7b58bd621a Remove node check from client start-up
If the network loses a validator or two, it's the job of the sanity
check to detect this, not the bench clients
2018-11-06 13:57:06 -08:00
9b43b00d5c remove tick_count, leader_scheduler, from broadcast code (#1725) 2018-11-06 13:17:41 -08:00
76694bfcf4 remove entry_writer.rs (#1720) 2018-11-06 12:42:31 -08:00
bfad138bb3 Pass any serializable to Transaction constructor 2018-11-06 11:23:59 -07:00
d8d23c9971 Remove unused debug trace 2018-11-06 09:29:39 -08:00
f77b30e81d Fix link 2018-11-06 09:28:55 -07:00
d379478603 Rename stable testnets back to beta 2018-11-06 09:27:40 -07:00
2600684999 Move testnet docs into readme
Also, described testnet and testnet-perf as stable instead of beta.
2018-11-06 09:27:40 -07:00
54968b59bb Update last_id between client retries
Fixes #1694
2018-11-06 09:06:15 -07:00
6b5d12a8bb Set metrics database correctly 2018-11-06 07:25:18 -08:00
c4b9d5d8b9 Remove stray line 2018-11-05 20:53:34 -08:00
f683817b48 Remove RPU; replace with RPC 2018-11-05 20:30:47 -07:00
52491b467a Update testnet deploy docs 2018-11-05 19:12:55 -08:00
7789fda016 Add testnet-manager pipeline 2018-11-05 17:35:30 -08:00
22abc27be4 add tests for bank.purge() (#1711) 2018-11-05 16:43:27 -08:00
c9138f964b Change token type from i64 to u64
Fixes #1526
2018-11-05 15:25:26 -07:00
c4346e6191 Add testnet pipeline for prebuilt images (#1708)
* Add testnet pipeline for prebuilt images

- It'll speed up testnet testing for released images

* removed quotes from variable

* address review comments

* fix testnet automation error
2018-11-05 13:50:33 -08:00
1a7830f460 Set imageName if G 2018-11-05 13:33:42 -08:00
b418c1abab ignore multinode demo logs 2018-11-05 10:57:51 -08:00
1fbf1d2cf2 Add checkpoint, rollback to to bank (#1662)
add linked-list capability to accounts

change accounts from a linked list to a VecDeque

add checkpoint and rollback for lastids

add subscriber notifications for rollbacks

checkpoint transaction count, too
2018-11-05 09:47:41 -08:00
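A stripped-down sketch of the checkpoint/rollback shape described in this commit (made-up field names, plain u64 balances): each checkpoint pushes the current state onto a VecDeque, and a later rollback pops back to it.
```rust
use std::collections::{HashMap, VecDeque};

/// Hypothetical miniature "bank": just balances and a transaction count.
#[derive(Default)]
struct MiniBank {
    balances: HashMap<String, u64>,
    transaction_count: u64,
    // Saved states; the most recent checkpoint sits at the back.
    checkpoints: VecDeque<(HashMap<String, u64>, u64)>,
}

impl MiniBank {
    fn checkpoint(&mut self) {
        self.checkpoints
            .push_back((self.balances.clone(), self.transaction_count));
    }

    fn rollback(&mut self) {
        if let Some((balances, count)) = self.checkpoints.pop_back() {
            self.balances = balances;
            self.transaction_count = count;
        }
    }
}

fn main() {
    let mut bank = MiniBank::default();
    bank.balances.insert("alice".into(), 100);
    bank.checkpoint();
    bank.balances.insert("alice".into(), 0); // speculative state
    bank.transaction_count += 1;
    bank.rollback(); // discard it
    assert_eq!(bank.balances["alice"], 100);
    assert_eq!(bank.transaction_count, 0);
}
```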
5a85cc4626 Rename buildkite-snap to buildkite-secondary 2018-11-05 08:47:51 -08:00
8041461a07 Bump EC2 validator machine type 2018-11-05 08:47:51 -08:00
2ce72a1683 Update version in readme 2018-11-05 08:05:03 -07:00
eae9372a5d Upgrade GCP CPU-based testnet to 18.04 2018-11-04 19:18:47 -08:00
ed09b2bdb8 Document BPF C program limitations 2018-11-04 12:31:38 -08:00
1d7722043f genesis has 3 entries now 2018-11-02 22:02:13 -07:00
95f9488a70 use default buffer size for index, use BLOB_DATA_SIZE for data buffer (#1693) 2018-11-02 21:52:57 -07:00
e7cbbd8d45 cargo fmt 2018-11-02 19:54:49 -07:00
c8c255ad73 Rename Budget to BudgetExpr 2018-11-02 19:54:49 -07:00
a264f8fa9b Fix |cargo test| 2018-11-02 19:04:59 -07:00
40e945b0c8 Move token_program from src/ to programs/native/ 2018-11-02 18:13:02 -07:00
f3b04894b9 Try harder to snap download 2018-11-03 00:29:13 +00:00
35b7e50166 Rebase on new RFC file naming 2018-11-02 16:52:21 -06:00
6b3f684e2a elw staking rfc revisions 2018-11-02 16:50:06 -06:00
63c66ce765 initial staking design overview 2018-11-02 16:50:06 -06:00
0636399b7a Compute finality computation in new ComputeLeaderFinalityService (#1652)
* Move finality computation into a service run from the banking stage, ComputeLeaderFinalityService

* Change last ids nth to tick height, remove separate tick height from bank
2018-11-02 15:49:14 -07:00
2c74815cc9 ci: correct crates.io publishing order 2018-11-02 15:39:24 -07:00
298bd6479a Add first leader to genesis (#1681)
* Add first leader to genesis entries, consume in genesis.sh

* Set bootstrap leader in the bank on startup, remove instantiation of bootstrap leader from bin/fullnode

* Remove need to initialize bootstrap leader in leader_scheduler, now can be read from genesis entries

* Add separate interface new_with_leader() in mint for creating genesis leader entries
2018-11-02 14:32:05 -07:00
a8481215fa Model the process after Rust's RFC process 2018-11-02 14:55:39 -06:00
b7545b08fa Add process for making architectural changes 2018-11-02 14:55:39 -06:00
cf8f3bcbed Ship native programs in snap 2018-11-01 15:59:41 -07:00
b8534a402d shell 2018-11-01 15:25:27 -07:00
45b9a7f8e9 shell 2018-11-01 14:40:21 -07:00
879431ebcd Add timeout to TcpStream connect, and rename test 2018-11-01 14:13:19 -06:00
102354c218 Add balance check retries 2018-11-01 11:28:33 -06:00
af1283e92c Improve airdrop confirmation logic 2018-11-01 11:28:33 -06:00
6b777b066a Find clang 7 better
If LLVM_DIR is defined, use it to locate clang.  Otherwise use brew on
macOS, and assume clang-7 otherwise
2018-11-01 09:48:38 -07:00
1e01088698 Improve clang install info for Linux 2018-11-01 09:48:38 -07:00
3ea0651078 Rename sol_bpf.h to solana_sdk.h 2018-10-31 23:46:34 -07:00
776b1c2294 sol_bpf.h improvements
- Define NULL
- Add sol_memcmp()
- Use sizeof() more
- Add SOL_ARRAY_SIZE
- Make sol_deserialize() more flexible
2018-10-31 23:46:34 -07:00
dffa2eb04f Do not parallelize deserialize operation (#1663)
Deserialize operations are faster when done serially with the
MT banking stage; the reduced thread context switching helps
improve performance.
2018-10-31 22:12:15 -07:00
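To make the tradeoff concrete, a small sketch of the two variants using rayon (already a dependency of the crate) and a stand-in packet type; because each per-packet deserialize is cheap, the parallel version mostly pays for scheduling and thread hand-offs.
```rust
use rayon::prelude::*;

/// Stand-in for a received packet; a real one carries serialized transaction bytes.
struct Packet {
    data: Vec<u8>,
}

/// Placeholder for the real per-packet deserialize work.
fn deserialize_len(p: &Packet) -> usize {
    p.data.len()
}

/// Serial: cheap per-item work stays on one thread, no hand-offs.
fn deserialize_serial(packets: &[Packet]) -> Vec<usize> {
    packets.iter().map(deserialize_len).collect()
}

/// Parallel: same result, but scheduling overhead can dominate tiny work items.
fn deserialize_parallel(packets: &[Packet]) -> Vec<usize> {
    packets.par_iter().map(deserialize_len).collect()
}

fn main() {
    let packets: Vec<Packet> = (0..4).map(|i| Packet { data: vec![0u8; i] }).collect();
    assert_eq!(deserialize_serial(&packets), deserialize_parallel(&packets));
}
```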
5ecb9da801 Fix up bpf numeric types 2018-10-31 20:53:44 -07:00
00889c5139 Fix bad function arguments (#1682) 2018-10-31 19:55:58 -07:00
af8dc3fd83 Fix snap build
cuda and chacha features required for chacha_cuda
2018-10-31 17:59:31 -07:00
ba884b4e36 Add thin client test for vote functionality, fix sizing errors in vote contract (#1643)
* Added tests to thin client to test VoteContract calls, fix VoteContract sizing errors

* Calculate upper bound on VoteProgram size at runtime, add test for serializing/deserializing a max sized VoteProgram state
2018-10-31 17:47:50 -07:00
6ddd494826 Improve rpc logging 2018-10-31 15:21:55 -06:00
aa2fd3f3bb Storage RFC grammar 2018-10-31 13:44:21 -07:00
cf00354f42 Add storage stage which does storage mining verification for validators 2018-10-31 13:44:21 -07:00
47f1fa3f2e Remove purging of leader id from cluster info (#1642) 2018-10-31 12:30:48 -07:00
db98f7e0b4 Use env variables to disable validator sanity and ledger verification (#1675) 2018-10-31 12:30:33 -07:00
38ee5c4dfb Program may not exit (#1669)
Cap max executed instructions, report number of executed instructions
2018-10-31 10:59:56 -07:00
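A rough sketch of the guard described in #1669, with an invented toy instruction set: the interpreter charges one unit per executed instruction and stops when the budget is exhausted, reporting the count either way.
```rust
/// Invented toy instruction set, for illustration only.
#[derive(Clone, Copy)]
enum Ix {
    Add(u64),
    Jump(usize), // absolute jump target, which makes infinite loops possible
}

const MAX_INSTRUCTIONS: u64 = 10_000;

/// Returns (accumulator, instructions executed, completed?).
fn run(program: &[Ix]) -> (u64, u64, bool) {
    let (mut acc, mut pc, mut executed) = (0u64, 0usize, 0u64);
    while pc < program.len() {
        if executed >= MAX_INSTRUCTIONS {
            // The program may never exit on its own, so force it to.
            return (acc, executed, false);
        }
        executed += 1;
        match program[pc] {
            Ix::Add(n) => {
                acc += n;
                pc += 1;
            }
            Ix::Jump(target) => pc = target,
        }
    }
    (acc, executed, true)
}

fn main() {
    // `loop: add 1; jump loop` would never terminate without the cap.
    let looping = [Ix::Add(1), Ix::Jump(0)];
    let (_, executed, completed) = run(&looping);
    assert!(!completed);
    assert_eq!(executed, MAX_INSTRUCTIONS);
}
```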
aca2f9666d Fix deps (#1672) 2018-10-31 10:12:17 -07:00
b74e085538 SYSTEM_INC_DIRS needs immediate expansion 2018-10-31 07:20:09 -07:00
899de2ff56 Revert inclusion change, fix doc 2018-10-31 07:03:38 -07:00
cf521a5bd2 Fix const 2018-10-31 07:03:38 -07:00
bc13248e1c Fix C programs 2018-10-31 07:03:38 -07:00
0529f36fde Run workspace member's tests (#1666)
Run workspace member's tests
2018-10-30 22:53:36 -07:00
74b4ecb7f3 Upgrade to influx_db_client@0.3.6 2018-10-30 19:44:09 -07:00
333f658eb6 Fix lua_loader tests (#1665) 2018-10-30 18:36:18 -07:00
7cb5c0708b Fetch v0.10.4 which has v100 binary compiled in
This may or may not fix the high latencies seen on the snap build on v100.
The GPU driver will not have to JIT the device code for V100 though, which
is an improvement.
2018-10-30 18:06:16 -07:00
85869552e0 Update testnet scripts to use release tar ball (#1660)
* Update testnet scripts to use release tar ball

* use curl instead of s3cmd
2018-10-30 18:05:38 -07:00
6f9843c14b Publish a tarball of Solana release binaries (#1656)
* Publish a tarball of solana release binaries

* included native programs in Solana release tar

* Remove PR check from publish script
2018-10-30 15:31:52 -07:00
7d44f60e45 Find native program with solana_ prefix 2018-10-30 13:13:37 -07:00
8d16f69bb9 Improve account subscribe/unsubscribe logging 2018-10-30 12:03:35 -07:00
3a73a09391 Avoid panicking when a native library doesn't exist 2018-10-30 12:03:35 -07:00
009c71f7e2 Demote info logs 2018-10-30 12:03:35 -07:00
073d39df44 Add solana_ prefix to loaders so their logs appear in the default RUST_LOG config 2018-10-30 12:03:35 -07:00
ae7222f0df Work around influxdb panic 2018-10-30 12:03:35 -07:00
4d6c54272a Tweak logging 2018-10-30 12:03:35 -07:00
13bfdde228 remove ledger tail code, WINDOW_SIZE begone (#1617)
* remove WINDOW_SIZE, use window.window_size()
* move ledger tail, redundant with ledger-based repair
2018-10-30 10:05:18 -07:00
3cc78d3a41 Added a new remote node configuration script to set rmem/wmem (#1647)
* Added a new remote node configuration script to set rmem/wmem

* Update common.sh for rmem/wmem configuration
2018-10-30 09:17:35 -07:00
45bb97cad6 Permit {INC,LLVM,OUT,SRC,SYSTEM_INC}_DIRs to be overridden 2018-10-30 07:59:07 -07:00
546e4c5696 Remove bpf tictactoe 2018-10-29 21:43:37 -07:00
6b1917b931 Add programs/bpf/c/sdk entries 2018-10-29 20:52:38 -07:00
30b22c8b78 Use NUM_KA 2018-10-29 20:52:38 -07:00
6f5e92e5b3 README updates 2018-10-29 20:52:38 -07:00
cce5c70f29 LD -> LLC 2018-10-29 20:52:38 -07:00
4af7c82ef0 Add extern "C" block 2018-10-29 20:52:38 -07:00
52e5fb7e0c Use #pragma once, it's widely supported
Fix up some spelling too
2018-10-29 20:52:38 -07:00
a013e8ceb1 Rename sol_bpf_c.h to sol_bpf.h 2018-10-29 20:52:38 -07:00
864632b582 slight reformatting 2018-10-29 20:52:38 -07:00
71d6eaacef Apply some const 2018-10-29 20:52:38 -07:00
4aba05d749 Include system includes in .d, remove unneeded tabs 2018-10-29 20:52:38 -07:00
7d335165ec Tune make output 2018-10-29 19:32:47 -07:00
37213209c5 Create programs/bpf/c/sdk/ 2018-10-29 19:10:29 -07:00
fbde9bb731 Run bench-tps for longer duration in testnet (#1638)
- Increased to 2+ hours
2018-10-29 15:03:08 -07:00
f6b1b5ab37 Remove unnecessary checks 2018-10-29 13:27:52 -07:00
7abd456d45 Increase rmem and wmem for remote nodes in testnet (#1635) 2018-10-29 13:04:54 -07:00
f12743de38 Create/publish bpf-sdk tarball 2018-10-29 12:54:57 -07:00
77e10ed757 Add utility to figure the current crate version 2018-10-29 12:54:57 -07:00
ebcb9a2103 Add llvm install info 2018-10-29 10:00:45 -07:00
6fb2e080bc Ignore out/ 2018-10-29 10:00:45 -07:00
3ac5ffc188 Use V=1 for verbosity, easier to type 2018-10-29 10:00:45 -07:00
88187ef282 Find llvm using brew on macOS 2018-10-29 10:00:45 -07:00
489894cb32 Mention logs more 2018-10-27 08:49:52 -07:00
be003970b7 Program_ids were overlapping (#1626)
Program_ids were overlapping
2018-10-26 19:44:53 -07:00
3488ea7d1c Cleanup c programs (#1620)
Cleanup C programs
2018-10-26 19:38:07 -07:00
9a6a399a29 Bump version number to pick up fixed cuda library
Has fix for unaligned memory access in chacha_encrypt_many_sample
function.
2018-10-26 14:57:14 -07:00
7ab65352be Fix featurized integration test (#1621)
Fix featurized integration test
2018-10-26 11:53:44 -07:00
b28fbfa13e Use a smaller test value for window_size
Otherwise this test takes forever to run.
2018-10-26 11:38:55 -07:00
07c656093c Remove tictactoe programs 2018-10-25 21:22:07 -07:00
c9e8346e6a cargo fmt 2018-10-25 17:24:24 -07:00
9e5ac76855 0.11.0 2018-10-25 17:19:07 -07:00
f671b7f63f Publish root crate too 2018-10-25 17:16:18 -07:00
236113e417 cargo fmt 2018-10-25 17:13:41 -07:00
a340b18b19 Upgrade to rust 1.30 2018-10-25 17:13:41 -07:00
f6c8e1a4bf Vote contract (#1552)
* Add Vote Contract

* Move ownership of LeaderScheduler from Fullnode to the bank

* Modified ReplicateStage to consume leader information from bank

* Restart RPC Services in Leader To Validator Transition

* Make VoteContract Context Free

* Remove voting from ClusterInfo and Tpu

* Remove dependency on ActiveValidators in LeaderScheduler

* Switch VoteContract to have two steps 1) Register 2) Vote. Change thin client to create + register a voting account on fullnode startup

* Remove check in leader_to_validator transition for unique references to bank, b/c jsonrpc service and rpcpubsub hold references through jsonhttpserver
2018-10-25 16:58:40 -07:00
160cff4a30 Check for TRIGGERED_BUILDKITE_TAG 2018-10-25 16:37:54 -07:00
48685cf766 0.10.0-pre2 2018-10-25 16:19:31 -07:00
0f32102684 Restrict characters to those supported by semvar_bash 2018-10-25 16:19:00 -07:00
d46682d1f2 Restrict characters to those supported by semvar_bash 2018-10-25 16:12:29 -07:00
55833e20b1 Create Poh Service (#1604)
* Create new Poh Service, replace tick generation in BankingStage
2018-10-25 14:56:21 -07:00
02cfa76916 Plumb GetTransactionCount through solana-wallet 2018-10-25 14:58:51 -06:00
9314eea7e9 Add leader-readiness test to wallet-sanity 2018-10-25 14:58:51 -06:00
1733beabf7 mv common/ sdk/ 2018-10-25 13:26:10 -07:00
471d8f6ff9 Fix up the version references to all other internal crates 2018-10-25 12:54:32 -07:00
e47fcb196b s/solana_program_interface/solana[_-]sdk/g 2018-10-25 12:31:45 -07:00
3ae53961c8 Support prerelease versioning 2018-10-25 12:31:45 -07:00
113b002095 Delete programs/native/move_funds 2018-10-25 11:37:38 -07:00
9447537d8c Increment internal Cargo references to solana_program_interface 2018-10-25 11:03:03 -07:00
7404b8739e Make template headers smaller 2018-10-25 11:51:37 -06:00
7239395d95 Add Issue and PR templates 2018-10-25 11:51:37 -06:00
926d459c8f Script away cargo version bumping 2018-10-25 09:38:58 -07:00
7cabe203dc Sync version with top-level Cargo.toml 2018-10-25 09:38:58 -07:00
1e53f4266a Fetch perf-libs with configurable packet size
sig verify library uses passed in size directly
to get packet size, so rust side can be modified
without changing cuda library.
2018-10-25 08:26:35 -07:00
24b513c3c7 Migrate to latest rbpf (#1605)
Migrate to updated rbpf
2018-10-25 02:58:04 -07:00
b982595c73 Add version check and rustup 2018-10-24 19:48:58 -07:00
af8a36b7fb Exclude chacha_cuda when chacha is disabled 2018-10-24 17:02:46 -07:00
208e7d7943 Explicitly reject transactions larger than PACKET_SIZE 2018-10-24 15:34:27 -07:00
557736f1cf Split leader rotation into separate RFC 2018-10-24 13:16:06 -06:00
61927e1941 Fix compile error for write_entries
Takes a reference now.
2018-10-24 11:31:30 -07:00
fc75827aaf .gitignore *.log 2018-10-24 10:58:27 -07:00
2f2531d921 Add retries to Wallet deploy 2018-10-24 11:13:32 -06:00
d5f20980eb Incorporate preloaded bpf loader 2018-10-24 11:13:32 -06:00
21eae981f9 Add deploy method to solana-wallet 2018-10-24 11:13:32 -06:00
ead7f4287a Storage mining fixups...
* Use IV to make unique identities
* Use hex! macro for hex literal and not string converted to u8 slice
* fix sha sampling to control init/end of sha state
2018-10-24 09:58:41 -07:00
3b33150cfb Bump drone read timeout to 10s
The previous timeout of 3s was occasionally not generous enough
2018-10-24 08:52:41 -07:00
6d34a68e54 Ignore test_leader_restart_validator_start_from_old_ledger (#1586)
Ignore test_leader_restart_validator_start_from_old_ledger
2018-10-23 18:10:31 -07:00
5c483c9928 remove unused variable 2018-10-23 16:52:56 -06:00
a68c99d782 Fix transaction count on testnet dashboard 2018-10-23 16:52:56 -06:00
0aebbae909 Fix message 2018-10-23 15:45:58 -07:00
a3a2215bda Fix warning 2018-10-23 15:45:58 -07:00
eb377993b3 Debug scripts point to debug flavor (#1585) 2018-10-23 14:48:50 -07:00
5ca52d785c Preload BPF loader (#1573)
Preload BPF loader
2018-10-23 14:44:41 -07:00
8d9912b4e2 Move ledger write to its own stage (#1577)
* Move ledger write to its own stage

- Also, rename write_stage to leader_vote_stage, as write functionality
  is moved to a different stage

* Address review comments

* Fix leader rotation test failure

* address review comments
2018-10-23 14:42:48 -07:00
c77b1c9687 i 2018-10-23 14:14:09 -07:00
8849ecd772 capture consensus discussion of 10/10/2018 2018-10-23 15:07:58 -06:00
7977b97227 Surface AccountInUse to JSON RPC users so they know to retry the transaction 2018-10-23 13:55:30 -07:00
4f34822900 Improve logging on various error conditions 2018-10-23 13:40:59 -07:00
bbb38ac106 Increase window size (#1578)
Addresses the following problem
- Validators are not able to keep up with the leader
- The future blobs (outside of window) get dropped
- The validators won't process repair requests for these future blobs
2018-10-23 10:25:01 -07:00
ce934a547e Storage RFC validator incentive clarification 2018-10-23 09:46:38 -06:00
16b19d35dd Disable test_boot_validator_from_file (#1576) 2018-10-23 00:47:15 -07:00
45cfa5b574 Add instruction to transfer account ownership 2018-10-20 21:54:25 -05:00
df9ccce5b2 Remove hostname() from calls to metrics as it's expensive operation (#1557) 2018-10-20 06:38:20 -07:00
f8516b677a Load program data in chunks (#1556)
Load program data in chunks
2018-10-19 18:28:38 -07:00
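A sketch of the chunking idea (hypothetical WriteChunk shape and chunk size, not the loader's actual wire format): the program bytes are split into pieces small enough to fit in one transaction, each tagged with the offset it belongs at.
```rust
/// Hypothetical chunk shape: each chunk becomes one loader "write" at an offset.
struct WriteChunk {
    offset: u32,
    bytes: Vec<u8>,
}

const CHUNK_SIZE: usize = 256; // small enough to fit in a single transaction (illustrative)

fn chunk_program(elf: &[u8]) -> Vec<WriteChunk> {
    elf.chunks(CHUNK_SIZE)
        .enumerate()
        .map(|(i, part)| WriteChunk {
            offset: (i * CHUNK_SIZE) as u32,
            bytes: part.to_vec(),
        })
        .collect()
}

fn main() {
    let elf = vec![7u8; 1000];
    let chunks = chunk_program(&elf);
    assert_eq!(chunks.len(), 4); // 256 + 256 + 256 + 232 bytes

    // Reassembling from the offsets recovers the original program bytes.
    let mut rebuilt = vec![0u8; elf.len()];
    for c in &chunks {
        let start = c.offset as usize;
        rebuilt[start..start + c.bytes.len()].copy_from_slice(&c.bytes);
    }
    assert_eq!(rebuilt, elf);
}
```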
dfde83bdce Wildcard early OOM deb package revision (#1554) 2018-10-19 14:17:19 -07:00
cb0f19e4f1 Shield rerun-if-changed under the feature flags so
that cargo watch doesn't cause a rebuild on every iteration.
2018-10-19 12:07:29 -07:00
26b99d3f85 Ensure witness and timestamp keys are signed
Before this patch, an attacker could point Budget instructions to
unsigned keys, and authorize a transaction from an unauthorized
party.
2018-10-19 10:06:59 -06:00
2f9c0d1d9e Add method to lookup signed keys 2018-10-19 10:06:59 -06:00
0423cafbeb Cleanup and update Smart Contracts Engine RFC to what is currently in the code (#1539)
* Cleanup and update to the state of the code

* update

* render

* render

* comments on memory allocation
2018-10-19 06:08:49 -07:00
0bd1412562 Switch leader scheduler to use PoH ticks instead of Entry height (#1519)
* Add PoH height to process_ledger()

* Moved broadcast_stage Leader Scheduling logic to use Poh height instead of entry_height

* Moved LeaderScheduler logic to PoH in ReplicateStage

* Fix Leader scheduling tests to use PoH instead of entry height

* Change is_leader detection in repair() to use PoH instead of entry height

* Add tests to LeaderScheduler for new functionality

* fix Entry::new and genesis block PoH counts

* Moved LeaderScheduler to PoH ticks

* Cleanup to resolve PR comments
2018-10-18 22:57:48 -07:00
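For intuition, a simplified sketch (made-up constants and names) of scheduling off PoH ticks rather than entry height: the slot is derived from the tick height and the leader is looked up from a precomputed rotation.
```rust
// Hypothetical parameters, for illustration only.
const TICKS_PER_SLOT: u64 = 64;

/// Map a PoH tick height to the leader for that slot.
fn leader_at_tick(schedule: &[String], tick_height: u64) -> &str {
    let slot = (tick_height / TICKS_PER_SLOT) as usize;
    schedule[slot % schedule.len()].as_str()
}

fn main() {
    let schedule: Vec<String> = vec!["leader-a".into(), "leader-b".into()];
    assert_eq!(leader_at_tick(&schedule, 0), "leader-a");   // slot 0
    assert_eq!(leader_at_tick(&schedule, 63), "leader-a");  // last tick of slot 0
    assert_eq!(leader_at_tick(&schedule, 64), "leader-b");  // rotation at slot 1
    assert_eq!(leader_at_tick(&schedule, 128), "leader-a"); // wraps around
}
```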
0339642e77 Added TicTacToe Dashboard and tests (#1547)
* Add tictactoe dashboard and tests
2018-10-18 14:19:25 -07:00
37a0b7b132 Initial validator code for rust side hooks for chacha cuda parallel encrypt 2018-10-18 13:50:19 -07:00
c30b605047 Actually submit the storage mining proof
Get an airdrop so the replicator can submit a mining transaction

Some other minor type cleanup.
2018-10-18 13:50:19 -07:00
76076d6fad move last_id age checking into the HashMap
* allows for simpler chaining of banks
  * looks 1.5-2% faster than looping through a VecDeque

TODO: remove timestamp()?
2018-10-18 11:07:00 -07:00
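A sketch of the lookup this commit describes, with assumed field names: the bank keeps a HashMap from last_id to the tick height at which it was registered, so the age check becomes one lookup and a subtraction instead of a scan.
```rust
use std::collections::HashMap;

const MAX_ENTRY_IDS: u64 = 1024; // how many ticks an id stays valid (illustrative)

#[derive(Default)]
struct LastIds {
    tick_height: u64,
    // last_id -> tick height at which it was registered
    entries: HashMap<[u8; 32], u64>,
}

impl LastIds {
    fn register_tick(&mut self, id: [u8; 32]) {
        self.tick_height += 1;
        self.entries.insert(id, self.tick_height);
    }

    /// O(1) age check instead of scanning a deque of recent ids.
    fn is_valid(&self, id: &[u8; 32]) -> bool {
        match self.entries.get(id) {
            Some(&registered) => self.tick_height - registered < MAX_ENTRY_IDS,
            None => false,
        }
    }
}

fn main() {
    let mut ids = LastIds::default();
    let id = [1u8; 32];
    ids.register_tick(id);
    assert!(ids.is_valid(&id));
    assert!(!ids.is_valid(&[2u8; 32])); // never registered
}
```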
0a819ec4e2 Programs were not spawned by SystemProgram (#1533)
* SystemProgram spawns programs
2018-10-18 10:33:30 -07:00
57a717056e Delegate accounts now record the original approved amount 2018-10-18 08:53:25 -07:00
856c48541f Restore elaborate attack
The test is showing how you can sneak by verify_plan() but not
verify_signature().
2018-10-18 08:46:02 -06:00
2045091c4f Add SystemProgram::Move ix to Budget tx 2018-10-18 08:46:02 -06:00
03ac5a6eef Move all source tokens into Budget account
Budget now assumes the source account holds all tokens the program
should spend.

Note: the static guarantees implied by verify_plan() are meaningless
under the new contract engine. The bank no longer calls it. This
serves as a nice example of where comparing code coverage between
integration tests and unit tests would have shown us where a
change rendered unit tests meaningless.
2018-10-18 08:46:02 -06:00
32fadc9c30 Merge debits and credits
Debits no longer need to be applied before credits. Instead, we
lock any accounts we'd debit and so error out on the second attempt
to lock the same account.
2018-10-18 08:46:02 -06:00
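A sketch of the locking rule above, using a plain HashSet and invented names: the first transaction in a batch to lock an account wins, and a later transaction touching the same account gets AccountInUse and must retry, so debits no longer have to be ordered before credits.
```rust
use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum BankError {
    AccountInUse,
}

#[derive(Default)]
struct AccountLocks {
    locked: HashSet<String>,
}

impl AccountLocks {
    /// Try to lock every account a transaction touches; all or nothing.
    fn lock(&mut self, keys: &[&str]) -> Result<(), BankError> {
        if keys.iter().any(|k| self.locked.contains(*k)) {
            return Err(BankError::AccountInUse);
        }
        for k in keys {
            self.locked.insert((*k).to_string());
        }
        Ok(())
    }

    fn unlock(&mut self, keys: &[&str]) {
        for k in keys {
            self.locked.remove(*k);
        }
    }
}

fn main() {
    let mut locks = AccountLocks::default();
    assert!(locks.lock(&["alice", "bob"]).is_ok());
    // A second transaction touching "bob" in the same batch must retry later.
    assert_eq!(locks.lock(&["bob", "carol"]), Err(BankError::AccountInUse));
    locks.unlock(&["alice", "bob"]);
    assert!(locks.lock(&["bob", "carol"]).is_ok());
}
```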
15a89d4f17 Boot Contract type from Budget
In the old bank (before the contract engine), Contract wasn't specific
to Budget. It provided the same service as what is now called
SystemProgram::Move, but without requiring a separate account.
2018-10-18 08:46:02 -06:00
d0f43e9934 consolidate tmp ledgers 2018-10-18 08:45:31 -06:00
31e779d3f2 Added counters to track more metrics on dashboard (#1535)
- Total number of IP packets TX/RX from all nodes in the testnet
- Last consumed index on validator
- Last transmitted index on leader
2018-10-17 17:32:50 -07:00
30c79fd40d Change validator node machine type (#1537)
- The current nodes are using lower RAM compared to leader/clients
2018-10-17 17:16:50 -07:00
639c93460a Write stage optimizations (#1534)
- Testnet dashboard shows that channel pressure for write stage
  is incrementing on every iteration of write.
- This change optimizes ledger writing by removing cloning of map
  and reducing calls to flush
2018-10-17 13:02:32 -07:00
7611730cdb move off /tmp 2018-10-17 12:15:30 -07:00
9df9c1433a remove another use of /tmp 2018-10-17 12:15:30 -07:00
4ea422bcec run integration tests serially 2018-10-17 11:37:10 -07:00
6074e4f962 Attempt to stabilize the test suite
The integration tests are allowed to open sockets, so running them
in parallel may cause "Too many open files" errors. This patch
runs the unit tests in parallel and the integration test serially.
2018-10-17 11:37:10 -07:00
d52e6d01ec typo in readme 2018-10-17 02:04:05 -06:00
63caca33be SystemProgram test was failing due to expected panic 2018-10-16 18:02:44 -07:00
64efa62a74 enable logging in loaders 2018-10-16 16:55:11 -07:00
912eb5e8e9 remove bank.is_leader, dead code (#1516) 2018-10-16 15:26:44 -07:00
bb628e8495 Rename loaders 2018-10-16 14:27:08 -07:00
d0c19c2c97 cargo fmt 2018-10-16 14:11:04 -07:00
926fdb7519 Rename dynamic_program.rs to native_loader.rs 2018-10-16 14:11:04 -07:00
c886625c83 Move from solana/rbpf fork to qmonnet/rbpf (#1511) 2018-10-16 13:13:54 -07:00
f6c10d8a2e Add channel pressure for validator TVU stages (#1509) 2018-10-16 12:54:23 -07:00
2bd877528f Par process entries (#1499)
* Parallel entry processor.
2018-10-16 12:09:48 -07:00
d09889b1dd Program bank integration (#1462)
Native, BPF and Lua loaders integrated into the bank
2018-10-16 09:43:49 -07:00
1b2e9122d5 Pubsub listen on random open port when rpc does (quiet some test errors) 2018-10-16 00:11:26 -06:00
7424388924 Fix session drop 2018-10-16 00:11:26 -06:00
537436bd5e RPC PubSub now uses a well-known socket 2018-10-16 00:11:26 -06:00
32fc0cd7e9 Fix bug introduced during RUST_LOG escaping (#1507)
* Fix bug introduced during RUST_LOG escaping
- remote node configuration should not be quoted

* shellcheck disable SC2090
2018-10-15 16:49:22 -07:00
fb99494858 Improve rpc code coverage (#1487) 2018-10-15 11:01:40 -06:00
5b4d4b97bc Upgrade to latest stable Rust, 1.29.2 2018-10-15 09:54:24 -06:00
c5180c8092 Permit RUST_LOG overrides 2018-10-14 12:40:37 -07:00
515c200d86 Refactor and add test for new Entry::serialized_size() 2018-10-14 10:53:47 -06:00
32aab82e32 Don't allocate to see if transactions will fit in a blob 2018-10-14 10:53:47 -06:00
6aaa350145 efficiently pack gossip responses and only respond up to max size. (#1493) 2018-10-14 06:45:02 -07:00
d3b4dfe104 Add bool return to entrypoint signature to permit programs to fail transactions 2018-10-13 20:01:43 -07:00
9fc30f6db4 Escape RUST_LOG configuration in remote-node.sh (#1489)
* Escape RUST_LOG configuration in remote-node.sh

- If it was set to #, it was causing other parameters to be commented out

* escape other variables as well

* disabled shell check

* Fix shellcheck error
2018-10-13 13:35:54 -07:00
2d0f07091d Handle dynamic program dlopen failures gracefully 2018-10-13 11:31:10 -07:00
3828eda507 Demote log messages 2018-10-13 11:31:10 -07:00
1e736ec16d Demote log messages 2018-10-12 20:16:57 -07:00
bba6437ea9 Use a single structure for last_ids and last_ids_sigs 2018-10-12 16:39:35 -07:00
e5ab9a856c Upload bench output as build artifacts (#1478)
* Upload bench output as build artifacts

* Fix tags types

* Pull previous stats from metrics

* Change the default branch for comparison

* Fix formatting

* Fix build errors

* Address review comments

* Dedup some common code

* Add eval for channel info to find branch name
2018-10-12 15:13:10 -07:00
1515bba9c6 Use cluster_info in rpc to get current leader addresses (#1480) 2018-10-12 14:25:56 -06:00
14a9ef4bbe move PoH verification off bank.last_id() (#1476) 2018-10-12 11:50:34 -07:00
041040c659 pubsub.rs -> rpc_pubsub.rs 2018-10-12 08:39:06 -07:00
47f69f2d24 1) Switch broken tests to generate an empty tick in their ledgers to use as last_id, 2) Fix bug where the PoH generator in BankingStage referenced the last entry instead of the last tick on startup, causing ledger verification to fail on the new tick added by the PoH generator (#1479) 2018-10-12 00:39:10 -07:00
9dd4dc2088 Mark failing tests as ignore 2018-10-11 15:32:36 -07:00
b534c32ee3 New minor version for jsonrpc crates 2018-10-11 13:35:06 -06:00
d2712f1457 Specify patch for jsonrpc crates 2018-10-11 11:38:14 -07:00
183f560d06 Add raw entries interface to ledger for getting slices as [u8] 2018-10-11 09:40:34 -07:00
ae150c0897 Remove getAddress, it doesn't exist 2018-10-11 08:28:39 -07:00
606e1396cf Fix link 2018-10-11 08:25:38 -07:00
5c85e037f8 Tick entry ids as only valid last_ids (#1441)
Generate tick entry ids and only register ticks as the last_id expected by the bank.  Since the bank is MT, the in-flight pipeline of transactions cannot be close to the end of the queue or there is a high possibility that a starved thread will encode an expired last_id into the ledger.  The banking_stage therefore uses a shorter age limit for encoded last_ids than the validators.

Bench client doesn't send transactions that are older than 30 seconds.
2018-10-10 17:23:06 -07:00
5c523716aa Ship native programs 2018-10-10 16:49:48 -07:00
5f8cbf359e Use cdylib to avoid runtime libstd dependencies 2018-10-10 16:49:48 -07:00
e83834e6be Build native programs in release configuration 2018-10-10 16:49:48 -07:00
02225aa95c Look for native programs in same directory as the current executable 2018-10-10 16:49:48 -07:00
9931ac9780 Leader scheduler plumbing (#1440)
* Added LeaderScheduler module and tests

* plumbing for LeaderScheduler in Fullnode + tests. Add vote processing for active set to ReplicateStage and WriteStage

* Add LeaderScheduler plumbing for Tvu, window, and tests

* Fix bank and switch tests to use new LeaderScheduler

* move leader rotation check from window service to replicate stage

* Add replicate_stage leader rotation exit test

* removed leader scheduler from the window service and associated modules/tests

* Corrected is_leader calculation in repair() function in window.rs

* Integrate LeaderScheduler with write_stage for leader to validator transitions

* Integrated LeaderScheduler with BroadcastStage

* Removed gossip leader rotation from crdt

* Add multi validator, leader test

* Comments and cleanup

* Remove unneeded checks from broadcast stage

* Fix case where a validator/leader need to immediately transition on startup after reading ledger and seeing they are not in the correct role

* Set new leader in validator -> validator transitions

* Clean up for PR comments, refactor LeaderScheduler from process_entry/process_ledger_tail

* Cleaned out LeaderScheduler options, implemented LeaderScheduler strategy that only picks the bootstrap leader to support existing tests, drone/airdrops

* Ignore test_full_leader_validator_network test due to bug where the next leader in line fails to get the last entry before rotation (b/c it hasn't started up yet). Added a test test_dropped_handoff_recovery to track this bug
2018-10-10 16:49:41 -07:00
2ba2bc72ca Cleanup multisig lua 2018-10-10 17:17:17 -06:00
45b8ba9ede Demo M-N multisig library in Lua 2018-10-10 17:17:17 -06:00
40968e09b7 Do a *little* more than noop 2018-10-10 15:57:30 -07:00
262f26cf76 SystemProgram transactions now fail on invalid arguments 2018-10-10 15:19:03 -07:00
785c619198 Add pubsub module for rpc info subscriptions (#1439) 2018-10-10 14:51:43 -06:00
24a993710d Avoid panic when account.source is None 2018-10-10 10:53:00 -07:00
c240bb12ae Change buildkite agent for testnet automation 2018-10-09 15:04:55 -07:00
eed3b9db94 Add ERC20-like Token program 2018-10-09 12:53:37 -07:00
29a8823db1 Env variables for testnet-automation parameters (#1455)
- This will enable us to create custom pipelines for field events
2018-10-09 11:50:56 -07:00
a80955eacb Change format of data for TPS/Finality metrics in testnet automation (#1446)
* Change format of data for TPS/Finality metrics in testnet automation

* Revert number of nodes for testnet automation

* Split python command to its own script

* Fix python command line arguments
2018-10-09 10:35:01 -07:00
9716c3de71 Add an abort test to justify a key field 2018-10-09 11:06:48 -06:00
34fa3208e0 Demo self-modifying Lua program
Also, drop dependency on bincode.
2018-10-09 11:06:48 -06:00
9c4e19958b Use accounts[1] for Lua code and tx userdata as arg data
This makes the Lua version nearly identical to the C one.
2018-10-09 11:06:48 -06:00
0403299728 Add context-free Lua smart contracts
lua_State is not preserved across runs and account userdata is not converted into
Lua values. All this allows us to do is manipulate the number of tokens
in each account and DoS the Fullnode with those three little words,
"repeat until false".

Why bother? Research. rlua's project goals are well-aligned with the LAMPORT runtime.

What's next:
* rlua to add security limits, such as number of instructions executed
* Add a way to deserialize Account::userdata OR use Account::program_id
  to look up a metatable for lua_newuserdata().
2018-10-09 11:06:48 -06:00
95701114e3 Crdt -> ClusterInfo 2018-10-09 03:49:39 -06:00
a99d17c3ac put temp, test files in OUT_DIR (#1448) 2018-10-08 16:15:17 -07:00
517149d325 Move rpc request methods from wallet into separate module 2018-10-08 13:02:08 -06:00
32aa2575b5 Purge BudgetTransaction from entry 2018-10-08 11:34:04 -07:00
8fe7b96629 Purge BudgetTransaction from banking_stage 2018-10-08 11:34:04 -07:00
9350619afa log to influx once (#1438) 2018-10-06 14:37:14 -07:00
d8d8f0bfc8 Fund all the keys with move many transactions (#1436)
* Fund all the keys with move many transactions

* logs
2018-10-05 16:45:27 -07:00
0a39722719 Add support to trigger testnet from a PR (#1434)
* Add support for different node counts

* Update variable names

* Delete network even after failures

* Add array for node counts

* Changed number of nodes to a space separated string of numbers

* Adjust number of nodes

* Snap will not be published if the env variable DO_NOT_PUBLISH_SNAP is set

* Address review comments

* Replaced influx db URL
2018-10-05 16:32:05 -07:00
9c0fa4d1d2 Upload coverage HTML reports (#1421)
Uploads two reports to Buildkite, one from cargo-cov and one from lcov via grcov.  The lcov one is busted on linux and is what we need to bring codecov.io back up again. It works great on macos if you wanted to generate them locally and prefer lcov HTML reports.

* Also comment out non-coverage build to speed things up.
2018-10-05 10:17:35 -07:00
da0404ad03 Reduce maintenance of maintainers list 2018-10-04 23:05:08 -07:00
b508fdb62c Cleanup field names 2018-10-04 16:51:05 -07:00
680f90df21 Fix comment 2018-10-04 14:21:06 -07:00
1a68807ad9 Enable mt-bank (#1368)
* Enable mt-bank

* cleanup and interleaving lock tests
2018-10-04 13:15:54 -07:00
d901767b54 Makefile is not relevant 2018-10-04 10:35:48 -07:00
13d4443d4d Add BPF support & C-based BPF tic-tac-toe (#1422)
Add initial support for BPF and a C port of tictactoe
2018-10-04 09:44:44 -07:00
74b63c12a0 Add tests to LeaderScheduler to increase code coverage 2018-10-03 21:58:29 -07:00
cd42f6591a PR fixes - remove redundant case 2018-10-03 21:58:29 -07:00
5491422b12 Fix validator_to_leader_transition test to not start up tpu after shutting down tvu, as the tpu now outputs ticks that will mess up the verification check 2018-10-03 21:58:29 -07:00
23f3ff3cf0 Added LeaderScheduler module and tests 2018-10-03 21:58:29 -07:00
f90488c77b Demote 'not enough peers in crdt table' log message 2018-10-02 22:00:54 -07:00
beb4536841 Run a fullnode+drone automatically when the container starts up 2018-10-02 18:09:35 -07:00
3fa46dd66d Add replicator sha sampling
replicator will submit mining proofs with the result of sampling
the encrypted file with a hashing algorithm.
2018-10-02 17:04:46 -07:00
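A toy version of the sampling idea (sha2 crate with its current Digest API, arbitrary sample size and offsets): hash a handful of slices of the encrypted segment rather than the whole file, and submit the resulting digest as the mining proof.
```rust
use sha2::{Digest, Sha256};

const SAMPLE_SIZE: usize = 64; // bytes hashed per sample offset (illustrative)

/// Hash a few samples of `encrypted` at the given offsets.
fn sample_proof(encrypted: &[u8], offsets: &[usize]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    for &off in offsets {
        let end = (off + SAMPLE_SIZE).min(encrypted.len());
        if off < encrypted.len() {
            hasher.update(&encrypted[off..end]);
        }
    }
    hasher.finalize().into()
}

fn main() {
    let encrypted = vec![0xabu8; 4096]; // stands in for an encrypted ledger segment
    let proof = sample_proof(&encrypted, &[0, 1024, 2048]);
    println!("mining proof digest: {:x?}", &proof[..8]);
}
```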
ad5fcf778f Publish minimal Solana docker images to dockerhub 2018-10-02 16:57:48 -07:00
83b000ae88 Remove SNAP_ prefix 2018-10-02 16:57:48 -07:00
33e179caa6 Update sha2 requirement from 0.7.0 to 0.8.0
Updates the requirements on [sha2](https://github.com/RustCrypto/hashes) to permit the latest version.
- [Release notes](https://github.com/RustCrypto/hashes/releases)
- [Commits](https://github.com/RustCrypto/hashes/commits/sha2-v0.8.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-10-02 09:00:05 -06:00
b1e941cab9 Return all instances 2018-10-01 07:51:48 -07:00
6db961d256 Correct comment 2018-09-30 00:08:09 -07:00
83409ded59 Correctly deserialize large userdata 2018-09-29 19:39:54 -07:00
396b2e9772 Ignore keep alive for completed games 2018-09-29 19:39:54 -07:00
94459deb94 Disable codecov.io reporting 2018-09-28 19:19:16 -07:00
660af84b8d Use the same versions of llvm-cov and libprofile 2018-09-28 19:19:16 -07:00
7b31020903 Add back llvm-dev for llvm-cov 2018-09-28 19:19:16 -07:00
9a4143b4d9 Upgrade llvm-dev and boot kcov
Need clang-dev, not llvm-dev because cargo-cov looks for libprofile
in a clang installation directory.
2018-09-28 19:19:16 -07:00
aebc47ad55 Attempt coverage reporting 2018-09-28 19:19:16 -07:00
b6b5455917 Fix test in coverage build 2018-09-28 19:19:16 -07:00
5bc01cd51a Revive code coverage 2018-09-28 19:19:16 -07:00
c79acac37b Add tic-tac-toe dashboard program 2018-09-28 18:48:34 -07:00
a5f2aa6777 s/grid/board/g 2018-09-28 18:48:34 -07:00
4169e5c510 Simplify game setup messaging 2018-09-28 18:48:34 -07:00
0727c440b3 Add KeepAlive message so players can detect abandoned games 2018-09-28 18:48:34 -07:00
19a7ff0c43 Pin down nightly in benchmark build 2018-09-28 19:29:50 -06:00
5f18403199 Upgrade nightly 2018-09-28 19:29:50 -06:00
9f325fca09 Re-enable cargo audit 2018-09-28 17:53:41 -06:00
10d08acefa Reenable cargo audit 2018-09-28 17:53:41 -06:00
52d50e6bc4 Update for new solana-jsonrpc 2018-09-28 17:53:41 -06:00
e7de7c32db Transactions with multiple programs. (#1381)
Transactions contain a vector of instructions that are executed atomically.
Bench shows a 2.3x speed up when using 5 instructions per tx.
2018-09-28 16:16:35 -07:00
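A minimal sketch (hypothetical, simplified types again) of what "a vector of instructions executed atomically" means at the data-structure level; the signature and last_id overhead is paid once per transaction, which is where the reported speed-up comes from.
```rust
/// Hypothetical, simplified shapes; not the real solana types.
struct Instruction {
    program_id: u8,    // index of the program to run
    accounts: Vec<u8>, // indexes into the transaction's account_keys
    userdata: Vec<u8>, // opaque input for that program
}

struct Transaction {
    last_id: [u8; 32],              // recent PoH id, checked once per transaction
    instructions: Vec<Instruction>, // all succeed or the whole transaction fails
}

fn main() {
    let tx = Transaction {
        last_id: [0u8; 32],
        instructions: (0..5u8)
            .map(|i| Instruction { program_id: 0, accounts: vec![0, 1], userdata: vec![i] })
            .collect(),
    };
    // Five instructions, but only one signature check and one last_id check.
    assert_eq!(tx.instructions.len(), 5);
    assert_eq!(tx.last_id, [0u8; 32]);
}
```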
a5f07638ec Use static str define for ledger files 2018-09-28 14:23:37 -07:00
aa2a3fe201 Add chacha module to encrypt ledger files 2018-09-28 14:23:37 -07:00
abd13ba4ca move program tests to integration 2018-09-28 11:30:10 -07:00
485ba093b3 Install kcov to CI environment 2018-09-28 11:20:27 -06:00
36b18e4fb5 Create new wallet on each run of wallet-sanity 2018-09-28 07:39:31 -07:00
8d92232949 Specify zone 2018-09-28 07:32:49 -07:00
e4d8c094a4 Include -z when deleting network 2018-09-27 21:27:09 -07:00
d26e1c51a9 0.10.0 2018-09-27 16:38:53 -07:00
398 changed files with 46184 additions and 14739 deletions

31
.buildkite/env/README.md vendored Normal file

@ -0,0 +1,31 @@
[ejson](https://github.com/Shopify/ejson) and
[ejson2env](https://github.com/Shopify/ejson2env) are used to manage access
tokens and other secrets required for CI.
#### Setup
```bash
$ sudo gem install ejson ejson2env
```
then obtain the necessary keypair and place it in `/opt/ejson/keys/`.
#### Usage
Run the following command to decrypt the secrets into the environment:
```bash
eval $(ejson2env secrets.ejson)
```
#### Managing secrets.ejson
To decrypt `secrets.ejson` for modification, run:
```bash
$ ejson decrypt secrets.ejson -o secrets_unencrypted.ejson
```
Edit, then run the following to re-encrypt the file **BEFORE COMMITTING YOUR
CHANGES**:
```bash
$ ejson encrypt secrets_unencrypted.ejson
$ mv secrets_unencrypted.ejson secrets.ejson
```

10
.buildkite/env/secrets.ejson vendored Normal file

@ -0,0 +1,10 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]",
"CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]",
"INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]",
"INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]",
"INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]"
}
}

View File

@ -3,15 +3,14 @@
#
# Save target/ for the next CI build on this machine
#
if [[ -n $CARGO_TARGET_CACHE_NAME ]]; then
(
d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
mkdir -p "$d"
set -x
rsync -a --delete --link-dest="$PWD" target "$d"
du -hs "$d"
)
fi
(
set -x
d=$HOME/cargo-target-cache/"$BUILDKITE_LABEL"
mkdir -p "$d"
set -x
rsync -a --delete --link-dest="$PWD" target "$d"
du -hs "$d"
)
#
# Add job_stats data point

View File

@ -1,4 +1,7 @@
#!/bin/bash -e
#!/usr/bin/env bash
set -e
eval "$(ejson2env .buildkite/env/secrets.ejson)"
# Ensure the pattern "+++ ..." never occurs when |set -x| is set, as buildkite
# interprets this as the start of a log group.
@ -8,20 +11,19 @@ export PS4="++"
#
# Restore target/ from the previous CI build on this machine
#
[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || (
d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
(
set -x
d=$HOME/cargo-target-cache/"$BUILDKITE_LABEL"
if [[ -d $d ]]; then
du -hs "$d"
read -r cacheSizeInGB _ < <(du -s --block-size=1000000000 "$d")
if [[ $cacheSizeInGB -gt 5 ]]; then
if [[ $cacheSizeInGB -gt 10 ]]; then
echo "$d has gotten too large, removing it"
rm -rf "$d"
fi
fi
mkdir -p "$d"/target
set -x
rsync -a --delete --link-dest="$d" "$d"/target .
)

20
.buildkite/pipeline-upload.sh Executable file

@ -0,0 +1,20 @@
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
# steps:
# - command: ".buildkite/pipeline-upload.sh"
#
set -e
cd "$(dirname "$0")"/..
buildkite-agent pipeline upload ci/buildkite.yml
if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
# Add helpful link back to the corresponding Github Pull Request
buildkite-agent annotate --style info --context pr-backlink \
"Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
fi

6
.github/ISSUE_TEMPLATE.md vendored Normal file

@ -0,0 +1,6 @@
#### Problem
#### Proposed Solution

5
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@ -0,0 +1,5 @@
#### Problem
#### Summary of Changes
Fixes #

22
.gitignore vendored

@ -1,16 +1,20 @@
Cargo.lock
/target/
/book/html/
/book/src/img/
/book/src/tests.ok
**/*.rs.bk
.cargo
# node configuration files
# node config that is rsynced
/config/
/config-private/
/config-drone/
/config-validator/
/config-client/
/multinode-demo/test/config-client/
# node config that remains local
/config-local/
# test temp files, ledgers, etc.
/farf/
# log files
*.log
log-*.txt
# intellij files
/.idea/
/solana.iml

View File

@ -17,7 +17,7 @@ Rust coding conventions
* All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly:
```rust
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#[allow(clippy::too_many_arguments)]
```
Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`.
@ -30,7 +30,7 @@ Rust coding conventions
* For function and method names, use `<verb>_<subject>`. For unit tests, that verb should
always be `test` and for benchmarks the verb should always be `bench`. Avoid namespacing
function names with some arbitrary word. Avoid abreviating words in function names.
function names with some arbitrary word. Avoid abbreviating words in function names.
* As they say, "When in Rome, do as the Romans do." A good patch should acknowledge the coding
conventions of the code that surrounds it, even in the case where that code has not yet been
@ -43,11 +43,27 @@ Terminology
Inventing new terms is allowed, but should only be done when the term is widely used and
understood. Avoid introducing new 3-letter terms, which can be confused with 3-letter acronyms.
Some terms we currently use regularly in the codebase:
[Terms currently in use](book/src/terminology.md)
* fullnode: n. A fully participating network node.
* hash: n. A SHA-256 Hash.
* keypair: n. A Ed25519 key-pair, containing a public and private key.
* pubkey: n. The public key of a Ed25519 key-pair.
* sigverify: v. To verify a Ed25519 digital signature.
Proposing architectural changes
---
Solana's architecture is described by a book generated from markdown files in
the `book/src/` directory, maintained by an *editor* (currently @garious). To
change the architecture, you'll need to at least propose a change to the content
under the [Proposed
Changes](https://solana-labs.github.io/solana/proposals.html) chapter. Here's
the full process:
1. Propose a change to the architecture by creating a PR that adds a
markdown document to the directory `book/src/` and references it from the
[table of contents](book/src/SUMMARY.md). Add the editor and any relevant
*maintainers* to the PR review.
2. The PR being merged indicates your proposed change was accepted and that the
editor and maintainers support your plan of attack.
3. Submit PRs that implement the proposal. When the implementation reveals the
need for tweaks to the architecture, be sure to update the proposal and have
that change reviewed by the same people as in step 1.
4. Once the implementation is complete, the editor will then work to integrate
the document into the book.

2987
Cargo.lock generated Normal file

File diff suppressed because it is too large

View File

@ -1,120 +1,68 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.9.0"
version = "0.11.0"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
homepage = "https://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
]
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"
[[bin]]
name = "solana-upload-perf"
path = "src/bin/upload-perf.rs"
[[bin]]
name = "solana-bench-streamer"
path = "src/bin/bench-streamer.rs"
[[bin]]
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"
[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"
[[bin]]
name = "solana-replicator"
path = "src/bin/replicator.rs"
[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"
[[bin]]
name = "solana-fullnode-config"
path = "src/bin/fullnode-config.rs"
[[bin]]
name = "solana-genesis"
path = "src/bin/genesis.rs"
[[bin]]
name = "solana-ledger-tool"
path = "src/bin/ledger-tool.rs"
[[bin]]
name = "solana-keygen"
path = "src/bin/keygen.rs"
[[bin]]
name = "solana-wallet"
path = "src/bin/wallet.rs"
edition = "2018"
[badges]
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[features]
unstable = []
ipv6 = []
bpf_c = ["solana-bpfloader/bpf_c"]
chacha = []
cuda = []
erasure = []
ipv6 = []
test = []
unstable = []
[dependencies]
atty = "0.2"
bincode = "1.0.0"
bs58 = "0.2.0"
bv = { version = "0.10.0", features = ["serde"] }
byteorder = "1.2.1"
bytes = "0.4"
chrono = { version = "0.4.0", features = ["serde"] }
clap = "2.31"
dirs = "1.0.2"
env_logger = "0.5.12"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
solana-jsonrpc-core = "0.1"
solana-jsonrpc-http-server = "0.1"
solana-jsonrpc-macros = "0.1"
ipnetwork = "0.12.7"
itertools = "0.7.8"
libc = "0.2.43"
libloading = "0.5.0"
hashbrown = "0.1.7"
indexmap = "1.0"
itertools = "0.8.0"
libc = "0.2.45"
log = "0.4.2"
matches = "0.1.6"
nix = "0.11.0"
pnet_datalink = "0.21.0"
rand = "0.5.1"
nix = "0.12.0"
rand = "0.6.1"
rand_chacha = "0.1.0"
rayon = "1.0.0"
reqwest = "0.9.0"
ring = "0.13.2"
sha2 = "0.7.0"
serde = "1.0.27"
serde_cbor = "0.9.0"
serde_derive = "1.0.27"
rocksdb = "0.10.1"
serde = "1.0.82"
serde_derive = "1.0.82"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana_program_interface = { path = "common" }
sys-info = "0.5.6"
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.11.0" }
solana-drone = { path = "drone", version = "0.11.0" }
solana-jsonrpc-core = "0.4.0"
solana-jsonrpc-http-server = "0.4.0"
solana-jsonrpc-macros = "0.4.0"
solana-jsonrpc-pubsub = "0.4.0"
solana-jsonrpc-ws-server = "0.4.0"
solana-logger = { path = "logger", version = "0.11.0" }
solana-metrics = { path = "metrics", version = "0.11.0" }
solana-native-loader = { path = "programs/native/native_loader", version = "0.11.0" }
solana-netutil = { path = "netutil", version = "0.11.0" }
solana-sdk = { path = "sdk", version = "0.11.0" }
solana-system-program = { path = "programs/native/system", version = "0.11.0" }
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
[dev-dependencies]
noop = { path = "programs/noop" }
print = { path = "programs/print" }
move_funds = { path = "programs/move_funds" }
hex-literal = "0.1.1"
matches = "0.1.6"
[[bench]]
name = "bank"
@ -122,6 +70,9 @@ name = "bank"
[[bench]]
name = "banking_stage"
[[bench]]
name = "db_ledger"
[[bench]]
name = "ledger"
@ -131,18 +82,36 @@ name = "signature"
[[bench]]
name = "sigverify"
[[bench]]
required-features = ["chacha"]
name = "chacha"
[workspace]
members = [
".",
"common",
"programs/noop",
"programs/print",
"programs/move_funds",
]
default-members = [
".",
"common",
"programs/noop",
"programs/print",
"programs/move_funds",
"bench-streamer",
"bench-tps",
"drone",
"fullnode",
"fullnode-config",
"genesis",
"keygen",
"ledger-tool",
"logger",
"metrics",
"programs/bpf/rust/noop",
"programs/native/bpf_loader",
"programs/native/budget",
"programs/native/erc20",
"programs/native/lua_loader",
"programs/native/native_loader",
"programs/native/noop",
"programs/native/storage",
"programs/native/system",
"programs/native/vote",
"replicator",
"sdk",
"upload-perf",
"vote-signer",
"wallet",
]

329
README.md

@ -1,9 +1,9 @@
[![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana)
[![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana)
[![Build status](https://badge.buildkite.com/d4c4d7da9154e3a8fb7199325f430ccdb05be5fc1e92777e51.svg?branch=master)](https://solana-ci-gate.herokuapp.com/buildkite_public_log?https://buildkite.com/solana-labs/solana/builds/latest/master)
[![Build status](https://badge.buildkite.com/8cc350de251d61483db98bdfc895b9ea0ac8ffa4a32ee850ed.svg?branch=master)](https://buildkite.com/solana-labs/solana/builds?branch=master)
[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
Blockchain, Rebuilt for Scale
Blockchain Rebuilt for Scale
===
Solana&trade; is a new blockchain architecture built from the ground up for scale. The architecture supports
@ -21,246 +21,12 @@ It's possible for a centralized database to process 710,000 transactions per sec
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
Testnet Demos
Architecture
===
The Solana repo contains all the scripts you might need to spin up your own
local testnet. Depending on what you're looking to achieve, you may want to
run a different variation, as the full-fledged, performance-enhanced
multinode testnet is considerably more complex to set up than a Rust-only,
singlenode testnode. If you are looking to develop high-level features, such
as experimenting with smart contracts, save yourself some setup headaches and
stick to the Rust-only singlenode demo. If you're doing performance optimization
of the transaction pipeline, consider the enhanced singlenode demo. If you're
doing consensus work, you'll need at least a Rust-only multinode demo. If you want
to reproduce our TPS metrics, run the enhanced multinode demo.
For all four variations, you'd need the latest Rust toolchain and the Solana
source code:
First, install Rust's package manager Cargo.
```bash
$ curl https://sh.rustup.rs -sSf | sh
$ source $HOME/.cargo/env
```
Now checkout the code from github:
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
The demo code is sometimes broken between releases as we add new low-level
features, so if this is your first time running the demo, you'll improve
your odds of success if you check out the
[latest release](https://github.com/solana-labs/solana/releases)
before proceeding:
```bash
$ git checkout v0.8.0
```
Configuration Setup
---
The network is initialized with a genesis ledger and leader/validator configuration files.
These files can be generated by running the following script.
```bash
$ ./multinode-demo/setup.sh
```
Drone
---
In order for the leader, client and validators to work, we'll need to
spin up a drone to give out some test tokens. The drone delivers Milton
Friedman-style "air drops" (free tokens to requesting clients) to be used in
test transactions.
Start the drone on the leader node with:
```bash
$ ./multinode-demo/drone.sh
```
Singlenode Testnet
---
Before you start a fullnode, make sure you know the IP address of the machine you
want to be the leader for the demo, and make sure that udp ports 8000-10000 are
open on all the machines you want to test with.
Now start the server in a separate shell:
```bash
$ ./multinode-demo/leader.sh
```
Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to
receive transactions. The leader will request some tokens from the drone if it doesn't have any.
The drone does not need to be running for subsequent leader starts.
Multinode Testnet
---
To run a multinode testnet, after starting a leader node, spin up some validator nodes in
separate shells:
```bash
$ ./multinode-demo/validator.sh
```
To run a performance-enhanced leader or validator (on Linux),
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh
```
Testnet Client Demo
---
Now that your singlenode or multinode testnet is up and running let's send it
some transactions!
In a separate shell start the client:
```bash
$ ./multinode-demo/client.sh # runs against localhost by default
```
What just happened? The client demo spins up several threads to send 500,000 transactions
to the testnet as quickly as it can. The client then pings the testnet periodically to see
how many transactions it processed in that time. Take note that the demo intentionally
floods the network with UDP packets, such that the network will almost certainly drop a
bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
demo completes after it has convinced itself the testnet won't process any additional
transactions. You should see several TPS measurements printed to the screen. In the
multinode variation, you'll see TPS measurements for each validator node as well.
Public Testnet
--------------
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.
```bash
$ ./multinode-demo/client.sh --network $(dig +short testnet.solana.com):8001 --identity config-private/client-id.json --duration 60
```
You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)
Linux Snap
---
A Linux [Snap](https://snapcraft.io/) is available, which can be used to
easily get Solana running on supported Linux systems without building anything
from source. The `edge` Snap channel is updated daily with the latest
development from the `master` branch. To install:
```bash
$ sudo snap install solana --edge --devmode
```
(`--devmode` flag is required only for `solana.fullnode-cuda`)
Once installed, the usual Solana programs will be available as `solana.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.
Update to the latest version at any time with:
```bash
$ snap info solana
$ sudo snap refresh solana --devmode
```
### Daemon support
The snap supports running a leader, validator or leader+drone node as a system
daemon.
Run `sudo snap get solana` to view the current daemon configuration. To view
daemon logs:
1. Run `sudo snap logs -n=all solana` to view the daemon initialization log
2. Runtime logging can be found under `/var/snap/solana/current/leader/`,
`/var/snap/solana/current/validator/`, or `/var/snap/solana/current/drone/` depending
on which `mode=` was selected. Within each log directory the file `current`
contains the latest log, and the files `*.s` (if present) contain older rotated
logs.
Disable the daemon at any time by running:
```bash
$ sudo snap set solana mode=
```
Runtime configuration files for the daemon can be found in
`/var/snap/solana/current/config`.
#### Leader daemon
```bash
$ sudo snap set solana mode=leader
```
If CUDA is available:
```bash
$ sudo snap set solana mode=leader enable-cuda=1
```
`rsync` must be configured and running on the leader.
1. Ensure rsync is installed with `sudo apt-get -y install rsync`
2. Edit `/etc/rsyncd.conf` to include the following
```
[config]
path = /var/snap/solana/current/config
hosts allow = *
read only = true
```
3. Run `sudo systemctl enable rsync; sudo systemctl start rsync`
4. Test by running `rsync -Pzravv rsync://<ip-address-of-leader>/config
solana-config` from another machine. **If the leader is running on a cloud
provider, it may be necessary to configure the firewall rules to permit ingress
to ports tcp:873 and tcp:9900, and the port range udp:8000-udp:10000.**
To run both the Leader and Drone:
```bash
$ sudo snap set solana mode=leader+drone
```
#### Validator daemon
```bash
$ sudo snap set solana mode=validator
```
If CUDA is available:
```bash
$ sudo snap set solana mode=validator enable-cuda=1
```
By default the validator will connect to **testnet.solana.com**; override
the leader IP address by running:
```bash
$ sudo snap set solana mode=validator leader-address=127.0.0.1 #<-- change IP address
```
It's assumed that the leader will be running `rsync` configured as described in
the previous **Leader daemon** section.
Before you jump into the code, review the online book [Solana: Blockchain Rebuilt for Scale](https://solana-labs.github.io/solana/).
Developing
===
@ -276,7 +42,7 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt-preview
```
If your rustc version is lower than 1.26.1, please update it:
If your rustc version is lower than 1.31.0, please update it:
```bash
$ rustup update
@ -285,7 +51,7 @@ $ rustup update
On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc. On Ubuntu:
```bash
$ sudo apt-get install libssl-dev pkg-config zlib1g-dev
$ sudo apt-get install libssl-dev pkg-config zlib1g-dev llvm clang
```
Download the source code:
@ -295,13 +61,19 @@ $ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
Build
```bash
$ cargo build --all
```
Testing
---
Run the test suite:
```bash
$ cargo test
$ cargo test --all
```
To emulate all the tests that will run on a Pull Request, run:
@ -310,32 +82,50 @@ To emulate all the tests that will run on a Pull Request, run:
$ ./ci/run-local.sh
```
Debugging
Local Testnet
---
There are some useful debug messages in the code, you can enable them on a per-module and per-level
basis. Before running a leader or validator set the normal RUST\_LOG environment variable.
Start your own testnet locally; instructions are in the book [Solana: Blockchain Rebuilt for Scale: Getting Started](https://solana-labs.github.io/solana/getting-started.html).
For example, to enable info everywhere and debug only in the solana::banking_stage module:
Remote Testnets
---
```bash
$ export RUST_LOG=info,solana::banking_stage=debug
```
We maintain several testnets:
Generally we are using debug for infrequent debug messages, trace for potentially frequent
messages and info for performance-related logging.
* `testnet` - public stable testnet accessible via testnet.solana.com, with an https proxy for web apps at api.testnet.solana.com. Runs 24/7
* `testnet-beta` - public beta channel testnet accessible via beta.testnet.solana.com. Runs 24/7
* `testnet-edge` - public edge channel testnet accessible via edge.testnet.solana.com. Runs 24/7
* `testnet-perf` - permissioned stable testnet running a 24/7 soak test
* `testnet-beta-perf` - permissioned beta channel testnet running a multi-hour soak test weekday mornings
* `testnet-edge-perf` - permissioned edge channel testnet running a multi-hour soak test weekday mornings
You can also attach to a running process with GDB. The leader's process is named
_solana-fullnode_:
## Deploy process
```bash
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```
They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
Each testnet can be manually manipulated from buildkite as well. The `-perf`
testnets use a release tarball while the non-`-perf` builds use the snap build
(we've observed that the snap build runs slower than a tarball, but this has yet
to be root-caused).
## How do I reset the testnet?
Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
and, when prompted, select the desired testnet.
## How can I scale the tx generation rate?
Increase the TX rate by increasing the number of cores on the client machine that is running
`bench-tps`, or by running multiple clients. Decrease it by lowering the core count or by setting the rayon
environment variable `RAYON_NUM_THREADS=<xx>`.
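As a rough sketch (the binary name and flags below match the `bench-tps` package and its `cli.rs` shown later in this compare; the values are only illustrative, and `RAYON_NUM_THREADS` is rayon's standard thread-pool override):
```bash
# Lower the generation rate by capping rayon's worker pool for one client
$ RAYON_NUM_THREADS=4 solana-bench-tps --network 127.0.0.1:8001 --duration 60 --threads 4
```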
## How can I test a change on the testnet?
Currently, a merged PR is the only way to test a change on the testnet. But you
can run your own testnet using the scripts in the `net/` directory.
## Adjusting the number of clients or validators on the testnet
Edit `ci/testnet-manager.sh`
This will dump all the threads' stack traces into `gdb.txt`.
Benchmarking
---
@ -354,28 +144,19 @@ $ cargo +nightly bench --features="unstable"
Release Process
---
The release process for this project is described [here](rfcs/rfc-005-branches-tags-and-channels.md).
The release process for this project is described [here](RELEASE.md).
Code coverage
---
To generate code coverage statistics, install cargo-cov. Note: the tool currently only works
in Rust nightly.
To generate code coverage statistics:
```bash
$ cargo +nightly install cargo-cov
$ scripts/coverage.sh
$ open target/cov/lcov-local/index.html
```
Run cargo-cov and generate a report:
```bash
$ cargo +nightly cov test
$ cargo +nightly cov report --open
```
The coverage report will be written to `./target/cov/report/index.html`
Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
@ -388,3 +169,5 @@ problem is solved by this code?" On the other hand, if a test does fail and you
better way to solve the same problem, a Pull Request with your solution would most certainly be
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
send us that patch!

RELEASE.md

@ -1,8 +1,64 @@
# Solana Release process
## Introduction
## Branches and Tags
Solana uses a channel-oriented, date-based branching process described [here](https://github.com/solana-labs/solana/blob/master/rfcs/rfc-005-branches-tags-and-channels.md).
```
========================= master branch (edge channel) =======================>
          \                             \                         \
           \___v0.7.0 tag                \                         \
            \                             \               v0.9.0 tag__\
             \                  v0.8.0 tag__\                          \
  v0.7.1 tag__\                              \               v0.9 branch (beta channel)
               \___v0.7.2 tag                 \___v0.8.1 tag
                    \                              \
                     \                              \
              v0.7 branch                      v0.8 branch (stable channel)
```
### master branch
All new development occurs on the `master` branch.
Bug fixes that affect a `vX.Y` branch are first made on `master`. This is to
allow a fix some soak time on `master` before it is applied to one or more
stabilization branches.
Merging to `master` first also helps ensure that fixes applied to one release
are present for future releases. (Sometimes the joy of landing a critical
release blocker in a branch causes you to forget to propagate back to
`master`!)
Once the bug fix lands on `master` it is cherry-picked into the `vX.Y` branch
and potentially the `vX.Y-1` branch. The exception to this rule is when a bug
fix for `vX.Y` doesn't apply to `master` or `vX.Y-1`.
Immediately after a new stabilization branch is forged, the `Cargo.toml` minor
version (*Y*) in the `master` branch is incremented by the release engineer.
Incrementing the major version of the `master` branch is outside the scope of
this document.
### v*X.Y* stabilization branches
These are stabilization branches for a given milestone. They are created off
the `master` branch as late as possible prior to the milestone release.
### v*X.Y.Z* release tag
The release tags are created as desired by the owner of the given stabilization
branch, and cause that *X.Y.Z* release to be shipped to https://crates.io,
https://snapcraft.io/, and elsewhere.
Immediately after a new v*X.Y.Z* branch tag has been created, the `Cargo.toml`
patch version number (*Z*) of the stabilization branch is incremented by the
release engineer.
## Channels
Channels are used by end-users (humans and bots) to consume the branches
described in the previous section, so they may automatically update to the most
recent version matching their desired stability.
There are three release channels that map to branches as follows:
* edge - tracks the `master` branch, least stable.
* beta - tracks the largest (and latest) `vX.Y` stabilization branch, more stable.
* stable - tracks the second largest `vX.Y` stabilization branch, most stable.
## Release Steps
@ -12,9 +68,9 @@ When cutting a new channel branch these pre-steps are required:
1. Pick your branch point for release on master.
2. Create the branch. The name should be "v" + the first 2 "version" fields from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies the next branch name is "v0.9".
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0).
4. Push your new branch to solana.git
5. Land your Carto.toml change as a master PR.
4. Push the new branch to the solana repository
3. Update Cargo.toml on master to the next semantic version (e.g. 0.9.0 -> 0.10.0) by running `./scripts/increment-cargo-version.sh`.
5. Land your Cargo.toml change as a master PR.
At this point, ci/channel-info.sh should show your freshly cut release branch as "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
@ -23,10 +79,12 @@ At this point, ci/channel-info.sh should show your freshly cut release branch as
We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.
1. Go [there ;)](https://github.com/solana-labs/solana/releases).
2. Click "Draft new release".
2. Click "Draft new release". The release tag must exactly match the `version` field in `/Cargo.toml` prefixed by `v` (i.e., `<branchname>.X`).
3. If the first major release on the branch (e.g. v0.8.0), paste in [this template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md) and fill it in.
4. Test the release by generating a tag using semver's rules. First try at a release should be <branchname>.X-rc.0.
4. Test the release by generating a tag using semver's rules. First try at a release should be `<branchname>.X-rc.0`.
5. Verify release automation:
1. [Crates.io](https://crates.io/crates/solana) should have an updated Solana version.
2. ...
6. After testnet deployment, verify that testnets are running correct software. http://metrics.solana.com should show testnet running on a hash from your newly created branch.
7. Once the release has been made, update Cargo.toml on release to the next semantic version (e.g. 0.9.0 -> 0.9.1) by running `./scripts/increment-cargo-version.sh patch` (see the sketch after this list).
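For reference, the two version bumps called out above amount to roughly the following (run from a checkout of the relevant branch; committing and landing the change as a PR are omitted):
```bash
# On master, immediately after cutting the new vX.Y branch (e.g. 0.9.0 -> 0.10.0)
$ ./scripts/increment-cargo-version.sh

# On the release branch, immediately after tagging vX.Y.Z (e.g. 0.9.0 -> 0.9.1)
$ ./scripts/increment-cargo-version.sh patch
```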

17
bench-streamer/Cargo.toml Normal file

@ -0,0 +1,17 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
solana = { path = "..", version = "0.11.0" }
solana-logger = { path = "../logger", version = "0.11.0" }
solana-netutil = { path = "../netutil", version = "0.11.0" }
[features]
cuda = []

bench-streamer/src/main.rs

@ -1,8 +1,4 @@
extern crate clap;
extern crate solana;
use clap::{App, Arg};
use solana::netutil::bind_to;
use solana::packet::{Packet, SharedPackets, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
@ -62,7 +58,8 @@ fn main() -> Result<()> {
.value_name("NUM")
.takes_value(true)
.help("Use NUM receive sockets"),
).get_matches();
)
.get_matches();
if let Some(n) = matches.value_of("num-recv-sockets") {
num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
@ -76,7 +73,7 @@ fn main() -> Result<()> {
let mut read_channels = Vec::new();
let mut read_threads = Vec::new();
for _ in 0..num_sockets {
let read = bind_to(port, false).unwrap();
let read = solana_netutil::bind_to(port, false).unwrap();
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
addr = read.local_addr().unwrap();

21
bench-tps/Cargo.toml Normal file

@ -0,0 +1,21 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.11.0"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.32.0"
rayon = "1.0.3"
serde_json = "1.0.10"
solana = { path = "..", version = "0.11.0" }
solana-drone = { path = "../drone", version = "0.11.0" }
solana-logger = { path = "../logger", version = "0.11.0" }
solana-metrics = { path = "../metrics", version = "0.11.0" }
solana-sdk = { path = "../sdk", version = "0.11.0" }
[features]
cuda = []

530
bench-tps/src/bench.rs Normal file

@ -0,0 +1,530 @@
use solana_metrics;
use rayon::prelude::*;
use solana::client::mk_client;
use solana::cluster_info::NodeInfo;
use solana::thin_client::ThinClient;
use solana_drone::drone::request_airdrop_transaction;
use solana_metrics::influxdb;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::timing::timestamp;
use solana_sdk::timing::{duration_as_ms, duration_as_s};
use solana_sdk::transaction::Transaction;
use std::cmp;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;
pub struct NodeStats {
/// Maximum TPS reported by this node
pub tps: f64,
/// Total transactions reported by this node
pub tx: u64,
}
pub const MAX_SPENDS_PER_TX: usize = 4;
pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
pub fn metrics_submit_token_balance(token_balance: u64) {
println!("Token balance: {}", token_balance);
solana_metrics::submit(
influxdb::Point::new("bench-tps")
.add_tag("op", influxdb::Value::String("token_balance".to_string()))
.add_field("balance", influxdb::Value::Integer(token_balance as i64))
.to_owned(),
);
}
pub fn sample_tx_count(
exit_signal: &Arc<AtomicBool>,
maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
first_tx_count: u64,
v: &NodeInfo,
sample_period: u64,
) {
let mut client = mk_client(&v);
let mut now = Instant::now();
let mut initial_tx_count = client.transaction_count();
let mut max_tps = 0.0;
let mut total;
let log_prefix = format!("{:21}:", v.tpu.to_string());
loop {
let tx_count = client.transaction_count();
assert!(
tx_count >= initial_tx_count,
"expected tx_count({}) >= initial_tx_count({})",
tx_count,
initial_tx_count
);
let duration = now.elapsed();
now = Instant::now();
let sample = tx_count - initial_tx_count;
initial_tx_count = tx_count;
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
if tps > max_tps {
max_tps = tps;
}
if tx_count > first_tx_count {
total = tx_count - first_tx_count;
} else {
total = 0;
}
println!(
"{} {:9.2} TPS, Transactions: {:6}, Total transactions: {}",
log_prefix, tps, sample, total
);
sleep(Duration::new(sample_period, 0));
if exit_signal.load(Ordering::Relaxed) {
println!("{} Exiting validator thread", log_prefix);
let stats = NodeStats {
tps: max_tps,
tx: total,
};
maxes.write().unwrap().push((v.tpu, stats));
break;
}
}
}
/// Send loopback payment of 0 tokens and confirm the network processed it
pub fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash, id: &Keypair) {
let transfer_start = Instant::now();
let mut poll_count = 0;
loop {
if poll_count > 0 && poll_count % 8 == 0 {
println!(
"polling for barrier transaction confirmation, attempt {}",
poll_count
);
}
*last_id = barrier_client.get_last_id();
let signature = barrier_client
.transfer(0, &id, id.pubkey(), last_id)
.expect("Unable to send barrier transaction");
let confirmation = barrier_client.poll_for_signature(&signature);
let duration_ms = duration_as_ms(&transfer_start.elapsed());
if confirmation.is_ok() {
println!("barrier transaction confirmed in {} ms", duration_ms);
solana_metrics::submit(
influxdb::Point::new("bench-tps")
.add_tag(
"op",
influxdb::Value::String("send_barrier_transaction".to_string()),
)
.add_field("poll_count", influxdb::Value::Integer(poll_count))
.add_field("duration", influxdb::Value::Integer(duration_ms as i64))
.to_owned(),
);
// Sanity check that the client balance is still 1
let balance = barrier_client
.poll_balance_with_timeout(
&id.pubkey(),
&Duration::from_millis(100),
&Duration::from_secs(10),
)
.expect("Failed to get balance");
if balance != 1 {
panic!("Expected an account balance of 1 (balance: {})", balance);
}
break;
}
// Timeout after 3 minutes. When running a CPU-only leader+validator+drone+bench-tps on a dev
// machine, some batches of transactions can take upwards of 1 minute...
if duration_ms > 1000 * 60 * 3 {
println!("Error: Couldn't confirm barrier transaction!");
exit(1);
}
let new_last_id = barrier_client.get_last_id();
if new_last_id == *last_id {
if poll_count > 0 && poll_count % 8 == 0 {
println!("last_id is not advancing, still at {:?}", *last_id);
}
} else {
*last_id = new_last_id;
}
poll_count += 1;
}
}
pub fn generate_txs(
shared_txs: &SharedTransactions,
source: &[Keypair],
dest: &[Keypair],
threads: usize,
reclaim: bool,
leader: &NodeInfo,
) {
let mut client = mk_client(leader);
let last_id = client.get_last_id();
let tx_count = source.len();
println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
let signing_start = Instant::now();
let pairs: Vec<_> = if !reclaim {
source.iter().zip(dest.iter()).collect()
} else {
dest.iter().zip(source.iter()).collect()
};
let transactions: Vec<_> = pairs
.par_iter()
.map(|(id, keypair)| {
(
Transaction::system_new(id, keypair.pubkey(), 1, last_id),
timestamp(),
)
})
.collect();
let duration = signing_start.elapsed();
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
let bsps = (tx_count) as f64 / ns as f64;
let nsps = ns as f64 / (tx_count) as f64;
println!(
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time, {}",
bsps * 1_000_000_f64,
nsps / 1_000_f64,
duration_as_ms(&duration),
last_id,
);
solana_metrics::submit(
influxdb::Point::new("bench-tps")
.add_tag("op", influxdb::Value::String("generate_txs".to_string()))
.add_field(
"duration",
influxdb::Value::Integer(duration_as_ms(&duration) as i64),
)
.to_owned(),
);
let sz = transactions.len() / threads;
let chunks: Vec<_> = transactions.chunks(sz).collect();
{
let mut shared_txs_wl = shared_txs.write().unwrap();
for chunk in chunks {
shared_txs_wl.push_back(chunk.to_vec());
}
}
}
pub fn do_tx_transfers(
exit_signal: &Arc<AtomicBool>,
shared_txs: &SharedTransactions,
leader: &NodeInfo,
shared_tx_thread_count: &Arc<AtomicIsize>,
total_tx_sent_count: &Arc<AtomicUsize>,
) {
let client = mk_client(&leader);
loop {
let txs;
{
let mut shared_txs_wl = shared_txs.write().unwrap();
txs = shared_txs_wl.pop_front();
}
if let Some(txs0) = txs {
shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
println!(
"Transferring 1 unit {} times... to {}",
txs0.len(),
leader.tpu
);
let tx_len = txs0.len();
let transfer_start = Instant::now();
for tx in txs0 {
let now = timestamp();
if now > tx.1 && now - tx.1 > 1000 * 30 {
continue;
}
client.transfer_signed(&tx.0).unwrap();
}
shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
total_tx_sent_count.fetch_add(tx_len, Ordering::Relaxed);
println!(
"Tx send done. {} ms {} tps",
duration_as_ms(&transfer_start.elapsed()),
tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
);
solana_metrics::submit(
influxdb::Point::new("bench-tps")
.add_tag("op", influxdb::Value::String("do_tx_transfers".to_string()))
.add_field(
"duration",
influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64),
)
.add_field("count", influxdb::Value::Integer(tx_len as i64))
.to_owned(),
);
}
if exit_signal.load(Ordering::Relaxed) {
break;
}
}
}
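/// Returns true if any destination account listed in `tx` already holds at least
/// `amount` tokens, i.e. this funding transfer appears to have landed.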
pub fn verify_funding_transfer(client: &mut ThinClient, tx: &Transaction, amount: u64) -> bool {
for a in &tx.account_keys[1..] {
if client.get_balance(a).unwrap_or(0) >= amount {
return true;
}
}
false
}
/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
/// on every iteration. This allows us to replay the transfers because the source is either empty,
/// or full
pub fn fund_keys(client: &mut ThinClient, source: &Keypair, dests: &[Keypair], tokens: u64) {
let total = tokens * dests.len() as u64;
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
println!("funding keys {}", dests.len());
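// Each pass drains the current `funded` sources into at most MAX_SPENDS_PER_TX new
// destinations apiece; the newly funded keys become the sources for the next pass.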
while !notfunded.is_empty() {
let mut new_funded: Vec<(&Keypair, u64)> = vec![];
let mut to_fund = vec![];
println!("creating from... {}", funded.len());
for f in &mut funded {
let max_units = cmp::min(notfunded.len(), MAX_SPENDS_PER_TX);
if max_units == 0 {
break;
}
let start = notfunded.len() - max_units;
let per_unit = f.1 / (max_units as u64);
let moves: Vec<_> = notfunded[start..]
.iter()
.map(|k| (k.pubkey(), per_unit))
.collect();
notfunded[start..]
.iter()
.for_each(|k| new_funded.push((k, per_unit)));
notfunded.truncate(start);
if !moves.is_empty() {
to_fund.push((f.0, moves));
}
}
// try to transfer a "few" at a time with recent last_id
// assume 4MB network buffers, and 512 byte packets
const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;
to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
let mut tries = 0;
// this set of transactions just initializes us for bookkeeping
#[allow(clippy::clone_double_ref)] // sigh
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
(
k.clone(),
Transaction::system_move_many(k, &m, Default::default(), 0),
)
})
.collect();
let amount = chunk[0].1[0].1;
while !to_fund_txs.is_empty() {
let receivers = to_fund_txs
.iter()
.fold(0, |len, (_, tx)| len + tx.instructions.len());
println!(
"{} {} to {} in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
amount,
receivers,
to_fund_txs.len(),
);
let last_id = client.get_last_id();
// re-sign retained to_fund_txs with updated last_id
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[k], last_id);
});
to_fund_txs.iter().for_each(|(_, tx)| {
client.transfer_signed(&tx).expect("transfer");
});
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
tries += 1;
}
println!("transferred");
});
println!("funded: {} left: {}", new_funded.len(), notfunded.len());
funded = new_funded;
}
}
pub fn airdrop_tokens(
client: &mut ThinClient,
drone_addr: &SocketAddr,
id: &Keypair,
tx_count: u64,
) {
let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_token_balance(starting_balance);
println!("starting balance {}", starting_balance);
if starting_balance < tx_count {
let airdrop_amount = tx_count - starting_balance;
println!(
"Airdropping {:?} tokens from {} for {}",
airdrop_amount,
drone_addr,
id.pubkey(),
);
let last_id = client.get_last_id();
match request_airdrop_transaction(&drone_addr, &id.pubkey(), airdrop_amount, last_id) {
Ok(transaction) => {
let signature = client.transfer_signed(&transaction).unwrap();
client.poll_for_signature(&signature).unwrap();
}
Err(err) => {
panic!(
"Error requesting airdrop: {:?} to addr: {:?} amount: {}",
err, drone_addr, airdrop_amount
);
}
};
let current_balance = client.poll_get_balance(&id.pubkey()).unwrap_or_else(|e| {
println!("airdrop error {}", e);
starting_balance
});
println!("current balance {}...", current_balance);
metrics_submit_token_balance(current_balance);
if current_balance - starting_balance != airdrop_amount {
println!(
"Airdrop failed! {} {} {}",
id.pubkey(),
current_balance,
starting_balance
);
exit(1);
}
}
}
pub fn compute_and_report_stats(
maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
sample_period: u64,
tx_send_elapsed: &Duration,
total_tx_send_count: usize,
) {
// Compute/report stats
let mut max_of_maxes = 0.0;
let mut max_tx_count = 0;
let mut nodes_with_zero_tps = 0;
let mut total_maxes = 0.0;
println!(" Node address | Max TPS | Total Transactions");
println!("---------------------+---------------+--------------------");
for (sock, stats) in maxes.read().unwrap().iter() {
let maybe_flag = match stats.tx {
0 => "!!!!!",
_ => "",
};
println!(
"{:20} | {:13.2} | {} {}",
(*sock).to_string(),
stats.tps,
stats.tx,
maybe_flag
);
if stats.tps == 0.0 {
nodes_with_zero_tps += 1;
}
total_maxes += stats.tps;
if stats.tps > max_of_maxes {
max_of_maxes = stats.tps;
}
if stats.tx > max_tx_count {
max_tx_count = stats.tx;
}
}
if total_maxes > 0.0 {
let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps;
let average_max = total_maxes / num_nodes_with_tps as f64;
println!(
"\nAverage max TPS: {:.2}, {} nodes had 0 TPS",
average_max, nodes_with_zero_tps
);
}
println!(
"\nHighest TPS: {:.2} sampling period {}s max transactions: {} clients: {} drop rate: {:.2}",
max_of_maxes,
sample_period,
max_tx_count,
maxes.read().unwrap().len(),
(total_tx_send_count as u64 - max_tx_count) as f64 / total_tx_send_count as f64,
);
println!(
"\tAverage TPS: {}",
max_tx_count as f32 / duration_as_s(tx_send_elapsed)
);
}
// First transfer 3/4 of the tokens to the dest accounts
// then ping-pong 1/4 of the tokens back to the other account
// this leaves 1/4 token buffer in each account
pub fn should_switch_directions(num_tokens_per_account: u64, i: u64) -> bool {
i % (num_tokens_per_account / 4) == 0 && (i >= (3 * num_tokens_per_account) / 4)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(20, 0), false);
assert_eq!(should_switch_directions(20, 1), false);
assert_eq!(should_switch_directions(20, 14), false);
assert_eq!(should_switch_directions(20, 15), true);
assert_eq!(should_switch_directions(20, 16), false);
assert_eq!(should_switch_directions(20, 19), false);
assert_eq!(should_switch_directions(20, 20), true);
assert_eq!(should_switch_directions(20, 21), false);
assert_eq!(should_switch_directions(20, 99), false);
assert_eq!(should_switch_directions(20, 100), true);
assert_eq!(should_switch_directions(20, 101), false);
}
}

166
bench-tps/src/cli.rs Normal file

@ -0,0 +1,166 @@
use std::net::SocketAddr;
use std::process::exit;
use std::time::Duration;
use clap::{crate_version, App, Arg, ArgMatches};
use solana_drone::drone::DRONE_PORT;
use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
/// Holds the configuration for a single run of the benchmark
pub struct Config {
pub network_addr: SocketAddr,
pub drone_addr: SocketAddr,
pub id: Keypair,
pub threads: usize,
pub num_nodes: usize,
pub duration: Duration,
pub tx_count: usize,
pub sustained: bool,
pub reject_extra_nodes: bool,
pub converge_only: bool,
}
impl Default for Config {
fn default() -> Config {
Config {
network_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
drone_addr: SocketAddr::from(([127, 0, 0, 1], DRONE_PORT)),
id: Keypair::new(),
threads: 4,
num_nodes: 1,
duration: Duration::new(std::u64::MAX, 0),
tx_count: 500_000,
sustained: false,
reject_extra_nodes: false,
converge_only: false,
}
}
}
/// Defines and builds the CLI args for a run of the benchmark
pub fn build_args<'a, 'b>() -> App<'a, 'b> {
App::new("solana-bench-tps")
.version(crate_version!())
.arg(
Arg::with_name("network")
.short("n")
.long("network")
.value_name("HOST:PORT")
.takes_value(true)
.help("Rendezvous with the network at this gossip entry point; defaults to 127.0.0.1:8001"),
)
.arg(
Arg::with_name("drone")
.short("d")
.long("drone")
.value_name("HOST:PORT")
.takes_value(true)
.help("Location of the drone; defaults to network:DRONE_PORT"),
)
.arg(
Arg::with_name("identity")
.short("i")
.long("identity")
.value_name("PATH")
.takes_value(true)
.help("File containing a client identity (keypair)"),
)
.arg(
Arg::with_name("num-nodes")
.short("N")
.long("num-nodes")
.value_name("NUM")
.takes_value(true)
.help("Wait for NUM nodes to converge"),
)
.arg(
Arg::with_name("reject-extra-nodes")
.long("reject-extra-nodes")
.help("Require exactly `num-nodes` on convergence. Appropriate only for internal networks"),
)
.arg(
Arg::with_name("threads")
.short("t")
.long("threads")
.value_name("NUM")
.takes_value(true)
.help("Number of threads"),
)
.arg(
Arg::with_name("duration")
.long("duration")
.value_name("SECS")
.takes_value(true)
.help("Seconds to run benchmark, then exit; default is forever"),
)
.arg(
Arg::with_name("converge-only")
.long("converge-only")
.help("Exit immediately after converging"),
)
.arg(
Arg::with_name("sustained")
.long("sustained")
.help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
)
.arg(
Arg::with_name("tx_count")
.long("tx_count")
.value_name("NUM")
.takes_value(true)
.help("Number of transactions to send per batch")
)
}
/// Parses a clap `ArgMatches` structure into a `Config`
/// # Arguments
/// * `matches` - command line arguments parsed by clap
/// # Panics
/// Panics if there is trouble parsing any of the arguments
pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
let mut args = Config::default();
if let Some(addr) = matches.value_of("network") {
args.network_addr = addr.parse().unwrap_or_else(|e| {
eprintln!("failed to parse network: {}", e);
exit(1)
});
}
if let Some(addr) = matches.value_of("drone") {
args.drone_addr = addr.parse().unwrap_or_else(|e| {
eprintln!("failed to parse drone address: {}", e);
exit(1)
});
}
if matches.is_present("identity") {
args.id = read_keypair(matches.value_of("identity").unwrap())
.expect("can't read client identity");
}
if let Some(t) = matches.value_of("threads") {
args.threads = t.to_string().parse().expect("can't parse threads");
}
if let Some(n) = matches.value_of("num-nodes") {
args.num_nodes = n.to_string().parse().expect("can't parse num-nodes");
}
if let Some(duration) = matches.value_of("duration") {
args.duration = Duration::new(
duration.to_string().parse().expect("can't parse duration"),
0,
);
}
if let Some(s) = matches.value_of("tx_count") {
args.tx_count = s.to_string().parse().expect("can't parse tx_count");
}
args.sustained = matches.is_present("sustained");
args.converge_only = matches.is_present("converge-only");
args.reject_extra_nodes = matches.is_present("reject-extra-nodes");
args
}

304
bench-tps/src/main.rs Normal file

@ -0,0 +1,304 @@
mod bench;
mod cli;
use solana::client::mk_client;
use solana::cluster_info::{ClusterInfo, NodeInfo};
use solana::gossip_service::GossipService;
use solana::service::Service;
use solana::signature::GenKeys;
use solana::thin_client::poll_gossip_for_leader;
use solana_metrics;
use solana_sdk::signature::KeypairUtil;
use std::collections::VecDeque;
use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::time::Duration;
use std::time::Instant;
use crate::bench::*;
/// Creates a cluster and waits for the network to converge, returning the peers, leader, and gossip service
/// # Arguments
/// `leader` - the input leader node
/// `exit_signal` - atomic bool used to signal early exit to cluster
/// `num_nodes` - the number of nodes
/// # Panics
/// Panics if the spy node `RwLock` somehow ends up unreadable
fn converge(
leader: &NodeInfo,
exit_signal: &Arc<AtomicBool>,
num_nodes: usize,
) -> (Vec<NodeInfo>, Option<NodeInfo>, GossipService) {
//lets spy on the network
let (node, gossip_socket) = ClusterInfo::spy_node();
let mut spy_cluster_info = ClusterInfo::new(node);
spy_cluster_info.insert_info(leader.clone());
spy_cluster_info.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_cluster_info));
let gossip_service = GossipService::new(&spy_ref, None, gossip_socket, exit_signal.clone());
let mut v: Vec<NodeInfo> = vec![];
// wait for the network to converge, 30 seconds should be plenty
for _ in 0..30 {
{
let spy_ref = spy_ref.read().unwrap();
println!("{}", spy_ref.node_info_trace());
if spy_ref.leader_data().is_some() {
v = spy_ref.rpc_peers();
if v.len() >= num_nodes {
println!("CONVERGED!");
break;
} else {
println!(
"{} node(s) discovered (looking for {} or more)",
v.len(),
num_nodes
);
}
}
}
sleep(Duration::new(1, 0));
}
let leader = spy_ref.read().unwrap().leader_data().cloned();
(v, leader, gossip_service)
}
fn main() {
solana_logger::setup();
solana_metrics::set_panic_hook("bench-tps");
let matches = cli::build_args().get_matches();
let cfg = cli::extract_args(&matches);
let cli::Config {
network_addr: network,
drone_addr,
id,
threads,
num_nodes,
duration,
tx_count,
sustained,
reject_extra_nodes,
converge_only,
} = cfg;
println!("Looking for leader at {:?}", network);
let leader = poll_gossip_for_leader(network, None).expect("unable to find leader on network");
let exit_signal = Arc::new(AtomicBool::new(false));
let (nodes, leader, gossip_service) = converge(&leader, &exit_signal, num_nodes);
if nodes.len() < num_nodes {
println!(
"Error: Insufficient nodes discovered. Expecting {} or more",
num_nodes
);
exit(1);
}
if reject_extra_nodes && nodes.len() > num_nodes {
println!(
"Error: Extra nodes discovered. Expecting exactly {}",
num_nodes
);
exit(1);
}
if leader.is_none() {
println!("no leader");
exit(1);
}
if converge_only {
return;
}
let leader = leader.unwrap();
println!("leader RPC is at {} {}", leader.rpc, leader.id);
let mut client = mk_client(&leader);
let mut barrier_client = mk_client(&leader);
let mut seed = [0u8; 32];
seed.copy_from_slice(&id.public_key_bytes()[..32]);
let mut rnd = GenKeys::new(seed);
println!("Creating {} keypairs...", tx_count * 2);
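// fund_keys() fans out to at most MAX_SPENDS_PER_TX destinations per source, so
// intermediate keypairs are generated for every level of that funding tree on top
// of the tx_count * 2 keypairs the benchmark actually transacts with.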
let mut total_keys = 0;
let mut target = tx_count * 2;
while target > 0 {
total_keys += target;
target /= MAX_SPENDS_PER_TX;
}
let gen_keypairs = rnd.gen_n_keypairs(total_keys as u64);
let barrier_id = rnd.gen_n_keypairs(1).pop().unwrap();
println!("Get tokens...");
let num_tokens_per_account = 20;
// Sample the first keypair, see if it has tokens, if so then resume
// to avoid token loss
let keypair0_balance = client
.poll_get_balance(&gen_keypairs.last().unwrap().pubkey())
.unwrap_or(0);
if num_tokens_per_account > keypair0_balance {
let extra = num_tokens_per_account - keypair0_balance;
let total = extra * (gen_keypairs.len() as u64);
airdrop_tokens(&mut client, &drone_addr, &id, total);
println!("adding more tokens {}", extra);
fund_keys(&mut client, &id, &gen_keypairs, extra);
}
let start = gen_keypairs.len() - (tx_count * 2) as usize;
let keypairs = &gen_keypairs[start..];
airdrop_tokens(&mut barrier_client, &drone_addr, &barrier_id, 1);
println!("Get last ID...");
let mut last_id = client.get_last_id();
println!("Got last ID {:?}", last_id);
let first_tx_count = client.transaction_count();
println!("Initial transaction count {}", first_tx_count);
// Setup a thread per validator to sample every period
// collect the max transaction rate and total tx count seen
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
println!("Sampling TPS every {} second...", sample_period);
let v_threads: Vec<_> = nodes
.into_iter()
.map(|v| {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
})
.unwrap()
})
.collect();
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
let total_tx_sent_count = Arc::new(AtomicUsize::new(0));
let s_threads: Vec<_> = (0..threads)
.map(|_| {
let exit_signal = exit_signal.clone();
let shared_txs = shared_txs.clone();
let leader = leader.clone();
let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
let total_tx_sent_count = total_tx_sent_count.clone();
Builder::new()
.name("solana-client-sender".to_string())
.spawn(move || {
do_tx_transfers(
&exit_signal,
&shared_txs,
&leader,
&shared_tx_active_thread_count,
&total_tx_sent_count,
);
})
.unwrap()
})
.collect();
// generate and send transactions for the specified duration
let start = Instant::now();
let mut reclaim_tokens_back_to_source_account = false;
let mut i = keypair0_balance;
while start.elapsed() < duration {
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_token_balance(balance);
// ping-pong between source and destination accounts for each loop iteration
// this seems to be faster than trying to determine the balance of individual
// accounts
let len = tx_count as usize;
generate_txs(
&shared_txs,
&keypairs[..len],
&keypairs[len..],
threads,
reclaim_tokens_back_to_source_account,
&leader,
);
// In sustained mode overlap the transfers with generation
// this has higher average performance but lower peak performance
// in tested environments.
if !sustained {
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
sleep(Duration::from_millis(100));
}
}
// It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
// transactions sent by `generate_txs()` so instead send and confirm a single transaction
// to validate the network is still functional.
send_barrier_transaction(&mut barrier_client, &mut last_id, &barrier_id);
i += 1;
if should_switch_directions(num_tokens_per_account, i) {
reclaim_tokens_back_to_source_account = !reclaim_tokens_back_to_source_account;
}
}
// Stop the sampling threads so they will collect the stats
exit_signal.store(true, Ordering::Relaxed);
println!("Waiting for validator threads...");
for t in v_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
// join the tx send threads
println!("Waiting for transmit threads...");
for t in s_threads {
if let Err(err) = t.join() {
println!(" join() failed with: {:?}", err);
}
}
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_token_balance(balance);
compute_and_report_stats(
&maxes,
sample_period,
&start.elapsed(),
total_tx_sent_count.load(Ordering::Relaxed),
);
// join the cluster_info client threads
gossip_service.join().unwrap();
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(20, 0), false);
assert_eq!(should_switch_directions(20, 1), false);
assert_eq!(should_switch_directions(20, 14), false);
assert_eq!(should_switch_directions(20, 15), true);
assert_eq!(should_switch_directions(20, 16), false);
assert_eq!(should_switch_directions(20, 19), false);
assert_eq!(should_switch_directions(20, 20), true);
assert_eq!(should_switch_directions(20, 21), false);
assert_eq!(should_switch_directions(20, 99), false);
assert_eq!(should_switch_directions(20, 100), true);
assert_eq!(should_switch_directions(20, 101), false);
}
}

benches/bank.rs

@ -1,17 +1,14 @@
#![feature(test)]
extern crate bincode;
extern crate rayon;
extern crate solana;
extern crate test;
use bincode::serialize;
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana::status_deque::MAX_ENTRY_IDS;
use solana_sdk::hash::hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::transaction::Transaction;
use test::Bencher;
#[bench]
@ -21,30 +18,35 @@ fn bench_process_transaction(bencher: &mut Bencher) {
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
.into_iter()
.map(|_| {
// Seed the 'from' account.
let rando0 = Keypair::new();
let tx = Transaction::system_move(
&mint.keypair(),
rando0.pubkey(),
10_000,
mint.last_id(),
bank.last_id(),
0,
);
assert!(bank.process_transaction(&tx).is_ok());
assert_eq!(bank.process_transaction(&tx), Ok(()));
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
let rando1 = Keypair::new();
let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, last_id, 0);
assert!(bank.process_transaction(&tx).is_ok());
let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, bank.last_id(), 0);
assert_eq!(bank.process_transaction(&tx), Ok(()));
// Finally, return the transaction to the benchmark.
tx
}).collect();
})
.collect();
let mut id = bank.last_id();
for _ in 0..(MAX_ENTRY_IDS - 1) {
bank.register_tick(&id);
id = hash(&id.as_ref())
}
bencher.iter(|| {
// Since benchmarker runs this multiple times, we need to clear the signatures.

benches/banking_stage.rs

@ -1,9 +1,5 @@
#![feature(test)]
extern crate bincode;
extern crate rand;
extern crate rayon;
extern crate solana;
extern crate solana_program_interface;
extern crate test;
use rand::{thread_rng, Rng};
@ -13,10 +9,12 @@ use solana::banking_stage::{BankingStage, NUM_THREADS};
use solana::entry::Entry;
use solana::mint::Mint;
use solana::packet::to_packets_chunked;
use solana::signature::{KeypairUtil, Signature};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana_program_interface::pubkey::Pubkey;
use solana::status_deque::MAX_ENTRY_IDS;
use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::transaction::Transaction;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
@ -49,6 +47,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let (verified_sender, verified_receiver) = channel();
let bank = Arc::new(Bank::new(&mint));
let dummy_leader_id = Keypair::new().pubkey();
let dummy = Transaction::system_move(
&mint.keypair(),
mint.keypair().pubkey(),
@ -63,21 +62,23 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.keys[0] = Pubkey::new(&from[0..32]);
new.keys[1] = Pubkey::new(&to[0..32]);
new.signature = Signature::new(&sig[0..64]);
new.account_keys[0] = Pubkey::new(&from[0..32]);
new.account_keys[1] = Pubkey::new(&to[0..32]);
new.signatures = vec![Signature::new(&sig[0..64])];
new
}).collect();
})
.collect();
// fund all the accounts
transactions.iter().for_each(|tx| {
let fund = Transaction::system_move(
&mint.keypair(),
tx.keys[0],
mint_total / txes as i64,
tx.account_keys[0],
mint_total / txes as u64,
mint.last_id(),
0,
);
assert!(bank.process_transaction(&fund).is_ok());
let x = bank.process_transaction(&fund);
assert!(x.is_ok());
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
@ -96,15 +97,136 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
.map(|x| {
let len = x.read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
}).collect();
let (_stage, signal_receiver) = BankingStage::new(&bank, verified_receiver, Default::default());
})
.collect();
let (_stage, signal_receiver) = BankingStage::new(
&bank,
verified_receiver,
Default::default(),
&mint.last_id(),
None,
dummy_leader_id,
);
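// Age the bank by registering MAX_ENTRY_IDS fresh ticks; the bench loop below
// re-registers mint.last_id() whenever it has fallen out of the valid-id window.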
let mut id = mint.last_id();
for _ in 0..MAX_ENTRY_IDS {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
bencher.iter(move || {
// make sure the tx last id is still registered
if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
bank.register_tick(&mint.last_id());
}
for v in verified.chunks(verified.len() / NUM_THREADS) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes);
bank.clear_signatures();
});
}
#[bench]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
let progs = 4;
let txes = 1000 * NUM_THREADS;
let mint_total = 1_000_000_000_000;
let mint = Mint::new(mint_total);
let (verified_sender, verified_receiver) = channel();
let bank = Arc::new(Bank::new(&mint));
let dummy_leader_id = Keypair::new().pubkey();
let dummy = Transaction::system_move(
&mint.keypair(),
mint.keypair().pubkey(),
1,
mint.last_id(),
0,
);
let transactions: Vec<_> = (0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
new.account_keys[0] = Pubkey::new(&from[0..32]);
new.account_keys[1] = Pubkey::new(&to[0..32]);
let prog = new.instructions[0].clone();
for i in 1..progs {
//generate programs that spend to random keys
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let to_key = Pubkey::new(&to[0..32]);
new.account_keys.push(to_key);
assert_eq!(new.account_keys.len(), i + 2);
new.instructions.push(prog.clone());
assert_eq!(new.instructions.len(), i + 1);
new.instructions[i].accounts[1] = 1 + i as u8;
assert_eq!(new.key(i, 1), Some(&to_key));
assert_eq!(
new.account_keys[new.instructions[i].accounts[1] as usize],
to_key
);
}
assert_eq!(new.instructions.len(), progs);
new.signatures = vec![Signature::new(&sig[0..64])];
new
})
.collect();
transactions.iter().for_each(|tx| {
let fund = Transaction::system_move(
&mint.keypair(),
tx.account_keys[0],
mint_total / txes as u64,
mint.last_id(),
0,
);
assert!(bank.process_transaction(&fund).is_ok());
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
.into_iter()
.map(|x| {
let len = x.read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
})
.collect();
let (_stage, signal_receiver) = BankingStage::new(
&bank,
verified_receiver,
Default::default(),
&mint.last_id(),
None,
dummy_leader_id,
);
let mut id = mint.last_id();
for _ in 0..MAX_ENTRY_IDS {
id = hash(&id.as_ref());
bank.register_tick(&id);
}
bencher.iter(move || {
// make sure the transactions are still valid
if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
bank.register_tick(&mint.last_id());
}
for v in verified.chunks(verified.len() / NUM_THREADS) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes);
bank.clear_signatures();
// make sure the tx last id is still registered
bank.register_entry_id(&mint.last_id());
});
}

29
benches/chacha.rs Normal file

@ -0,0 +1,29 @@
//#![feature(test)]
//
//extern crate solana;
//extern crate test;
//
//use solana::chacha::chacha_cbc_encrypt_files;
//use std::fs::remove_file;
//use std::fs::File;
//use std::io::Write;
//use std::path::Path;
//use test::Bencher;
//
//#[bench]
//fn bench_chacha_encrypt(bench: &mut Bencher) {
// let in_path = Path::new("bench_chacha_encrypt_file_input.txt");
// let out_path = Path::new("bench_chacha_encrypt_file_output.txt.enc");
// {
// let mut in_file = File::create(in_path).unwrap();
// for _ in 0..1024 {
// in_file.write("123456foobar".as_bytes()).unwrap();
// }
// }
// bench.iter(move || {
// chacha_cbc_encrypt_files(in_path, out_path, "thetestkey".to_string()).unwrap();
// });
//
// remove_file(in_path).unwrap();
// remove_file(out_path).unwrap();
//}

199
benches/db_ledger.rs Normal file

@ -0,0 +1,199 @@
#![feature(test)]
use rand;
extern crate test;
use rand::seq::SliceRandom;
use rand::{thread_rng, Rng};
use rocksdb::{Options, DB};
use solana::db_ledger::{DataCf, DbLedger, LedgerColumnFamilyRaw};
use solana::ledger::{get_tmp_ledger_path, make_large_test_entries, make_tiny_test_entries, Block};
use solana::packet::{Blob, BLOB_HEADER_SIZE};
use test::Bencher;
// Given some blobs and a ledger at ledger_path, benchmark writing the blobs to the ledger
fn bench_write_blobs(bench: &mut Bencher, blobs: &mut [&mut Blob], ledger_path: &str) {
let db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let slot = 0;
let num_blobs = blobs.len();
bench.iter(move || {
for blob in blobs.iter_mut() {
let index = blob.index().unwrap();
let key = DataCf::key(slot, index);
let size = blob.size().unwrap();
db_ledger
.data_cf
.put(&db_ledger.db, &key, &blob.data[..BLOB_HEADER_SIZE + size])
.unwrap();
blob.set_index(index + num_blobs as u64).unwrap();
}
});
DB::destroy(&Options::default(), &ledger_path)
.expect("Expected successful database destruction");
}
// Insert some blobs into the ledger in preparation for read benchmarks
fn setup_read_bench(
db_ledger: &mut DbLedger,
num_small_blobs: u64,
num_large_blobs: u64,
slot: u64,
) {
// Make some big and small entries
let mut entries = make_large_test_entries(num_large_blobs as usize);
entries.extend(make_tiny_test_entries(num_small_blobs as usize));
// Convert the entries to blobs, write the blobs to the ledger
let shared_blobs = entries.to_blobs();
for b in shared_blobs.iter() {
b.write().unwrap().set_slot(slot).unwrap();
}
db_ledger
.write_shared_blobs(&shared_blobs)
.expect("Expectd successful insertion of blobs into ledger");
}
// Write small blobs to the ledger
#[bench]
#[ignore]
fn bench_write_small(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_write_small");
let num_entries = 32 * 1024;
let entries = make_tiny_test_entries(num_entries);
let shared_blobs = entries.to_blobs();
let mut blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.write().unwrap()).collect();
let mut blobs: Vec<&mut Blob> = blob_locks.iter_mut().map(|b| &mut **b).collect();
bench_write_blobs(bench, &mut blobs, &ledger_path);
}
// Write big blobs to the ledger
#[bench]
#[ignore]
fn bench_write_big(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_write_big");
let num_entries = 32 * 1024;
let entries = make_large_test_entries(num_entries);
let shared_blobs = entries.to_blobs();
let mut blob_locks: Vec<_> = shared_blobs.iter().map(|b| b.write().unwrap()).collect();
let mut blobs: Vec<&mut Blob> = blob_locks.iter_mut().map(|b| &mut **b).collect();
bench_write_blobs(bench, &mut blobs, &ledger_path);
}
#[bench]
#[ignore]
fn bench_read_sequential(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_read_sequential");
let mut db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small blobs into the ledger
let num_small_blobs = 32 * 1024;
let num_large_blobs = 32 * 1024;
let total_blobs = num_small_blobs + num_large_blobs;
let slot = 0;
setup_read_bench(&mut db_ledger, num_small_blobs, num_large_blobs, slot);
let num_reads = total_blobs / 15;
let mut rng = rand::thread_rng();
bench.iter(move || {
// Generate random starting point in the range [0, total_blobs - 1], read num_reads blobs sequentially
let start_index = rng.gen_range(0, num_small_blobs + num_large_blobs);
for i in start_index..start_index + num_reads {
let _ =
db_ledger
.data_cf
.get_by_slot_index(&db_ledger.db, slot, i as u64 % total_blobs);
}
});
DB::destroy(&Options::default(), &ledger_path)
.expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_read_random(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_read_random");
let mut db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
// Insert some big and small blobs into the ledger
let num_small_blobs = 32 * 1024;
let num_large_blobs = 32 * 1024;
let total_blobs = num_small_blobs + num_large_blobs;
let slot = 0;
setup_read_bench(&mut db_ledger, num_small_blobs, num_large_blobs, slot);
let num_reads = total_blobs / 15;
// Generate a num_reads sized random sample of indexes in range [0, total_blobs - 1],
// simulating random reads
let mut rng = rand::thread_rng();
let indexes: Vec<usize> = (0..num_reads)
.map(|_| rng.gen_range(0, total_blobs) as usize)
.collect();
bench.iter(move || {
for i in indexes.iter() {
let _ = db_ledger
.data_cf
.get_by_slot_index(&db_ledger.db, slot, *i as u64);
}
});
DB::destroy(&Options::default(), &ledger_path)
.expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_insert_data_blob_small(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_insert_data_blob_small");
let db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = make_tiny_test_entries(num_entries);
let mut shared_blobs = entries.to_blobs();
shared_blobs.shuffle(&mut thread_rng());
bench.iter(move || {
for blob in shared_blobs.iter_mut() {
let index = blob.read().unwrap().index().unwrap();
db_ledger.write_shared_blobs(vec![blob.clone()]).unwrap();
blob.write()
.unwrap()
.set_index(index + num_entries as u64)
.unwrap();
}
});
DB::destroy(&Options::default(), &ledger_path)
.expect("Expected successful database destruction");
}
#[bench]
#[ignore]
fn bench_insert_data_blob_big(bench: &mut Bencher) {
let ledger_path = get_tmp_ledger_path("bench_insert_data_blob_big");
let db_ledger =
DbLedger::open(&ledger_path).expect("Expected to be able to open database ledger");
let num_entries = 32 * 1024;
let entries = make_large_test_entries(num_entries);
let mut shared_blobs = entries.to_blobs();
shared_blobs.shuffle(&mut thread_rng());
bench.iter(move || {
for blob in shared_blobs.iter_mut() {
let index = blob.read().unwrap().index().unwrap();
db_ledger.write_shared_blobs(vec![blob.clone()]).unwrap();
blob.write()
.unwrap()
.set_index(index + num_entries as u64)
.unwrap();
}
});
DB::destroy(&Options::default(), &ledger_path)
.expect("Expected successful database destruction");
}


@ -1,12 +1,13 @@
#![feature(test)]
extern crate solana;
extern crate test;
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana::entry::reconstruct_entries_from_blobs;
use solana::ledger::{next_entries, Block};
use solana_sdk::hash::{hash, Hash};
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_transaction::SystemTransaction;
use solana_sdk::transaction::Transaction;
use test::Bencher;
#[bench]
@ -20,6 +21,6 @@ fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
bencher.iter(|| {
let blobs = entries.to_blobs();
assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap(), entries);
assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap().0, entries);
});
}


@ -1,5 +1,5 @@
#![feature(test)]
extern crate solana;
extern crate test;
use solana::signature::GenKeys;


@ -1,12 +1,10 @@
#![feature(test)]
extern crate bincode;
extern crate rayon;
extern crate solana;
extern crate test;
use solana::packet::to_packets;
use solana::sigverify;
use solana::system_transaction::test_tx;
use solana::test_tx::test_tx;
use test::Bencher;
#[bench]

26
book/README.md Normal file

@ -0,0 +1,26 @@
Building the Solana book
---
Install the book's dependencies, build, and test the book:
```bash
$ ./build.sh
```
Run any Rust tests in the markdown:
```bash
$ make test
```
Render markdown as HTML:
```bash
$ make build
```
Render and view the book:
```bash
$ make open
```

25
book/art/data-plane.bob Normal file

@ -0,0 +1,25 @@
.-------------.
| |
.-------------+ Leader +══════════════╗
| | | ║
| `-------------` ║
v v
.-------------. .-------------.
| +--------------------------->| |
.----+ Validator 1 | | Validator 2 +═══╗
| | |<═══════════════════════════+ | ║
| `------+------` `------+------` ║
| | ║ ║
| `------------------------------. ║ ║
| | ║ ║
| ╔════════════════════════════════╝ ║
| ║ | ║
V v V v
.-------------. .-------------. .-------------. .-------------.
| | | | | | | |
| Validator 3 +------>| Validator 4 +══════>| Validator 5 +------>| Validator 6 |
| | | | | | | |
`-------------` `-------------` `-------------` `------+------`
^ ║
║ ║
╚═════════════════════════════════════════════════════════════════╝


@ -0,0 +1,13 @@
validator action
+----+ ----------------
| | L1 | E1
| +----+ / \ vote(E1)
| | L2 | E2 x
| +----+ / \ / \ vote(E2)
time | | L3 | E3 x E3' x
| +----+ / \ / \ / \ / \ slash(E3)
| | L4 | x x E4 x x x x x
| +----+ | | | | | | | | vote(E4)
v | L5 | xx xx xx E5 xx xx xx xx
+----+ hang on to E4 and E5 for more...

27
book/art/fullnode.bob Normal file

@ -0,0 +1,27 @@
.---------------------------.
| Fullnode |
| |
.--------. | .------------------. |
| |---->| | |
| Client | | | JSON RPC Service | |
| |<----| | |
`----+---` | `------------------` |
| | ^ | .------------------.
| | | .----------------. | | Validators |
| | | | Gossip Service +----->| |
| | | `--------+-------` | | .------------. |
| | | ^ | | | | | |
| | | | v | | | Upstream | |
| | .--+---. .-+---. | | | Validators | |
| | | Bank |<--| TVU |<--------------+ | |
| | `------` `-----` | | `------------` |
| | ^ | | |
| | | | | .------------. |
| | .--+--. .-----------. | | | | |
`-------->| TPU +-->| Broadcast +--------->| Downstream | |
| `-----` | Service | | | | Validators | |
| `-----------` | | | | |
| | | `------------` |
`---------------------------` | |
`------------------`

9
book/art/runtime.bob Normal file

@ -0,0 +1,9 @@
.-----------. .-------------. .--------------. .--------------------.
| sigverify +--->| lock memory +--->| validate fee +--->| allocate accounts +--->
`-----------` `-------------` `--------------` `--------------------`
.------------. .---------. .--------------. .--------------.
--->| load data +--->| execute +--->| commit data +-->|unlock memory |
`------------` `---------` `--------------` `--------------`

20
book/art/sdk-tools.bob Normal file

@ -0,0 +1,20 @@
.----------------------------------------.
| Solana Runtime |
| |
| .------------. .------------. |
| | | | | |
.-------->| Verifier +-->| Accounts | |
| | | | | | |
.----------. | | `------------` `------------` |
| +--------` | ^ |
| Client | | LoadAccounts | |
| +--------. | .----------------` |
`----------` | | | |
| | .------+-----. .-------------. |
| | | | | | |
`-------->| Loader +-->| Interpreter | |
| | | | | |
| `------------` `-------------` |
| |
`----------------------------------------`

18
book/art/tpu.bob Normal file

@ -0,0 +1,18 @@
.------------------------------------------------------.
| TPU .-------------. |
| | PoH Service | |
| `--------+----` |
| ^ | |
| | v |
| .-------. .-----------. .-+-------. .--------. | .------------.
.---------. | | Fetch | | SigVerify | | Banking | | Ledger | | | Broadcast |
| Clients |--->| Stage |->| Stage |->| Stage |-->| Write +---->| Service |
`---------` | | | | | | | | Stage | | | |
| `-------` `-----------` `----+----` `--------` | `------------`
| | |
`---------------------------------|--------------------`
|
v
.------.
| Bank |
`------`

22
book/art/tvu.bob Normal file

@ -0,0 +1,22 @@
.--------.
| Leader |
`--------`
^
|
.------------------------------------|---------------------------------.
| TVU | |
| | |
| .-------. .------------. .----+---. .--------. .---------. |
.------------. | | Blob | | Retransmit | | Replay | | Ledger | | Storage | |
| Upstream +----->| Fetch |-->| Stage |-->| Stage |-->| Write |-->| Stage | |
| Validators | | | Stage | | | | | | Stage | | | |
`------------` | `-------` `----+-------` `----+---` `--------` `---------` |
| ^ | | |
| | | | |
`--------|----------|----------------|---------------------------------`
| | |
| V v
.+-----------. .------.
| Gossip | | Bank |
| Service | `------`
`------------`

10
book/book.toml Normal file

@ -0,0 +1,10 @@
[book]
title = "Solana: Blockchain Rebuilt for Scale"
authors = ["The Solana Team"]
[build]
build-dir = "html"
create-missing = false
[output.html]
theme = "theme"

18
book/build.sh Executable file

@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"
cargo_install_unless() {
declare crate=$1
shift
"$@" > /dev/null 2>&1 || \
cargo install "$crate"
}
export PATH=$CARGO_HOME/bin:$PATH
cargo_install_unless mdbook mdbook --help
cargo_install_unless svgbob_cli svgbob --help
make -j"$(nproc)"

33
book/makefile Normal file

@ -0,0 +1,33 @@
BOB_SRCS=$(wildcard art/*.bob)
MD_SRCS=$(wildcard src/*.md)
SVG_IMGS=$(BOB_SRCS:art/%.bob=src/img/%.svg)
all: html/index.html
test: src/tests.ok
open: all
mdbook build --open
watch: $(SVG_IMGS)
mdbook watch
src/img/%.svg: art/%.bob
@mkdir -p $(@D)
svgbob < $< > $@
src/%.md: %.md
@mkdir -p $(@D)
@cp $< $@
src/tests.ok: $(SVG_IMGS) $(MD_SRCS)
mdbook test
touch $@
html/index.html: src/tests.ok
mdbook build
clean:
rm -f $(SVG_IMGS) src/tests.ok
rm -rf html

37
book/src/SUMMARY.md Normal file

@ -0,0 +1,37 @@
# Solana Architecture
- [Introduction](introduction.md)
- [Terminology](terminology.md)
- [Getting Started](getting-started.md)
- [Example: Web Wallet](webwallet.md)
- [Programming Model](programs.md)
- [Example: Tic-Tac-Toe](tictactoe.md)
- [Drones](drones.md)
- [A Solana Cluster](cluster.md)
- [Synchronization](synchronization.md)
- [Leader Rotation](leader-rotation.md)
- [Fork Generation](fork-generation.md)
- [Anatomy of a Fullnode](fullnode.md)
- [TPU](tpu.md)
- [TVU](tvu.md)
- [Gossip Service](gossip.md)
- [The Runtime](runtime.md)
- [Proposed Architectural Changes](proposals.md)
- [Ledger Replication](ledger-replication.md)
- [Secure Enclave](enclave.md)
- [Staking Rewards](staking-rewards.md)
- [Fork Selection](fork-selection.md)
- [Entry Tree](entry-tree.md)
## Appendix
- [Appendix](appendix.md)
- [JSON RPC API](jsonrpc-api.md)
- [JavaScript API](javascript-api.md)
- [solana-wallet CLI](wallet.md)

4
book/src/appendix.md Normal file

@ -0,0 +1,4 @@
# Appendix
The following sections contain reference material you may find useful in your
Solana journey.

99
book/src/cluster.md Normal file

@ -0,0 +1,99 @@
# A Solana Cluster
A Solana cluster is a set of fullnodes working together to serve client
transactions and maintain the integrity of the ledger. Many clusters may
coexist. When two clusters share a common genesis block, they attempt to
converge. Otherwise, they simply ignore the existence of the other.
Transactions sent to the wrong one are quietly rejected. In this chapter, we'll
discuss how a cluster is created, how nodes join the cluster, how they share
the ledger, how they ensure the ledger is replicated, and how they cope with
buggy and malicious nodes.
## Creating a Cluster
Before starting any fullnodes, one first needs to create a *genesis block*.
The block contains entries referencing two public keys, a *mint* and a
*bootstrap leader*. The fullnode holding the bootstrap leader's secret key is
responsible for appending the first entries to the ledger. It initializes its
internal state with the mint's account. That account will hold the number of
native tokens defined by the genesis block. The second fullnode then contacts
the bootstrap leader to register as a *validator* or *replicator*. Additional
fullnodes then register with any registered member of the cluster.
A validator receives all entries from the leader and submits votes confirming
those entries are valid. After voting, the validator is expected to store those
entries until replicator nodes submit proofs that they have stored copies of
them. Once the validator observes that a sufficient number of copies exist, it deletes
its copy.
## Joining a Cluster
Fullnodes and replicators enter the cluster via registration messages sent to
its *control plane*. The control plane is implemented using a *gossip*
protocol, meaning that a node may register with any existing node, and expect
its registration to propagate to all nodes in the cluster. The time it takes
for all nodes to synchronize is proportional to the square of the number of
nodes participating in the cluster. Algorithmically, that's considered very
slow, but in exchange for that time, a node is assured that it eventually has
all the same information as every other node, and that that information cannot
be censored by any one node.
## Sending Transactions to a Cluster
Clients send transactions to any fullnode's Transaction Processing Unit (TPU)
port. If the node is in the validator role, it forwards the transaction to the
designated leader. If in the leader role, the node bundles incoming
transactions, timestamps them creating an *entry*, and pushes them onto the
cluster's *data plane*. Once on the data plane, the transactions are validated
by validator nodes and replicated by replicator nodes, effectively appending
them to the ledger.
## Confirming Transactions
A Solana cluster is capable of subsecond *confirmation* for up to 150 nodes
with plans to scale up to hundreds of thousands of nodes. Once fully
implemented, confirmation times are expected to increase only with the
logarithm of the number of validators, where the logarithm's base is very high.
If the base is one thousand, for example, it means that for the first thousand
nodes, confirmation will be the duration of three network hops plus the time it
takes the slowest validator of a supermajority to vote. For the next million
nodes, confirmation increases by only one network hop.
Solana defines confirmation as the duration of time from when the leader
timestamps a new entry to the moment when it recognizes a supermajority of
ledger votes.
A gossip network is much too slow to achieve subsecond confirmation once the
network grows beyond a certain size. The time it takes to send messages to all
nodes is proportional to the square of the number of nodes. If a blockchain
wants to achieve low confirmation times and attempts to do so using a gossip network,
it will be forced to centralize to just a handful of nodes.
Scalable confirmation can be achieved using the following combination of
techniques:
1. Timestamp transactions with a VDF sample and sign the timestamp.
2. Split the transactions into batches, send each to separate nodes and have
each node share its batch with its peers.
3. Repeat the previous step recursively until all nodes have all batches.
Solana rotates leaders at fixed intervals, called *slots*. Each leader may only
produce entries during its allotted slot. The leader therefore timestamps
transactions so that validators may look up the public key of the designated
leader. The leader then signs the timestamp so that a validator may verify the
signature, proving the signer is the owner of the designated leader's public key.
Next, transactions are broken into batches so that a node can send transactions
to multiple parties without making multiple copies. If, for example, the leader
needed to send 60 transactions to 6 nodes, it would break that collection of 60
into batches of 10 transactions and send one to each node. This allows the
leader to put 60 transactions on the wire, not 60 transactions for each node.
Each node then shares its batch with its peers. Once the node has collected all
6 batches, it reconstructs the original set of 60 transactions.
A batch of transactions can only be split so many times before it is so small
that header information becomes the primary consumer of network bandwidth. At
the time of this writing, the approach is scaling well up to about 150
validators. To scale up to hundreds of thousands of validators, each node can
apply the same technique as the leader node to another set of nodes of equal
size. We call the technique *data plane fanout*, but it is not yet implemented.
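The batching arithmetic above can be illustrated with a short sketch. This is a simplified, hypothetical example of the chunking step only (the peer-to-peer sharing of batches is summarized in comments); `split_into_batches` is an illustrative helper, not the actual broadcast-stage code.
```rust
// Sketch of the batching described above: a leader with 60 transactions and 6
// peers sends one 10-transaction batch to each peer, so only 60 transactions
// hit the wire instead of 60 per peer.
fn split_into_batches<T: Clone>(transactions: &[T], num_peers: usize) -> Vec<Vec<T>> {
    let batch_size = (transactions.len() + num_peers - 1) / num_peers;
    transactions
        .chunks(batch_size.max(1))
        .map(|chunk| chunk.to_vec())
        .collect()
}

fn main() {
    let transactions: Vec<u64> = (0..60).collect();
    let batches = split_into_batches(&transactions, 6);
    assert_eq!(batches.len(), 6);
    assert!(batches.iter().all(|b| b.len() == 10));
    // Each peer then shares its batch with the other peers; once a node holds
    // all 6 batches it reconstructs the original 60 transactions.
    let total_sent_by_leader: usize = batches.iter().map(|b| b.len()).sum();
    assert_eq!(total_sent_by_leader, 60);
}
```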

86
book/src/drones.md Normal file

@ -0,0 +1,86 @@
# Creating Signing Services with Drones
This chapter defines an off-chain service called a *drone*, which acts as
custodian of a user's private key. In its simplest form, it can be used to
create *airdrop* transactions, a token transfer from the drone's account to a
client's account.
## Signing Service
A drone is a simple signing service. It listens for requests to sign
*transaction data*. Once received, the drone validates the request however it
sees fit. It may, for example, only accept transaction data with a
`SystemInstruction::Move` instruction transferring only up to a certain amount
of tokens. If the drone accepts the transaction, it returns an `Ok(Signature)`
where `Signature` is a signature of the transaction data using the drone's
private key. If it rejects the transaction data, it returns a `DroneError`
describing why.
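As a rough illustration of that validation step, the sketch below accepts only a `Move`-style request under a fixed token cap and otherwise returns an error. The type and function names here are hypothetical stand-ins, not the drone's actual API.
```rust
const MAX_AIRDROP_TOKENS: u64 = 1_000;

#[derive(Debug)]
enum DroneError {
    AmountTooLarge(u64),
    UnsupportedInstruction,
}

/// A simplified stand-in for the transaction data a client submits for signing.
struct AirdropRequest {
    tokens: u64,
    is_move_instruction: bool,
}

/// Stand-in for a 64-byte Ed25519 signature.
type Signature = [u8; 64];

fn sign_airdrop(req: &AirdropRequest) -> Result<Signature, DroneError> {
    if !req.is_move_instruction {
        return Err(DroneError::UnsupportedInstruction);
    }
    if req.tokens > MAX_AIRDROP_TOKENS {
        return Err(DroneError::AmountTooLarge(req.tokens));
    }
    // A real drone would sign the serialized transaction data with its private
    // key here; a placeholder signature stands in for that step.
    Ok([0u8; 64])
}

fn main() {
    let req = AirdropRequest { tokens: 50, is_move_instruction: true };
    match sign_airdrop(&req) {
        Ok(signature) => println!("signed, {} bytes", signature.len()),
        Err(err) => println!("rejected: {:?}", err),
    }
}
```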
## Examples
### Granting access to an on-chain game
The creator of an on-chain tic-tac-toe game hosts a drone that responds to airdrop
requests containing an `InitGame` instruction. The drone signs the transaction
data in the request and returns it, thereby authorizing its account to pay the
transaction fee as well as seeding the game's account with enough tokens to
play it. The user then creates a transaction from its transaction data and the
drone's signature and submits it to the Solana cluster. Each time the user
interacts with the game, the game pays the user enough tokens to pay the next
transaction fee to advance the game. At that point, the user may choose to keep
the tokens instead of advancing the game. If the creator wants to defend
against that case, they could require the user to return to the drone to sign
each instruction.
### Worldwide airdrop of a new token
The creator of a new on-chain token (ERC-20 interface) may wish to do a worldwide
airdrop to distribute its tokens to millions of users over just a few seconds.
That drone cannot spend resources interacting with the Solana cluster. Instead,
the drone should only verify the client is unique and human, and then return
the signature. It may also want to listen to the Solana cluster for recent
entry IDs to support client retries and to ensure the airdrop is targeting the
desired cluster.
## Attack vectors
### Invalid last_id
The drone may prefer that its airdrops only target a particular Solana cluster. To
do that, it listens to the cluster for new entry IDs and ensures any requests
reference a recent one.
Note: listening for new entry IDs assumes the drone is either a fullnode or a
*light* client. At the time of this writing, light clients have not been
implemented and no proposal describes them. This document assumes one of the
following approaches will be taken:
1. Define and implement a light client
2. Embed a fullnode
3. Query the jsonrpc API for the latest last id at a rate slightly faster than
ticks are produced.
### Double spends
A client may request multiple airdrops before the first has been submitted to
the ledger. The client may do this maliciously or simply because it thinks the
first request was dropped. The drone should not simply query the cluster to
ensure the client has not already received an airdrop. Instead, it should use
`last_id` to ensure the previous request is expired before signing another.
Note that the Solana cluster will reject any transaction with a `last_id`
beyond a certain *age*.
### Denial of Service
If the transaction data size is smaller than the size of the returned signature
(or descriptive error), a single client can flood the network. Considering
that a simple `Move` operation requires two public keys (each 32 bytes) and a
`fee` field, and that the returned signature is 64 bytes (and a byte to
indicate `Ok`), consideration for this attack may not be required.
In the current design, the drone accepts TCP connections. This allows clients
to DoS the service by simply opening lots of idle connections. Switching to UDP
may be preferred. The transaction data will be smaller than a UDP packet since
the transaction sent to the Solana cluster is already pinned to using UDP.

181
book/src/enclave.md Normal file

@ -0,0 +1,181 @@
# Signing using Secure Enclave
This document defines the security mechanism of signing keys used by the
fullnodes. Every node contains an asymmetric key that's used for signing
and verifying the votes. The node signs the vote transactions using its private
key. Other entities can verify the signature using the node's public key.
The node's stake or its resources could be compromised if its private key is
used to sign incorrect data (e.g. voting on multiple forks of the ledger). So,
it's important to safeguard the private key.
Secure Enclaves (such as SGX) provide a layer of memory and computation
protection. An enclave can be used to generate an asymmetric key and keep the
private key in its protected memory. It can expose an API that user (untrusted)
code can use for signing the transactions.
## Message Flow
1. The node initializes the enclave at startup
* The enclave generates an asymmetric key and returns the public key to the
node
* The keypair is ephemeral. A new keypair is generated on node bootup. A
new keypair might also be generated at runtime based on some TBD
criteria.
* The enclave returns its attestation report to the node
2. The node performs attestation of the enclave (e.g. using Intel's IAS APIs)
* The node ensures that the Secure Enclave is running on a TPM and is
signed by a trusted party
3. The owner of the node grants ephemeral key permission to use its stake. This
process is TBD.
4. The node's untrusted, non-enclave software calls trusted enclave software
using its interface to sign transactions and other data.
* In case of vote signing, the node needs to verify the PoH. The PoH
verification is an integral part of signing. The enclave would be
presented with some verifiable data that it'll check before signing the vote.
* The process of generating the verifiable data in untrusted space is TBD
## PoH Verification
1. When the node votes on an entry `X`, there's a lockout period `N`, for
which it cannot vote on a fork that does not contain `X` in its history.
2. Every time the node votes on the derivative of `X`, say `X+y`, the lockout
period for `X` increases by a factor `F` (i.e. the duration the node cannot vote on
a fork that does not contain `X` increases).
* The lockout period for `X+y` is still `N` until the node votes again.
3. The lockout period increment is capped (e.g. factor `F` applies maximum 32
times).
4. The signing enclave must not sign a vote that violates this policy. This
means
* Enclave is initialized with `N`, `F` and `Factor cap`
* Enclave stores `Factor cap` number of entry IDs on which the node had
previously voted
* The sign request contains the entry ID for the new vote
* Enclave verifies that new vote's entry ID is on the correct fork
(following the rules #1 and #2 above)
## Ancestor Verification
This is an alternate, albeit less certain, approach to verifying the voting fork.
1. The validator maintains an active set of nodes in the cluster
2. It observes the votes from the active set in the last voting period
3. It stores the ancestor/last_tick at which each node voted
4. It sends new vote request to vote-signing service
* It includes previous votes from nodes in the active set, and their
corresponding ancestors
5. The signer checks whether the previous votes contain a vote from the validator,
and whether the vote's ancestor matches that of the majority of the nodes
* It signs the new vote if the check is successful
* It asserts (raises an alarm of some sort) if the check is unsuccessful
The premise is that the validator can be spoofed at most once to vote on
incorrect data. If someone hijacks the validator and submits a vote request for
bogus data, that vote will not be included in the PoH (as it'll be rejected by
the cluster). The next time the validator sends a request to sign the vote, the
signing service will detect that validator's last vote is missing (as part of
#5 above).
## Fork determination
Due to the fact that the enclave cannot process PoH, it has no direct knowledge
of the fork history of a submitted validator vote. Each enclave should be initialized
with the current *active set* of public keys. A validator should submit its
current vote along with the votes of the active set (including itself) that it
observed in the slot of its previous vote. In this way, the enclave can surmise
the votes accompanying the validator's previous vote and thus the fork being
voted on. This is not possible for the validator's initial submitted vote, as
it will not have a 'previous' slot to reference. To account for this, a short
voting freeze should apply until the second vote is submitted containing the
votes within the active set, along with its own vote, at the height of the
initial vote.
## Enclave configuration
A staking client should be configurable to prevent voting on inactive forks.
This mechanism should use the client's known active set `N_active` along with a
threshold vote `N_vote` and a threshold depth `N_depth` to determine whether or
not to continue voting on a submitted fork. This configuration should take the
form of a rule such that the client will only vote on a fork if it observes
more than `N_vote` votes at `N_depth`. Practically, this means the client is
confirming that it has observed some probability of economic finality of the
submitted fork at a depth where an additional vote would create a lockout for
an undesirable amount of time if that fork turns out not to be live.
## Signing service
The signing service consists of a JSON RPC server and a request processor.
At startup, it starts the RPC server at a configured port and waits for
client/validator requests. It expects the following type of requests.
1. Register a new validator node
* The request contains validator's identity (public key)
* The request is signed with validator's private key
* The service will drop the request if signature of the request cannot be
verified
* The service will create a new voting asymmetric key for the validator,
and return the public key as a response
* If a validator retries registration, the service will return the public key from the
pre-existing keypair
2. Sign a vote
* The request contains voting transaction, and all verification data (as
described in Ancestor Verification)
* The request is signed with validator's private key
* The service will drop the request if signature of the request cannot be
verified
* The service will verify the voting data
* The service will return a signed transaction (or signature for the
transaction)
The service could potentially have different variations, depending on the
hardware platform capabilities. For example, if the hardware supports a secure
enclave, the service can offload asymmetric key generation, and private key
protection to the enclave. A less secure implementation of the service could
simply carry the keypair in the process memory.
## Validator voting
A validator node, at startup, creates a new vote account and registers it with
the cluster. This is done by submitting a new "vote register" transaction. The
transaction contains the validator's keypair, its vote signing public key, and
some additional information. The other nodes on the cluster process this
transaction and include the new validator in the active set.
Subsequently, the validator submits a "new vote" transaction on a voting event.
This vote is signed with the validator's voting private key.
The validator code will change to interface with Signing service for "vote
register" and "new vote" use cases.
### Configuration
The validator node will be configured with Signing service's network endpoint
(IP/Port).
### Register
At startup, the validator will call Signing service using JSON RPC to register
itself. The RPC call will return the voting public key for the validator node.
The validator will create a new "vote register" transaction including this
public key in it, and submit it to the cluster.
### Collect votes for last period
The validator will look up the votes submitted by all the nodes in the cluster
for the last voting period. This information will be submitted to signing
service with new vote signing request.
### New Vote Signing
The validator will create a "new vote" transaction and send it to the signing
service using JSON RPC. The RPC request will also include the vote verification
data. On success, RPC call will return the signature for the vote. On failure,
RPC call will return the failure code.
## Challenges
1. The nodes are currently being configured with asymmetric keys that are
generated and stored in PKCS8 files.
2. The genesis block contains an entry that's signed with leader's private key.
This entry is used to identify the primordial leader.
3. Generation of verifiable data in untrusted space for PoH verification in the
enclave.
4. Need infrastructure for granting stake to an ephemeral key.

121
book/src/entry-tree.md Normal file

@ -0,0 +1,121 @@
# Entry Tree
This document proposes a change to ledger and window to support Solana's [fork
generation](fork-generation.md) behavior.
## Current Design
### Functionality of Window And Ledger
The basic responsibilities of the window and the ledger in a Solana fullnode
are:
1. Window: serve as a temporary, RAM-backed store of blobs of the PoH chain
for re-ordering and assembly into contiguous blocks to be sent to the bank
for verification.
2. Window: serve as a RAM-backed repair facility for other validator nodes,
which may query the network for as-yet unreceived blobs.
3. Ledger: provide disk-based storage of the PoH chain in case of node
restart.
4. Ledger: provide disk-backed repair facility for when the (smaller)
RAM-backed window doesn't cover the repair request.
The window is at the front of a validator node's processing pipeline: blobs are
received, cached, and re-ordered before being deserialized into Entries, passed to
the bank for verification, and finally on to the ledger, which is at the back
of a validator node's pipeline.
The window holds blobs (the over-the-air format, serialized Entries,
one-per-blob). The ledger holds serialized Entries without any blob
information.
### Limitations
#### One-dimensional key space
The window and the ledger are indexed by ledger height, which is the number of
Entries ever generated in the PoH chain until the current blob. This
limitation prevents the window and the ledger from storing the overlapping
histories possible in Solana's consensus protocol.
#### Limited caching
The window is a circular buffer. It cannot accept blobs that are farther in
the future than the range the window is currently working on. If a blob arrives that is too
far ahead, it is dropped and will subsequently need to be repaired, incurring
further delay for the node.
#### Loss of blob signatures
Because the blob signatures are stripped before being stored by the ledger,
repair requests served from the ledger can't be verified to the original
leader.
#### Rollback and checkpoint, switching forks, separate functions
The window and the ledger can't handle replay of alternate forks. Once a Blob
has passed through the window, it's in the past. The replay stage of a
validator will need to roll back to a previous checkpoint and decode an
alternate set of Blobs to the Bank. The separated and one-way nature of window
and ledger makes this hard.
## New Design
A unified window and ledger allows a validator to record every blob it observes
on the network, in any order, as long as the blob is consistent with the
network's leader schedule.
Blobs are moved to a fork-able key space: the tuple of `leader slot` + `blob
index` (within the slot). This permits the skip-list structure of the Solana
protocol to be stored in its entirety, without a priori choosing which fork to
follow, which Entries to persist or when to persist them.
Repair requests for recent blobs are served out of RAM or recent files and out
of deeper storage for less recent blobs, as implemented by the store backing
EntryTree.
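A minimal sketch of that key space follows, using an in-memory ordered map in place of the persistent store that backs the real EntryTree; the names below are illustrative only.
```rust
use std::collections::BTreeMap;

type Slot = u64;
type BlobIndex = u64;

#[derive(Default)]
struct EntryTree {
    // Keying by (leader slot, blob index) lets blobs from multiple forks
    // coexist and keeps a slot's blobs contiguous for sequential reads.
    blobs: BTreeMap<(Slot, BlobIndex), Vec<u8>>,
}

impl EntryTree {
    fn insert(&mut self, slot: Slot, index: BlobIndex, blob: Vec<u8>) {
        self.blobs.insert((slot, index), blob);
    }

    /// Collect every blob recorded for a given leader slot, in index order.
    fn slot_blobs(&self, slot: Slot) -> Vec<(BlobIndex, &Vec<u8>)> {
        self.blobs
            .range((slot, 0)..=(slot, BlobIndex::MAX))
            .map(|(&(_, index), blob)| (index, blob))
            .collect()
    }
}

fn main() {
    let mut tree = EntryTree::default();
    // Blobs from two different slots (e.g. competing forks) can coexist.
    tree.insert(3, 0, vec![0xde, 0xad]);
    tree.insert(4, 0, vec![0xbe, 0xef]);
    tree.insert(3, 1, vec![0xca, 0xfe]);
    for (index, blob) in tree.slot_blobs(3) {
        println!("slot 3, blob {}: {:?}", index, blob);
    }
}
```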
### Functionalities of EntryTree
1. Persistence: the EntryTree lives in the front of the node's verification
pipeline, right behind network receive and signature verification. If the
blob received is consistent with the leader schedule (i.e. was signed by the
leader for the indicated slot), it is immediately stored.
2. Repair: repair is the same as window repair above, but able to serve any
blob that's been received. EntryTree stores blobs with signatures,
preserving the chain of origination.
3. Forks: EntryTree supports random access of blobs, so can support a
validator's need to rollback and replay from a Bank checkpoint.
4. Restart: with proper pruning/culling, the EntryTree can be replayed by
ordered enumeration of entries from slot 0. The logic of the replay stage
(i.e. dealing with forks) will have to be used for the most recent entries in
the EntryTree.
### Interfacing with Bank
The bank exposes the following to the replay stage (a rough sketch follows this list):
1. prev_id: which PoH chain it's working on as indicated by the id of the last
entry it processed
2. tick_height: the ticks in the PoH chain currently being verified by this
bank
3. votes: a stack of records that contain
1. prev_ids: what anything after this vote must chain to in PoH
2. tick height: the tick_height at which this vote was cast
3. lockout period: how long a chain must be observed to be in the ledger to
be able to be chained below this vote
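A rough sketch of the shape of that interface, with illustrative type and field names rather than the bank's actual API:
```rust
type Hash = [u8; 32];

struct VoteRecord {
    /// What anything after this vote must chain to in PoH.
    prev_ids: Vec<Hash>,
    /// The tick_height at which this vote was cast.
    tick_height: u64,
    /// How long a chain must be observed in the ledger before it may be
    /// chained below this vote.
    lockout_period: u64,
}

struct BankReplayView {
    /// Id of the last entry the bank processed, i.e. which PoH chain it's on.
    prev_id: Hash,
    /// The ticks in the PoH chain currently being verified by this bank.
    tick_height: u64,
    /// A stack of vote records, newest last.
    votes: Vec<VoteRecord>,
}

fn main() {
    let view = BankReplayView { prev_id: [0u8; 32], tick_height: 0, votes: vec![] };
    println!("bank at tick {} on chain {:?}", view.tick_height, &view.prev_id[..4]);
}
```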
Replay stage uses EntryTree APIs to find the longest chain of entries it can
hang off a previous vote. If that chain of entries does not hang off the
latest vote, the replay stage rolls back the bank to that vote and replays the
chain from there.
### Pruning EntryTree
Once EntryTree entries are old enough, representing all the possible forks
becomes less useful, perhaps even problematic for replay upon restart. Once a
validator's votes have reached max lockout, however, any EntryTree contents
that are not on the PoH chain for that vote can be pruned, or expunged.
Replicator nodes will be responsible for storing really old ledger contents,
and validators need only persist their bank periodically.

102
book/src/fork-generation.md Normal file

@ -0,0 +1,102 @@
# Fork Generation
This chapter describes how forks naturally occur as a consequence of [leader
rotation](leader-rotation.md).
## Overview
Nodes take turns being leader and generating the PoH that encodes state
changes. The cluster can tolerate loss of connection to any leader by
synthesizing what the leader ***would*** have generated had it been connected
but not ingesting any state changes. The possible number of forks is thereby
limited to a "there/not-there" skip list of forks that may arise on leader
rotation slot boundaries. At any given slot, only a single leader's
transactions will be accepted.
## Message Flow
1. Transactions are ingested by the current leader.
2. Leader filters valid transactions.
3. Leader executes valid transactions updating its state.
4. Leader packages transactions into entries based off its current PoH slot.
5. Leader transmits the entries to validator nodes (in signed blobs)
1. The PoH stream includes ticks; empty entries that indicate liveness of
the leader and the passage of time on the cluster.
2. A leader's stream begins with the tick entries necessary to complete the PoH
back to the leader's most recently observed prior leader slot.
6. Validators retransmit entries to peers in their set and to further
downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. At specific times, i.e. specific PoH tick counts, validators transmit votes
to the leader.
1. Votes are signatures of the hash of the computed state at that PoH tick
count
2. Votes are also propagated via gossip
10. Leader executes the votes as any other transaction and broadcasts them to
the cluster.
11. Validators observe their votes and all the votes from the cluster.
## Partitions, Forks
Forks can arise at PoH tick counts that correspond to a vote. The next leader
may not have observed the last vote slot and may start their slot with
generated virtual PoH entries. These empty ticks are generated by all nodes in
the cluster at a cluster-configured rate of `Z` hashes per tick.
There are only two possible versions of the PoH during a voting slot: PoH with
`T` ticks and entries generated by the current leader, or PoH with just ticks.
The "just ticks" version of the PoH can be thought of as a virtual ledger, one
that all nodes in the cluster can derive from the last tick in the previous
slot.
Validators can ignore forks at other points (e.g. from the wrong leader), or
slash the leader responsible for the fork.
Validators vote based on a greedy choice to maximize their reward described in
[forks selection](fork-selection.md).
### Validator's View
#### Time Progression
The diagram below represents a validator's view of the
PoH stream with possible forks over time. L1, L2, etc. are leader slots, and
`E`s represent entries from that leader during that leader's slot. The 'x's
represent ticks only, and time flows downwards in the diagram.
<img alt="Fork generation" src="img/fork-generation.svg" class="center"/>
Note that an `E` appearing on 2 forks at the same slot is a slashable
condition, so a validator observing `E3` and `E3'` can slash L3 and safely
choose `x` for that slot. Once a validator commits to a fork, other forks can
be discarded below that tick count. For any slot, validators need only
consider a single "has entries" chain or a "ticks only" chain to be proposed by
a leader. But multiple virtual entries may overlap as they link back to a
previous slot.
#### Time Division
It's useful to consider leader rotation over PoH tick count as time division of
the job of encoding state for the cluster. The following table presents the
above tree of forks as a time-divided ledger.
leader slot | L1 | L2 | L3 | L4 | L5
-------|----|----|----|----|----
data | E1| E2 | E3 | E4 | E5
ticks since prev | | | | x | xx
Note that only data from leader L3 will be accepted during leader slot L3.
Data from L3 may include "catchup" ticks back to a slot other than L2 if L3 did
not observe L2's data. L4 and L5's transmissions include the "ticks to prev"
PoH entries.
This arrangement of the network data streams permits nodes to save exactly this
to the ledger for replay, restart, and checkpoints.
### Leader's View
When a new leader begins a slot, it must first transmit any PoH (ticks)
required to link the new slot with the most recently observed and voted slot.
The fork the leader proposes would link the current slot to a previous fork
that the leader has voted on with virtual ticks.

155
book/src/fork-selection.md Normal file
View File

@ -0,0 +1,155 @@
# Fork Selection
This article describes Solana's *Nakamoto Fork Selection* algorithm based on time
locks. It satisfies the following properties:
* A voter can eventually recover from voting on a fork that doesn't become the
fork with the desired network finality.
* If the voters share a common ancestor then they will converge to a fork
containing that ancestor no matter how they are partitioned. The converged
ancestor may not be the latest possible ancestor at the start of the fork.
* Rollback requires exponentially more time for older votes than for newer
votes.
* Voters have the freedom to set a minimum network confirmation threshold
before committing a vote to a higher lockout. This allows each voter to make
a trade-off between risk and reward. See [cost of rollback](#cost-of-rollback).
## Time
For networks like Solana, time can be the PoH hash count, which is a VDF that
provides a source of time before consensus. Other networks adopting this
approach would need to consider a global source of time.
For Solana, time uniquely identifies a specific leader for fork generation. At
any given time only 1 leader, which can be computed from the ledger itself, can
propose a fork. For more details, see [fork generation](fork-generation.md)
and [leader rotation](leader-rotation.md).
## Algorithm
The basic idea of this approach is to stack consensus votes. Each vote in the
stack is a confirmation of a fork. Each confirmed fork is an ancestor of the
fork above it. Each consensus vote has a `lockout` in units of time before the
validator can submit a vote that does not contain the confirmed fork as an
ancestor.
When a vote is added to the stack, the lockouts of all the previous votes in
the stack are doubled (more on this in [Rollback](#Rollback)). With each new
vote, a voter commits the previous votes to an ever-increasing lockout. At 32
votes we can consider the vote to be at `max lockout`; any votes with a lockout
equal to or above `1<<32` are dequeued (FIFO). Dequeuing a vote is the trigger
for a reward. If a vote expires before it is dequeued, it and all the votes
above it are popped (LIFO) from the vote stack. The voter needs to start
rebuilding the stack from that point.
### Rollback
Before a vote is pushed to the stack, all the votes with a lock expiration time
lower than the new vote's time are popped. After a rollback, lockouts are not
doubled until the voter catches up to the rollback height of votes.
For example, a vote stack with the following state:
| vote | vote time | lockout | lock expiration time |
|-----:|----------:|--------:|---------------------:|
| 4 | 4 | 2 | 6 |
| 3 | 3 | 4 | 7 |
| 2 | 2 | 8 | 10 |
| 1 | 1 | 16 | 17 |
*Vote 5* is at time 9, and the resulting state is
| vote | vote time | lockout | lock expiration time |
|-----:|----------:|--------:|---------------------:|
| 5 | 9 | 2 | 11 |
| 2 | 2 | 8 | 10 |
| 1 | 1 | 16 | 17 |
*Vote 6* is at time 10
| vote | vote time | lockout | lock expiration time |
|-----:|----------:|--------:|---------------------:|
| 6 | 10 | 2 | 12 |
| 5 | 9 | 4 | 13 |
| 2 | 2 | 8 | 10 |
| 1 | 1 | 16 | 17 |
At time 10 the new votes caught up to the previous votes. But *vote 2* expires
at 10, so when *vote 7* at time 11 is applied, *vote 2* and all the votes above
it are popped.
| vote | vote time | lockout | lock expiration time |
|-----:|----------:|--------:|---------------------:|
| 7 | 11 | 2 | 13 |
| 1 | 1 | 16 | 17 |
The lockout for vote 1 will not increase from 16 until the stack contains 5
votes.
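The push, expiration, and catch-up rules can be modeled with a small vote stack. The sketch below uses the stated parameters (starting lockout of 2, 2x growth, dequeue at `1<<32`) and reproduces the worked example; `VoteStack` and `Vote` are illustrative names, not types from the Solana codebase.
```rust
const INITIAL_LOCKOUT: u64 = 2;
const MAX_LOCKOUT: u64 = 1 << 32;

#[derive(Debug, Clone, Copy)]
struct Vote {
    time: u64,    // time (e.g. PoH count) at which the vote was cast
    lockout: u64, // current lockout, in the same time units
}

impl Vote {
    fn expiration(&self) -> u64 {
        self.time + self.lockout
    }
}

#[derive(Default)]
struct VoteStack {
    votes: Vec<Vote>, // oldest vote first, newest vote last
}

impl VoteStack {
    /// Apply a new vote at `time`, returning any votes dequeued at max lockout.
    fn push_vote(&mut self, time: u64) -> Vec<Vote> {
        // Pop (LIFO) the deepest expired vote and every vote above it.
        if let Some(i) = self.votes.iter().position(|v| v.expiration() < time) {
            self.votes.truncate(i);
        }
        self.votes.push(Vote { time, lockout: INITIAL_LOCKOUT });
        // Re-derive lockouts by depth from the top of the stack. A vote's
        // lockout only grows once newer votes have caught back up to its
        // depth, which reproduces the "no doubling until caught up" rule.
        let n = self.votes.len();
        for (i, vote) in self.votes.iter_mut().enumerate() {
            let depth = (n - 1 - i) as u32;
            vote.lockout = vote.lockout.max(INITIAL_LOCKOUT << depth);
        }
        // Dequeue (FIFO) any votes that reached max lockout; each dequeue is
        // the trigger for a reward.
        let dequeued: Vec<Vote> = self
            .votes
            .iter()
            .copied()
            .take_while(|v| v.lockout >= MAX_LOCKOUT)
            .collect();
        self.votes.drain(..dequeued.len());
        dequeued
    }
}

fn main() {
    // Reproduces the worked example above: votes at times 1..=4, then 9, 10, 11.
    let mut stack = VoteStack::default();
    for &t in [1u64, 2, 3, 4, 9, 10, 11].iter() {
        stack.push_vote(t);
        println!("after vote at {}: {:?}", t, stack.votes);
    }
}
```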
### Slashing and Rewards
The purpose of the lockout is to force a voter to commit opportunity cost to a
specific fork. Voters that violate the lockouts and vote for a diverging fork
within the lockout should be punished. Slashing or simply freezing the voter
from rewards for a long period of time can be used as punishment.
Voters should be rewarded for selecting the fork that the rest of the network
selected as often as possible. This is well-aligned with generating a reward
when the vote stack is full and the oldest vote needs to be dequeued. Thus a
reward should be generated for each successful dequeue.
### Cost of Rollback
Cost of rollback of *fork A* is defined as the cost in terms of lockout time to
the validators to confirm any other fork that does not include *fork A* as an
ancestor.
The **Economic Finality** of *fork A* can be calculated as the loss of all the
rewards from rollback of *fork A* and its descendants, plus the opportunity
cost of reward due to the exponentially growing lockout of the votes that have
confirmed *fork A*.
### Thresholds
Each voter can independently set a threshold of network commitment to a fork
before that voter commits to a fork. For example, at vote stack index 7, the
lockout is 256 time units. A voter may withhold votes and let votes 0-7 expire
unless the vote at index 7 has greater than 50% commitment in the network.
This allows each voter to independently control how much risk to commit to a
fork. Committing to forks at a higher frequency would allow the voter to earn
more rewards.
### Algorithm parameters
These parameters need to be tuned.
* Number of votes in the stack before dequeue occurs (32).
* Rate of growth for lockouts in the stack (2x).
* Starting default lockout (2).
* Threshold depth for minimum network commitment before committing to the fork
(8).
* Minimum network commitment size at threshold depth (50%+).
### Free Choice
A "Free Choice" is an unenforcible voter action. A voter that maximizes
self-reward over all possible futures should behave in such a way that the
system is stable, and the local greedy choice should result in a greedy choice
over all possible futures. A set of voters that are engaging in choices to
disrupt the protocol should be bound by their stake weight to the denial of
service. Two options exist for a voter:
* a voter can outrun previous voters in virtual generation and submit a
concurrent fork
* a voter can withhold a vote to observe multiple forks before voting
In both cases, the voters in the network have several forks to pick from
concurrently, even though each fork represents a different height. In both
cases it is impossible for the protocol to detect if the voter behavior is
intentional or not.
### Greedy Choice for Concurrent Forks
When evaluating multiple forks, each voter should pick the fork that will
maximize economic finality for the network, or the latest fork if all are equal.

29
book/src/fullnode.md Normal file

@ -0,0 +1,29 @@
# Anatomy of a Fullnode
<img alt="Fullnode block diagrams" src="img/fullnode.svg" class="center"/>
## Pipelining
The fullnodes make extensive use of an optimization common in CPU design,
called *pipelining*. Pipelining is the right tool for the job when there's a
stream of input data that needs to be processed by a sequence of steps, and
there's different hardware responsible for each. The quintessential example is
using a washer and dryer to wash/dry/fold several loads of laundry. Washing
must occur before drying and drying before folding, but each of the three
operations is performed by a separate unit. To maximize efficiency, one creates
a pipeline of *stages*. We'll call the washer one stage, the dryer another, and
the folding process a third. To run the pipeline, one adds a second load of
laundry to the washer just after the first load is added to the dryer.
Likewise, the third load is added to the washer after the second is in the
dryer and the first is being folded. In this way, one can make progress on
three loads of laundry simultaneously. Given infinite loads, the pipeline will
consistently complete a load at the rate of the slowest stage in the pipeline.
## Pipelining in the Fullnode
The fullnode contains two pipelined processes, one used in leader mode called
the TPU and one used in validator mode called the TVU. In both cases, the
hardware being pipelined is the same: the network input, the GPU cards, the CPU
cores, writes to disk, and the network output. What it does with that hardware
is different. The TPU exists to create ledger entries whereas the TVU exists
to validate them.

254
book/src/getting-started.md Normal file

@ -0,0 +1,254 @@
# Getting Started
The Solana git repository contains all the scripts you might need to spin up your
own local testnet. Depending on what you're looking to achieve, you may want to
run a different variation, as the full-fledged, performance-enhanced
multinode testnet is considerably more complex to set up than a Rust-only,
singlenode testnet. If you are looking to develop high-level features, such
as experimenting with smart contracts, save yourself some setup headaches and
stick to the Rust-only singlenode demo. If you're doing performance optimization
of the transaction pipeline, consider the enhanced singlenode demo. If you're
doing consensus work, you'll need at least a Rust-only multinode demo. If you want
to reproduce our TPS metrics, run the enhanced multinode demo.
For all four variations, you'd need the latest Rust toolchain and the Solana
source code:
First, install Rust's package manager Cargo.
```bash
$ curl https://sh.rustup.rs -sSf | sh
$ source $HOME/.cargo/env
```
Now checkout the code from github:
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```
The demo code is sometimes broken between releases as we add new low-level
features, so if this is your first time running the demo, you'll improve
your odds of success if you check out the
[latest release](https://github.com/solana-labs/solana/releases)
before proceeding:
```bash
$ TAG=$(git describe --tags $(git rev-list --tags --max-count=1))
$ git checkout $TAG
```
### Configuration Setup
The network is initialized with a genesis ledger and fullnode configuration files.
These files can be generated by running the following script.
```bash
$ ./multinode-demo/setup.sh
```
### Drone
In order for the fullnodes and clients to work, we'll need to
spin up a drone to give out some test tokens. The drone delivers Milton
Friedman-style "air drops" (free tokens to requesting clients) to be used in
test transactions.
Start the drone with:
```bash
$ ./multinode-demo/drone.sh
```
### Singlenode Testnet
Before you start a fullnode, make sure you know the IP address of the machine you
want to be the bootstrap leader for the demo, and make sure that udp ports 8000-10000 are
open on all the machines you want to test with.
Now start the bootstrap leader in a separate shell:
```bash
$ ./multinode-demo/bootstrap-leader.sh
```
Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to
receive transactions. The leader will request some tokens from the drone if it doesn't have any.
The drone does not need to be running for subsequent leader starts.
### Multinode Testnet
To run a multinode testnet, after starting a leader node, spin up some
additional full nodes in separate shells:
```bash
$ ./multinode-demo/fullnode-x.sh
```
To run a performance-enhanced full node on Linux,
[CUDA 10.0](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/bootstrap-leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/fullnode-x.sh
```
### Testnet Client Demo
Now that your singlenode or multinode testnet is up and running let's send it
some transactions!
In a separate shell start the client:
```bash
$ ./multinode-demo/client.sh # runs against localhost by default
```
What just happened? The client demo spins up several threads to send 500,000 transactions
to the testnet as quickly as it can. The client then pings the testnet periodically to see
how many transactions it processed in that time. Take note that the demo intentionally
floods the network with UDP packets, such that the network will almost certainly drop a
bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
demo completes after it has convinced itself the testnet won't process any additional
transactions. You should see several TPS measurements printed to the screen. In the
multinode variation, you'll see TPS measurements for each validator node as well.
### Testnet Debugging
There are some useful debug messages in the code; you can enable them on a per-module and per-level
basis. Before running a leader or validator, set the normal RUST\_LOG environment variable.
For example:
* To enable `info` everywhere and `debug` only in the solana::banking_stage module:
```bash
$ export RUST_LOG=solana=info,solana::banking_stage=debug
```
* To enable BPF program logging:
```bash
$ export RUST_LOG=solana_bpf_loader=trace
```
Generally we are using `debug` for infrequent debug messages, `trace` for potentially frequent
messages and `info` for performance-related logging.
You can also attach to a running process with GDB. The leader's process is named
_solana-fullnode_:
```bash
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```
This will dump all the threads' stack traces into gdb.txt
## Public Testnet
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.
```bash
$ ./multinode-demo/client.sh --network $(dig +short testnet.solana.com):8001 --duration 60
```
You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)
## Linux Snap
A Linux [Snap](https://snapcraft.io/) is available, which can be used to easily
get Solana running on supported Linux systems without building anything from
source for evaluation. Note that CUDA is not supported by the Snap so
performance will be limited.
The `edge` Snap channel is updated daily with the latest
development from the `master` branch. To install:
```bash
$ sudo snap install solana --edge --devmode
```
Once installed, the usual Solana programs will be available as `solana.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.
Update to the latest version at any time with:
```bash
$ snap info solana
$ sudo snap refresh solana --devmode
```
### Daemon Support
The snap supports running fullnodes and a drone as system daemons.
Run `sudo snap get solana` to view the current daemon configuration. To view
daemon logs:
1. Run `sudo snap logs -n=all solana` to view the daemon initialization log
2. Runtime logging can be found under `/var/snap/solana/current/bootstrap-leader/`,
`/var/snap/solana/current/fullnode/`, or `/var/snap/solana/current/drone/` depending
on which `mode=` was selected. Within each log directory the file `current`
contains the latest log, and the files `*.s` (if present) contain older rotated
logs.
Disable the daemon at any time by running:
```bash
$ sudo snap set solana mode=
```
Runtime configuration files for the daemon can be found in
`/var/snap/solana/current/config`.
#### Leader Daemon
```bash
$ sudo snap set solana mode=bootstrap-leader
```
`rsync` must be configured and running on the leader.
1. Ensure rsync is installed with `sudo apt-get -y install rsync`
2. Edit `/etc/rsyncd.conf` to include the following
```ini
[config]
path = /var/snap/solana/current/config
hosts allow = *
read only = true
```
3. Run `sudo systemctl enable rsync; sudo systemctl start rsync`
4. Test by running `rsync -Pzravv rsync://<ip-address-of-leader>/config
solana-config` from another machine. **If the leader is running on a cloud
provider it may be necessary to configure the Firewall rules to permit ingress
to port tcp:873, tcp:9900 and the port range udp:8000-udp:10000**
To run both the Leader and Drone:
```bash
$ sudo snap set solana mode=bootstrap-leader+drone
```
#### Validator daemon
```bash
$ sudo snap set solana mode=fullnode
```
By default the node will attempt to connect to **testnet.solana.com**. Override the
cluster entrypoint IP address by running:
```bash
$ sudo snap set solana mode=fullnode entrypoint-ip=127.0.0.1 #<-- change IP address
```
It's assumed that the node at the entrypoint IP will be running `rsync`
configured as described in the previous **Leader daemon** section.

79
book/src/gossip.md Normal file
View File

@ -0,0 +1,79 @@
# Gossip Service
The Gossip Service acts as a gateway to nodes in the control plane. Fullnodes
use the service to ensure information is available to all other nodes in a cluster.
The service broadcasts information using a gossip protocol.
## Gossip Overview
Nodes continuously share signed data objects among themselves in order to
manage a cluster. For example, they share their contact information, ledger
height, and votes.
Every tenth of a second, each node sends a "push" message and/or a "pull"
message. Push and pull messages may elicit responses, and push messages may be
forwarded on to others in the cluster.
Gossip runs on a well-known UDP/IP port or a port in a well-known range. Once
a cluster is bootstrapped, nodes advertise to each other where to find their
gossip endpoint (a socket address).
## Gossip Records
Records shared over gossip are arbitrary, but signed and versioned (with a
timestamp) as needed to make sense to the node receiving them. If a node
receives two records from the same source, it updates its own copy with the
record that has the most recent timestamp.
## Gossip Service Interface
### Push Message
A node sends a push message to tell the cluster it has information to share.
Nodes send push messages to `PUSH_FANOUT` push peers.
Upon receiving a push message, a node examines the message for:
1. Duplication: if the message has been seen before, the node responds with
`PushMessagePrune` and drops the message
2. New data: if the message is new to the node
* Stores the new information with an updated version in its cluster info and
purges any previous older value
* Stores the message in `pushed_once` (used for detecting duplicates,
purged after `PUSH_MSG_TIMEOUT * 5` ms)
* Retransmits the messages to its own push peers
3. Expiration: nodes drop push messages that are older than `PUSH_MSG_TIMEOUT`
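A minimal Rust sketch of this push-handling flow, using illustrative types and constants (`Record`, `PUSH_MSG_TIMEOUT_MS`) rather than the actual crate API:
```rust
use std::collections::HashMap;

// Illustrative constant; the real value lives in the gossip implementation.
const PUSH_MSG_TIMEOUT_MS: u64 = 5_000;

// Hypothetical record shared over gossip: a signed, timestamped value.
#[derive(Clone)]
struct Record {
    label: String,
    wallclock_ms: u64,
}

enum PushOutcome {
    Expired,            // 3. message is older than PUSH_MSG_TIMEOUT
    Prune,              // 1. duplicate: respond with PushMessagePrune
    Retransmit(Record), // 2. new data: forward to this node's push peers
}

struct Node {
    values: HashMap<String, Record>,   // cluster info keyed by label
    pushed_once: HashMap<String, u64>, // recently seen labels -> receive time
}

impl Node {
    fn handle_push(&mut self, msg: Record, now_ms: u64) -> PushOutcome {
        if now_ms.saturating_sub(msg.wallclock_ms) > PUSH_MSG_TIMEOUT_MS {
            return PushOutcome::Expired;
        }
        if self.pushed_once.contains_key(&msg.label) {
            return PushOutcome::Prune;
        }
        // Store the new value, replacing any older version, and remember it
        // for duplicate detection before retransmitting.
        self.values.insert(msg.label.clone(), msg.clone());
        self.pushed_once.insert(msg.label.clone(), now_ms);
        PushOutcome::Retransmit(msg)
    }
}
```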
### Push Peers, Prune Message
A node selects its push peers at random from the active set of known peers.
The node keeps this selection for a relatively long time. When a prune message
is received, the node drops the push peer that sent the prune. Prune is an
indication that there is another, faster path to that node than direct push.
The set of push peers is kept fresh by rotating a new node into the set every
`PUSH_MSG_TIMEOUT/2` milliseconds.
### Pull Message
A node sends a pull message to ask the cluster if there is any new information.
A pull message is sent to a single peer at random and comprises a Bloom filter
that represents things it already has. A node receiving a pull message
iterates over its values and constructs a pull response of things that miss the
filter and would fit in a message.
A node constructs the pull Bloom filter by iterating over current values and
recently purged values.
A node handles items in a pull response the same way it handles new data in a
push message.
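A simplified Rust sketch of constructing a pull response, with a `HashSet` of value hashes standing in for the real Bloom filter (the names and the message-size limit are illustrative):
```rust
use std::collections::HashSet;

// Stand-in for the Bloom filter: the set of value hashes the requester
// already has (current values plus recently purged values).
type Filter = HashSet<u64>;

#[derive(Clone)]
struct Value {
    hash: u64,
    bytes: Vec<u8>,
}

const MAX_RESPONSE_BYTES: usize = 1_024; // illustrative message size limit

// Iterate over local values and return the ones that miss the filter
// and still fit in a single response message.
fn build_pull_response(local: &[Value], filter: &Filter) -> Vec<Value> {
    let mut response = Vec::new();
    let mut size = 0;
    for v in local {
        if filter.contains(&v.hash) {
            continue; // requester already has this value
        }
        if size + v.bytes.len() > MAX_RESPONSE_BYTES {
            break; // response is full
        }
        size += v.bytes.len();
        response.push(v.clone());
    }
    response
}
```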
## Purging
Nodes retain prior versions of values (those updated by a pull or push) and
expired values (those older than `GOSSIP_PULL_CRDS_TIMEOUT_MS`) in
`purged_values` (things I recently had). Nodes purge `purged_values` that are
older than `5 * GOSSIP_PULL_CRDS_TIMEOUT_MS`.

117
book/src/introduction.md Normal file
View File

@ -0,0 +1,117 @@
# What is Solana?
Solana is the name of an open source project that is implementing a new
high-performance, permissionless blockchain. Solana is also the name of a
company headquartered in San Francisco that maintains the open source project.
# About this Book
This book describes the Solana open source project, a blockchain built from the
ground up for scale. The book covers why to use it, how to use it, how it
works, and why it will continue to work long after the company Solana closes
its doors. The goal of the Solana architecture is to demonstrate there exists a
set of software algorithms that when used in combination to implement a
blockchain, removes software as a performance bottleneck, allowing transaction
throughput to scale proportionally with network bandwidth. The architecture
goes on to satisfy all three desirable properties of a proper blockchain: that
it not only be scalable, but that it is also secure and decentralized.
The architecture describes a theoretical upper bound of 710 thousand
transactions per second (tps) on a standard gigabit network and 28.4 million
tps on 40 gigabit. Furthermore, the architecture supports safe, concurrent
execution of programs authored in general purpose programming languages such as
C or Rust.
# Disclaimer
All claims, content, designs, algorithms, estimates, roadmaps, specifications,
and performance measurements described in this project are done with the
author's best effort. It is up to the reader to check and validate their
accuracy and truthfulness. Furthermore, nothing in this project constitutes a
solicitation for investment.
# History of the Solana Codebase
In November of 2017 Anatoly Yakovenko published a whitepaper describing Proof
of History, a technique for keeping time between computers that do not trust
one another. From Anatoly's previous experience designing distributed systems
at Qualcomm, Mesosphere and Dropbox, he knew that a reliable clock makes
network synchronization very simple. When synchronization is simple the
resulting network can be blazing fast, bound only by network bandwidth.
Anatoly watched as blockchain systems without clocks, such as Bitcoin and
Ethereum, struggled to scale beyond 15 transactions per second worldwide when
centralized payment systems such as Visa required peaks of 65,000. Without a
clock, it was clear they'd never graduate to being the global payment system or
global supercomputer they had dreamed to be. When Anatoly solved the problem of
getting computers that don't trust each other to agree on time, he knew he had
the key to bring 40 years of distributed systems research to the world of
blockchain. The resulting cluster wouldn't be just 10 times faster, or a 100
times, or a 1,000 times, but 10,000 times faster right out of the gate!
Anatoly's implementation began in a private codebase and was implemented in the
C programming language. Greg Fitzgerald, who had previously worked with Anatoly
at semiconductor giant Qualcomm Incorporated, encouraged him to reimplement the
project in the Rust programming language. Greg had worked on the LLVM compiler
infrastructure, which underlies both the Clang C/C++ compiler as well as the
Rust compiler. Greg claimed that the language's safety guarantees would improve
software productivity and that its lack of a garbage collector would allow
programs to perform as well as those written in C. Anatoly gave it a shot and
just two weeks later, had migrated his entire codebase to Rust. Sold. With
plans to weave all the world's transactions together on a single, scalable
blockchain, Anatoly called the project Loom.
On February 13th of 2018, Greg began prototyping the first open source
implementation of Anatoly's whitepaper. The project was published to GitHub
under the name Silk in the loomprotocol organization. On February 28th, Greg
made his first release, demonstrating 10 thousand signed transactions could be
verified and processed in just over half a second. Shortly after, another
former Qualcomm cohort, Stephen Akridge, demonstrated throughput could be
massively improved by offloading signature verification to graphics processors.
Anatoly recruited Greg, Stephen and three others to co-found a company, then
called Loom.
Around the same time, Ethereum-based project Loom Network sprung up and many
people were confused if they were the same project. The Loom team decided it
would rebrand. They chose the name Solana, a nod to a small beach town North of
San Diego called Solana Beach, where Anatoly, Greg and Stephen lived and surfed
for three years when they worked for Qualcomm. On March 28th, the team created
the Solana Labs GitHub organization and renamed Greg's prototype Silk to
Solana.
In June of 2018, the team scaled up the technology to run on cloud-based
networks and on July 19th, published a 50-node, permissioned, public testnet
consistently supporting bursts of 250,000 transactions per second. In the most
recent release, v0.10 Pillbox, the team published a permissioned testnet
running 150 nodes on a gigabit network and demonstrated soak tests processing
an *average* of 200 thousand transactions per second with bursts over 500
thousand. The project was also extended to support on-chain programs written in
the C programming language and run concurrently in a safe execution environment
called BPF. Next step: going permissionless.
# What is a Solana Cluster?
A cluster is a set of computers that work together and can be viewed from the
outside as a single system. A Solana cluster is a set of independently owned
computers working together (and sometimes against each other) to verify the
output of untrusted, user-submitted programs. A Solana cluster can be utilized
any time a user wants to preserve an immutable record of events in time or
programmatic interpretations of those events. One use is to track which of the
computers did meaningful work to keep the cluster running. Another use might be
to track the possession of real-world assets. In each case, the cluster
produces a record of events called the ledger. It will be preserved for the
lifetime of the cluster. As long as someone somewhere in the world maintains a
copy of the ledger, the output of its programs (which may contain a record of
who possesses what) will forever be reproducible, independent of the
organization that launched it.
# What are Sols?
A sol is the name of Solana's native token, which can be passed to nodes in a
Solana cluster in exchange for running an on-chain program or validating its
output. The Solana protocol defines that only 1 billion sols will ever exist,
but that the system may perform micropayments of fractional sols and that a sol
may be split as many as 34 times. The fractional sol is called a *lamport*. It
is named in honor of Solana's biggest technical influence, [Leslie
Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value
of approximately 0.0000000000582 sol (2^-34).
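A short sketch of that arithmetic (the constant name is illustrative, not an SDK symbol):
```rust
// 1 sol can be split 34 times, so 1 sol = 2^34 lamports.
const LAMPORTS_PER_SOL: u64 = 1 << 34; // 17_179_869_184

fn main() {
    let lamport_in_sol = 1.0 / LAMPORTS_PER_SOL as f64;
    println!("1 lamport ~= {:.13} sol", lamport_in_sol); // ~0.0000000000582
}
```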

View File

@ -0,0 +1,3 @@
# JavaScript API
See [solana-web3](https://solana-labs.github.io/solana-web3.js/).

View File

@ -1,28 +1,42 @@
Solana JSON RPC API
JSON RPC API
===
Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification.
To interact with a Solana node inside a JavaScript application, use the [solana-web3.js](https://github.com/solana-labs/solana-web3.js) library, which gives a convenient interface for the RPC methods.
RPC Endpoint
RPC HTTP Endpoint
---
**Default port:** 8899
eg. http://localhost:8899, http://192.168.1.88:8899
RPC PubSub WebSocket Endpoint
---
**Default port:** 8900
eg. ws://localhost:8900, ws://192.168.1.88:8900
Methods
---
* [confirmTransaction](#confirmtransaction)
* [getAddress](#getaddress)
* [getBalance](#getbalance)
* [getAccountInfo](#getaccountinfo)
* [getBalance](#getbalance)
* [getConfirmationTime](#getconfirmationtime)
* [getLastId](#getlastid)
* [getSignatureStatus](#getsignaturestatus)
* [getTransactionCount](#gettransactioncount)
* [requestAirdrop](#requestairdrop)
* [sendTransaction](#sendtransaction)
* [startSubscriptionChannel](#startsubscriptionchannel)
* [Subscription Websocket](#subscription-websocket)
* [accountSubscribe](#accountsubscribe)
* [accountUnsubscribe](#accountunsubscribe)
* [signatureSubscribe](#signaturesubscribe)
* [signatureUnsubscribe](#signatureunsubscribe)
Request Formatting
---
@ -108,16 +122,18 @@ Returns all information associated with the account of provided Pubkey
The result field will be a JSON object with the following sub fields:
* `tokens`, number of tokens assigned to this account, as a signed 64-bit integer
* `program_id`, array of 32 bytes representing the program this account has been assigned to
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `userdata`, array of bytes representing any userdata associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["FVxxngPx368XvMCoeskdd6U8cZJFsfa1BEtGWqyAxRj4"]}' http://localhost:8899
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```
---
@ -155,6 +171,7 @@ events.
* `Confirmed` - Transaction was successful
* `SignatureNotFound` - Unknown transaction
* `ProgramRuntimeError` - An error occurred in the program that processed this Transaction
* `AccountInUse` - Another Transaction held a write lock on one of the Accounts specified in this Transaction. The Transaction may succeed if retried
* `GenericFailure` - Some other error occurred. **Note**: In the future new Transaction statuses may be added to this list. It's safe to assume that all new statuses will be more specific error conditions that previously presented as `GenericFailure`
##### Example:
@ -185,6 +202,25 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":268,"id":1}
```
---
### getConfirmationTime
Returns the current cluster confirmation time in milliseconds
##### Parameters:
None
##### Results:
* `integer` - confirmation time in milliseconds, as unsigned 64-bit integer
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getConfirmationTime"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":500,"id":1}
```
---
### requestAirdrop
@ -227,3 +263,99 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
```
---
### Subscription Websocket
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
- Submit subscription requests to the websocket using the methods below
- Multiple subscriptions may be active at once
---
### accountSubscribe
Subscribe to an account to receive notifications when the userdata for a given account public key changes
##### Parameters:
* `string` - account Pubkey, as base-58 encoded string
##### Results:
* `integer` - Subscription id (needed to unsubscribe)
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```
##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
---
### accountUnsubscribe
Unsubscribe from account userdata change notifications
##### Parameters:
* `integer` - id of account Subscription to cancel
##### Results:
* `bool` - unsubscribe success message
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountUnsubscribe", "params":[0]}
// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
---
### signatureSubscribe
Subscribe to a transaction signature to receive notification when the transaction is confirmed
On `signatureNotification`, the subscription is automatically cancelled
##### Parameters:
* `string` - Transaction Signature, as base-58 encoded string
##### Results:
* `integer` - subscription id (needed to unsubscribe)
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```
##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
```
---
### signatureUnsubscribe
Unsubscribe from signature confirmation notifications
##### Parameters:
* `integer` - id of account subscription to cancel
##### Results:
* `bool` - unsubscribe success message
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureUnsubscribe", "params":[0]}
// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```

View File

@ -0,0 +1,52 @@
# Leader Rotation
At any given moment, a cluster expects only one fullnode to produce ledger
entries. By having only one leader at a time, all validators are able to replay
identical copies of the ledger. The drawback of only one leader at a time,
however, is that a malicious leader is capable of censoring votes and
transactions. Since censoring cannot be distinguished from the network dropping
packets, the cluster cannot simply elect a single node to hold the leader role
indefinitely. Instead, the cluster minimizes the influence of a malicious
leader by rotating which node takes the lead.
Each validator selects the expected leader using the same algorithm, described
below. When the validator receives a new signed ledger entry, it can be certain
that entry was produced by the expected leader.
## Leader Schedule Generation
Leader schedule is generated using a predefined seed. The process is as follows:
1. Periodically use the PoH tick height (a monotonically increasing counter) to
seed a stable pseudo-random algorithm.
2. At that height, sample the bank for all the staked accounts with leader
identities that have voted within a cluster-configured number of ticks. The
sample is called the *active set*.
3. Sort the active set by stake weight.
4. Use the random seed to select nodes weighted by stake to create a
stake-weighted ordering.
5. This ordering becomes valid after a cluster-configured number of ticks.
The seed that is selected is predictable but unbiasable. There is no grinding
attack to influence its outcome. The active set, however, can be biased by a
leader by censoring validator votes. To reduce the likelihood of censorship,
the active set is sampled many slots in advance, such that votes will have been
collected by multiple leaders. If even one node is honest, the malicious
leaders will not be able to use censorship to influence the leader schedule.
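A Rust sketch of the schedule generation described above, assuming a toy deterministic PRNG seeded from the PoH tick height and an illustrative stake-weighted sampling step (not the actual implementation):
```rust
// Hypothetical node entry in the active set.
#[derive(Clone)]
struct StakedNode {
    id: String,
    stake: u64,
}

// Tiny deterministic PRNG (xorshift) seeded from the PoH tick height;
// a real implementation would use a proper seeded RNG.
struct Prng(u64);
impl Prng {
    fn next(&mut self) -> u64 {
        self.0 ^= self.0 << 13;
        self.0 ^= self.0 >> 7;
        self.0 ^= self.0 << 17;
        self.0
    }
}

// Build a stake-weighted leader ordering for one epoch.
fn leader_schedule(mut active_set: Vec<StakedNode>, poh_tick_height: u64, slots: usize) -> Vec<String> {
    // 3. Sort the active set by stake weight (ties broken by id for determinism).
    active_set.sort_by(|a, b| b.stake.cmp(&a.stake).then(a.id.cmp(&b.id)));
    let total_stake: u64 = active_set.iter().map(|n| n.stake).sum();
    assert!(total_stake > 0);
    // 1. Seed the PRNG from the PoH tick height.
    let mut rng = Prng(poh_tick_height | 1);
    let mut schedule = Vec::with_capacity(slots);
    for _ in 0..slots {
        // 4. Pick a node with probability proportional to its stake.
        let mut target = rng.next() % total_stake;
        for node in &active_set {
            if target < node.stake {
                schedule.push(node.id.clone());
                break;
            }
            target -= node.stake;
        }
    }
    schedule
}
```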
## Appending Entries
The lifetime of a leader schedule is called an *epoch*. The epoch is split into
*slots*, where each slot has a duration of `T` PoH ticks.
A leader transmits entries during its slot. After `T` ticks, all the
validators switch to the next scheduled leader. Validators must ignore entries
sent outside a leader's assigned slot.
All `T` ticks must be observed by the next leader for it to build its own
entries on. If entries are not observed (leader is down) or entries are invalid
(leader is buggy or malicious), the next leader must produce ticks to fill the
previous leader's slot. Note that the next leader should do repair requests in
parallel, and postpone sending ticks until it is confident other validators
also failed to observe the previous leader's entries. If a leader incorrectly
builds on its own ticks, the leader following it must replace all its ticks.

View File

@ -0,0 +1,266 @@
# Ledger Replication
At full capacity on a 1 Gbps network, Solana will generate 4 petabytes of data
per year. To prevent the network from centralizing around full nodes that have
to store the full data set, this protocol proposes a way for mining nodes to
provide storage capacity for pieces of the network.
The basic idea behind Proof of Replication is to encrypt a dataset with a public
symmetric key using CBC encryption, then hash the encrypted dataset. The main
problem with the naive approach is that a dishonest storage node can stream the
encryption and delete the data as it is hashed. The simple solution is to force
the hash to be done on the reverse of the encryption, or perhaps with a random
order. This ensures that all the data is present during the generation of the
proof and it also requires the validator to have the entirety of the encrypted
data present for verification of every proof of every identity. So the space
required to validate is `number_of_proofs * data_size`.
## Terminology
#### replicator
Storage mining client, stores some part of the ledger enumerated in blocks and
submits storage proofs to the chain. Not a full-node.
#### ledger segment
Portion of the ledger which is downloaded by the replicator where storage proof
data is derived.
#### CBC block
Smallest encrypted chunk of ledger, an encrypted ledger segment would be made of
many CBC blocks. `ledger_segment_size / cbc_block_size` to be exact.
#### storage proof
A set of sha hash state which is constructed by sampling the encrypted version
of the stored ledger segment at certain offsets.
#### fake storage proof
A proof which has the same format as a storage proof, but the sha state is
actually from hashing a known ledger value which the storage client can reveal
and is also easily verifiable by the network on-chain.
#### storage proof confirmation
A transaction by a validator which indicates the set of real and fake proofs
submitted by a storage miner. The transaction would contain a list of proof
hash values and a bit which says if this hash is valid or fake.
#### storage proof challenge
A transaction from a replicator that verifiably proves that a validator
confirmed a fake proof.
#### storage proof claim
A transaction from a validator which is after the timeout period given from the
storage proof confirmation and which no successful challenges have been
observed which rewards the parties of the storage proofs and confirmations.
#### storage validation capacity
The number of keys and samples that a validator can verify each storage epoch.
## Optimization with PoH
Our improvement on this approach is to randomly sample the encrypted segments
faster than it takes to encrypt, and record the hash of those samples into the
PoH ledger. Thus the segments stay in the exact same order for every PoRep and
verification can stream the data and verify all the proofs in a single batch.
This way we can verify multiple proofs concurrently, each one on its own CUDA
core. The total space required for verification is `1_ledger_segment +
2_cbc_blocks * number_of_identities` with a core count equal to
`number_of_identities`. We use a 64-byte chacha CBC block size.
## Network
Validators for PoRep are the same validators that are verifying transactions.
They have some stake that they have put up as collateral that ensures that
their work is honest. If you can prove that a validator verified a fake PoRep,
then the validator's stake can be slashed.
Replicators are specialized *light clients*. They download a part of the ledger
and store it, and provide PoReps of storing the ledger. For each verified PoRep
replicators earn a reward of sol from the mining pool.
## Constraints
We have the following constraints:
* Verification requires generating the CBC blocks. That requires space of 2
blocks per identity, and 1 CUDA core per identity for the same dataset. So as
many identities at once should be batched with as many proofs for those
identities verified concurrently for the same dataset.
* Validators will randomly sample the set of storage proofs to the set that
they can handle, and only the creators of those chosen proofs will be
rewarded. The validator can run a benchmark whenever its hardware configuration
changes to determine what rate it can validate storage proofs.
## Validation and Replication Protocol
### Constants
1. NUM\_STORAGE\_ENTRIES: Number of entries in a segment of ledger data. The
unit of storage for a replicator.
2. NUM\_KEY\_ROTATION\_TICKS: Number of ticks to save a PoH value and cause a
key generation for the section of ledger just generated and the rotation of
another key in the set.
3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof
claim to be successfully rewarded.
4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage
mining proof claim has to contain to be valid for a reward.
5. NUM\_STORAGE\_SAMPLES: Number of samples required for a storage mining
proof.
6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate
encrypted state.
### Validator behavior
1. Validator joins the network and submits a storage validation capacity
transaction which tells the network how many proofs it can process in a given
period defined by NUM\_KEY\_ROTATION\_TICKS.
2. Every NUM\_KEY\_ROTATION\_TICKS the validator stores the PoH value at that
height.
3. Every NUM\_KEY\_ROTATION\_TICKS it also validates samples received from
replicators. It signs the PoH hash at that point and uses the following
algorithm with the signature as the input:
- The low 5 bits of the first byte of the signature creates an index into
another starting byte of the signature.
- The validator then looks at the set of storage proofs where the byte of
the proof's sha state vector starting from the low byte matches exactly
with the chosen byte(s) of the signature.
- If the set of proofs is larger than the validator can handle, then it
increases to matching 2 bytes in the signature.
- Validator continues to increase the number of matching bytes until a
workable set is found (see the sketch after this list).
- It then creates a mask of valid proofs and fake proofs and sends it to
the leader. This is a storage proof confirmation transaction.
4. The storage proof confirmation transaction is integrated into the ledger.
5. After a lockout period of NUM\_SECONDS\_STORAGE\_LOCKOUT seconds, the
validator then submits a storage proof claim transaction which then causes the
distribution of the storage reward if no challenges were seen for the proof to
the validators and replicators party to the proofs.
6. Validator responds to RPC interfaces for what the last storage epoch PoH
value is and its entry\_height.
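A simplified Rust sketch of the proof-selection step from item 3, assuming illustrative types and a small fixed upper bound on how many signature bytes are matched:
```rust
// Hypothetical storage proof: the replicator's sha state after sampling.
struct StorageProof {
    sha_state: [u8; 32],
}

// Select the subset of proofs this validator will verify, by requiring the
// low bytes of each proof's sha state to match the chosen bytes of the
// validator's signature. Widen the match until the set fits its capacity.
fn select_proofs<'a>(
    proofs: &'a [StorageProof],
    signature: &[u8; 64],
    capacity: usize,
) -> Vec<&'a StorageProof> {
    // Low 5 bits of the first signature byte index another signature byte.
    let start = (signature[0] & 0x1f) as usize;
    for match_len in 1..=4 {
        let wanted = &signature[start..start + match_len];
        let selected: Vec<&StorageProof> = proofs
            .iter()
            .filter(|p| &p.sha_state[..match_len] == wanted)
            .collect();
        if selected.len() <= capacity {
            return selected;
        }
        // Too many proofs match; require one more byte to match.
    }
    Vec::new()
}
```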
### Replicator behavior
1. Since a replicator is somewhat of a light client and not downloading all the
ledger data, they have to rely on other full nodes (validators) for
information. Any given validator may or may not be malicious and give incorrect
information, although there are not any obvious attack vectors that this could
accomplish besides having the replicator do extra wasted work. For many of the
operations there are number of options depending on how paranoid a replicator
is:
- (a) replicator can ask a validator
- (b) replicator can ask multiple validators
- (c) replicator can subscribe to the full transaction stream and generate
the information itself
- (d) replicator can subscribe to an abbreviated transaction stream to
generate the information itself
2. A replicator obtains the PoH hash corresponding to the last key rotation
along with its entry\_height.
3. The replicator signs the PoH hash with its keypair. That signature is the
seed used to pick the segment to replicate and also the encryption key. The
replicator mods the signature with the entry\_height to get which segment to
replicate.
4. The replicator retrieves the ledger by asking peer validators and
replicators. See 6.5.
5. The replicator then encrypts that segment with the key with chacha algorithm
in CBC mode with NUM\_CHACHA\_ROUNDS of encryption.
6. The replicator initializes a chacha rng with the signature from step 2 as
the seed.
7. The replicator generates NUM\_STORAGE\_SAMPLES samples in the range of the
entry size and samples the encrypted segment with sha256 for 32-bytes at each
offset value. Sampling the state should be faster than generating the encrypted
segment. A sketch of this sampling appears after this list.
8. The replicator sends a PoRep proof transaction which contains its sha state
at the end of the sampling operation, its seed and the samples it used to the
current leader and it is put onto the ledger.
9. The replicator then generates another set of offsets for which it submits a fake
proof with an incorrect sha state. It can be proven to be fake by providing the
seed for the hash result.
- A fake proof should consist of a replicator hash of a signature of a PoH
value. That way when the replicator reveals the fake proof, it can be
verified on chain.
10. The replicator monitors the ledger. If it sees a fake proof integrated, it
creates a challenge transaction and submits it to the current leader. The
transaction proves the validator incorrectly validated a fake storage proof.
The replicator is rewarded and the validator's staking balance is slashed or
frozen.
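A Rust sketch of the sampling in steps 6 and 7, assuming the segment has already been encrypted (step 5) and using the `sha2` crate; the offset generator below is a stand-in for the chacha rng:
```rust
use sha2::{Digest, Sha256};

// Illustrative constant; the protocol defines NUM_STORAGE_SAMPLES elsewhere.
const NUM_STORAGE_SAMPLES: usize = 4;

// Stand-in for the chacha rng that would be seeded with the replicator's
// signature of the PoH hash.
struct SampleRng(u64);
impl SampleRng {
    fn next(&mut self) -> u64 {
        self.0 = self.0.wrapping_mul(6364136223846793005).wrapping_add(1);
        self.0
    }
}

// Hash 32 bytes of the encrypted segment at each sampled offset, feeding every
// sample into one running sha256 state. The final state plus the offsets is
// what the replicator would submit in its PoRep proof transaction.
fn sample_encrypted_segment(encrypted: &[u8], seed: u64) -> (Vec<u64>, [u8; 32]) {
    assert!(encrypted.len() >= 32);
    let mut rng = SampleRng(seed);
    let mut hasher = Sha256::new();
    let mut offsets = Vec::with_capacity(NUM_STORAGE_SAMPLES);
    for _ in 0..NUM_STORAGE_SAMPLES {
        let offset = (rng.next() as usize) % (encrypted.len() - 31);
        offsets.push(offset as u64);
        hasher.update(&encrypted[offset..offset + 32]);
    }
    (offsets, hasher.finalize().into())
}
```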
### Finding who has a given block of ledger
1. Validators monitor the transaction stream for storage mining proofs, and
keep a mapping of ledger segments by entry\_height to public keys. When it sees
a storage mining proof it updates this mapping and provides an RPC interface
which takes an entry\_height and hands back a list of public keys. The client
then looks up in their cluster\_info table to see which network address that
corresponds to and sends a repair request to retrieve the necessary blocks of
ledger.
2. Validators would need to prune this list, which they could do by periodically
looking at the oldest entries in their mappings and doing a network query to see
if the storage host is still serving the first entry.
## Sybil attacks
For any random seed, we force everyone to use a signature that is derived from
a PoH hash. Everyone must use the same count, so the same PoH hash is signed by
every participant. The signatures are then each cryptographically tied to the
keypair, which prevents a leader from grinding on the resulting value for more
than 1 identity.
Since there are many more client identities than encryption identities, we need
to split the reward for multiple clients, and prevent Sybil attacks from
generating many clients to acquire the same block of data. To remain BFT we
want to avoid a single human entity from storing all the replications of a
single chunk of the ledger.
Our solution to this is to force the clients to continue using the same
identity. If the first round is used to acquire the same block for many client
identities, the second round for the same client identities will force a
redistribution of the signatures, and therefore PoRep identities and blocks.
Thus, to get a reward, replicators need to store the first block for free and
the network can reward long-lived client identities more than new ones.
## Validator attacks
- If a validator approves fake proofs, replicator can easily out them by
showing the initial state for the hash.
- If a validator marks real proofs as fake, no on-chain computation can be done
to distinguish who is correct. Rewards would have to rely on the results from
multiple validators in a stake-weighted fashion to catch bad actors and prevent
replicators from being locked out of the network.
- Validator stealing mining proof results for itself: the proofs are derived
from a signature from a replicator; since the validator does not know the
private key used to generate the encryption key, it cannot be the generator of
the proof.
## Reward incentives
Fake proofs are easy to generate but difficult to verify. For this reason,
PoRep proof transactions generated by replicators may require a higher fee than
a normal transaction to represent the computational cost required by
validators.
Some percentage of fake proofs are also necessary to receive a reward from
storage mining.
## Notes
* We can reduce the costs of verification of PoRep by using PoH, and actually
make it feasible to verify a large number of proofs for a global dataset.
* We can eliminate grinding by forcing everyone to sign the same PoH hash and
use the signatures as the seed.
* The game between validators and replicators is over random blocks and random
encryption identities and random data samples. The goal of randomization is
to prevent colluding groups from having overlap on data or validation.
* Replicator clients fish for lazy validators by submitting fake proofs that
they can prove are fake.
* To defend against Sybil client identities that try to store the same block we
force the clients to store for multiple rounds before receiving a reward.
* Validators should also get rewarded for validating submitted storage proofs
as incentive for storing the ledger. They can only validate proofs if they
are storing that slice of the ledger.

67
book/src/programs.md Normal file
View File

@ -0,0 +1,67 @@
# Programming Model
A client *app* interacts with a Solana cluster by sending it *transactions*
with one or more *instructions*. The Solana *runtime* passes those instructions
to user-contributed *programs*. An instruction might, for example, tell a
program to move *tokens* from one *account* to another or create an interactive
contract that governs how tokens are moved. Instructions are executed
atomically. If any instruction is invalid, any changes made within the
transaction are discarded.
## Deploying Programs to a Cluster
<img alt="SDK tools" src="img/sdk-tools.svg" class="center"/>
As shown in the diagram above a client creates a program and compiles it to an
ELF shared object containing BPF bytecode and sends it to the Solana cluster.
The cluster stores the program locally and makes it available to clients via a
*program ID*. The program ID is a *public key* generated by the client and is
used to reference the program in subsequent transactions.
A program may be written in any programming language that can target the
Berkeley Packet Filter (BPF) safe execution environment. The Solana SDK offers
the best support for C programs, which are compiled to BPF using the [LLVM
compiler infrastructure](https://llvm.org). Alternatively, a client might
choose to bypass LLVM and use Python, Lua or C++ to generate BPF directly via
the [BPF Compiler Collection](https://github.com/iovisor/bcc) (BCC).
## Storing State between Transactions
If the program needs to store state between transactions, it does so using
*accounts*. Accounts are similar to files in operating systems such as Linux.
Like a file, an account may hold arbitrary data and that data persists beyond
the lifetime of a program. Also like a file, an account includes metadata that
tells the runtime who is allowed to access the data and how. Unlike a file, the
account includes metadata for its own lifetime. That lifetime is
expressed in "tokens", which is a number of fractional native tokens, called
*lamports*. Accounts are held in validator memory and pay "rent" to stay there.
Each fullnode periodically scans all accounts and collects rent. Any account
that drops to zero lamports is purged.
If an account is marked "executable", it will only be used by a *loader* to run
programs. For example, a BPF-compiled program is marked executable and loaded
by the BPF loader. No program is allowed to modify the contents of an
executable account.
An account also includes "owner" metadata. The owner is a program ID. The
runtime grants the program write access to the account if its ID matches the
owner. If an account is not owned by a program, the program is permitted to
read its data and credit the account.
In the same way that a Linux user uses a path to look up a file, a Solana
client uses public keys to look up accounts. To create an account, the client
generates a *keypair* and registers its public key using the CreateAccount
instruction. Once registered, transactions reference account keys to grant
programs access to accounts. The runtime grants programs read access by
default. To grant write access, the client must either assign the account to a
program or sign the transaction using the keypair's *secret key*. Since only
the holder of the secret key can produce valid signatures matching the
account's public key, the runtime recognizes the signature as authorization to
modify account data or debit the account.
After the runtime executes each of the transaction's instructions, it uses the
account metadata and transaction signatures to verify that none of the access
rules were violated. If a program violates an access rule, the runtime discards
all account changes made by all instructions and marks the transaction as
failed.

7
book/src/proposals.md Normal file
View File

@ -0,0 +1,7 @@
# Proposed Architectural Changes
The following architectural proposals have been accepted by the Solana team, but
are not yet fully implemented. The proposals may be implemented as described,
implemented differently as issues in the designs become evident, or not
implemented at all. If implemented, the descriptions will be moved from this
section to earlier chapters in a future version of this book.

84
book/src/runtime.md Normal file
View File

@ -0,0 +1,84 @@
# The Runtime
The runtime is a concurrent transaction processor. Transactions specify their
data dependencies upfront and dynamic memory allocation is explicit. By
separating program code from the state it operates on, the runtime is able to
choreograph concurrent access. Transactions accessing only credit-only
accounts are executed in parallel whereas transactions accessing writable
accounts are serialized. The runtime interacts with the program through an
entrypoint with a well-defined interface. The userdata stored in an account is
an opaque type, an array of bytes. The program has full control over its
contents.
The transaction structure specifies a list of public keys and signatures for
those keys and a sequential list of instructions that will operate over the
states associated with the account keys. For the transaction to be committed
all the instructions must execute successfully; if any abort the whole
transaction fails to commit.
### Account Structure
Accounts maintain a token balance and program-specific memory.
# Transaction Engine
The engine maps public keys to accounts and routes them to the program's
entrypoint.
## Execution
Transactions are batched and processed in a pipeline
<img alt="Runtime pipeline" src="img/runtime.svg" class="center"/>
At the *execute* stage, the loaded pages have no data dependencies, so all the
programs can be executed in parallel.
The runtime enforces the following rules:
1. Only the *owner* program may modify the contents of an account. This means
that upon assignment the userdata vector is guaranteed to be zero.
2. The total balance across all accounts is equal before and after execution of a
transaction.
3. After the transaction is executed, balances of credit-only accounts must be
greater than or equal to the balances before the transaction.
4. All instructions in the transaction are executed atomically. If one fails, all
account modifications are discarded.
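A Rust sketch of how rules 2 and 3 might be checked after execution, assuming a minimal illustrative `Account` type rather than the runtime's actual structures:
```rust
#[derive(Clone)]
struct Account {
    tokens: u64,
    credit_only: bool,
}

// Verify rule 2 (token balances are conserved) and rule 3 (credit-only
// account balances never decrease) by comparing the pre- and post-execution
// snapshots of the accounts a transaction loaded.
fn verify_balances(pre: &[Account], post: &[Account]) -> bool {
    let pre_total: u64 = pre.iter().map(|a| a.tokens).sum();
    let post_total: u64 = post.iter().map(|a| a.tokens).sum();
    if pre_total != post_total {
        return false; // rule 2 violated: tokens created or destroyed
    }
    pre.iter().zip(post).all(|(before, after)| {
        !before.credit_only || after.tokens >= before.tokens // rule 3
    })
}
```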
Execution of the program involves mapping the program's public key to an
entrypoint which takes a pointer to the transaction, and an array of loaded
pages.
## SystemProgram Interface
The interface is best described by the `Instruction::userdata` that the user
encodes.
* `CreateAccount` - This allows the user to create and assign an account to a
Program.
* `Assign` - allows the user to assign an existing account to a program.
* `Move` - moves tokens between accounts
* `Spawn` - spawns a new program from an account
## Notes
1. There is no dynamic memory allocation. Clients need to use `CreateAccount`
instructions to create memory before passing it to another program. This
instruction can be composed into a single transaction with the call to the
program itself.
2. Runtime guarantees that when memory is assigned to the program it is zero
initialized.
3. Runtime guarantees that a program's code is the only thing that can modify
memory that is assigned to it
4. Runtime guarantees that the program can only spend tokens that are in
accounts that are assigned to it
5. Runtime guarantees the balances belonging to accounts are balanced before
and after the transaction
6. Runtime guarantees that multiple instructions all executed successfully when
a transaction is committed.
# Future Work
* [Continuations and Signals for long running
Transactions](https://github.com/solana-labs/solana/issues/1485)

168
book/src/staking-rewards.md Normal file
View File

@ -0,0 +1,168 @@
# Staking Rewards
Initial Proof of Stake (PoS) design ideas (i.e. using the in-protocol asset, SOL, to
provide secure consensus) are outlined here. Solana will implement a proof of
stake reward/security scheme for node validators in the cluster. The purpose is
threefold:
- Align validator incentives with that of the greater cluster through
skin-in-the-game deposits at risk
- Avoid 'nothing at stake' fork voting issues by implementing slashing rules
aimed at promoting fork convergence
- Provide an avenue for validator rewards provided as a function of validator
participation in the cluster.
While many of the details of the specific implementation are currently under
consideration and are expected to come into focus through specific modeling
studies and parameter exploration on the Solana testnet, we outline here our
current thinking on the main components of the PoS system. Much of this
thinking is based on the current status of Casper FFG, with optimizations and
specific attributes to be modified as is allowed by Solana's Proof of History
(PoH) blockchain data structure.
### General Overview
Solana's ledger validation design is based on a rotating, stake-weighted
randomly selected leader broadcasting transactions in a PoH data
structure to validating nodes. These nodes, upon receiving the leader's
broadcast, have the opportunity to vote on the current state and PoH height by
signing a transaction into the PoH stream.
To become a Solana validator, a fullnode must deposit/lock-up some amount
of SOL in a contract. This SOL will not be accessible for a specific time
period. The precise duration of the staking lockup period has not been
determined. However we can consider three phases of this time for which
specific parameters will be necessary:
- *Warm-up period*: during which SOL is deposited and inaccessible to the node,
but PoH transaction validation has not begun. Most likely on the order of
days to weeks
- *Validation period*: a minimum duration for which the deposited SOL will be
inaccessible, at risk of slashing (see slashing rules below) and earning
rewards for the validator participation. Likely duration of months to a
year.
- *Cool-down period*: a duration of time following the submission of a
'withdrawal' transaction. During this period validation responsibilities have
been removed and the funds continue to be inaccessible. Accumulated rewards
should be delivered at the end of this period, along with the return of the
initial deposit.
Solana's trustless sense of time and ordering provided by its PoH data
structure, along with its
[avalanche](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast
and transmission design, should provide subsecond confirmation times that scale
with the log of the number of nodes in the cluster. This means we shouldn't
have to restrict the number of validating nodes with a prohibitive 'minimum
deposit' and expect nodes to be able to become validators with nominal amounts
of SOL staked. This should also render validation pools, a proposed solution
for economic censorship imposed by minimum staking amounts currently described
in Casper, unnecessary and remove the concern for needing to put slashable
stake at risk while relying on others to play by the rules.
### Stake-weighted Rewards
Rewards are expected to be paid out to active validators as a function of
validator activity and as a proportion of the percentage of SOL they have at
stake out of the entirety of the staking pool.
We expect to define a baseline annual validator payout/inflation rate based on
the total SOL deposited. E.g. 10% annual interest on SOL deposited with X total
SOL deposited as slashable on the cluster. This is the same design as currently
proposed in Casper FFG, which additionally specifies how inflation rates
adjust as a function of total ETH deposited. Specifically, Casper validator
returns are proportional to the inverse square root of the total deposits and
initial annual rates are estimated as:
| Deposit Size | Annual Validator Interest |
|-------------:|--------------------------:|
| 2.5M ETH | 10.12% |
| 10M ETH | 5.00% |
| 20M ETH | 3.52% |
| 40M ETH | 2.48% |
This has the nice property of potentially incentivizing participation around a
target deposit size. Incentivizing specific participation rates more
directly (rather than deposit size) may also be worth exploring.
The specifics of the Solana validator reward scheme are to be worked out in
parallel with a design for transaction fee assignment as well as our storage
mining reward scheme.
### Slashing rules
Unlike Proof of Work (PoW) where off-chain capital expenses are already
deployed at the time of block construction/voting, PoS systems require
capital-at-risk to prevent a logical/optimal strategy of multiple chain voting.
We intend to implement slashing rules which, if broken, result in some amount of
the offending validator's deposited stake being removed from circulation. Given
the ordering properties of the PoH data structure, we believe we can simplify
our slashing rules to the level of a voting lockout time assigned per vote.
I.e. Each vote has an associated lockout time (PoH duration) that represents a
duration by which any additional vote from that validator must be in a PoH that
contains the original vote, or a portion of that validator's stake is
slashable. This duration time is a function of the initial vote PoH count and
all additional vote PoH counts. It will likely take the form:
Lockout<sub>i</sub>(PoH<sub>i</sub>, PoH<sub>j</sub>) = PoH<sub>j</sub> + K *
exp((PoH<sub>j</sub> - PoH<sub>i</sub>) / K)
Where PoH<sub>i</sub> is the height of the vote that the lockout is to be
applied to and PoH<sub>j</sub> is the height of the current vote on the same
fork. If the validator submits a vote on a different PoH fork on any
PoH<sub>k</sub> where k > j > i and PoH<sub>k</sub> < Lockout(PoH<sub>i</sub>,
PoH<sub>j</sub>), then a portion of that validator's stake is at risk of being
slashed.
In addition to the functional form lockout described above, early
implementation may be a numerical approximation based on a First In, First Out
(FIFO) data structure and the following logic:
- FIFO queue holding 32 votes per active validator
- new votes are pushed on top of queue (`push_front`)
- expired votes are popped off top (`pop_front`)
- as votes are pushed into the queue, the lockout of each queued vote doubles
- votes are removed from back of queue if `queue.len() > 32`
- the earliest and latest height that has been removed from the back of the
queue should be stored
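A Rust sketch of this FIFO approximation, assuming a vote is just its PoH height plus a lockout that doubles when newer votes are pushed (illustrative, not the implemented data structure):
```rust
use std::collections::VecDeque;

const MAX_VOTES: usize = 32;
const INITIAL_LOCKOUT: u64 = 2; // illustrative, in PoH ticks

struct Vote {
    height: u64,
    lockout: u64,
}

struct VoteQueue {
    votes: VecDeque<Vote>,
}

impl VoteQueue {
    fn push_vote(&mut self, height: u64) {
        // Expired votes are popped off the top: a vote whose lockout has
        // passed no longer constrains this validator.
        while matches!(self.votes.front(), Some(v) if v.height + v.lockout <= height) {
            self.votes.pop_front();
        }
        // Each vote already in the queue has its lockout doubled by the new vote.
        for v in self.votes.iter_mut() {
            v.lockout = v.lockout.saturating_mul(2);
        }
        self.votes.push_front(Vote { height, lockout: INITIAL_LOCKOUT });
        // Bound the queue at 32 votes, dropping the oldest from the back.
        while self.votes.len() > MAX_VOTES {
            self.votes.pop_back();
        }
    }
}
```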
It is likely that a reward will be offered as a % of the slashed amount to any
node that submits proof of this slashing condition being violated to the PoH.
#### Partial Slashing
In the schema described so far, when a validator votes on a given PoH stream,
they are committing themselves to that fork for a time determined by the vote
lockout. An open question is whether validators will be hesitant to begin
voting on an available fork if the penalties are perceived too harsh for an
honest mistake or flipped bit.
One way to address this concern would be a partial slashing design that results
in a slashable amount as a function of either:
1. the fraction of validators, out of the total validator pool, that were also
slashed during the same time period (ala Casper)
2. the amount of time since the vote was cast (e.g. a linearly increasing % of
total deposited as slashable amount over time), or both.
This is an area currently under exploration.
### Penalties
As previously discussed, annual validator reward rates are to be specified as a
function of total amount staked. The cluster rewards validators who are online
and actively participating in the validation process throughout the entirety of
their *validation period*. For validators that go offline/fail to validate
transactions during this period, their annual reward is effectively reduced.
Similarly, we may consider an algorithmic reduction in a validator's active
staked amount in the case that they are offline. I.e. if a validator is
inactive for some amount of time, either due to a partition or otherwise, the
amount of their stake that is considered active (eligible to earn rewards)
may be reduced. This design would be structured to help long-lived partitions
to eventually reach finality on their respective chains as the % of non-voting
total stake is reduced over time until a super-majority can be achieved by the
active validators in each partition. Similarly, upon re-engaging, the active
amount staked will come back online at some defined rate. Different rates of
stake reduction may be considered depending on the size of the partition/active
set.

View File

@ -0,0 +1,87 @@
# Synchronization
Fast, reliable synchronization is the biggest reason Solana is able to achieve
such high throughput. Traditional blockchains synchronize on large chunks of
transactions called blocks. By synchronizing on blocks, a transaction cannot be
processed until a duration called "block time" has passed. In Proof of Work
consensus, these block times need to be very large (~10 minutes) to minimize
the odds of multiple fullnodes producing a new valid block at the same time.
There's no such constraint in Proof of Stake consensus, but without reliable
timestamps, a fullnode cannot determine the order of incoming blocks. The
popular workaround is to tag each block with a [wallclock
timestamp](https://en.bitcoin.it/wiki/Block_timestamp). Because of clock drift
and variance in network latencies, the timestamp is only accurate within an
hour or two. To work around the workaround, these systems lengthen block times
to provide reasonable certainty that the median timestamp on each block is
always increasing.
Solana takes a very different approach, which it calls *Proof of History* or
*PoH*. Leader nodes "timestamp" blocks with cryptographic proofs that some
duration of time has passed since the last proof. All data hashed into the
proof most certainly occurred before the proof was generated. The node
then shares the new block with validator nodes, which are able to verify those
proofs. The blocks can arrive at validators in any order or even could be
replayed years later. With such reliable synchronization guarantees, Solana is
able to break blocks into smaller batches of transactions called *entries*.
Entries are streamed to validators in realtime, before any notion of block
consensus.
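A minimal Rust sketch of the hashing underneath PoH, using the `sha2` crate; the entry structure and tick accounting are simplified:
```rust
use sha2::{Digest, Sha256};

// Repeatedly hash the previous hash to prove that some amount of work (and
// therefore time) has passed, optionally mixing in observed data so that data
// is provably "before" every later hash in the chain.
fn next_poh_hash(prev: [u8; 32], num_hashes: u64, mixin: Option<&[u8]>) -> [u8; 32] {
    let mut hash = prev;
    for _ in 0..num_hashes.saturating_sub(1) {
        hash = Sha256::digest(&hash).into();
    }
    // The final hash of the run optionally incorporates observed data,
    // e.g. the hash of an entry's transactions.
    let mut hasher = Sha256::new();
    hasher.update(&hash);
    if let Some(data) = mixin {
        hasher.update(data);
    }
    hasher.finalize().into()
}
```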
Solana technically never sends a *block*, but uses the term to describe the
sequence of entries that fullnodes vote on to achieve *confirmation*. In that
way, Solana's confirmation times can be compared apples to apples to
block-based systems. The current implementation sets block time to 800ms.
What's happening under the hood is that entries are streamed to validators as
quickly as a leader node can batch a set of valid transactions into an entry.
Validators process those entries long before it is time to vote on their
validity. By processing the transactions optimistically, there is effectively
no delay between the time the last entry is received and the time when the node
can vote. In the event consensus is **not** achieved, a node simply rolls back
its state. This optimistic processing technique was introduced in 1981 and
called [Optimistic Concurrency
Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). It
can be applied to blockchain architecture where a cluster votes on a hash that
represents the full ledger up to some *block height*. In Solana, it is
implemented trivially using the last entry's PoH hash.
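A hedged sketch of that optimistic flow, with every type invented for
illustration (these are not Solana's runtime types): entries are applied as
they stream in against a checkpointed state, the node votes on the last
entry's PoH hash, and the state is rolled back if consensus is not reached.

```rust
// Illustration only: optimistic entry processing with rollback on failed
// consensus. `State` and `Entry` are stand-ins for the real runtime types.
use std::collections::HashMap;

#[derive(Clone)]
struct State {
    balances: HashMap<String, u64>,
}

struct Entry {
    poh_hash: [u8; 32],
    transactions: Vec<(String, String, u64)>, // (from, to, amount)
}

fn apply(state: &mut State, entry: &Entry) {
    for (from, to, amount) in &entry.transactions {
        if let Some(balance) = state.balances.get_mut(from) {
            if *balance >= *amount {
                *balance -= amount;
                *state.balances.entry(to.clone()).or_insert(0) += amount;
            }
        }
    }
}

/// Process entries as they arrive; vote on the last PoH hash, and roll back
/// to the checkpoint if the cluster does not reach consensus.
fn process_block(state: &mut State, entries: &[Entry], consensus_reached: bool) {
    let checkpoint = state.clone(); // stand-in for a cheap snapshot
    for entry in entries {
        apply(state, entry); // optimistic: the work happens before the vote
    }
    if let Some(last) = entries.last() {
        println!("vote on PoH hash {:x?}...", &last.poh_hash[..4]);
    }
    if !consensus_reached {
        *state = checkpoint; // Optimistic Concurrency Control: discard the work
    }
}

fn main() {
    let mut state = State {
        balances: HashMap::from([("alice".to_string(), 100)]),
    };
    let entries = vec![Entry {
        poh_hash: [0u8; 32],
        transactions: vec![("alice".into(), "bob".into(), 40)],
    }];
    process_block(&mut state, &entries, true);
    println!("alice: {:?}, bob: {:?}",
             state.balances.get("alice"), state.balances.get("bob"));
}
```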
### Relationship to VDFs
The Proof of History technique was first described for use in blockchain by
Solana in November of 2017. In June of the following year, a similar technique
was described at Stanford and called a [verifiable delay
function](https://eprint.iacr.org/2018/601.pdf) or *VDF*.
A desirable property of a VDF is that verification time is very fast. Solana's
approach to verifying its delay function is proportional to the time it took to
create it. Split over a 4000 core GPU, it is sufficiently fast for Solana's
needs, but if you asked the authors of the paper cited above, they might tell you
([and have](https://github.com/solana-labs/solana/issues/388)) that Solana's
approach is algorithmically slow and it shouldn't be called a VDF. We argue the
term VDF should represent the category of verifiable delay functions and not
just the subset with certain performance characteristics. Until that's
resolved, Solana will likely continue using the term PoH for its
application-specific VDF.
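To see why verification parallelizes, note that each published entry carries
the number of hashes and the resulting hash, so any segment of the chain can
be re-hashed independently given its starting hash. The sketch below spreads
that work over standard-library threads; the `Segment` layout is invented for
illustration and uses the `sha2` crate.

```rust
// Illustration of parallel PoH verification: each (start, count, end) segment
// is checked on its own thread. The segment layout is a simplified stand-in.
use sha2::{Digest, Sha256};
use std::thread;

#[derive(Clone)]
struct Segment {
    start_hash: [u8; 32],
    num_hashes: u64,
    end_hash: [u8; 32],
}

fn verify_segment(seg: &Segment) -> bool {
    let mut hash = seg.start_hash;
    for _ in 0..seg.num_hashes {
        let digest = Sha256::digest(&hash);
        hash.copy_from_slice(digest.as_slice());
    }
    hash == seg.end_hash
}

/// Verify all segments concurrently: the total work matches what the leader
/// performed, but the wall-clock time shrinks with the number of cores.
fn verify_chain(segments: Vec<Segment>) -> bool {
    let handles: Vec<_> = segments
        .into_iter()
        .map(|seg| thread::spawn(move || verify_segment(&seg)))
        .collect();
    handles.into_iter().all(|h| h.join().unwrap())
}

fn main() {
    // Build one honest segment by hashing forward, then verify two copies.
    let start = [0u8; 32];
    let mut end = start;
    for _ in 0..10_000 {
        let digest = Sha256::digest(&end);
        end.copy_from_slice(digest.as_slice());
    }
    let seg = Segment { start_hash: start, num_hashes: 10_000, end_hash: end };
    println!("valid: {}", verify_chain(vec![seg.clone(), seg]));
}
```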
Another difference between PoH and VDFs is that a VDF is used only for tracking
duration. PoH's hash chain, on the other hand, includes hashes of any data the
application observed. That data is a double-edged sword. On one side, the data
"proves history" - that the data most certainly existed before hashes after it.
On the side, it means the application can manipulate the hash chain by changing
*when* the data is hashed. The PoH chain therefore does not serve as a good
source of randomness whereas a VDF without that data could. Solana's [leader
rotation algorithm](#leader-rotation), for example, is derived only from the
VDF *height* and not its hash at that height.
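For example, deriving the leader from the height alone might look like the
following sketch; the slot length and schedule here are invented for
illustration and are not the actual leader schedule logic.

```rust
/// Illustration only: the leader is chosen from the PoH/VDF height, never the
/// hash at that height, so mixed-in data cannot bias leader selection.
fn leader_index(height: u64, ticks_per_slot: u64, schedule_len: usize) -> usize {
    let slot = height / ticks_per_slot;
    (slot % schedule_len as u64) as usize
}

fn main() {
    // Hypothetical schedule of fullnode identities, abbreviated as strings.
    let schedule = ["validator-a", "validator-b", "validator-c"];
    for &height in [0u64, 64, 128, 192].iter() {
        let idx = leader_index(height, 64, schedule.len());
        println!("height {:>3} -> leader {}", height, schedule[idx]);
    }
}
```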
### Relationship to Consensus Mechanisms
Proof of History is not a consensus mechanism, but it is used to improve the
performance of Solana's Proof of Stake consensus. It is also used to improve
the performance of the data plane and replication protocols.
### More on Proof of History
* [water clock
analogy](https://medium.com/solana-labs/proof-of-history-explained-by-a-water-clock-e682183417b8)
* [Proof of History
overview](https://medium.com/solana-labs/proof-of-history-a-clock-for-blockchain-cf47a61a9274)

259
book/src/terminology.md Normal file

@ -0,0 +1,259 @@
# Terminology
The following terms are used throughout this book.
#### account
A persistent file addressed by [public key](#public-key) and with
[lamports](#lamport) tracking its lifetime.
#### app
A front-end application that interacts with a Solana cluster.
#### blob
A fraction of a [block](#block); the smallest unit sent between
[fullnodes](#fullnode).
#### block
A contiguous set of [entries](#entry) on the ledger covered by a
[vote](#ledger-vote). The duration of a block is some cluster-configured
number of [ticks](#tick). Also called [voting period](#voting-period).
#### block height
The number of [blocks](#block) beneath the current block plus one. The [genesis
block](#genesis-block), for example, has block height 1.
#### bootstrap leader
The first [fullnode](#fullnode) to take the [leader](#leader) role.
#### client
A [node](#node) that utilizes the [cluster](#cluster).
#### cluster
A set of [fullnodes](#fullnode) maintaining a single [ledger](#ledger).
#### confirmation
The wallclock duration between a [leader](#leader) creating a [tick
entry](#tick) and recognizing a supermajority of [ledger votes](#ledger-vote)
with a ledger interpretation that matches the leader's.
#### control plane
A gossip network connecting all [nodes](#node) of a [cluster](#cluster).
#### data plane
A multicast network used to efficiently validate [entries](#entry) and gain
consensus.
#### drone
An off-chain service that acts as a custodian for a user's private key. It
typically serves to validate and sign transactions.
#### entry
An entry on the [ledger](#ledger), either a [tick](#tick) or a [transactions
entry](#transactions-entry).
#### epoch
The time, i.e. number of [slots](#slot), for which a [leader
schedule](#leader-schedule) is valid.
#### fork
A [ledger](#ledger) derived from common entries but then diverged.
#### fullnode
A full participant in the [cluster](#cluster), either a [leader](#leader) or
[validator](#validator) node.
#### fullnode state
The result of interpreting all programs on the ledger at a given [tick
height](#tick-height). It includes at least the set of all [accounts](#account)
holding nonzero [native tokens](#native-tokens).
#### genesis block
The first [block](#block) of the [ledger](#ledger).
#### hash
A digital fingerprint of a sequence of bytes.
#### instruction
The smallest unit of a [program](#program) that a [client](#client) can include
in a [transaction](#transaction).
#### keypair
A [public key](#public-key) and corresponding [secret key](#secret-key).
#### lamport
A fractional [native token](#native-token) with the value of approximately
0.0000000000582 [sol](#sol) (2^-34).
#### loader
A [program](#program) with the ability to interpret the binary encoding of
other on-chain programs.
#### leader
The role of a [fullnode](#fullnode) when it is appending [entries](#entry) to
the [ledger](#ledger).
#### leader schedule
A sequence of [fullnode](#fullnode) [public keys](#public-key). The cluster
uses the leader schedule to determine which fullnode is the [leader](#leader)
at any moment in time.
#### ledger
A list of [entries](#entry) containing [transactions](#transaction) signed by
[clients](#client).
#### ledger vote
A [hash](#hash) of the [fullnode's state](#fullnode-state) at a given [tick
height](#tick-height). It comprises a validator's affirmation that a
[block](#block) it has received has been verified, as well as a promise not to
vote for a conflicting [block](#block) (i.e. [fork](#fork)) for a specific
amount of time, the [lockout](#lockout) period.
#### light client
A type of [client](#client) that can verify it's pointing to a valid
[cluster](#cluster). It performs more ledger verification than a [thin
client](#thin-client) and less than a [fullnode](#fullnode).
#### lockout
The duration of time for which a [fullnode](#fullnode) is unable to
[vote](#ledger-vote) on another [fork](#fork).
#### native token
The [token](#token) used to track work done by [nodes](#node) in a cluster.
#### node
A computer participating in a [cluster](#cluster).
#### node count
The number of [fullnodes](#fullnode) participating in a [cluster](#cluster).
#### PoH
See [Proof of History](#proof-of-history).
#### program
The code that interprets [instructions](#instruction).
#### program ID
The public key of the [account](#account) containing a [program](#program).
#### Proof of History
A stack of proofs, each which proves that some data existed before the proof
was created and that a precise duration of time passed before the previous
proof. Like a [VDF](#verifiable-delay-function), a Proof of History can be
verified in less time than it took to produce.
#### public key
The public key of a [keypair](#keypair).
#### runtime
The component of a [fullnode](#fullnode) responsible for [program](#program)
execution.
#### secret key
The private key of a [keypair](#keypair).
#### slot
The time (i.e. number of [blocks](#block)) for which a [leader](#leader)
ingests transactions and produces [entries](#entry).
#### sol
The [native token](#native-token) tracked by a [cluster](#cluster) recognized
by the company Solana.
#### stake
Tokens forfeited to the [cluster](#cluster) if malicious [fullnode](#fullnode)
behavior can be proven.
#### thin client
A type of [client](#client) that trusts it is communicating with a valid
[cluster](#cluster).
#### tick
A ledger [entry](#entry) that estimates wallclock duration.
#### tick height
The Nth [tick](#tick) in the [ledger](#ledger).
#### token
A scarce, fungible member of a set of tokens.
#### tps
[Transactions](#transaction) per second.
#### transaction
One or more [instructions](#instruction) signed by the [client](#client) and
executed atomically.
#### transactions entry
A set of [transactions](#transaction) that may be executed in parallel.
#### validator
The role of a [fullnode](#fullnode) when it is validating the
[leader's](#leader) latest [entries](#entry).
#### VDF
See [verifiable delay function](#verifiable-delay-function).
#### verifiable delay function
A function that takes a fixed amount of time to execute that produces a proof
that it ran, which can then be verified in less time than it took to produce.
#### vote
See [ledger vote](#ledger-vote).
#### voting period
The duration of a [block](#block).

35
book/src/tictactoe.md Normal file

@ -0,0 +1,35 @@
# Example app: Tic-Tac-Toe
[Click here to play
Tic-Tac-Toe](https://solana-example-tictactoe.herokuapp.com/) on the Solana
testnet. Open the link and wait for another player to join, or open the link
in a second browser tab to play against yourself. You will see that every
move a player makes stores a transaction on the ledger.
## Build and run Tic-Tac-Toe locally
First fetch the latest release of the example code:
```sh
$ git clone https://github.com/solana-labs/example-tictactoe.git
$ cd example-tictactoe
$ TAG=$(git describe --tags $(git rev-list --tags --max-count=1))
$ git checkout $TAG
```
Next, follow the steps in the git repository's
[README](https://github.com/solana-labs/example-tictactoe/blob/master/README.md).
## Getting tokens to users
You may have noticed you interacted with the Solana cluster without first
needing to acquire tokens to pay transaction fees. Under the hood, the web
app creates a new ephemeral identity and sends a request to an off-chain
service for a signed transaction that authorizes a user to start a new game.
The service is called a *drone*. When the app sends the signed transaction
to the Solana cluster, the drone's tokens are spent to pay the transaction
fee and start the game. In a real world app, the drone might request the user
watch an ad or pass a CAPTCHA before signing over its tokens.
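The flow, sketched below with invented types (none of this is the Solana SDK
or the example app's actual code): the app generates an ephemeral identity,
asks the drone to sign a game-starting transaction, and submits it so the
drone's balance covers the fee.

```rust
// Illustration of the drone flow only; every type and function here is a
// stand-in, not part of the Solana SDK or the example-tictactoe code.
struct Keypair {
    pubkey: String,
}

struct SignedTransaction {
    payer: String,     // the drone pays the transaction fee
    player: String,    // the ephemeral identity allowed to start a game
    signature: String, // the drone's signature over the transaction
}

/// Stand-in for the off-chain drone service: it validates the request
/// (rate limits, an ad watched, a CAPTCHA passed, ...) and signs with its key.
fn request_from_drone(player_pubkey: &str) -> SignedTransaction {
    SignedTransaction {
        payer: "drone".to_string(),
        player: player_pubkey.to_string(),
        signature: format!("sig(drone, start_game, {})", player_pubkey),
    }
}

fn main() {
    // 1. The web app creates an ephemeral identity for this browser session.
    let ephemeral = Keypair { pubkey: "ephemeral-player-key".to_string() };
    // 2. It asks the drone for a signed transaction that starts a new game.
    let tx = request_from_drone(&ephemeral.pubkey);
    // 3. It submits the transaction; the drone's tokens pay the fee.
    println!("submit: payer={} player={} {}", tx.payer, tx.player, tx.signature);
}
```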

3
book/src/tpu.md Normal file

@ -0,0 +1,3 @@
# The Transaction Processing Unit
<img alt="TPU Block Diagram" src="img/tpu.svg" class="center"/>

3
book/src/tvu.md Normal file

@ -0,0 +1,3 @@
# The Transaction Validation Unit
<img alt="TVU Block Diagram" src="img/tvu.svg" class="center"/>

353
book/src/wallet.md Normal file

@ -0,0 +1,353 @@
## solana-wallet CLI
The [solana crate](https://crates.io/crates/solana) is distributed with a command-line interface tool.
### Examples
#### Get Pubkey
```sh
// Command
$ solana-wallet address
// Return
<PUBKEY>
```
#### Airdrop Tokens
```sh
// Command
$ solana-wallet airdrop 123
// Return
"Your balance is: 123"
```
#### Get Balance
```sh
// Command
$ solana-wallet balance
// Return
"Your balance is: 123"
```
#### Confirm Transaction
```sh
// Command
$ solana-wallet confirm <TX_SIGNATURE>
// Return
"Confirmed" / "Not found"
```
#### Deploy program
```sh
// Command
$ solana-wallet deploy <PATH>
// Return
<PROGRAM_ID>
```
#### Unconditional Immediate Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123
// Return
<TX_SIGNATURE>
```
#### Post-Dated Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--after 2018-12-24T23:59:00 --require-timestamp-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
*`require-timestamp-from` is optional. If not provided, the transaction will expect a timestamp signed by this wallet's secret key*
#### Authorized Transfer
A third party must send a signature to unlock the tokens.
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--require-signature-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Post-Dated and Authorized Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--after 2018-12-24T23:59 --require-timestamp-from <PUBKEY> \
--require-signature-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Multiple Witnesses
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--require-signature-from <PUBKEY> \
--require-signature-from <PUBKEY>
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Cancelable Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
--require-signature-from <PUBKEY> \
--cancelable
// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
#### Cancel Transfer
```sh
// Command
$ solana-wallet cancel <PROCESS_ID>
// Return
<TX_SIGNATURE>
```
#### Send Signature
```sh
// Command
$ solana-wallet send-signature <PUBKEY> <PROCESS_ID>
// Return
<TX_SIGNATURE>
```
#### Indicate Elapsed Time
Use the current system time:
```sh
// Command
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID>
// Return
<TX_SIGNATURE>
```
Or specify some other arbitrary timestamp:
```sh
// Command
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
// Return
<TX_SIGNATURE>
```
### Usage
```manpage
solana-wallet 0.11.0
USAGE:
solana-wallet [OPTIONS] [SUBCOMMAND]
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
-k, --keypair <PATH> /path/to/id.json
-n, --network <HOST:PORT> Rendezvous with the network at this gossip entry point; defaults to 127.0.0.1:8001
--proxy <URL> Address of TLS proxy
--port <NUM> Optional rpc-port configuration to connect to non-default nodes
--timeout <SECS> Max seconds to wait to get necessary gossip from the network
SUBCOMMANDS:
address Get your public key
airdrop Request a batch of tokens
balance Get your balance
cancel Cancel a transfer
confirm Confirm transaction by signature
deploy Deploy a program
get-transaction-count Get current transaction count
help Prints this message or the help of the given subcommand(s)
pay Send a payment
send-signature Send a signature to authorize a transfer
send-timestamp Send a timestamp to unlock a transfer
```
```manpage
solana-wallet-address
Get your public key
USAGE:
solana-wallet address
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
```
```manpage
solana-wallet-airdrop
Request a batch of tokens
USAGE:
solana-wallet airdrop <NUM>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
ARGS:
<NUM> The number of tokens to request
```
```manpage
solana-wallet-balance
Get your balance
USAGE:
solana-wallet balance
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
```
```manpage
solana-wallet-cancel
Cancel a transfer
USAGE:
solana-wallet cancel <PROCESS_ID>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
ARGS:
<PROCESS_ID> The process id of the transfer to cancel
```
```manpage
solana-wallet-confirm
Confirm transaction by signature
USAGE:
solana-wallet confirm <SIGNATURE>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
ARGS:
<SIGNATURE> The transaction signature to confirm
```
```manpage
solana-wallet-deploy
Deploy a program
USAGE:
solana-wallet deploy <PATH>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
ARGS:
<PATH> /path/to/program.o
```
```manpage
solana-wallet-get-transaction-count
Get current transaction count
USAGE:
solana-wallet get-transaction-count
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
```
```manpage
solana-wallet-pay
Send a payment
USAGE:
solana-wallet pay [FLAGS] [OPTIONS] <PUBKEY> <NUM>
FLAGS:
--cancelable
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
--after <DATETIME> A timestamp after which transaction will execute
--require-timestamp-from <PUBKEY> Require timestamp from this third party
--require-signature-from <PUBKEY>... Any third party signatures required to unlock the tokens
ARGS:
<PUBKEY> The pubkey of recipient
<NUM> The number of tokens to send
```
```manpage
solana-wallet-send-signature
Send a signature to authorize a transfer
USAGE:
solana-wallet send-signature <PUBKEY> <PROCESS_ID>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
ARGS:
<PUBKEY> The pubkey of recipient
<PROCESS_ID> The process id of the transfer to authorize
```
```manpage
solana-wallet-send-timestamp
Send a timestamp to unlock a transfer
USAGE:
solana-wallet send-timestamp [OPTIONS] <PUBKEY> <PROCESS_ID>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
--date <DATETIME> Optional arbitrary timestamp to apply
ARGS:
<PUBKEY> The pubkey of recipient
<PROCESS_ID> The process id of the transfer to unlock
```

13
book/src/webwallet.md Normal file

@ -0,0 +1,13 @@
# Example app: Web Wallet
## Build and run a web wallet locally
First fetch the example code:
```sh
$ git clone https://github.com/solana-labs/example-webwallet.git
$ cd example-webwallet
```
Next, follow the steps in the git repository's
[README](https://github.com/solana-labs/example-webwallet/blob/master/README.md).

600
book/theme/book.js Normal file

@ -0,0 +1,600 @@
"use strict";
// Fix back button cache problem
window.onunload = function () { };
// Global variable, shared between modules
function playpen_text(playpen) {
let code_block = playpen.querySelector("code");
if (window.ace && code_block.classList.contains("editable")) {
let editor = window.ace.edit(code_block);
return editor.getValue();
} else {
return code_block.textContent;
}
}
(function codeSnippets() {
// Hide Rust code lines prepended with a specific character
var hiding_character = "#";
function fetch_with_timeout(url, options, timeout = 6000) {
return Promise.race([
fetch(url, options),
new Promise((_, reject) => setTimeout(() => reject(new Error('timeout')), timeout))
]);
}
var playpens = Array.from(document.querySelectorAll(".playpen"));
if (playpens.length > 0) {
fetch_with_timeout("https://play.rust-lang.org/meta/crates", {
headers: {
'Content-Type': "application/json",
},
method: 'POST',
mode: 'cors',
})
.then(response => response.json())
.then(response => {
// get list of crates available in the rust playground
let playground_crates = response.crates.map(item => item["id"]);
playpens.forEach(block => handle_crate_list_update(block, playground_crates));
});
}
function handle_crate_list_update(playpen_block, playground_crates) {
// update the play buttons after receiving the response
update_play_button(playpen_block, playground_crates);
// and install on change listener to dynamically update ACE editors
if (window.ace) {
let code_block = playpen_block.querySelector("code");
if (code_block.classList.contains("editable")) {
let editor = window.ace.edit(code_block);
editor.addEventListener("change", function (e) {
update_play_button(playpen_block, playground_crates);
});
}
}
}
// updates the visibility of play button based on `no_run` class and
// used crates vs ones available on http://play.rust-lang.org
function update_play_button(pre_block, playground_crates) {
var play_button = pre_block.querySelector(".play-button");
// skip if code is `no_run`
if (pre_block.querySelector('code').classList.contains("no_run")) {
play_button.classList.add("hidden");
return;
}
// get list of `extern crate`'s from snippet
var txt = playpen_text(pre_block);
var re = /extern\s+crate\s+([a-zA-Z_0-9]+)\s*;/g;
var snippet_crates = [];
var item;
while (item = re.exec(txt)) {
snippet_crates.push(item[1]);
}
// check if all used crates are available on play.rust-lang.org
var all_available = snippet_crates.every(function (elem) {
return playground_crates.indexOf(elem) > -1;
});
if (all_available) {
play_button.classList.remove("hidden");
} else {
play_button.classList.add("hidden");
}
}
function run_rust_code(code_block) {
var result_block = code_block.querySelector(".result");
if (!result_block) {
result_block = document.createElement('code');
result_block.className = 'result hljs language-bash';
code_block.append(result_block);
}
let text = playpen_text(code_block);
var params = {
version: "stable",
optimize: "0",
code: text
};
if (text.indexOf("#![feature") !== -1) {
params.version = "nightly";
}
result_block.innerText = "Running...";
fetch_with_timeout("https://play.rust-lang.org/evaluate.json", {
headers: {
'Content-Type': "application/json",
},
method: 'POST',
mode: 'cors',
body: JSON.stringify(params)
})
.then(response => response.json())
.then(response => result_block.innerText = response.result)
.catch(error => result_block.innerText = "Playground Communication: " + error.message);
}
// Syntax highlighting Configuration
hljs.configure({
tabReplace: ' ', // 4 spaces
languages: [], // Languages used for auto-detection
});
if (window.ace) {
// language-rust class needs to be removed for editable
// blocks or highlightjs will capture events
Array
.from(document.querySelectorAll('code.editable'))
.forEach(function (block) { block.classList.remove('language-rust'); });
Array
.from(document.querySelectorAll('code:not(.editable)'))
.forEach(function (block) { hljs.highlightBlock(block); });
} else {
Array
.from(document.querySelectorAll('code'))
.forEach(function (block) { hljs.highlightBlock(block); });
}
// Adding the hljs class gives code blocks the color css
// even if highlighting doesn't apply
Array
.from(document.querySelectorAll('code'))
.forEach(function (block) { block.classList.add('hljs'); });
Array.from(document.querySelectorAll("code.language-rust")).forEach(function (block) {
var code_block = block;
var pre_block = block.parentNode;
// hide lines
var lines = code_block.innerHTML.split("\n");
var first_non_hidden_line = false;
var lines_hidden = false;
var trimmed_line = "";
for (var n = 0; n < lines.length; n++) {
trimmed_line = lines[n].trim();
if (trimmed_line[0] == hiding_character && trimmed_line[1] != hiding_character) {
if (first_non_hidden_line) {
lines[n] = "<span class=\"hidden\">" + "\n" + lines[n].replace(/(\s*)# ?/, "$1") + "</span>";
}
else {
lines[n] = "<span class=\"hidden\">" + lines[n].replace(/(\s*)# ?/, "$1") + "\n" + "</span>";
}
lines_hidden = true;
}
else if (first_non_hidden_line) {
lines[n] = "\n" + lines[n];
}
else {
first_non_hidden_line = true;
}
if (trimmed_line[0] == hiding_character && trimmed_line[1] == hiding_character) {
lines[n] = lines[n].replace("##", "#")
}
}
code_block.innerHTML = lines.join("");
// If no lines were hidden, return
if (!lines_hidden) { return; }
var buttons = document.createElement('div');
buttons.className = 'buttons';
buttons.innerHTML = "<button class=\"fa fa-expand\" title=\"Show hidden lines\" aria-label=\"Show hidden lines\"></button>";
// add expand button
pre_block.insertBefore(buttons, pre_block.firstChild);
pre_block.querySelector('.buttons').addEventListener('click', function (e) {
if (e.target.classList.contains('fa-expand')) {
var lines = pre_block.querySelectorAll('span.hidden');
e.target.classList.remove('fa-expand');
e.target.classList.add('fa-compress');
e.target.title = 'Hide lines';
e.target.setAttribute('aria-label', e.target.title);
Array.from(lines).forEach(function (line) {
line.classList.remove('hidden');
line.classList.add('unhidden');
});
} else if (e.target.classList.contains('fa-compress')) {
var lines = pre_block.querySelectorAll('span.unhidden');
e.target.classList.remove('fa-compress');
e.target.classList.add('fa-expand');
e.target.title = 'Show hidden lines';
e.target.setAttribute('aria-label', e.target.title);
Array.from(lines).forEach(function (line) {
line.classList.remove('unhidden');
line.classList.add('hidden');
});
}
});
});
Array.from(document.querySelectorAll('pre code')).forEach(function (block) {
var pre_block = block.parentNode;
if (!pre_block.classList.contains('playpen')) {
var buttons = pre_block.querySelector(".buttons");
if (!buttons) {
buttons = document.createElement('div');
buttons.className = 'buttons';
pre_block.insertBefore(buttons, pre_block.firstChild);
}
var clipButton = document.createElement('button');
clipButton.className = 'fa fa-copy clip-button';
clipButton.title = 'Copy to clipboard';
clipButton.setAttribute('aria-label', clipButton.title);
clipButton.innerHTML = '<i class=\"tooltiptext\"></i>';
buttons.insertBefore(clipButton, buttons.firstChild);
}
});
// Process playpen code blocks
Array.from(document.querySelectorAll(".playpen")).forEach(function (pre_block) {
// Add play button
var buttons = pre_block.querySelector(".buttons");
if (!buttons) {
buttons = document.createElement('div');
buttons.className = 'buttons';
pre_block.insertBefore(buttons, pre_block.firstChild);
}
var runCodeButton = document.createElement('button');
runCodeButton.className = 'fa fa-play play-button';
runCodeButton.hidden = true;
runCodeButton.title = 'Run this code';
runCodeButton.setAttribute('aria-label', runCodeButton.title);
var copyCodeClipboardButton = document.createElement('button');
copyCodeClipboardButton.className = 'fa fa-copy clip-button';
copyCodeClipboardButton.innerHTML = '<i class="tooltiptext"></i>';
copyCodeClipboardButton.title = 'Copy to clipboard';
copyCodeClipboardButton.setAttribute('aria-label', copyCodeClipboardButton.title);
buttons.insertBefore(runCodeButton, buttons.firstChild);
buttons.insertBefore(copyCodeClipboardButton, buttons.firstChild);
runCodeButton.addEventListener('click', function (e) {
run_rust_code(pre_block);
});
let code_block = pre_block.querySelector("code");
if (window.ace && code_block.classList.contains("editable")) {
var undoChangesButton = document.createElement('button');
undoChangesButton.className = 'fa fa-history reset-button';
undoChangesButton.title = 'Undo changes';
undoChangesButton.setAttribute('aria-label', undoChangesButton.title);
buttons.insertBefore(undoChangesButton, buttons.firstChild);
undoChangesButton.addEventListener('click', function () {
let editor = window.ace.edit(code_block);
editor.setValue(editor.originalCode);
editor.clearSelection();
});
}
});
})();
(function themes() {
var html = document.querySelector('html');
var themeToggleButton = document.getElementById('theme-toggle');
var themePopup = document.getElementById('theme-list');
var themeColorMetaTag = document.querySelector('meta[name="theme-color"]');
var stylesheets = {
ayuHighlight: document.querySelector("[href$='ayu-highlight.css']"),
tomorrowNight: document.querySelector("[href$='tomorrow-night.css']"),
highlight: document.querySelector("[href$='highlight.css']"),
};
function showThemes() {
themePopup.style.display = 'block';
themeToggleButton.setAttribute('aria-expanded', true);
themePopup.querySelector("button#" + document.body.className).focus();
}
function hideThemes() {
themePopup.style.display = 'none';
themeToggleButton.setAttribute('aria-expanded', false);
themeToggleButton.focus();
}
function set_theme(theme) {
let ace_theme;
if (theme == 'coal' || theme == 'navy') {
stylesheets.ayuHighlight.disabled = true;
stylesheets.tomorrowNight.disabled = false;
stylesheets.highlight.disabled = true;
ace_theme = "ace/theme/tomorrow_night";
} else if (theme == 'ayu') {
stylesheets.ayuHighlight.disabled = false;
stylesheets.tomorrowNight.disabled = true;
stylesheets.highlight.disabled = true;
ace_theme = "ace/theme/tomorrow_night";
} else {
stylesheets.ayuHighlight.disabled = true;
stylesheets.tomorrowNight.disabled = true;
stylesheets.highlight.disabled = false;
ace_theme = "ace/theme/dawn";
}
setTimeout(function () {
themeColorMetaTag.content = getComputedStyle(document.body).backgroundColor;
}, 1);
if (window.ace && window.editors) {
window.editors.forEach(function (editor) {
editor.setTheme(ace_theme);
});
}
var previousTheme;
try { previousTheme = localStorage.getItem('mdbook-theme'); } catch (e) { }
if (previousTheme === null || previousTheme === undefined) { previousTheme = 'light'; }
try { localStorage.setItem('mdbook-theme', theme); } catch (e) { }
document.body.className = theme;
html.classList.remove(previousTheme);
html.classList.add(theme);
}
// Set theme
var theme;
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = 'light'; }
set_theme(theme);
// themeToggleButton.addEventListener('click', function () {
// if (themePopup.style.display === 'block') {
// hideThemes();
// } else {
// showThemes();
// }
// });
themePopup.addEventListener('click', function (e) {
var theme = e.target.id || e.target.parentElement.id;
set_theme(theme);
});
themePopup.addEventListener('focusout', function(e) {
// e.relatedTarget is null in Safari and Firefox on macOS (see workaround below)
if (!!e.relatedTarget && !themeToggleButton.contains(e.relatedTarget) && !themePopup.contains(e.relatedTarget)) {
hideThemes();
}
});
// Should not be needed, but it works around an issue on macOS & iOS: https://github.com/rust-lang-nursery/mdBook/issues/628
document.addEventListener('click', function(e) {
if (themePopup.style.display === 'block' && !themeToggleButton.contains(e.target) && !themePopup.contains(e.target)) {
hideThemes();
}
});
document.addEventListener('keydown', function (e) {
if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; }
if (!themePopup.contains(e.target)) { return; }
switch (e.key) {
case 'Escape':
e.preventDefault();
hideThemes();
break;
case 'ArrowUp':
e.preventDefault();
var li = document.activeElement.parentElement;
if (li && li.previousElementSibling) {
li.previousElementSibling.querySelector('button').focus();
}
break;
case 'ArrowDown':
e.preventDefault();
var li = document.activeElement.parentElement;
if (li && li.nextElementSibling) {
li.nextElementSibling.querySelector('button').focus();
}
break;
case 'Home':
e.preventDefault();
themePopup.querySelector('li:first-child button').focus();
break;
case 'End':
e.preventDefault();
themePopup.querySelector('li:last-child button').focus();
break;
}
});
})();
(function sidebar() {
var html = document.querySelector("html");
var sidebar = document.getElementById("sidebar");
var sidebarLinks = document.querySelectorAll('#sidebar a');
var sidebarToggleButton = document.getElementById("sidebar-toggle");
var firstContact = null;
function showSidebar() {
html.classList.remove('sidebar-hidden')
html.classList.add('sidebar-visible');
Array.from(sidebarLinks).forEach(function (link) {
link.setAttribute('tabIndex', 0);
});
sidebarToggleButton.setAttribute('aria-expanded', true);
sidebar.setAttribute('aria-hidden', false);
try { localStorage.setItem('mdbook-sidebar', 'visible'); } catch (e) { }
}
function hideSidebar() {
html.classList.remove('sidebar-visible')
html.classList.add('sidebar-hidden');
Array.from(sidebarLinks).forEach(function (link) {
link.setAttribute('tabIndex', -1);
});
sidebarToggleButton.setAttribute('aria-expanded', false);
sidebar.setAttribute('aria-hidden', true);
try { localStorage.setItem('mdbook-sidebar', 'hidden'); } catch (e) { }
}
// Toggle sidebar
sidebarToggleButton.addEventListener('click', function sidebarToggle() {
if (html.classList.contains("sidebar-hidden")) {
showSidebar();
} else if (html.classList.contains("sidebar-visible")) {
hideSidebar();
} else {
if (getComputedStyle(sidebar)['transform'] === 'none') {
hideSidebar();
} else {
showSidebar();
}
}
});
document.addEventListener('touchstart', function (e) {
firstContact = {
x: e.touches[0].clientX,
time: Date.now()
};
}, { passive: true });
document.addEventListener('touchmove', function (e) {
if (!firstContact)
return;
var curX = e.touches[0].clientX;
var xDiff = curX - firstContact.x,
tDiff = Date.now() - firstContact.time;
if (tDiff < 250 && Math.abs(xDiff) >= 150) {
if (xDiff >= 0 && firstContact.x < Math.min(document.body.clientWidth * 0.25, 300))
showSidebar();
else if (xDiff < 0 && curX < 300)
hideSidebar();
firstContact = null;
}
}, { passive: true });
// Scroll sidebar to current active section
var activeSection = sidebar.querySelector(".active");
if (activeSection) {
sidebar.scrollTop = activeSection.offsetTop;
}
})();
(function chapterNavigation() {
document.addEventListener('keydown', function (e) {
if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; }
if (window.search && window.search.hasFocus()) { return; }
switch (e.key) {
case 'ArrowRight':
e.preventDefault();
var nextButton = document.querySelector('.nav-chapters.next');
if (nextButton) {
window.location.href = nextButton.href;
}
break;
case 'ArrowLeft':
e.preventDefault();
var previousButton = document.querySelector('.nav-chapters.previous');
if (previousButton) {
window.location.href = previousButton.href;
}
break;
}
});
})();
(function clipboard() {
var clipButtons = document.querySelectorAll('.clip-button');
function hideTooltip(elem) {
elem.firstChild.innerText = "";
elem.className = 'fa fa-copy clip-button';
}
function showTooltip(elem, msg) {
elem.firstChild.innerText = msg;
elem.className = 'fa fa-copy tooltipped';
}
var clipboardSnippets = new Clipboard('.clip-button', {
text: function (trigger) {
hideTooltip(trigger);
let playpen = trigger.closest("pre");
return playpen_text(playpen);
}
});
Array.from(clipButtons).forEach(function (clipButton) {
clipButton.addEventListener('mouseout', function (e) {
hideTooltip(e.currentTarget);
});
});
clipboardSnippets.on('success', function (e) {
e.clearSelection();
showTooltip(e.trigger, "Copied!");
});
clipboardSnippets.on('error', function (e) {
showTooltip(e.trigger, "Clipboard error!");
});
})();
(function scrollToTop () {
var menuTitle = document.querySelector('.menu-title');
menuTitle.addEventListener('click', function () {
document.scrollingElement.scrollTo({ top: 0, behavior: 'smooth' });
});
})();
(function autoHideMenu() {
var menu = document.getElementById('menu-bar');
var previousScrollTop = document.scrollingElement.scrollTop;
document.addEventListener('scroll', function () {
if (menu.classList.contains('folded') && document.scrollingElement.scrollTop < previousScrollTop) {
menu.classList.remove('folded');
} else if (!menu.classList.contains('folded') && document.scrollingElement.scrollTop > previousScrollTop) {
menu.classList.add('folded');
}
if (!menu.classList.contains('bordered') && document.scrollingElement.scrollTop > 0) {
menu.classList.add('bordered');
}
if (menu.classList.contains('bordered') && document.scrollingElement.scrollTop === 0) {
menu.classList.remove('bordered');
}
previousScrollTop = document.scrollingElement.scrollTop;
}, { passive: true });
})();

530
book/theme/css/chrome.css Normal file

@ -0,0 +1,530 @@
/* CSS for UI elements (a.k.a. chrome) */
@import 'variables.css';
::-webkit-scrollbar {
background: var(--bg);
}
::-webkit-scrollbar-thumb {
background: var(--scrollbar);
}
#searchresults a,
a:visited,
a > .hljs {
color: #000;
}
#searchresults a:hover {
text-decoration: underline;
}
.content a {
color: #000;
}
/* Menu Bar */
#menu-bar {
position: -webkit-sticky;
position: sticky;
top: 0;
padding: 0 15px;
padding: 0;
z-index: 101;
width: 100%;
/* margin: auto calc(0px - var(--page-padding)); */
}
#menu-bar > #menu-bar-sticky-container {
display: flex;
flex-wrap: wrap;
background-color: var(--bg);
border-bottom-color: var(--bg);
border-bottom-width: 1px;
border-bottom-style: solid;
}
.js #menu-bar > #menu-bar-sticky-container {
transition: transform 0.3s;
}
#menu-bar.bordered > #menu-bar-sticky-container {
border-bottom-color: var(--table-border-color);
}
#menu-bar i, #menu-bar .icon-button {
position: relative;
padding: 0 8px;
z-index: 10;
line-height: 50px;
cursor: pointer;
transition: color 0.5s;
}
@media only screen and (max-width: 420px) {
#menu-bar i, #menu-bar .icon-button {
padding: 0 5px;
}
}
.icon-button {
border: none;
background: none;
padding: 0;
color: inherit;
}
.icon-button i {
margin: 0;
}
#print-button {
margin: 0 15px;
}
html:not(.sidebar-visible) #menu-bar:not(:hover).folded > #menu-bar-sticky-container {
transform: translateY(-60px);
}
.left-buttons {
display: flex;
margin: 0 5px;
}
.no-js .left-buttons {
display: none;
}
.menu-title {
display: inline-block;
font-weight: 200;
font-size: 20px;
line-height: 50px;
text-align: center;
margin: 0;
flex: 1;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.js .menu-title {
cursor: pointer;
}
.menu-bar,
.menu-bar:visited,
.nav-chapters,
.nav-chapters:visited,
.mobile-nav-chapters,
.mobile-nav-chapters:visited,
.menu-bar .icon-button,
.menu-bar a i {
color: var(--icons);
}
.menu-bar i:hover,
.menu-bar .icon-button:hover,
.nav-chapters:hover,
.mobile-nav-chapters i:hover {
color: var(--icons-hover);
}
/* Nav Icons */
.nav-chapters {
font-size: 2.5em;
text-align: center;
text-decoration: none;
position: fixed;
top: 50px; /* Height of menu-bar */
bottom: 0;
margin: 0;
max-width: 150px;
min-width: 90px;
display: flex;
justify-content: center;
align-content: center;
flex-direction: column;
transition: color 0.5s;
}
.nav-chapters:hover { text-decoration: none; }
.nav-wrapper {
margin-top: 50px;
display: none;
}
.mobile-nav-chapters {
font-size: 2.5em;
text-align: center;
text-decoration: none;
width: 90px;
border-radius: 5px;
background-color: var(--sidebar-bg);
}
.previous {
float: left;
}
.next {
float: right;
right: 15px;
}
@media only screen and (max-width: 1080px) {
.nav-wide-wrapper { display: none; }
.nav-wrapper { display: block; }
}
@media only screen and (max-width: 1380px) {
.sidebar-visible .nav-wide-wrapper { display: none; }
.sidebar-visible .nav-wrapper { display: block; }
}
/* Inline code */
:not(pre) > .hljs {
display: inline-block;
vertical-align: middle;
padding: 0.1em 0.3em;
border-radius: 3px;
color: var(--inline-code-color);
}
a:hover > .hljs {
text-decoration: underline;
}
pre {
position: relative;
}
pre > .buttons {
position: absolute;
z-index: 100;
right: 5px;
top: 5px;
color: var(--sidebar-fg);
cursor: pointer;
}
pre > .buttons :hover {
color: var(--sidebar-active);
}
pre > .buttons i {
margin-left: 8px;
}
pre > .buttons button {
color: inherit;
background: transparent;
border: none;
cursor: inherit;
}
pre > .result {
margin-top: 10px;
}
/* Search */
#searchresults a {
text-decoration: none;
}
mark {
border-radius: 2px;
padding: 0 3px 1px 3px;
margin: 0 -3px -1px -3px;
background-color: var(--search-mark-bg);
transition: background-color 300ms linear;
cursor: pointer;
}
mark.fade-out {
background-color: rgba(0,0,0,0) !important;
cursor: auto;
}
.searchbar-outer {
margin-left: auto;
margin-right: auto;
max-width: var(--content-max-width);
}
#searchbar {
width: 100%;
margin: 5px auto 0px auto;
padding: 10px 16px;
transition: box-shadow 300ms ease-in-out;
border: 1px solid var(--searchbar-border-color);
border-radius: 3px;
background-color: var(--searchbar-bg);
color: var(--searchbar-fg);
}
#searchbar:focus,
#searchbar.active {
box-shadow: 0 0 3px var(--searchbar-shadow-color);
}
.searchresults-header {
font-weight: normal;
font-size: 1em;
padding: 18px 28px 0 28px;
color: var(--searchresults-header-fg);
}
.searchresults-outer {
margin-left: auto;
margin-right: auto;
max-width: var(--content-max-width);
border-bottom: 1px dashed var(--searchresults-border-color);
}
ul#searchresults {
list-style: none;
padding-left: 28px;
}
ul#searchresults li {
margin: 10px 0px;
padding: 2px;
border-radius: 2px;
}
ul#searchresults li.focus {
background-color: var(--searchresults-li-bg);
}
ul#searchresults span.teaser {
display: block;
clear: both;
margin: 5px 0 0 20px;
font-size: 0.8em;
}
ul#searchresults span.teaser em {
font-weight: bold;
font-style: normal;
}
/* Sidebar */
.sidebar {
position: fixed;
left: 0;
top: 0;
bottom: 0;
width: var(--sidebar-width);
overflow-y: auto;
font-size: 0.875em;
box-sizing: border-box;
-webkit-overflow-scrolling: touch;
overscroll-behavior-y: contain;
background-color: var(--sidebar-bg);
color: var(--sidebar-fg);
}
.sidebar img {
display: block;
max-width: 70%;
margin: 0 auto;
}
.js .sidebar {
transition: transform 0.3s; /* Animation: slide away */
}
.sidebar code {
line-height: 2em;
}
.sidebar-hidden .sidebar {
transform: translateX(calc(0px - var(--sidebar-width)));
}
.sidebar::-webkit-scrollbar {
background: var(--sidebar-bg);
}
.sidebar::-webkit-scrollbar-thumb {
background: var(--scrollbar);
}
.sidebar-visible .page-wrapper {
transform: translateX(var(--sidebar-width));
}
@media only screen and (min-width: 620px) {
.sidebar-visible .page-wrapper {
transform: none;
margin-left: var(--sidebar-width);
}
}
.chapter {
list-style: none outside none;
padding-left: 0;
line-height: 2.2em;
margin-top: 0;
}
.chapter li {
color: var(--sidebar-non-existant);
}
.chapter li a {
color: var(--sidebar-fg);
display: block;
padding: 0;
text-decoration: none;
padding-left: 25px;
font-size: 13px;
padding-top: 0.3em;
padding-bottom: 0.3em;
font-weight: normal;
}
.chapter li a strong {
font-weight: normal;
}
.chapter li a:hover { color: var(--sidebar-active);
background: #00A670; }
.chapter li .active {
/* Animate color change */
color: var(--sidebar-active);
background: #00A670;
}
.content a:hover {
color: #000;
background: none;
}
.spacer {
width: 100%;
height: 3px;
margin: 5px 0px;
}
.chapter .spacer {
background-color: var(--sidebar-spacer);
}
@media (-moz-touch-enabled: 1), (pointer: coarse) {
/* .chapter li a { padding: 5px 0; } */
.spacer { margin: 10px 0; }
}
.section {
list-style: none outside none;
padding-left: 20px;
line-height: 1.9em;
}
/* Theme Menu Popup */
.theme-popup {
position: absolute;
left: 10px;
top: 50px;
z-index: 1000;
border-radius: 4px;
font-size: 0.7em;
color: var(--fg);
background: var(--theme-popup-bg);
border: 1px solid var(--theme-popup-border);
margin: 0;
padding: 0;
list-style: none;
display: none;
}
.theme-popup .default {
color: var(--icons);
}
.theme-popup .theme {
width: 100%;
border: 0;
margin: 0;
padding: 2px 10px;
line-height: 25px;
white-space: nowrap;
text-align: left;
cursor: pointer;
color: inherit;
background: inherit;
font-size: inherit;
}
.theme-popup .theme:hover {
background-color: var(--theme-hover);
}
.theme-popup .theme:hover:first-child,
.theme-popup .theme:hover:last-child {
border-top-left-radius: inherit;
border-top-right-radius: inherit;
}
.content h1 {
font-size: 25px;
font-weight: 300;
padding: 0 28px;
padding-top: 0.5em;
padding-bottom: 0.5em;
margin-bottom: 21px;
margin-top: 2em;
border-top: 1px solid #e5e5e5;
border-bottom: 1px solid #e5e5e5;
background-color: #fff;
font-family: Poppins, sans-serif;
}
.content p {
line-height: 1.6;
margin-top: 0;
padding: 0 28px;
}
.content h2 {
font-family: Poppins, sans-serif;
font-size: 15px;
font-weight: 300;
margin-top: 2em;
margin-bottom: 0;
padding: 0 28px;
padding-top: 1.2em;
padding-bottom: 1.2em;
}
.content h3 {
font-size: 15px;
margin-top: 2.5em;
margin-bottom: 0.8em;
padding: 0 28px;
}
.content code {
background-color: rgba(0,0,0,0.05);
padding: 3px;
border-radius: 3px;
font-family: Consolas, Menlo, Monaco, "Lucida Console", "Liberation Mono", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace, serif;
font-size: 13px;
line-height: 1.5;
color: #333;
}
.language-ini.hljs,
.language-manpage.hljs,
.language-sh.hljs,
.language-bash.hljs {
background-color: #262B26;
color: #fff;
margin: 0;
padding-top: 2em;
padding-bottom: 2em;
padding: 2em 28px;
}
.content h4,
.content h5 {
font-size: 15px;
margin-top: 2.5em;
margin-bottom: 0.8em;
padding: 0 28px;
}
.content table {
margin-bottom: 1em;
}
.content ul {
padding: 0 28px;
padding-left: 43px;
}
.content ul li {
line-height: 1.6;
margin-top: 0;
}
.content ul p {
padding: 0;
margin: 0;
}
.content pre {
padding: 0 28px;
}

155
book/theme/css/general.css Normal file

@ -0,0 +1,155 @@
/* Base styles and content styles */
@import 'variables.css';
html {
font-family: Lato, 'Helvetica Neue', 'Arial', sans-serif;
color: var(--fg);
background-color: var(--bg);
text-size-adjust: none;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
body {
margin: 0;
font-size: 1rem;
overflow-x: hidden;
font-family: Lato, 'Helvetica Neue', 'Arial', sans-serif;
font-size: 14px;
font-weight: 300;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
code {
font-family: Consolas, Menlo, Monaco, "Lucida Console", "Liberation Mono", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Courier New", monospace, serif;
font-size: 13px; /* please adjust the ace font size accordingly in editor.js */
line-height: 1.5;
}
.left { float: left; }
.right { float: right; }
.hidden { display: none; }
.play-button.hidden { display: none; }
h1, h2, h3 { margin-top: 2.5em; }
h4, h5 { margin-top: 2em; }
.header + .header h3,
.header + .header h4,
.header + .header h5 {
margin-top: 1em;
}
a.header:target h1:before,
a.header:target h2:before,
a.header:target h3:before,
a.header:target h4:before {
display: inline-block;
content: "»";
margin-left: -30px;
width: 30px;
}
.page {
outline: 0;
/* padding: 0 var(--page-padding); */
}
.page-wrapper {
box-sizing: border-box;
}
.js .page-wrapper {
transition: margin-left 0.3s ease, transform 0.3s ease; /* Animation: slide away */
}
.content {
overflow-y: auto;
padding-bottom: 50px;
}
.content main {
margin-left: auto;
margin-right: auto;
max-width: var(--content-max-width);
}
.content a:hover { text-decoration: underline; }
.content img { max-width: 100%; }
.content .header:link,
.content .header:visited {
color: var(--fg);
}
.content .header:link,
.content .header:visited:hover {
text-decoration: none;
}
table {
margin: 0 auto;
border-collapse: collapse;
}
table td {
padding: 3px 20px;
border: 1px var(--table-border-color) solid;
}
table thead {
background: var(--table-header-bg);
}
table thead td {
font-weight: 700;
border: none;
}
table thead tr {
border: 1px var(--table-header-bg) solid;
}
/* Alternate background colors for rows */
table tbody tr:nth-child(2n) {
background: var(--table-alternate-bg);
}
blockquote {
margin: 20px 0;
padding: 0 20px;
color: var(--fg);
background-color: var(--quote-bg);
border-top: .1em solid var(--quote-border);
border-bottom: .1em solid var(--quote-border);
}
:not(.footnote-definition) + .footnote-definition,
.footnote-definition + :not(.footnote-definition) {
margin-top: 2em;
}
.footnote-definition {
font-size: 0.9em;
margin: 0.5em 0;
}
.footnote-definition p {
display: inline;
}
.tooltiptext {
position: absolute;
visibility: hidden;
color: #fff;
background-color: #333;
transform: translateX(-50%); /* Center by moving tooltip 50% of its width left */
left: -8px; /* Half of the width of the icon */
top: -35px;
font-size: 0.8em;
text-align: center;
border-radius: 6px;
padding: 5px 8px;
margin: 5px;
z-index: 1000;
}
.tooltipped .tooltiptext {
visibility: visible;
}
*:focus,
*:active,
*:hover {
outline: none;
}

54
book/theme/css/print.css Normal file

@ -0,0 +1,54 @@
#sidebar,
#menu-bar,
.nav-chapters,
.mobile-nav-chapters {
display: none;
}
#page-wrapper.page-wrapper {
transform: none;
margin-left: 0px;
overflow-y: initial;
}
#content {
max-width: none;
margin: 0;
padding: 0;
}
.page {
overflow-y: initial;
}
code {
background-color: #666666;
border-radius: 5px;
/* Force background to be printed in Chrome */
-webkit-print-color-adjust: exact;
}
pre > .buttons {
z-index: 2;
}
a, a:visited, a:active, a:hover {
color: #4183c4;
text-decoration: none;
}
h1, h2, h3, h4, h5, h6 {
page-break-inside: avoid;
page-break-after: avoid;
}
pre, code {
page-break-inside: avoid;
white-space: pre-wrap;
}
.fa {
display: none !important;
}


@ -0,0 +1,210 @@
/* Globals */
:root {
--sidebar-width: 300px;
/* --page-padding: 15px; */
--content-max-width: 100%;
}
/* Themes */
.ayu {
--bg: hsl(210, 25%, 8%);
--fg: #c5c5c5;
--sidebar-bg: #14191f;
--sidebar-fg: #c8c9db;
--sidebar-non-existant: #5c6773;
--sidebar-active: #ffb454;
--sidebar-spacer: #2d334f;
--scrollbar: var(--sidebar-fg);
--icons: #737480;
--icons-hover: #b7b9cc;
--links: #0096cf;
--inline-code-color: #ffb454;
--theme-popup-bg: #14191f;
--theme-popup-border: #5c6773;
--theme-hover: #191f26;
--quote-bg: hsl(226, 15%, 17%);
--quote-border: hsl(226, 15%, 22%);
--table-border-color: hsl(210, 25%, 13%);
--table-header-bg: hsl(210, 25%, 28%);
--table-alternate-bg: hsl(210, 25%, 11%);
--searchbar-border-color: #848484;
--searchbar-bg: #424242;
--searchbar-fg: #fff;
--searchbar-shadow-color: #d4c89f;
--searchresults-header-fg: #666;
--searchresults-border-color: #888;
--searchresults-li-bg: #252932;
--search-mark-bg: #e3b171;
}
.coal {
--bg: hsl(200, 7%, 8%);
--fg: #98a3ad;
--sidebar-bg: #292c2f;
--sidebar-fg: #fff;
--sidebar-non-existant: #505254;
--sidebar-active: #fff;
--sidebar-spacer: #393939;
--scrollbar: var(--sidebar-fg);
--icons: #43484d;
--icons-hover: #b3c0cc;
--links: #2b79a2;
--inline-code-color: #c5c8c6;
--theme-popup-bg: #141617;
--theme-popup-border: #43484d;
--theme-hover: #1f2124;
--quote-bg: hsl(234, 21%, 18%);
--quote-border: hsl(234, 21%, 23%);
--table-border-color: hsl(200, 7%, 13%);
--table-header-bg: hsl(200, 7%, 28%);
--table-alternate-bg: hsl(200, 7%, 11%);
--searchbar-border-color: #aaa;
--searchbar-bg: #b7b7b7;
--searchbar-fg: #000;
--searchbar-shadow-color: #aaa;
--searchresults-header-fg: #666;
--searchresults-border-color: #98a3ad;
--searchresults-li-bg: #2b2b2f;
--search-mark-bg: #355c7d;
}
.light {
--bg: #f7f7f7;
--fg: #333333;
--sidebar-bg: #050505;
--sidebar-fg: #fff;
--sidebar-non-existant: #aaaaaa;
--sidebar-active: #fff;
--sidebar-spacer: #f4f4f4;
--scrollbar: #cccccc;
--icons: #cccccc;
--icons-hover: #333333;
--links: #000;
--inline-code-color: #6e6b5e;
--theme-popup-bg: #fafafa;
--theme-popup-border: #cccccc;
--theme-hover: #e6e6e6;
--quote-bg: hsl(197, 37%, 96%);
--quote-border: hsl(197, 37%, 91%);
--table-border-color: hsl(0, 0%, 95%);
--table-header-bg: hsl(0, 0%, 80%);
--table-alternate-bg: hsl(0, 0%, 97%);
--searchbar-border-color: #aaa;
--searchbar-bg: #fafafa;
--searchbar-fg: #000;
--searchbar-shadow-color: #aaa;
--searchresults-header-fg: #666;
--searchresults-border-color: #888;
--searchresults-li-bg: #e4f2fe;
--search-mark-bg: #a2cff5;
}
.navy {
--bg: hsl(226, 23%, 11%);
--fg: #bcbdd0;
--sidebar-bg: #282d3f;
--sidebar-fg: #c8c9db;
--sidebar-non-existant: #505274;
--sidebar-active: #2b79a2;
--sidebar-spacer: #2d334f;
--scrollbar: var(--sidebar-fg);
--icons: #737480;
--icons-hover: #b7b9cc;
--links: #2b79a2;
--inline-code-color: #c5c8c6;
--theme-popup-bg: #161923;
--theme-popup-border: #737480;
--theme-hover: #282e40;
--quote-bg: hsl(226, 15%, 17%);
--quote-border: hsl(226, 15%, 22%);
--table-border-color: hsl(226, 23%, 16%);
--table-header-bg: hsl(226, 23%, 31%);
--table-alternate-bg: hsl(226, 23%, 14%);
--searchbar-border-color: #aaa;
--searchbar-bg: #aeaec6;
--searchbar-fg: #000;
--searchbar-shadow-color: #aaa;
--searchresults-header-fg: #5f5f71;
--searchresults-border-color: #5c5c68;
--searchresults-li-bg: #242430;
--search-mark-bg: #a2cff5;
}
.rust {
--bg: hsl(60, 9%, 87%);
--fg: #262625;
--sidebar-bg: #3b2e2a;
--sidebar-fg: #c8c9db;
--sidebar-non-existant: #505254;
--sidebar-active: #e69f67;
--sidebar-spacer: #45373a;
--scrollbar: var(--sidebar-fg);
--icons: #737480;
--icons-hover: #262625;
--links: #2b79a2;
--inline-code-color: #6e6b5e;
--theme-popup-bg: #e1e1db;
--theme-popup-border: #b38f6b;
--theme-hover: #99908a;
--quote-bg: hsl(60, 5%, 75%);
--quote-border: hsl(60, 5%, 70%);
--table-border-color: hsl(60, 9%, 82%);
--table-header-bg: #b3a497;
--table-alternate-bg: hsl(60, 9%, 84%);
--searchbar-border-color: #aaa;
--searchbar-bg: #fafafa;
--searchbar-fg: #000;
--searchbar-shadow-color: #aaa;
--searchresults-header-fg: #666;
--searchresults-border-color: #888;
--searchresults-li-bg: #dec2a2;
--search-mark-bg: #e69f67;
}

BIN
book/theme/favicon.png Normal file

Binary file not shown.


69
book/theme/highlight.css Normal file

@ -0,0 +1,69 @@
/* Base16 Atelier Dune Light - Theme */
/* by Bram de Haan (http://atelierbram.github.io/syntax-highlighting/atelier-schemes/dune) */
/* Original Base16 color scheme by Chris Kempson (https://github.com/chriskempson/base16) */
/* Atelier-Dune Comment */
.hljs-comment,
.hljs-quote {
color: #AAA;
}
/* Atelier-Dune Red */
.hljs-variable,
.hljs-template-variable,
.hljs-attribute,
.hljs-tag,
.hljs-name,
.hljs-regexp,
.hljs-link,
.hljs-name,
.hljs-selector-id,
.hljs-selector-class {
color: #f92672;
}
/* Atelier-Dune Orange */
.hljs-number,
.hljs-meta,
.hljs-built_in,
.hljs-builtin-name,
.hljs-literal,
.hljs-type,
.hljs-params {
color: #f6aa11;
}
/* Atelier-Dune Green */
.hljs-string,
.hljs-symbol,
.hljs-bullet {
color: #60ac39;
}
/* Atelier-Dune Blue */
.hljs-title,
.hljs-section {
color: #6684e1;
}
/* Atelier-Dune Purple */
.hljs-keyword,
.hljs-selector-tag {
color: #b854d4;
}
.hljs {
display: block;
overflow-x: auto;
background: #f1f1f1;
color: #6e6b5e;
padding: 0.5em;
}
.hljs-emphasis {
font-style: italic;
}
.hljs-strong {
font-weight: bold;
}

2
book/theme/highlight.js Normal file

File diff suppressed because one or more lines are too long

230
book/theme/index.hbs Normal file

@ -0,0 +1,230 @@
<!DOCTYPE HTML>
<html lang="{{ language }}" class="sidebar-visible no-js">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>{{ title }}</title>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
<meta name="description" content="{{ description }}">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff" />
<link rel="shortcut icon" href="{{ path_to_root }}{{ favicon }}">
<link rel="stylesheet" href="{{ path_to_root }}css/variables.css">
<link rel="stylesheet" href="{{ path_to_root }}css/general.css">
<link rel="stylesheet" href="{{ path_to_root }}css/chrome.css">
<link rel="stylesheet" href="{{ path_to_root }}css/print.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="{{ path_to_root }}FontAwesome/css/font-awesome.css">
<link href="https://fonts.googleapis.com/css?family=Lato:300,400|Poppins:300,400" rel="stylesheet">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" href="{{ path_to_root }}highlight.css">
<link rel="stylesheet" href="{{ path_to_root }}tomorrow-night.css">
<link rel="stylesheet" href="{{ path_to_root }}ayu-highlight.css">
<!-- Custom theme stylesheets -->
{{#each additional_css}}
<link rel="stylesheet" href="{{ ../path_to_root }}{{ this }}">
{{/each}}
{{#if mathjax_support}}
<!-- MathJax -->
<script async type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
{{/if}}
</head>
<body class="light">
<!-- Provide site root to javascript -->
<script type="text/javascript">var path_to_root = "{{ path_to_root }}";</script>
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script type="text/javascript">
try {
var theme = localStorage.getItem('mdbook-theme');
var sidebar = localStorage.getItem('mdbook-sidebar');
if (theme.startsWith('"') && theme.endsWith('"')) {
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
}
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
}
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script type="text/javascript">
var theme;
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = 'light'; }
document.body.className = theme;
document.querySelector('html').className = theme + ' js';
</script>
<!-- Hide / unhide sidebar before it is displayed -->
<script type="text/javascript">
var html = document.querySelector('html');
var sidebar = 'hidden';
if (document.body.clientWidth >= 1080) {
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
sidebar = sidebar || 'visible';
}
html.classList.remove('sidebar-visible');
html.classList.add("sidebar-" + sidebar);
</script>
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
<img src="https://manuel-calavera.github.io/images/logo.png" alt="">
{{#toc}}{{/toc}}
</nav>
<div id="page-wrapper" class="page-wrapper">
<div class="page">
{{> header}}
<div id="menu-bar" class="menu-bar">
<div id="menu-bar-sticky-container">
<div class="left-buttons">
<button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
<i class="fa fa-bars"></i>
</button>
{{!-- <button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
<i class="fa fa-paint-brush"></i>
</button> --}}
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="light">Light <span class="default">(default)</span></button></li>
<li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
</ul>
{{#if search_enabled}}
<button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
<i class="fa fa-search"></i>
</button>
{{/if}}
</div>
<h1 class="menu-title">{{ book_title }}</h1>
<div class="right-buttons">
<a href="{{ path_to_root }}print.html" title="Print this book" aria-label="Print this book">
<i id="print-button" class="fa fa-print"></i>
</a>
</div>
</div>
</div>
{{#if search_enabled}}
<div id="search-wrapper" class="hidden">
<form id="searchbar-outer" class="searchbar-outer">
<input type="search" name="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
</form>
<div id="searchresults-outer" class="searchresults-outer hidden">
<div id="searchresults-header" class="searchresults-header"></div>
<ul id="searchresults">
</ul>
</div>
</div>
{{/if}}
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script type="text/javascript">
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
});
</script>
<div id="content" class="content">
<main>
{{{ content }}}
</main>
</div>
</div>
</div>
{{#if livereload}}
<!-- Livereload script (if served using the cli tool) -->
<script type="text/javascript">
var socket = new WebSocket("{{{livereload}}}");
socket.onmessage = function (event) {
if (event.data === "reload") {
socket.close();
location.reload(true); // force reload from server (not from cache)
}
};
window.onbeforeunload = function() {
socket.close();
}
</script>
{{/if}}
{{#if google_analytics}}
<!-- Google Analytics Tag -->
<script type="text/javascript">
var localAddrs = ["localhost", "127.0.0.1", ""];
// make sure we don't activate google analytics if the developer is
// inspecting the book locally...
if (localAddrs.indexOf(document.location.hostname) === -1) {
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', '{{google_analytics}}', 'auto');
ga('send', 'pageview');
}
</script>
{{/if}}
{{#if playpen_js}}
<script src="{{ path_to_root }}ace.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}editor.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}mode-rust.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}theme-dawn.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}theme-tomorrow_night.js" type="text/javascript" charset="utf-8"></script>
{{/if}}
{{#if search_js}}
<script src="{{ path_to_root }}elasticlunr.min.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}mark.min.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}searcher.js" type="text/javascript" charset="utf-8"></script>
{{/if}}
<script src="{{ path_to_root }}clipboard.min.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}highlight.js" type="text/javascript" charset="utf-8"></script>
<script src="{{ path_to_root }}book.js" type="text/javascript" charset="utf-8"></script>
<!-- Custom JS scripts -->
{{#each additional_js}}
<script type="text/javascript" src="{{ ../path_to_root }}{{this}}"></script>
{{/each}}
{{#if is_print}}
{{#if mathjax_support}}
<script type="text/javascript">
window.addEventListener('load', function() {
MathJax.Hub.Register.StartupHook('End', function() {
window.setTimeout(window.print, 100);
});
});
</script>
{{else}}
<script type="text/javascript">
window.addEventListener('load', function() {
window.setTimeout(window.print, 100);
});
</script>
{{/if}}
{{/if}}
</body>
</html>

View File

@ -2,32 +2,44 @@ use std::env;
use std::fs;
fn main() {
println!("cargo:rerun-if-changed=target/perf-libs");
println!("cargo:rerun-if-changed=build.rs");
// Ensure target/perf-libs/ exists. It's been observed that
// a cargo:rerun-if-changed= directive with a non-existent
// directory triggers a rebuild on every |cargo build| invocation
fs::create_dir("target/perf-libs").unwrap_or_else(|err| {
fs::create_dir_all("target/perf-libs").unwrap_or_else(|err| {
if err.kind() != std::io::ErrorKind::AlreadyExists {
panic!("Unable to create target/perf-libs: {:?}", err);
}
});
let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();
if cuda || erasure {
if chacha || cuda || erasure {
println!("cargo:rerun-if-changed=target/perf-libs");
println!("cargo:rustc-link-search=native=target/perf-libs");
}
if chacha {
println!("cargo:rerun-if-changed=target/perf-libs/libcpu-crypt.a");
}
if cuda {
println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
let cuda_home = match env::var("CUDA_HOME") {
Ok(cuda_home) => cuda_home,
Err(_) => String::from("/usr/local/cuda"),
};
println!("cargo:rerun-if-changed=target/perf-libs/libcuda-crypt.a");
println!("cargo:rustc-link-lib=static=cuda-crypt");
println!("cargo:rustc-link-search=native={}/lib64", cuda_home);
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=cuda");
println!("cargo:rustc-link-lib=dylib=cudadevrt");
}
if erasure {
println!("cargo:rerun-if-changed=target/perf-libs/libgf_complete.so");
println!("cargo:rerun-if-changed=target/perf-libs/libJerasure.so");
println!("cargo:rustc-link-lib=dylib=Jerasure");
println!("cargo:rustc-link-lib=dylib=gf_complete");
}
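With the CUDA_HOME handling above, a non-default CUDA toolkit location can be selected at build time. A minimal sketch, assuming the perf libraries have already been fetched (the path below is illustrative, not part of this diff):
```bash
# Hypothetical sketch: build with the cuda feature against a CUDA toolkit
# installed somewhere other than the /usr/local/cuda default
export CUDA_HOME=/opt/cuda-10.0   # illustrative location
./fetch-perf-libs.sh              # populates target/perf-libs, as ci/publish-tarball.sh does
cargo build --features=cuda
```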

View File

@ -14,6 +14,76 @@ products over to a GPU instance for testing.
## Buildkite Agent Management
### Buildkite Azure Setup
Create a new Azure-based "queue=default" agent by running the following command:
```
$ az vm create \
--resource-group ci \
--name XXX \
--image boilerplate \
--admin-username $(whoami) \
--ssh-key-value ~/.ssh/id_rsa.pub
```
The "boilerplate" image contains all the required packages pre-installed so the
new machine should immediately show up in the Buildkite agent list once it has
been provisioned and be ready for service.
Creating a "queue=cuda" agent follows the same process but additionally:
1. Resize the image from the Azure portal to include a GPU
2. Edit the tags field in /etc/buildkite-agent/buildkite-agent.cfg to `tags="queue=cuda,queue=default"`
and decrease the value of the priority field by one (see the sketch below)
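A minimal sketch of that edit, assuming the stock buildkite-agent.cfg already contains `tags=` and `priority=` lines (the priority=2 to priority=1 change is illustrative only):
```bash
sudo sed -i \
  -e 's/^tags=.*/tags="queue=cuda,queue=default"/' \
  -e 's/^priority=2$/priority=1/' \
  /etc/buildkite-agent/buildkite-agent.cfg
sudo systemctl restart buildkite-agent   # pick up the new tags
```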
#### Updating the CI Disk Image
1. Create a new VM Instance as described above
1. Modify it as required
1. When ready, ssh into the instance and start a root shell with `sudo -i`. Then
prepare it for deallocation by running:
`waagent -deprovision+user; cd /etc; ln -s ../run/systemd/resolve/stub-resolv.conf resolv.conf`
1. Run `az vm deallocate --resource-group ci --name XXX`
1. Run `az vm generalize --resource-group ci --name XXX`
1. Run `az image create --resource-group ci --source XXX --name boilerplate`
1. Go to the `ci` resource group in the Azure portal and remove all resources
with the XXX name in them
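For reference, a sketch consolidating the deallocate/generalize/capture steps above (VM name XXX as in the creation command):
```bash
az vm deallocate --resource-group ci --name XXX
az vm generalize --resource-group ci --name XXX
az image create  --resource-group ci --source XXX --name boilerplate
```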
## Reference
This section contains details regarding previous CI setups that have been used,
and that we may return to one day.
### Buildkite AWS CloudFormation Setup
**AWS CloudFormation is currently inactive, although it may be restored in the
future**
AWS CloudFormation can be used to scale machines up and down based on the
current CI load. If no machine is currently running, it can take up to 60
seconds to spin up a new instance; please remain calm during this time.
#### AMI
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.
Use the following process to update this AMI as dependencies change:
```bash
$ export AWS_ACCESS_KEY_ID=my_access_key
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
$ cd elastic-ci-stack-for-aws/
$ make build
$ make build-ami
```
Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
AMI. For example:
```
amazon-ebs: AMI: ami-07118545e8b4ce6dc
```
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
*apply* the stack changes.
### Buildkite GCP Setup
CI runs on Google Cloud Platform via two Compute Engine Instance groups:
@ -50,40 +120,6 @@ newly created Disk image.
instances to 0 and wait for them all to terminate, (b) Update the Instance
template and restore the number of instances to the original value.
8. Clean up the previous version by deleting it from Instance Templates and
Images.
## Reference
### Buildkite AWS CloudFormation Setup
**AWS CloudFormation is currently inactive, although it may be restored in the
future**
AWS CloudFormation can be used to scale machines up and down based on the
current CI load. If no machine is currently running it can take up to 60
seconds to spin up a new instance, please remain calm during this time.
#### AMI
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.
Use the following process to update this AMI as dependencies change:
```bash
$ export AWS_ACCESS_KEY_ID=my_access_key
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
$ cd elastic-ci-stack-for-aws/
$ make build
$ make build-ami
```
Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
AMI. For example:
```
amazon-ebs: AMI: ami-07118545e8b4ce6dc
```
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
*apply* the stack changes.
Images.

11
ci/_ Normal file
View File

@ -0,0 +1,11 @@
# Buildkite log management helper
#
# See https://buildkite.com/docs/pipelines/managing-log-output
#
# |source| me
#
_() {
echo "--- $*"
"$@"
}
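A hypothetical consumer of this helper, to illustrate the intended pattern:
```bash
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
source ci/_

# Each command is emitted as a "--- ..." Buildkite log group header, then run
_ cargo fmt -- --check
_ cargo build --all
```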

33
ci/affects-files.sh Executable file
View File

@ -0,0 +1,33 @@
#!/usr/bin/env bash
#
# Checks if a CI build affects one or more path patterns. Each command-line
# argument is checked in series.
#
# Bash regular expressions are permitted in the pattern:
# ./affects-files.sh .rs$ -- any file or directory ending in .rs
# ./affects-files.sh .rs -- also matches foo.rs.bar
# ./affects-files.sh ^snap/ -- anything under the snap/ subdirectory
# ./affects-files.sh snap/ -- also matches foo/snap/
#
set -e
cd "$(dirname "$0")"/..
if ci/is-pr.sh; then
affectedFiles="$(buildkite-agent meta-data get affected_files)"
echo "Affected files in this PR: $affectedFiles"
IFS=':' read -ra files <<< "$affectedFiles"
for pattern in "$@"; do
for file in "${files[@]}"; do
if [[ $file =~ $pattern ]]; then
exit 0
fi
done
done
exit 1
fi
# affected_files metadata is not currently available for non-PR builds, so assume
# the worst (affected)
exit 0
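A hypothetical caller, following the same pattern ci/pr-snap.sh uses later in this diff, might gate an expensive step like so:
```bash
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"/..

# Patterns are bash regular expressions; bail out early when no Rust sources changed
ci/affects-files.sh .rs$ Cargo.toml$ || {
  echo "Skipping: no .rs or Cargo.toml files modified in this PR"
  exit 0
}
```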

View File

@ -1,32 +1,20 @@
#!/bin/bash -e
#!/usr/bin/env bash
#
# Audits project dependencies for security vulnerabilities
#
set -e
cd "$(dirname "$0")/.."
source ci/_
export RUST_BACKTRACE=1
rustc --version
cargo --version
cargo_install_unless() {
declare crate=$1
shift
_() {
echo "--- $*"
"$@"
"$@" > /dev/null 2>&1 || \
_ cargo install "$crate"
}
maybe_cargo_install() {
for cmd in "$@"; do
set +e
cargo "$cmd" --help > /dev/null 2>&1
declare exitcode=$?
set -e
if [[ $exitcode -eq 101 ]]; then
_ cargo install cargo-"$cmd"
fi
done
}
cargo_install_unless cargo-audit cargo audit --version
maybe_cargo_install audit tree
_ cargo tree
_ cargo audit || true
_ cargo audit

View File

@ -0,0 +1,20 @@
steps:
#- command: "ci/snap.sh"
# timeout_in_minutes: 40
# name: "snap"
- command: "sdk/docker-solana/build.sh"
timeout_in_minutes: 20
name: "publish docker"
- command: "ci/publish-crate.sh"
timeout_in_minutes: 20
name: "publish crate"
branches: "!master"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- command: "ci/publish-tarball.sh"
timeout_in_minutes: 25
name: "publish tarball"
- command: "ci/publish-book.sh"
timeout_in_minutes: 15
name: "publish book"

View File

@ -1,4 +0,0 @@
steps:
- command: "ci/snap.sh"
timeout_in_minutes: 40
name: "snap [public]"

View File

@ -1,45 +1,37 @@
steps:
- command: "ci/docker-run.sh solanalabs/rust:1.29.1 ci/test-stable.sh"
name: "stable [public]"
env:
CARGO_TARGET_CACHE_NAME: "stable"
timeout_in_minutes: 30
- command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-bench.sh"
name: "bench [public]"
env:
CARGO_TARGET_CACHE_NAME: "nightly"
timeout_in_minutes: 30
- command: "ci/shellcheck.sh"
name: "shellcheck [public]"
name: "shellcheck"
timeout_in_minutes: 20
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-09-03 ci/test-nightly.sh || true"
name: "nightly [public]"
env:
CARGO_TARGET_CACHE_NAME: "nightly"
- command: "ci/docker-run.sh solanalabs/rust:1.31.0 ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 30
- wait
- command: "ci/test-stable-perf.sh"
name: "stable-perf [public]"
env:
CARGO_TARGET_CACHE_NAME: "stable-perf"
name: "stable-perf"
timeout_in_minutes: 20
agents:
- "queue=cuda"
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 30
- command: "ci/docker-run.sh solanalabs/rust:1.31.0 ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 30
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-12-18 ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 30
# TODO: Fix and re-enable test-large-network.sh
# - command: "ci/test-large-network.sh || true"
# name: "large-network [public] [ignored]"
# env:
# CARGO_TARGET_CACHE_NAME: "stable"
# name: "large-network [ignored]"
# timeout_in_minutes: 20
# agents:
# - "queue=large"
- command: "ci/pr-snap.sh"
timeout_in_minutes: 20
name: "snap [public]"
name: "snap"
branches: "pull/*"
- wait
- command: "ci/publish-crate.sh"
timeout_in_minutes: 20
name: "publish crate [public]"
- trigger: "solana-snap"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Computes the current branch names of the edge, beta and stable
# channels, as well as the latest tagged release for beta and stable.
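Downstream scripts in this diff (for example ci/publish-bpf-sdk.sh and ci/publish-tarball.sh) consume its output via eval; a minimal sketch:
```bash
# Import the channel variables emitted by ci/channel-info.sh into this shell
eval "$(ci/channel-info.sh)"
echo "edge=$EDGE_CHANNEL beta=$BETA_CHANNEL stable=$STABLE_CHANNEL"
```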

17
ci/crate-version.sh Executable file
View File

@ -0,0 +1,17 @@
#!/usr/bin/env bash
#
# Outputs the current crate version
#
set -e
cd "$(dirname "$0")"/..
while read -r name equals value _; do
if [[ $name = version && $equals = = ]]; then
echo "${value//\"/}"
exit 0
fi
done < <(cat Cargo.toml)
echo Unable to locate version in Cargo.toml 1>&2
exit 1
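A hypothetical consumer; the script simply prints the version field it finds in the top-level Cargo.toml:
```bash
# Capture the workspace crate version for use elsewhere in CI
version="$(ci/crate-version.sh)"
echo "Publishing solana v$version"
```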

View File

@ -1,4 +1,5 @@
#!/bin/bash -e
#!/usr/bin/env bash
set -e
usage() {
echo "Usage: $0 [--nopull] [docker image name] [command]"
@ -64,8 +65,10 @@ ARGS+=(
--env BUILDKITE
--env BUILDKITE_AGENT_ACCESS_TOKEN
--env BUILDKITE_BRANCH
--env BUILDKITE_COMMIT
--env BUILDKITE_JOB_ID
--env BUILDKITE_TAG
--env CI
--env CODECOV_TOKEN
--env CRATES_IO_TOKEN
--env SNAPCRAFT_CREDENTIALS_KEY

View File

@ -1,10 +1,14 @@
FROM solanalabs/rust
ARG date
RUN set -x && \
rustup install nightly-$date && \
rustup default nightly-$date && \
rustup component add clippy-preview --toolchain=nightly-$date && \
rustc --version && \
cargo --version && \
cargo +nightly-$date install cargo-cov
RUN set -x \
&& rustup install nightly-$date \
&& rustup show \
&& mv /usr/local/rustup/toolchains/nightly-$date-* \
/usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu \
&& rustup show \
&& rustc --version \
&& cargo --version \
&& rustc +nightly --version \
&& cargo +nightly --version

View File

@ -1,4 +1,5 @@
#!/bin/bash -ex
#!/usr/bin/env bash
set -ex
cd "$(dirname "$0")"

View File

@ -1,24 +1,28 @@
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.29.1
FROM rust:1.31.0
RUN set -x && \
apt update && \
apt-get install apt-transport-https && \
echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main > /etc/apt/sources.list.d/llvm.list && \
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
apt update && \
apt install -y \
RUN set -x \
&& apt update \
&& apt-get install apt-transport-https \
&& echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
&& echo deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-7 main > /etc/apt/sources.list.d/llvm.list \
&& apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
&& wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \
&& apt update \
&& apt install -y \
buildkite-agent \
clang-7 \
cmake \
llvm-6.0 \
lcov \
libclang-common-7-dev \
llvm-7 \
rsync \
sudo \
&& \
rustup component add rustfmt-preview && \
rustup component add clippy-preview && \
rm -rf /var/lib/apt/lists/* && \
rustc --version && \
cargo --version
\
&& rustup component add rustfmt \
&& rustup component add clippy \
&& rm -rf /var/lib/apt/lists/* \
&& rustc --version \
&& cargo --version

View File

@ -1,4 +1,5 @@
#!/bin/bash -ex
#!/usr/bin/env bash
set -ex
cd "$(dirname "$0")"

View File

@ -1,4 +1,5 @@
#!/bin/bash -ex
#!/usr/bin/env bash
set -ex
cd "$(dirname "$0")"

20
ci/format-url.sh Executable file
View File

@ -0,0 +1,20 @@
#!/usr/bin/env bash
#
# Formats a URL to be clickable from a Buildkite log
#
if [[ $# -eq 0 ]]; then
echo "Usage: $0 url"
exit 1
fi
if [[ -z $BUILDKITE ]]; then
echo "$1"
else
# shellcheck disable=SC2001
URL="$(echo "$1" | sed 's/;/%3b/g')" # Escape ;
printf '\033]1339;url='
echo -n "$URL"
printf '\a\n'
fi
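Outside of Buildkite the script echoes its argument unchanged; inside a Buildkite job it wraps the URL in the escape sequence the log viewer renders as a clickable link. A hypothetical usage:
```bash
# Emit a clickable link in the Buildkite log (URL is illustrative)
ci/format-url.sh https://example.com/solana-release.tar.bz2
```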

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
#
# Regular maintenance performed on a buildkite agent to control disk usage
#

View File

@ -1,4 +1,5 @@
#!/bin/bash -e
#!/usr/bin/env bash
set -e
#
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the

View File

@ -1,16 +1,17 @@
#!/bin/bash -e
#!/usr/bin/env bash
set -e
#
# Perform a quick sanity test on a leader, drone, validator and client running
# locally on the same machine
#
cd "$(dirname "$0")"/..
source ci/upload_ci_artifact.sh
source ci/upload-ci-artifact.sh
source scripts/configure-metrics.sh
multinode-demo/setup.sh
backgroundCommands="drone leader validator validator-x"
backgroundCommands="drone bootstrap-leader fullnode fullnode-x"
pids=()
for cmd in $backgroundCommands; do
@ -44,7 +45,7 @@ shutdown() {
echo "--- Upload artifacts"
for cmd in $backgroundCommands; do
declare logfile=log-$cmd.txt
upload_ci_artifact "$logfile"
upload-ci-artifact "$logfile"
tail "$logfile"
done
@ -64,7 +65,7 @@ flag_error() {
echo "--- Wallet sanity"
(
set -x
scripts/wallet-sanity.sh
timeout 60s scripts/wallet-sanity.sh
) || flag_error
echo "--- Node count"
@ -72,8 +73,12 @@ echo "--- Node count"
source multinode-demo/common.sh
set -x
client_id=/tmp/client-id.json-$$
$solana_keygen -o $client_id
$solana_bench_tps --identity $client_id --num-nodes 3 --reject-extra-nodes --converge-only
$solana_keygen -o $client_id || exit $?
$solana_bench_tps \
--identity $client_id \
--num-nodes 3 \
--reject-extra-nodes \
--converge-only || exit $?
rm -rf $client_id
) || flag_error
@ -83,8 +88,8 @@ echo "--- Ledger verification"
(
source multinode-demo/common.sh
set -x
cp -R "$SOLANA_CONFIG_DIR"/ledger /tmp/ledger-$$
$solana_ledger_tool --ledger /tmp/ledger-$$ verify
cp -R "$SOLANA_CONFIG_DIR"/bootstrap-leader-ledger /tmp/ledger-$$
$solana_ledger_tool --ledger /tmp/ledger-$$ verify || exit $?
rm -rf /tmp/ledger-$$
) || flag_error

View File

@ -1,18 +1,13 @@
#!/bin/bash -e
#!/usr/bin/env bash
#
# Only run snap.sh for pull requests that modify files under /snap
#
set -e
cd "$(dirname "$0")"/..
cd "$(dirname "$0")"
ci/affects-files.sh ^snap/ || {
echo "Skipping snap build as no files under /snap were modified"
exit 0
}
if ./is-pr.sh; then
affected_files="$(buildkite-agent meta-data get affected_files)"
echo "Affected files in this PR: $affected_files"
if [[ ! ":$affected_files:" =~ :snap/ ]]; then
echo "Skipping snap build as no files under /snap were modified"
exit 0
fi
exec ./snap.sh
else
echo "Skipping snap build as this is not a pull request"
fi
exec ci/snap.sh

33
ci/publish-book.sh Executable file
View File

@ -0,0 +1,33 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
book/build.sh
echo --- create book repo
(
set -x
cd book/html/
git init .
git config user.email "maintainers@solana.com"
git config user.name "$(basename "$0")"
git add ./* ./.nojekyll
git commit -m "${BUILDKITE_COMMIT:-local}"
)
echo --- publish
if [[ $BUILDKITE_BRANCH = master ]]; then
cd book/html/
git remote add origin git@github.com:solana-labs/solana.git
git fetch origin gh-pages
if ! git diff HEAD origin/gh-pages --quiet; then
git push -f origin HEAD:gh-pages
else
echo "Content unchanged, publish skipped"
fi
else
echo "Publish skipped"
fi
exit 0

39
ci/publish-bpf-sdk.sh Executable file
View File

@ -0,0 +1,39 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
eval "$(ci/channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
echo --- Creating tarball
(
set -x
sdk/bpf/scripts/package.sh
[[ -f bpf-sdk.tar.bz2 ]]
)
echo --- AWS S3 Store
if [[ -z $CHANNEL ]]; then
echo Skipped
else
(
set -x
docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/bpf-sdk.tar.bz2 \
s3://solana-sdk/"$CHANNEL"/bpf-sdk.tar.bz2
)
fi
exit 0

View File

@ -1,19 +1,59 @@
#!/bin/bash -e
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
if [[ -z "$BUILDKITE_TAG" ]]; then
# Skip publish if this is not a tagged release
exit 0
# List of internal crates to publish
#
# IMPORTANT: the order of the CRATES *is* significant. Crates must be published
# before the crates that depend on them. Note that this information is already
# expressed in the various Cargo.toml files, and ideally would not be duplicated
# here. (TODO: figure the crate ordering dynamically)
#
CRATES=(
logger
netutil
sdk
keygen
metrics
drone
programs/native/{budget,bpf_loader,lua_loader,native_loader,noop,system,vote}
.
fullnode
genesis
ledger-tool
wallet
)
maybePackage="echo Package skipped"
maybePublish="echo Publish skipped"
# Only package/publish if this is a tagged release
if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
maybePackage="cargo package"
# Only publish if there's no human around
if [[ -n $CI ]]; then
maybePublish="cargo publish --token $CRATES_IO_TOKEN"
if [[ -z "$CRATES_IO_TOKEN" ]]; then
echo CRATES_IO_TOKEN undefined
exit 1
fi
fi
fi
if [[ -z "$CRATES_IO_TOKEN" ]]; then
echo CRATES_IO_TOKEN undefined
exit 1
fi
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
for crate in "${CRATES[@]}"; do
if [[ ! -r $crate/Cargo.toml ]]; then
echo "Error: $crate/Cargo.toml does not exist"
exit 1
fi
echo "-- $crate"
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
(
set -x
ci/docker-run.sh rust bash -exc "cd $crate; $maybePackage; $maybePublish"
)
done
exit 0

73
ci/publish-metrics-dashboard.sh Executable file
View File

@ -0,0 +1,73 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
if [[ -z $BUILDKITE ]]; then
echo BUILDKITE not defined
exit 1
fi
if [[ -z $CHANNEL ]]; then
CHANNEL=$(buildkite-agent meta-data get "channel" --default "")
fi
if [[ -z $CHANNEL ]]; then
(
cat <<EOF
steps:
- block: "Select Dashboard"
fields:
- select: "Channel"
key: "channel"
options:
- label: "stable"
value: "stable"
- label: "edge"
value: "edge"
- label: "beta"
value: "beta"
- command: "ci/$(basename "$0")"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
ci/channel-info.sh
eval "$(ci/channel-info.sh)"
case $CHANNEL in
edge)
CHANNEL_BRANCH=$EDGE_CHANNEL
;;
beta)
CHANNEL_BRANCH=$BETA_CHANNEL
;;
stable)
CHANNEL_BRANCH=$STABLE_CHANNEL
;;
*)
echo "Error: Invalid CHANNEL=$CHANNEL"
exit 1
;;
esac
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
build:
message: "$BUILDKITE_MESSAGE"
branch: "$CHANNEL_BRANCH"
env:
CHANNEL: "$CHANNEL"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
set -x
exec metrics/publish-metrics-dashboard.sh "$CHANNEL"

85
ci/publish-tarball.sh Executable file
View File

@ -0,0 +1,85 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
DRYRUN="echo"
CHANNEL=unknown
fi
eval "$(ci/channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
if [[ -n "$BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$BUILDKITE_TAG
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Unable to determine channel to publish into, exiting.
exit 1
fi
echo --- Creating tarball
(
set -x
rm -rf solana-release/
mkdir solana-release/
(
echo "$CHANNEL_OR_TAG"
git rev-parse HEAD
) > solana-release/version.txt
scripts/cargo-install-all.sh solana-release
./fetch-perf-libs.sh
# shellcheck source=/dev/null
source ./target/perf-libs/env.sh
(
cd fullnode
cargo install --path . --features=cuda --root ../solana-release-cuda
)
cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda
tar jvcf solana-release.tar.bz2 solana-release/
)
echo --- Saving build artifacts
source ci/upload-ci-artifact.sh
upload-ci-artifact solana-release.tar.bz2
if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
echo Skipped due to DO_NOT_PUBLISH_TAR
exit 0
fi
echo --- AWS S3 Store
(
set -x
$DRYRUN docker run \
--rm \
--env AWS_ACCESS_KEY_ID \
--env AWS_SECRET_ACCESS_KEY \
--volume "$PWD:/solana" \
eremite/aws-cli:2018.12.18 \
/usr/bin/s3cmd --acl-public put /solana/solana-release.tar.bz2 \
s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
echo Published to:
$DRYRUN ci/format-url.sh http://solana-release.s3.amazonaws.com/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
)
echo --- ok

Some files were not shown because too many files have changed in this diff