Compare commits


314 Commits
v0.9 ... v0.10

SHA1 Message Date
14306a33e7 Stable dashboard can now actually come from the stable channel 2018-12-20 08:05:32 -08:00
babc3847d7 Select correct branch for {testnet,-perf} when using a stable channel tag 2018-12-19 17:50:15 -08:00
40fd1befa5 Add pipeline upload script 2018-12-15 16:06:36 -08:00
7808af9a65 Regenerate secrets 2018-12-15 15:27:42 -08:00
3c17732826 Use ejson to manage build secrets 2018-12-15 15:10:44 -08:00
77aee571ad crdt-vote-count metric is now named cluster_info-vote-count 2018-12-09 19:28:30 -08:00
a01b55c580 Add newline at end of file 2018-12-06 17:47:00 -08:00
0ecdc64302 Update versions in install-libssl-compatibility.sh 2018-12-06 16:35:31 -08:00
ba06082d58 Move testnet metrics dashboard management out of the Grafana UI 2018-11-25 16:10:29 -08:00
08e9c1a96e Add Cargo.lock 2018-11-17 16:06:40 -08:00
9f38b86df8 Revert "Temporarily disable clippy"
This reverts commit ca12faca9c.
2018-11-17 16:06:40 -08:00
ca12faca9c Temporarily disable clippy 2018-11-17 09:43:33 -08:00
97a0791f3f Add timeouts 2018-11-17 09:09:49 -08:00
4791c7e0a7 Bump earlyoom version 2018-11-10 15:56:37 -08:00
1ba13fe180 v0.10.5 2018-11-09 11:55:30 -08:00
9a30100a9c Create target/ if it doesn't exist yet 2018-11-09 11:52:19 -08:00
aa741b3147 v0.10.4 2018-11-09 10:29:32 -08:00
09db7b5b52 Determine network version for tar and local deploys 2018-11-09 10:27:18 -08:00
fa9faa2cec Upgrade Rust stable to 1.30.1
Fixes `cargo doc`
2018-11-09 10:25:00 -08:00
d2dc585974 Update wallet to pass full ELFs (#1738) 2018-11-08 09:03:48 -08:00
6721bdde3d v0.10.3 2018-11-07 21:39:51 -08:00
a733873b8f v0.10.2 2018-11-07 20:13:17 -08:00
7c02bbc47c Assign static IPs to {edge,beta}.testnet.solana.com 2018-11-07 20:11:53 -08:00
16a815d2b1 Install native programs in the correct location 2018-11-07 19:44:39 -08:00
ddb490e2fb Continue if docker0 is not present 2018-11-07 19:33:36 -08:00
242d0a23fb Switch testnet to AWS 2018-11-07 18:56:45 -08:00
869009243d Work around AWS key management limitation 2018-11-07 18:48:05 -08:00
7b61f5279c Switch to us-west-1a, us-west-1b is causing trouble 2018-11-07 18:22:24 -08:00
7ef0b815ec Remove docker0 interface if present 2018-11-07 17:49:57 -08:00
8742de789e Shuffle AWS regions 2018-11-07 17:49:57 -08:00
bfadd7b787 Work around AWS boot check weirdness 2018-11-07 15:47:47 -08:00
2e14bfcf4e Shuffle AWS regions 2018-11-07 15:43:56 -08:00
a19426f055 Revert "Restore testnet/testnet-perf to tip of beta channel for now"
This reverts commit 0d0a1c2919.
2018-11-07 15:43:56 -08:00
df366017a7 Invert gpu check 2018-11-07 13:50:42 -08:00
7d76badd03 Support local tarball deploys 2018-11-07 13:43:36 -08:00
8047ab777c Remove all cuda dependencies from release tarball beyond solana-fullnode-cuda 2018-11-07 13:43:24 -08:00
0d0a1c2919 Restore testnet/testnet-perf to tip of beta channel for now 2018-11-07 21:21:33 +00:00
1da90017ce Permit release tag tarballs 2018-11-07 10:33:20 -08:00
0909618efa Switch testnet/testnet-beta to tarball release 2018-11-07 10:29:53 -08:00
28bb7849f4 Fix tarball publishing for tags 2018-11-07 10:26:07 -08:00
9cffd3a1ea AWS AMIs are region specific 2018-11-07 10:04:45 -08:00
917151ce54 s/edge/beta/ 2018-11-07 08:54:44 -08:00
6dcd127634 Increase boot timeout 2018-11-07 08:32:03 -08:00
af66edf8c0 Add AWS-based nets 2018-11-07 07:52:34 -08:00
ab5b921e8f Set imageName if G 2018-11-07 07:52:29 -08:00
6c2843543b Bump EC2 validator machine type 2018-11-07 07:52:25 -08:00
85f74cc537 Upgrade GCP CPU-based testnet to 18.04 2018-11-07 07:52:19 -08:00
43665115b4 Switch testnet/testnet-perf to the latest beta or stable tag 2018-11-07 07:48:33 -08:00
156115c04c Publish release tarballs for tags 2018-11-07 07:48:30 -08:00
a66577eb87 Add support for using a release tar 2018-11-07 07:48:02 -08:00
3345d059e8 Elf relocations (#1724)
Use relocatable BPF ELFs
2018-11-06 14:28:46 -08:00
8c8c5de779 Remove unused debug trace 2018-11-06 14:19:07 -08:00
f03e971598 t 2018-11-06 14:06:07 -08:00
b4a1cdceaa Add timeout to prevent a stuck ssh 2018-11-06 14:02:27 -08:00
b250d20059 Remove node check from client start-up
If the network loses a validator or two, it's the job of the sanity
check to detect this not the bench clients
2018-11-06 13:59:42 -08:00
dc3b270410 Remove bpf tictactoe 2018-11-06 12:17:52 -08:00
9d5092a71c Set metrics database correctly 2018-11-06 07:24:49 -08:00
a287c9e5fa Remove stray line 2018-11-05 20:53:50 -08:00
ee85d534f9 Update testnet deploy docs 2018-11-05 19:12:43 -08:00
6e1b291c17 Add testnet-manager pipeline 2018-11-05 17:35:55 -08:00
68f7b1ecf3 Rename buildkite-snap to buildkite-secondary 2018-11-05 08:48:09 -08:00
58fe5cabd6 Document BPF C program limitations 2018-11-04 12:30:05 -08:00
8993c6ae24 Try harder to snap download 2018-11-03 00:29:48 +00:00
0e56473add 0.10.1 2018-11-02 16:30:13 -07:00
f6b709ca48 ci: correct crates.io publishing order 2018-11-02 15:36:32 -07:00
ffa1fa557b Ship native programs in snap 2018-11-01 15:59:24 -07:00
e7631c85a1 Update bpf.mk 2018-11-01 15:25:43 -07:00
edeadb503f shell 2018-11-01 14:40:38 -07:00
d2044f2562 Find clang 7 better
If LLVM_DIR is defined, use it to locate clang.  Otherwise use brew on
macOS, and assume clang-7 otherwise
2018-11-01 09:46:47 -07:00
5703c740cf Improve clang install info for Linux 2018-11-01 09:46:47 -07:00
6ae20e78e2 Rename sol_bpf.h to solana_sdk.h 2018-10-31 23:39:59 -07:00
506fc3baeb sol_bpf.h improvements
- Define NULL
- Add sol_memcmp()
- Use sizeof() more
- Add SOL_ARRAY_SIZE
- Make sol_deserialize() more flexible
2018-10-31 23:39:59 -07:00
68523f4a7f Fix up bpf numeric types 2018-10-31 21:16:16 -07:00
beae217ab9 Remove purging of leader id from cluster info (#1677) 2018-10-31 13:09:44 -07:00
2c8c117e3c Use env variables to disable validator sanity and ledger verification (#1675) (#1676) 2018-10-31 12:54:40 -07:00
3a1285ebe5 Program may not exit (#1669)
Cap max executed instructions, report number of executed instructions
2018-10-31 11:15:08 -07:00
e2660f2ac1 Fix deps (#1672) 2018-10-31 11:14:41 -07:00
22eb1b977f Fix lua_loader tests (#1665) 2018-10-31 09:22:41 -07:00
43ef8d7bb7 SYSTEM_INC_DIRS needs immediate expansion 2018-10-31 09:22:41 -07:00
d9271f2d30 Revert inclusion change, fix doc 2018-10-31 09:22:41 -07:00
dfbfd4d4dd Fix const 2018-10-31 09:22:41 -07:00
9cb262ad4b Fix C programs 2018-10-31 09:22:41 -07:00
73ee0cb100 Run workspace member's tests (#1666)
Run workspace member's tests
2018-10-31 09:22:41 -07:00
9a6154beaf Upgrade to influx_db_client@0.3.6 2018-10-31 09:22:41 -07:00
3f494bb91b Update testnet scripts to use release tar ball (#1660) (#1664)
* Update testnet scripts to use release tar ball

* use curl instead of s3cmd
2018-10-30 18:29:07 -07:00
2eb312796d Publish a tarball of Solana release binaries (#1656) (#1658)
* Publish a tarball of solana release binaries

* included native programs in Solana release tar

* Remove PR check from publish script
2018-10-30 15:55:50 -07:00
3fb86662fb Find native program with solana_ prefix 2018-10-30 13:12:59 -07:00
dce31f6002 Improve account subscribe/unsubscribe logging 2018-10-30 12:10:25 -07:00
39c42a6aba Avoid panicking when a native library doesn't exist 2018-10-30 12:10:25 -07:00
9961c0ee0a Demote info logs 2018-10-30 12:10:25 -07:00
3f843f21b9 Add solana_ prefix to loaders so their logs appear in the default RUST_LOG config 2018-10-30 11:24:18 -07:00
d07961a58b Work around influxdb panic 2018-10-30 11:24:18 -07:00
b85aa9282e Tweak logging 2018-10-30 11:24:18 -07:00
1cd354cf15 Added a new remote node configuration script to set rmem/wmem (#1647) (#1648)
* Added a new remote node configuration script to set rmem/wmem

* Update common.sh for rmem/wmem configuration
2018-10-30 10:48:56 -07:00
92cd2d09ed Permit {INC,LLVM,OUT,SRC,SYSTEM_INC}_DIRs to be overridden 2018-10-30 07:59:22 -07:00
a40122548f Add programs/bpf/c/sdk entries 2018-10-29 20:52:34 -07:00
6e27f797bd Use NUM_KA 2018-10-29 20:52:34 -07:00
476a585222 README updates 2018-10-29 20:52:34 -07:00
aa74ddb6c0 LD -> LLC 2018-10-29 20:52:34 -07:00
95921ce129 Add extern "C" block 2018-10-29 20:52:34 -07:00
ee6d00a2fe Use #pragma once, it's widely supported
Fix up some spelling too
2018-10-29 20:52:34 -07:00
212cbc4977 Rename sol_bpf_c.h to sol_bpf.h 2018-10-29 20:52:34 -07:00
a6af1ba08d slight reformatting 2018-10-29 20:52:34 -07:00
ee27e9e1cf Apply some const 2018-10-29 20:52:34 -07:00
4d21ee0546 Include system includes in .d, remove unneeded tabs 2018-10-29 20:52:34 -07:00
493a2477b5 Tune make output 2018-10-29 19:32:20 -07:00
e284af33b9 Create programs/bpf/c/sdk/ 2018-10-29 19:10:54 -07:00
f0aa14e135 Run bench-tps for longer duration in testnet (#1638) (#1639)
- Increased to 2+ hours
2018-10-29 15:23:01 -07:00
fb9d8dfa99 Increase rmem and wmem for remote nodes in testnet (#1635) (#1637) 2018-10-29 14:36:26 -07:00
4b02bbc802 Remove unnecessary checks 2018-10-29 13:27:14 -07:00
18cf660f61 Create/publish bpf-sdk tarball 2018-10-29 13:04:20 -07:00
376303a1eb Add utility to figure the current crate version 2018-10-29 13:04:20 -07:00
f295eb06d0 Add llvm install info 2018-10-29 09:44:03 -07:00
f423f61d8b Ignore out/ 2018-10-29 09:44:03 -07:00
94b06b2cbf Use V=1 for verbosity, easier to type 2018-10-29 09:44:03 -07:00
9b2fc8cde7 Find llvm using brew on macOS 2018-10-29 09:44:03 -07:00
d810752e86 Remove VoteProgram references 2018-10-26 21:10:05 -07:00
fdaad1d85b Program_ids were overlapping (#1626)
Program_ids were overlapping
2018-10-26 21:10:05 -07:00
7f29c1fe23 Cleanup c programs (#1620)
Cleanup C programs
2018-10-26 21:10:05 -07:00
68df9d06db Bump version number to pick up fixed cuda library
Has fix for unaligned memory access in chacha_encrypt_many_sample
function.
2018-10-26 21:10:05 -07:00
b60cb48c18 Use a smaller test value for window_size
Otherwise this test takes forever to run.
2018-10-26 21:10:05 -07:00
0fee854220 Revert "Vote contract (#1552)"
This reverts commit f6c8e1a4bf.
2018-10-26 09:50:35 -07:00
0cc7bbfe7d Revert "cargo fmt"
This reverts commit 68834bd4c5.
2018-10-26 09:50:35 -07:00
68834bd4c5 cargo fmt 2018-10-25 17:24:40 -07:00
2df40cf9c9 Revert "0.10.0-pre2"
This reverts commit 48685cf766.
2018-10-25 17:20:37 -07:00
f671b7f63f Publish root crate too 2018-10-25 17:16:18 -07:00
236113e417 cargo fmt 2018-10-25 17:13:41 -07:00
a340b18b19 Upgrade to rust 1.30 2018-10-25 17:13:41 -07:00
f6c8e1a4bf Vote contract (#1552)
* Add Vote Contract

* Move ownership of LeaderScheduler from Fullnode to the bank

* Modified ReplicateStage to consume leader information from bank

* Restart RPC Services in Leader To Validator Transition

* Make VoteContract Context Free

* Remove voting from ClusterInfo and Tpu

* Remove dependency on ActiveValidators in LeaderScheduler

* Switch VoteContract to have two steps 1) Register 2) Vote. Change thin client to create + register a voting account on fullnode startup

* Remove check in leader_to_validator transition for unique references to bank, b/c jsonrpc service and rpcpubsub hold references through jsonhttpserver
2018-10-25 16:58:40 -07:00
160cff4a30 Check for TRIGGERED_BUILDKITE_TAG 2018-10-25 16:37:54 -07:00
48685cf766 0.10.0-pre2 2018-10-25 16:19:31 -07:00
0f32102684 Restrict characters to those supported by semvar_bash 2018-10-25 16:19:00 -07:00
d46682d1f2 Restrict characters to those supported by semvar_bash 2018-10-25 16:12:29 -07:00
55833e20b1 Create Poh Service (#1604)
* Create new Poh Service, replace tick generation in BankingStage
2018-10-25 14:56:21 -07:00
02cfa76916 Plumb GetTransactionCount through solana-wallet 2018-10-25 14:58:51 -06:00
9314eea7e9 Add leader-readiness test to wallet-sanity 2018-10-25 14:58:51 -06:00
1733beabf7 mv common/ sdk/ 2018-10-25 13:26:10 -07:00
471d8f6ff9 Fix up the version references to all other internal crates 2018-10-25 12:54:32 -07:00
e47fcb196b s/solana_program_interface/solana[_-]sdk/g 2018-10-25 12:31:45 -07:00
3ae53961c8 Support prerelease versioning 2018-10-25 12:31:45 -07:00
113b002095 Delete programs/native/move_funds 2018-10-25 11:37:38 -07:00
9447537d8c Increment internal Cargo references to solana_program_interface 2018-10-25 11:03:03 -07:00
7404b8739e Make template headers smaller 2018-10-25 11:51:37 -06:00
7239395d95 Add Issue and PR templates 2018-10-25 11:51:37 -06:00
926d459c8f Script away cargo version bumping 2018-10-25 09:38:58 -07:00
7cabe203dc Sync version with top-level Cargo.toml 2018-10-25 09:38:58 -07:00
1e53f4266a Fetch perf-libs with configurable packet size
sig verify library uses passed in size directly
to get packet size, so rust side can be modified
without changing cuda library.
2018-10-25 08:26:35 -07:00
24b513c3c7 Migrate to latest rbpf (#1605)
Migrate to updated rbpf
2018-10-25 02:58:04 -07:00
b982595c73 Add version check and rustup 2018-10-24 19:48:58 -07:00
af8a36b7fb Exclude chacha_cuda when chacha is disabled 2018-10-24 17:02:46 -07:00
208e7d7943 Explicitly reject transactions larger than PACKET_SIZE 2018-10-24 15:34:27 -07:00
557736f1cf Split leader rotation into separate RFC 2018-10-24 13:16:06 -06:00
61927e1941 Fix compile error for write_entries
Takes a reference now.
2018-10-24 11:31:30 -07:00
fc75827aaf .gitignore *.log 2018-10-24 10:58:27 -07:00
2f2531d921 Add retries to Wallet deploy 2018-10-24 11:13:32 -06:00
d5f20980eb Incorporate preloaded bpf loader 2018-10-24 11:13:32 -06:00
21eae981f9 Add deploy method to solana-wallet 2018-10-24 11:13:32 -06:00
ead7f4287a Storage mining fixups...
* Use IV to make unique identities
* Use hex! macro for hex literal and not string converted to u8 slice
* fix sha sampling to control init/end of sha state
2018-10-24 09:58:41 -07:00
3b33150cfb Bump drone read timeout to 10s
The previous timeout of 3s was not generous enough occasionally
2018-10-24 08:52:41 -07:00
6d34a68e54 Ignore test_leader_restart_validator_start_from_old_ledger (#1586)
Ignore test_leader_restart_validator_start_from_old_ledger
2018-10-23 18:10:31 -07:00
5c483c9928 remove unused variable 2018-10-23 16:52:56 -06:00
a68c99d782 Fix transaction count on testnet dashboard 2018-10-23 16:52:56 -06:00
0aebbae909 Fix message 2018-10-23 15:45:58 -07:00
a3a2215bda Fix warning 2018-10-23 15:45:58 -07:00
eb377993b3 Debug scripts point to debug flavor (#1585) 2018-10-23 14:48:50 -07:00
5ca52d785c Preload BPF loader (#1573)
Preload BPF loader
2018-10-23 14:44:41 -07:00
8d9912b4e2 Move ledger write to its own stage (#1577)
* Move ledger write to its own stage

- Also, rename write_stage to leader_vote_stage, as write functionality
  is moved to a different stage

* Address review comments

* Fix leader rotation test failure

* address review comments
2018-10-23 14:42:48 -07:00
c77b1c9687 i 2018-10-23 14:14:09 -07:00
8849ecd772 capture consensus discussion of 10/10/2018 2018-10-23 15:07:58 -06:00
7977b97227 Surface AccountInUse to JSON RPC users so they know to retry the transaction 2018-10-23 13:55:30 -07:00
4f34822900 Improve logging on various error conditions 2018-10-23 13:40:59 -07:00
bbb38ac106 Increase window size (#1578)
Addresses the following problem
- Validators are not able to keep up with the leader
- The future blobs (outside of window) get dropped
- The validators won't process repair requests for these future blobs
2018-10-23 10:25:01 -07:00
ce934a547e Storage RFC validator incentive clarification 2018-10-23 09:46:38 -06:00
16b19d35dd Disable test_boot_validator_from_file (#1576) 2018-10-23 00:47:15 -07:00
45cfa5b574 Add instruction to transfer account ownership 2018-10-20 21:54:25 -05:00
df9ccce5b2 Remove hostname() from calls to metrics as it's expensive operation (#1557) 2018-10-20 06:38:20 -07:00
f8516b677a Load program data in chunks (#1556)
Load program data in chunks
2018-10-19 18:28:38 -07:00
dfde83bdce Wildcard early OOM deb package revision (#1554) 2018-10-19 14:17:19 -07:00
cb0f19e4f1 Shield rerun-if-changed under the feature flags so
that cargo watch doesn't cause re-build every iteration.
2018-10-19 12:07:29 -07:00
26b99d3f85 Ensure witness and timestamp keys are signed
Before this patch, an attacker could point Budget instructions to
unsigned keys, and authorize a transaction from an unauthorized
party.
2018-10-19 10:06:59 -06:00
2f9c0d1d9e Add method to lookup signed keys 2018-10-19 10:06:59 -06:00
0423cafbeb Cleanup and update Smart Contracts Engine RFC to what is currently in the code (#1539)
* Cleanup and update to the state of the code

* update

* render

* render

* comments on memory allocation
2018-10-19 06:08:49 -07:00
0bd1412562 Switch leader scheduler to use PoH ticks instead of Entry height (#1519)
* Add PoH height to process_ledger()

* Moved broadcast_stage Leader Scheduling logic to use Poh height instead of entry_height

* Moved LeaderScheduler logic to PoH in ReplicateStage

* Fix Leader scheduling tests to use PoH instead of entry height

* Change is_leader detection in repair() to use PoH instead of entry height

* Add tests to LeaderScheduler for new functionality

* fix Entry::new and genesis block PoH counts

* Moved LeaderScheduler to PoH ticks

* Cleanup to resolve PR comments
2018-10-18 22:57:48 -07:00
0339642e77 Added TicTacToe Dashboard and tests (#1547)
* Add tictactoe dashboard and tests
2018-10-18 14:19:25 -07:00
37a0b7b132 Initial validator code for rust side hooks for chacha cuda parallel encrypt 2018-10-18 13:50:19 -07:00
c30b605047 Actually submit the storage mining proof
Get an airdrop so replicator can submit mining transaction

Some other minor type cleanup.
2018-10-18 13:50:19 -07:00
76076d6fad move last_id age checking into the HashMap
* allows for simpler chaining of banks
  * looks 1.5-2% faster than looping through a VecDeque

TODO: remove timestamp()?
2018-10-18 11:07:00 -07:00
0a819ec4e2 Programs were not spawned by SystemProgram (#1533)
* SystemProgram spawns programs
2018-10-18 10:33:30 -07:00
57a717056e Delegate accounts now record the original approved amount 2018-10-18 08:53:25 -07:00
856c48541f Restore elaborate attack
The test is showing how you can sneak by verify_plan() but not
verify_signature().
2018-10-18 08:46:02 -06:00
2045091c4f Add SystemProgram::Move ix to Budget tx 2018-10-18 08:46:02 -06:00
03ac5a6eef Move all source tokens into Budget account
Budget now assumes the source account holds all tokens the program
should spend.

Note: the static guarantees implied by verify_plan() are meaningless
under the new contract engine. The bank no longer calls it. This
serves as a nice example of where comparing code coverage between
integration tests and unit tests would have shown us where a
change rendered unit tests meaningless.
2018-10-18 08:46:02 -06:00
32fadc9c30 Merge debits and credits
Debits no longer need to be applied before credits. Instead, we
lock any accounts we'd debit and so error out on the second attempt
to lock the same account.
2018-10-18 08:46:02 -06:00
15a89d4f17 Boot Contract type from Budget
In the old bank (before the contract engine), Contract wasn't specific
to Budget. It provided the same service as what is now called
SystemProgram::Move, but without requiring a separate account.
2018-10-18 08:46:02 -06:00
d0f43e9934 consolidate tmp ledgers 2018-10-18 08:45:31 -06:00
31e779d3f2 Added counters to track more metrics on dashboard (#1535)
- Total number of IP packets TX/RX from all nodes in the testnet
- Last consumed index on validator
- Last transmitted index on leader
2018-10-17 17:32:50 -07:00
30c79fd40d Change validator node machine type (#1537)
- The current nodes are using lower RAM compared to leader/clients
2018-10-17 17:16:50 -07:00
639c93460a Write stage optimizations (#1534)
- Testnet dashboard shows that channel pressure for write stage
  is incrementing on every iteration of write.
- This change optimizes ledger writing by removing cloning of map
  and reducing calls to flush
2018-10-17 13:02:32 -07:00
7611730cdb move off /tmp 2018-10-17 12:15:30 -07:00
9df9c1433a remove another use of /tmp 2018-10-17 12:15:30 -07:00
4ea422bcec run integration tests serially 2018-10-17 11:37:10 -07:00
6074e4f962 Attempt to stabilize the test suite
The integration tests are allowed to open sockets, so running them
in parallel may cause "Too many open files" errors. This patch
runs the unit tests in parallel and the integration test serially.
2018-10-17 11:37:10 -07:00
d52e6d01ec typo in readme 2018-10-17 02:04:05 -06:00
63caca33be SystemProgram test was failing due to expected panic 2018-10-16 18:02:44 -07:00
64efa62a74 enable logging in loaders 2018-10-16 16:55:11 -07:00
912eb5e8e9 remove bank.is_leader, dead code (#1516) 2018-10-16 15:26:44 -07:00
bb628e8495 Rename loaders 2018-10-16 14:27:08 -07:00
d0c19c2c97 cargo fmt 2018-10-16 14:11:04 -07:00
926fdb7519 Rename dynamic_program.rs to native_loader.rs 2018-10-16 14:11:04 -07:00
c886625c83 Move from solana/rbpf fork to qmonnet/rbpf (#1511) 2018-10-16 13:13:54 -07:00
f6c10d8a2e Add channel pressure for validator TVU stages (#1509) 2018-10-16 12:54:23 -07:00
2bd877528f Par process entries (#1499)
* Parallel entry processor.
2018-10-16 12:09:48 -07:00
d09889b1dd Program bank integration (#1462)
Native, BPF and Lua loaders integrated into the bank
2018-10-16 09:43:49 -07:00
1b2e9122d5 Pubsub listen on random open port when rpc does (quiet some test errors) 2018-10-16 00:11:26 -06:00
7424388924 Fix session drop 2018-10-16 00:11:26 -06:00
537436bd5e RPC PubSub now uses a well-known socket 2018-10-16 00:11:26 -06:00
32fc0cd7e9 Fix bug introduced during RUST_LOG escaping (#1507)
* Fix bug introduced during RUST_LOG escaping
- remote node configuration should not be quoted

* shellcheck disable SC2090
2018-10-15 16:49:22 -07:00
fb99494858 Improve rpc code coverage (#1487) 2018-10-15 11:01:40 -06:00
5b4d4b97bc Upgrade to latest stable Rust, 1.29.2 2018-10-15 09:54:24 -06:00
c5180c8092 Permit RUST_LOG overrides 2018-10-14 12:40:37 -07:00
515c200d86 Refactor and add test for new Entry::serialized_size() 2018-10-14 10:53:47 -06:00
32aab82e32 Don't allocate to see if transactions will fit in a blob 2018-10-14 10:53:47 -06:00
6aaa350145 efficiently pack gossip responses and only respond up to max size. (#1493) 2018-10-14 06:45:02 -07:00
d3b4dfe104 Add bool return to entrypoint signature to permit programs to fail transactions 2018-10-13 20:01:43 -07:00
9fc30f6db4 Escape RUST_LOG configuration in remote-node.sh (#1489)
* Escape RUST_LOG configuration in remote-node.sh

- If it was set to #, it was causing other parameters to be commented out

* escape other variables as well

* disabled shell check

* Fix shellcheck error
2018-10-13 13:35:54 -07:00
2d0f07091d Handle dynamic program dlopen failures gracefully 2018-10-13 11:31:10 -07:00
3828eda507 Demote log messages 2018-10-13 11:31:10 -07:00
1e736ec16d Demote log messages 2018-10-12 20:16:57 -07:00
bba6437ea9 Use a single structure for last_ids and last_ids_sigs 2018-10-12 16:39:35 -07:00
e5ab9a856c Upload bench output as build artifacts (#1478)
* Upload bench output as build artifacts

* Fix tags types

* Pull previous stats from metrics

* Change the default branch for comparison

* Fix formatting

* Fix build errors

* Address review comments

* Dedup some common code

* Add eval for channel info to find branch name
2018-10-12 15:13:10 -07:00
1515bba9c6 Use cluster_info in rpc to get current leader addresses (#1480) 2018-10-12 14:25:56 -06:00
14a9ef4bbe move PoH verification off bank.last_id() (#1476) 2018-10-12 11:50:34 -07:00
041040c659 pubsub.rs -> rpc_pubsub.rs 2018-10-12 08:39:06 -07:00
47f69f2d24 1) Switch broken tests to generate an empty tick in their ledgers to use as last_id, 2) Fix bug where the PoH generator in BankingStage referenced the last entry instead of the last tick on startup, causing ledger verification to fail on the new tick added by the PoH generator (#1479) 2018-10-12 00:39:10 -07:00
9dd4dc2088 Mark failing tests as ignore 2018-10-11 15:32:36 -07:00
b534c32ee3 New minor version for jsonrpc crates 2018-10-11 13:35:06 -06:00
d2712f1457 Specify patch for jsonrpc crates 2018-10-11 11:38:14 -07:00
183f560d06 Add raw entries interface to ledger for getting slices as [u8] 2018-10-11 09:40:34 -07:00
ae150c0897 Remove getAddress, it doesn't exist 2018-10-11 08:28:39 -07:00
606e1396cf Fix link 2018-10-11 08:25:38 -07:00
5c85e037f8 Tick entry ids as only valid last_ids (#1441)
Generate tick entry ids and only register ticks as the last_id expected by the bank.  Since the bank is MT, the in-flight pipeline of transactions cannot be close to the end of the queue or there is a high possibility that a starved thread will encode an expired last_id into the ledger.  The banking_stage therefore uses a shorter age limit for encoded last_ids than the validators.

Bench client doesn't send transactions that are older than 30 seconds.
2018-10-10 17:23:06 -07:00
5c523716aa Ship native programs 2018-10-10 16:49:48 -07:00
5f8cbf359e Use cdylib to avoid runtime libstd dependencies 2018-10-10 16:49:48 -07:00
e83834e6be Build native programs in release configuration 2018-10-10 16:49:48 -07:00
02225aa95c Look for native programs in same directory as the current executable 2018-10-10 16:49:48 -07:00
9931ac9780 Leader scheduler plumbing (#1440)
* Added LeaderScheduler module and tests

* plumbing for LeaderScheduler in Fullnode + tests. Add vote processing for active set to ReplicateStage and WriteStage

* Add LeaderScheduler plumbing for Tvu, window, and tests

* Fix bank and switch tests to use new LeaderScheduler

* move leader rotation check from window service to replicate stage

* Add replicate_stage leader rotation exit test

* removed leader scheduler from the window service and associated modules/tests

* Corrected is_leader calculation in repair() function in window.rs

* Integrate LeaderScheduler with write_stage for leader to validator transitions

* Integrated LeaderScheduler with BroadcastStage

* Removed gossip leader rotation from crdt

* Add multi validator, leader test

* Comments and cleanup

* Remove unneeded checks from broadcast stage

* Fix case where a validator/leader need to immediately transition on startup after reading ledger and seeing they are not in the correct role

* Set new leader in validator -> validator transitions

* Clean up for PR comments, refactor LeaderScheduler from process_entry/process_ledger_tail

* Cleaned out LeaderScheduler options, implemented LeaderScheduler strategy that only picks the bootstrap leader to support existing tests, drone/airdrops

* Ignore test_full_leader_validator_network test due to bug where the next leader in line fails to get the last entry before rotation (b/c it hasn't started up yet). Added a test test_dropped_handoff_recovery to track this bug
2018-10-10 16:49:41 -07:00
2ba2bc72ca Cleanup multisig lua 2018-10-10 17:17:17 -06:00
45b8ba9ede Demo M-N multisig library in Lua 2018-10-10 17:17:17 -06:00
40968e09b7 Do a *little* more than noop 2018-10-10 15:57:30 -07:00
262f26cf76 SystemProgram transactions now fail on invalid arguments 2018-10-10 15:19:03 -07:00
785c619198 Add pubsub module for rpc info subscriptions (#1439) 2018-10-10 14:51:43 -06:00
24a993710d Avoid panic when account.source is None 2018-10-10 10:53:00 -07:00
c240bb12ae Change buildkite agent for testnet automation 2018-10-09 15:04:55 -07:00
eed3b9db94 Add ERC20-like Token program 2018-10-09 12:53:37 -07:00
29a8823db1 Env variables for testnet-automation parameters (#1455)
- This will enable us to create custom pipelines for field events
2018-10-09 11:50:56 -07:00
a80955eacb Change format of data for TPS/Finality metrics in testnet automation (#1446)
* Change format of data for TPS/Finality metrics in testnet automation

* Revert number of nodes for testnet automation

* Split python command to its own script

* Fix python command line arguments
2018-10-09 10:35:01 -07:00
9716c3de71 Add an abort test to justify a key field 2018-10-09 11:06:48 -06:00
34fa3208e0 Demo self-modifying Lua program
Also, drop dependency on bincode.
2018-10-09 11:06:48 -06:00
9c4e19958b Use accounts[1] for Lua code and tx userdata as arg data
This makes the Lua version nearly identical to the C one.
2018-10-09 11:06:48 -06:00
0403299728 Add context-free Lua smart contracts
lua_State is not preserved across runs and account userdata is not converted into
Lua values. All this allows us to do is manipulate the number of tokens
in each account and DoS the Fullnode with those three little words,
"repeat until false".

Why bother? Research. rlua's project goals are well-aligned with the LAMPORT runtime.

What's next:
* rlua to add security limits, such as number of instructions executed
* Add a way to deserialize Account::userdata OR use Account::program_id
  to look up a metatable for lua_newuserdata().
2018-10-09 11:06:48 -06:00
95701114e3 Crdt -> ClusterInfo 2018-10-09 03:49:39 -06:00
a99d17c3ac put temp, test files in OUT_DIR (#1448) 2018-10-08 16:15:17 -07:00
517149d325 Move rpc request methods from wallet into separate module 2018-10-08 13:02:08 -06:00
32aa2575b5 Purge BudgetTransaction from entry 2018-10-08 11:34:04 -07:00
8fe7b96629 Purge BudgetTransaction from banking_stage 2018-10-08 11:34:04 -07:00
9350619afa log to influx once (#1438) 2018-10-06 14:37:14 -07:00
d8d8f0bfc8 Fund all the keys with move many transactions (#1436)
* Fund all the keys with move many transactions

* logs
2018-10-05 16:45:27 -07:00
0a39722719 Add support to trigger testnet from a PR (#1434)
* Add support for different node counts

* Update variable names

* Delete network even after failures

* Add array for node counts

* Changed number of nodes to a space separated string of numbers

* Adjust number of nodes

* Snap will not be published if the env variable DO_NOT_PUBLISH_SNAP is set

* Address review comments

* Replaced influx db URL
2018-10-05 16:32:05 -07:00
9c0fa4d1d2 Upload coverage HTML reports (#1421)
Uploads two reports to Buildkite, one from cargo-cov and one from lcov via grcov.  The lcov one is busted on linux and is what we need to bring codecov.io back up again. It works great on macos if you wanted to generate them locally and prefer lcov HTML reports.

* Also comment out non-coverage build to speed things up.
2018-10-05 10:17:35 -07:00
da0404ad03 Reduce maintenance of maintainers list 2018-10-04 23:05:08 -07:00
b508fdb62c Cleanup field names 2018-10-04 16:51:05 -07:00
680f90df21 Fix comment 2018-10-04 14:21:06 -07:00
1a68807ad9 Enable mt-bank (#1368)
* Enable mt-bank

* cleanup and interleaving lock tests
2018-10-04 13:15:54 -07:00
d901767b54 Makefile is not relevant 2018-10-04 10:35:48 -07:00
13d4443d4d Add BPF support & C-based BPF tic-tac-toe (#1422)
Add initial support for BPF and a C port of tictactoe
2018-10-04 09:44:44 -07:00
74b63c12a0 Add tests to LeaderScheduler to increase code coverage 2018-10-03 21:58:29 -07:00
cd42f6591a PR fixes - remove redundant case 2018-10-03 21:58:29 -07:00
5491422b12 Fix validator_to_leader_transition test to not start up tpu after shutting down tvu, as the tpu now outputs ticks that will mess up the verification check 2018-10-03 21:58:29 -07:00
23f3ff3cf0 Added LeaderScheduler module and tests 2018-10-03 21:58:29 -07:00
f90488c77b Demote 'not enough peers in crdt table' log message 2018-10-02 22:00:54 -07:00
beb4536841 Run a fullnode+drone automatically when the container starts up 2018-10-02 18:09:35 -07:00
3fa46dd66d Add replicator sha sampling
replicator will submit mining proofs with the result of sampling
the encrypted file with a hashing algorithm.
2018-10-02 17:04:46 -07:00
ad5fcf778f Publish minimal Solana docker images to dockerhub 2018-10-02 16:57:48 -07:00
83b000ae88 Remove SNAP_ prefix 2018-10-02 16:57:48 -07:00
33e179caa6 Update sha2 requirement from 0.7.0 to 0.8.0
Updates the requirements on [sha2](https://github.com/RustCrypto/hashes) to permit the latest version.
- [Release notes](https://github.com/RustCrypto/hashes/releases)
- [Commits](https://github.com/RustCrypto/hashes/commits/sha2-v0.8.0)

Signed-off-by: dependabot[bot] <support@dependabot.com>
2018-10-02 09:00:05 -06:00
b1e941cab9 Return all instances 2018-10-01 07:51:48 -07:00
6db961d256 Correct comment 2018-09-30 00:08:09 -07:00
83409ded59 Correctly deserialize large userdata 2018-09-29 19:39:54 -07:00
396b2e9772 Ignore keep alive for completed games 2018-09-29 19:39:54 -07:00
94459deb94 Disable codecov.io reporting 2018-09-28 19:19:16 -07:00
660af84b8d Use the same versions of llvm-cov and libprofile 2018-09-28 19:19:16 -07:00
7b31020903 Add back llvm-dev for llvm-cov 2018-09-28 19:19:16 -07:00
9a4143b4d9 Upgrade llvm-dev and boot kcov
Need clang-dev, not llvm-dev because cargo-cov looks for libprofile
in a clang installation directory.
2018-09-28 19:19:16 -07:00
aebc47ad55 Attempt coverage reporting 2018-09-28 19:19:16 -07:00
b6b5455917 Fix test in coverage build 2018-09-28 19:19:16 -07:00
5bc01cd51a Revive code coverage 2018-09-28 19:19:16 -07:00
c79acac37b Add tic-tac-toe dashboard program 2018-09-28 18:48:34 -07:00
a5f2aa6777 s/grid/board/g 2018-09-28 18:48:34 -07:00
4169e5c510 Simplify game setup messaging 2018-09-28 18:48:34 -07:00
0727c440b3 Add KeepAlive message so players can detect abandoned games 2018-09-28 18:48:34 -07:00
19a7ff0c43 Pin down nightly in benchmark build 2018-09-28 19:29:50 -06:00
5f18403199 Upgrade nightly 2018-09-28 19:29:50 -06:00
9f325fca09 Re-enable cargo audit 2018-09-28 17:53:41 -06:00
10d08acefa Reenable cargo audit 2018-09-28 17:53:41 -06:00
52d50e6bc4 Update for new solana-jsonrpc 2018-09-28 17:53:41 -06:00
e7de7c32db Transactions with multiple programs. (#1381)
Transactions contain a vector of instructions that are executed atomically.
Bench shows a 2.3x speed up when using 5 instructions per tx.
2018-09-28 16:16:35 -07:00
a5f07638ec Use static str define for ledger files 2018-09-28 14:23:37 -07:00
aa2a3fe201 Add chacha module to encrypt ledger files 2018-09-28 14:23:37 -07:00
abd13ba4ca move program tests to integration 2018-09-28 11:30:10 -07:00
485ba093b3 Install kcov to CI environment 2018-09-28 11:20:27 -06:00
36b18e4fb5 Create new wallet on each run of wallet-sanity 2018-09-28 07:39:31 -07:00
8d92232949 Specify zone 2018-09-28 07:32:49 -07:00
e4d8c094a4 Include -z when deleting network 2018-09-27 21:27:09 -07:00
d26e1c51a9 0.10.0 2018-09-27 16:38:53 -07:00
183 changed files with 22063 additions and 4022 deletions

.buildkite/env/README.md (new file)

@@ -0,0 +1,31 @@
[ejson](https://github.com/Shopify/ejson) and
[ejson2env](https://github.com/Shopify/ejson2env) are used to manage access
tokens and other secrets required for CI.
#### Setup
```bash
$ sudo gem install ejson ejson2env
```
then obtain the necessary keypair and place it in `/opt/ejson/keys/`.
#### Usage
Run the following command to decrypt the secrets into the environment:
```bash
eval $(ejson2env secrets.ejson)
```
#### Managing secrets.ejson
To decrypt `secrets.ejson` for modification, run:
```bash
$ ejson decrypt secrets.ejson -o secrets_unencrypted.ejson
```
Edit, then run the following to re-encrypt the file **BEFORE COMMITTING YOUR
CHANGES**:
```bash
$ ejson encrypt secrets_unencrypted.ejson
$ mv secrets_unencrypted.ejson secrets.ejson
```
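
To sanity-check a key locally the same way CI does, evaluate the `ejson2env` output and probe one of the exported variables (a minimal sketch, assuming the private key is installed under the default `/opt/ejson/keys/`):
```bash
# Decrypts secrets.ejson and evaluates the resulting `export VAR=...` lines
eval "$(ejson2env secrets.ejson)"
# Confirm a secret made it into the environment without printing its value
echo "CODECOV_TOKEN is set: ${CODECOV_TOKEN:+yes}"
```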

.buildkite/env/secrets.ejson (new file)

@@ -0,0 +1,10 @@
{
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
"environment": {
"CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]",
"CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]",
"INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]",
"INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]",
"INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]"
}
}

@@ -1,4 +1,7 @@
#!/bin/bash -e
#!/usr/bin/env bash
set -e
eval "$(ejson2env .buildkite/env/secrets.ejson)"
# Ensure the pattern "+++ ..." never occurs when |set -x| is set, as buildkite
# interprets this as the start of a log group.
@@ -24,4 +27,3 @@ export PS4="++"
set -x
rsync -a --delete --link-dest="$d" "$d"/target .
)

.buildkite/pipeline-upload.sh (new executable file)

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
# steps:
# - command: "ci/buildkite-pipeline-upload.sh"
#
set -e
cd "$(dirname "$0")"/..
buildkite-agent pipeline upload ci/buildkite.yml
if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
# Add helpful link back to the corresponding Github Pull Request
buildkite-agent annotate --style "info" \
"Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
fi

.github/ISSUE_TEMPLATE.md (new file)

@@ -0,0 +1,6 @@
#### Problem
#### Proposed Solution

.github/PULL_REQUEST_TEMPLATE.md (new file)

@@ -0,0 +1,5 @@
#### Problem
#### Summary of Changes
Fixes #

.gitignore

@@ -1,4 +1,3 @@
Cargo.lock
/target/
**/*.rs.bk
@@ -14,3 +13,6 @@ Cargo.lock
# test temp files, ledgers, etc.
/farf/
# log files
*.log

Cargo.lock (new generated file; diff too large to display)

Cargo.toml

@@ -1,20 +1,12 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.9.0"
version = "0.10.5"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
]
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"
[[bin]]
@@ -34,6 +26,7 @@ name = "solana-drone"
path = "src/bin/drone.rs"
[[bin]]
required-features = ["chacha"]
name = "solana-replicator"
path = "src/bin/replicator.rs"
@@ -65,11 +58,13 @@ path = "src/bin/wallet.rs"
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
[features]
unstable = []
ipv6 = []
bpf_c = []
chacha = []
cuda = []
erasure = []
ipv6 = []
test = []
unstable = []
[dependencies]
atty = "0.2"
@@ -80,13 +75,17 @@ bytes = "0.4"
chrono = { version = "0.4.0", features = ["serde"] }
clap = "2.31"
dirs = "1.0.2"
elf = "0.0.10"
env_logger = "0.5.12"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
solana-jsonrpc-core = "0.1"
solana-jsonrpc-http-server = "0.1"
solana-jsonrpc-macros = "0.1"
hex-literal = "0.1.1"
influx_db_client = "0.3.6"
solana-jsonrpc-core = "0.3.0"
solana-jsonrpc-http-server = "0.3.0"
solana-jsonrpc-macros = "0.3.0"
solana-jsonrpc-pubsub = "0.3.0"
solana-jsonrpc-ws-server = "0.3.0"
ipnetwork = "0.12.7"
itertools = "0.7.8"
libc = "0.2.43"
@@ -99,22 +98,20 @@ rand = "0.5.1"
rayon = "1.0.0"
reqwest = "0.9.0"
ring = "0.13.2"
sha2 = "0.7.0"
sha2 = "0.8.0"
serde = "1.0.27"
serde_cbor = "0.9.0"
serde_derive = "1.0.27"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana_program_interface = { path = "common" }
solana-sdk = { path = "sdk", version = "0.10.5" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
[dev-dependencies]
noop = { path = "programs/noop" }
print = { path = "programs/print" }
move_funds = { path = "programs/move_funds" }
solana-noop = { path = "programs/native/noop", version = "0.10.5" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.5" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.5" }
[[bench]]
name = "bank"
@@ -131,18 +128,16 @@ name = "signature"
[[bench]]
name = "sigverify"
[[bench]]
required-features = ["chacha"]
name = "chacha"
[workspace]
members = [
".",
"common",
"programs/noop",
"programs/print",
"programs/move_funds",
]
default-members = [
".",
"common",
"programs/noop",
"programs/print",
"programs/move_funds",
"sdk",
"programs/native/noop",
"programs/native/bpf_loader",
"programs/native/lua_loader",
"programs/bpf/rust/noop",
]

README.md

@@ -21,7 +21,7 @@ It's possible for a centralized database to process 710,000 transactions per sec
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
Testnet Demos

@@ -12,9 +12,9 @@ When cutting a new channel branch these pre-steps are required:
1. Pick your branch point for release on master.
2. Create the branch. The name should be "v" + the first 2 "version" fields from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies the next branch name is "v0.9".
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0).
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0) by running `./scripts/increment-cargo-version.sh`.
4. Push your new branch to solana.git
5. Land your Carto.toml change as a master PR.
5. Land your Cargo.toml change as a master PR.
At this point, ci/channel-info.sh should show your freshly cut release branch as "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".

View File

@ -4,8 +4,6 @@ extern crate rayon;
extern crate solana;
extern crate test;
use bincode::serialize;
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
@@ -21,31 +19,35 @@ fn bench_process_transaction(bencher: &mut Bencher) {
// Create transactions between unrelated parties.
let transactions: Vec<_> = (0..4096)
.into_par_iter()
.map(|i| {
.into_iter()
.map(|_| {
// Seed the 'from' account.
let rando0 = Keypair::new();
let tx = Transaction::system_move(
&mint.keypair(),
rando0.pubkey(),
10_000,
mint.last_id(),
bank.last_id(),
0,
);
assert!(bank.process_transaction(&tx).is_ok());
assert_eq!(bank.process_transaction(&tx), Ok(()));
// Seed the 'to' account and a cell for its signature.
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
bank.register_entry_id(&last_id);
let rando1 = Keypair::new();
let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, last_id, 0);
assert!(bank.process_transaction(&tx).is_ok());
let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, bank.last_id(), 0);
assert_eq!(bank.process_transaction(&tx), Ok(()));
// Finally, return the transaction to the benchmark.
tx
}).collect();
let mut id = bank.last_id();
for _ in 0..(MAX_ENTRY_IDS - 1) {
bank.register_entry_id(&id);
id = hash(&id.as_ref())
}
bencher.iter(|| {
// Since benchmarker runs this multiple times, we need to clear the signatures.
bank.clear_signatures();

View File

@ -3,20 +3,21 @@ extern crate bincode;
extern crate rand;
extern crate rayon;
extern crate solana;
extern crate solana_program_interface;
extern crate solana_sdk;
extern crate test;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::bank::{Bank, MAX_ENTRY_IDS};
use solana::banking_stage::{BankingStage, NUM_THREADS};
use solana::entry::Entry;
use solana::hash::hash;
use solana::mint::Mint;
use solana::packet::to_packets_chunked;
use solana::signature::{KeypairUtil, Signature};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana_program_interface::pubkey::Pubkey;
use solana_sdk::pubkey::Pubkey;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
@@ -63,8 +64,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
new.keys[0] = Pubkey::new(&from[0..32]);
new.keys[1] = Pubkey::new(&to[0..32]);
new.account_keys[0] = Pubkey::new(&from[0..32]);
new.account_keys[1] = Pubkey::new(&to[0..32]);
new.signature = Signature::new(&sig[0..64]);
new
}).collect();
@@ -72,7 +73,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
transactions.iter().for_each(|tx| {
let fund = Transaction::system_move(
&mint.keypair(),
tx.keys[0],
tx.account_keys[0],
mint_total / txes as i64,
mint.last_id(),
0,
@@ -97,14 +98,131 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
let len = x.read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
}).collect();
let (_stage, signal_receiver) = BankingStage::new(&bank, verified_receiver, Default::default());
let (_stage, signal_receiver) = BankingStage::new(
&bank,
verified_receiver,
Default::default(),
&mint.last_id(),
0,
None,
);
let mut id = mint.last_id();
for _ in 0..MAX_ENTRY_IDS {
id = hash(&id.as_ref());
bank.register_entry_id(&id);
}
bencher.iter(move || {
// make sure the tx last id is still registered
if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
bank.register_entry_id(&mint.last_id());
}
for v in verified.chunks(verified.len() / NUM_THREADS) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes);
bank.clear_signatures();
});
}
#[bench]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
let progs = 5;
let txes = 1000 * NUM_THREADS;
let mint_total = 1_000_000_000_000;
let mint = Mint::new(mint_total);
let (verified_sender, verified_receiver) = channel();
let bank = Arc::new(Bank::new(&mint));
let dummy = Transaction::system_move(
&mint.keypair(),
mint.keypair().pubkey(),
1,
mint.last_id(),
0,
);
let transactions: Vec<_> = (0..txes)
.into_par_iter()
.map(|_| {
let mut new = dummy.clone();
let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
new.account_keys[0] = Pubkey::new(&from[0..32]);
new.account_keys[1] = Pubkey::new(&to[0..32]);
let prog = new.instructions[0].clone();
for i in 1..progs {
//generate programs that spend to random keys
let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
let to_key = Pubkey::new(&to[0..32]);
new.account_keys.push(to_key);
assert_eq!(new.account_keys.len(), i + 2);
new.instructions.push(prog.clone());
assert_eq!(new.instructions.len(), i + 1);
new.instructions[i].accounts[1] = 1 + i as u8;
assert_eq!(new.key(i, 1), Some(&to_key));
assert_eq!(
new.account_keys[new.instructions[i].accounts[1] as usize],
to_key
);
}
assert_eq!(new.instructions.len(), progs);
new.signature = Signature::new(&sig[0..64]);
new
}).collect();
transactions.iter().for_each(|tx| {
let fund = Transaction::system_move(
&mint.keypair(),
tx.account_keys[0],
mint_total / txes as i64,
mint.last_id(),
0,
);
assert!(bank.process_transaction(&fund).is_ok());
});
//sanity check, make sure all the transactions can execute sequentially
transactions.iter().for_each(|tx| {
let res = bank.process_transaction(&tx);
assert!(res.is_ok(), "sanity test transactions");
});
bank.clear_signatures();
//sanity check, make sure all the transactions can execute in parallel
let res = bank.process_transactions(&transactions);
for r in res {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
.into_iter()
.map(|x| {
let len = x.read().unwrap().packets.len();
(x, iter::repeat(1).take(len).collect())
}).collect();
let (_stage, signal_receiver) = BankingStage::new(
&bank,
verified_receiver,
Default::default(),
&mint.last_id(),
0,
None,
);
let mut id = mint.last_id();
for _ in 0..MAX_ENTRY_IDS {
id = hash(&id.as_ref());
bank.register_entry_id(&id);
}
bencher.iter(move || {
// make sure the transactions are still valid
if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
bank.register_entry_id(&mint.last_id());
}
for v in verified.chunks(verified.len() / NUM_THREADS) {
verified_sender.send(v.to_vec()).unwrap();
}
check_txs(&signal_receiver, txes);
bank.clear_signatures();
// make sure the tx last id is still registered
bank.register_entry_id(&mint.last_id());
});
}

benches/chacha.rs (new file)

@@ -0,0 +1,29 @@
#![feature(test)]
extern crate solana;
extern crate test;
use solana::chacha::chacha_cbc_encrypt_files;
use std::fs::remove_file;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use test::Bencher;
#[bench]
fn bench_chacha_encrypt(bench: &mut Bencher) {
let in_path = Path::new("bench_chacha_encrypt_file_input.txt");
let out_path = Path::new("bench_chacha_encrypt_file_output.txt.enc");
{
let mut in_file = File::create(in_path).unwrap();
for _ in 0..1024 {
in_file.write("123456foobar".as_bytes()).unwrap();
}
}
bench.iter(move || {
chacha_cbc_encrypt_files(in_path, out_path, "thetestkey".to_string()).unwrap();
});
remove_file(in_path).unwrap();
remove_file(out_path).unwrap();
}
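
Since the Cargo.toml diff above gates this bench behind `required-features = ["chacha"]` and the file uses `#![feature(test)]`, invoking it takes roughly this shape (a sketch; assumes a nightly toolchain and the prebuilt perf-libs that the chacha feature links against):
```bash
cargo +nightly bench --features=chacha --bench chacha
```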

build.rs

@@ -1,33 +1,61 @@
use std::env;
use std::fs;
use std::process::Command;
fn main() {
println!("cargo:rerun-if-changed=target/perf-libs");
println!("cargo:rerun-if-changed=build.rs");
// Ensure target/perf-libs/ exists. It's been observed that
// a cargo:rerun-if-changed= directive with a non-existent
// directory triggers a rebuild on every |cargo build| invocation
fs::create_dir("target/perf-libs").unwrap_or_else(|err| {
fs::create_dir_all("target/perf-libs").unwrap_or_else(|err| {
if err.kind() != std::io::ErrorKind::AlreadyExists {
panic!("Unable to create target/perf-libs: {:?}", err);
}
});
let bpf_c = !env::var("CARGO_FEATURE_BPF_C").is_err();
let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();
if cuda || erasure {
if bpf_c {
let out_dir = "OUT_DIR=../../../target/".to_string()
+ &env::var("PROFILE").unwrap()
+ &"/bpf".to_string();
println!("cargo:rerun-if-changed=programs/bpf/c/sdk/bpf.mk");
println!("cargo:rerun-if-changed=programs/bpf/c/sdk/inc/solana_sdk.h");
println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
let status = Command::new("make")
.current_dir("programs/bpf/c")
.arg("all")
.arg(&out_dir)
.status()
.expect("Failed to build C-based BPF programs");
assert!(status.success());
}
if chacha || cuda || erasure {
println!("cargo:rerun-if-changed=target/perf-libs");
println!("cargo:rustc-link-search=native=target/perf-libs");
}
if chacha {
println!("cargo:rerun-if-changed=target/perf-libs/libcpu-crypt.a");
}
if cuda {
println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
println!("cargo:rerun-if-changed=target/perf-libs/libcuda-crypt.a");
println!("cargo:rustc-link-lib=static=cuda-crypt");
println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=cuda");
println!("cargo:rustc-link-lib=dylib=cudadevrt");
}
if erasure {
println!("cargo:rerun-if-changed=target/perf-libs/libgf_complete.so");
println!("cargo:rerun-if-changed=target/perf-libs/libJerasure.so");
println!("cargo:rustc-link-lib=dylib=Jerasure");
println!("cargo:rustc-link-lib=dylib=gf_complete");
}

@@ -29,4 +29,4 @@ maybe_cargo_install() {
maybe_cargo_install audit tree
_ cargo tree
_ cargo audit || true
_ cargo audit

@@ -0,0 +1,16 @@
steps:
- command: "ci/snap.sh"
timeout_in_minutes: 40
name: "snap [public]"
- command: "ci/docker-solana/build.sh"
timeout_in_minutes: 20
name: "docker-solana"
- command: "ci/publish-crate.sh"
timeout_in_minutes: 20
name: "publish crate [public]"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- command: "ci/publish-solana-tar.sh"
timeout_in_minutes: 15
name: "publish solana release tar"

@@ -1,4 +0,0 @@
steps:
- command: "ci/snap.sh"
timeout_in_minutes: 40
name: "snap [public]"

ci/buildkite.yml

@@ -1,10 +1,10 @@
steps:
- command: "ci/docker-run.sh solanalabs/rust:1.29.1 ci/test-stable.sh"
- command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh"
name: "stable [public]"
env:
CARGO_TARGET_CACHE_NAME: "stable"
timeout_in_minutes: 30
- command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-bench.sh"
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-bench.sh"
name: "bench [public]"
env:
CARGO_TARGET_CACHE_NAME: "nightly"
@@ -12,7 +12,7 @@ steps:
- command: "ci/shellcheck.sh"
name: "shellcheck [public]"
timeout_in_minutes: 20
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-09-03 ci/test-nightly.sh || true"
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-nightly.sh"
name: "nightly [public]"
env:
CARGO_TARGET_CACHE_NAME: "nightly"
@@ -36,10 +36,7 @@ steps:
timeout_in_minutes: 20
name: "snap [public]"
- wait
- command: "ci/publish-crate.sh"
timeout_in_minutes: 20
name: "publish crate [public]"
- trigger: "solana-snap"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:

ci/crate-version.sh (new executable file)

@@ -0,0 +1,16 @@
#!/bin/bash -e
#
# Outputs the current crate version
#
cd "$(dirname "$0")"/..
while read -r name equals value _; do
if [[ $name = version && $equals = = ]]; then
echo "${value//\"/}"
exit 0
fi
done < <(cat Cargo.toml)
echo Unable to locate version in Cargo.toml 1>&2
exit 1
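
Against the Cargo.toml in this compare, a run would look like this (hypothetical shell session):
```bash
$ ./ci/crate-version.sh
0.10.5
```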

@@ -4,7 +4,6 @@ ARG date
RUN set -x && \
rustup install nightly-$date && \
rustup default nightly-$date && \
rustup component add clippy-preview --toolchain=nightly-$date && \
rustc --version && \
cargo --version && \
cargo +nightly-$date install cargo-cov

@@ -1,19 +1,21 @@
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.29.1
FROM rust:1.30.1
RUN set -x && \
apt update && \
apt-get install apt-transport-https && \
echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main > /etc/apt/sources.list.d/llvm.list && \
echo deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-7 main > /etc/apt/sources.list.d/llvm.list && \
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
apt update && \
apt install -y \
buildkite-agent \
cmake \
llvm-6.0 \
lcov \
libclang-common-7-dev \
llvm-7 \
rsync \
sudo \
&& \

ci/docker-solana/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
cargo-install/

View File

@ -0,0 +1,13 @@
FROM debian:stretch
# JSON RPC port
EXPOSE 8899/tcp
# Install libssl
RUN apt update && \
apt-get install -y libssl-dev && \
rm -rf /var/lib/apt/lists/*
COPY usr/bin /usr/bin/
ENTRYPOINT [ "/usr/bin/solana-entrypoint.sh" ]
CMD [""]

View File

@ -0,0 +1,17 @@
## Minimal Solana Docker image
This image is automatically updated by CI
https://hub.docker.com/r/solanalabs/solana/
### Usage:
Run the latest beta image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:beta
```
Run the latest edge image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:edge
```
Port *8899* is the JSON RPC port, which is used by clients to communicate with the network.
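
Once a container is running, the RPC port can be exercised directly; a minimal smoke test sketch, assuming a node listening on localhost (`getTransactionCount` is one of the methods documented in the JSON RPC API doc in this repo):

```bash
$ curl -X POST -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' \
    http://localhost:8899
```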

ci/docker-solana/build.sh Executable file
View File

@ -0,0 +1,39 @@
#!/bin/bash -ex
cd "$(dirname "$0")"
eval "$(../channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
if [[ -z $CHANNEL ]]; then
echo Unable to determine channel to publish into, exiting.
exit 0
fi
rm -rf usr/
../docker-run.sh solanalabs/rust:1.30.0 \
cargo install --path . --root ci/docker-solana/usr
cp -f entrypoint.sh usr/bin/solana-entrypoint.sh
../../scripts/install-native-programs.sh usr/bin/
docker build -t solanalabs/solana:$CHANNEL .
maybeEcho=
if [[ -z $CI ]]; then
echo "Not CI, skipping |docker push|"
maybeEcho="echo"
else
(
set +x
if [[ -n $DOCKER_PASSWORD && -n $DOCKER_USERNAME ]]; then
echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
fi
)
fi
$maybeEcho docker push solanalabs/solana:$CHANNEL

ci/docker-solana/entrypoint.sh Executable file
View File

@ -0,0 +1,23 @@
#!/bin/bash -ex
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1
solana-keygen -o /config/leader-keypair.json
solana-keygen -o /config/drone-keypair.json
solana-genesis --tokens=1000000000 --ledger /ledger < /config/drone-keypair.json
solana-fullnode-config --keypair=/config/leader-keypair.json -l > /config/leader-config.json
solana-drone --keypair /config/drone-keypair.json --network 127.0.0.1:8001 &
drone=$!
solana-fullnode --identity /config/leader-config.json --ledger /ledger/ &
fullnode=$!
abort() {
kill "$drone" "$fullnode"
}
trap abort SIGINT SIGTERM
wait "$fullnode"
kill "$drone" "$fullnode"

ci/publish-bpf-sdk.sh Executable file
View File

@ -0,0 +1,36 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
version=$(./ci/crate-version.sh)
echo --- Creating tarball
(
set -x
rm -rf bpf-sdk/
mkdir bpf-sdk/
(
echo "$version"
git rev-parse HEAD
) > bpf-sdk/version.txt
cp -ra programs/bpf/c/sdk/* bpf-sdk/
tar jvcf bpf-sdk.tar.bz2 bpf-sdk/
)
echo --- AWS S3 Store
set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
tar zxf s3cmd-2.0.1.tar.gz
fi
python ./s3cmd-2.0.1/s3cmd --acl-public put bpf-sdk.tar.bz2 \
s3://solana-sdk/"$version"/bpf-sdk.tar.bz2
exit 0

View File

@ -2,7 +2,7 @@
cd "$(dirname "$0")/.."
if [[ -z "$BUILDKITE_TAG" ]]; then
if [[ -z "$BUILDKITE_TAG" && -z "$TRIGGERED_BUILDKITE_TAG" ]]; then
# Skip publish if this is not a tagged release
exit 0
fi
@ -12,8 +12,18 @@ if [[ -z "$CRATES_IO_TOKEN" ]]; then
exit 1
fi
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
maybePublish="echo Publish skipped"
if [[ -n $CI ]]; then
maybePublish="cargo publish --token $CRATES_IO_TOKEN"
fi
# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
for Cargo_toml in {sdk,programs/native/{bpf_loader,lua_loader,noop},.}/Cargo.toml; do
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
(
set -x
ci/docker-run.sh rust bash -exc "cd $(dirname "$Cargo_toml"); cargo package; $maybePublish"
)
done
exit 0

ci/publish-metrics-dashboard.sh Executable file
View File

@ -0,0 +1,73 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."
if [[ -z $BUILDKITE ]]; then
echo BUILDKITE not defined
exit 1
fi
if [[ -z $CHANNEL ]]; then
CHANNEL=$(buildkite-agent meta-data get "channel" --default "")
fi
if [[ -z $CHANNEL ]]; then
(
cat <<EOF
steps:
- block: "Select Dashboard"
fields:
- select: "Channel"
key: "channel"
options:
- label: "stable"
value: "stable"
- label: "edge"
value: "edge"
- label: "beta"
value: "beta"
- command: "ci/$(basename "$0")"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
ci/channel-info.sh
eval "$(ci/channel-info.sh)"
case $CHANNEL in
edge)
CHANNEL_BRANCH=$EDGE_CHANNEL
;;
beta)
CHANNEL_BRANCH=$BETA_CHANNEL
;;
stable)
CHANNEL_BRANCH=$STABLE_CHANNEL
;;
*)
echo "Error: Invalid CHANNEL=$CHANNEL"
exit 1
;;
esac
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
build:
message: "$BUILDKITE_MESSAGE"
branch: "$CHANNEL_BRANCH"
env:
CHANNEL: "$CHANNEL"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
set -x
exec metrics/publish-metrics-dashboard.sh "$CHANNEL"

ci/publish-solana-tar.sh Executable file
View File

@ -0,0 +1,71 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
DRYRUN="echo"
CHANNEL=unknown
fi
eval "$(ci/channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
if [[ -n "$BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$BUILDKITE_TAG
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Unable to determine channel to publish into, exiting.
exit 0
fi
echo --- Creating tarball
(
set -x
rm -rf solana-release/
mkdir solana-release/
(
echo "$CHANNEL_OR_TAG"
git rev-parse HEAD
) > solana-release/version.txt
cargo install --root solana-release
./scripts/install-native-programs.sh solana-release/bin
./fetch-perf-libs.sh
cargo install --features=cuda --root solana-release-cuda
cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda
tar jvcf solana-release.tar.bz2 solana-release/
)
echo --- AWS S3 Store
if [[ -z $DRYRUN ]]; then
(
set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
$DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
$DRYRUN tar zxf s3cmd-2.0.1.tar.gz
fi
$DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
)
else
echo Skipped due to DRYRUN
fi
exit 0

View File

@ -2,6 +2,13 @@
cd "$(dirname "$0")/.."
if ! ci/version-check.sh stable; then
# This job doesn't run within a container; try once to upgrade tooling on a
# version check failure
rustup install stable
ci/version-check.sh stable
fi
DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
DRYRUN="echo"
@ -10,14 +17,14 @@ fi
eval "$(ci/channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
SNAP_CHANNEL=stable
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
SNAP_CHANNEL=edge
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
SNAP_CHANNEL=beta
CHANNEL=beta
fi
if [[ -z $SNAP_CHANNEL ]]; then
if [[ -z $CHANNEL ]]; then
echo Unable to determine channel to publish into, exiting.
exit 0
fi
@ -51,11 +58,13 @@ if [[ ! -x /usr/bin/multilog ]]; then
sudo apt-get install -y daemontools
fi
echo --- build: $SNAP_CHANNEL channel
echo --- build: $CHANNEL channel
snapcraft
source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap
echo --- publish: $SNAP_CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
if [[ -z $DO_NOT_PUBLISH_SNAP ]]; then
echo --- publish: $CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $CHANNEL
fi

ci/solana-testnet.yml Executable file
View File

@ -0,0 +1,18 @@
steps:
- command: "ci/snap.sh"
label: "create snap"
- wait
- command: "ci/testnet-automation.sh"
label: "run testnet"
agents:
- "queue=testnet-deploy"
- wait: ~
continue_on_failure: true
- command: "ci/testnet-automation-cleanup.sh"
label: "delete testnet"
agents:
- "queue=testnet-deploy"

View File

@ -2,6 +2,11 @@
cd "$(dirname "$0")/.."
# shellcheck disable=SC1091
source ci/upload_ci_artifact.sh
eval "$(ci/channel-info.sh)"
ci/version-check.sh nightly
export RUST_BACKTRACE=1
@ -12,6 +17,17 @@ _() {
set -o pipefail
UPLOAD_METRICS=""
TARGET_BRANCH=$BUILDKITE_BRANCH
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
TARGET_BRANCH=$EDGE_CHANNEL
else
UPLOAD_METRICS="upload"
fi
BENCH_FILE=bench_output.log
_ cargo bench --features=unstable --verbose -- -Z unstable-options --format=json | tee $BENCH_FILE
_ cargo run --release --bin solana-upload-perf -- $BENCH_FILE
BENCH_ARTIFACT=current_bench_results.log
_ cargo bench --features=unstable --verbose -- -Z unstable-options --format=json | tee "$BENCH_FILE"
_ cargo run --release --bin solana-upload-perf -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" >"$BENCH_ARTIFACT"
upload_ci_artifact "$BENCH_ARTIFACT"

View File

@ -1,6 +1,7 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
source ci/upload_ci_artifact.sh
ci/version-check.sh nightly
export RUST_BACKTRACE=1
@ -10,20 +11,50 @@ _() {
"$@"
}
_ cargo build --verbose --features unstable
_ cargo test --verbose --features=unstable
# Uncomment this to run the nightly test suite
# _ cargo test --verbose --features=unstable
exit 0
maybe_cargo_install() {
for cmd in "$@"; do
set +e
cargo "$cmd" --help > /dev/null 2>&1
declare exitcode=$?
set -e
if [[ $exitcode -eq 101 ]]; then
_ cargo install cargo-"$cmd"
fi
done
}
# Coverage disabled (see issue #433)
_ cargo cov test
maybe_cargo_install cov
# Generate coverage data and report via unit-test suite.
_ cargo cov clean
_ cargo cov test --lib
_ cargo cov report
echo --- Coverage report:
ls -l target/cov/report/index.html
# Generate a coverage report with grcov via lcov.
if [[ ! -f ./grcov ]]; then
uname=$(uname | tr '[:upper:]' '[:lower:]')
uname_m=$(uname -m | tr '[:upper:]' '[:lower:]')
name=grcov-${uname}-${uname_m}.tar.bz2
_ wget "https://github.com/mozilla/grcov/releases/download/v0.2.3/${name}"
_ tar -xjf "${name}"
fi
_ ./grcov . -t lcov > lcov.info
_ genhtml -o target/cov/report-lcov --show-details --highlight --ignore-errors source --legend lcov.info
# Upload the tarballs to buildkite.
_ cd target/cov && tar -cjf cov-report.tar.bz2 report/* && cd -
_ upload_ci_artifact "target/cov/cov-report.tar.bz2"
_ cd target/cov && tar -cjf lcov-report.tar.bz2 report-lcov/* && cd -
_ upload_ci_artifact "target/cov/lcov-report.tar.bz2"
if [[ -z "$CODECOV_TOKEN" ]]; then
echo CODECOV_TOKEN undefined
else
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
true
# TODO: Why doesn't codecov grok our lcov files?
#bash <(curl -s https://codecov.io/bash) -X gcov
fi

View File

@ -20,7 +20,15 @@ _() {
"$@"
}
_ cargo test --features=cuda,erasure
FEATURES=cuda,erasure,chacha
_ cargo test --verbose --features="$FEATURES" --lib
# Run integration tests serially
for test in tests/*.rs; do
test=${test##*/} # basename x
test=${test%.rs} # basename x .rs
_ cargo test --verbose --jobs=1 --features="$FEATURES" --test="$test"
done
echo --- ci/localnet-sanity.sh
(

View File

@ -13,9 +13,26 @@ _() {
_ cargo fmt -- --check
_ cargo build --verbose
_ cargo test --verbose
_ cargo test --verbose --lib
_ cargo clippy -- --deny=warnings
# Run integration tests serially
for test in tests/*.rs; do
test=${test##*/} # basename x
test=${test%.rs} # basename x .rs
_ cargo test --verbose --jobs=1 --test="$test"
done
# Run each native program's tests
for program in programs/native/*; do
echo --- "$program"
(
set -x
cd "$program"
cargo test --verbose
)
done
echo --- ci/localnet-sanity.sh
(
set -x
@ -24,4 +41,4 @@ echo --- ci/localnet-sanity.sh
USE_INSTALL=1 ci/localnet-sanity.sh
)
_ ci/audit.sh || true
_ ci/audit.sh

View File

@ -0,0 +1,9 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
echo --- find testnet configuration
net/gce.sh config -p testnet-automation
echo --- delete testnet
net/gce.sh delete -p testnet-automation

View File

@ -0,0 +1,7 @@
#!/usr/bin/env python
import sys, json
data=json.load(sys.stdin)
# Print the [column name, value] pair extracted from each InfluxDB query result
print([[result['series'][0]['columns'][1].encode(), result['series'][0]['values'][0][1]]
       for result in data['results']])

ci/testnet-automation.sh Executable file
View File

@ -0,0 +1,80 @@
#!/bin/bash -e
cd "$(dirname "$0")/.."
echo --- downloading snap from build artifacts
buildkite-agent artifact download "solana_*.snap" .
# shellcheck disable=SC1091
source ci/upload_ci_artifact.sh
[[ -n $ITERATION_WAIT ]] || ITERATION_WAIT=300
[[ -n $NUMBER_OF_NODES ]] || NUMBER_OF_NODES="10 25 50 100"
[[ -n $LEADER_CPU_MACHINE_TYPE ]] ||
LEADER_CPU_MACHINE_TYPE="n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100"
[[ -n $CLIENT_COUNT ]] || CLIENT_COUNT=2
[[ -n $TESTNET_TAG ]] || TESTNET_TAG=testnet-automation
[[ -n $TESTNET_ZONE ]] || TESTNET_ZONE=us-west1-b
launchTestnet() {
declare nodeCount=$1
echo --- setup "$nodeCount" node test
net/gce.sh create \
-n "$nodeCount" -c "$CLIENT_COUNT" \
-G "$LEADER_CPU_MACHINE_TYPE" \
-p "$TESTNET_TAG" -z "$TESTNET_ZONE"
echo --- configure database
net/init-metrics.sh -e
echo --- start "$nodeCount" node test
net/net.sh start -o noValidatorSanity -S solana_*.snap
echo --- wait "$ITERATION_WAIT" seconds to complete test
sleep "$ITERATION_WAIT"
declare q_mean_tps='
SELECT round(mean("sum_count")) AS "mean_tps" FROM (
SELECT sum("count") AS "sum_count"
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
WHERE time > now() - 300s GROUP BY time(1s)
)'
declare q_max_tps='
SELECT round(max("sum_count")) AS "max_tps" FROM (
SELECT sum("count") AS "sum_count"
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
WHERE time > now() - 300s GROUP BY time(1s)
)'
declare q_mean_finality='
SELECT round(mean("duration_ms")) as "mean_finality"
FROM "testnet-automation"."autogen"."leader-finality"
WHERE time > now() - 300s'
declare q_max_finality='
SELECT round(max("duration_ms")) as "max_finality"
FROM "testnet-automation"."autogen"."leader-finality"
WHERE time > now() - 300s'
declare q_99th_finality='
SELECT round(percentile("duration_ms", 99)) as "99th_finality"
FROM "testnet-automation"."autogen"."leader-finality"
WHERE time > now() - 300s'
curl -G "https://metrics.solana.com:8086/query?u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
--data-urlencode "db=$INFLUX_DATABASE" \
--data-urlencode "q=$q_mean_tps;$q_max_tps;$q_mean_finality;$q_max_finality;$q_99th_finality" |
python ci/testnet-automation-json-parser.py >>TPS"$nodeCount".log
upload_ci_artifact TPS"$nodeCount".log
}
# This is needed because buildkite doesn't let us define an array of numbers;
# the array is instead defined as a space-separated string of numbers
# shellcheck disable=SC2206
nodes_count_array=($NUMBER_OF_NODES)
for n in "${nodes_count_array[@]}"; do
launchTestnet "$n"
done

View File

@ -9,8 +9,10 @@ clientNodeCount=0
validatorNodeCount=10
publicNetwork=false
snapChannel=edge
tarChannelOrTag=edge
delete=false
enableGpu=false
useTarReleaseChannel=false
usage() {
exitcode=0
@ -19,16 +21,21 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [name] [zone] [options...]
usage: $0 [name] [cloud] [zone] [options...]
Deploys a CD testnet
name - name of the network
zone - GCE to deploy the network into
cloud - cloud provider to use (gce, ec2)
zone - cloud provider zone to deploy the network into
options:
-s edge|beta|stable - Deploy the specified Snap release channel
(default: $snapChannel)
-t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
specified release channel (edge|beta|stable) or release tag
(vX.Y.Z)
(default: $tarChannelOrTag)
-n [number] - Number of validator nodes (default: $validatorNodeCount)
-c [number] - Number of client nodes (default: $clientNodeCount)
-P - Use public network IP addresses (default: $publicNetwork)
@ -44,12 +51,14 @@ EOF
}
netName=$1
zone=$2
cloudProvider=$2
zone=$3
[[ -n $netName ]] || usage
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n $zone ]] || usage "Zone not specified"
shift 2
shift 3
while getopts "h?p:Pn:c:s:gG:a:d" opt; do
while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
case $opt in
h | \?)
usage
@ -73,6 +82,17 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
;;
esac
;;
t)
case $OPTARG in
edge|beta|stable|v*)
tarChannelOrTag=$OPTARG
useTarReleaseChannel=true
;;
*)
usage "Invalid release channel: $OPTARG"
;;
esac
;;
g)
enableGpu=true
;;
@ -93,7 +113,7 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
done
gce_create_args=(
create_args=(
-a "$leaderAddress"
-c "$clientNodeCount"
-n "$validatorNodeCount"
@ -103,26 +123,26 @@ gce_create_args=(
if $enableGpu; then
if [[ -z $leaderMachineType ]]; then
gce_create_args+=(-g)
create_args+=(-g)
else
gce_create_args+=(-G "$leaderMachineType")
create_args+=(-G "$leaderMachineType")
fi
fi
if $publicNetwork; then
gce_create_args+=(-P)
create_args+=(-P)
fi
set -x
echo --- gce.sh delete
time net/gce.sh delete -p "$netName"
echo "--- $cloudProvider.sh delete"
time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
if $delete; then
exit 0
fi
echo --- gce.sh create
time net/gce.sh create "${gce_create_args[@]}"
echo "--- $cloudProvider.sh create"
time net/"$cloudProvider".sh create "${create_args[@]}"
net/init-metrics.sh -e
echo --- net.sh start
@ -130,7 +150,18 @@ maybeRejectExtraNodes=
if ! $publicNetwork; then
maybeRejectExtraNodes="-o rejectExtraNodes"
fi
maybeNoValidatorSanity=
if [[ -n $NO_VALIDATOR_SANITY ]]; then
maybeNoValidatorSanity="-o noValidatorSanity"
fi
maybeNoLedgerVerify=
if [[ -n $NO_LEDGER_VERIFY ]]; then
maybeNoLedgerVerify="-o noLedgerVerify"
fi
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes
if $useTarReleaseChannel; then
time net/net.sh start -t "$tarChannelOrTag" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
else
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
fi
exit 0
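
With the new positional arguments, a deploy invocation now names the cloud provider and zone explicitly; a sketch based on the calls ci/testnet-manager.sh makes below (the elastic IP option is omitted here):

```bash
# Deploy a 3-validator EC2 testnet from the beta tarball channel
ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
  -t beta -n 3 -c 0 -P
```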

ci/testnet-manager.sh Executable file
View File

@ -0,0 +1,360 @@
#!/bin/bash -e
cd "$(dirname "$0")"/..
if [[ -z $BUILDKITE ]]; then
echo BUILDKITE not defined
exit 1
fi
if [[ -z $SOLANA_METRICS_PARTIAL_CONFIG ]]; then
echo SOLANA_METRICS_PARTIAL_CONFIG not defined
exit 1
fi
if [[ -z $TESTNET ]]; then
TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
fi
if [[ -z $TESTNET_OP ]]; then
TESTNET_OP=$(buildkite-agent meta-data get "testnet-operation" --default "")
fi
if [[ -z $TESTNET || -z $TESTNET_OP ]]; then
(
cat <<EOF
steps:
- block: "Manage Testnet"
fields:
- select: "Network"
key: "testnet"
options:
- label: "testnet"
value: "testnet"
- label: "testnet-perf"
value: "testnet-perf"
- label: "testnet-master"
value: "testnet-master"
- label: "testnet-master-perf"
value: "testnet-master-perf"
- label: "testnet-edge"
value: "testnet-edge"
- label: "testnet-edge-perf"
value: "testnet-edge-perf"
- label: "testnet-beta"
value: "testnet-beta"
- label: "testnet-beta-perf"
value: "testnet-beta-perf"
- select: "Operation"
key: "testnet-operation"
default: "sanity-or-restart"
options:
- label: "Sanity check. Restart network on failure"
value: "sanity-or-restart"
- label: "Start (or restart) the network"
value: "start"
- label: "Stop the network"
value: "stop"
- label: "Sanity check only"
value: "sanity"
- command: "ci/$(basename "$0")"
agents:
- "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
ci/channel-info.sh
eval "$(ci/channel-info.sh)"
case $TESTNET in
testnet-edge|testnet-edge-perf|testnet-master|testnet-master-perf)
CHANNEL_OR_TAG=edge
CHANNEL_BRANCH=$EDGE_CHANNEL
;;
testnet-beta|testnet-beta-perf)
CHANNEL_OR_TAG=beta
CHANNEL_BRANCH=$BETA_CHANNEL
;;
testnet|testnet-perf)
if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
CHANNEL_BRANCH=$BETA_CHANNEL
else
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
CHANNEL_BRANCH=$STABLE_CHANNEL
fi
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
build:
message: "$BUILDKITE_MESSAGE"
branch: "$CHANNEL_BRANCH"
env:
TESTNET: "$TESTNET"
TESTNET_OP: "$TESTNET_OP"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
sanity() {
echo "--- sanity $TESTNET"
case $TESTNET in
testnet-edge)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
)
;;
testnet-edge-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh edge-perf-testnet-solana-com ec2 us-west-2b
)
;;
testnet-beta)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
)
;;
testnet-beta-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh beta-perf-testnet-solana-com ec2 us-west-2b
)
;;
testnet-master)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh master-testnet-solana-com gce us-west1-b
)
;;
testnet-master-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh master-perf-testnet-solana-com gce us-west1-b
)
;;
testnet)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
#ci/testnet-sanity.sh testnet-solana-com gce us-east1-c
ci/testnet-sanity.sh testnet-solana-com ec2 us-west-1a
)
;;
testnet-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
#ci/testnet-sanity.sh perf-testnet-solana-com ec2 us-east-1a
ci/testnet-sanity.sh perf-testnet-solana-com gce us-west1-b
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
}
start() {
declare maybeDelete=$1
if [[ -z $maybeDelete ]]; then
echo "--- start $TESTNET"
else
echo "--- stop $TESTNET"
fi
case $TESTNET in
testnet-edge)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
-s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0ccd4f2239886fa94 \
${maybeDelete:+-d}
)
;;
testnet-edge-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh edge-perf-testnet-solana-com ec2 us-west-2b \
-g -t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
)
;;
testnet-beta)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0f286cf8a0771ce35 \
${maybeDelete:+-d}
)
;;
testnet-beta-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh beta-perf-testnet-solana-com ec2 us-west-2b \
-g -t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
)
;;
testnet-master)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh master-testnet-solana-com gce us-west1-b \
-s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a master-testnet-solana-com \
${maybeDelete:+-d}
)
;;
testnet-master-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh master-perf-testnet-solana-com gce us-west1-b \
-G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
-t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
)
;;
testnet)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
#ci/testnet-deploy.sh testnet-solana-com gce us-east1-c \
# -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
# ${maybeDelete:+-d}
ci/testnet-deploy.sh testnet-solana-com ec2 us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0fa502bf95f6f18b2 \
${maybeDelete:+-d}
)
;;
testnet-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh perf-testnet-solana-com gce us-west1-b \
-G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
-t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
#ci/testnet-deploy.sh perf-testnet-solana-com ec2 us-east-1a \
# -g \
# -t "$CHANNEL_OR_TAG" -c 2 \
# ${maybeDelete:+-d}
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
}
stop() {
start delete
}
case $TESTNET_OP in
sanity)
sanity
;;
start)
start
;;
stop)
stop
;;
sanity-or-restart)
if sanity; then
echo Pass
else
echo "Sanity failed, restarting the network"
echo "^^^ +++"
start
fi
;;
esac
echo --- fin
exit 0

View File

@ -9,11 +9,13 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [name]
usage: $0 [name] [cloud] [zone]
Sanity check a CD testnet
name - name of the network
cloud - cloud provider to use (gce, ec2)
zone - cloud provider zone of the network
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics
@ -22,14 +24,18 @@ EOF
}
netName=$1
cloudProvider=$2
zone=$3
[[ -n $netName ]] || usage ""
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n $zone ]] || usage "Zone not specified"
set -x
echo --- gce.sh config
net/gce.sh config -p "$netName"
echo "--- $cloudProvider.sh config"
timeout 5m net/"$cloudProvider".sh config -p "$netName" -z "$zone"
net/init-metrics.sh -e
echo --- net.sh sanity
net/net.sh sanity \
timeout 5m net/net.sh sanity \
${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \

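For reference, ci/testnet-manager.sh above invokes this script with the new positional arguments, e.g.:

```bash
# Sanity-check the edge testnet deployed on EC2
ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
```
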
View File

@ -19,12 +19,12 @@ require() {
case ${1:-stable} in
nightly)
require rustc 1.30.[0-9]+-nightly
require cargo 1.29.[0-9]+-nightly
require rustc 1.31.[0-9]+-nightly
require cargo 1.31.[0-9]+-nightly
;;
stable)
require rustc 1.29.[0-9]+
require cargo 1.29.[0-9]+
require rustc 1.30.[0-9]+
require cargo 1.30.[0-9]+
;;
*)
echo Error: unknown argument: "$1"

View File

@ -1,22 +0,0 @@
[package]
name = "solana_program_interface"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
bincode = "1.0.0"
bs58 = "0.2.0"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"

View File

@ -1,29 +0,0 @@
use pubkey::Pubkey;
/// An Account with userdata that is stored on chain
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct Account {
/// tokens in the account
pub tokens: i64,
/// user data
/// A transaction can write to its userdata
pub userdata: Vec<u8>,
/// contract id this contract belongs to
pub program_id: Pubkey,
}
impl Account {
pub fn new(tokens: i64, space: usize, program_id: Pubkey) -> Account {
Account {
tokens,
userdata: vec![0u8; space],
program_id,
}
}
}
#[derive(Debug)]
pub struct KeyedAccount<'a> {
pub key: &'a Pubkey,
pub account: &'a mut Account,
}

View File

@ -5,17 +5,23 @@ Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification.
To interact with a Solana node inside a JavaScript application, use the [solana-web3.js](https://github.com/solana-labs/solana-web3.js) library, which gives a convenient interface for the RPC methods.
RPC Endpoint
RPC HTTP Endpoint
---
**Default port:** 8899
eg. http://localhost:8899, http://192.168.1.88:8899
RPC PubSub WebSocket Endpoint
---
**Default port:** 8900
eg. ws://localhost:8900, ws://192.168.1.88:8900
Methods
---
* [confirmTransaction](#confirmtransaction)
* [getAddress](#getaddress)
* [getBalance](#getbalance)
* [getAccountInfo](#getaccountinfo)
* [getLastId](#getlastid)
@ -23,6 +29,13 @@ Methods
* [getTransactionCount](#gettransactioncount)
* [requestAirdrop](#requestairdrop)
* [sendTransaction](#sendtransaction)
* [startSubscriptionChannel](#startsubscriptionchannel)
* [Subscription Websocket](#subscription-websocket)
* [accountSubscribe](#accountsubscribe)
* [accountUnsubscribe](#accountunsubscribe)
* [signatureSubscribe](#signaturesubscribe)
* [signatureUnsubscribe](#signatureunsubscribe)
Request Formatting
---
@ -155,6 +168,7 @@ events.
* `Confirmed` - Transaction was successful
* `SignatureNotFound` - Unknown transaction
* `ProgramRuntimeError` - An error occurred in the program that processed this Transaction
* `AccountInUse` - Another Transaction held a write lock on one of the Accounts specified in this Transaction. The Transaction may succeed if retried
* `GenericFailure` - Some other error occurred. **Note**: In the future new Transaction statuses may be added to this list. It's safe to assume that all new statuses will be more specific error conditions that previously presented as `GenericFailure`
##### Example:
@ -227,3 +241,99 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
```
---
### Subscription Websocket
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
- Submit subscription requests to the websocket using the methods below
- Multiple subscriptions may be active at once
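
One convenient way to drive the endpoint by hand is a generic websocket client; a minimal sketch, assuming the third-party `wscat` tool (not part of this repo) and a locally running node:

```bash
# Connect to the PubSub port, then paste a request such as:
# {"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
wscat -c ws://localhost:8900
```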
---
### accountSubscribe
Subscribe to an account to receive notifications when the userdata for a given account public key changes
##### Parameters:
* `string` - account Pubkey, as base-58 encoded string
##### Results:
* `integer` - Subscription id (needed to unsubscribe)
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```
##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
---
### accountUnsubscribe
Unsubscribe from account userdata change notifications
##### Parameters:
* `integer` - id of account Subscription to cancel
##### Results:
* `bool` - unsubscribe success message
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountUnsubscribe", "params":[0]}
// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
---
### signatureSubscribe
Subscribe to a transaction signature to receive notification when the transaction is confirmed
On `signatureNotification`, the subscription is automatically cancelled
##### Parameters:
* `string` - Transaction Signature, as base-58 encoded string
##### Results:
* `integer` - subscription id (needed to unsubscribe)
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}
// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```
##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
```
---
### signatureUnsubscribe
Unsubscribe from signature confirmation notifications
##### Parameters:
* `integer` - id of signature subscription to cancel
##### Results:
* `bool` - unsubscribe success message
##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureUnsubscribe", "params":[0]}
// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```

View File

@ -4,14 +4,18 @@ Currently we have four testnets:
* `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
* `testnet-perf` - private beta channel testnet with clients trying to flood the network
with transactions until failure. Runs 24/7
* `testnet-master` - private edge channel testnet with clients trying to flood the network
* `testnet-master` - public edge channel testnet accessible via master.testnet.solana.com. Runs 24/7
* `testnet-master-perf` - private edge channel testnet with clients trying to flood the network
with transactions until failure. Runs on weekday mornings for a couple hours
## Deploy process
They are deployed with the `ci/testnet-deploy.sh` script. There is a scheduled buildkite job which runs to do the deploy,
look at `testnet-deploy` to see the agent which ran it and the logs. There is also a job to trigger the deploy manually.
Validators are selected based on their machine name and everyone gets the binaries installed from snap.
They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
Each testnet can be manually manipulated from buildkite as well. The `-perf`
testnets use a release tarball while the non`-perf` builds use the snap build
(we've observed that the snap build runs slower than a tarball but this has yet
to be root caused).
## Where are the testnet logs?
@ -29,7 +33,8 @@ $ net/ssh.sh
for log location details
## How do I reset the testnet?
Manually trigger the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) pipeline
Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
and when prompted select the desired testnet
## How can I scale the tx generation rate?
@ -43,5 +48,5 @@ Currently, a merged PR is the only way to test a change on the testnet. But you
can run your own testnet using the scripts in the `net/` directory.
## Adjusting the number of clients or validators on the testnet
Through the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) settings.
Edit `ci/testnet-manager.sh`

View File

@ -15,7 +15,7 @@ mkdir -p target/perf-libs
cd target/perf-libs
(
set -x
curl https://solana-perf.s3.amazonaws.com/v0.9.0/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
curl https://solana-perf.s3.amazonaws.com/v0.10.3/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
)
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then

metrics/README.md Normal file
View File

@ -0,0 +1,39 @@
# Metrics
## Testnet Grafana Dashboard
There are three versions of the testnet dashboard, corresponding to the three
release channels:
* https://metrics.solana.com:3000/d/testnet-edge/testnet-monitor-edge
* https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta
* https://metrics.solana.com:3000/d/testnet/testnet-monitor
The dashboard for each channel is defined from the
`metrics/testnet-monitor.json` source file in the git branch associated with
that channel, and deployed by automation running `ci/publish-metrics-dashboard.sh`.
A deploy can be triggered at any time via the `New Build` button of
https://buildkite.com/solana-labs/publish-metrics-dashboard.
### Modifying a Dashboard
Dashboard updates are accomplished by modifying `metrics/testnet-monitor.json`;
**manual edits made directly in Grafana will be overwritten**.
1. Open the desired dashboard in Grafana
2. Create a development copy of the dashboard by selecting `Save As..` in the
`Settings` menu for the dashboard
3. Edit dashboard as desired
4. Extract the JSON Model by selecting `JSON Model` in the `Settings` menu. Copy the JSON to the clipboard
and paste into `metrics/testnet-monitor.json`
5. Delete your development dashboard: `Settings` => `Delete`
### Deploying a Dashboard Manually
If you need to immediately deploy a dashboard using the contents of
`metrics/testnet-monitor.json` in your local workspace,
```
$ export GRAFANA_API_TOKEN="an API key from https://metrics.solana.com:3000/org/apikeys"
$ metrics/publish-metrics-dashboard.sh (edge|beta|stable)
```
Note that automation will eventually overwrite your manual deploy.

View File

@ -0,0 +1,69 @@
#!/usr/bin/env python3
#
# Adjusts the testnet monitor dashboard for the specified release channel
#
import sys
import json
if len(sys.argv) != 3:
print('Error: Dashboard or Channel not specified')
sys.exit(1)
dashboard_json = sys.argv[1]
channel = sys.argv[2]
if channel not in ['edge', 'beta', 'stable']:
print('Error: Unknown channel:', channel)
sys.exit(2)
with open(dashboard_json, 'r') as read_file:
data = json.load(read_file)
if channel == 'stable':
# Stable dashboard only allows the user to select between the stable
# testnet databases
data['title'] = 'Testnet Monitor'
data['uid'] = 'testnet'
data['templating']['list'] = [{'allValue': None,
'current': {'text': 'testnet',
'value': 'testnet'},
'hide': 1,
'includeAll': False,
'label': 'Testnet',
'multi': False,
'name': 'testnet',
'options': [{'selected': False,
'text': 'testnet',
'value': 'testnet'},
{'selected': True,
'text': 'testnet-perf',
'value': 'testnet-perf'}],
'query': 'testnet,testnet-perf',
'type': 'custom'}]
else:
# Non-stable dashboard only allows the user to select between all testnet
# databases
data['title'] = 'Testnet Monitor ({})'.format(channel)
data['uid'] = 'testnet-' + channel
data['templating']['list'] = [{'allValue': None,
'current': {'text': 'testnet',
'value': 'testnet'},
'datasource': 'Solana Metrics (read-only)',
'hide': 1,
'includeAll': False,
'label': 'Testnet',
'multi': False,
'name': 'testnet',
'options': [],
'query': 'show databases',
'refresh': 1,
'regex': 'testnet.*',
'sort': 1,
'tagValuesQuery': '',
'tags': [],
'tagsQuery': '',
'type': 'query',
'useTags': False}]
with open(dashboard_json, 'w') as write_file:
json.dump(data, write_file, indent=2)

metrics/grafcli.conf Normal file
View File

@ -0,0 +1,15 @@
[grafcli]
editor = vim
mergetool = vimdiff
verbose = on
force = on
[resources]
[hosts]
metrics = on
[metrics]
type = api
url = https://metrics.solana.com:3000/api
ssl = off

View File

@ -0,0 +1,71 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "$0")"
CHANNEL=$1
if [[ -z $CHANNEL ]]; then
echo "usage: $0 [channel]"
exit 1
fi
case $CHANNEL in
edge)
DASHBOARD=testnet-monitor-edge
;;
beta)
DASHBOARD=testnet-monitor-beta
;;
stable)
DASHBOARD=testnet-monitor
;;
*)
echo "Error: Invalid CHANNEL=$CHANNEL"
exit 1
;;
esac
if [[ -z $GRAFANA_API_TOKEN ]]; then
echo Error: GRAFANA_API_TOKEN not defined
exit 1
fi
DASHBOARD_JSON=./testnet-monitor.json
if [[ ! -r $DASHBOARD_JSON ]]; then
echo Error: $DASHBOARD_JSON not found
exit 1
fi
(
set -x
./adjust-dashboard-for-channel.py "$DASHBOARD_JSON" "$CHANNEL"
)
rm -rf venv
python3 -m venv venv
# shellcheck source=/dev/null
source venv/bin/activate
echo --- Fetch/build grafcli
(
set -x
git clone git@github.com:mvines/grafcli.git -b experimental-v5 venv/grafcli
cd venv/grafcli
python3 setup.py install
)
echo --- Take a backup of existing dashboard if possible
(
set -x +e
grafcli export remote/metrics/$DASHBOARD $DASHBOARD_JSON.org
grafcli rm remote/metrics/$DASHBOARD
:
)
echo --- Publish $DASHBOARD_JSON to $DASHBOARD
(
set -x
grafcli import $DASHBOARD_JSON remote/metrics
)
exit 0

metrics/testnet-monitor.json Normal file

File diff suppressed because it is too large

View File

@ -49,8 +49,6 @@ elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
declare program="$1"
printf "solana-%s" "$program"
}
# CUDA was/wasn't selected at build time, can't affect CUDA state here
unset SOLANA_CUDA
else
solana_program() {
declare program="$1"
@ -104,16 +102,16 @@ tune_networking() {
# test the existence of the sysctls before trying to set them
# go ahead and return true and don't exit if these calls fail
sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.rmem_max=67108864 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.rmem_max=1610612736 1>/dev/null 2>/dev/null
sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.rmem_default=1610612736 1>/dev/null 2>/dev/null
sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.wmem_max=67108864 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.wmem_max=1610612736 1>/dev/null 2>/dev/null
sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.wmem_default=26214400 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.wmem_default=1610612736 1>/dev/null 2>/dev/null
) || true
fi

View File

@ -11,23 +11,21 @@ gce)
# shellcheck source=net/scripts/gce-provider.sh
source "$here"/scripts/gce-provider.sh
imageName="ubuntu-16-04-cuda-9-2-new"
cpuLeaderMachineType=n1-standard-16
gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
leaderMachineType=$cpuLeaderMachineType
validatorMachineType=n1-standard-4
validatorMachineType=n1-standard-16
clientMachineType=n1-standard-16
;;
ec2)
# shellcheck source=net/scripts/ec2-provider.sh
source "$here"/scripts/ec2-provider.sh
imageName="ami-0466e26ccc0e752c1"
cpuLeaderMachineType=m4.4xlarge
gpuLeaderMachineType=p2.xlarge
leaderMachineType=$cpuLeaderMachineType
validatorMachineType=m4.xlarge
clientMachineType=m4.4xlarge
validatorMachineType=m4.2xlarge
clientMachineType=m4.2xlarge
;;
*)
echo "Error: Unknown cloud provider: $cloudProvider"
@ -118,7 +116,7 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
;;
g)
enableGpu=true
leaderMachineType="$gpuLeaderMachineType"
leaderMachineType=$gpuLeaderMachineType
;;
G)
enableGpu=true
@ -131,14 +129,53 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
bootDiskType=$OPTARG
;;
*)
usage "Error: unhandled option: $opt"
usage "unhandled option: $opt"
;;
esac
done
shift $((OPTIND - 1))
[[ -z $1 ]] || usage "Unexpected argument: $1"
sshPrivateKey="$netConfigDir/id_$prefix"
if [[ $cloudProvider = ec2 ]]; then
# EC2 keys can't be retrieved from running instances like GCE keys can, so save
# EC2 keys in the user's home directory so that |./ec2.sh config| can at least
# be used on the same host that ran |./ec2.sh create|.
sshPrivateKey="$HOME/.ssh/solana-net-id_$prefix"
else
sshPrivateKey="$netConfigDir/id_$prefix"
fi
case $cloudProvider in
gce)
if $enableGpu; then
# TODO: GPU image is still 16.04-based pending resolution of
# https://github.com/solana-labs/solana/issues/1702
imageName="ubuntu-16-04-cuda-9-2-new"
else
imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
fi
;;
ec2)
# Deep Learning AMI (Ubuntu 16.04-based)
case $region in # (region global variable is set by cloud_SetZone)
us-east-1)
imageName="ami-047daf3f2b162fc35"
;;
us-west-1)
imageName="ami-08c8c7c4a57a6106d"
;;
us-west-2)
imageName="ami-0b63040ee445728bf"
;;
*)
usage "Unsupported region: $region"
;;
esac
;;
*)
echo "Error: Unknown cloud provider: $cloudProvider"
;;
esac
# cloud_ForEachInstance [cmd] [extra args to cmd]
@ -206,13 +243,18 @@ EOF
echo "Waiting for $name to finish booting..."
(
for i in $(seq 1 30); do
if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.instance-startup-complete"); then
break
set -x +e
for i in $(seq 1 60); do
timeout 20s ssh "${sshOptions[@]}" "$publicIp" "ls -l /.instance-startup-complete"
ret=$?
if [[ $ret -eq 0 ]]; then
exit 0
fi
sleep 2
echo "Retry $i..."
done
echo "$name failed to boot."
exit 1
)
echo "$name has booted."
}
@ -230,7 +272,7 @@ EOF
IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")
# Try to ping the machine first.
timeout 60s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
timeout 90s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
if [[ ! -r $sshPrivateKey ]]; then
echo "Fetching $sshPrivateKey from $leaderName"
@ -376,6 +418,10 @@ $(
install-earlyoom.sh \
install-libssl-compatability.sh \
install-rsync.sh \
network-config.sh \
remove-docker-interface.sh \
update-default-cuda.sh \
)
cat > /etc/motd <<EOM

View File

@ -23,10 +23,14 @@ Operate a configured testnet
restart - Shortcut for stop then start
start-specific options:
-S [snapFilename] - Deploy the specified Snap file
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
-f [cargoFeatures] - List of |cargo --features=| to activate
(ignored if -s or -S is specified)
-S [snapFilename] - Deploy the specified Snap file
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
-T [tarFilename] - Deploy the specified release tarball
-t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
specified release channel (edge|beta|stable) or release tag
(vX.Y.Z)
-f [cargoFeatures] - List of |cargo --features=| to activate
(ignored if -s or -S is specified)
Note: if RUST_LOG is set in the environment it will be propagated into the
network nodes.
@ -44,6 +48,7 @@ EOF
}
snapChannel=
releaseChannel=
snapFilename=
deployMethod=local
sanityExtraArgs=
@ -53,7 +58,7 @@ command=$1
[[ -n $command ]] || usage
shift
while getopts "h?S:s:o:f:" opt; do
while getopts "h?S:s:T:t:o:f:" opt; do
case $opt in
h | \?)
usage
@ -74,6 +79,22 @@ while getopts "h?S:s:o:f:" opt; do
;;
esac
;;
T)
tarballFilename=$OPTARG
[[ -f $tarballFilename ]] || usage "Tarball not readable: $tarballFilename"
deployMethod=tar
;;
t)
case $OPTARG in
edge|beta|stable|v*)
releaseChannel=$OPTARG
deployMethod=tar
;;
*)
usage "Invalid release channel: $OPTARG"
;;
esac
;;
f)
cargoFeatures=$OPTARG
;;
@ -110,6 +131,7 @@ build() {
set -x
rm -rf farf
$MAYBE_DOCKER cargo install --features="$cargoFeatures" --root farf
./scripts/install-native-programs.sh farf/
)
echo "Build took $SECONDS seconds"
}
@ -138,6 +160,9 @@ startLeader() {
snap)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$snapFilename" "$ipAddress:~/solana/solana.snap"
;;
tar)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
;;
local)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
;;
@ -159,7 +184,7 @@ startValidator() {
declare ipAddress=$1
declare logFile="$netLogDir/validator-$ipAddress.log"
echo "--- Starting validator: $leaderIp"
echo "--- Starting validator: $ipAddress"
echo "start log: $logFile"
(
set -x
@ -181,7 +206,7 @@ startClient() {
set -x
startCommon "$ipAddress"
ssh "${sshOptions[@]}" -f "$ipAddress" \
"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp $expectedNodeCount \"$RUST_LOG\""
"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \"$RUST_LOG\""
) >> "$logFile" 2>&1 || {
cat "$logFile"
echo "^^^ +++"
@ -196,10 +221,11 @@ sanity() {
echo "--- Sanity"
$metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"
declare host=$leaderIp # TODO: maybe use ${validatorIpList[0]} ?
(
set -x
# shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
ssh "${sshOptions[@]}" "$leaderIp" \
ssh "${sshOptions[@]}" "$host" \
"./solana/net/remote/remote-sanity.sh $sanityExtraArgs"
) || ok=false
@ -219,13 +245,17 @@ start() {
set -ex;
apt-get -qq update;
apt-get -qq -y install snapd;
snap download --channel=$snapChannel solana;
until snap download --channel=$snapChannel solana; do
sleep 1;
done
"
)
else
(
cd "$SOLANA_ROOT"
snap download --channel="$snapChannel" solana
until snap download --channel="$snapChannel" solana; do
sleep 1
done
)
fi
snapFilename="$(echo "$SOLANA_ROOT"/solana_*.snap)"
@ -235,6 +265,17 @@ start() {
}
fi
;;
tar)
if [[ -n $releaseChannel ]]; then
rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
cd "$SOLANA_ROOT"
set -x
curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
tarballFilename=solana-release.tar.bz2
fi
tar jxvf $tarballFilename
;;
local)
build
;;
@ -286,15 +327,28 @@ start() {
clientDeployTime=$SECONDS
$metricsWriteDatapoint "testnet-deploy net-start-complete=1"
if [[ $deployMethod = "snap" ]]; then
declare networkVersion=unknown
declare networkVersion=unknown
case $deployMethod in
snap)
IFS=\ read -r _ networkVersion _ < <(
ssh "${sshOptions[@]}" "$leaderIp" \
"snap info solana | grep \"^installed:\""
)
networkVersion=${networkVersion/0+git./}
$metricsWriteDatapoint "testnet-deploy version=\"$networkVersion\""
fi
;;
tar)
networkVersion="$(
tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown"
)"
;;
local)
networkVersion="$(git rev-parse HEAD || echo local-unknown)"
;;
*)
usage "Internal error: invalid deployMethod: $deployMethod"
;;
esac
$metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""
echo
echo "+++ Deployment Successful"

View File

@ -6,8 +6,7 @@ echo "$(date) | $0 $*" > client.log
deployMethod="$1"
entrypointIp="$2"
numNodes="$3"
RUST_LOG="$4"
RUST_LOG="$3"
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
missing() {
@ -17,7 +16,6 @@ missing() {
[[ -n $deployMethod ]] || missing deployMethod
[[ -n $entrypointIp ]] || missing entrypointIp
[[ -n $numNodes ]] || missing numNodes
source net/common.sh
loadConfigFile
@ -35,7 +33,7 @@ snap)
solana_bench_tps=/snap/bin/solana.bench-tps
solana_keygen=/snap/bin/solana.keygen
;;
local)
local|tar)
PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1
export SOLANA_DEFAULT_METRICS_RATE=1
@ -58,8 +56,7 @@ clientCommand="\
$solana_bench_tps \
--network $entrypointIp:8001 \
--identity client.json \
--num-nodes $numNodes \
--duration 600 \
--duration 7500 \
--sustained \
--threads $threadCount \
"

View File

@ -35,7 +35,6 @@ else
setupArgs="-l"
fi
case $deployMethod in
snap)
SECONDS=0
@ -43,12 +42,13 @@ snap)
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/solana/solana.snap" .
sudo snap install solana.snap --devmode --dangerous
# shellcheck disable=SC2089
commonNodeConfig="\
leader-ip=$entrypointIp \
leader-ip=\"$entrypointIp\" \
default-metrics-rate=1 \
metrics-config=$SOLANA_METRICS_CONFIG \
rust-log=$RUST_LOG \
setup-args=$setupArgs \
metrics-config=\"$SOLANA_METRICS_CONFIG\" \
rust-log=\"$RUST_LOG\" \
setup-args=\"$setupArgs\" \
"
if [[ -e /dev/nvidia0 ]]; then
@ -67,7 +67,7 @@ snap)
logmarker="solana deploy $(date)/$RANDOM"
logger "$logmarker"
# shellcheck disable=SC2086 # Don't want to double quote "$nodeConfig"
# shellcheck disable=SC2086,SC2090 # Don't want to double quote "$nodeConfig"
sudo snap set solana $nodeConfig
snap info solana
sudo snap get solana
@ -77,20 +77,25 @@ snap)
echo "Succeeded in ${SECONDS} seconds"
;;
local)
local|tar)
PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1
export RUST_LOG
export SOLANA_DEFAULT_METRICS_RATE=1
./fetch-perf-libs.sh
export LD_LIBRARY_PATH="$PWD/target/perf-libs:$LD_LIBRARY_PATH"
export LD_LIBRARY_PATH="$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
scripts/net-stats.sh > net-stats.log 2>&1 &
case $nodeType in
leader)
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
echo Selecting solana-fullnode-cuda
export SOLANA_CUDA=1
fi
./multinode-demo/setup.sh -t leader $setupArgs
./multinode-demo/drone.sh > drone.log 2>&1 &
./multinode-demo/leader.sh > leader.log 2>&1 &
@ -98,6 +103,11 @@ local)
validator)
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
echo Selecting solana-fullnode-cuda
export SOLANA_CUDA=1
fi
./multinode-demo/setup.sh -t validator $setupArgs
./multinode-demo/validator.sh "$entrypointIp":~/solana "$entrypointIp:8001" >validator.log 2>&1 &
;;

View File

@ -65,7 +65,7 @@ snap)
client_id=~/snap/solana/current/config/client-id.json
;;
local)
local|tar)
PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1
entrypointRsyncUrl="$entrypointIp:~/solana"

View File

@ -31,11 +31,7 @@ __cloud_FindInstances() {
declare name zone publicIp privateIp status
while read -r name publicIp privateIp status; do
if [[ $status != RUNNING ]]; then
echo "Warning: $name is not RUNNING, ignoring it."
continue
fi
printf "%-30s | publicIp=%-16s privateIp=%s\n" "$name" "$publicIp" "$privateIp"
printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"
instances+=("$name:$publicIp:$privateIp")
done < <(gcloud compute instances list \
@ -132,6 +128,9 @@ cloud_CreateInstances() {
--no-restart-on-failure
)
# shellcheck disable=SC2206 # Do not want to quote $imageName as it may contain extra args
args+=(--image $imageName)
# shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
args+=(--machine-type $machineType)
if [[ -n $optionalBootDiskSize ]]; then

View File

@ -13,8 +13,8 @@ sysctl -w kernel.sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
if command -v earlyoom; then
systemctl status earlyoom
else
wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb
wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.2-*_amd64.deb' -e robots=off -nd
apt install --quiet --yes ./earlyoom_1.2-*_amd64.deb
cat > earlyoom <<OOM
# use the kernel OOM killer, trigger at 20% available RAM,

View File

@ -12,7 +12,6 @@ apt-get --assume-yes install libssl-dev
#
# cc: https://github.com/solana-labs/solana/issues/1090
# cc: https://packages.ubuntu.com/bionic/amd64/libssl1.1/download
wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
dpkg -i libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
rm libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
dpkg -i libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
rm libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb

net/scripts/network-config.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash -ex
#
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
sudo sysctl -w net.core.rmem_default=1610612736
sudo sysctl -w net.core.rmem_max=1610612736
sudo sysctl -w net.core.wmem_default=1610612736
sudo sysctl -w net.core.wmem_max=1610612736

View File

@ -0,0 +1,11 @@
#!/bin/bash -ex
#
# Some instances have docker running, and the docker0 network interface
# confuses gossip, causing airdrops to fail. As a workaround for now, simply
# remove the docker0 interface.
#
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
ip link delete docker0 || true

View File

@ -0,0 +1,9 @@
#!/bin/bash -ex
#
# Updates the default cuda symlink to the supported version
#
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
ln -sfT /usr/local/cuda-9.2 /usr/local/cuda

programs/bpf/c/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/out/

programs/bpf/c/makefile Normal file
View File

@ -0,0 +1 @@
include sdk/bpf.mk

View File

@ -0,0 +1,63 @@
## Prerequisites
### LLVM / clang 7.0.0
http://releases.llvm.org/download.html
### Linux Ubuntu 16.04 (xenial)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```
### Linux Ubuntu 14.04 (trusty)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```
### macOS
The following depends on Homebrew; instructions for installing Homebrew are at https://brew.sh
Once Homebrew is installed, ensure the latest llvm is installed:
```
$ brew update # <- ensure your brew is up to date
$ brew install llvm # <- should output “Warning: llvm 7.0.0 is already installed and up-to-date”
$ brew --prefix llvm # <- should output “/usr/local/opt/llvm”
```
## Development
### Quick start
To get started create a `makefile` containing:
```make
include path/to/bpf.mk
```
and `src/program.c` containing:
```c
#include <solana_sdk.h>
bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[1];
const uint8_t *data;
uint64_t data_len;
if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
return false;
}
sol_log_params(1, ka, data, data_len);
return true;
}
```
Then run `make` to build `out/program.o`.
Run `make help` for more details.
### Limitations
* Programs must be fully contained within a single .c file
* No libc is available but `solana_sdk.h` provides a minimal set of
primitives.

115
programs/bpf/c/sdk/bpf.mk Normal file
View File

@ -0,0 +1,115 @@
all:
.PHONY: help all clean
ifneq ($(V),1)
_@ :=@
endif
INC_DIRS ?=
SRC_DIR ?= ./src
OUT_DIR ?= ./out
OS=$(shell uname)
ifeq ($(OS),Darwin)
LLVM_DIR ?= $(shell brew --prefix llvm)
endif
ifdef LLVM_DIR
CC := $(LLVM_DIR)/bin/clang
LLC := $(LLVM_DIR)/bin/llc
OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump
else
CC := clang-7
LLC := llc-7
OBJ_DUMP := llvm-objdump-7
endif
SYSTEM_INC_DIRS := -isystem $(dir $(lastword $(MAKEFILE_LIST)))inc
CC_FLAGS := \
-Werror \
-target bpf \
-O2 \
-emit-llvm \
-fno-builtin
LLC_FLAGS := \
-march=bpf \
-filetype=obj
OBJ_DUMP_FLAGS := \
-color \
-source \
-disassemble
help:
@echo 'BPF Program makefile'
@echo ''
@echo 'This makefile will build BPF Programs from C source files into ELFs'
@echo ''
@echo 'Assumptions:'
@echo ' - Programs are a single .c source file (may include headers)'
@echo ' - Programs are located in the source directory: $(SRC_DIR)'
@echo ' - Programs are named by their basename (e.g. file name:foo.c -> program name:foo)'
@echo ' - Output files will be placed in the directory: $(OUT_DIR)'
@echo ''
@echo 'User settings'
@echo ' - The following settings are overridable on the command line, default values shown:'
@echo ' - Show commands while building:'
@echo ' V=1'
@echo ' - List of include directories:'
@echo ' INC_DIRS=$(INC_DIRS)'
@echo ' - List of system include directories:'
@echo ' SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)'
@echo ' - Location of source files:'
@echo ' SRC_DIR=$(SRC_DIR)'
@echo ' - Location to place output files:'
@echo ' OUT_DIR=$(OUT_DIR)'
@echo ' - Location of LLVM:'
@echo ' LLVM_DIR=$(LLVM_DIR)'
@echo ''
@echo 'Usage:'
@echo ' - make help - This help message'
@echo ' - make all - Builds all the programs in the directory: $(SRC_DIR)'
@echo ' - make clean - Cleans all programs'
@echo ' - make dump_<program name> - Dumps the contents of the program to stdout'
@echo ' - make <program name> - Build a single program by name'
@echo ''
@echo 'Available programs:'
$(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n))
@echo ''
@echo 'Example:'
@echo ' - Assuming a program named foo (src/foo.c)'
@echo ' - make foo'
@echo ' - make dump_foo'
.PRECIOUS: $(OUT_DIR)/%.bc
$(OUT_DIR)/%.bc: $(SRC_DIR)/%.c
@echo "[cc] $@ ($<)"
$(_@)mkdir -p $(OUT_DIR)
$(_@)$(CC) $(CC_FLAGS) $(SYSTEM_INC_DIRS) $(INC_DIRS) -o $@ -c $< -MD -MF $(@:.bc=.d)
.PRECIOUS: $(OUT_DIR)/%.o
$(OUT_DIR)/%.o: $(OUT_DIR)/%.bc
@echo "[llc] $@ ($<)"
$(_@)$(LLC) $(LLC_FLAGS) -o $@ $<
-include $(wildcard $(OUT_DIR)/*.d)
PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*.c)))
define \n
endef
all: $(PROGRAM_NAMES)
%: $(addprefix $(OUT_DIR)/, %.o) ;
dump_%: %
$(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .o, $<))
clean:
rm -rf $(OUT_DIR)

View File

@ -0,0 +1,298 @@
#pragma once
/**
* @brief Solana C-based BPF program utility functions and types
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* Numeric types
*/
#ifndef __LP64__
#error LP64 data model required
#endif
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed long int int64_t;
typedef unsigned long int uint64_t;
/**
* NULL
*/
#define NULL 0
/**
* Boolean type
*/
typedef enum { false = 0, true } bool;
/**
* Helper function that prints a string to stdout
*/
extern void sol_log(const char*);
/**
* Helper function that prints 64-bit values represented in hexadecimal
* to stdout
*/
extern void sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
/**
* Prefix for all BPF functions
*
* This prefix should be used for functions in order to facilitate
* interoperability with BPF representation
*/
#define SOL_FN_PREFIX __attribute__((always_inline)) static
/**
* Size of Public key in bytes
*/
#define SIZE_PUBKEY 32
/**
* Public key
*/
typedef struct {
uint8_t x[SIZE_PUBKEY];
} SolPubkey;
/**
* Compares two public keys
*
* @param one First public key
* @param two Second public key
* @return true if the same
*/
SOL_FN_PREFIX bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) {
for (int i = 0; i < sizeof(*one); i++) {
if (one->x[i] != two->x[i]) {
return false;
}
}
return true;
}
/**
* Keyed Accounts
*/
typedef struct {
SolPubkey *key; /** Public Key of the account owner */
int64_t *tokens; /** Number of tokens owned by this account */
uint64_t userdata_len; /** Length of userdata in bytes */
uint8_t *userdata; /** On-chain data owned by this account */
SolPubkey *program_id; /** Program that owns this account */
} SolKeyedAccounts;
/**
* Copies memory
*/
SOL_FN_PREFIX void sol_memcpy(void *dst, const void *src, int len) {
for (int i = 0; i < len; i++) {
*((uint8_t *)dst + i) = *((const uint8_t *)src + i);
}
}
/**
* Compares memory
*/
SOL_FN_PREFIX int sol_memcmp(const void *s1, const void *s2, int n) {
for (int i = 0; i < n; i++) {
uint8_t diff = *((uint8_t *)s1 + i) - *((const uint8_t *)s2 + i);
if (diff) {
return diff;
}
}
return 0;
}
/**
* Computes the number of elements in an array
*/
#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
/**
* Panics
*
* Prints the line number where the panic occurred and then causes
* the BPF VM to immediately halt execution. No accounts' userdata are updated
*/
#define sol_panic() _sol_panic(__LINE__)
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
sol_log_64(0xFF, 0xFF, 0xFF, 0xFF, line);
uint8_t *pv = (uint8_t *)1;
*pv = 1;
}
/**
* Asserts
*/
#define sol_assert(expr) \
if (!(expr)) { \
_sol_panic(__LINE__); \
}
/**
* De-serializes the input parameters into usable types
*
* Use this function to deserialize the buffer passed to the program entrypoint
* into usable types. This function does not perform copy deserialization,
* instead it populates the pointers and lengths in SolKeyedAccounts and data so
* that any modifications to tokens or account data take place on the original
* buffer. Doing so also eliminates the need to serialize back into the buffer
* at program end.
*
* @param input Source buffer containing serialized input parameters
* @param ka Pointer to an array of SolKeyedAccounts to deserialize into
* @param ka_len Number of SolKeyedAccounts entries in `ka`
* @param ka_len_out If NULL, fill exactly `ka_len` accounts or fail.
* If not NULL, fill up to `ka_len` accounts and return the
* number of filled accounts in `ka_len_out`.
* @param data On return, a pointer to the instruction data
* @param data_len On return, the length in bytes of the instruction data
* @return Boolean true if successful
*/
SOL_FN_PREFIX bool sol_deserialize(
const uint8_t *input,
SolKeyedAccounts *ka,
uint64_t ka_len,
uint64_t *ka_len_out,
const uint8_t **data,
uint64_t *data_len
) {
if (ka_len_out == NULL) {
if (ka_len != *(uint64_t *) input) {
return false;
}
ka_len = *(uint64_t *) input;
} else {
if (ka_len > *(uint64_t *) input) {
ka_len = *(uint64_t *) input;
}
*ka_len_out = ka_len;
}
input += sizeof(uint64_t);
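// Each account that follows is serialized as:
//   key (32 bytes) | tokens (8) | userdata_len (8) | userdata | program_id (32)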
for (int i = 0; i < ka_len; i++) {
// key
ka[i].key = (SolPubkey *) input;
input += sizeof(SolPubkey);
// tokens
ka[i].tokens = (int64_t *) input;
input += sizeof(int64_t);
// account userdata
ka[i].userdata_len = *(uint64_t *) input;
input += sizeof(uint64_t);
ka[i].userdata = (uint8_t *) input;
input += ka[i].userdata_len;
// program_id
ka[i].program_id = (SolPubkey *) input;
input += sizeof(SolPubkey);
}
// input data
*data_len = *(uint64_t *) input;
input += sizeof(uint64_t);
*data = input;
return true;
}
/**
* Debugging utilities
* @{
*/
/**
* Prints the hexadecimal representation of a public key
*
* @param key The public key to print
*/
SOL_FN_PREFIX void sol_log_key(const SolPubkey *key) {
for (int j = 0; j < sizeof(*key); j++) {
sol_log_64(0, 0, 0, j, key->x[j]);
}
}
/**
* Prints the hexadecimal representation of an array
*
* @param array The array to print
*/
SOL_FN_PREFIX void sol_log_array(const uint8_t *array, int len) {
for (int j = 0; j < len; j++) {
sol_log_64(0, 0, 0, j, array[j]);
}
}
/**
* Prints the hexadecimal representation of the program's input parameters
*
* @param num_ka Number of SolKeyedAccounts to print
* @param ka A pointer to an array of SolKeyedAccounts to print
* @param data A pointer to the instruction data to print
* @param data_len The length in bytes of the instruction data
*/
SOL_FN_PREFIX void sol_log_params(
uint64_t num_ka,
const SolKeyedAccounts *ka,
const uint8_t *data,
uint64_t data_len
) {
sol_log_64(0, 0, 0, 0, num_ka);
for (int i = 0; i < num_ka; i++) {
sol_log_key(ka[i].key);
sol_log_64(0, 0, 0, 0, *ka[i].tokens);
sol_log_array(ka[i].userdata, ka[i].userdata_len);
sol_log_key(ka[i].program_id);
}
sol_log_array(data, data_len);
}
/**@}*/
/**
* Program entrypoint
* @{
*
* The following is an example of a simple program that prints the input
* parameters it received:
*
* bool entrypoint(const uint8_t *input) {
* SolKeyedAccounts ka[1];
* const uint8_t *data;
* uint64_t data_len;
*
* if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
* return false;
* }
* sol_log_params(1, ka, data, data_len);
* return true;
* }
*/
/**
* Program entrypoint signature
*
* @param input An array containing serialized input parameters
* @return true if successful
*/
extern bool entrypoint(const uint8_t *input);
#ifdef __cplusplus
}
#endif
/**@}*/

View File

@ -0,0 +1,32 @@
/**
* @brief Example C-based BPF program that moves funds from one account to
* another
*/
#include <solana_sdk.h>
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*/
#define NUM_KA 3
extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
int64_t tokens = *(int64_t *)data;
if (*ka[0].tokens >= tokens) {
*ka[0].tokens -= tokens;
*ka[2].tokens += tokens;
// sol_log_64(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
} else {
// sol_log_64(0, 0, 0xFF, *ka[0].tokens, tokens);
}
return true;
}

35
programs/bpf/c/src/noop.c Normal file
View File

@ -0,0 +1,35 @@
/**
* @brief Example C-based BPF program that prints out the parameters
* passed to it
*/
#include <solana_sdk.h>
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*/
#define NUM_KA 1
extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
sol_log("noop");
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
sol_log_params(NUM_KA, ka, data, data_len);
sol_assert(sizeof(int8_t) == 1);
sol_assert(sizeof(uint8_t) == 1);
sol_assert(sizeof(int16_t) == 2);
sol_assert(sizeof(uint16_t) == 2);
sol_assert(sizeof(int32_t) == 4);
sol_assert(sizeof(uint32_t) == 4);
sol_assert(sizeof(int64_t) == 8);
sol_assert(sizeof(uint64_t) == 8);
return true;
}

View File

@ -0,0 +1,11 @@
[package]
name = "solana-bpf-noop"
version = "0.10.5"
description = "Solana BPF noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
[dependencies]
rbpf = "0.1.0"
solana-sdk = { path = "../../../../sdk", version = "0.10.5" }

10
programs/bpf/rust/noop/build.sh Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash -ex
# TODO building release flavor with rust produces a bunch of output .bc files
INTERDIR=../../../target/release
OUTDIR="${1:-../../../target/debug/}"
mkdir -p "$OUTDIR"
# cargo +nightly rustc --release -- -C panic=abort --emit=llvm-ir
cargo +nightly rustc --release -- -C panic=abort --emit=llvm-bc
cp "$INTERDIR"/deps/noop_rust-*.bc "$OUTDIR"/noop_rust.bc
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -o "$OUTDIR"/noop_rust.o "$OUTDIR"/noop_rust.bc

3
programs/bpf/rust/noop/dump.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/sh
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble target/release/noop_rust.o

View File

@ -0,0 +1,15 @@
extern crate rbpf;
use std::mem::transmute;
#[no_mangle]
#[link_section = ".text,entrypoint"] // TODO platform independent needed
pub extern "C" fn entrypoint(_raw: *mut u8) {
let bpf_func_trace_printk = unsafe {
transmute::<u64, extern "C" fn(u64, u64, u64, u64, u64)>(
rbpf::helpers::BPF_TRACE_PRINTK_IDX as u64,
)
};
bpf_func_trace_printk(0, 0, 1, 2, 3);
}

View File

@ -1,23 +0,0 @@
[package]
name = "move_funds"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
bincode = "1.0.0"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
solana_program_interface = { path = "../../common" }
[lib]
name = "move_funds"
crate-type = ["dylib"]

View File

@ -1,48 +0,0 @@
extern crate bincode;
extern crate solana_program_interface;
use bincode::deserialize;
use solana_program_interface::account::KeyedAccount;
#[no_mangle]
pub extern "C" fn process(infos: &mut Vec<KeyedAccount>, data: &[u8]) {
let tokens: i64 = deserialize(data).unwrap();
if infos[0].account.tokens >= tokens {
infos[0].account.tokens -= tokens;
infos[1].account.tokens += tokens;
} else {
println!(
"Insufficient funds, asked {}, only had {}",
tokens, infos[0].account.tokens
);
}
}
#[cfg(test)]
mod tests {
use super::*;
use bincode::serialize;
use solana_program_interface::account::Account;
use solana_program_interface::pubkey::Pubkey;
#[test]
fn test_move_funds() {
let tokens: i64 = 100;
let data: Vec<u8> = serialize(&tokens).unwrap();
let keys = vec![Pubkey::default(); 2];
let mut accounts = vec![Account::default(), Account::default()];
accounts[0].tokens = 100;
accounts[1].tokens = 1;
{
let mut infos: Vec<KeyedAccount> = Vec::new();
for (key, account) in keys.iter().zip(&mut accounts).collect::<Vec<_>>() {
infos.push(KeyedAccount { key, account });
}
process(&mut infos, &data);
}
assert_eq!(0, accounts[0].tokens);
assert_eq!(101, accounts[1].tokens);
}
}

View File

@ -0,0 +1,24 @@
[package]
name = "solana-bpfloader"
version = "0.10.5"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
[dependencies]
bincode = "1.0.0"
byteorder = "1.2.1"
elf = "0.0.10"
env_logger = "0.5.12"
libc = "0.2.43"
log = "0.4.2"
solana_rbpf = "0.1.3"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.5" }
[lib]
name = "solana_bpf_loader"
crate-type = ["cdylib"]

View File

@ -0,0 +1,324 @@
use solana_rbpf::ebpf;
use std::io::{Error, ErrorKind};
fn reject<S: AsRef<str>>(msg: S) -> Result<(), Error> {
let full_msg = format!("[Verifier] Error: {}", msg.as_ref());
Err(Error::new(ErrorKind::Other, full_msg))
}
fn check_prog_len(prog: &[u8]) -> Result<(), Error> {
if prog.len() % ebpf::INSN_SIZE != 0 {
reject(format!(
"eBPF program length must be a multiple of {:?} octets",
ebpf::INSN_SIZE
))?;
}
if prog.len() > ebpf::PROG_MAX_SIZE {
reject(format!(
"eBPF program length limited to {:?}, here {:?}",
ebpf::PROG_MAX_INSNS,
prog.len() / ebpf::INSN_SIZE
))?;
}
if prog.is_empty() {
reject("No program set, call prog_set() to load one".to_string())?;
}
// TODO BPF program may deterministically exit even if the last
// instruction in the block is not an exit (might be earlier and jumped to)
// TODO need to validate more intelligently
// let last_insn = ebpf::get_insn(prog, (prog.len() / ebpf::INSN_SIZE) - 1);
// if last_insn.opc != ebpf::EXIT {
// reject("program does not end with “EXIT” instruction".to_string())?;
// }
Ok(())
}
fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Error> {
if insn.imm == 0 {
reject(format!("division by 0 (insn #{:?})", insn_ptr))?;
}
Ok(())
}
fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Error> {
match insn.imm {
16 | 32 | 64 => Ok(()),
_ => reject(format!(
"unsupported argument for LE/BE (insn #{:?})",
insn_ptr
)),
}
}
fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), Error> {
// We know we can reach the next insn since we enforce an EXIT insn at the end of the program, and
// this function is called only for LD_DW insns, which cannot be the last insn in the program.
let next_insn = ebpf::get_insn(prog, insn_ptr + 1);
if next_insn.opc != 0 {
reject(format!(
"incomplete LD_DW instruction (insn #{:?})",
insn_ptr
))?;
}
Ok(())
}
fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), Error> {
let insn = ebpf::get_insn(prog, insn_ptr);
if insn.off == -1 {
reject(format!("infinite loop (insn #{:?})", insn_ptr))?;
}
let dst_insn_ptr = insn_ptr as isize + 1 + insn.off as isize;
if dst_insn_ptr < 0 || dst_insn_ptr as usize >= (prog.len() / ebpf::INSN_SIZE) {
reject(format!(
"jump out of code to #{:?} (insn #{:?})",
dst_insn_ptr, insn_ptr
))?;
}
let dst_insn = ebpf::get_insn(prog, dst_insn_ptr as usize);
if dst_insn.opc == 0 {
reject(format!(
"jump to middle of LD_DW at #{:?} (insn #{:?})",
dst_insn_ptr, insn_ptr
))?;
}
Ok(())
}
fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), Error> {
if insn.src > 10 {
reject(format!("invalid source register (insn #{:?})", insn_ptr))?;
}
match (insn.dst, store) {
(0...9, _) | (10, true) => Ok(()),
(10, false) => reject(format!(
"cannot write into register r10 (insn #{:?})",
insn_ptr
)),
(_, _) => reject(format!(
"invalid destination register (insn #{:?})",
insn_ptr
)),
}
}
pub fn check(prog: &[u8]) -> Result<(), Error> {
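// Validate overall program shape first, then walk the program one
// instruction at a time, checking each opcode and its operands.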
check_prog_len(prog)?;
let mut insn_ptr: usize = 0;
while insn_ptr * ebpf::INSN_SIZE < prog.len() {
let insn = ebpf::get_insn(prog, insn_ptr);
let mut store = false;
match insn.opc {
// BPF_LD class
ebpf::LD_ABS_B => {}
ebpf::LD_ABS_H => {}
ebpf::LD_ABS_W => {}
ebpf::LD_ABS_DW => {}
ebpf::LD_IND_B => {}
ebpf::LD_IND_H => {}
ebpf::LD_IND_W => {}
ebpf::LD_IND_DW => {}
ebpf::LD_DW_IMM => {
store = true;
check_load_dw(prog, insn_ptr)?;
insn_ptr += 1;
}
// BPF_LDX class
ebpf::LD_B_REG => {}
ebpf::LD_H_REG => {}
ebpf::LD_W_REG => {}
ebpf::LD_DW_REG => {}
// BPF_ST class
ebpf::ST_B_IMM => store = true,
ebpf::ST_H_IMM => store = true,
ebpf::ST_W_IMM => store = true,
ebpf::ST_DW_IMM => store = true,
// BPF_STX class
ebpf::ST_B_REG => store = true,
ebpf::ST_H_REG => store = true,
ebpf::ST_W_REG => store = true,
ebpf::ST_DW_REG => store = true,
ebpf::ST_W_XADD => {
unimplemented!();
}
ebpf::ST_DW_XADD => {
unimplemented!();
}
// BPF_ALU class
ebpf::ADD32_IMM => {}
ebpf::ADD32_REG => {}
ebpf::SUB32_IMM => {}
ebpf::SUB32_REG => {}
ebpf::MUL32_IMM => {}
ebpf::MUL32_REG => {}
ebpf::DIV32_IMM => {
check_imm_nonzero(&insn, insn_ptr)?;
}
ebpf::DIV32_REG => {}
ebpf::OR32_IMM => {}
ebpf::OR32_REG => {}
ebpf::AND32_IMM => {}
ebpf::AND32_REG => {}
ebpf::LSH32_IMM => {}
ebpf::LSH32_REG => {}
ebpf::RSH32_IMM => {}
ebpf::RSH32_REG => {}
ebpf::NEG32 => {}
ebpf::MOD32_IMM => {
check_imm_nonzero(&insn, insn_ptr)?;
}
ebpf::MOD32_REG => {}
ebpf::XOR32_IMM => {}
ebpf::XOR32_REG => {}
ebpf::MOV32_IMM => {}
ebpf::MOV32_REG => {}
ebpf::ARSH32_IMM => {}
ebpf::ARSH32_REG => {}
ebpf::LE => {
check_imm_endian(&insn, insn_ptr)?;
}
ebpf::BE => {
check_imm_endian(&insn, insn_ptr)?;
}
// BPF_ALU64 class
ebpf::ADD64_IMM => {}
ebpf::ADD64_REG => {}
ebpf::SUB64_IMM => {}
ebpf::SUB64_REG => {}
ebpf::MUL64_IMM => {
check_imm_nonzero(&insn, insn_ptr)?;
}
ebpf::MUL64_REG => {}
ebpf::DIV64_IMM => {
check_imm_nonzero(&insn, insn_ptr)?;
}
ebpf::DIV64_REG => {}
ebpf::OR64_IMM => {}
ebpf::OR64_REG => {}
ebpf::AND64_IMM => {}
ebpf::AND64_REG => {}
ebpf::LSH64_IMM => {}
ebpf::LSH64_REG => {}
ebpf::RSH64_IMM => {}
ebpf::RSH64_REG => {}
ebpf::NEG64 => {}
ebpf::MOD64_IMM => {}
ebpf::MOD64_REG => {}
ebpf::XOR64_IMM => {}
ebpf::XOR64_REG => {}
ebpf::MOV64_IMM => {}
ebpf::MOV64_REG => {}
ebpf::ARSH64_IMM => {}
ebpf::ARSH64_REG => {}
// BPF_JMP class
ebpf::JA => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JEQ_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JEQ_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JGT_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JGT_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JGE_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JGE_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JLT_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JLT_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JLE_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JLE_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSET_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSET_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JNE_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JNE_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSGT_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSGT_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSGE_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSGE_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSLT_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSLT_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSLE_IMM => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::JSLE_REG => {
check_jmp_offset(prog, insn_ptr)?;
}
ebpf::CALL => {}
ebpf::TAIL_CALL => unimplemented!(),
ebpf::EXIT => {}
_ => {
reject(format!(
"unknown eBPF opcode {:#2x} (insn #{:?})",
insn.opc, insn_ptr
))?;
}
}
check_registers(&insn, store, insn_ptr)?;
insn_ptr += 1;
}
// insn_ptr should now be equal to number of instructions.
if insn_ptr != prog.len() / ebpf::INSN_SIZE {
reject(format!("jumped out of code to #{:?}", insn_ptr))?;
}
Ok(())
}

View File

@ -0,0 +1,230 @@
pub mod bpf_verifier;
extern crate bincode;
extern crate byteorder;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate libc;
extern crate solana_rbpf;
extern crate solana_sdk;
use bincode::deserialize;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use libc::c_char;
use solana_rbpf::EbpfVmRaw;
use solana_sdk::account::KeyedAccount;
use solana_sdk::loader_instruction::LoaderInstruction;
use solana_sdk::pubkey::Pubkey;
use std::ffi::CStr;
use std::io::prelude::*;
use std::io::{Error, ErrorKind};
use std::mem;
use std::sync::{Once, ONCE_INIT};
// TODO use rbpf's disassemble
#[allow(dead_code)]
fn dump_program(key: &Pubkey, prog: &[u8]) {
let mut eight_bytes: Vec<u8> = Vec::new();
info!("BPF Program: {:?}", key);
for i in prog.iter() {
if eight_bytes.len() >= 7 {
info!("{:02X?}", eight_bytes);
eight_bytes.clear();
} else {
eight_bytes.push(i.clone());
}
}
}
#[allow(unused_variables)]
pub fn helper_sol_log_verify(
addr: u64,
unused2: u64,
unused3: u64,
unused4: u64,
unused5: u64,
ro_regions: &[&[u8]],
unused7: &[&[u8]],
) -> Result<(), Error> {
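// Accept only addresses that fall within one of the VM's read-only regions
// and that reach a NUL terminator before the region ends.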
for region in ro_regions.iter() {
if region.as_ptr() as u64 <= addr
&& addr as u64 <= region.as_ptr() as u64 + region.len() as u64
{
let c_buf: *const c_char = addr as *const c_char;
let max_size = (region.as_ptr() as u64 + region.len() as u64) - addr;
unsafe {
for i in 0..max_size {
if std::ptr::read(c_buf.offset(i as isize)) == 0 {
return Ok(());
}
}
}
return Err(Error::new(ErrorKind::Other, "Error, Unterminated string"));
}
}
Err(Error::new(
ErrorKind::Other,
"Error: Load segfault, bad string pointer",
))
}
#[allow(unused_variables)]
pub fn helper_sol_log(addr: u64, unused2: u64, unused3: u64, unused4: u64, unused5: u64) -> u64 {
let c_buf: *const c_char = addr as *const c_char;
let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
match c_str.to_str() {
Ok(slice) => info!("sol_log: {:?}", slice),
Err(_) => warn!("Error: Cannot print invalid string"),
};
0
}
pub fn helper_sol_log_u64(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
info!(
"sol_log_u64: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
arg1, arg2, arg3, arg4, arg5
);
0
}
fn create_vm(prog: &[u8]) -> Result<EbpfVmRaw, Error> {
let mut vm = EbpfVmRaw::new(None)?;
vm.set_verifier(bpf_verifier::check)?;
vm.set_max_instruction_count(36000)?; // 36000 is a wag, need to tune
vm.set_elf(&prog)?;
vm.register_helper_ex("sol_log", Some(helper_sol_log_verify), helper_sol_log)?;
vm.register_helper_ex("sol_log_64", None, helper_sol_log_u64)?;
Ok(vm)
}
fn serialize_parameters(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> Vec<u8> {
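// The layout written here matches what the C SDK's sol_deserialize() walks:
// [u64 account count], then per account: key | tokens | userdata_len |
// userdata | program_id, then [u64 data_len] followed by the instruction data.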
assert_eq!(32, mem::size_of::<Pubkey>());
let mut v: Vec<u8> = Vec::new();
v.write_u64::<LittleEndian>(keyed_accounts.len() as u64)
.unwrap();
for info in keyed_accounts.iter_mut() {
v.write_all(info.key.as_ref()).unwrap();
v.write_i64::<LittleEndian>(info.account.tokens).unwrap();
v.write_u64::<LittleEndian>(info.account.userdata.len() as u64)
.unwrap();
v.write_all(&info.account.userdata).unwrap();
v.write_all(info.account.program_id.as_ref()).unwrap();
}
v.write_u64::<LittleEndian>(data.len() as u64).unwrap();
v.write_all(data).unwrap();
v
}
fn deserialize_parameters(keyed_accounts: &mut [KeyedAccount], buffer: &[u8]) {
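// Only tokens and userdata can change during execution; keys and program
// ids are skipped rather than read back.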
assert_eq!(32, mem::size_of::<Pubkey>());
let mut start = mem::size_of::<u64>();
for info in keyed_accounts.iter_mut() {
start += mem::size_of::<Pubkey>(); // skip pubkey
info.account.tokens = LittleEndian::read_i64(&buffer[start..]);
start += mem::size_of::<u64>() // skip tokens
+ mem::size_of::<u64>(); // skip length tag
let end = start + info.account.userdata.len();
info.account.userdata.clone_from_slice(&buffer[start..end]);
start += info.account.userdata.len() // skip userdata
+ mem::size_of::<Pubkey>(); // skip program_id
}
}
#[no_mangle]
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -> bool {
static INIT: Once = ONCE_INIT;
INIT.call_once(|| {
// env_logger can only be initialized once
env_logger::init();
});
if keyed_accounts[0].account.executable {
let prog = keyed_accounts[0].account.userdata.clone();
trace!("Call BPF, {} instructions", prog.len() / 8);
//dump_program(keyed_accounts[0].key, &prog);
let mut vm = match create_vm(&prog) {
Ok(vm) => vm,
Err(e) => {
warn!("create_vm failed: {}", e);
return false;
}
};
let mut v = serialize_parameters(&mut keyed_accounts[1..], &tx_data);
match vm.execute_program(v.as_mut_slice()) {
Ok(status) => if 0 == status {
return false;
},
Err(e) => {
warn!("execute_program failed: {}", e);
return false;
}
}
deserialize_parameters(&mut keyed_accounts[1..], &v);
trace!(
"BPF program executed {} instructions",
vm.get_last_instruction_count()
);
} else if let Ok(instruction) = deserialize(tx_data) {
match instruction {
LoaderInstruction::Write { offset, bytes } => {
let offset = offset as usize;
let len = bytes.len();
debug!("Write: offset={} length={}", offset, len);
if keyed_accounts[0].account.userdata.len() < offset + len {
warn!(
"Write overflow: {} < {}",
keyed_accounts[0].account.userdata.len(),
offset + len
);
return false;
}
keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes);
}
LoaderInstruction::Finalize => {
keyed_accounts[0].account.executable = true;
info!("Finalize: account {:?}", keyed_accounts[0].key);
}
}
} else {
warn!("Invalid program transaction: {:?}", tx_data);
}
true
}
#[cfg(test)]
mod tests {
use super::*;
use solana_rbpf::helpers;
#[test]
#[should_panic(expected = "Error: Execution exceeded maximum number of instructions")]
fn test_non_terminating_program() {
#[rustfmt::skip]
let prog = &[
0xb7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r6 = 0
0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = 0
0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r2 = 0
0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r4 = 0
0xbf, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r5 = r6
0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call 6
0x07, 0x06, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // r6 + 1
0x05, 0x00, 0xf8, 0xff, 0x00, 0x00, 0x00, 0x00, // goto -8
0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
];
let input = &mut [0x00];
let mut vm = EbpfVmRaw::new(None).unwrap();
vm.set_verifier(bpf_verifier::check).unwrap();
vm.set_max_instruction_count(36000).unwrap(); // 36000 is a wag, need to tune
vm.set_program(prog).unwrap();
vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helpers::bpf_trace_printf)
.unwrap();
vm.execute_program(input).unwrap();
}
}

View File

@ -0,0 +1,24 @@
[package]
name = "solana-lualoader"
version = "0.10.5"
description = "Solana Lua Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
[dependencies]
bincode = "1.0.0"
env_logger = "0.5.12"
log = "0.4.2"
rlua = "0.15.2"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.5" }
[dev-dependencies]
bincode = "1.0.0"
[lib]
name = "solana_lua_loader"
crate-type = ["cdylib"]

View File

@ -0,0 +1,50 @@
-- M-N Multisig. Pass in a table "{m=M, n=N, tokens=T}" where M is the number
-- of signatures required, and N is a list of the pubkeys identifying
-- those signatures. Once M of len(N) signatures are collected, tokens T
-- are subtracted from account 1 and given to account 4. Note that unlike
-- Rust, Lua is one-based and that account 1 is the first account.
function find(t, x)
for i, v in pairs(t) do
if v == x then
return i
end
end
end
function deserialize(bytes)
return load("return" .. bytes)()
end
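-- Account layout (one-based): 1 = payer (from), 2 = serialize library,
-- 3 = program state, 4 = payee (to).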
local from_account,
serialize_account,
state_account,
to_account = table.unpack(accounts)
local serialize = load(serialize_account.userdata)().serialize
if #state_account.userdata == 0 then
local cfg = deserialize(data)
state_account.userdata = serialize(cfg, nil, "s")
return
end
local cfg = deserialize(state_account.userdata)
local key = deserialize(data)
local i = find(cfg.n, key)
if i == nil then
return
end
table.remove(cfg.n, i)
cfg.m = cfg.m - 1
state_account.userdata = serialize(cfg, nil, "s")
if cfg.m == 0 then
from_account.tokens = from_account.tokens - cfg.tokens
to_account.tokens = to_account.tokens + cfg.tokens
-- End of game.
state_account.tokens = 0
end

View File

@ -0,0 +1,174 @@
----------------------------------------------------------------
-- serialize.lua
--
-- Exports:
--
-- orderedPairs : deterministically ordered version of pairs()
--
-- serialize : convert Lua value to string in Lua syntax
--
----------------------------------------------------------------
-- orderedPairs: iterate over table elements in deterministic order. First,
-- array elements are returned, then remaining elements sorted by the key's
-- type and value.
-- compare any two Lua values, establishing a complete ordering
local function ltAny(a,b)
local ta, tb = type(a), type(b)
if ta ~= tb then
return ta < tb
end
if ta == "string" or ta == "number" then
return a < b
end
return tostring(a) < tostring(b)
end
local inext = ipairs{}
local function orderedPairs(t)
local keys = {}
local keyIndex = 1
local counting = true
local function _next(seen, s)
local v
if counting then
-- return next array index
s, v = inext(t, s)
if s ~= nil then
seen[s] = true
return s,v
end
counting = false
-- construct sorted unseen keys
for k,v in pairs(t) do
if not seen[k] then
table.insert(keys, k)
end
end
table.sort(keys, ltAny)
end
-- return next unseen table element
s = keys[keyIndex]
if s ~= nil then
keyIndex = keyIndex + 1
v = t[s]
end
return s, v
end
return _next, {}, 0
end
-- avoid 'nan', 'inf', and '-inf'
local numtostring = {
[tostring(-1/0)] = "-1/0",
[tostring(1/0)] = "1/0",
[tostring(0/0)] = "0/0"
}
setmetatable(numtostring, { __index = function (t, k) return k end })
-- serialize: Serialize a Lua data structure
--
-- x = value to serialize
-- out = function to be called repeatedly with strings, or
-- table into which strings should be inserted, or
-- nil => return a string
-- iter = function to iterate over table elements, or
-- "s" to sort elements by key, or
-- nil for default (fastest)
--
-- Notes:
-- * Does not support self-referential data structures.
-- * Does not optimize for repeated sub-expressions.
-- * Does not preserve topology; only values.
-- * Does not handle types other than nil, number, boolean, string, table
--
local function serialize(x, out, iter)
local visited = {}
local iter = iter=="s" and orderedPairs or iter or pairs
assert(type(iter) == "function")
local function _serialize(x)
if type(x) == "string" then
out(string.format("%q", x))
elseif type(x) == "number" then
out(numtostring[tostring(x)])
elseif type(x) == "boolean" or
type(x) == "nil" then
out(tostring(x))
elseif type(x) == "table" then
if visited[x] then
error("serialize: recursive structure")
end
visited[x] = true
local first, nextIndex = true, 1
out "{"
for k,v in iter(x) do
if first then
first = false
else
out ","
end
if k == nextIndex then
nextIndex = nextIndex + 1
else
if type(k) == "string" and k:match("^[%a_][%w_]*$") then
out(k.."=")
else
out "["
_serialize(k)
out "]="
end
end
_serialize(v)
end
out "}"
visited[x] = false
else
error("serialize: unsupported type")
end
end
local result
if not out then
result = {}
out = result
end
if type(out) == "table" then
local t = out
function out(s)
table.insert(t,s)
end
end
_serialize(x)
if result then
return table.concat(result)
end
end
return {
orderedPairs = orderedPairs,
serialize = serialize
}

View File

@ -0,0 +1,298 @@
extern crate bincode;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate rlua;
extern crate solana_sdk;
use bincode::deserialize;
use rlua::{Lua, Result, Table};
use solana_sdk::account::KeyedAccount;
use solana_sdk::loader_instruction::LoaderInstruction;
use std::str;
use std::sync::{Once, ONCE_INIT};
/// Make KeyAccount values available to Lua.
fn set_accounts(lua: &Lua, name: &str, keyed_accounts: &[KeyedAccount]) -> Result<()> {
let accounts = lua.create_table()?;
for (i, keyed_account) in keyed_accounts.iter().enumerate() {
let account = lua.create_table()?;
account.set("key", keyed_account.key.to_string())?;
account.set("tokens", keyed_account.account.tokens)?;
let data_str = lua.create_string(&keyed_account.account.userdata)?;
account.set("userdata", data_str)?;
accounts.set(i + 1, account)?;
}
let globals = lua.globals();
globals.set(name, accounts)
}
/// Commit the new KeyedAccount values.
fn update_accounts(lua: &Lua, name: &str, keyed_accounts: &mut [KeyedAccount]) -> Result<()> {
let globals = lua.globals();
let accounts: Table = globals.get(name)?;
for (i, keyed_account) in keyed_accounts.into_iter().enumerate() {
let account: Table = accounts.get(i + 1)?;
keyed_account.account.tokens = account.get("tokens")?;
let data_str: rlua::String = account.get("userdata")?;
keyed_account.account.userdata = data_str.as_bytes().to_vec();
}
Ok(())
}
fn run_lua(keyed_accounts: &mut [KeyedAccount], code: &str, data: &[u8]) -> Result<()> {
let lua = Lua::new();
let globals = lua.globals();
let data_str = lua.create_string(data)?;
globals.set("data", data_str)?;
set_accounts(&lua, "accounts", keyed_accounts)?;
lua.exec::<_, ()>(code, None)?;
update_accounts(&lua, "accounts", keyed_accounts)
}
#[no_mangle]
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -> bool {
static INIT: Once = ONCE_INIT;
INIT.call_once(|| {
// env_logger can only be initialized once
env_logger::init();
});
if keyed_accounts[0].account.executable {
let code = keyed_accounts[0].account.userdata.clone();
let code = str::from_utf8(&code).unwrap();
match run_lua(&mut keyed_accounts[1..], &code, tx_data) {
Ok(()) => {
trace!("Lua success");
return true;
}
Err(e) => {
warn!("Lua Error: {:#?}", e);
return false;
}
}
} else if let Ok(instruction) = deserialize(tx_data) {
match instruction {
LoaderInstruction::Write { offset, bytes } => {
let offset = offset as usize;
let len = bytes.len();
trace!("LuaLoader::Write offset {} length {:?}", offset, len);
if keyed_accounts[0].account.userdata.len() < offset + len {
warn!(
"Write overflow {} < {}",
keyed_accounts[0].account.userdata.len(),
offset + len
);
return false;
}
keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes);
}
LoaderInstruction::Finalize => {
keyed_accounts[0].account.executable = true;
trace!("LuaLoader::Finalize prog: {:?}", keyed_accounts[0].key);
}
}
} else {
warn!("Invalid program transaction: {:?}", tx_data);
return false;
}
true
}
#[cfg(test)]
mod tests {
extern crate bincode;
use self::bincode::serialize;
use super::*;
use solana_sdk::account::{create_keyed_accounts, Account};
use solana_sdk::pubkey::Pubkey;
use std::fs::File;
use std::io::prelude::*;
use std::path::PathBuf;
#[test]
fn test_update_accounts() -> Result<()> {
let mut accounts = [(Pubkey::default(), Account::default())];
let mut keyed_accounts = create_keyed_accounts(&mut accounts);
let lua = Lua::new();
set_accounts(&lua, "xs", &keyed_accounts)?;
keyed_accounts[0].account.tokens = 42;
keyed_accounts[0].account.userdata = vec![];
update_accounts(&lua, "xs", &mut keyed_accounts)?;
// Ensure update_accounts() overwrites the local value 42.
assert_eq!(keyed_accounts[0].account.tokens, 0);
Ok(())
}
#[test]
fn test_credit_with_lua() -> Result<()> {
let code = r#"accounts[1].tokens = accounts[1].tokens + 1"#;
let mut accounts = [(Pubkey::default(), Account::default())];
run_lua(&mut create_keyed_accounts(&mut accounts), code, &[])?;
assert_eq!(accounts[0].1.tokens, 1);
Ok(())
}
#[test]
fn test_error_with_lua() {
let code = r#"accounts[1].tokens += 1"#;
let mut accounts = [(Pubkey::default(), Account::default())];
assert!(run_lua(&mut create_keyed_accounts(&mut accounts), code, &[]).is_err());
}
#[test]
fn test_move_funds_with_lua_via_process() {
let userdata = r#"
local tokens, _ = string.unpack("I", data)
accounts[1].tokens = accounts[1].tokens - tokens
accounts[2].tokens = accounts[2].tokens + tokens
"#.as_bytes()
.to_vec();
let alice_pubkey = Pubkey::default();
let bob_pubkey = Pubkey::default();
let program_id = Pubkey::default();
let mut accounts = [
(
Pubkey::default(),
Account {
tokens: 1,
userdata,
program_id,
executable: true,
loader_program_id: Pubkey::default(),
},
),
(alice_pubkey, Account::new(100, 0, program_id)),
(bob_pubkey, Account::new(1, 0, program_id)),
];
let data = serialize(&10u64).unwrap();
process(&mut create_keyed_accounts(&mut accounts), &data);
assert_eq!(accounts[1].1.tokens, 90);
assert_eq!(accounts[2].1.tokens, 11);
process(&mut create_keyed_accounts(&mut accounts), &data);
assert_eq!(accounts[1].1.tokens, 80);
assert_eq!(accounts[2].1.tokens, 21);
}
fn read_test_file(name: &str) -> Vec<u8> {
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push(name);
let mut file = File::open(path).unwrap();
let mut contents = vec![];
file.read_to_end(&mut contents).unwrap();
contents
}
#[test]
fn test_load_lua_library() {
let userdata = r#"
local serialize = load(accounts[2].userdata)().serialize
accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s")
"#.as_bytes()
.to_vec();
let program_id = Pubkey::default();
let program_account = Account {
tokens: 1,
userdata,
program_id,
executable: true,
loader_program_id: Pubkey::default(),
};
let alice_account = Account::new(100, 0, program_id);
let serialize_account = Account {
tokens: 100,
userdata: read_test_file("serialize.lua"),
program_id,
executable: false,
loader_program_id: Pubkey::default(),
};
let mut accounts = [
(Pubkey::default(), program_account),
(Pubkey::default(), alice_account),
(Pubkey::default(), serialize_account),
(Pubkey::default(), Account::new(1, 0, program_id)),
];
let mut keyed_accounts = create_keyed_accounts(&mut accounts);
process(&mut keyed_accounts, &[]);
// Verify deterministic ordering of a serialized Lua table.
assert_eq!(
str::from_utf8(&keyed_accounts[3].account.userdata).unwrap(),
"{a=1,b=2,c=3}"
);
}
#[test]
fn test_lua_multisig() {
let program_id = Pubkey::default();
let alice_pubkey = Pubkey::new(&[0; 32]);
let serialize_pubkey = Pubkey::new(&[1; 32]);
let state_pubkey = Pubkey::new(&[2; 32]);
let bob_pubkey = Pubkey::new(&[3; 32]);
let carol_pubkey = Pubkey::new(&[4; 32]);
let dan_pubkey = Pubkey::new(&[5; 32]);
let erin_pubkey = Pubkey::new(&[6; 32]);
let program_account = Account {
tokens: 1,
userdata: read_test_file("multisig.lua"),
program_id,
executable: true,
loader_program_id: Pubkey::default(),
};
let alice_account = Account {
tokens: 100,
userdata: Vec::new(),
program_id,
executable: true,
loader_program_id: Pubkey::default(),
};
let serialize_account = Account {
tokens: 100,
userdata: read_test_file("serialize.lua"),
program_id,
executable: true,
loader_program_id: Pubkey::default(),
};
let mut accounts = [
(Pubkey::default(), program_account), // Account holding the program
(alice_pubkey, alice_account), // The payer
(serialize_pubkey, serialize_account), // Where the serialize library is stored.
(state_pubkey, Account::new(1, 0, program_id)), // Where program state is stored.
(bob_pubkey, Account::new(1, 0, program_id)), // The payee once M signatures are collected.
];
let mut keyed_accounts = create_keyed_accounts(&mut accounts);
let data = format!(
r#"{{m=2, n={{"{}","{}","{}"}}, tokens=100}}"#,
carol_pubkey, dan_pubkey, erin_pubkey
).as_bytes()
.to_vec();
process(&mut keyed_accounts, &data);
assert_eq!(keyed_accounts[4].account.tokens, 1);
let data = format!(r#""{}""#, carol_pubkey).into_bytes();
process(&mut keyed_accounts, &data);
assert_eq!(keyed_accounts[4].account.tokens, 1);
let data = format!(r#""{}""#, dan_pubkey).into_bytes();
process(&mut keyed_accounts, &data);
assert_eq!(keyed_accounts[4].account.tokens, 101); // Pay day!
let data = format!(r#""{}""#, erin_pubkey).into_bytes();
process(&mut keyed_accounts, &data);
assert_eq!(keyed_accounts[4].account.tokens, 101); // No change!
}
}

View File

@ -0,0 +1,15 @@
[package]
name = "solana-noop"
version = "0.10.5"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
[dependencies]
solana-sdk = { path = "../../../sdk", version = "0.10.5" }
[lib]
name = "noop"
crate-type = ["cdylib"]

View File

@ -0,0 +1,10 @@
extern crate solana_sdk;
use solana_sdk::account::KeyedAccount;
#[no_mangle]
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> bool {
println!("noop: keyed_accounts: {:#?}", keyed_accounts);
println!("noop: data: {:?}", data);
true
}

View File

@ -1,21 +0,0 @@
[package]
name = "noop"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
solana_program_interface = { path = "../../common" }
[lib]
name = "noop"
crate-type = ["dylib"]

View File

@ -1,6 +0,0 @@
extern crate solana_program_interface;
use solana_program_interface::account::KeyedAccount;
#[no_mangle]
pub extern "C" fn process(_infos: &mut Vec<KeyedAccount>, _data: &[u8]) {}

View File

@ -1,21 +0,0 @@
[package]
name = "print"
version = "0.1.0"
authors = [
"Anatoly Yakovenko <anatoly@solana.com>",
"Greg Fitzgerald <greg@solana.com>",
"Stephen Akridge <stephen@solana.com>",
"Michael Vines <mvines@solana.com>",
"Rob Walker <rob@solana.com>",
"Pankaj Garg <pankaj@solana.com>",
"Tyera Eulberg <tyera@solana.com>",
"Jack May <jack@solana.com>",
]
[dependencies]
solana_program_interface = { path = "../../common" }
[lib]
name = "print"
crate-type = ["dylib"]

View File

@ -1,9 +0,0 @@
extern crate solana_program_interface;
use solana_program_interface::account::KeyedAccount;
#[no_mangle]
pub extern "C" fn process(infos: &mut Vec<KeyedAccount>, _data: &[u8]) {
println!("AccountInfos: {:#?}", infos);
//println!("data: {:#?}", data);
}

View File

@ -1,10 +1,17 @@
# Smart Contracts Engine
The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.
The goal of this RFC is to define a set of constraints for APIs and smart contracts runtime such that we can execute our contracts safely on massively parallel hardware such as a GPU.
## Version
version 0.2
Version 0.3
## Definitions
* Transaction - an atomic operation with multiple instructions. All Instructions must complete successfully for the transaction to be committed.
* Instruction - a call to a program that modifies Account token balances and Account specific userdata state. A single transaction may have multiple Instructions with different Accounts and Programs.
* Program - Programs are code that modifies Account token balances and Account specific userdata state.
* Account - A single instance of state. Accounts are looked up by account Pubkeys and are associated with a Program's Pubkey.
## Toolchain Stack
@ -39,173 +46,136 @@ In Figure 1 an untrusted client, creates a program in the front-end language of
## Runtime
The goal of the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. The goal is to execute as many contracts as possible in parallel, and have them pass or fail without a destructive state change.
The goal of the runtime is to have a general purpose execution environment that is highly parallelizable. To achieve this goal the runtime forces each Instruction to specify all of its memory dependencies up front; therefore a single Instruction cannot cause a dynamic memory allocation. An explicit Instruction, `SystemProgram::CreateAccount`, is the only way to allocate new memory in the engine. A Transaction may compose multiple Instructions, including `SystemProgram::CreateAccount`, into a single atomic sequence, allowing memory allocation to achieve a result similar to dynamic allocation.
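For concreteness, here is a minimal sketch of such a composed transaction. The `Instruction` struct below is a simplified stand-in invented for illustration (the real structures are defined in the sections that follow); only the shape of the composition is the point.
```
// Sketch only: a simplified stand-in for the runtime's Instruction type.
struct Instruction {
    program_ids_index: usize, // index into Transaction::program_ids
    userdata: Vec<u8>,        // program-specific input, e.g. an encoded SystemProgram variant
    accounts: Vec<usize>,     // indexes into Transaction::account_keys
}

fn main() {
    // Instruction 0: ask the SystemProgram to allocate and assign a new account.
    let create = Instruction {
        program_ids_index: 0,
        userdata: b"CreateAccount { tokens, space, program_id }".to_vec(), // stand-in encoding
        accounts: vec![0, 1], // payer, new account
    };
    // Instruction 1: call the target program on the freshly allocated account.
    let call = Instruction {
        program_ids_index: 1,
        userdata: b"program input".to_vec(),
        accounts: vec![1],
    };
    // Both commit atomically: if either instruction fails, neither takes effect.
    let _instructions = vec![create, call];
}
```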
### State
State is addressed by an account which is at the moment simply the Pubkey. Our goal is to eliminate memory allocation from within the smart contract itself. Thus the client of the contract provides all the state that is necessary for the contract to execute in the transaction itself. The runtime interacts with the contract through a state transition function, which takes a mapping of [(Pubkey,State)] and returns [(Pubkey, State')]. The State is an opaque type to the runtime, a `Vec<u8>`, the contents of which the contract has full control over.
State is addressed by an Account which is at the moment simply the Pubkey. Our goal is to eliminate memory allocation from within the program itself. Thus the client of the program provides all the state that is necessary for the program to execute in the transaction itself. The runtime interacts with the program through an entry point with a well-defined interface. The userdata stored in an Account is an opaque type to the runtime, a `Vec<u8>`, the contents of which the program code has full control over.
### Call Structure
### Transaction structure
```
/// Call definition
/// Signed portion
/// An atomic transaction
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct CallData {
/// Each Pubkey in this vector is mapped to a corresponding `Page` that is loaded for contract execution
/// In a simple pay transaction `key[0]` is the token owner's key and `key[1]` is the recipient's key.
pub keys: Vec<Pubkey>,
pub struct Transaction {
/// A digital signature of `account_keys`, `program_ids`, `last_id`, `fee` and `instructions`, signed by `Pubkey`.
pub signature: Signature,
/// The Pubkeys that are required to have a proof. The proofs are a `Vec<Signature>` which is encoded alongside this data structure.
/// Each Signature signs the `required_proofs` vector as well as the `keys` vectors. The transaction is valid if and only if all
/// the required signatures are present and the public key vector is unchanged between signatures.
pub required_proofs: Vec<u8>,
/// The `Pubkeys` used when executing this transaction's userdata. The meaning of each key is
/// program-specific.
/// * account_keys[0] - Typically this is the `caller` public key. `signature` is verified with account_keys[0].
/// In the future which key pays the fee and which keys have signatures would be configurable.
/// * account_keys[1] - Typically this is the program context or the recipient of the tokens
pub account_keys: Vec<Pubkey>,
/// PoH data
/// last PoH hash observed by the sender
/// The ID of a recent ledger entry.
pub last_id: Hash,
/// Program
/// The address of the program we want to call. ContractId is just a Pubkey that is the address of the loaded code that will execute this Call.
pub contract_id: ContractId,
/// OS scheduling fee
/// The number of tokens paid for processing and storage of this transaction.
pub fee: i64,
/// struct version to prevent duplicate spends
/// Calls with a version <= Page.version are rejected
pub version: u64,
/// method to call in the contract
pub method: u8,
/// userdata in bytes
pub userdata: Vec<u8>,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Call {
/// Signatures and Keys
/// (signature, key index)
/// This vector contains a tuple of signatures, and the key index the signature is for
/// proofs[0] is always key[0]
pub proofs: Vec<Signature>,
pub data: CallData,
/// Keys identifying programs in the instructions vector.
pub program_ids: Vec<Pubkey>,
/// Programs that will be executed in sequence and committed in one atomic transaction if all
/// succeed.
pub instructions: Vec<Instruction>,
}
```
At its core, this is just a set of Pubkeys and Signatures with a bit of metadata. The contract Pubkey routes this transaction into that contract's entry point. `version` is used for dropping retransmitted requests.
The Transaction structure specifies a list of Pubkeys, signatures for those keys, and a sequential list of instructions that will operate over the states associated with the `account_keys`. For the transaction to be committed, all the instructions must execute successfully; if any aborts, the whole transaction fails to commit.
Contracts should be able to read any state that is part of runtime, but only write to state that the contract allocated.
### Account structure
Accounts maintain token state as well as program specific memory.
```
/// An Account with userdata that is stored on chain
pub struct Account {
/// tokens in the account
pub tokens: i64,
/// user data
/// A transaction can write to its userdata
pub userdata: Vec<u8>,
/// program id this Account belongs to
pub program_id: Pubkey,
}
```
### Execution
# Transaction Engine
Calls are batched and processed in a pipeline
At its core, the engine looks up all the Pubkeys, maps them to accounts, and routes them to the `program_id` entry point.
## Execution
Transactions are batched and processed in a pipeline
```
+-----------+ +-------------+ +--------------+ +--------------------+
| sigverify |--->| lock memory |--->| validate fee |--->| allocate new pages |--->
| sigverify |--->| lock memory |--->| validate fee |--->| allocate accounts |--->
+-----------+ +-------------+ +--------------+ +--------------------+
+------------+ +---------+ +--------------+ +--------------+
--->| load pages |--->| execute |--->|unlock memory |--->| commit pages |
+------------+ +---------+ +--------------+ +--------------+
+------------+ +---------+ +--------------+ +--------------+
--->| load data |--->| execute |--->| commit data |--->|unlock memory |
+------------+ +---------+ +--------------+ +--------------+
```
At the `execute` stage, the loaded pages have no data dependencies, so all the contracts can be executed in parallel.
## Memory Management
```
pub struct Page {
/// key that indexes this page
/// prove ownership of this key to spend from this Page
owner: Pubkey,
/// contract that owns this page
/// contract can write to the data that is in `memory` vector
contract: Pubkey,
/// balance that belongs to owner
balance: u64,
/// version of the structure, public for testing
version: u64,
/// hash of the page data
memhash: Hash,
/// The following could be in a separate structure
memory: Vec<u8>,
}
```
At the `execute` stage, the loaded pages have no data dependencies, so all the programs can be executed in parallel.
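As a sketch of the scheduling rule this implies (illustrative only, not the runtime's code): two transactions are safe to run on separate threads exactly when the account sets they reference are disjoint.
```
// Sketch: disjoint account sets imply no data dependencies.
use std::collections::HashSet;

fn can_run_in_parallel(a: &HashSet<[u8; 32]>, b: &HashSet<[u8; 32]>) -> bool {
    a.is_disjoint(b)
}
```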
The guarantees that the runtime enforces:
1. The contract code is the only code that will modify the contents of `memory`
2. The total balance across all the pages is equal before and after execution of a call
3. Balances of each of the pages not owned by the contract must be equal to or greater after the call than before the call.
The runtime enforces the following rules:
1. The `program_id` code is the only code that will modify the contents of `Account::userdata` of Accounts that have been assigned to it. This means that upon assignment, the userdata vector is guaranteed to be zeroed.
2. The total balance across all the accounts is equal before and after execution of a Transaction (see the sketch after this list).
3. Balances of each of the accounts not assigned to `program_id` must be equal to or greater after the Transaction than before the transaction.
4. All Instructions in the Transaction executed without a failure.
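For intuition, rules 2 and 3 amount to conservation checks of roughly this shape (a sketch, not the runtime's actual implementation):
```
// Sketch: rule 2, token conservation across a Transaction.
fn tokens_conserved(before: &[i64], after: &[i64]) -> bool {
    before.iter().sum::<i64>() == after.iter().sum::<i64>()
}

// Sketch: rule 3, accounts not assigned to the program may only gain tokens.
fn others_not_debited(before: &[i64], after: &[i64]) -> bool {
    before.iter().zip(after).all(|(b, a)| a >= b)
}
```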
## Entry Point
Execution of the contract involves mapping the contract's public key to an entry point which takes a pointer to the transaction, and an array of loaded pages.
Execution of the program involves mapping the Program's public key to an entry point which takes a pointer to the transaction, and an array of loaded accounts.
```
// Find the method
match (tx.contract, tx.method) {
// system interface
// everyone has the same reallocate
(_, 0) => system_0_realloc(&tx, &mut call_pages),
(_, 1) => system_1_assign(&tx, &mut call_pages),
// contract methods
(DEFAULT_CONTRACT, 128) => default_contract_128_move_funds(&tx, &mut call_pages),
(contract, method) => //...
pub fn process_transaction(
tx: &Transaction,
pix: usize,
accounts: &mut [&mut Account],
) -> Result<()>;
```
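A hedged sketch of a program body behind this entry point follows. The local `Account` and `Transaction` definitions are simplified stand-ins, not the runtime's real types; the point is the control flow: returning an error aborts the Instruction and therefore the whole Transaction.
```
// Sketch only: simplified stand-ins for the runtime's types.
struct Account { tokens: i64 }
struct Transaction { userdata: Vec<Vec<u8>> } // one userdata blob per instruction (simplified)

fn process_transaction(
    tx: &Transaction,
    pix: usize, // which instruction within `tx` is executing
    accounts: &mut [&mut Account],
) -> Result<(), ()> {
    // Interpret this instruction's userdata as a little-endian token amount.
    let mut buf = [0u8; 8];
    buf.copy_from_slice(&tx.userdata[pix][..8]);
    let amount = i64::from_le_bytes(buf);
    if accounts[0].tokens < amount {
        return Err(()); // a failed instruction rolls back the entire Transaction
    }
    accounts[0].tokens -= amount;
    accounts[1].tokens += amount;
    Ok(())
}
```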
The first 127 methods are reserved for the system interface, which implements allocation and assignment of memory. The rest, including the contract for moving funds, are implemented by the contract itself.
## System Interface
```
/// SYSTEM interface, same for every contract, methods 0 to 127
/// method 0
/// reallocate
/// spend the funds from the call to the first recipient's
pub fn system_0_realloc(call: &Call, pages: &mut Vec<Page>) {
if call.contract == DEFAULT_CONTRACT {
let size: u64 = deserialize(&call.userdata).unwrap();
pages[0].memory.resize(size as usize, 0u8);
}
pub enum SystemProgram {
/// Create a new account
/// * Transaction::keys[0] - source
/// * Transaction::keys[1] - new account key
/// * tokens - number of tokens to transfer to the new account
/// * space - memory to allocate if greater than zero
/// * program_id - the program id of the new account
CreateAccount {
tokens: i64,
space: u64,
program_id: Pubkey,
},
/// Assign account to a program
/// * Transaction::keys[0] - account to assign
Assign { program_id: Pubkey },
/// Move tokens
/// * Transaction::keys[0] - source
/// * Transaction::keys[1] - destination
Move { tokens: i64 },
}
/// method 1
/// assign
/// assign the page to a contract
pub fn system_1_assign(call: &Call, pages: &mut Vec<Page>) {
let contract = deserialize(&call.userdata).unwrap();
if call.contract == DEFAULT_CONTRACT {
pages[0].contract = contract;
//zero out the memory in pages[0].memory
//Contracts need to own the state of that data otherwise a user could fabricate the state and
//manipulate the contract
pages[0].memory.clear();
}
}
```
The first method resizes the memory that is associated with the caller's page. The second system call assigns the page to the contract. Both methods check if the current contract is 0; otherwise the method does nothing and the caller has spent their fees.
This ensures that when memory is assigned to the contract the initial state of all the bytes is 0, and the contract itself is the only thing that can modify that state.
## Simplest contract
```
/// DEFAULT_CONTRACT interface
/// All contracts start with 128
/// method 128
/// move_funds
/// spend the funds from the caller to the first recipient's page
pub fn default_contract_128_move_funds(call: &Call, pages: &mut Vec<Page>) {
    let amount: u64 = deserialize(&call.userdata).unwrap();
    if pages[0].balance >= amount {
        pages[0].balance -= amount;
        pages[1].balance += amount;
    }
}
```
This simply moves the amount from page[0], which is the caller's page, to page[1], which is the recipient's page.
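Under the Account-based `process_transaction` entry point shown earlier, the same transfer can be sketched as follows. This is illustrative only: the `Account` fields mirror the rules above, and `bincode` stands in for the unspecified `deserialize`.
```
use bincode::deserialize; // assumed decoder; the RFC leaves encoding unspecified

type Pubkey = [u8; 32];

pub struct Account {
    pub tokens: i64,
    pub userdata: Vec<u8>,
    pub program_id: Pubkey,
}

// Sketch: move_funds re-expressed against Accounts instead of Pages.
pub fn move_funds(userdata: &[u8], accounts: &mut [&mut Account]) {
    let amount: i64 = deserialize(userdata).unwrap();
    if accounts[0].tokens >= amount {
        accounts[0].tokens -= amount; // debit the caller
        accounts[1].tokens += amount; // credit the recipient
    }
}
```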
The interface is best described by the `Instruction::userdata` that the user encodes.
* `CreateAccount` - This allows the user to create and assign an Account to a Program.
* `Assign` - allows the user to assign an existing account to a `Program`.
* `Move` - moves tokens between `Account`s that are associated with `SystemProgram`. This cannot be used to move tokens of other `Account`s. Programs need to implement their own version of Move.
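For example, a `Move` of 42 tokens would be encoded into `Instruction::userdata` roughly as sketched below, assuming the enum derives `Serialize` and a bincode-style encoder; the RFC does not pin the encoding down, and the trimmed-down enum here is for illustration only.
```
use bincode::serialize; // assumed serializer
use serde::Serialize;   // assumed derive support

#[derive(Serialize)]
enum SystemProgram {
    Move { tokens: i64 },
}

fn main() {
    // Move 42 tokens from Transaction::keys[0] to Transaction::keys[1].
    let userdata: Vec<u8> = serialize(&SystemProgram::Move { tokens: 42 }).unwrap();
    println!("{} encoded bytes", userdata.len());
}
```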
## Notes
1. There is no dynamic memory allocation.
2. Persistent memory is allocated to a Key with ownership.
3. Contracts can `call` to update key-owned state.
4. `call` is just a *syscall* that does a cryptographic check of memory ownership.
5. The kernel guarantees that when memory is assigned to the contract its state is 0.
6. The kernel guarantees that the contract is the only thing that can modify memory assigned to it.
7. The kernel guarantees that the contract can only spend tokens that are in pages assigned to it.
8. The kernel guarantees the balances belonging to pages are balanced before and after the call.
1. There is no dynamic memory allocation. Clients need to call the `SystemProgram` to create memory before passing it to another program. This Instruction can be composed into a single Transaction with the call to the program itself (see the sketch after this list).
2. Runtime guarantees that when memory is assigned to the `Program` it is zero-initialized.
3. Runtime guarantees that the `Program`'s code is the only thing that can modify memory assigned to it.
4. Runtime guarantees that the `Program` can only spend tokens that are in `Account`s assigned to it.
5. Runtime guarantees the balances belonging to `Account`s are balanced before and after the transaction.
6. Runtime guarantees that a Transaction is committed only if all of its Instructions executed successfully.
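A minimal sketch of note 1: composing account creation with the program call in one Transaction. The container types below are illustrative assumptions, not the actual structs.
```
type Pubkey = [u8; 32];

// Illustrative containers, not the real Transaction layout.
struct Instruction {
    program_id: Pubkey,
    userdata: Vec<u8>,
}
struct Transaction {
    keys: Vec<Pubkey>,
    instructions: Vec<Instruction>,
}

// One Transaction: create + assign the account, then call the program
// that now owns it. If either Instruction fails, nothing is committed.
fn create_then_call(
    system_id: Pubkey,
    program_id: Pubkey,
    payer: Pubkey,
    new_account: Pubkey,
    create_userdata: Vec<u8>, // serialized SystemProgram::CreateAccount
    call_userdata: Vec<u8>,   // program-specific payload
) -> Transaction {
    Transaction {
        keys: vec![payer, new_account],
        instructions: vec![
            Instruction { program_id: system_id, userdata: create_userdata },
            Instruction { program_id, userdata: call_userdata },
        ],
    }
}
```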
# Future Work
* Continuations and Signals for long running Transactions. https://github.com/solana-labs/solana/issues/1485

View File

@ -1,32 +1,42 @@
# Consensus
VERY WIP
The goal of this RFC is to define the consensus algorithm used in Solana. This proposal covers a Proof of Stake (PoS) algorithm that leverages Proof of History (PoH). PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time among partitions.
The goal of this RFC is to define the consensus algorithm used in Solana. This proposal covers a Proof of Stake algorithm that leverages Proof of History. PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time between partitions.
## Version
version 0.1
version 0.4
## Basic Design Idea
Nodes on the network can be "up" or "down". A node indicates it is up either by voting as a validator or by generating a PoH stream as the designated leader. Consensus is reached when a supermajority + 1 of the staked nodes have voted on the state of the network at a particular PoH tick count.
Nodes take turns being leader and generating the PoH that encodes state changes. The network can tolerate loss of connection to any leader by synthesizing what the leader ***would have generated*** had it been connected but not ingesting any state changes. The complexity of forks is thereby limited to a "there/not-there" skip list of branches that may arise on leader rotation period boundaries.
## Message Flow
1. Transactions are ingested at the leader.
2. Leader filters for valid transactions
3. Leader executes valid transactions on its state
4. Leader packages transactions into blobs
5. Leader transmits blobs to validator nodes.
1. Transactions are ingested at the current leader.
2. Leader filters for valid transactions.
3. Leader executes valid transactions on its state.
4. Leader packages transactions into entries based off the longest observed PoH branch.
5. Leader transmits the entries to validator nodes (in signed blobs).
a. The set of supermajority + `M` nodes by stake weight is rotated in round-robin fashion.
6. Validators retransmit blobs to peers in their set and to further downstream nodes.
b. The PoH stream includes ticks: empty entries that indicate liveness of the leader and the passage of time on the network.
c. A leader's stream begins with the tick entries necessary to complete the PoH back to that node's most recently observed prior leader period.
6. Validators retransmit entries to peers in their set and to further downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. Validators transmit votes to the leader.
a. Votes are signatures of the hash of the computed state.
10. Leader executes the votes as any other transaction and broadcasts them out to the network
11. Validators observe their votes, and all the votes from the network.
12. Validators continue voting if the supermajority of stake is observed in the vote for the same hash.
9. At specific times, i.e. specific PoH tick counts, validators transmit votes to the leader.
a. Votes are signatures of the hash of the computed state at that PoH tick count
10. Leader executes the votes as any other transaction and broadcasts them to the network
a. The leader votes at that same height once a majority of stake is represented on the PoH stream *(open question: do leaders vote?)*
11. Validators observe their votes and all the votes from the network.
12. Validators vote on the longest chain of periods that contains their vote.
Supermajority is defined as votes representing `2/3rds + 1` of the PoS stakes.
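As a concrete reading of steps 9 through 12 and the definition above, a vote and the stake threshold might look like the sketch below; the field names are assumptions, since the RFC specifies only what is signed.
```
// Illustrative vote shape: a signature over (tick count, state hash).
struct Vote {
    poh_tick_count: u64,
    state_hash: [u8; 32],
    signature: [u8; 64],
}

/// Supermajority as defined above: 2/3rds + 1 of the PoS stakes.
fn supermajority_stake(total_stake: u64) -> u64 {
    total_stake * 2 / 3 + 1
}
```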
## Staking
Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.
@ -43,7 +53,7 @@ CreateStake(
)
```
Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain amount of validation votes.
Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain number of validation voting rounds.
## Validation Votes
@ -63,7 +73,7 @@ Validators `stake` some of their spendable sol into a staking account. The stak
```
Slash(Validate(
PoH count,
PoH tick count,
PoH hash,
stake public key,
...
@ -75,48 +85,14 @@ When the `Slash` vote is processed, validators should lookup `PoH hash` at `PoH
## Leader Slashing
TBD. The goal of this is to discourage leaders from generating multiple PoH streams.
The goal is to discourage leaders from generating multiple PoH streams. When this occurs, the network adopts ticks for that leader's period. Leaders can be slashed for generating multiple conflicting PoH streams during their period.
## Validation Vote Contract
The goal of this contract is to simulate the economic cost of mining on a shorter branch.
1. With my signature I am certifying that I computed `state hash` at `PoH count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH count` + `N` is reached by the PoH stream.
1. With my signature I am certifying that I computed `state hash` at `PoH tick count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH tick count` + `N` is reached by the PoH stream (lockout period).
3. I will not vote for any other branch below `PoH count`.
a. if there are other votes not present in this PoH history, the validator may need to `cancel` them before creating this vote.
## Leader Seed Generation
Leader selection is decided via a random seed. The process is as follows:
1. Periodically at a specific `PoH count` select the first vote signatures that create a supermajority from the previous round.
2. Append them together.
3. Hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts, where `M > N`.
## Leader Ranking and Rotation
Leaders transmit for a count of `T`. When `T` is reached, all the validators should switch to the next ranked leader. To rank leaders, the supermajority + `M` nodes are shuffled using the above calculated random seed.
TBD: define a ranking for critical partitions without a node from supermajority + `M` set.
## Partition selection
Validators should select the first branch to reach finality, or the highest ranking leader.
## Examples
### Small Partition
1. Network partition M occurs for 10% of the nodes.
2. The larger partition K, with 90% of the stake weight, continues to operate as normal.
3. M cycles through the ranks until one of them is leader.
4. M validators observe 10% of the vote pool; finality is not reached.
5. M and K re-connect.
6. M validators cancel their votes on K which are below K's `PoH count`.
### Leader Timeout
1. Next rank node observes a timeout.
2. Nodes receiving both PoH streams pick the higher rank node.
3. Step 2 causes a partition, since nodes can only vote for one leader.
4. The partition is resolved just like in the [Small Partition](#small-partition) example.
4. Each vote on a branch increases the lockout for all prior votes on that branch according to a network-specified function.
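Item 4 leaves the lockout function network-specified. A doubling schedule is one plausible instance, sketched below with `base` as an assumed network parameter.
```
// Sketch only: each later vote on the same branch doubles the lockout
// of this vote; the actual function is network-specified.
fn lockout_expiry(vote_tick_count: u64, later_votes_on_branch: u32, base: u64) -> u64 {
    vote_tick_count + base * 2u64.pow(later_votes_on_branch)
}
```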

View File

@ -52,3 +52,4 @@ Our solution to this is to force the clients to continue using the same identity
* Replicator clients fish for lazy validators by submitting fake proofs that they can prove are fake.
* Replication identities are just symmetric encryption keys; the number of them on the network is our storage replication target. Many more client identities can exist than replicator identities, so an unlimited number of clients can provide proofs of the same replicator identity.
* To defend against Sybil client identities that try to store the same block we force the clients to store for multiple rounds before receiving a reward.
* Validators should also get rewarded for validating submitted storage proofs as incentive for storing the ledger. They can only validate proofs if they are storing that slice of the ledger.

View File

@ -0,0 +1,108 @@
# Leader Rotation
The goal of this RFC is to define how leader nodes are rotated in Solana, how rotation may cause forks to arise, and how the network converges in response.
## Version
version 0.1
## Leader Seed Generation
Leader selection is decided via a random seed. The process is as follows:
1. Periodically at a specific `PoH tick count` select the first vote signatures that create a supermajority from the previous voting round.
2. Append them together.
3. Hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts, i.e. `M` leader periods, where `M > N` (a sketch of the procedure follows).
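A sketch of the procedure, assuming 64-byte Ed25519 vote signatures and SHA-256 as the PoH hash; both choices are assumptions made here for illustration.
```
use sha2::{Digest, Sha256};

fn leader_seed(vote_signatures: &[[u8; 64]], n: u64) -> [u8; 32] {
    // Steps 1 and 2: append the supermajority vote signatures together.
    let appended: Vec<u8> = vote_signatures.concat();
    // Step 3: hash for `n` counts, the same way PoH extends its chain.
    let mut hash = Sha256::digest(&appended);
    for _ in 1..n {
        hash = Sha256::digest(&hash);
    }
    // Step 4: the result is the random seed for the next `M` periods.
    let mut seed = [0u8; 32];
    seed.copy_from_slice(&hash);
    seed
}
```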
## Leader Rotation
1. The leader is chosen via a random seed generated from stake weights and votes (the leader schedule).
2. The leader is rotated every `T` PoH ticks (a leader period), according to the leader schedule.
3. The schedule is applicable for `M` voting rounds.
Leaders transmit for a count of `T` PoH ticks. When `T` is reached, all the validators should switch to the next scheduled leader. To schedule leaders, the supermajority + `M` nodes are shuffled using the above calculated random seed.
All `T` ticks must be observed from the current leader for that part of PoH to be accepted by the network. If `T` ticks (and any intervening transactions) are not observed, the network optimistically fills in the `T` ticks, and continues with PoH from the next leader.
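A sketch of that rule, with an assumed `Entry` type and `T` as defined under Network Variables; this stands in for, and is not, the actual replay logic.
```
#[derive(Clone)]
enum Entry {
    Tick,             // an empty PoH entry
    Entries(Vec<u8>), // leader-produced transactions (opaque here)
}

// Accept the leader's stream only if all `T` ticks were observed;
// otherwise optimistically synthesize a ticks-only period.
fn period_ledger(observed: Option<Vec<Entry>>, t: usize) -> Vec<Entry> {
    match observed {
        Some(entries)
            if entries.iter().filter(|e| matches!(e, Entry::Tick)).count() == t =>
        {
            entries
        }
        _ => vec![Entry::Tick; t],
    }
}
```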
## Partitions, Forks
Forks can arise at PoH tick counts that correspond to leader rotations, because leader nodes may or may not have observed the previous leader's data. These empty ticks are generated by all nodes in the network at a network-specified rate of `Z` hashes per tick.
There are only two possible versions of the PoH during a voting period: PoH with `T` ticks and entries generated by the current leader, or PoH with just ticks. The "just ticks" version of the PoH can be thought of as a virtual ledger, one that all nodes in the network can derive from the last tick in the previous period.
Validators can ignore forks at other points (e.g. from the wrong leader), or slash the leader responsible for the fork.
Validators vote on the longest chain that contains their previous vote, or a longer chain if the lockout on their previous vote has expired.
#### Validator's View
##### Time Progression
The diagram below represents a validator's view of the PoH stream with possible forks over time. L1, L2, etc. are leader periods, and `E`s represent entries from that leader during that leader's period. The 'x's represent ticks only, and time flows downwards in the diagram.
```
time +----+ validator action
| | L1 | E(L1)
| |----| / \ vote(E(L2))
| | L2 | E(L2) x
| |----| / \ / \ vote(E(L2))
| | L3 | E(L3) x E(L3)' x
| |----| / \ / \ / \ / \ slash(L3)
| | L4 | x x E(L4) x x x x x
V |----| | | | | | | | | vote(E(L4))
V | L5 | xx xx xx E(L5) xx xx xx xx
V +----+ hang on to E(L4) and E(L5) for more...
```
Note that an `E` appearing on 2 branches at the same period is a slashable condition, so a validator observing `E(L3)` and `E(L3)'` can slash L3 and safely choose `x` for that period. Once a validator observes a supermajority vote on any branch, other branches can be discarded below that tick count. For any period, validators need only consider a single "has entries" chain or a "ticks only" chain.
##### Time Division
It's useful to consider leader rotation over PoH tick count as time division of the job of encoding state for the network. The following table presents the above tree of forks as a time-divided ledger.
leader period | L1 | L2 | L3 | L4 | L5
-------|----|----|----|----|----
data | E(L1)| E(L2) | E(L3) | E(L4) | E(L5)
ticks to prev | | | | x | xx
Note that only data from leader L3 will be accepted during leader period L3. Data from L3 may include "catchup" ticks back to a period other than L2 if L3 did not observe L2's data. L4 and L5's transmissions include the "ticks to prev" PoH entries.
This arrangement of the network data streams permits nodes to save exactly this to the ledger for replay, restart, and checkpoints.
#### Leader's View
When a new leader begins a period, it must first transmit any PoH (ticks) required to link the new period with the most recently observed and voted period.
## Examples
### Small Partition
1. Network partition M occurs for 10% of the nodes.
2. The larger partition K, with 90% of the stake weight, continues to operate as normal.
3. M cycles through the ranks until one of them is leader, generating ticks for periods where the leader is in K.
4. M validators observe 10% of the vote pool; finality is not reached.
5. M and K re-connect.
6. M validators cancel their votes on M, which has not reached finality, and re-cast on K (after their vote lockout on M expires).
### Leader Timeout
1. Next-rank leader node B observes a timeout from current leader A, fills in A's period with virtual ticks, and starts sending out entries.
2. Nodes observing both streams keep track of the forks, waiting for either:
a. their vote on leader A to expire in order to be able to vote on B, or
b. a supermajority on A's period.
3. If (a) occurs, leader B's period is filled with ticks; if (b) occurs, A's period is filled with ticks.
4. The partition is resolved just like in the [Small Partition](#small-partition) example.
## Network Variables
`M` - number of nodes outside the supermajority to whom leaders broadcast their PoH for validation
`N` - number of voting rounds for which a leader schedule is considered before a new leader schedule is used
`T` - number of PoH ticks per leader period (also voting period)
`Z` - number of hashes per PoH tick

View File

@ -117,6 +117,14 @@ $ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
<TX_SIGNATURE>
```
### Deploy program
```
// Command
$ solana-wallet deploy <PATH>
// Return
<PROGRAM_ID>
```
## Javascript solana-web3.js Interface

View File

@ -0,0 +1,98 @@
#!/bin/bash -e
usage() {
cat <<EOF
usage: $0 [major|minor|patch|-preXYZ]
Increments the Cargo.toml version.
A minor version increment is the default
EOF
exit 0
}
here="$(dirname "$0")"
cd "$here"/..
source ci/semver_bash/semver.sh
readCargoVariable() {
declare variable="$1"
declare Cargo_toml="$2"
while read -r name equals value _; do
if [[ $name = "$variable" && $equals = = ]]; then
echo "${value//\"/}"
return
fi
done < "$Cargo_toml"
echo "Unable to locate $variable in $Cargo_toml" 1>&2
}
# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
Cargo_tomls="$(find . -name Cargo.toml)"
# Collect the name of all the internal crates
crates=()
for Cargo_toml in $Cargo_tomls; do
crates+=("$(readCargoVariable name "$Cargo_toml")")
done
# Read the current version
MAJOR=0
MINOR=0
PATCH=0
SPECIAL=""
semverParseInto "$(readCargoVariable version ./Cargo.toml)" MAJOR MINOR PATCH SPECIAL
[[ -n $MAJOR ]] || usage
currentVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
SPECIAL=""
# Figure out what to increment
case ${1:-minor} in
patch)
PATCH=$((PATCH + 1))
;;
major)
MAJOR=$((MAJOR + 1))
;;
minor)
MINOR=$((MINOR + 1))
;;
-*)
if [[ $1 =~ ^-[A-Za-z0-9]*$ ]]; then
SPECIAL="$1"
else
echo "Error: Unsupported characters found in $1"
exit 1
fi
;;
*)
echo "Error: unknown argument: $1"
usage
;;
esac
newVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
# Update all the Cargo.toml files
for Cargo_toml in $Cargo_tomls; do
# Set new crate version
(
set -x
sed -i "$Cargo_toml" -e "s/^version = \"[^\"]*\"$/version = \"$newVersion\"/"
)
# Fix up the version references to other internal crates
for crate in "${crates[@]}"; do
(
set -x
sed -i "$Cargo_toml" -e "
s/^$crate = .*path = \"\([^\"]*\)\".*\$/$crate = \{ path = \"\1\", version = \"$newVersion\" \}/
"
)
done
done
echo "$currentVersion -> $newVersion"
exit 0

View File

@ -0,0 +1,29 @@
#!/bin/bash -e
#
# Installs native programs as |cargo install| doesn't know about them
#
here=$(dirname "$0")
SOLANA_ROOT="$(cd "$here"/..; pwd)"
installDir=$1
variant=${2:-release}
if [[ -z $installDir ]]; then
echo Install directory not specified
exit 1
fi
if [[ ! -d $installDir ]]; then
echo "Not a directory: $installDir"
exit 1
fi
for dir in "$SOLANA_ROOT"/programs/native/*; do
for program in "$SOLANA_ROOT"/target/"$variant"/deps/lib{,solana_}"$(basename "$dir")".{so,dylib,dll}; do
if [[ -f $program ]]; then
cp -v "$program" "$installDir"
fi
done
done

View File

@ -18,6 +18,10 @@ receive_errors=0
receive_errors_diff=0
rcvbuf_errors=0
rcvbuf_errors_diff=0
in_octets=0
in_octets_diff=0
out_octets=0
out_octets_diff=0
update_netstat() {
declare net_stat
@ -39,13 +43,21 @@ update_netstat() {
stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /RcvbufErrors/ {tmp_var = $2} END { print tmp_var }')
rcvbuf_errors_diff=$((stats - rcvbuf_errors))
rcvbuf_errors="$stats"
stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /InOctets/ {tmp_var = $2} END { print tmp_var }')
in_octets_diff=$((stats - in_octets))
in_octets="$stats"
stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /OutOctets/ {tmp_var = $2} END { print tmp_var }')
out_octets_diff=$((stats - out_octets))
out_octets="$stats"
}
update_netstat
while true; do
update_netstat
report="packets_sent=$packets_sent_diff,packets_received=$packets_received_diff,receive_errors=$receive_errors_diff,rcvbuf_errors=$rcvbuf_errors_diff"
report="packets_sent=$packets_sent_diff,packets_received=$packets_received_diff,receive_errors=$receive_errors_diff,rcvbuf_errors=$rcvbuf_errors_diff,in_octets=$in_octets_diff,out_octets=$out_octets_diff"
echo "$report"
./metrics-write-datapoint.sh "net-stats,hostname=$HOSTNAME $report"

Some files were not shown because too many files have changed in this diff.