Compare commits
314 Commits
SHA1 | Author | Date | |
---|---|---|---|
14306a33e7 | |||
babc3847d7 | |||
40fd1befa5 | |||
7808af9a65 | |||
3c17732826 | |||
77aee571ad | |||
a01b55c580 | |||
0ecdc64302 | |||
ba06082d58 | |||
08e9c1a96e | |||
9f38b86df8 | |||
ca12faca9c | |||
97a0791f3f | |||
4791c7e0a7 | |||
1ba13fe180 | |||
9a30100a9c | |||
aa741b3147 | |||
09db7b5b52 | |||
fa9faa2cec | |||
d2dc585974 | |||
6721bdde3d | |||
a733873b8f | |||
7c02bbc47c | |||
16a815d2b1 | |||
ddb490e2fb | |||
242d0a23fb | |||
869009243d | |||
7b61f5279c | |||
7ef0b815ec | |||
8742de789e | |||
bfadd7b787 | |||
2e14bfcf4e | |||
a19426f055 | |||
df366017a7 | |||
7d76badd03 | |||
8047ab777c | |||
0d0a1c2919 | |||
1da90017ce | |||
0909618efa | |||
28bb7849f4 | |||
9cffd3a1ea | |||
917151ce54 | |||
6dcd127634 | |||
af66edf8c0 | |||
ab5b921e8f | |||
6c2843543b | |||
85f74cc537 | |||
43665115b4 | |||
156115c04c | |||
a66577eb87 | |||
3345d059e8 | |||
8c8c5de779 | |||
f03e971598 | |||
b4a1cdceaa | |||
b250d20059 | |||
dc3b270410 | |||
9d5092a71c | |||
a287c9e5fa | |||
ee85d534f9 | |||
6e1b291c17 | |||
68f7b1ecf3 | |||
58fe5cabd6 | |||
8993c6ae24 | |||
0e56473add | |||
f6b709ca48 | |||
ffa1fa557b | |||
e7631c85a1 | |||
edeadb503f | |||
d2044f2562 | |||
5703c740cf | |||
6ae20e78e2 | |||
506fc3baeb | |||
68523f4a7f | |||
beae217ab9 | |||
2c8c117e3c | |||
3a1285ebe5 | |||
e2660f2ac1 | |||
22eb1b977f | |||
43ef8d7bb7 | |||
d9271f2d30 | |||
dfbfd4d4dd | |||
9cb262ad4b | |||
73ee0cb100 | |||
9a6154beaf | |||
3f494bb91b | |||
2eb312796d | |||
3fb86662fb | |||
dce31f6002 | |||
39c42a6aba | |||
9961c0ee0a | |||
3f843f21b9 | |||
d07961a58b | |||
b85aa9282e | |||
1cd354cf15 | |||
92cd2d09ed | |||
a40122548f | |||
6e27f797bd | |||
476a585222 | |||
aa74ddb6c0 | |||
95921ce129 | |||
ee6d00a2fe | |||
212cbc4977 | |||
a6af1ba08d | |||
ee27e9e1cf | |||
4d21ee0546 | |||
493a2477b5 | |||
e284af33b9 | |||
f0aa14e135 | |||
fb9d8dfa99 | |||
4b02bbc802 | |||
18cf660f61 | |||
376303a1eb | |||
f295eb06d0 | |||
f423f61d8b | |||
94b06b2cbf | |||
9b2fc8cde7 | |||
d810752e86 | |||
fdaad1d85b | |||
7f29c1fe23 | |||
68df9d06db | |||
b60cb48c18 | |||
0fee854220 | |||
0cc7bbfe7d | |||
68834bd4c5 | |||
2df40cf9c9 | |||
f671b7f63f | |||
236113e417 | |||
a340b18b19 | |||
f6c8e1a4bf | |||
160cff4a30 | |||
48685cf766 | |||
0f32102684 | |||
d46682d1f2 | |||
55833e20b1 | |||
02cfa76916 | |||
9314eea7e9 | |||
1733beabf7 | |||
471d8f6ff9 | |||
e47fcb196b | |||
3ae53961c8 | |||
113b002095 | |||
9447537d8c | |||
7404b8739e | |||
7239395d95 | |||
926d459c8f | |||
7cabe203dc | |||
1e53f4266a | |||
24b513c3c7 | |||
b982595c73 | |||
af8a36b7fb | |||
208e7d7943 | |||
557736f1cf | |||
61927e1941 | |||
fc75827aaf | |||
2f2531d921 | |||
d5f20980eb | |||
21eae981f9 | |||
ead7f4287a | |||
3b33150cfb | |||
6d34a68e54 | |||
5c483c9928 | |||
a68c99d782 | |||
0aebbae909 | |||
a3a2215bda | |||
eb377993b3 | |||
5ca52d785c | |||
8d9912b4e2 | |||
c77b1c9687 | |||
8849ecd772 | |||
7977b97227 | |||
4f34822900 | |||
bbb38ac106 | |||
ce934a547e | |||
16b19d35dd | |||
45cfa5b574 | |||
df9ccce5b2 | |||
f8516b677a | |||
dfde83bdce | |||
cb0f19e4f1 | |||
26b99d3f85 | |||
2f9c0d1d9e | |||
0423cafbeb | |||
0bd1412562 | |||
0339642e77 | |||
37a0b7b132 | |||
c30b605047 | |||
76076d6fad | |||
0a819ec4e2 | |||
57a717056e | |||
856c48541f | |||
2045091c4f | |||
03ac5a6eef | |||
32fadc9c30 | |||
15a89d4f17 | |||
d0f43e9934 | |||
31e779d3f2 | |||
30c79fd40d | |||
639c93460a | |||
7611730cdb | |||
9df9c1433a | |||
4ea422bcec | |||
6074e4f962 | |||
d52e6d01ec | |||
63caca33be | |||
64efa62a74 | |||
912eb5e8e9 | |||
bb628e8495 | |||
d0c19c2c97 | |||
926fdb7519 | |||
c886625c83 | |||
f6c10d8a2e | |||
2bd877528f | |||
d09889b1dd | |||
1b2e9122d5 | |||
7424388924 | |||
537436bd5e | |||
32fc0cd7e9 | |||
fb99494858 | |||
5b4d4b97bc | |||
c5180c8092 | |||
515c200d86 | |||
32aab82e32 | |||
6aaa350145 | |||
d3b4dfe104 | |||
9fc30f6db4 | |||
2d0f07091d | |||
3828eda507 | |||
1e736ec16d | |||
bba6437ea9 | |||
e5ab9a856c | |||
1515bba9c6 | |||
14a9ef4bbe | |||
041040c659 | |||
47f69f2d24 | |||
9dd4dc2088 | |||
b534c32ee3 | |||
d2712f1457 | |||
183f560d06 | |||
ae150c0897 | |||
606e1396cf | |||
5c85e037f8 | |||
5c523716aa | |||
5f8cbf359e | |||
e83834e6be | |||
02225aa95c | |||
9931ac9780 | |||
2ba2bc72ca | |||
45b8ba9ede | |||
40968e09b7 | |||
262f26cf76 | |||
785c619198 | |||
24a993710d | |||
c240bb12ae | |||
eed3b9db94 | |||
29a8823db1 | |||
a80955eacb | |||
9716c3de71 | |||
34fa3208e0 | |||
9c4e19958b | |||
0403299728 | |||
95701114e3 | |||
a99d17c3ac | |||
517149d325 | |||
32aa2575b5 | |||
8fe7b96629 | |||
9350619afa | |||
d8d8f0bfc8 | |||
0a39722719 | |||
9c0fa4d1d2 | |||
da0404ad03 | |||
b508fdb62c | |||
680f90df21 | |||
1a68807ad9 | |||
d901767b54 | |||
13d4443d4d | |||
74b63c12a0 | |||
cd42f6591a | |||
5491422b12 | |||
23f3ff3cf0 | |||
f90488c77b | |||
beb4536841 | |||
3fa46dd66d | |||
ad5fcf778f | |||
83b000ae88 | |||
33e179caa6 | |||
b1e941cab9 | |||
6db961d256 | |||
83409ded59 | |||
396b2e9772 | |||
94459deb94 | |||
660af84b8d | |||
7b31020903 | |||
9a4143b4d9 | |||
aebc47ad55 | |||
b6b5455917 | |||
5bc01cd51a | |||
c79acac37b | |||
a5f2aa6777 | |||
4169e5c510 | |||
0727c440b3 | |||
19a7ff0c43 | |||
5f18403199 | |||
9f325fca09 | |||
10d08acefa | |||
52d50e6bc4 | |||
e7de7c32db | |||
a5f07638ec | |||
aa2a3fe201 | |||
abd13ba4ca | |||
485ba093b3 | |||
36b18e4fb5 | |||
8d92232949 | |||
e4d8c094a4 | |||
d26e1c51a9 |
31 .buildkite/env/README.md vendored Normal file
@@ -0,0 +1,31 @@

[ejson](https://github.com/Shopify/ejson) and
[ejson2env](https://github.com/Shopify/ejson2env) are used to manage access
tokens and other secrets required for CI.

#### Setup
```bash
$ sudo gem install ejson ejson2env
```

then obtain the necessary keypair and place it in `/opt/ejson/keys/`.

#### Usage
Run the following command to decrypt the secrets into the environment:
```bash
eval $(ejson2env secrets.ejson)
```

#### Managing secrets.ejson
To decrypt `secrets.ejson` for modification, run:
```bash
$ ejson decrypt secrets.ejson -o secrets_unencrypted.ejson
```

Edit, then run the following to re-encrypt the file **BEFORE COMMITTING YOUR
CHANGES**:
```bash
$ ejson encrypt secrets_unencrypted.ejson
$ mv secrets_unencrypted.ejson secrets.ejson
```
10 .buildkite/env/secrets.ejson vendored Normal file
@@ -0,0 +1,10 @@
{
  "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
  "environment": {
    "CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]",
    "CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]",
    "INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]",
    "INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]",
    "INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]"
  }
}
@@ -1,4 +1,7 @@
#!/bin/bash -e
#!/usr/bin/env bash
set -e

eval "$(ejson2env .buildkite/env/secrets.ejson)"

# Ensure the pattern "+++ ..." never occurs when |set -x| is set, as buildkite
# interprets this as the start of a log group.

@@ -24,4 +27,3 @@ export PS4="++"
set -x
rsync -a --delete --link-dest="$d" "$d"/target .
)
20 .buildkite/pipeline-upload.sh Executable file
@@ -0,0 +1,20 @@
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
#   steps:
#    - command: "ci/buildkite-pipeline-upload.sh"
#

set -e
cd "$(dirname "$0")"/..

buildkite-agent pipeline upload ci/buildkite.yml

if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
  # Add helpful link back to the corresponding Github Pull Request
  buildkite-agent annotate --style "info" \
    "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
fi
6 .github/ISSUE_TEMPLATE.md vendored Normal file
@@ -0,0 +1,6 @@
#### Problem



#### Proposed Solution
5 .github/PULL_REQUEST_TEMPLATE.md vendored Normal file
@@ -0,0 +1,5 @@
#### Problem

#### Summary of Changes

Fixes #
4 .gitignore vendored
@@ -1,4 +1,3 @@
Cargo.lock
/target/

**/*.rs.bk

@@ -14,3 +13,6 @@ Cargo.lock

# test temp files, ledgers, etc.
/farf/

# log files
*.log
2464 Cargo.lock generated Normal file
File diff suppressed because it is too large
63 Cargo.toml
@@ -1,20 +1,12 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.9.0"
version = "0.10.5"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
]
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"

[[bin]]
@@ -34,6 +26,7 @@ name = "solana-drone"
path = "src/bin/drone.rs"

[[bin]]
required-features = ["chacha"]
name = "solana-replicator"
path = "src/bin/replicator.rs"

@@ -65,11 +58,13 @@ path = "src/bin/wallet.rs"
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }

[features]
unstable = []
ipv6 = []
bpf_c = []
chacha = []
cuda = []
erasure = []
ipv6 = []
test = []
unstable = []

[dependencies]
atty = "0.2"
@@ -80,13 +75,17 @@ bytes = "0.4"
chrono = { version = "0.4.0", features = ["serde"] }
clap = "2.31"
dirs = "1.0.2"
elf = "0.0.10"
env_logger = "0.5.12"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
solana-jsonrpc-core = "0.1"
solana-jsonrpc-http-server = "0.1"
solana-jsonrpc-macros = "0.1"
hex-literal = "0.1.1"
influx_db_client = "0.3.6"
solana-jsonrpc-core = "0.3.0"
solana-jsonrpc-http-server = "0.3.0"
solana-jsonrpc-macros = "0.3.0"
solana-jsonrpc-pubsub = "0.3.0"
solana-jsonrpc-ws-server = "0.3.0"
ipnetwork = "0.12.7"
itertools = "0.7.8"
libc = "0.2.43"
@@ -99,22 +98,20 @@ rand = "0.5.1"
rayon = "1.0.0"
reqwest = "0.9.0"
ring = "0.13.2"
sha2 = "0.7.0"
sha2 = "0.8.0"
serde = "1.0.27"
serde_cbor = "0.9.0"
serde_derive = "1.0.27"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana_program_interface = { path = "common" }
solana-sdk = { path = "sdk", version = "0.10.5" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"

[dev-dependencies]
noop = { path = "programs/noop" }
print = { path = "programs/print" }
move_funds = { path = "programs/move_funds" }
solana-noop = { path = "programs/native/noop", version = "0.10.5" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.5" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.5" }

[[bench]]
name = "bank"
@@ -131,18 +128,16 @@ name = "signature"
[[bench]]
name = "sigverify"

[[bench]]
required-features = ["chacha"]
name = "chacha"

[workspace]
members = [
    ".",
    "common",
    "programs/noop",
    "programs/print",
    "programs/move_funds",
]
default-members = [
    ".",
    "common",
    "programs/noop",
    "programs/print",
    "programs/move_funds",
    "sdk",
    "programs/native/noop",
    "programs/native/bpf_loader",
    "programs/native/lua_loader",
    "programs/bpf/rust/noop",
]
@@ -21,7 +21,7 @@ It's possible for a centralized database to process 710,000 transactions per sec

> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)

Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
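The SHA-256 hash chain described above is easy to reproduce in miniature. A minimal sketch, assuming only `sha256sum` and standard coreutils; the names and tick count are illustrative, not from the repository:

```bash
#!/usr/bin/env bash
# Toy verifiable delay function: each tick hashes the previous hash.
# Recomputing tick N requires performing all N hashes in sequence,
# so the chain itself is evidence that time has passed.
id="genesis"
for tick in 1 2 3 4 5; do
  id=$(printf '%s' "$id" | sha256sum | cut -d' ' -f1)
  echo "tick $tick: $id"
done
```

Producing the chain is inherently sequential, while each link can be verified independently; that asymmetry is what lets the chain checkpoint the ledger.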
Testnet Demos
@@ -12,9 +12,9 @@ When cutting a new channel branch these pre-steps are required:

1. Pick your branch point for release on master.
2. Create the branch. The name should be "v" + the first 2 "version" fields from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies the next branch name is "v0.9".
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0).
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0) by running `./scripts/increment-cargo-version.sh`.
4. Push your new branch to solana.git
5. Land your Carto.toml change as a master PR.
5. Land your Cargo.toml change as a master PR.

At this point, ci/channel-info.sh should show your freshly cut release branch as "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
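As a rough illustration of what step 3's version bump amounts to — this is a hypothetical sketch, not the contents of the actual `./scripts/increment-cargo-version.sh`:

```bash
#!/usr/bin/env bash
# Hypothetical version bump: rewrite the version field in each Cargo.toml.
set -e
old="0.9.0"
new="0.10.0"
sed -i -e "s/^version = \"$old\"/version = \"$new\"/" Cargo.toml sdk/Cargo.toml
grep '^version = ' Cargo.toml sdk/Cargo.toml   # confirm the bump
```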
@@ -4,8 +4,6 @@ extern crate rayon;
extern crate solana;
extern crate test;

use bincode::serialize;
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
@@ -21,31 +19,35 @@ fn bench_process_transaction(bencher: &mut Bencher) {

    // Create transactions between unrelated parties.
    let transactions: Vec<_> = (0..4096)
        .into_par_iter()
        .map(|i| {
        .into_iter()
        .map(|_| {
            // Seed the 'from' account.
            let rando0 = Keypair::new();
            let tx = Transaction::system_move(
                &mint.keypair(),
                rando0.pubkey(),
                10_000,
                mint.last_id(),
                bank.last_id(),
                0,
            );
            assert!(bank.process_transaction(&tx).is_ok());
            assert_eq!(bank.process_transaction(&tx), Ok(()));

            // Seed the 'to' account and a cell for its signature.
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);

            let rando1 = Keypair::new();
            let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, last_id, 0);
            assert!(bank.process_transaction(&tx).is_ok());
            let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, bank.last_id(), 0);
            assert_eq!(bank.process_transaction(&tx), Ok(()));

            // Finally, return the transaction to the benchmark.
            tx
        }).collect();

    let mut id = bank.last_id();

    for _ in 0..(MAX_ENTRY_IDS - 1) {
        bank.register_entry_id(&id);
        id = hash(&id.as_ref())
    }

    bencher.iter(|| {
        // Since benchmarker runs this multiple times, we need to clear the signatures.
        bank.clear_signatures();
@@ -3,20 +3,21 @@ extern crate bincode;
extern crate rand;
extern crate rayon;
extern crate solana;
extern crate solana_program_interface;
extern crate solana_sdk;
extern crate test;

use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::bank::{Bank, MAX_ENTRY_IDS};
use solana::banking_stage::{BankingStage, NUM_THREADS};
use solana::entry::Entry;
use solana::hash::hash;
use solana::mint::Mint;
use solana::packet::to_packets_chunked;
use solana::signature::{KeypairUtil, Signature};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana_program_interface::pubkey::Pubkey;
use solana_sdk::pubkey::Pubkey;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
@@ -63,8 +64,8 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
            let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            new.keys[0] = Pubkey::new(&from[0..32]);
            new.keys[1] = Pubkey::new(&to[0..32]);
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            new.signature = Signature::new(&sig[0..64]);
            new
        }).collect();
@@ -72,7 +73,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    transactions.iter().for_each(|tx| {
        let fund = Transaction::system_move(
            &mint.keypair(),
            tx.keys[0],
            tx.account_keys[0],
            mint_total / txes as i64,
            mint.last_id(),
            0,
@@ -97,14 +98,131 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        }).collect();
    let (_stage, signal_receiver) = BankingStage::new(&bank, verified_receiver, Default::default());
    let (_stage, signal_receiver) = BankingStage::new(
        &bank,
        verified_receiver,
        Default::default(),
        &mint.last_id(),
        0,
        None,
    );

    let mut id = mint.last_id();
    for _ in 0..MAX_ENTRY_IDS {
        id = hash(&id.as_ref());
        bank.register_entry_id(&id);
    }

    bencher.iter(move || {
        // make sure the tx last id is still registered
        if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
            bank.register_entry_id(&mint.last_id());
        }
        for v in verified.chunks(verified.len() / NUM_THREADS) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes);
        bank.clear_signatures();
    });
}

#[bench]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
    let progs = 5;
    let txes = 1000 * NUM_THREADS;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);

    let (verified_sender, verified_receiver) = channel();
    let bank = Arc::new(Bank::new(&mint));
    let dummy = Transaction::system_move(
        &mint.keypair(),
        mint.keypair().pubkey(),
        1,
        mint.last_id(),
        0,
    );
    let transactions: Vec<_> = (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            let prog = new.instructions[0].clone();
            for i in 1..progs {
                //generate programs that spend to random keys
                let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
                let to_key = Pubkey::new(&to[0..32]);
                new.account_keys.push(to_key);
                assert_eq!(new.account_keys.len(), i + 2);
                new.instructions.push(prog.clone());
                assert_eq!(new.instructions.len(), i + 1);
                new.instructions[i].accounts[1] = 1 + i as u8;
                assert_eq!(new.key(i, 1), Some(&to_key));
                assert_eq!(
                    new.account_keys[new.instructions[i].accounts[1] as usize],
                    to_key
                );
            }
            assert_eq!(new.instructions.len(), progs);
            new.signature = Signature::new(&sig[0..64]);
            new
        }).collect();
    transactions.iter().for_each(|tx| {
        let fund = Transaction::system_move(
            &mint.keypair(),
            tx.account_keys[0],
            mint_total / txes as i64,
            mint.last_id(),
            0,
        );
        assert!(bank.process_transaction(&fund).is_ok());
    });
    //sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    //sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
        .into_iter()
        .map(|x| {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        }).collect();
    let (_stage, signal_receiver) = BankingStage::new(
        &bank,
        verified_receiver,
        Default::default(),
        &mint.last_id(),
        0,
        None,
    );

    let mut id = mint.last_id();
    for _ in 0..MAX_ENTRY_IDS {
        id = hash(&id.as_ref());
        bank.register_entry_id(&id);
    }

    bencher.iter(move || {
        // make sure the transactions are still valid
        if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
            bank.register_entry_id(&mint.last_id());
        }
        for v in verified.chunks(verified.len() / NUM_THREADS) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes);
        bank.clear_signatures();
        // make sure the tx last id is still registered
        bank.register_entry_id(&mint.last_id());
    });
}
29 benches/chacha.rs Normal file
@@ -0,0 +1,29 @@
#![feature(test)]

extern crate solana;
extern crate test;

use solana::chacha::chacha_cbc_encrypt_files;
use std::fs::remove_file;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use test::Bencher;

#[bench]
fn bench_chacha_encrypt(bench: &mut Bencher) {
    let in_path = Path::new("bench_chacha_encrypt_file_input.txt");
    let out_path = Path::new("bench_chacha_encrypt_file_output.txt.enc");
    {
        let mut in_file = File::create(in_path).unwrap();
        for _ in 0..1024 {
            in_file.write("123456foobar".as_bytes()).unwrap();
        }
    }
    bench.iter(move || {
        chacha_cbc_encrypt_files(in_path, out_path, "thetestkey".to_string()).unwrap();
    });

    remove_file(in_path).unwrap();
    remove_file(out_path).unwrap();
}
36 build.rs
@@ -1,33 +1,61 @@
use std::env;
use std::fs;
use std::process::Command;

fn main() {
    println!("cargo:rerun-if-changed=target/perf-libs");
    println!("cargo:rerun-if-changed=build.rs");

    // Ensure target/perf-libs/ exists. It's been observed that
    // a cargo:rerun-if-changed= directive with a non-existent
    // directory triggers a rebuild on every |cargo build| invocation
    fs::create_dir("target/perf-libs").unwrap_or_else(|err| {
    fs::create_dir_all("target/perf-libs").unwrap_or_else(|err| {
        if err.kind() != std::io::ErrorKind::AlreadyExists {
            panic!("Unable to create target/perf-libs: {:?}", err);
        }
    });

    let bpf_c = !env::var("CARGO_FEATURE_BPF_C").is_err();
    let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
    let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
    let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();

    if cuda || erasure {
    if bpf_c {
        let out_dir = "OUT_DIR=../../../target/".to_string()
            + &env::var("PROFILE").unwrap()
            + &"/bpf".to_string();

        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/bpf.mk");
        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/inc/solana_sdk.h");
        println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
        println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
        println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
        println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
        let status = Command::new("make")
            .current_dir("programs/bpf/c")
            .arg("all")
            .arg(&out_dir)
            .status()
            .expect("Failed to build C-based BPF programs");
        assert!(status.success());
    }
    if chacha || cuda || erasure {
        println!("cargo:rerun-if-changed=target/perf-libs");
        println!("cargo:rustc-link-search=native=target/perf-libs");
    }
    if chacha {
        println!("cargo:rerun-if-changed=target/perf-libs/libcpu-crypt.a");
    }
    if cuda {
        println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
        println!("cargo:rerun-if-changed=target/perf-libs/libcuda-crypt.a");
        println!("cargo:rustc-link-lib=static=cuda-crypt");
        println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
        println!("cargo:rustc-link-lib=dylib=cudart");
        println!("cargo:rustc-link-lib=dylib=cuda");
        println!("cargo:rustc-link-lib=dylib=cudadevrt");
    }
    if erasure {
        println!("cargo:rerun-if-changed=target/perf-libs/libgf_complete.so");
        println!("cargo:rerun-if-changed=target/perf-libs/libJerasure.so");
        println!("cargo:rustc-link-lib=dylib=Jerasure");
        println!("cargo:rustc-link-lib=dylib=gf_complete");
    }
@@ -29,4 +29,4 @@ maybe_cargo_install() {
maybe_cargo_install audit tree

_ cargo tree
_ cargo audit || true
_ cargo audit
16 ci/buildkite-secondary.yml Normal file
@@ -0,0 +1,16 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 40
    name: "snap [public]"
  - command: "ci/docker-solana/build.sh"
    timeout_in_minutes: 20
    name: "docker-solana"
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - command: "ci/publish-bpf-sdk.sh"
    timeout_in_minutes: 5
    name: "publish bpf sdk"
  - command: "ci/publish-solana-tar.sh"
    timeout_in_minutes: 15
    name: "publish solana release tar"
@@ -1,4 +0,0 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 40
    name: "snap [public]"
@@ -1,10 +1,10 @@
steps:
  - command: "ci/docker-run.sh solanalabs/rust:1.29.1 ci/test-stable.sh"
  - command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh"
    name: "stable [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 30
  - command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-bench.sh"
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-bench.sh"
    name: "bench [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"
@@ -12,7 +12,7 @@ steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-09-03 ci/test-nightly.sh || true"
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-nightly.sh"
    name: "nightly [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"
@@ -36,10 +36,7 @@ steps:
    timeout_in_minutes: 20
    name: "snap [public]"
  - wait
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - trigger: "solana-snap"
  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
    build:
16 ci/crate-version.sh Executable file
@@ -0,0 +1,16 @@
#!/bin/bash -e
#
# Outputs the current crate version
#

cd "$(dirname "$0")"/..

while read -r name equals value _; do
  if [[ $name = version && $equals = = ]]; then
    echo "${value//\"/}"
    exit 0
  fi
done < <(cat Cargo.toml)

echo Unable to locate version in Cargo.toml 1>&2
exit 1
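For example, run from the repository root; the value shown is what the Cargo.toml in this changeset would yield:

```bash
$ ci/crate-version.sh
0.10.5
```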
@@ -4,7 +4,6 @@ ARG date
RUN set -x && \
    rustup install nightly-$date && \
    rustup default nightly-$date && \
    rustup component add clippy-preview --toolchain=nightly-$date && \
    rustc --version && \
    cargo --version && \
    cargo +nightly-$date install cargo-cov
@@ -1,19 +1,21 @@
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.29.1
FROM rust:1.30.1

RUN set -x && \
    apt update && \
    apt-get install apt-transport-https && \
    echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
    echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main > /etc/apt/sources.list.d/llvm.list && \
    echo deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-7 main > /etc/apt/sources.list.d/llvm.list && \
    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
    wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
    apt update && \
    apt install -y \
      buildkite-agent \
      cmake \
      llvm-6.0 \
      lcov \
      libclang-common-7-dev \
      llvm-7 \
      rsync \
      sudo \
      && \
1 ci/docker-solana/.gitignore vendored Normal file
@@ -0,0 +1 @@
cargo-install/
13 ci/docker-solana/Dockerfile Normal file
@@ -0,0 +1,13 @@
FROM debian:stretch

# JSON RPC port
EXPOSE 8899/tcp

# Install libssl
RUN apt update && \
    apt-get install -y libssl-dev && \
    rm -rf /var/lib/apt/lists/*

COPY usr/bin /usr/bin/
ENTRYPOINT [ "/usr/bin/solana-entrypoint.sh" ]
CMD [""]
17 ci/docker-solana/README.md Normal file
@@ -0,0 +1,17 @@
## Minimal Solana Docker image
This image is automatically updated by CI

https://hub.docker.com/r/solanalabs/solana/

### Usage:
Run the latest beta image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:beta
```

Run the latest edge image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:edge
```

Port *8899* is the JSON RPC port, which is used by clients to communicate with the network.
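To check that the RPC port is actually serving, one option is a raw JSON RPC call; `getTransactionCount` is used here as an illustrative method and may differ between releases:

```bash
$ curl -X POST -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}' \
    http://localhost:8899
```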
39 ci/docker-solana/build.sh Executable file
@@ -0,0 +1,39 @@
#!/bin/bash -ex

cd "$(dirname "$0")"
eval "$(../channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi

if [[ -z $CHANNEL ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi

rm -rf usr/
../docker-run.sh solanalabs/rust:1.30.0 \
  cargo install --path . --root ci/docker-solana/usr
cp -f entrypoint.sh usr/bin/solana-entrypoint.sh
../../scripts/install-native-programs.sh usr/bin/

docker build -t solanalabs/solana:$CHANNEL .

maybeEcho=
if [[ -z $CI ]]; then
  echo "Not CI, skipping |docker push|"
  maybeEcho="echo"
else
  (
    set +x
    if [[ -n $DOCKER_PASSWORD && -n $DOCKER_USERNAME ]]; then
      echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
    fi
  )
fi
$maybeEcho docker push solanalabs/solana:$CHANNEL
23 ci/docker-solana/entrypoint.sh Executable file
@@ -0,0 +1,23 @@
#!/bin/bash -ex

export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1

solana-keygen -o /config/leader-keypair.json
solana-keygen -o /config/drone-keypair.json

solana-genesis --tokens=1000000000 --ledger /ledger < /config/drone-keypair.json
solana-fullnode-config --keypair=/config/leader-keypair.json -l > /config/leader-config.json

solana-drone --keypair /config/drone-keypair.json --network 127.0.0.1:8001 &
drone=$!
solana-fullnode --identity /config/leader-config.json --ledger /ledger/ &
fullnode=$!

abort() {
  kill "$drone" "$fullnode"
}

trap abort SIGINT SIGTERM
wait "$fullnode"
kill "$drone" "$fullnode"
36 ci/publish-bpf-sdk.sh Executable file
@@ -0,0 +1,36 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

version=$(./ci/crate-version.sh)

echo --- Creating tarball
(
  set -x
  rm -rf bpf-sdk/
  mkdir bpf-sdk/
  (
    echo "$version"
    git rev-parse HEAD
  ) > bpf-sdk/version.txt

  cp -ra programs/bpf/c/sdk/* bpf-sdk/

  tar jvcf bpf-sdk.tar.bz2 bpf-sdk/
)


echo --- AWS S3 Store

set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
  rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
  wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
  tar zxf s3cmd-2.0.1.tar.gz
fi

python ./s3cmd-2.0.1/s3cmd --acl-public put bpf-sdk.tar.bz2 \
  s3://solana-sdk/"$version"/bpf-sdk.tar.bz2

exit 0
@@ -2,7 +2,7 @@

cd "$(dirname "$0")/.."

if [[ -z "$BUILDKITE_TAG" ]]; then
if [[ -z "$BUILDKITE_TAG" && -z "$TRIGGERED_BUILDKITE_TAG" ]]; then
  # Skip publish if this is not a tagged release
  exit 0
fi
@@ -12,8 +12,18 @@ if [[ -z "$CRATES_IO_TOKEN" ]]; then
  exit 1
fi

# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
  bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
maybePublish="echo Publish skipped"
if [[ -n $CI ]]; then
  maybePublish="cargo publish --token $CRATES_IO_TOKEN"
fi

# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
for Cargo_toml in {sdk,programs/native/{bpf_loader,lua_loader,noop},.}/Cargo.toml; do
  # TODO: Ensure the published version matches the contents of BUILDKITE_TAG
  (
    set -x
    ci/docker-run.sh rust bash -exc "cd $(dirname "$Cargo_toml"); cargo package; $maybePublish"
  )
done

exit 0
73 ci/publish-metrics-dashboard.sh Executable file
@@ -0,0 +1,73 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

if [[ -z $BUILDKITE ]]; then
  echo BUILDKITE not defined
  exit 1
fi

if [[ -z $CHANNEL ]]; then
  CHANNEL=$(buildkite-agent meta-data get "channel" --default "")
fi

if [[ -z $CHANNEL ]]; then
  (
    cat <<EOF
steps:
  - block: "Select Dashboard"
    fields:
      - select: "Channel"
        key: "channel"
        options:
          - label: "stable"
            value: "stable"
          - label: "edge"
            value: "edge"
          - label: "beta"
            value: "beta"
  - command: "ci/$(basename "$0")"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi


ci/channel-info.sh
eval "$(ci/channel-info.sh)"

case $CHANNEL in
edge)
  CHANNEL_BRANCH=$EDGE_CHANNEL
  ;;
beta)
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
stable)
  CHANNEL_BRANCH=$STABLE_CHANNEL
  ;;
*)
  echo "Error: Invalid CHANNEL=$CHANNEL"
  exit 1
  ;;
esac

if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
  (
    cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      branch: "$CHANNEL_BRANCH"
      env:
        CHANNEL: "$CHANNEL"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

set -x
exec metrics/publish-metrics-dashboard.sh "$CHANNEL"
71 ci/publish-solana-tar.sh Executable file
@@ -0,0 +1,71 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
  DRYRUN="echo"
  CHANNEL=unknown
fi

eval "$(ci/channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi

if [[ -n "$BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$BUILDKITE_TAG
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
else
  CHANNEL_OR_TAG=$CHANNEL
fi

if [[ -z $CHANNEL_OR_TAG ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi


echo --- Creating tarball
(
  set -x
  rm -rf solana-release/
  mkdir solana-release/
  (
    echo "$CHANNEL_OR_TAG"
    git rev-parse HEAD
  ) > solana-release/version.txt

  cargo install --root solana-release
  ./scripts/install-native-programs.sh solana-release/bin
  ./fetch-perf-libs.sh
  cargo install --features=cuda --root solana-release-cuda
  cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda

  tar jvcf solana-release.tar.bz2 solana-release/
)

echo --- AWS S3 Store
if [[ -z $DRYRUN ]]; then
  (
    set -x
    if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
      rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
      $DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
      $DRYRUN tar zxf s3cmd-2.0.1.tar.gz
    fi

    $DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
      s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
  )
else
  echo Skipped due to DRYRUN
fi
exit 0
23 ci/snap.sh
@@ -2,6 +2,13 @@

cd "$(dirname "$0")/.."

if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
  DRYRUN="echo"
@@ -10,14 +17,14 @@ fi
eval "$(ci/channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  SNAP_CHANNEL=stable
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  SNAP_CHANNEL=edge
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  SNAP_CHANNEL=beta
  CHANNEL=beta
fi

if [[ -z $SNAP_CHANNEL ]]; then
if [[ -z $CHANNEL ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi
@@ -51,11 +58,13 @@ if [[ ! -x /usr/bin/multilog ]]; then
  sudo apt-get install -y daemontools
fi

echo --- build: $SNAP_CHANNEL channel
echo --- build: $CHANNEL channel
snapcraft

source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap

echo --- publish: $SNAP_CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
if [[ -z $DO_NOT_PUBLISH_SNAP ]]; then
  echo --- publish: $CHANNEL channel
  $DRYRUN snapcraft push solana_*.snap --release $CHANNEL
fi
18 ci/solana-testnet.yml Executable file
@@ -0,0 +1,18 @@
steps:
  - command: "ci/snap.sh"
    label: "create snap"

  - wait

  - command: "ci/testnet-automation.sh"
    label: "run testnet"
    agents:
      - "queue=testnet-deploy"

  - wait: ~
    continue_on_failure: true

  - command: "ci/testnet-automation-cleanup.sh"
    label: "delete testnet"
    agents:
      - "queue=testnet-deploy"
@@ -2,6 +2,11 @@

cd "$(dirname "$0")/.."

# shellcheck disable=SC1091
source ci/upload_ci_artifact.sh

eval "$(ci/channel-info.sh)"

ci/version-check.sh nightly
export RUST_BACKTRACE=1

@@ -12,6 +17,17 @@ _() {

set -o pipefail

UPLOAD_METRICS=""
TARGET_BRANCH=$BUILDKITE_BRANCH
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
  TARGET_BRANCH=$EDGE_CHANNEL
else
  UPLOAD_METRICS="upload"
fi

BENCH_FILE=bench_output.log
_ cargo bench --features=unstable --verbose -- -Z unstable-options --format=json | tee $BENCH_FILE
_ cargo run --release --bin solana-upload-perf -- $BENCH_FILE
BENCH_ARTIFACT=current_bench_results.log
_ cargo bench --features=unstable --verbose -- -Z unstable-options --format=json | tee "$BENCH_FILE"
_ cargo run --release --bin solana-upload-perf -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" >"$BENCH_ARTIFACT"

upload_ci_artifact "$BENCH_ARTIFACT"
@@ -1,6 +1,7 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."
source ci/upload_ci_artifact.sh

ci/version-check.sh nightly
export RUST_BACKTRACE=1
@@ -10,20 +11,50 @@ _() {
  "$@"
}

_ cargo build --verbose --features unstable
_ cargo test --verbose --features=unstable
# Uncomment this to run nightly test suit
# _ cargo test --verbose --features=unstable

exit 0
maybe_cargo_install() {
  for cmd in "$@"; do
    set +e
    cargo "$cmd" --help > /dev/null 2>&1
    declare exitcode=$?
    set -e
    if [[ $exitcode -eq 101 ]]; then
      _ cargo install cargo-"$cmd"
    fi
  done
}

# Coverage disabled (see issue #433)
_ cargo cov test
maybe_cargo_install cov

# Generate coverage data and report via unit-test suite.
_ cargo cov clean
_ cargo cov test --lib
_ cargo cov report

echo --- Coverage report:
ls -l target/cov/report/index.html
# Generate a coverage report with grcov via lcov.
if [[ ! -f ./grcov ]]; then
  uname=$(uname | tr '[:upper:]' '[:lower:]')
  uname_m=$(uname -m | tr '[:upper:]' '[:lower:]')
  name=grcov-${uname}-${uname_m}.tar.bz2
  _ wget "https://github.com/mozilla/grcov/releases/download/v0.2.3/${name}"
  _ tar -xjf "${name}"
fi
_ ./grcov . -t lcov > lcov.info
_ genhtml -o target/cov/report-lcov --show-details --highlight --ignore-errors source --legend lcov.info

# Upload to tarballs to buildkite.
_ cd target/cov && tar -cjf cov-report.tar.bz2 report/* && cd -
_ upload_ci_artifact "target/cov/cov-report.tar.bz2"

_ cd target/cov && tar -cjf lcov-report.tar.bz2 report-lcov/* && cd -
_ upload_ci_artifact "target/cov/lcov-report.tar.bz2"

if [[ -z "$CODECOV_TOKEN" ]]; then
  echo CODECOV_TOKEN undefined
else
  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
  true
  # TODO: Why doesn't codecov grok our lcov files?
  #bash <(curl -s https://codecov.io/bash) -X gcov
fi
@@ -20,7 +20,15 @@
  "$@"
}

_ cargo test --features=cuda,erasure
FEATURES=cuda,erasure,chacha
_ cargo test --verbose --features="$FEATURES" --lib

# Run integration tests serially
for test in tests/*.rs; do
  test=${test##*/} # basename x
  test=${test%.rs} # basename x .rs
  _ cargo test --verbose --jobs=1 --features="$FEATURES" --test="$test"
done

echo --- ci/localnet-sanity.sh
(
@@ -13,9 +13,26 @@ _() {

_ cargo fmt -- --check
_ cargo build --verbose
_ cargo test --verbose
_ cargo test --verbose --lib
_ cargo clippy -- --deny=warnings

# Run integration tests serially
for test in tests/*.rs; do
  test=${test##*/} # basename x
  test=${test%.rs} # basename x .rs
  _ cargo test --verbose --jobs=1 --test="$test"
done

# Run native program's tests
for program in programs/native/*; do
  echo --- "$program"
  (
    set -x
    cd "$program"
    cargo test --verbose
  )
done

echo --- ci/localnet-sanity.sh
(
  set -x
@@ -24,4 +41,4 @@ echo --- ci/localnet-sanity.sh
  USE_INSTALL=1 ci/localnet-sanity.sh
)

_ ci/audit.sh || true
_ ci/audit.sh
9 ci/testnet-automation-cleanup.sh Executable file
@@ -0,0 +1,9 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

echo --- find testnet configuration
net/gce.sh config -p testnet-automation

echo --- delete testnet
net/gce.sh delete -p testnet-automation
7 ci/testnet-automation-json-parser.py Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env python
import sys, json

data=json.load(sys.stdin)
print [\
([result['series'][0]['columns'][1].encode(), result['series'][0]['values'][0][1]]) \
for result in data['results']]
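A sketch of how the parser is driven; the input below is an assumed (not captured) InfluxDB response shape, and `python` must resolve to Python 2 for the script's `print` statement:

```bash
$ echo '{"results":[{"series":[{"columns":["time","mean_tps"],"values":[[0,1234]]}]}]}' |
    python ci/testnet-automation-json-parser.py
[['mean_tps', 1234]]
```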
80 ci/testnet-automation.sh Executable file
@@ -0,0 +1,80 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

echo --- downloading snap from build artifacts
buildkite-agent artifact download "solana_*.snap" .

# shellcheck disable=SC1091
source ci/upload_ci_artifact.sh

[[ -n $ITERATION_WAIT ]] || ITERATION_WAIT=300
[[ -n $NUMBER_OF_NODES ]] || NUMBER_OF_NODES="10 25 50 100"
[[ -n $LEADER_CPU_MACHINE_TYPE ]] ||
  LEADER_CPU_MACHINE_TYPE="n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100"
[[ -n $CLIENT_COUNT ]] || CLIENT_COUNT=2
[[ -n $TESTNET_TAG ]] || TESTNET_TAG=testnet-automation
[[ -n $TESTNET_ZONE ]] || TESTNET_ZONE=us-west1-b

launchTestnet() {
  declare nodeCount=$1
  echo --- setup "$nodeCount" node test
  net/gce.sh create \
    -n "$nodeCount" -c "$CLIENT_COUNT" \
    -G "$LEADER_CPU_MACHINE_TYPE" \
    -p "$TESTNET_TAG" -z "$TESTNET_ZONE"

  echo --- configure database
  net/init-metrics.sh -e

  echo --- start "$nodeCount" node test
  net/net.sh start -o noValidatorSanity -S solana_*.snap

  echo --- wait "$ITERATION_WAIT" seconds to complete test
  sleep "$ITERATION_WAIT"

  declare q_mean_tps='
    SELECT round(mean("sum_count")) AS "mean_tps" FROM (
      SELECT sum("count") AS "sum_count"
        FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
        WHERE time > now() - 300s GROUP BY time(1s)
    )'

  declare q_max_tps='
    SELECT round(max("sum_count")) AS "max_tps" FROM (
      SELECT sum("count") AS "sum_count"
        FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
        WHERE time > now() - 300s GROUP BY time(1s)
    )'

  declare q_mean_finality='
    SELECT round(mean("duration_ms")) as "mean_finality"
      FROM "testnet-automation"."autogen"."leader-finality"
      WHERE time > now() - 300s'

  declare q_max_finality='
    SELECT round(max("duration_ms")) as "max_finality"
      FROM "testnet-automation"."autogen"."leader-finality"
      WHERE time > now() - 300s'

  declare q_99th_finality='
    SELECT round(percentile("duration_ms", 99)) as "99th_finality"
      FROM "testnet-automation"."autogen"."leader-finality"
      WHERE time > now() - 300s'

  curl -G "https://metrics.solana.com:8086/query?u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
    --data-urlencode "db=$INFLUX_DATABASE" \
    --data-urlencode "q=$q_mean_tps;$q_max_tps;$q_mean_finality;$q_max_finality;$q_99th_finality" |
    python ci/testnet-automation-json-parser.py >>TPS"$nodeCount".log

  upload_ci_artifact TPS"$nodeCount".log
}

# This is needed, because buildkite doesn't let us define an array of numbers.
# The array is defined as a space separated string of numbers
# shellcheck disable=SC2206
nodes_count_array=($NUMBER_OF_NODES)

for n in "${nodes_count_array[@]}"; do
  launchTestnet "$n"
done
@@ -9,8 +9,10 @@ clientNodeCount=0
validatorNodeCount=10
publicNetwork=false
snapChannel=edge
tarChannelOrTag=edge
delete=false
enableGpu=false
useTarReleaseChannel=false

usage() {
  exitcode=0
@@ -19,16 +21,21 @@ usage() {
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 [name] [zone] [options...]
usage: $0 [name] [cloud] [zone] [options...]

Deploys a CD testnet

 name  - name of the network
 zone  - GCE to deploy the network into
 cloud - cloud provider to use (gce, ec2)
 zone  - cloud provider zone to deploy the network into

 options:
   -s edge|beta|stable         - Deploy the specified Snap release channel
                                 (default: $snapChannel)
   -t edge|beta|stable|vX.Y.Z  - Deploy the latest tarball release for the
                                 specified release channel (edge|beta|stable) or release tag
                                 (vX.Y.Z)
                                 (default: $tarChannelOrTag)
   -n [number]                 - Number of validator nodes (default: $validatorNodeCount)
   -c [number]                 - Number of client nodes (default: $clientNodeCount)
   -P                          - Use public network IP addresses (default: $publicNetwork)
@@ -44,12 +51,14 @@ EOF
}

netName=$1
zone=$2
cloudProvider=$2
zone=$3
[[ -n $netName ]] || usage
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n $zone ]] || usage "Zone not specified"
shift 2
shift 3

while getopts "h?p:Pn:c:s:gG:a:d" opt; do
while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
  case $opt in
  h | \?)
    usage
@@ -73,6 +82,17 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
      ;;
    esac
    ;;
  t)
    case $OPTARG in
    edge|beta|stable|v*)
      tarChannelOrTag=$OPTARG
      useTarReleaseChannel=true
      ;;
    *)
      usage "Invalid release channel: $OPTARG"
      ;;
    esac
    ;;
  g)
    enableGpu=true
    ;;
@@ -93,7 +113,7 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
done


gce_create_args=(
create_args=(
  -a "$leaderAddress"
  -c "$clientNodeCount"
  -n "$validatorNodeCount"
@@ -103,26 +123,26 @@ gce_create_args=(

if $enableGpu; then
  if [[ -z $leaderMachineType ]]; then
    gce_create_args+=(-g)
    create_args+=(-g)
  else
    gce_create_args+=(-G "$leaderMachineType")
    create_args+=(-G "$leaderMachineType")
  fi
fi

if $publicNetwork; then
  gce_create_args+=(-P)
  create_args+=(-P)
fi

set -x

echo --- gce.sh delete
time net/gce.sh delete -p "$netName"
echo "--- $cloudProvider.sh delete"
time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
if $delete; then
  exit 0
fi

echo --- gce.sh create
time net/gce.sh create "${gce_create_args[@]}"
echo "--- $cloudProvider.sh create"
time net/"$cloudProvider".sh create "${create_args[@]}"
net/init-metrics.sh -e

echo --- net.sh start
@@ -130,7 +150,18 @@ maybeRejectExtraNodes=
if ! $publicNetwork; then
  maybeRejectExtraNodes="-o rejectExtraNodes"
fi
maybeNoValidatorSanity=
if [[ -n $NO_VALIDATOR_SANITY ]]; then
  maybeNoValidatorSanity="-o noValidatorSanity"
fi
maybeNoLedgerVerify=
if [[ -n $NO_LEDGER_VERIFY ]]; then
  maybeNoLedgerVerify="-o noLedgerVerify"
fi
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes

if $useTarReleaseChannel; then
  time net/net.sh start -t "$tarChannelOrTag" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
else
  time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
fi
exit 0
360
ci/testnet-manager.sh
Executable file
360
ci/testnet-manager.sh
Executable file
@ -0,0 +1,360 @@
|
||||
#!/bin/bash -e

cd "$(dirname "$0")"/..

if [[ -z $BUILDKITE ]]; then
  echo BUILDKITE not defined
  exit 1
fi

if [[ -z $SOLANA_METRICS_PARTIAL_CONFIG ]]; then
  echo SOLANA_METRICS_PARTIAL_CONFIG not defined
  exit 1
fi

if [[ -z $TESTNET ]]; then
  TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
fi

if [[ -z $TESTNET_OP ]]; then
  TESTNET_OP=$(buildkite-agent meta-data get "testnet-operation" --default "")
fi

if [[ -z $TESTNET || -z $TESTNET_OP ]]; then
  (
    cat <<EOF
steps:
  - block: "Manage Testnet"
    fields:
      - select: "Network"
        key: "testnet"
        options:
          - label: "testnet"
            value: "testnet"
          - label: "testnet-perf"
            value: "testnet-perf"
          - label: "testnet-master"
            value: "testnet-master"
          - label: "testnet-master-perf"
            value: "testnet-master-perf"
          - label: "testnet-edge"
            value: "testnet-edge"
          - label: "testnet-edge-perf"
            value: "testnet-edge-perf"
          - label: "testnet-beta"
            value: "testnet-beta"
          - label: "testnet-beta-perf"
            value: "testnet-beta-perf"
      - select: "Operation"
        key: "testnet-operation"
        default: "sanity-or-restart"
        options:
          - label: "Sanity check. Restart network on failure"
            value: "sanity-or-restart"
          - label: "Start (or restart) the network"
            value: "start"
          - label: "Stop the network"
            value: "stop"
          - label: "Sanity check only"
            value: "sanity"
  - command: "ci/$(basename "$0")"
    agents:
      - "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"

ci/channel-info.sh
eval "$(ci/channel-info.sh)"

case $TESTNET in
testnet-edge|testnet-edge-perf|testnet-master|testnet-master-perf)
  CHANNEL_OR_TAG=edge
  CHANNEL_BRANCH=$EDGE_CHANNEL
  ;;
testnet-beta|testnet-beta-perf)
  CHANNEL_OR_TAG=beta
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
testnet|testnet-perf)
  if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
    CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$BETA_CHANNEL
  else
    CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$STABLE_CHANNEL
  fi
  ;;
*)
  echo "Error: Invalid TESTNET=$TESTNET"
  exit 1
  ;;
esac

if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
  (
    cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      branch: "$CHANNEL_BRANCH"
      env:
        TESTNET: "$TESTNET"
        TESTNET_OP: "$TESTNET_OP"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi


sanity() {
  echo "--- sanity $TESTNET"
  case $TESTNET in
  testnet-edge)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-edge-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh edge-perf-testnet-solana-com ec2 us-west-2b
    )
    ;;
  testnet-beta)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-beta-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh beta-perf-testnet-solana-com ec2 us-west-2b
    )
    ;;
  testnet-master)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh master-testnet-solana-com gce us-west1-b
    )
    ;;
  testnet-master-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh master-perf-testnet-solana-com gce us-west1-b
    )
    ;;
  testnet)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-sanity.sh testnet-solana-com gce us-east1-c
      ci/testnet-sanity.sh testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-sanity.sh perf-testnet-solana-com ec2 us-east-1a
      ci/testnet-sanity.sh perf-testnet-solana-com gce us-west1-b
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}


start() {
  declare maybeDelete=$1
  if [[ -z $maybeDelete ]]; then
    echo "--- start $TESTNET"
  else
    echo "--- stop $TESTNET"
  fi

  case $TESTNET in
  testnet-edge)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
        -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0ccd4f2239886fa94 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-edge-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh edge-perf-testnet-solana-com ec2 us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-beta)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0f286cf8a0771ce35 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-beta-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh beta-perf-testnet-solana-com ec2 us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-master)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh master-testnet-solana-com gce us-west1-b \
        -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a master-testnet-solana-com \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-master-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh master-perf-testnet-solana-com gce us-west1-b \
        -G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
        -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-deploy.sh testnet-solana-com gce us-east1-c \
      #  -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
      #  ${maybeDelete:+-d}
      ci/testnet-deploy.sh testnet-solana-com ec2 us-west-1a \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0fa502bf95f6f18b2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh perf-testnet-solana-com gce us-west1-b \
        -G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
        -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
      #ci/testnet-deploy.sh perf-testnet-solana-com ec2 us-east-1a \
      #  -g \
      #  -t "$CHANNEL_OR_TAG" -c 2 \
      #  ${maybeDelete:+-d}
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}

stop() {
  start delete
}

case $TESTNET_OP in
sanity)
  sanity
  ;;
start)
  start
  ;;
stop)
  stop
  ;;
sanity-or-restart)
  if sanity; then
    echo Pass
  else
    echo "Sanity failed, restarting the network"
    echo "^^^ +++"
    start
  fi
  ;;
esac

echo --- fin
exit 0
@@ -9,11 +9,13 @@ usage() {
     echo "Error: $*"
   fi
   cat <<EOF
-usage: $0 [name]
+usage: $0 [name] [cloud] [zone]

Sanity check a CD testnet

 name  - name of the network
+cloud - cloud provider to use (gce, ec2)
+zone  - cloud provider zone of the network

 Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
 metrics
@@ -22,14 +24,18 @@ EOF
 }

 netName=$1
+cloudProvider=$2
+zone=$3
 [[ -n $netName ]] || usage ""
+[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
+[[ -n $zone ]] || usage "Zone not specified"

 set -x
-echo --- gce.sh config
-net/gce.sh config -p "$netName"
+echo "--- $cloudProvider.sh config"
+timeout 5m net/"$cloudProvider".sh config -p "$netName" -z "$zone"
 net/init-metrics.sh -e
 echo --- net.sh sanity
-net/net.sh sanity \
+timeout 5m net/net.sh sanity \
   ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
   ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
   ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
@@ -19,12 +19,12 @@ require() {

 case ${1:-stable} in
 nightly)
-  require rustc 1.30.[0-9]+-nightly
-  require cargo 1.29.[0-9]+-nightly
+  require rustc 1.31.[0-9]+-nightly
+  require cargo 1.31.[0-9]+-nightly
   ;;
 stable)
-  require rustc 1.29.[0-9]+
-  require cargo 1.29.[0-9]+
+  require rustc 1.30.[0-9]+
+  require cargo 1.30.[0-9]+
   ;;
 *)
   echo Error: unknown argument: "$1"
@@ -1,22 +0,0 @@
[package]
name = "solana_program_interface"
version = "0.1.0"
authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
    "Jack May <jack@solana.com>",
]

[dependencies]
bincode = "1.0.0"
bs58 = "0.2.0"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"

@@ -1,29 +0,0 @@
use pubkey::Pubkey;

/// An Account with userdata that is stored on chain
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct Account {
    /// tokens in the account
    pub tokens: i64,
    /// user data
    /// A transaction can write to its userdata
    pub userdata: Vec<u8>,
    /// contract id this contract belongs to
    pub program_id: Pubkey,
}

impl Account {
    pub fn new(tokens: i64, space: usize, program_id: Pubkey) -> Account {
        Account {
            tokens,
            userdata: vec![0u8; space],
            program_id,
        }
    }
}

#[derive(Debug)]
pub struct KeyedAccount<'a> {
    pub key: &'a Pubkey,
    pub account: &'a mut Account,
}

114 doc/json-rpc.md
@@ -5,17 +5,23 @@ Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.o

To interact with a Solana node inside a JavaScript application, use the [solana-web3.js](https://github.com/solana-labs/solana-web3.js) library, which gives a convenient interface for the RPC methods.

-RPC Endpoint
+RPC HTTP Endpoint
---

**Default port:** 8899
eg. http://localhost:8899, http://192.168.1.88:8899
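As a quick smoke test of the HTTP endpoint, any of the methods listed below can be exercised with plain `curl`; `getTransactionCount` is a convenient no-argument choice (the localhost address is only an example):

```bash
curl -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}' \
  http://localhost:8899
```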

RPC PubSub WebSocket Endpoint
---

**Default port:** 8900
eg. ws://localhost:8900, ws://192.168.1.88:8900


Methods
---

* [confirmTransaction](#confirmtransaction)
* [getAddress](#getaddress)
* [getBalance](#getbalance)
* [getAccountInfo](#getaccountinfo)
* [getLastId](#getlastid)
@@ -23,6 +29,13 @@ Methods
 * [getTransactionCount](#gettransactioncount)
 * [requestAirdrop](#requestairdrop)
 * [sendTransaction](#sendtransaction)
 * [startSubscriptionChannel](#startsubscriptionchannel)

+* [Subscription Websocket](#subscription-websocket)
+  * [accountSubscribe](#accountsubscribe)
+  * [accountUnsubscribe](#accountunsubscribe)
+  * [signatureSubscribe](#signaturesubscribe)
+  * [signatureUnsubscribe](#signatureunsubscribe)

 Request Formatting
 ---
@@ -155,6 +168,7 @@ events.
 * `Confirmed` - Transaction was successful
 * `SignatureNotFound` - Unknown transaction
 * `ProgramRuntimeError` - An error occurred in the program that processed this Transaction
+* `AccountInUse` - Another Transaction had a write lock on one of the Accounts specified in this Transaction. The Transaction may succeed if retried
 * `GenericFailure` - Some other error occurred. **Note**: In the future new Transaction statuses may be added to this list. It's safe to assume that all new statuses will be more specific error conditions that previously presented as `GenericFailure`

 ##### Example:
@@ -227,3 +241,99 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
```

---

### Subscription Websocket
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
- Submit subscription requests to the websocket using the methods below
- Multiple subscriptions may be active at once

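For interactive experimentation, any generic websocket client works; `wscat` (an assumption here, not something this documentation prescribes) is one convenient option. Connect, then paste a request from the examples below:

```bash
# connect to a local node's PubSub port, then paste a subscription request, e.g.:
# {"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
wscat -c ws://localhost:8900
```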
---

### accountSubscribe
Subscribe to an account to receive notifications when the userdata for a given account public key changes

##### Parameters:
* `string` - account Pubkey, as base-58 encoded string

##### Results:
* `integer` - Subscription id (needed to unsubscribe)

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```

---

### accountUnsubscribe
Unsubscribe from account userdata change notifications

##### Parameters:
* `integer` - id of account Subscription to cancel

##### Results:
* `bool` - unsubscribe success message

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```

---

### signatureSubscribe
Subscribe to a transaction signature to receive notification when the transaction is confirmed
On `signatureNotification`, the subscription is automatically cancelled

##### Parameters:
* `string` - Transaction Signature, as base-58 encoded string

##### Results:
* `integer` - subscription id (needed to unsubscribe)

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
```

---

### signatureUnsubscribe
Unsubscribe from signature confirmation notifications

##### Parameters:
* `integer` - id of signature subscription to cancel

##### Results:
* `bool` - unsubscribe success message

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
@@ -4,14 +4,18 @@ Currently we have three testnets:
 * `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
 * `testnet-perf` - private beta channel testnet with clients trying to flood the network
   with transactions until failure. Runs 24/7
-* `testnet-master` - private edge channel testnet with clients trying to flood the network
+* `testnet-master` - public edge channel testnet accessible via master.testnet.solana.com. Runs 24/7
+* `testnet-master-perf` - private edge channel testnet with clients trying to flood the network
   with transactions until failure. Runs on weekday mornings for a couple hours

 ## Deploy process

-They are deployed with the `ci/testnet-deploy.sh` script. There is a scheduled buildkite job which runs to do the deploy,
-look at `testnet-deploy` to see the agent which ran it and the logs. There is also a manual job to do the deploy manually.
-Validators are selected based on their machine name and everyone gets the binaries installed from snap.
+They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
+buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
+Each testnet can be manually manipulated from buildkite as well. The `-perf`
+testnets use a release tarball while the non-`-perf` builds use the snap build
+(we've observed that the snap build runs slower than a tarball but this has yet
+to be root caused).

 ## Where are the testnet logs?

@@ -29,7 +33,8 @@ $ net/ssh.sh
 for log location details

 ## How do I reset the testnet?
-Manually trigger the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) pipeline
+Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
 and when prompted select the desired testnet

 ## How can I scale the tx generation rate?

@@ -43,5 +48,5 @@ Currently, a merged PR is the only way to test a change on the testnet. But you
 can run your own testnet using the scripts in the `net/` directory.

 ## Adjusting the number of clients or validators on the testnet
-Through the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) settings.
+Edit `ci/testnet-manager.sh`
@@ -15,7 +15,7 @@ mkdir -p target/perf-libs
 cd target/perf-libs
 (
   set -x
-  curl https://solana-perf.s3.amazonaws.com/v0.9.0/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
+  curl https://solana-perf.s3.amazonaws.com/v0.10.3/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
 )

 if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then

39 metrics/README.md Normal file
@@ -0,0 +1,39 @@
# Metrics

## Testnet Grafana Dashboard

There are three versions of the testnet dashboard, corresponding to the three
release channels:
* https://metrics.solana.com:3000/d/testnet-edge/testnet-monitor-edge
* https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta
* https://metrics.solana.com:3000/d/testnet/testnet-monitor

The dashboard for each channel is defined from the
`metrics/testnet-monitor.json` source file in the git branch associated with
that channel, and deployed by automation running `ci/publish-metrics-dashboard.sh`.

A deploy can be triggered at any time via the `New Build` button of
https://buildkite.com/solana-labs/publish-metrics-dashboard.

### Modifying a Dashboard

Dashboard updates are accomplished by modifying `metrics/testnet-monitor.json`;
**manual edits made directly in Grafana will be overwritten**.

1. Open the desired dashboard in Grafana
2. Create a development copy of the dashboard by selecting `Save As..` in the
   `Settings` menu for the dashboard
3. Edit the dashboard as desired
4. Extract the JSON Model by selecting `JSON Model` in the `Settings` menu. Copy the JSON to the clipboard
   and paste it into `metrics/testnet-monitor.json`
5. Delete your development dashboard: `Settings` => `Delete`

### Deploying a Dashboard Manually

If you need to immediately deploy a dashboard using the contents of
`metrics/testnet-monitor.json` in your local workspace,
```
$ export GRAFANA_API_TOKEN="an API key from https://metrics.solana.com:3000/org/apikeys"
$ metrics/publish-metrics-dashboard.sh (edge|beta|stable)
```
Note that automation will eventually overwrite your manual deploy.

69 metrics/adjust-dashboard-for-channel.py Executable file
@@ -0,0 +1,69 @@
#!/usr/bin/env python3
#
# Adjusts the testnet monitor dashboard for the specified release channel
#

import sys
import json

if len(sys.argv) != 3:
    print('Error: Dashboard or Channel not specified')
    sys.exit(1)

dashboard_json = sys.argv[1]
channel = sys.argv[2]
if channel not in ['edge', 'beta', 'stable']:
    print('Error: Unknown channel:', channel)
    sys.exit(2)

with open(dashboard_json, 'r') as read_file:
    data = json.load(read_file)

if channel == 'stable':
    # Stable dashboard only allows the user to select between the stable
    # testnet databases
    data['title'] = 'Testnet Monitor'
    data['uid'] = 'testnet'
    data['templating']['list'] = [{'allValue': None,
                                   'current': {'text': 'testnet',
                                               'value': 'testnet'},
                                   'hide': 1,
                                   'includeAll': False,
                                   'label': 'Testnet',
                                   'multi': False,
                                   'name': 'testnet',
                                   'options': [{'selected': False,
                                                'text': 'testnet',
                                                'value': 'testnet'},
                                               {'selected': True,
                                                'text': 'testnet-perf',
                                                'value': 'testnet-perf'}],
                                   'query': 'testnet,testnet-perf',
                                   'type': 'custom'}]
else:
    # Non-stable dashboards allow the user to select any of the testnet
    # databases
    data['title'] = 'Testnet Monitor ({})'.format(channel)
    data['uid'] = 'testnet-' + channel
    data['templating']['list'] = [{'allValue': None,
                                   'current': {'text': 'testnet',
                                               'value': 'testnet'},
                                   'datasource': 'Solana Metrics (read-only)',
                                   'hide': 1,
                                   'includeAll': False,
                                   'label': 'Testnet',
                                   'multi': False,
                                   'name': 'testnet',
                                   'options': [],
                                   'query': 'show databases',
                                   'refresh': 1,
                                   'regex': 'testnet.*',
                                   'sort': 1,
                                   'tagValuesQuery': '',
                                   'tags': [],
                                   'tagsQuery': '',
                                   'type': 'query',
                                   'useTags': False}]

with open(dashboard_json, 'w') as write_file:
    json.dump(data, write_file, indent=2)
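A typical invocation mirrors how `publish-metrics-dashboard.sh` below calls it, with the dashboard JSON file first and one of the three channel names second:

```bash
# rewrite testnet-monitor.json in place for the edge channel
./adjust-dashboard-for-channel.py testnet-monitor.json edge
```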

15 metrics/grafcli.conf Normal file
@@ -0,0 +1,15 @@
[grafcli]
editor = vim
mergetool = vimdiff
verbose = on
force = on

[resources]

[hosts]
metrics = on

[metrics]
type = api
url = https://metrics.solana.com:3000/api
ssl = off

71 metrics/publish-metrics-dashboard.sh Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")"

CHANNEL=$1
if [[ -z $CHANNEL ]]; then
  echo "usage: $0 [channel]"
  exit 1
fi

case $CHANNEL in
edge)
  DASHBOARD=testnet-monitor-edge
  ;;
beta)
  DASHBOARD=testnet-monitor-beta
  ;;
stable)
  DASHBOARD=testnet-monitor
  ;;
*)
  echo "Error: Invalid CHANNEL=$CHANNEL"
  exit 1
  ;;
esac


if [[ -z $GRAFANA_API_TOKEN ]]; then
  echo Error: GRAFANA_API_TOKEN not defined
  exit 1
fi

DASHBOARD_JSON=./testnet-monitor.json
if [[ ! -r $DASHBOARD_JSON ]]; then
  echo Error: $DASHBOARD_JSON not found
  exit 1
fi

(
  set -x
  ./adjust-dashboard-for-channel.py "$DASHBOARD_JSON" "$CHANNEL"
)

rm -rf venv
python3 -m venv venv
# shellcheck source=/dev/null
source venv/bin/activate

echo --- Fetch/build grafcli
(
  set -x
  git clone git@github.com:mvines/grafcli.git -b experimental-v5 venv/grafcli
  cd venv/grafcli
  python3 setup.py install
)

echo --- Take a backup of existing dashboard if possible
(
  set -x +e
  grafcli export remote/metrics/$DASHBOARD $DASHBOARD_JSON.org
  grafcli rm remote/metrics/$DASHBOARD
  :
)

echo --- Publish $DASHBOARD_JSON to $DASHBOARD
(
  set -x
  grafcli import $DASHBOARD_JSON remote/metrics
)

exit 0

5576 metrics/testnet-monitor.json Normal file (diff suppressed because the file is too large)
@@ -49,8 +49,6 @@ elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
   declare program="$1"
   printf "solana-%s" "$program"
 }
-  # CUDA was/wasn't selected at build time, can't affect CUDA state here
-  unset SOLANA_CUDA
 else
   solana_program() {
     declare program="$1"
@@ -104,16 +102,16 @@ tune_networking() {
     # test the existence of the sysctls before trying to set them
     # go ahead and return true and don't exit if these calls fail
     sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.rmem_max=67108864 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.rmem_max=1610612736 1>/dev/null 2>/dev/null

     sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.rmem_default=1610612736 1>/dev/null 2>/dev/null

     sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.wmem_max=67108864 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.wmem_max=1610612736 1>/dev/null 2>/dev/null

     sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.wmem_default=26214400 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.wmem_default=1610612736 1>/dev/null 2>/dev/null
   ) || true
 fi
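A minimal sanity check, assuming a Linux host, that the raised buffer limits actually took effect (this is not part of the scripts above, just a way to verify them):

```bash
# query both limits in one call; after tuning, both should report 1610612736
sysctl net.core.rmem_max net.core.wmem_max
```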
70 net/gce.sh
@@ -11,23 +11,21 @@ gce)
   # shellcheck source=net/scripts/gce-provider.sh
   source "$here"/scripts/gce-provider.sh

-  imageName="ubuntu-16-04-cuda-9-2-new"
   cpuLeaderMachineType=n1-standard-16
   gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
   leaderMachineType=$cpuLeaderMachineType
-  validatorMachineType=n1-standard-4
+  validatorMachineType=n1-standard-16
   clientMachineType=n1-standard-16
   ;;
 ec2)
   # shellcheck source=net/scripts/ec2-provider.sh
   source "$here"/scripts/ec2-provider.sh

-  imageName="ami-0466e26ccc0e752c1"
   cpuLeaderMachineType=m4.4xlarge
   gpuLeaderMachineType=p2.xlarge
   leaderMachineType=$cpuLeaderMachineType
-  validatorMachineType=m4.xlarge
-  clientMachineType=m4.4xlarge
+  validatorMachineType=m4.2xlarge
+  clientMachineType=m4.2xlarge
   ;;
 *)
   echo "Error: Unknown cloud provider: $cloudProvider"
@@ -118,7 +116,7 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
   ;;
 g)
   enableGpu=true
-  leaderMachineType="$gpuLeaderMachineType"
+  leaderMachineType=$gpuLeaderMachineType
   ;;
 G)
   enableGpu=true
@@ -131,14 +129,53 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
   bootDiskType=$OPTARG
   ;;
 *)
-  usage "Error: unhandled option: $opt"
+  usage "unhandled option: $opt"
   ;;
 esac
 done
 shift $((OPTIND - 1))

 [[ -z $1 ]] || usage "Unexpected argument: $1"
-sshPrivateKey="$netConfigDir/id_$prefix"
+if [[ $cloudProvider = ec2 ]]; then
+  # EC2 keys can't be retrieved from running instances like GCE keys can, so save
+  # EC2 keys in the user's home directory so |./ec2.sh config| can at least be
+  # used on the same host that ran |./ec2.sh create|.
+  sshPrivateKey="$HOME/.ssh/solana-net-id_$prefix"
+else
+  sshPrivateKey="$netConfigDir/id_$prefix"
+fi
+
+case $cloudProvider in
+gce)
+  if $enableGpu; then
+    # TODO: GPU image is still 16.04-based pending resolution of
+    # https://github.com/solana-labs/solana/issues/1702
+    imageName="ubuntu-16-04-cuda-9-2-new"
+  else
+    imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
+  fi
+  ;;
+ec2)
+  # Deep Learning AMI (Ubuntu 16.04-based)
+  case $region in # (region global variable is set by cloud_SetZone)
+  us-east-1)
+    imageName="ami-047daf3f2b162fc35"
+    ;;
+  us-west-1)
+    imageName="ami-08c8c7c4a57a6106d"
+    ;;
+  us-west-2)
+    imageName="ami-0b63040ee445728bf"
+    ;;
+  *)
+    usage "Unsupported region: $region"
+    ;;
+  esac
+  ;;
+*)
+  echo "Error: Unknown cloud provider: $cloudProvider"
+  ;;
+esac


# cloud_ForEachInstance [cmd] [extra args to cmd]
@@ -206,13 +243,18 @@ EOF

   echo "Waiting for $name to finish booting..."
   (
-    for i in $(seq 1 30); do
-      if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.instance-startup-complete"); then
-        break
+    set -x +e
+    for i in $(seq 1 60); do
+      timeout 20s ssh "${sshOptions[@]}" "$publicIp" "ls -l /.instance-startup-complete"
+      ret=$?
+      if [[ $ret -eq 0 ]]; then
+        exit 0
       fi
       sleep 2
       echo "Retry $i..."
     done
+    echo "$name failed to boot."
+    exit 1
   )
   echo "$name has booted."
 }
@@ -230,7 +272,7 @@ EOF
   IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")

   # Try to ping the machine first.
-  timeout 60s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
+  timeout 90s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"

   if [[ ! -r $sshPrivateKey ]]; then
     echo "Fetching $sshPrivateKey from $leaderName"
@@ -376,6 +418,10 @@ $(
     install-earlyoom.sh \
     install-libssl-compatability.sh \
     install-rsync.sh \
+    network-config.sh \
+    remove-docker-interface.sh \
+    update-default-cuda.sh \

 )

 cat > /etc/motd <<EOM
82 net/net.sh
@@ -23,10 +23,14 @@ Operate a configured testnet
 restart  - Shortcut for stop then start

 start-specific options:
-  -S [snapFilename]          - Deploy the specified Snap file
-  -s edge|beta|stable        - Deploy the latest Snap on the specified Snap release channel
-  -f [cargoFeatures]         - List of |cargo --features=| to activate
-                               (ignored if -s or -S is specified)
+  -S [snapFilename]          - Deploy the specified Snap file
+  -s edge|beta|stable        - Deploy the latest Snap on the specified Snap release channel
+  -T [tarFilename]           - Deploy the specified release tarball
+  -t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
+                               specified release channel (edge|beta|stable) or release tag
+                               (vX.Y.Z)
+  -f [cargoFeatures]         - List of |cargo --features=| to activate
+                               (ignored if -s or -S is specified)

 Note: if RUST_LOG is set in the environment it will be propagated into the
 network nodes.
@@ -44,6 +48,7 @@ EOF
 }

 snapChannel=
+releaseChannel=
 snapFilename=
 deployMethod=local
 sanityExtraArgs=
@@ -53,7 +58,7 @@ command=$1
 [[ -n $command ]] || usage
 shift

-while getopts "h?S:s:o:f:" opt; do
+while getopts "h?S:s:T:t:o:f:" opt; do
   case $opt in
   h | \?)
     usage
@@ -74,6 +79,22 @@ while getopts "h?S:s:o:f:" opt; do
     ;;
   esac
   ;;
+T)
+  tarballFilename=$OPTARG
+  [[ -f $tarballFilename ]] || usage "Tarball not readable: $tarballFilename"
+  deployMethod=tar
+  ;;
+t)
+  case $OPTARG in
+  edge|beta|stable|v*)
+    releaseChannel=$OPTARG
+    deployMethod=tar
+    ;;
+  *)
+    usage "Invalid release channel: $OPTARG"
+    ;;
+  esac
+  ;;
 f)
   cargoFeatures=$OPTARG
   ;;
@@ -110,6 +131,7 @@ build() {
     set -x
     rm -rf farf
     $MAYBE_DOCKER cargo install --features="$cargoFeatures" --root farf
+    ./scripts/install-native-programs.sh farf/
   )
   echo "Build took $SECONDS seconds"
 }
@@ -138,6 +160,9 @@ startLeader() {
   snap)
     rsync -vPrc -e "ssh ${sshOptions[*]}" "$snapFilename" "$ipAddress:~/solana/solana.snap"
     ;;
+  tar)
+    rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
+    ;;
   local)
     rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
     ;;
@@ -159,7 +184,7 @@ startValidator() {
   declare ipAddress=$1
   declare logFile="$netLogDir/validator-$ipAddress.log"

-  echo "--- Starting validator: $leaderIp"
+  echo "--- Starting validator: $ipAddress"
   echo "start log: $logFile"
   (
     set -x
@@ -181,7 +206,7 @@ startClient() {
     set -x
     startCommon "$ipAddress"
     ssh "${sshOptions[@]}" -f "$ipAddress" \
-      "./solana/net/remote/remote-client.sh $deployMethod $entrypointIp $expectedNodeCount \"$RUST_LOG\""
+      "./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \"$RUST_LOG\""
   ) >> "$logFile" 2>&1 || {
     cat "$logFile"
     echo "^^^ +++"
@@ -196,10 +221,11 @@ sanity() {
   echo "--- Sanity"
   $metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"

+  declare host=$leaderIp # TODO: maybe use ${validatorIpList[0]} ?
   (
     set -x
     # shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
-    ssh "${sshOptions[@]}" "$leaderIp" \
+    ssh "${sshOptions[@]}" "$host" \
       "./solana/net/remote/remote-sanity.sh $sanityExtraArgs"
   ) || ok=false

@@ -219,13 +245,17 @@ start() {
         set -ex;
         apt-get -qq update;
         apt-get -qq -y install snapd;
-        snap download --channel=$snapChannel solana;
+        until snap download --channel=$snapChannel solana; do
+          sleep 1;
+        done
       "
     )
   else
     (
       cd "$SOLANA_ROOT"
-      snap download --channel="$snapChannel" solana
+      until snap download --channel="$snapChannel" solana; do
+        sleep 1
+      done
     )
   fi
   snapFilename="$(echo "$SOLANA_ROOT"/solana_*.snap)"
@@ -235,6 +265,17 @@ start() {
     }
   fi
   ;;
+tar)
+  if [[ -n $releaseChannel ]]; then
+    rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
+    cd "$SOLANA_ROOT"
+
+    set -x
+    curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
+    tarballFilename=solana-release.tar.bz2
+  fi
+  tar jxvf $tarballFilename
+  ;;
 local)
   build
   ;;
@@ -286,15 +327,28 @@ start() {
   clientDeployTime=$SECONDS
   $metricsWriteDatapoint "testnet-deploy net-start-complete=1"

-  if [[ $deployMethod = "snap" ]]; then
-    declare networkVersion=unknown
+  declare networkVersion=unknown
+  case $deployMethod in
+  snap)
     IFS=\ read -r _ networkVersion _ < <(
       ssh "${sshOptions[@]}" "$leaderIp" \
         "snap info solana | grep \"^installed:\""
     )
     networkVersion=${networkVersion/0+git./}
-    $metricsWriteDatapoint "testnet-deploy version=\"$networkVersion\""
-  fi
+    ;;
+  tar)
+    networkVersion="$(
+      tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown"
+    )"
+    ;;
+  local)
+    networkVersion="$(git rev-parse HEAD || echo local-unknown)"
+    ;;
+  *)
+    usage "Internal error: invalid deployMethod: $deployMethod"
+    ;;
+  esac
+  $metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""

   echo
   echo "+++ Deployment Successful"
@@ -6,8 +6,7 @@ echo "$(date) | $0 $*" > client.log

 deployMethod="$1"
 entrypointIp="$2"
-numNodes="$3"
-RUST_LOG="$4"
+RUST_LOG="$3"
 export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info

 missing() {
@@ -17,7 +16,6 @@ missing() {

 [[ -n $deployMethod ]] || missing deployMethod
 [[ -n $entrypointIp ]] || missing entrypointIp
-[[ -n $numNodes ]] || missing numNodes

 source net/common.sh
 loadConfigFile
@@ -35,7 +33,7 @@ snap)
   solana_bench_tps=/snap/bin/solana.bench-tps
   solana_keygen=/snap/bin/solana.keygen
   ;;
-local)
+local|tar)
   PATH="$HOME"/.cargo/bin:"$PATH"
   export USE_INSTALL=1
   export SOLANA_DEFAULT_METRICS_RATE=1
@@ -58,8 +56,7 @@ clientCommand="\
 $solana_bench_tps \
   --network $entrypointIp:8001 \
   --identity client.json \
-  --num-nodes $numNodes \
-  --duration 600 \
+  --duration 7500 \
   --sustained \
   --threads $threadCount \
 "
@@ -35,7 +35,6 @@ else
   setupArgs="-l"
 fi

-
 case $deployMethod in
 snap)
   SECONDS=0
@@ -43,12 +42,13 @@ snap)
   net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/solana/solana.snap" .
   sudo snap install solana.snap --devmode --dangerous

+  # shellcheck disable=SC2089
   commonNodeConfig="\
-    leader-ip=$entrypointIp \
+    leader-ip=\"$entrypointIp\" \
     default-metrics-rate=1 \
-    metrics-config=$SOLANA_METRICS_CONFIG \
-    rust-log=$RUST_LOG \
-    setup-args=$setupArgs \
+    metrics-config=\"$SOLANA_METRICS_CONFIG\" \
+    rust-log=\"$RUST_LOG\" \
+    setup-args=\"$setupArgs\" \
   "

   if [[ -e /dev/nvidia0 ]]; then
@@ -67,7 +67,7 @@ snap)
   logmarker="solana deploy $(date)/$RANDOM"
   logger "$logmarker"

-  # shellcheck disable=SC2086 # Don't want to double quote "$nodeConfig"
+  # shellcheck disable=SC2086,SC2090 # Don't want to double quote "$nodeConfig"
   sudo snap set solana $nodeConfig
   snap info solana
   sudo snap get solana
@@ -77,20 +77,25 @@ snap)

   echo "Succeeded in ${SECONDS} seconds"
   ;;
-local)
+local|tar)
   PATH="$HOME"/.cargo/bin:"$PATH"
   export USE_INSTALL=1
   export RUST_LOG
   export SOLANA_DEFAULT_METRICS_RATE=1

   ./fetch-perf-libs.sh
-  export LD_LIBRARY_PATH="$PWD/target/perf-libs:$LD_LIBRARY_PATH"
+  export LD_LIBRARY_PATH="$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
+  echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"

   scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
   scripts/net-stats.sh > net-stats.log 2>&1 &

   case $nodeType in
   leader)
+    if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
+      echo Selecting solana-fullnode-cuda
+      export SOLANA_CUDA=1
+    fi
     ./multinode-demo/setup.sh -t leader $setupArgs
     ./multinode-demo/drone.sh > drone.log 2>&1 &
     ./multinode-demo/leader.sh > leader.log 2>&1 &
@@ -98,6 +103,11 @@ local)
   validator)
     net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/

+    if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
+      echo Selecting solana-fullnode-cuda
+      export SOLANA_CUDA=1
+    fi
+
     ./multinode-demo/setup.sh -t validator $setupArgs
     ./multinode-demo/validator.sh "$entrypointIp":~/solana "$entrypointIp:8001" >validator.log 2>&1 &
     ;;
@@ -65,7 +65,7 @@ snap)
   client_id=~/snap/solana/current/config/client-id.json

   ;;
-local)
+local|tar)
   PATH="$HOME"/.cargo/bin:"$PATH"
   export USE_INSTALL=1
   entrypointRsyncUrl="$entrypointIp:~/solana"
@@ -31,11 +31,7 @@ __cloud_FindInstances() {

   declare name zone publicIp privateIp status
   while read -r name publicIp privateIp status; do
-    if [[ $status != RUNNING ]]; then
-      echo "Warning: $name is not RUNNING, ignoring it."
-      continue
-    fi
-    printf "%-30s | publicIp=%-16s privateIp=%s\n" "$name" "$publicIp" "$privateIp"
+    printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"

     instances+=("$name:$publicIp:$privateIp")
   done < <(gcloud compute instances list \
@@ -132,6 +128,9 @@ cloud_CreateInstances() {
     --no-restart-on-failure
   )

+  # shellcheck disable=SC2206 # Do not want to quote $imageName as it may contain extra args
+  args+=(--image $imageName)
+
   # shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
   args+=(--machine-type $machineType)
   if [[ -n $optionalBootDiskSize ]]; then
@@ -13,8 +13,8 @@ sysctl -w kernel.sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
 if command -v earlyoom; then
   systemctl status earlyoom
 else
-  wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
-  apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb
+  wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.2-*_amd64.deb' -e robots=off -nd
+  apt install --quiet --yes ./earlyoom_1.2-*_amd64.deb

   cat > earlyoom <<OOM
   # use the kernel OOM killer, trigger at 20% available RAM,
@@ -12,7 +12,6 @@ apt-get --assume-yes install libssl-dev
 #
 # cc: https://github.com/solana-labs/solana/issues/1090
 # cc: https://packages.ubuntu.com/bionic/amd64/libssl1.1/download
-wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
-dpkg -i libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
-rm libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
+wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
+dpkg -i libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
+rm libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb

11 net/scripts/network-config.sh Executable file
@@ -0,0 +1,11 @@
#!/bin/bash -ex
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

sudo sysctl -w net.core.rmem_default=1610612736
sudo sysctl -w net.core.rmem_max=1610612736

sudo sysctl -w net.core.wmem_default=1610612736
sudo sysctl -w net.core.wmem_max=1610612736

11 net/scripts/remove-docker-interface.sh Executable file
@@ -0,0 +1,11 @@
#!/bin/bash -ex
#
# Some instances have docker running, and the docker0 network interface
# confuses gossip, causing airdrops to fail. As a workaround for now, simply
# remove the docker0 interface
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

ip link delete docker0 || true

9 net/scripts/update-default-cuda.sh Executable file
@@ -0,0 +1,9 @@
#!/bin/bash -ex
#
# Updates the default cuda symlink to the supported version
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

ln -sfT /usr/local/cuda-9.2 /usr/local/cuda

1 programs/bpf/c/.gitignore vendored Normal file
@@ -0,0 +1 @@
/out/

1 programs/bpf/c/makefile Normal file
@@ -0,0 +1 @@
include sdk/bpf.mk

63 programs/bpf/c/sdk/README.md Normal file
@@ -0,0 +1,63 @@

## Prerequisites

## LLVM / clang 7.0.0
http://releases.llvm.org/download.html

### Linux Ubuntu 16.04 (xenial)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```

### Linux Ubuntu 14.04 (trusty)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```

### macOS
The following depends on Homebrew; instructions on how to install Homebrew are at https://brew.sh

Once Homebrew is installed, ensure the latest llvm is installed:
```
$ brew update        # <- ensure your brew is up to date
$ brew install llvm  # <- should output "Warning: llvm 7.0.0 is already installed and up-to-date"
$ brew --prefix llvm # <- should output "/usr/local/opt/llvm"
```

## Development

### Quick start
To get started, create a `makefile` containing:
```make
include path/to/bpf.mk
```
and `src/program.c` containing:
```c
#include <solana_sdk.h>

bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[1];
  uint8_t *data;
  uint64_t data_len;

  if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
    return false;
  }
  print_params(1, ka, data, data_len);
  return true;
}
```

Then run `make` to build `out/program.o`.
Run `make help` for more details.

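A typical development loop, using only targets that `bpf.mk` below defines (the program name `program` follows from the quick-start file `src/program.c` above):

```bash
make               # builds out/program.o from src/program.c
make dump_program  # disassembles the built ELF via llvm-objdump
make clean         # removes the out/ directory
```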
### Limitations
* Programs must be fully contained within a single .c file
* No libc is available, but `solana_sdk.h` provides a minimal set of
  primitives.

115 programs/bpf/c/sdk/bpf.mk Normal file
@@ -0,0 +1,115 @@

all:
.PHONY: help all clean

ifneq ($(V),1)
_@ :=@
endif

INC_DIRS ?=
SRC_DIR ?= ./src
OUT_DIR ?= ./out

OS=$(shell uname)
ifeq ($(OS),Darwin)
LLVM_DIR ?= $(shell brew --prefix llvm)
endif

ifdef LLVM_DIR
CC := $(LLVM_DIR)/bin/clang
LLC := $(LLVM_DIR)/bin/llc
OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump
else
CC := clang-7
LLC := llc-7
OBJ_DUMP := llvm-objdump-7
endif

SYSTEM_INC_DIRS := -isystem $(dir $(lastword $(MAKEFILE_LIST)))inc

CC_FLAGS := \
  -Werror \
  -target bpf \
  -O2 \
  -emit-llvm \
  -fno-builtin \

LLC_FLAGS := \
  -march=bpf \
  -filetype=obj \

OBJ_DUMP_FLAGS := \
  -color \
  -source \
  -disassemble \

help:
	@echo 'BPF Program makefile'
	@echo ''
	@echo 'This makefile will build BPF Programs from C source files into ELFs'
	@echo ''
	@echo 'Assumptions:'
	@echo ' - Programs are a single .c source file (may include headers)'
	@echo ' - Programs are located in the source directory: $(SRC_DIR)'
	@echo ' - Programs are named by their basename (eg. file name:foo.c -> program name:foo)'
	@echo ' - Output files will be placed in the directory: $(OUT_DIR)'
	@echo ''
	@echo 'User settings'
	@echo ' - The following settings are overridable on the command line, default values shown:'
	@echo ' - Show commands while building:'
	@echo '   V=1'
	@echo ' - List of include directories:'
	@echo '   INC_DIRS=$(INC_DIRS)'
	@echo ' - List of system include directories:'
	@echo '   SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)'
	@echo ' - Location of source files:'
	@echo '   SRC_DIR=$(SRC_DIR)'
	@echo ' - Location to place output files:'
	@echo '   OUT_DIR=$(OUT_DIR)'
	@echo ' - Location of LLVM:'
	@echo '   LLVM_DIR=$(LLVM_DIR)'
	@echo ''
	@echo 'Usage:'
	@echo ' - make help - This help message'
	@echo ' - make all - Builds all the programs in the directory: $(SRC_DIR)'
	@echo ' - make clean - Cleans all programs'
	@echo ' - make dump_<program name> - Dumps the contents of the program to stdout'
	@echo ' - make <program name> - Build a single program by name'
	@echo ''
	@echo 'Available programs:'
	$(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n))
	@echo ''
	@echo 'Example:'
	@echo ' - Assuming a program named foo (src/foo.c)'
	@echo ' - make foo'
	@echo ' - make dump_foo'

.PRECIOUS: $(OUT_DIR)/%.bc
$(OUT_DIR)/%.bc: $(SRC_DIR)/%.c
	@echo "[cc] $@ ($<)"
	$(_@)mkdir -p $(OUT_DIR)
	$(_@)$(CC) $(CC_FLAGS) $(SYSTEM_INC_DIRS) $(INC_DIRS) -o $@ -c $< -MD -MF $(@:.bc=.d)

.PRECIOUS: $(OUT_DIR)/%.o
$(OUT_DIR)/%.o: $(OUT_DIR)/%.bc
	@echo "[llc] $@ ($<)"
	$(_@)$(LLC) $(LLC_FLAGS) -o $@ $<

-include $(wildcard $(OUT_DIR)/*.d)

PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*.c)))

define \n


endef

all: $(PROGRAM_NAMES)

%: $(addprefix $(OUT_DIR)/, %.o) ;

dump_%: %
	$(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .o, $<))

clean:
	rm -rf $(OUT_DIR)

298 programs/bpf/c/sdk/inc/solana_sdk.h Normal file
@ -0,0 +1,298 @@
|
||||
#pragma once
/**
 * @brief Solana C-based BPF program utility functions and types
 */

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Numeric types
 */
#ifndef __LP64__
#error LP64 data model required
#endif

typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed long int int64_t;
typedef unsigned long int uint64_t;

/**
 * NULL
 */
#define NULL 0

/**
 * Boolean type
 */
typedef enum { false = 0, true } bool;

/**
 * Helper function that prints a string to stdout
 */
extern void sol_log(const char*);

/**
 * Helper function that prints five 64-bit values represented in hexadecimal
 * to stdout
 */
extern void sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);

/**
 * Prefix for all BPF functions
 *
 * This prefix should be used for functions in order to facilitate
 * interoperability with BPF representation
 */
#define SOL_FN_PREFIX __attribute__((always_inline)) static

/**
 * Size of a public key in bytes
 */
#define SIZE_PUBKEY 32

/**
 * Public key
 */
typedef struct {
  uint8_t x[SIZE_PUBKEY];
} SolPubkey;

/**
 * Compares two public keys
 *
 * @param one First public key
 * @param two Second public key
 * @return true if the same
 */
SOL_FN_PREFIX bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) {
  for (int i = 0; i < sizeof(*one); i++) {
    if (one->x[i] != two->x[i]) {
      return false;
    }
  }
  return true;
}

/**
 * Keyed Accounts
 */
typedef struct {
  SolPubkey *key;        /** Public key of the account */
  int64_t *tokens;       /** Number of tokens owned by this account */
  uint64_t userdata_len; /** Length of userdata in bytes */
  uint8_t *userdata;     /** On-chain data owned by this account */
  SolPubkey *program_id; /** Program that owns this account */
} SolKeyedAccounts;

/**
 * Copies memory
 */
SOL_FN_PREFIX void sol_memcpy(void *dst, const void *src, int len) {
  for (int i = 0; i < len; i++) {
    *((uint8_t *)dst + i) = *((const uint8_t *)src + i);
  }
}

/**
 * Compares memory
 */
SOL_FN_PREFIX int sol_memcmp(const void *s1, const void *s2, int n) {
  for (int i = 0; i < n; i++) {
    uint8_t diff = *((const uint8_t *)s1 + i) - *((const uint8_t *)s2 + i);
    if (diff) {
      return diff;
    }
  }
  return 0;
}

/**
 * Computes the number of elements in an array
 */
#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))

/**
 * Panics
 *
 * Prints the line number where the panic occurred and then causes
 * the BPF VM to immediately halt execution. No accounts' userdata is updated
 */
#define sol_panic() _sol_panic(__LINE__)
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
  sol_log_64(0xFF, 0xFF, 0xFF, 0xFF, line);
  uint8_t *pv = (uint8_t *)1;
  *pv = 1;
}

/**
 * Asserts
 */
#define sol_assert(expr)  \
  if (!(expr)) {          \
    _sol_panic(__LINE__); \
  }

/**
 * De-serializes the input parameters into usable types
 *
 * Use this function to deserialize the buffer passed to the program entrypoint
 * into usable types. This function does not perform copy deserialization;
 * instead it populates the pointers and lengths in SolKeyedAccounts and data so
 * that any modification to tokens or account data takes place on the original
 * buffer. Doing so also eliminates the need to serialize back into the buffer
 * at program end.
 *
 * @param input Source buffer containing serialized input parameters
 * @param ka Pointer to an array of SolKeyedAccounts to deserialize into
 * @param ka_len Number of SolKeyedAccounts entries in `ka`
 * @param ka_len_out If NULL, fill exactly `ka_len` accounts or fail.
 *                   If not NULL, fill up to `ka_len` accounts and return the
 *                   number of filled accounts in `ka_len_out`.
 * @param data On return, a pointer to the instruction data
 * @param data_len On return, the length in bytes of the instruction data
 * @return Boolean true if successful
 */
SOL_FN_PREFIX bool sol_deserialize(
  const uint8_t *input,
  SolKeyedAccounts *ka,
  uint64_t ka_len,
  uint64_t *ka_len_out,
  const uint8_t **data,
  uint64_t *data_len
) {
  if (ka_len_out == NULL) {
    if (ka_len != *(uint64_t *) input) {
      return false;
    }
    ka_len = *(uint64_t *) input;
  } else {
    if (ka_len > *(uint64_t *) input) {
      ka_len = *(uint64_t *) input;
    }
    *ka_len_out = ka_len;
  }

  input += sizeof(uint64_t);
  for (int i = 0; i < ka_len; i++) {
    // key
    ka[i].key = (SolPubkey *) input;
    input += sizeof(SolPubkey);

    // tokens
    ka[i].tokens = (int64_t *) input;
    input += sizeof(int64_t);

    // account userdata
    ka[i].userdata_len = *(uint64_t *) input;
    input += sizeof(uint64_t);
    ka[i].userdata = (uint8_t *) input;
    input += ka[i].userdata_len;

    // program_id
    ka[i].program_id = (SolPubkey *) input;
    input += sizeof(SolPubkey);
  }

  // input data
  *data_len = *(uint64_t *) input;
  input += sizeof(uint64_t);
  *data = input;

  return true;
}

/**
 * Debugging utilities
 * @{
 */

/**
 * Prints the hexadecimal representation of a public key
 *
 * @param key The public key to print
 */
SOL_FN_PREFIX void sol_log_key(const SolPubkey *key) {
  for (int j = 0; j < sizeof(*key); j++) {
    sol_log_64(0, 0, 0, j, key->x[j]);
  }
}

/**
 * Prints the hexadecimal representation of an array
 *
 * @param array The array to print
 * @param len Number of bytes to print
 */
SOL_FN_PREFIX void sol_log_array(const uint8_t *array, int len) {
  for (int j = 0; j < len; j++) {
    sol_log_64(0, 0, 0, j, array[j]);
  }
}

/**
 * Prints the hexadecimal representation of the program's input parameters
 *
 * @param num_ka Number of SolKeyedAccounts to print
 * @param ka A pointer to an array of SolKeyedAccounts to print
 * @param data A pointer to the instruction data to print
 * @param data_len The length in bytes of the instruction data
 */
SOL_FN_PREFIX void sol_log_params(
  uint64_t num_ka,
  const SolKeyedAccounts *ka,
  const uint8_t *data,
  uint64_t data_len
) {
  sol_log_64(0, 0, 0, 0, num_ka);
  for (int i = 0; i < num_ka; i++) {
    sol_log_key(ka[i].key);
    sol_log_64(0, 0, 0, 0, *ka[i].tokens);
    sol_log_array(ka[i].userdata, ka[i].userdata_len);
    sol_log_key(ka[i].program_id);
  }
  sol_log_array(data, data_len);
}

/**@}*/

/**
 * Program entrypoint
 * @{
 *
 * The following is an example of a simple program that prints the input
 * parameters it received:
 *
 * bool entrypoint(const uint8_t *input) {
 *   SolKeyedAccounts ka[1];
 *   const uint8_t *data;
 *   uint64_t data_len;
 *
 *   if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
 *     return false;
 *   }
 *   sol_log_params(1, ka, data, data_len);
 *   return true;
 * }
 */

/**
 * Program entrypoint signature
 *
 * @param input An array containing serialized input parameters
 * @return true if successful
 */
extern bool entrypoint(const uint8_t *input);

#ifdef __cplusplus
}
#endif

/**@}*/
32 programs/bpf/c/src/move_funds.c Normal file
@@ -0,0 +1,32 @@
/**
 * @brief Example C-based BPF program that moves funds from one account to
 * another
 */

#include <solana_sdk.h>

/**
 * Number of SolKeyedAccounts expected. The program should bail if an
 * unexpected number of accounts is passed to the program's entrypoint
 */
#define NUM_KA 3

extern bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[NUM_KA];
  const uint8_t *data;
  uint64_t data_len;

  if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
    return false;
  }

  int64_t tokens = *(int64_t *)data;
  if (*ka[0].tokens >= tokens) {
    *ka[0].tokens -= tokens;
    *ka[2].tokens += tokens;
    // sol_log_64(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
  } else {
    // sol_log_64(0, 0, 0xFF, *ka[0].tokens, tokens);
  }
  return true;
}
35 programs/bpf/c/src/noop.c Normal file
@@ -0,0 +1,35 @@
/**
 * @brief Example C-based BPF program that prints out the parameters
 * passed to it
 */

#include <solana_sdk.h>

/**
 * Number of SolKeyedAccounts expected. The program should bail if an
 * unexpected number of accounts is passed to the program's entrypoint
 */
#define NUM_KA 1

extern bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[NUM_KA];
  const uint8_t *data;
  uint64_t data_len;

  sol_log("noop");

  if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
    return false;
  }
  sol_log_params(NUM_KA, ka, data, data_len);

  sol_assert(sizeof(int8_t) == 1);
  sol_assert(sizeof(uint8_t) == 1);
  sol_assert(sizeof(int16_t) == 2);
  sol_assert(sizeof(uint16_t) == 2);
  sol_assert(sizeof(int32_t) == 4);
  sol_assert(sizeof(uint32_t) == 4);
  sol_assert(sizeof(int64_t) == 8);
  sol_assert(sizeof(uint64_t) == 8);
  return true;
}
11 programs/bpf/rust/noop/Cargo.toml Normal file
@@ -0,0 +1,11 @@
[package]
name = "solana-bpf-noop"
version = "0.10.5"
description = "Solana BPF noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"

[dependencies]
rbpf = "0.1.0"
solana-sdk = { path = "../../../../sdk", version = "0.10.5" }
10 programs/bpf/rust/noop/build.sh Executable file
@@ -0,0 +1,10 @@
#!/bin/bash -ex

# TODO building release flavor with rust produces a bunch of output .bc files
INTERDIR=../../../target/release
OUTDIR="${1:-../../../target/debug/}"
mkdir -p "$OUTDIR"
# cargo +nightly rustc --release -- -C panic=abort --emit=llvm-ir
cargo +nightly rustc --release -- -C panic=abort --emit=llvm-bc
cp "$INTERDIR"/deps/noop_rust-*.bc "$OUTDIR"/noop_rust.bc
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -o "$OUTDIR"/noop_rust.o "$OUTDIR"/noop_rust.bc
3 programs/bpf/rust/noop/dump.sh Executable file
@@ -0,0 +1,3 @@
#!/bin/sh

/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble target/release/noop_rust.o
15 programs/bpf/rust/noop/src/lib.rs Normal file
@@ -0,0 +1,15 @@
extern crate rbpf;

use std::mem::transmute;

#[no_mangle]
#[link_section = ".text,entrypoint"] // TODO platform independent needed
pub extern "C" fn entrypoint(_raw: *mut u8) {
    let bpf_func_trace_printk = unsafe {
        transmute::<u64, extern "C" fn(u64, u64, u64, u64, u64)>(
            rbpf::helpers::BPF_TRACE_PRINTK_IDX as u64,
        )
    };

    bpf_func_trace_printk(0, 0, 1, 2, 3);
}
@@ -1,23 +0,0 @@
[package]
name = "move_funds"
version = "0.1.0"
authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
    "Jack May <jack@solana.com>",
]

[dependencies]
bincode = "1.0.0"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
solana_program_interface = { path = "../../common" }

[lib]
name = "move_funds"
crate-type = ["dylib"]
@@ -1,48 +0,0 @@
extern crate bincode;
extern crate solana_program_interface;

use bincode::deserialize;
use solana_program_interface::account::KeyedAccount;

#[no_mangle]
pub extern "C" fn process(infos: &mut Vec<KeyedAccount>, data: &[u8]) {
    let tokens: i64 = deserialize(data).unwrap();
    if infos[0].account.tokens >= tokens {
        infos[0].account.tokens -= tokens;
        infos[1].account.tokens += tokens;
    } else {
        println!(
            "Insufficient funds, asked {}, only had {}",
            tokens, infos[0].account.tokens
        );
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use solana_program_interface::account::Account;
    use solana_program_interface::pubkey::Pubkey;

    #[test]
    fn test_move_funds() {
        let tokens: i64 = 100;
        let data: Vec<u8> = serialize(&tokens).unwrap();
        let keys = vec![Pubkey::default(); 2];
        let mut accounts = vec![Account::default(), Account::default()];
        accounts[0].tokens = 100;
        accounts[1].tokens = 1;

        {
            let mut infos: Vec<KeyedAccount> = Vec::new();
            for (key, account) in keys.iter().zip(&mut accounts).collect::<Vec<_>>() {
                infos.push(KeyedAccount { key, account });
            }

            process(&mut infos, &data);
        }
        assert_eq!(0, accounts[0].tokens);
        assert_eq!(101, accounts[1].tokens);
    }
}
24 programs/native/bpf_loader/Cargo.toml Normal file
@@ -0,0 +1,24 @@
[package]
name = "solana-bpfloader"
version = "0.10.5"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"

[dependencies]
bincode = "1.0.0"
byteorder = "1.2.1"
elf = "0.0.10"
env_logger = "0.5.12"
libc = "0.2.43"
log = "0.4.2"
solana_rbpf = "0.1.3"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.5" }

[lib]
name = "solana_bpf_loader"
crate-type = ["cdylib"]
324 programs/native/bpf_loader/src/bpf_verifier.rs Normal file
@@ -0,0 +1,324 @@
use solana_rbpf::ebpf;
use std::io::{Error, ErrorKind};

fn reject<S: AsRef<str>>(msg: S) -> Result<(), Error> {
    let full_msg = format!("[Verifier] Error: {}", msg.as_ref());
    Err(Error::new(ErrorKind::Other, full_msg))
}

fn check_prog_len(prog: &[u8]) -> Result<(), Error> {
    if prog.len() % ebpf::INSN_SIZE != 0 {
        reject(format!(
            "eBPF program length must be a multiple of {:?} octets",
            ebpf::INSN_SIZE
        ))?;
    }
    if prog.len() > ebpf::PROG_MAX_SIZE {
        reject(format!(
            "eBPF program length limited to {:?}, here {:?}",
            ebpf::PROG_MAX_INSNS,
            prog.len() / ebpf::INSN_SIZE
        ))?;
    }

    if prog.is_empty() {
        reject("No program set, call prog_set() to load one".to_string())?;
    }

    // TODO BPF program may deterministically exit even if the last
    // instruction in the block is not an exit (might be earlier and jumped to)
    // TODO need to validate more intelligently
    // let last_insn = ebpf::get_insn(prog, (prog.len() / ebpf::INSN_SIZE) - 1);
    // if last_insn.opc != ebpf::EXIT {
    //     reject("program does not end with “EXIT” instruction".to_string())?;
    // }

    Ok(())
}

fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Error> {
    if insn.imm == 0 {
        reject(format!("division by 0 (insn #{:?})", insn_ptr))?;
    }

    Ok(())
}

fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Error> {
    match insn.imm {
        16 | 32 | 64 => Ok(()),
        _ => reject(format!(
            "unsupported argument for LE/BE (insn #{:?})",
            insn_ptr
        )),
    }
}

fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), Error> {
    // We know we can reach the next insn since we enforce an EXIT insn at the end of the
    // program, while this function should be called only for an LD_DW insn, which cannot
    // be last in the program.
    let next_insn = ebpf::get_insn(prog, insn_ptr + 1);
    if next_insn.opc != 0 {
        reject(format!(
            "incomplete LD_DW instruction (insn #{:?})",
            insn_ptr
        ))?;
    }

    Ok(())
}

fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), Error> {
    let insn = ebpf::get_insn(prog, insn_ptr);
    if insn.off == -1 {
        reject(format!("infinite loop (insn #{:?})", insn_ptr))?;
    }

    let dst_insn_ptr = insn_ptr as isize + 1 + insn.off as isize;
    if dst_insn_ptr < 0 || dst_insn_ptr as usize >= (prog.len() / ebpf::INSN_SIZE) {
        reject(format!(
            "jump out of code to #{:?} (insn #{:?})",
            dst_insn_ptr, insn_ptr
        ))?;
    }

    let dst_insn = ebpf::get_insn(prog, dst_insn_ptr as usize);
    if dst_insn.opc == 0 {
        reject(format!(
            "jump to middle of LD_DW at #{:?} (insn #{:?})",
            dst_insn_ptr, insn_ptr
        ))?;
    }

    Ok(())
}

fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), Error> {
    if insn.src > 10 {
        reject(format!("invalid source register (insn #{:?})", insn_ptr))?;
    }

    match (insn.dst, store) {
        (0...9, _) | (10, true) => Ok(()),
        (10, false) => reject(format!(
            "cannot write into register r10 (insn #{:?})",
            insn_ptr
        )),
        (_, _) => reject(format!(
            "invalid destination register (insn #{:?})",
            insn_ptr
        )),
    }
}

pub fn check(prog: &[u8]) -> Result<(), Error> {
    check_prog_len(prog)?;

    let mut insn_ptr: usize = 0;
    while insn_ptr * ebpf::INSN_SIZE < prog.len() {
        let insn = ebpf::get_insn(prog, insn_ptr);
        let mut store = false;

        match insn.opc {
            // BPF_LD class
            ebpf::LD_ABS_B => {}
            ebpf::LD_ABS_H => {}
            ebpf::LD_ABS_W => {}
            ebpf::LD_ABS_DW => {}
            ebpf::LD_IND_B => {}
            ebpf::LD_IND_H => {}
            ebpf::LD_IND_W => {}
            ebpf::LD_IND_DW => {}

            ebpf::LD_DW_IMM => {
                store = true;
                check_load_dw(prog, insn_ptr)?;
                insn_ptr += 1;
            }

            // BPF_LDX class
            ebpf::LD_B_REG => {}
            ebpf::LD_H_REG => {}
            ebpf::LD_W_REG => {}
            ebpf::LD_DW_REG => {}

            // BPF_ST class
            ebpf::ST_B_IMM => store = true,
            ebpf::ST_H_IMM => store = true,
            ebpf::ST_W_IMM => store = true,
            ebpf::ST_DW_IMM => store = true,

            // BPF_STX class
            ebpf::ST_B_REG => store = true,
            ebpf::ST_H_REG => store = true,
            ebpf::ST_W_REG => store = true,
            ebpf::ST_DW_REG => store = true,
            ebpf::ST_W_XADD => unimplemented!(),
            ebpf::ST_DW_XADD => unimplemented!(),

            // BPF_ALU class
            ebpf::ADD32_IMM => {}
            ebpf::ADD32_REG => {}
            ebpf::SUB32_IMM => {}
            ebpf::SUB32_REG => {}
            ebpf::MUL32_IMM => {}
            ebpf::MUL32_REG => {}
            ebpf::DIV32_IMM => check_imm_nonzero(&insn, insn_ptr)?,
            ebpf::DIV32_REG => {}
            ebpf::OR32_IMM => {}
            ebpf::OR32_REG => {}
            ebpf::AND32_IMM => {}
            ebpf::AND32_REG => {}
            ebpf::LSH32_IMM => {}
            ebpf::LSH32_REG => {}
            ebpf::RSH32_IMM => {}
            ebpf::RSH32_REG => {}
            ebpf::NEG32 => {}
            ebpf::MOD32_IMM => check_imm_nonzero(&insn, insn_ptr)?,
            ebpf::MOD32_REG => {}
            ebpf::XOR32_IMM => {}
            ebpf::XOR32_REG => {}
            ebpf::MOV32_IMM => {}
            ebpf::MOV32_REG => {}
            ebpf::ARSH32_IMM => {}
            ebpf::ARSH32_REG => {}
            ebpf::LE => check_imm_endian(&insn, insn_ptr)?,
            ebpf::BE => check_imm_endian(&insn, insn_ptr)?,

            // BPF_ALU64 class
            ebpf::ADD64_IMM => {}
            ebpf::ADD64_REG => {}
            ebpf::SUB64_IMM => {}
            ebpf::SUB64_REG => {}
            ebpf::MUL64_IMM => check_imm_nonzero(&insn, insn_ptr)?,
            ebpf::MUL64_REG => {}
            ebpf::DIV64_IMM => check_imm_nonzero(&insn, insn_ptr)?,
            ebpf::DIV64_REG => {}
            ebpf::OR64_IMM => {}
            ebpf::OR64_REG => {}
            ebpf::AND64_IMM => {}
            ebpf::AND64_REG => {}
            ebpf::LSH64_IMM => {}
            ebpf::LSH64_REG => {}
            ebpf::RSH64_IMM => {}
            ebpf::RSH64_REG => {}
            ebpf::NEG64 => {}
            ebpf::MOD64_IMM => {}
            ebpf::MOD64_REG => {}
            ebpf::XOR64_IMM => {}
            ebpf::XOR64_REG => {}
            ebpf::MOV64_IMM => {}
            ebpf::MOV64_REG => {}
            ebpf::ARSH64_IMM => {}
            ebpf::ARSH64_REG => {}

            // BPF_JMP class
            ebpf::JA => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JEQ_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JEQ_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JGT_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JGT_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JGE_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JGE_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JLT_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JLT_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JLE_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JLE_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSET_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSET_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JNE_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JNE_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSGT_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSGT_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSGE_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSGE_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSLT_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSLT_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSLE_IMM => check_jmp_offset(prog, insn_ptr)?,
            ebpf::JSLE_REG => check_jmp_offset(prog, insn_ptr)?,
            ebpf::CALL => {}
            ebpf::TAIL_CALL => unimplemented!(),
            ebpf::EXIT => {}

            _ => {
                reject(format!(
                    "unknown eBPF opcode {:#2x} (insn #{:?})",
                    insn.opc, insn_ptr
                ))?;
            }
        }

        check_registers(&insn, store, insn_ptr)?;

        insn_ptr += 1;
    }

    // insn_ptr should now be equal to number of instructions.
    if insn_ptr != prog.len() / ebpf::INSN_SIZE {
        reject(format!("jumped out of code to #{:?}", insn_ptr))?;
    }

    Ok(())
}
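The verifier above walks the instruction stream once and rejects a program before it ever executes. As a quick orientation, here is a minimal sketch of driving `check` directly; it is not part of this diff and assumes the module is reachable as `bpf_verifier`, as the `pub mod bpf_verifier;` declaration in the loader crate below provides:

```
// Hypothetical smoke test for the verifier above; illustrative only.
extern crate solana_rbpf;

mod bpf_verifier; // the file shown above

fn main() {
    // Two well-formed 8-byte eBPF instructions: mov64 r0, 0 (0xb7) then exit (0x95).
    let prog = &[
        0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r0 = 0
        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
    ];
    assert!(bpf_verifier::check(prog).is_ok());

    // A slice that is not a multiple of 8 bytes is rejected by check_prog_len
    // before any opcode is inspected.
    assert!(bpf_verifier::check(&prog[..9]).is_err());
}
```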
230 programs/native/bpf_loader/src/lib.rs Normal file
@@ -0,0 +1,230 @@
pub mod bpf_verifier;

extern crate bincode;
extern crate byteorder;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate libc;
extern crate solana_rbpf;
extern crate solana_sdk;

use bincode::deserialize;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use libc::c_char;
use solana_rbpf::EbpfVmRaw;
use solana_sdk::account::KeyedAccount;
use solana_sdk::loader_instruction::LoaderInstruction;
use solana_sdk::pubkey::Pubkey;
use std::ffi::CStr;
use std::io::prelude::*;
use std::io::{Error, ErrorKind};
use std::mem;
use std::sync::{Once, ONCE_INIT};

// TODO use rbpf's disassemble
#[allow(dead_code)]
fn dump_program(key: &Pubkey, prog: &[u8]) {
    let mut eight_bytes: Vec<u8> = Vec::new();
    info!("BPF Program: {:?}", key);
    for i in prog.iter() {
        eight_bytes.push(*i);
        if eight_bytes.len() >= 8 {
            info!("{:02X?}", eight_bytes);
            eight_bytes.clear();
        }
    }
}

#[allow(unused_variables)]
pub fn helper_sol_log_verify(
    addr: u64,
    unused2: u64,
    unused3: u64,
    unused4: u64,
    unused5: u64,
    ro_regions: &[&[u8]],
    unused7: &[&[u8]],
) -> Result<(), Error> {
    for region in ro_regions.iter() {
        if region.as_ptr() as u64 <= addr
            && addr as u64 <= region.as_ptr() as u64 + region.len() as u64
        {
            let c_buf: *const c_char = addr as *const c_char;
            let max_size = (region.as_ptr() as u64 + region.len() as u64) - addr;
            unsafe {
                for i in 0..max_size {
                    if std::ptr::read(c_buf.offset(i as isize)) == 0 {
                        return Ok(());
                    }
                }
            }
            return Err(Error::new(ErrorKind::Other, "Error, Unterminated string"));
        }
    }
    Err(Error::new(
        ErrorKind::Other,
        "Error: Load segfault, bad string pointer",
    ))
}

#[allow(unused_variables)]
pub fn helper_sol_log(addr: u64, unused2: u64, unused3: u64, unused4: u64, unused5: u64) -> u64 {
    let c_buf: *const c_char = addr as *const c_char;
    let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
    match c_str.to_str() {
        Ok(slice) => info!("sol_log: {:?}", slice),
        Err(_) => warn!("Error: Cannot print invalid string"),
    };
    0
}

pub fn helper_sol_log_u64(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
    info!(
        "sol_log_u64: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
        arg1, arg2, arg3, arg4, arg5
    );
    0
}

fn create_vm(prog: &[u8]) -> Result<EbpfVmRaw, Error> {
    let mut vm = EbpfVmRaw::new(None)?;
    vm.set_verifier(bpf_verifier::check)?;
    vm.set_max_instruction_count(36000)?; // 36000 is a wag, need to tune
    vm.set_elf(&prog)?;
    vm.register_helper_ex("sol_log", Some(helper_sol_log_verify), helper_sol_log)?;
    vm.register_helper_ex("sol_log_64", None, helper_sol_log_u64)?;
    Ok(vm)
}

fn serialize_parameters(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> Vec<u8> {
    assert_eq!(32, mem::size_of::<Pubkey>());

    let mut v: Vec<u8> = Vec::new();
    v.write_u64::<LittleEndian>(keyed_accounts.len() as u64)
        .unwrap();
    for info in keyed_accounts.iter_mut() {
        v.write_all(info.key.as_ref()).unwrap();
        v.write_i64::<LittleEndian>(info.account.tokens).unwrap();
        v.write_u64::<LittleEndian>(info.account.userdata.len() as u64)
            .unwrap();
        v.write_all(&info.account.userdata).unwrap();
        v.write_all(info.account.program_id.as_ref()).unwrap();
    }
    v.write_u64::<LittleEndian>(data.len() as u64).unwrap();
    v.write_all(data).unwrap();
    v
}

fn deserialize_parameters(keyed_accounts: &mut [KeyedAccount], buffer: &[u8]) {
    assert_eq!(32, mem::size_of::<Pubkey>());

    let mut start = mem::size_of::<u64>();
    for info in keyed_accounts.iter_mut() {
        start += mem::size_of::<Pubkey>(); // skip pubkey
        info.account.tokens = LittleEndian::read_i64(&buffer[start..]);

        start += mem::size_of::<u64>() // skip tokens
            + mem::size_of::<u64>(); // skip length tag
        let end = start + info.account.userdata.len();
        info.account.userdata.clone_from_slice(&buffer[start..end]);

        start += info.account.userdata.len() // skip userdata
            + mem::size_of::<Pubkey>(); // skip program_id
    }
}

#[no_mangle]
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -> bool {
    static INIT: Once = ONCE_INIT;
    INIT.call_once(|| {
        // env_logger can only be initialized once
        env_logger::init();
    });

    if keyed_accounts[0].account.executable {
        let prog = keyed_accounts[0].account.userdata.clone();
        trace!("Call BPF, {} instructions", prog.len() / 8);
        //dump_program(keyed_accounts[0].key, &prog);
        let mut vm = match create_vm(&prog) {
            Ok(vm) => vm,
            Err(e) => {
                warn!("create_vm failed: {}", e);
                return false;
            }
        };
        let mut v = serialize_parameters(&mut keyed_accounts[1..], &tx_data);
        match vm.execute_program(v.as_mut_slice()) {
            Ok(status) => if 0 == status {
                return false;
            },
            Err(e) => {
                warn!("execute_program failed: {}", e);
                return false;
            }
        }
        deserialize_parameters(&mut keyed_accounts[1..], &v);
        trace!(
            "BPF program executed {} instructions",
            vm.get_last_instruction_count()
        );
    } else if let Ok(instruction) = deserialize(tx_data) {
        match instruction {
            LoaderInstruction::Write { offset, bytes } => {
                let offset = offset as usize;
                let len = bytes.len();
                debug!("Write: offset={} length={}", offset, len);
                if keyed_accounts[0].account.userdata.len() < offset + len {
                    warn!(
                        "Write overflow: {} < {}",
                        keyed_accounts[0].account.userdata.len(),
                        offset + len
                    );
                    return false;
                }
                keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes);
            }
            LoaderInstruction::Finalize => {
                keyed_accounts[0].account.executable = true;
                info!("Finalize: account {:?}", keyed_accounts[0].key);
            }
        }
    } else {
        warn!("Invalid program transaction: {:?}", tx_data);
    }
    true
}

#[cfg(test)]
mod tests {
    use super::*;
    use solana_rbpf::helpers;

    #[test]
    #[should_panic(expected = "Error: Execution exceeded maximum number of instructions")]
    fn test_non_terminating_program() {
        #[rustfmt::skip]
        let prog = &[
            0xb7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r6 = 0
            0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = 0
            0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r2 = 0
            0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
            0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r4 = 0
            0xbf, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r5 = r6
            0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call 6
            0x07, 0x06, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // r6 + 1
            0x05, 0x00, 0xf8, 0xff, 0x00, 0x00, 0x00, 0x00, // goto -8
            0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
        ];
        let input = &mut [0x00];

        let mut vm = EbpfVmRaw::new(None).unwrap();
        vm.set_verifier(bpf_verifier::check).unwrap();
        vm.set_max_instruction_count(36000).unwrap(); // 36000 is a wag, need to tune
        vm.set_program(prog).unwrap();
        vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helpers::bpf_trace_printf)
            .unwrap();
        vm.execute_program(input).unwrap();
    }
}
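Taken together, `LoaderInstruction::Write` and `LoaderInstruction::Finalize` imply a simple client-side deployment flow: chunk the program bytes into Write instructions, then send Finalize to mark the account executable. A hedged sketch, not part of this diff; it assumes the `offset` field is a `u32` (the enum definition lives in solana-sdk and is not shown here):

```
// Hypothetical client helper: build the tx_data payloads the loader's
// process() entry point expects, one Write per chunk plus a Finalize.
extern crate bincode;
extern crate solana_sdk;

use solana_sdk::loader_instruction::LoaderInstruction;

fn deploy_chunks(program: &[u8], chunk: usize) -> Vec<Vec<u8>> {
    let mut txs: Vec<Vec<u8>> = program
        .chunks(chunk)
        .enumerate()
        .map(|(i, bytes)| {
            bincode::serialize(&LoaderInstruction::Write {
                offset: (i * chunk) as u32, // assumed u32; see lead-in
                bytes: bytes.to_vec(),
            }).unwrap()
        })
        .collect();
    txs.push(bincode::serialize(&LoaderInstruction::Finalize).unwrap());
    txs
}

fn main() {
    // Three Write payloads plus one Finalize for a 60-byte program.
    assert_eq!(deploy_chunks(&[0u8; 60], 20).len(), 4);
}
```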
24 programs/native/lua_loader/Cargo.toml Normal file
@@ -0,0 +1,24 @@
[package]
name = "solana-lualoader"
version = "0.10.5"
description = "Solana Lua Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"

[dependencies]
bincode = "1.0.0"
env_logger = "0.5.12"
log = "0.4.2"
rlua = "0.15.2"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.5" }

[dev-dependencies]
bincode = "1.0.0"

[lib]
name = "solana_lua_loader"
crate-type = ["cdylib"]
50 programs/native/lua_loader/multisig.lua Normal file
@@ -0,0 +1,50 @@
-- M-N Multisig. Pass in a table "{m=M, n=N, tokens=T}" where M is the number
-- of signatures required, and N is a list of the pubkeys identifying
-- those signatures. Once M of len(N) signatures are collected, tokens T
-- are subtracted from account 1 and given to account 4. Note that unlike
-- Rust, Lua is one-based and that account 1 is the first account.

function find(t, x)
    for i, v in pairs(t) do
        if v == x then
            return i
        end
    end
end

function deserialize(bytes)
    return load("return" .. bytes)()
end

local from_account,
      serialize_account,
      state_account,
      to_account = table.unpack(accounts)

local serialize = load(serialize_account.userdata)().serialize

if #state_account.userdata == 0 then
    local cfg = deserialize(data)
    state_account.userdata = serialize(cfg, nil, "s")
    return
end

local cfg = deserialize(state_account.userdata)
local key = deserialize(data)

local i = find(cfg.n, key)
if i == nil then
    return
end

table.remove(cfg.n, i)
cfg.m = cfg.m - 1
state_account.userdata = serialize(cfg, nil, "s")

if cfg.m == 0 then
    from_account.tokens = from_account.tokens - cfg.tokens
    to_account.tokens = to_account.tokens + cfg.tokens

    -- End of game.
    state_account.tokens = 0
end
174 programs/native/lua_loader/serialize.lua Normal file
@@ -0,0 +1,174 @@
----------------------------------------------------------------
-- serialize.lua
--
-- Exports:
--
--    orderedPairs : deterministically ordered version of pairs()
--
--    serialize : convert Lua value to string in Lua syntax
--
----------------------------------------------------------------


-- orderedPairs: iterate over table elements in deterministic order. First,
-- array elements are returned, then remaining elements sorted by the key's
-- type and value.

-- compare any two Lua values, establishing a complete ordering
local function ltAny(a, b)
    local ta, tb = type(a), type(b)
    if ta ~= tb then
        return ta < tb
    end
    if ta == "string" or ta == "number" then
        return a < b
    end
    return tostring(a) < tostring(b)
end

local inext = ipairs{}

local function orderedPairs(t)
    local keys = {}
    local keyIndex = 1
    local counting = true

    local function _next(seen, s)
        local v

        if counting then
            -- return next array index
            s, v = inext(t, s)
            if s ~= nil then
                seen[s] = true
                return s, v
            end
            counting = false

            -- construct sorted unseen keys
            for k, v in pairs(t) do
                if not seen[k] then
                    table.insert(keys, k)
                end
            end
            table.sort(keys, ltAny)
        end

        -- return next unseen table element
        s = keys[keyIndex]
        if s ~= nil then
            keyIndex = keyIndex + 1
            v = t[s]
        end
        return s, v
    end

    return _next, {}, 0
end


-- avoid 'nan', 'inf', and '-inf'
local numtostring = {
    [tostring(-1/0)] = "-1/0",
    [tostring(1/0)] = "1/0",
    [tostring(0/0)] = "0/0"
}

setmetatable(numtostring, { __index = function (t, k) return k end })

-- serialize: Serialize a Lua data structure
--
--   x    = value to serialize
--   out  = function to be called repeatedly with strings, or
--          table into which strings should be inserted, or
--          nil => return a string
--   iter = function to iterate over table elements, or
--          "s" to sort elements by key, or
--          nil for default (fastest)
--
-- Notes:
--  * Does not support self-referential data structures.
--  * Does not optimize for repeated sub-expressions.
--  * Does not preserve topology; only values.
--  * Does not handle types other than nil, number, boolean, string, table
--
local function serialize(x, out, iter)
    local visited = {}
    local iter = iter == "s" and orderedPairs or iter or pairs
    assert(type(iter) == "function")

    local function _serialize(x)
        if type(x) == "string" then

            out(string.format("%q", x))

        elseif type(x) == "number" then

            out(numtostring[tostring(x)])

        elseif type(x) == "boolean" or
               type(x) == "nil" then

            out(tostring(x))

        elseif type(x) == "table" then

            if visited[x] then
                error("serialize: recursive structure")
            end
            visited[x] = true
            local first, nextIndex = true, 1

            out "{"

            for k, v in iter(x) do
                if first then
                    first = false
                else
                    out ","
                end
                if k == nextIndex then
                    nextIndex = nextIndex + 1
                else
                    if type(k) == "string" and k:match("^[%a_][%w_]*$") then
                        out(k .. "=")
                    else
                        out "["
                        _serialize(k)
                        out "]="
                    end
                end
                _serialize(v)
            end

            out "}"
            visited[x] = false
        else
            error("serialize: unsupported type")
        end
    end

    local result
    if not out then
        result = {}
        out = result
    end

    if type(out) == "table" then
        local t = out
        function out(s)
            table.insert(t, s)
        end
    end

    _serialize(x)

    if result then
        return table.concat(result)
    end
end

return {
    orderedPairs = orderedPairs,
    serialize = serialize
}
298 programs/native/lua_loader/src/lib.rs Normal file
@@ -0,0 +1,298 @@
extern crate bincode;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate rlua;
extern crate solana_sdk;

use bincode::deserialize;
use rlua::{Lua, Result, Table};
use solana_sdk::account::KeyedAccount;
use solana_sdk::loader_instruction::LoaderInstruction;
use std::str;
use std::sync::{Once, ONCE_INIT};

/// Make KeyedAccount values available to Lua.
fn set_accounts(lua: &Lua, name: &str, keyed_accounts: &[KeyedAccount]) -> Result<()> {
    let accounts = lua.create_table()?;
    for (i, keyed_account) in keyed_accounts.iter().enumerate() {
        let account = lua.create_table()?;
        account.set("key", keyed_account.key.to_string())?;
        account.set("tokens", keyed_account.account.tokens)?;
        let data_str = lua.create_string(&keyed_account.account.userdata)?;
        account.set("userdata", data_str)?;
        accounts.set(i + 1, account)?;
    }
    let globals = lua.globals();
    globals.set(name, accounts)
}

/// Commit the new KeyedAccount values.
fn update_accounts(lua: &Lua, name: &str, keyed_accounts: &mut [KeyedAccount]) -> Result<()> {
    let globals = lua.globals();
    let accounts: Table = globals.get(name)?;
    for (i, keyed_account) in keyed_accounts.into_iter().enumerate() {
        let account: Table = accounts.get(i + 1)?;
        keyed_account.account.tokens = account.get("tokens")?;
        let data_str: rlua::String = account.get("userdata")?;
        keyed_account.account.userdata = data_str.as_bytes().to_vec();
    }
    Ok(())
}

fn run_lua(keyed_accounts: &mut [KeyedAccount], code: &str, data: &[u8]) -> Result<()> {
    let lua = Lua::new();
    let globals = lua.globals();
    let data_str = lua.create_string(data)?;
    globals.set("data", data_str)?;

    set_accounts(&lua, "accounts", keyed_accounts)?;
    lua.exec::<_, ()>(code, None)?;
    update_accounts(&lua, "accounts", keyed_accounts)
}

#[no_mangle]
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -> bool {
    static INIT: Once = ONCE_INIT;
    INIT.call_once(|| {
        // env_logger can only be initialized once
        env_logger::init();
    });

    if keyed_accounts[0].account.executable {
        let code = keyed_accounts[0].account.userdata.clone();
        let code = str::from_utf8(&code).unwrap();
        match run_lua(&mut keyed_accounts[1..], &code, tx_data) {
            Ok(()) => {
                trace!("Lua success");
                return true;
            }
            Err(e) => {
                warn!("Lua Error: {:#?}", e);
                return false;
            }
        }
    } else if let Ok(instruction) = deserialize(tx_data) {
        match instruction {
            LoaderInstruction::Write { offset, bytes } => {
                let offset = offset as usize;
                let len = bytes.len();
                trace!("LuaLoader::Write offset {} length {:?}", offset, len);
                if keyed_accounts[0].account.userdata.len() < offset + len {
                    warn!(
                        "Write overflow {} < {}",
                        keyed_accounts[0].account.userdata.len(),
                        offset + len
                    );
                    return false;
                }
                keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes);
            }

            LoaderInstruction::Finalize => {
                keyed_accounts[0].account.executable = true;
                trace!("LuaLoader::Finalize prog: {:?}", keyed_accounts[0].key);
            }
        }
    } else {
        warn!("Invalid program transaction: {:?}", tx_data);
        return false;
    }
    true
}

#[cfg(test)]
mod tests {
    extern crate bincode;

    use self::bincode::serialize;
    use super::*;
    use solana_sdk::account::{create_keyed_accounts, Account};
    use solana_sdk::pubkey::Pubkey;
    use std::fs::File;
    use std::io::prelude::*;
    use std::path::PathBuf;

    #[test]
    fn test_update_accounts() -> Result<()> {
        let mut accounts = [(Pubkey::default(), Account::default())];
        let mut keyed_accounts = create_keyed_accounts(&mut accounts);
        let lua = Lua::new();
        set_accounts(&lua, "xs", &keyed_accounts)?;
        keyed_accounts[0].account.tokens = 42;
        keyed_accounts[0].account.userdata = vec![];
        update_accounts(&lua, "xs", &mut keyed_accounts)?;

        // Ensure update_accounts() overwrites the local value 42.
        assert_eq!(keyed_accounts[0].account.tokens, 0);

        Ok(())
    }

    #[test]
    fn test_credit_with_lua() -> Result<()> {
        let code = r#"accounts[1].tokens = accounts[1].tokens + 1"#;
        let mut accounts = [(Pubkey::default(), Account::default())];
        run_lua(&mut create_keyed_accounts(&mut accounts), code, &[])?;
        assert_eq!(accounts[0].1.tokens, 1);
        Ok(())
    }

    #[test]
    fn test_error_with_lua() {
        let code = r#"accounts[1].tokens += 1"#;
        let mut accounts = [(Pubkey::default(), Account::default())];
        assert!(run_lua(&mut create_keyed_accounts(&mut accounts), code, &[]).is_err());
    }

    #[test]
    fn test_move_funds_with_lua_via_process() {
        let userdata = r#"
            local tokens, _ = string.unpack("I", data)
            accounts[1].tokens = accounts[1].tokens - tokens
            accounts[2].tokens = accounts[2].tokens + tokens
        "#.as_bytes()
            .to_vec();

        let alice_pubkey = Pubkey::default();
        let bob_pubkey = Pubkey::default();
        let program_id = Pubkey::default();

        let mut accounts = [
            (
                Pubkey::default(),
                Account {
                    tokens: 1,
                    userdata,
                    program_id,
                    executable: true,
                    loader_program_id: Pubkey::default(),
                },
            ),
            (alice_pubkey, Account::new(100, 0, program_id)),
            (bob_pubkey, Account::new(1, 0, program_id)),
        ];
        let data = serialize(&10u64).unwrap();
        process(&mut create_keyed_accounts(&mut accounts), &data);
        assert_eq!(accounts[1].1.tokens, 90);
        assert_eq!(accounts[2].1.tokens, 11);

        process(&mut create_keyed_accounts(&mut accounts), &data);
        assert_eq!(accounts[1].1.tokens, 80);
        assert_eq!(accounts[2].1.tokens, 21);
    }

    fn read_test_file(name: &str) -> Vec<u8> {
        let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        path.push(name);
        let mut file = File::open(path).unwrap();
        let mut contents = vec![];
        file.read_to_end(&mut contents).unwrap();
        contents
    }

    #[test]
    fn test_load_lua_library() {
        let userdata = r#"
            local serialize = load(accounts[2].userdata)().serialize
            accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s")
        "#.as_bytes()
            .to_vec();
        let program_id = Pubkey::default();
        let program_account = Account {
            tokens: 1,
            userdata,
            program_id,
            executable: true,
            loader_program_id: Pubkey::default(),
        };
        let alice_account = Account::new(100, 0, program_id);
        let serialize_account = Account {
            tokens: 100,
            userdata: read_test_file("serialize.lua"),
            program_id,
            executable: false,
            loader_program_id: Pubkey::default(),
        };
        let mut accounts = [
            (Pubkey::default(), program_account),
            (Pubkey::default(), alice_account),
            (Pubkey::default(), serialize_account),
            (Pubkey::default(), Account::new(1, 0, program_id)),
        ];
        let mut keyed_accounts = create_keyed_accounts(&mut accounts);
        process(&mut keyed_accounts, &[]);
        // Verify deterministic ordering of a serialized Lua table.
        assert_eq!(
            str::from_utf8(&keyed_accounts[3].account.userdata).unwrap(),
            "{a=1,b=2,c=3}"
        );
    }

    #[test]
    fn test_lua_multisig() {
        let program_id = Pubkey::default();

        let alice_pubkey = Pubkey::new(&[0; 32]);
        let serialize_pubkey = Pubkey::new(&[1; 32]);
        let state_pubkey = Pubkey::new(&[2; 32]);
        let bob_pubkey = Pubkey::new(&[3; 32]);
        let carol_pubkey = Pubkey::new(&[4; 32]);
        let dan_pubkey = Pubkey::new(&[5; 32]);
        let erin_pubkey = Pubkey::new(&[6; 32]);

        let program_account = Account {
            tokens: 1,
            userdata: read_test_file("multisig.lua"),
            program_id,
            executable: true,
            loader_program_id: Pubkey::default(),
        };

        let alice_account = Account {
            tokens: 100,
            userdata: Vec::new(),
            program_id,
            executable: true,
            loader_program_id: Pubkey::default(),
        };

        let serialize_account = Account {
            tokens: 100,
            userdata: read_test_file("serialize.lua"),
            program_id,
            executable: true,
            loader_program_id: Pubkey::default(),
        };

        let mut accounts = [
            (Pubkey::default(), program_account), // Account holding the program
            (alice_pubkey, alice_account),        // The payer
            (serialize_pubkey, serialize_account), // Where the serialize library is stored.
            (state_pubkey, Account::new(1, 0, program_id)), // Where program state is stored.
            (bob_pubkey, Account::new(1, 0, program_id)), // The payee once M signatures are collected.
        ];
        let mut keyed_accounts = create_keyed_accounts(&mut accounts);

        let data = format!(
            r#"{{m=2, n={{"{}","{}","{}"}}, tokens=100}}"#,
            carol_pubkey, dan_pubkey, erin_pubkey
        ).as_bytes()
            .to_vec();

        process(&mut keyed_accounts, &data);
        assert_eq!(keyed_accounts[4].account.tokens, 1);

        let data = format!(r#""{}""#, carol_pubkey).into_bytes();
        process(&mut keyed_accounts, &data);
        assert_eq!(keyed_accounts[4].account.tokens, 1);

        let data = format!(r#""{}""#, dan_pubkey).into_bytes();
        process(&mut keyed_accounts, &data);
        assert_eq!(keyed_accounts[4].account.tokens, 101); // Pay day!

        let data = format!(r#""{}""#, erin_pubkey).into_bytes();
        process(&mut keyed_accounts, &data);
        assert_eq!(keyed_accounts[4].account.tokens, 101); // No change!
    }
}
15 programs/native/noop/Cargo.toml Normal file
@@ -0,0 +1,15 @@
[package]
name = "solana-noop"
version = "0.10.5"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"

[dependencies]
solana-sdk = { path = "../../../sdk", version = "0.10.5" }

[lib]
name = "noop"
crate-type = ["cdylib"]
10 programs/native/noop/src/lib.rs Normal file
@@ -0,0 +1,10 @@
extern crate solana_sdk;

use solana_sdk::account::KeyedAccount;

#[no_mangle]
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> bool {
    println!("noop: keyed_accounts: {:#?}", keyed_accounts);
    println!("noop: data: {:?}", data);
    true
}
@@ -1,21 +0,0 @@
[package]
name = "noop"
version = "0.1.0"
authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
    "Jack May <jack@solana.com>",
]

[dependencies]
solana_program_interface = { path = "../../common" }

[lib]
name = "noop"
crate-type = ["dylib"]
@@ -1,6 +0,0 @@
extern crate solana_program_interface;

use solana_program_interface::account::KeyedAccount;

#[no_mangle]
pub extern "C" fn process(_infos: &mut Vec<KeyedAccount>, _data: &[u8]) {}
@@ -1,21 +0,0 @@
[package]
name = "print"
version = "0.1.0"
authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
    "Jack May <jack@solana.com>",
]

[dependencies]
solana_program_interface = { path = "../../common" }

[lib]
name = "print"
crate-type = ["dylib"]
@@ -1,9 +0,0 @@
extern crate solana_program_interface;

use solana_program_interface::account::KeyedAccount;

#[no_mangle]
pub extern "C" fn process(infos: &mut Vec<KeyedAccount>, _data: &[u8]) {
    println!("AccountInfos: {:#?}", infos);
    //println!("data: {:#?}", data);
}
@@ -1,10 +1,17 @@
 # Smart Contracts Engine
 
-The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.
+The goal of this RFC is to define a set of constraints for APIs and the smart contracts runtime such that we can execute our contracts safely on massively parallel hardware such as a GPU.
 
 ## Version
 
-version 0.2
+Version 0.3
 
+## Definitions
+
+* Transaction - an atomic operation with multiple instructions. All Instructions must complete successfully for the transaction to be committed.
+* Instruction - a call to a program that modifies Account token balances and Account-specific userdata state. A single transaction may have multiple Instructions with different Accounts and Programs.
+* Program - code that modifies Account token balances and Account-specific userdata state.
+* Account - a single instance of state. Accounts are looked up by account Pubkeys and are associated with a Program's Pubkey.
+
 ## Toolchain Stack
@@ -39,173 +46,136 @@ In Figure 1 an untrusted client, creates a program in the front-end language of
 
 ## Runtime
 
-The goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. The goal is to execute as many contracts as possible in parallel, and have them pass or fail without a destructive state change.
+The goal with the runtime is to have a general purpose execution environment that is highly parallelizable. To achieve this goal the runtime forces each Instruction to specify all of its memory dependencies up front, so a single Instruction cannot cause a dynamic memory allocation. An explicit Instruction, `SystemProgram::CreateAccount`, is the only way to allocate new memory in the engine. A Transaction may compose multiple Instructions, including `SystemProgram::CreateAccount`, into a single atomic sequence, which lets memory allocation achieve a result similar to dynamic allocation.
 ### State
 
-State is addressed by an account which is at the moment simply the Pubkey. Our goal is to eliminate memory allocation from within the smart contract itself. Thus the client of the contract provides all the state that is necessary for the contract to execute in the transaction itself. The runtime interacts with the contract through a state transition function, which takes a mapping of [(Pubkey,State)] and returns [(Pubkey, State')]. The State is an opaque type to the runtime, a `Vec<u8>`, the contents of which the contract has full control over.
+State is addressed by an Account which is at the moment simply the Pubkey. Our goal is to eliminate memory allocation from within the program itself. Thus the client of the program provides all the state that is necessary for the program to execute in the transaction itself. The runtime interacts with the program through an entry point with a well defined interface. The userdata stored in an Account is an opaque type to the runtime, a `Vec<u8>`, the contents of which the program code has full control over.
 
-### Call Structure
+### Transaction structure
```
/// An atomic transaction
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
    /// A digital signature of `account_keys`, `program_ids`, `last_id`, `fee` and
    /// `instructions`, signed by `account_keys[0]`.
    pub signature: Signature,

    /// The `Pubkey`s whose state this transaction's instructions operate over. The
    /// meaning of each key is program-specific.
    /// * account_keys[0] - Typically this is the `caller` public key. `signature` is
    ///   verified with account_keys[0]. In the future which key pays the fee and which
    ///   keys have signatures would be configurable.
    /// * account_keys[1] - Typically this is the program context or the recipient of
    ///   the tokens.
    pub account_keys: Vec<Pubkey>,

    /// The ID of a recent ledger entry.
    pub last_id: Hash,

    /// The number of tokens paid for processing and storage of this transaction.
    pub fee: i64,

    /// Keys identifying programs in the instructions vector.
    pub program_ids: Vec<Pubkey>,

    /// Instructions that will be executed in sequence and committed in one atomic
    /// transaction if all succeed.
    pub instructions: Vec<Instruction>,
}
```

The Transaction structure specifies a list of `Pubkey`s and signatures for those keys, and a sequential list of instructions that will operate over the states associated with the `account_keys`. For the transaction to be committed all the instructions must execute successfully; if any aborts, the whole transaction fails to commit.

Programs should be able to read any state that is part of the runtime, but only write to state that they own.
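
A minimal sketch of that all-or-nothing rule, assuming a hypothetical `process_instruction` helper and that `Account` is cloneable:

```
// Sketch: run the instructions in sequence against copies of the loaded
// accounts; only overwrite the originals if every instruction succeeds.
fn execute_transaction(tx: &Transaction, accounts: &mut [Account]) -> Result<()> {
    let mut scratch: Vec<Account> = accounts.to_vec();
    for pix in 0..tx.instructions.len() {
        // Any failure aborts the whole transaction; `scratch` is discarded.
        process_instruction(tx, pix, &mut scratch)?;
    }
    // All instructions succeeded: commit atomically.
    accounts.clone_from_slice(&scratch);
    Ok(())
}
```
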
### Account structure

Accounts maintain token state as well as program-specific memory.

```
/// An Account with userdata that is stored on chain
pub struct Account {
    /// tokens in the account
    pub tokens: i64,

    /// user data
    /// A transaction can write to its userdata
    pub userdata: Vec<u8>,

    /// program id this Account belongs to
    pub program_id: Pubkey,
}
```

# Transaction Engine

At its core, the engine looks up all the Pubkeys, maps them to accounts, and routes them to the `program_id` entry point.

## Execution

Transactions are batched and processed in a pipeline:

```
+-----------+    +-------------+    +--------------+    +-------------------+
| sigverify |--->| lock memory |--->| validate fee |--->| allocate accounts |--->
+-----------+    +-------------+    +--------------+    +-------------------+

    +-----------+    +---------+    +-------------+    +---------------+
--->| load data |--->| execute |--->| commit data |--->| unlock memory |
    +-----------+    +---------+    +-------------+    +---------------+
```

At the `execute` stage, the loaded accounts have no data dependencies, so all the programs can be executed in parallel.
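
One way to realize that parallelism is to lock on `account_keys`: transactions whose key sets are disjoint cannot observe each other's writes, so they may run concurrently. A hedged sketch of such a scheduler, not the engine's actual implementation:

```
use std::collections::HashSet;

// Sketch: greedily split a batch into a set of non-conflicting
// transactions (runnable in parallel) and a remainder to retry later.
fn schedule(batch: Vec<Transaction>) -> (Vec<Transaction>, Vec<Transaction>) {
    let mut locked: HashSet<Pubkey> = HashSet::new();
    let (mut runnable, mut retry) = (Vec::new(), Vec::new());
    for tx in batch {
        if tx.account_keys.iter().any(|k| locked.contains(k)) {
            retry.push(tx); // conflicts with an already-scheduled transaction
        } else {
            locked.extend(tx.account_keys.iter().cloned());
            runnable.push(tx); // shares no accounts: safe to run in parallel
        }
    }
    (runnable, retry)
}
```
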
The runtime enforces the following rules:

1. The `program_id` code is the only code that will modify the contents of `Account::userdata` of Accounts that have been assigned to it. This means that upon assignment the userdata vector is guaranteed to be zeroed.
2. The total of the balances on all the accounts is equal before and after execution of a Transaction.
3. The balance of each account not assigned to `program_id` must be equal to or greater after the Transaction than before it.
4. All Instructions in the Transaction executed without a failure.
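
Rules 2 and 3 are mechanically checkable after execution. A sketch, assuming the runtime keeps a snapshot of the accounts as they were loaded:

```
// Sketch: post-execution check of the balance rules. `pre` holds the
// accounts as loaded; `post` holds them after the program ran.
fn verify_balance_rules(program_id: &Pubkey, pre: &[Account], post: &[Account]) -> bool {
    // Rule 2: tokens are conserved across the transaction.
    let total_pre: i64 = pre.iter().map(|a| a.tokens).sum();
    let total_post: i64 = post.iter().map(|a| a.tokens).sum();
    if total_pre != total_post {
        return false;
    }
    // Rule 3: only accounts assigned to `program_id` may lose tokens.
    pre.iter()
        .zip(post.iter())
        .all(|(b, a)| a.tokens >= b.tokens || b.program_id == *program_id)
}
```
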
## Entry Point

Execution of the program involves mapping the Program's public key to an entry point which takes a pointer to the transaction, and an array of loaded accounts.

```
pub fn process_transaction(
    tx: &Transaction,
    pix: usize,
    accounts: &mut [&mut Account],
) -> Result<()>;
```
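
The routing step can be pictured as a lookup from a program's Pubkey to its entry point. A simplified sketch; the registry, the `program_ids_index` field, and the error variant are assumptions:

```
use std::collections::HashMap;

// Sketch: route one instruction to the entry point registered for its
// program id. `EntryPoint` mirrors the process_transaction signature.
type EntryPoint = fn(&Transaction, usize, &mut [&mut Account]) -> Result<()>;

fn route(
    registry: &HashMap<Pubkey, EntryPoint>,
    tx: &Transaction,
    pix: usize,
    accounts: &mut [&mut Account],
) -> Result<()> {
    let ix = &tx.instructions[pix];
    let program_id = &tx.program_ids[ix.program_ids_index as usize];
    // An unknown program id fails the instruction, and thus the transaction.
    let entry = registry.get(program_id).ok_or(Error::UnknownProgram)?;
    entry(tx, pix, accounts)
}
```
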
## System Interface

```
pub enum SystemProgram {
    /// Create a new account
    /// * Transaction::keys[0] - source
    /// * Transaction::keys[1] - new account key
    /// * tokens - number of tokens to transfer to the new account
    /// * space - memory to allocate if greater than zero
    /// * program_id - the program id of the new account
    CreateAccount {
        tokens: i64,
        space: u64,
        program_id: Pubkey,
    },
    /// Assign account to a program
    /// * Transaction::keys[0] - account to assign
    Assign { program_id: Pubkey },
    /// Move tokens
    /// * Transaction::keys[0] - source
    /// * Transaction::keys[1] - destination
    Move { tokens: i64 },
}
```

The interface is best described by the `Instruction::userdata` that the user encodes:

* `CreateAccount` - This allows the user to create and assign an Account to a Program.
* `Assign` - Allows the user to assign an existing account to a `Program`.
* `Move` - Moves tokens between `Account`s that are associated with the `SystemProgram`. This cannot be used to move the tokens of other `Account`s; Programs need to implement their own version of Move.

This ensures that when memory is assigned to a Program the initial state of all of its bytes is zero, and that the Program's code is the only thing that can modify that state.
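
As an illustration, the `Move` variant might be handled roughly as follows; the error type and exact account ordering are assumptions:

```
// Sketch: SystemProgram handling of Move. accounts[0] is the source
// (Transaction::keys[0]) and accounts[1] the destination.
fn process_move(tokens: i64, accounts: &mut [&mut Account]) -> Result<()> {
    if accounts[0].tokens < tokens {
        return Err(Error::InsufficientFunds); // illustrative error variant
    }
    accounts[0].tokens -= tokens;
    accounts[1].tokens += tokens;
    Ok(())
}
```
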
## Notes

1. There is no dynamic memory allocation. Clients need to call the `SystemProgram` to create memory before passing it to another program. This Instruction can be composed into a single Transaction with the call to the program itself.
2. Runtime guarantees that when memory is assigned to the `Program` it is zero-initialized.
3. Runtime guarantees that the `Program`'s code is the only thing that can modify the memory assigned to it.
4. Runtime guarantees that the `Program` can only spend tokens that are in `Account`s that are assigned to it.
5. Runtime guarantees that the total of the balances belonging to `Account`s is the same before and after the transaction.
6. Runtime guarantees that all of a transaction's instructions executed successfully when it is committed.

# Future Work

* Continuations and Signals for long running Transactions. https://github.com/solana-labs/solana/issues/1485

@ -1,32 +1,42 @@

# Consensus

VERY WIP

The goal of this RFC is to define the consensus algorithm used in Solana. This proposal covers a Proof of Stake (PoS) algorithm that leverages Proof of History (PoH). PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time between partitions.

## Version

version 0.4

## Basic Design Idea

Nodes on the network can be "up" or "down". A node indicates it is up either by voting as a validator or by generating a PoH stream as the designated leader. Consensus is reached when a supermajority + 1 of the staked nodes have voted on the state of the network at a particular PoH tick count.

Nodes take turns being leader and generating the PoH that encodes state changes. The network can tolerate loss of connection to any leader by synthesizing what the leader ***would have generated*** had it been connected but not ingesting any state changes. The complexity of forks is thereby limited to a "there/not-there" skip list of branches that may arise on leader rotation period boundaries.

## Message Flow

1. Transactions are ingested at the current leader.
2. Leader filters for valid transactions.
3. Leader executes valid transactions on its state.
4. Leader packages transactions into entries based off the longest observed PoH branch.
5. Leader transmits the entries to validator nodes (in signed blobs).
   a. The set of supermajority + `M` by stake weight of nodes is rotated in round robin fashion.
   b. The PoH stream includes ticks; empty entries that indicate liveness of the leader and the passage of time on the network.
   c. A leader's stream begins with the tick entries necessary to complete the PoH back to that node's most recently observed prior leader period.
6. Validators retransmit entries to peers in their set and to further downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. At specific times, i.e. specific PoH tick counts, validators transmit votes to the leader.
   a. Votes are signatures of the hash of the computed state at that PoH tick count.
10. Leader executes the votes as any other transaction and broadcasts them to the network.
    a. The leader votes at that same height once a majority of stake is represented on the PoH stream *(open question: do leaders vote?)*
11. Validators observe their votes and all the votes from the network.
12. Validators vote on the longest chain of periods that contains their vote.

Supermajority is defined as `2/3rds + 1` vote of the PoS stakes.
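
In code form this is a weighted threshold test. A sketch, assuming a map from staking key to stake:

```
use std::collections::HashMap;

// Sketch: has a supermajority (2/3rds + 1) of the staked tokens voted?
fn is_supermajority(stakes: &HashMap<Pubkey, u64>, voters: &[Pubkey]) -> bool {
    let voted: u64 = voters.iter().filter_map(|k| stakes.get(k)).sum();
    let total: u64 = stakes.values().sum();
    voted >= total * 2 / 3 + 1
}
```
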
## Staking

Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.

@ -43,7 +53,7 @@ CreateStake(
)
```

Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain number of validation voting rounds.

## Validation Votes

@ -63,7 +73,7 @@ Validators `stake` some of their spendable sol into a staking account. The stak

```
Slash(Validate(
    PoH tick count,
    PoH hash,
    stake public key,
    ...

@ -75,48 +85,14 @@ When the `Slash` vote is processed, validators should lookup `PoH hash` at `PoH

## Leader Slashing

The goal is to discourage leaders from generating multiple PoH streams. When this occurs, the network adopts ticks for that leader's period. Leaders can be slashed for generating multiple conflicting PoH streams during their period.

## Validation Vote Contract

The goal of this contract is to simulate the economic cost of mining on a shorter branch.

1. With my signature I am certifying that I computed `state hash` at `PoH tick count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH tick count` + `N` is reached by the PoH stream (lockout period).
3. I will not vote for any other branch below `PoH tick count`.
   a. If there are other votes not present in this PoH history the validator may need to `cancel` them before creating this vote.
4. Each vote on a branch increases the lockout for all prior votes on that branch according to a network-specified function.
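
The lockout function itself is network-specified. Purely as an illustration, a doubling schedule would look like:

```
// Illustrative only: one possible lockout function, doubling the lockout
// of a vote with each subsequent vote on the same branch. The actual
// function is network-specified.
fn lockout(base_counts: u64, votes_since: u32) -> u64 {
    base_counts * 2u64.pow(votes_since)
}
```
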
@ -52,3 +52,4 @@ Our solution to this is to force the clients to continue using the same identity

* Replicator clients fish for lazy validators by submitting fake proofs that they can prove are fake.
* Replication identities are symmetric encryption keys; the number of them on the network is our storage replication target. Many more client identities can exist than replicator identities, so an unlimited number of clients can provide proofs of the same replicator identity.
* To defend against Sybil client identities that try to store the same block we force the clients to store for multiple rounds before receiving a reward.
* Validators should also get rewarded for validating submitted storage proofs as an incentive for storing the ledger. They can only validate proofs if they are storing that slice of the ledger.

rfcs/rfc-004-leader-rotation.md (new file)
@ -0,0 +1,108 @@

# Leader Rotation

The goal of this RFC is to define how leader nodes are rotated in Solana, how rotation may cause forks to arise, and how the network converges in response.

## Version

version 0.1

## Leader Seed Generation

Leader selection is decided via a random seed. The process is as follows:

1. Periodically, at a specific `PoH tick count`, select the first vote signatures that create a supermajority from the previous voting round.
2. Append them together.
3. Hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts (`M` leader periods), where `M > N`.
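
A sketch of that derivation, assuming `Signature` and `Hash` expose their bytes via `as_ref` and a PoH-style `hash` function:

```
// Sketch: derive the rotation seed by appending the selected vote
// signatures and then re-hashing the result for `N` counts, PoH-style.
fn leader_seed(signatures: &[Signature], n: u64) -> Hash {
    let mut bytes = Vec::new();
    for sig in signatures {
        bytes.extend_from_slice(sig.as_ref()); // append them together
    }
    let mut seed = hash(&bytes);
    for _ in 0..n {
        seed = hash(seed.as_ref()); // hash for `N` counts
    }
    seed
}
```
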
## Leader Rotation

1. The leader is chosen via a random seed generated from stake weights and votes (the leader schedule).
2. The leader is rotated every `T` PoH ticks (a leader period), according to the leader schedule.
3. The schedule is applicable for `M` voting rounds.

Leaders transmit for a count of `T` PoH ticks. When `T` is reached, all the validators should switch to the next scheduled leader. To schedule leaders, the supermajority + `M` nodes are shuffled using the above calculated random seed.

All `T` ticks must be observed from the current leader for that part of PoH to be accepted by the network. If `T` ticks (and any intervening transactions) are not observed, the network optimistically fills in the `T` ticks, and continues with PoH from the next leader.
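
Putting the seed and `T` together, the scheduled leader for a given tick might be computed as follows; the shuffle shown is a stand-in, not the real algorithm:

```
// Sketch: rank eligible nodes with a seed-keyed shuffle, then index by
// leader period, where each period is `T` PoH ticks long.
fn leader_at(nodes: &[Pubkey], seed: u64, tick_count: u64, t: u64) -> Pubkey {
    let mut ranked = nodes.to_vec();
    // Toy deterministic shuffle driven by the seed (illustrative only).
    for i in (1..ranked.len()).rev() {
        let j = (seed.wrapping_mul(i as u64 + 1) as usize) % (i + 1);
        ranked.swap(i, j);
    }
    let period = (tick_count / t) as usize;
    ranked[period % ranked.len()].clone()
}
```
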
## Partitions, Forks

Forks can arise at PoH tick counts that correspond to leader rotations, because leader nodes may or may not have observed the previous leader's data. The empty ticks are generated by all nodes in the network at a network-specified rate of `Z` hashes per tick.

There are only two possible versions of the PoH during a voting period: PoH with `T` ticks and entries generated by the current leader, or PoH with just ticks. The "just ticks" version of the PoH can be thought of as a virtual ledger, one that all nodes in the network can derive from the last tick in the previous period.

Validators can ignore forks at other points (e.g. from the wrong leader), or slash the leader responsible for the fork.

Validators vote on the longest chain that contains their previous vote, or a longer chain if the lockout on their previous vote has expired.

#### Validator's View

##### Time Progression

The diagram below represents a validator's view of the PoH stream with possible forks over time. L1, L2, etc. are leader periods, and `E`s represent entries from that leader during that leader's period. The `x`s represent ticks only, and time flows downwards in the diagram.

```
time  +----+                                            validator action
  |   | L1 |                  E(L1)
  |   |----|                 /     \                    vote(E(L2))
  |   | L2 |            E(L2)       x
  |   |----|           /     \     / \                  vote(E(L2))
  |   | L3 |       E(L3)      x  E(L3)'  x
  |   |----|       /  \      / \   /  \   / \           slash(L3)
  |   | L4 |      x    x  E(L4) x x    x x   x
  V   |----|      |    |    |   | |    | |   |          vote(E(L4))
  V   | L5 |     xx   xx   xx E(L5)   xx xx xx  xx
  V   +----+                                            hang on to E(L4) and E(L5) for more...
```

Note that an `E` appearing on 2 branches at the same period is a slashable condition, so a validator observing `E(L3)` and `E(L3)'` can slash L3 and safely choose `x` for that period. Once a validator observes a supermajority vote on any branch, other branches can be discarded below that tick count. For any period, validators need only consider a single "has entries" chain or a "ticks only" chain.

##### Time Division

It's useful to consider leader rotation over PoH tick count as time division of the job of encoding state for the network. The following table presents the above tree of forks as a time-divided ledger.

leader period | L1    | L2    | L3    | L4    | L5
--------------|-------|-------|-------|-------|------
data          | E(L1) | E(L2) | E(L3) | E(L4) | E(L5)
ticks to prev |       |       |       | x     | xx

Note that only data from leader L3 will be accepted during leader period L3. Data from L3 may include "catchup" ticks back to a period other than L2 if L3 did not observe L2's data. L4 and L5's transmissions include the "ticks to prev" PoH entries.

This arrangement of the network data streams permits nodes to save exactly this to the ledger for replay, restart, and checkpoints.

#### Leader's View

When a new leader begins a period, it must first transmit any PoH (ticks) required to link the new period with the most recently observed and voted period.

## Examples

### Small Partition

1. Network partition M occurs for 10% of the nodes.
2. The larger partition K, with 90% of the stake weight, continues to operate as normal.
3. M cycles through the ranks until one of them is leader, generating ticks for periods where the leader is in K.
4. M validators observe 10% of the vote pool; finality is not reached.
5. M and K re-connect.
6. M validators cancel their votes on M, which has not reached finality, and re-cast on K (after their vote lockout on M).

### Leader Timeout

1. Next rank leader node B observes a timeout from current leader A, fills in A's period with virtual ticks and starts sending out entries.
2. Nodes observing both streams keep track of the forks, waiting for:
   a. their vote on leader A to expire in order to be able to vote on B
   b. a supermajority on A's period
3. If (a) occurs, leader B's period is filled with ticks; if (b) occurs, A's period is filled with ticks.
4. Partition is resolved just like in the [Small Partition](#small-partition) example.

## Network Variables

`M` - number of nodes outside the supermajority to whom leaders broadcast their PoH for validation

`N` - number of voting rounds for which a leader schedule is considered before a new leader schedule is used

`T` - number of PoH ticks per leader period (also voting period)

`Z` - number of hashes per PoH tick

@ -117,6 +117,14 @@ $ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
<TX_SIGNATURE>
```

### Deploy program
```
// Command
$ solana-wallet deploy <PATH>

// Return
<PROGRAM_ID>
```

## Javascript solana-web3.js Interface


scripts/increment-cargo-version.sh (new executable file)
@ -0,0 +1,98 @@

#!/bin/bash -e

usage() {
  cat <<EOF
usage: $0 [major|minor|patch|-preXYZ]

Increments the Cargo.toml version.
A minor version increment is the default
EOF
  exit 0
}

here="$(dirname "$0")"
cd "$here"/..
source ci/semver_bash/semver.sh

readCargoVariable() {
  declare variable="$1"
  declare Cargo_toml="$2"

  while read -r name equals value _; do
    if [[ $name = "$variable" && $equals = = ]]; then
      echo "${value//\"/}"
      return
    fi
  done < <(cat "$Cargo_toml")
  echo "Unable to locate $variable in $Cargo_toml" 1>&2
}

# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
Cargo_tomls="$(find . -name Cargo.toml)"

# Collect the name of all the internal crates
crates=()
for Cargo_toml in $Cargo_tomls; do
  crates+=("$(readCargoVariable name "$Cargo_toml")")
done

# Read the current version
MAJOR=0
MINOR=0
PATCH=0
SPECIAL=""
semverParseInto "$(readCargoVariable version ./Cargo.toml)" MAJOR MINOR PATCH SPECIAL
[[ -n $MAJOR ]] || usage

currentVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
SPECIAL=""

# Figure out what to increment
case ${1:-minor} in
patch)
  PATCH=$((PATCH + 1))
  ;;
major)
  MAJOR=$((MAJOR + 1))
  ;;
minor)
  MINOR=$((MINOR + 1))
  ;;
-*)
  if [[ $1 =~ ^-[A-Za-z0-9]*$ ]]; then
    SPECIAL="$1"
  else
    echo "Error: Unsupported characters found in $1"
    exit 1
  fi
  ;;
*)
  echo "Error: unknown argument: $1"
  usage
  ;;
esac

newVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"

# Update all the Cargo.toml files
for Cargo_toml in $Cargo_tomls; do
  # Set new crate version
  (
    set -x
    sed -i "$Cargo_toml" -e "s/^version = \"[^\"]*\"$/version = \"$newVersion\"/"
  )

  # Fix up the version references to other internal crates
  for crate in "${crates[@]}"; do
    (
      set -x
      sed -i "$Cargo_toml" -e "
        s/^$crate = .*path = \"\([^\"]*\)\".*\$/$crate = \{ path = \"\1\", version = \"$newVersion\" \}/
      "
    )
  done
done

echo "$currentVersion -> $newVersion"

exit 0

scripts/install-native-programs.sh (new executable file)
@ -0,0 +1,29 @@

#!/bin/bash -e
#
# Installs native programs as |cargo install| doesn't know about them
#

here=$(dirname "$0")
SOLANA_ROOT="$(cd "$here"/..; pwd)"

installDir=$1
variant=${2:-release}

if [[ -z $installDir ]]; then
  echo Install directory not specified
  exit 1
fi

if [[ ! -d $installDir ]]; then
  echo "Not a directory: $installDir"
  exit 1
fi

for dir in "$SOLANA_ROOT"/programs/native/*; do
  for program in "$SOLANA_ROOT"/target/"$variant"/deps/lib{,solana_}"$(basename "$dir")".{so,dylib,dll}; do
    if [[ -f $program ]]; then
      cp -v "$program" "$installDir"
    fi
  done
done

@ -18,6 +18,10 @@ receive_errors=0

receive_errors_diff=0
rcvbuf_errors=0
rcvbuf_errors_diff=0
in_octets=0
in_octets_diff=0
out_octets=0
out_octets_diff=0

update_netstat() {
  declare net_stat

@ -39,13 +43,21 @@ update_netstat() {
  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /RcvbufErrors/ {tmp_var = $2} END { print tmp_var }')
  rcvbuf_errors_diff=$((stats - rcvbuf_errors))
  rcvbuf_errors="$stats"

  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /InOctets/ {tmp_var = $2} END { print tmp_var }')
  in_octets_diff=$((stats - in_octets))
  in_octets="$stats"

  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /OutOctets/ {tmp_var = $2} END { print tmp_var }')
  out_octets_diff=$((stats - out_octets))
  out_octets="$stats"
}

update_netstat

while true; do
  update_netstat
  report="packets_sent=$packets_sent_diff,packets_received=$packets_received_diff,receive_errors=$receive_errors_diff,rcvbuf_errors=$rcvbuf_errors_diff,in_octets=$in_octets_diff,out_octets=$out_octets_diff"

  echo "$report"
  ./metrics-write-datapoint.sh "net-stats,hostname=$HOSTNAME $report"