Compare commits
476 Commits
SHA1 | Author | Date | |
---|---|---|---|
09db7b5b52 | |||
fa9faa2cec | |||
d2dc585974 | |||
6721bdde3d | |||
a733873b8f | |||
7c02bbc47c | |||
16a815d2b1 | |||
ddb490e2fb | |||
242d0a23fb | |||
869009243d | |||
7b61f5279c | |||
7ef0b815ec | |||
8742de789e | |||
bfadd7b787 | |||
2e14bfcf4e | |||
a19426f055 | |||
df366017a7 | |||
7d76badd03 | |||
8047ab777c | |||
0d0a1c2919 | |||
1da90017ce | |||
0909618efa | |||
28bb7849f4 | |||
9cffd3a1ea | |||
917151ce54 | |||
6dcd127634 | |||
af66edf8c0 | |||
ab5b921e8f | |||
6c2843543b | |||
85f74cc537 | |||
43665115b4 | |||
156115c04c | |||
a66577eb87 | |||
3345d059e8 | |||
8c8c5de779 | |||
f03e971598 | |||
b4a1cdceaa | |||
b250d20059 | |||
dc3b270410 | |||
9d5092a71c | |||
a287c9e5fa | |||
ee85d534f9 | |||
6e1b291c17 | |||
68f7b1ecf3 | |||
58fe5cabd6 | |||
8993c6ae24 | |||
0e56473add | |||
f6b709ca48 | |||
ffa1fa557b | |||
e7631c85a1 | |||
edeadb503f | |||
d2044f2562 | |||
5703c740cf | |||
6ae20e78e2 | |||
506fc3baeb | |||
68523f4a7f | |||
beae217ab9 | |||
2c8c117e3c | |||
3a1285ebe5 | |||
e2660f2ac1 | |||
22eb1b977f | |||
43ef8d7bb7 | |||
d9271f2d30 | |||
dfbfd4d4dd | |||
9cb262ad4b | |||
73ee0cb100 | |||
9a6154beaf | |||
3f494bb91b | |||
2eb312796d | |||
3fb86662fb | |||
dce31f6002 | |||
39c42a6aba | |||
9961c0ee0a | |||
3f843f21b9 | |||
d07961a58b | |||
b85aa9282e | |||
1cd354cf15 | |||
92cd2d09ed | |||
a40122548f | |||
6e27f797bd | |||
476a585222 | |||
aa74ddb6c0 | |||
95921ce129 | |||
ee6d00a2fe | |||
212cbc4977 | |||
a6af1ba08d | |||
ee27e9e1cf | |||
4d21ee0546 | |||
493a2477b5 | |||
e284af33b9 | |||
f0aa14e135 | |||
fb9d8dfa99 | |||
4b02bbc802 | |||
18cf660f61 | |||
376303a1eb | |||
f295eb06d0 | |||
f423f61d8b | |||
94b06b2cbf | |||
9b2fc8cde7 | |||
d810752e86 | |||
fdaad1d85b | |||
7f29c1fe23 | |||
68df9d06db | |||
b60cb48c18 | |||
0fee854220 | |||
0cc7bbfe7d | |||
68834bd4c5 | |||
2df40cf9c9 | |||
f671b7f63f | |||
236113e417 | |||
a340b18b19 | |||
f6c8e1a4bf | |||
160cff4a30 | |||
48685cf766 | |||
0f32102684 | |||
d46682d1f2 | |||
55833e20b1 | |||
02cfa76916 | |||
9314eea7e9 | |||
1733beabf7 | |||
471d8f6ff9 | |||
e47fcb196b | |||
3ae53961c8 | |||
113b002095 | |||
9447537d8c | |||
7404b8739e | |||
7239395d95 | |||
926d459c8f | |||
7cabe203dc | |||
1e53f4266a | |||
24b513c3c7 | |||
b982595c73 | |||
af8a36b7fb | |||
208e7d7943 | |||
557736f1cf | |||
61927e1941 | |||
fc75827aaf | |||
2f2531d921 | |||
d5f20980eb | |||
21eae981f9 | |||
ead7f4287a | |||
3b33150cfb | |||
6d34a68e54 | |||
5c483c9928 | |||
a68c99d782 | |||
0aebbae909 | |||
a3a2215bda | |||
eb377993b3 | |||
5ca52d785c | |||
8d9912b4e2 | |||
c77b1c9687 | |||
8849ecd772 | |||
7977b97227 | |||
4f34822900 | |||
bbb38ac106 | |||
ce934a547e | |||
16b19d35dd | |||
45cfa5b574 | |||
df9ccce5b2 | |||
f8516b677a | |||
dfde83bdce | |||
cb0f19e4f1 | |||
26b99d3f85 | |||
2f9c0d1d9e | |||
0423cafbeb | |||
0bd1412562 | |||
0339642e77 | |||
37a0b7b132 | |||
c30b605047 | |||
76076d6fad | |||
0a819ec4e2 | |||
57a717056e | |||
856c48541f | |||
2045091c4f | |||
03ac5a6eef | |||
32fadc9c30 | |||
15a89d4f17 | |||
d0f43e9934 | |||
31e779d3f2 | |||
30c79fd40d | |||
639c93460a | |||
7611730cdb | |||
9df9c1433a | |||
4ea422bcec | |||
6074e4f962 | |||
d52e6d01ec | |||
63caca33be | |||
64efa62a74 | |||
912eb5e8e9 | |||
bb628e8495 | |||
d0c19c2c97 | |||
926fdb7519 | |||
c886625c83 | |||
f6c10d8a2e | |||
2bd877528f | |||
d09889b1dd | |||
1b2e9122d5 | |||
7424388924 | |||
537436bd5e | |||
32fc0cd7e9 | |||
fb99494858 | |||
5b4d4b97bc | |||
c5180c8092 | |||
515c200d86 | |||
32aab82e32 | |||
6aaa350145 | |||
d3b4dfe104 | |||
9fc30f6db4 | |||
2d0f07091d | |||
3828eda507 | |||
1e736ec16d | |||
bba6437ea9 | |||
e5ab9a856c | |||
1515bba9c6 | |||
14a9ef4bbe | |||
041040c659 | |||
47f69f2d24 | |||
9dd4dc2088 | |||
b534c32ee3 | |||
d2712f1457 | |||
183f560d06 | |||
ae150c0897 | |||
606e1396cf | |||
5c85e037f8 | |||
5c523716aa | |||
5f8cbf359e | |||
e83834e6be | |||
02225aa95c | |||
9931ac9780 | |||
2ba2bc72ca | |||
45b8ba9ede | |||
40968e09b7 | |||
262f26cf76 | |||
785c619198 | |||
24a993710d | |||
c240bb12ae | |||
eed3b9db94 | |||
29a8823db1 | |||
a80955eacb | |||
9716c3de71 | |||
34fa3208e0 | |||
9c4e19958b | |||
0403299728 | |||
95701114e3 | |||
a99d17c3ac | |||
517149d325 | |||
32aa2575b5 | |||
8fe7b96629 | |||
9350619afa | |||
d8d8f0bfc8 | |||
0a39722719 | |||
9c0fa4d1d2 | |||
da0404ad03 | |||
b508fdb62c | |||
680f90df21 | |||
1a68807ad9 | |||
d901767b54 | |||
13d4443d4d | |||
74b63c12a0 | |||
cd42f6591a | |||
5491422b12 | |||
23f3ff3cf0 | |||
f90488c77b | |||
beb4536841 | |||
3fa46dd66d | |||
ad5fcf778f | |||
83b000ae88 | |||
33e179caa6 | |||
b1e941cab9 | |||
6db961d256 | |||
83409ded59 | |||
396b2e9772 | |||
94459deb94 | |||
660af84b8d | |||
7b31020903 | |||
9a4143b4d9 | |||
aebc47ad55 | |||
b6b5455917 | |||
5bc01cd51a | |||
c79acac37b | |||
a5f2aa6777 | |||
4169e5c510 | |||
0727c440b3 | |||
19a7ff0c43 | |||
5f18403199 | |||
9f325fca09 | |||
10d08acefa | |||
52d50e6bc4 | |||
e7de7c32db | |||
a5f07638ec | |||
aa2a3fe201 | |||
abd13ba4ca | |||
485ba093b3 | |||
36b18e4fb5 | |||
8d92232949 | |||
e4d8c094a4 | |||
d26e1c51a9 | |||
675ff64094 | |||
423e7ebc3f | |||
f9fe6a0f72 | |||
8d007bd7f7 | |||
6cdbdfbbcb | |||
35e6343d61 | |||
7fb7839c8f | |||
dbc1ffc75e | |||
1fdbe893c5 | |||
55a542bff0 | |||
e10574c64d | |||
2e00be262e | |||
4172bde081 | |||
9c47e022dc | |||
874addc51a | |||
b7ae5b712a | |||
c6d7cd2d33 | |||
386a96b7e0 | |||
b238c57179 | |||
1821e72812 | |||
a23c230603 | |||
4e01fd5458 | |||
e416cf7adf | |||
25edb9e447 | |||
93c4f6c9b8 | |||
718031ec35 | |||
d546614936 | |||
ac8d738045 | |||
ca962371b8 | |||
e6f8922e35 | |||
7292ece7ad | |||
df3b78c18c | |||
c83dcea87d | |||
be20c99758 | |||
694add9919 | |||
afc764752c | |||
113c8b5880 | |||
a5b28349ed | |||
bb7ecc7cd9 | |||
14bc160674 | |||
d438c22618 | |||
bcbae0a64f | |||
f636408647 | |||
3ffc7aa5bc | |||
7b7e8c0d3f | |||
11ea9e7c4b | |||
2b82121325 | |||
5038e5ccd7 | |||
e943ed8caf | |||
c196952afd | |||
e7383a7e66 | |||
8a7545197f | |||
680072e5e2 | |||
4ca377a655 | |||
751dd7eebb | |||
8f0e0c4440 | |||
50cf73500e | |||
db310a044c | |||
88a609ade5 | |||
304d63623f | |||
407b2682e8 | |||
0f4fd8367d | |||
747ba6a8d3 | |||
bb99fd40de | |||
e972d6639d | |||
22e77c9485 | |||
bc88473030 | |||
95677a81c5 | |||
ea37d29d3a | |||
e030673c9d | |||
3e76efe97e | |||
f5a30615c1 | |||
e5e325154b | |||
9e3d2956d8 | |||
26b1466ef6 | |||
a1f01fb8f8 | |||
b2be0e2e5e | |||
1a45587c08 | |||
3199f174a3 | |||
a51c2f193e | |||
be31da3dce | |||
54b407b4ca | |||
e87cac06da | |||
ad4fef4f09 | |||
e3b3701e13 | |||
9228fe11c9 | |||
5ab38afa51 | |||
e49b8f0ce7 | |||
c50ac96f75 | |||
a9355c33b2 | |||
3dcee9f79e | |||
2614189157 | |||
beeb09646a | |||
67f1fbab5f | |||
c0e7e43e96 | |||
9bfead2e01 | |||
6073cd57fa | |||
5174be5fe7 | |||
62a18d4c02 | |||
a6c15684c9 | |||
5691bf557c | |||
8f01f7cf21 | |||
bb8c94ad2c | |||
d98e35e095 | |||
3163fbad0e | |||
0172422961 | |||
8ccfb26923 | |||
12a474b6ee | |||
270fd6d61c | |||
7b9c7d4150 | |||
55126f5fb6 | |||
431692d9d0 | |||
6732a9078d | |||
2981076a14 | |||
5740ea3807 | |||
cd2d50e06c | |||
8c8a4ba705 | |||
b10de40506 | |||
2030dfa435 | |||
bfe64f5f6e | |||
6d27751365 | |||
1fb1c0a681 | |||
062f654fe0 | |||
d3cb161c36 | |||
98b47d2540 | |||
f28ba3937b | |||
91cf14e641 | |||
7601a8001c | |||
0ee6c5bf9d | |||
6dee632d67 | |||
51e5de4d97 | |||
1f08b22c8e | |||
83ae5bcee2 | |||
339a570b26 | |||
5310b6e5a2 | |||
7d14f44a7c | |||
c830eeeae4 | |||
157fcf1de5 | |||
e050160ce5 | |||
f273351789 | |||
aebf7f88e5 | |||
aac1571670 | |||
8bae75a8a6 | |||
c2f7ca9d8f | |||
6ec0e42220 | |||
072b244575 | |||
7ac9d6c604 | |||
0125163190 | |||
a06f4b1d44 | |||
10daa015c4 | |||
0babee39a4 | |||
7c08b397eb | |||
155ee8792f | |||
f89f121d2b | |||
27986d7abb | |||
8b7edc6d64 | |||
7dfab867fe | |||
fd36954477 | |||
fd51599fa8 | |||
3ca80c676c | |||
be7cce1fd2 | |||
e142aafca9 | |||
4196cf43e8 | |||
a344eb7dd0 | |||
d12537bdb7 | |||
bcb3b3c21f | |||
d8c9a1aae9 | |||
9ca2f5b3f7 | |||
9e24775051 | |||
4dc30ea104 | |||
90df6237c6 | |||
80caa8fdce | |||
8706774ea7 | |||
1d7e87d430 | |||
1a4cd763f8 | |||
ee74b367ce | |||
f06113500d | |||
9ab5692acf | |||
e7a910b664 |
6 .github/ISSUE_TEMPLATE.md vendored Normal file

@@ -0,0 +1,6 @@
#### Problem



#### Proposed Solution

5 .github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,5 @@
#### Problem

#### Summary of Changes

Fixes #
28 .github/RELEASE_TEMPLATE.md vendored Normal file

@@ -0,0 +1,28 @@
# Release v0.X.Y <milestone name>

fun blurb about the name, what's in the release

## Major Features And Improvements

* bulleted
* list of features and improvements

## Breaking Changes

* bulleted
* list
* of
* protocol changes/breaks
* API breaks
* CLI changes
* etc.

## Bug Fixes and Other Changes

* can be pulled from commit log, or synthesized

## Thanks to our Contributors

This release contains contributions from many people at Solana, as well as:

pull from commit log
3 .gitignore vendored

@@ -14,3 +14,6 @@ Cargo.lock

# test temp files, ledgers, etc.
/farf/

# log files
*.log
69 Cargo.toml

@@ -1,34 +1,35 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.8.0"
version = "0.10.3"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
]
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"

[[bin]]
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"
name = "solana-upload-perf"
path = "src/bin/upload-perf.rs"

[[bin]]
name = "solana-bench-streamer"
path = "src/bin/bench-streamer.rs"

[[bin]]
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"

[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"

[[bin]]
required-features = ["chacha"]
name = "solana-replicator"
path = "src/bin/replicator.rs"

[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"

@@ -57,11 +58,13 @@ path = "src/bin/wallet.rs"
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }

[features]
unstable = []
ipv6 = []
bpf_c = []
chacha = []
cuda = []
erasure = []
ipv6 = []
test = []
unstable = []

[dependencies]
atty = "0.2"

@@ -72,31 +75,43 @@ bytes = "0.4"
chrono = { version = "0.4.0", features = ["serde"] }
clap = "2.31"
dirs = "1.0.2"
elf = "0.0.10"
env_logger = "0.5.12"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc", rev = "4b6060b" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc", rev = "4b6060b" }
jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc", rev = "4b6060b" }
hex-literal = "0.1.1"
influx_db_client = "0.3.6"
solana-jsonrpc-core = "0.3.0"
solana-jsonrpc-http-server = "0.3.0"
solana-jsonrpc-macros = "0.3.0"
solana-jsonrpc-pubsub = "0.3.0"
solana-jsonrpc-ws-server = "0.3.0"
ipnetwork = "0.12.7"
itertools = "0.7.8"
libc = "0.2.43"
libloading = "0.5.0"
log = "0.4.2"
matches = "0.1.6"
nix = "0.11.0"
pnet_datalink = "0.21.0"
rand = "0.5.1"
rayon = "1.0.0"
reqwest = "0.8.6"
reqwest = "0.9.0"
ring = "0.13.2"
sha2 = "0.7.0"
sha2 = "0.8.0"
serde = "1.0.27"
serde_cbor = "0.9.0"
serde_derive = "1.0.27"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana-sdk = { path = "sdk", version = "0.10.3" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
solana-noop = { path = "programs/native/noop", version = "0.10.3" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.3" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.3" }

[[bench]]
name = "bank"

@@ -112,3 +127,17 @@ name = "signature"

[[bench]]
name = "sigverify"

[[bench]]
required-features = ["chacha"]
name = "chacha"

[workspace]
members = [
    ".",
    "sdk",
    "programs/native/noop",
    "programs/native/bpf_loader",
    "programs/native/lua_loader",
    "programs/bpf/rust/noop",
]
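The manifest changes above declare the new binary targets, the feature-gated benches, and the workspace members. A minimal sketch of exercising them with stock cargo follows; target and feature names are taken directly from the manifest above, everything else is just plain cargo usage:

```bash
# Build one of the newly declared binary targets
cargo build --release --bin solana-bench-streamer

# Benches use #![feature(test)], so they need a nightly toolchain;
# the "chacha" bench additionally requires its feature per required-features
cargo +nightly bench --features=chacha chacha

# Build every workspace member, including the native loader programs
cargo build --all
```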
18 README.md

@@ -21,7 +21,7 @@ It's possible for a centralized database to process 710,000 transactions per sec

> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)

Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.
Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.


Testnet Demos

@@ -62,7 +62,7 @@ your odds of success if you check out the
before proceeding:

```bash
$ git checkout v0.7.2
$ git checkout v0.8.0
```

Configuration Setup

@@ -113,7 +113,7 @@ To run a multinode testnet, after starting a leader node, spin up some validator
separate shells:

```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
$ ./multinode-demo/validator.sh
```

To run a performance-enhanced leader or validator (on Linux),

@@ -123,22 +123,20 @@ your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh
```


Testnet Client Demo
---

Now that your singlenode or multinode testnet is up and running let's send it some transactions! Note that we pass in
the expected number of nodes in the network. If running singlenode, pass 1; if multinode, pass the number
of validators you started.
Now that your singlenode or multinode testnet is up and running let's send it
some transactions!

In a separate shell start the client:

```bash
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 1
$ ./multinode-demo/client.sh # runs against localhost by default
```

What just happened? The client demo spins up several threads to send 500,000 transactions

@@ -155,7 +153,7 @@ Public Testnet
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.

```bash
$ ./multinode-demo/client.sh testnet.solana.com 1 #The minumum number of nodes to discover on the network
$ ./multinode-demo/client.sh --network $(dig +short testnet.solana.com):8001 --identity config-private/client-id.json --duration 60
```

You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)
32 RELEASE.md Normal file

@@ -0,0 +1,32 @@
# Solana Release process

## Introduction

Solana uses a channel-oriented, date-based branching process described [here](https://github.com/solana-labs/solana/blob/master/rfcs/rfc-005-branches-tags-and-channels.md).

## Release Steps

### Changing channels

When cutting a new channel branch these pre-steps are required:

1. Pick your branch point for release on master.
2. Create the branch. The name should be "v" + the first 2 "version" fields from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies the next branch name is "v0.9".
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0) by running `./scripts/increment-cargo-version.sh`.
4. Push your new branch to solana.git.
5. Land your Cargo.toml change as a master PR.

At this point, ci/channel-info.sh should show your freshly cut release branch as "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".

### Updating channels (i.e. "making a release")

We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.

1. Go [there ;)](https://github.com/solana-labs/solana/releases).
2. Click "Draft new release".
3. If this is the first major release on the branch (e.g. v0.8.0), paste in [this template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md) and fill it in.
4. Test the release by generating a tag using semver's rules. The first try at a release should be <branchname>.X-rc.0.
5. Verify release automation:
   1. [Crates.io](https://crates.io/crates/solana) should have an updated Solana version.
   2. ...
6. After testnet deployment, verify that testnets are running correct software. http://metrics.solana.com should show testnet running on a hash from your newly created branch.
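As a rough command-line sketch of the branch-cutting steps described in RELEASE.md above (branch names and versions are placeholders; only `./scripts/increment-cargo-version.sh` comes from the text, and release-candidate tags can equally be created through the Releases UI as described):

```bash
# 1-2. Pick the branch point on master and cut the channel branch
git checkout master
git checkout -b v0.9
git push origin v0.9          # 4. push the new branch to solana.git

# 3. Back on master, bump Cargo.toml to the next semantic version
git checkout master
./scripts/increment-cargo-version.sh   # e.g. 0.9.0 -> 0.10.0
git commit -am "Bump version to 0.10.0"   # 5. land this as a master PR

# First release candidate on the new branch, following the <branchname>.X-rc.0 rule
git tag v0.9.0-rc.0
git push origin v0.9.0-rc.0
```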
@@ -4,12 +4,11 @@ extern crate rayon;
extern crate solana;
extern crate test;

use bincode::serialize;
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use test::Bencher;

@@ -20,30 +19,39 @@ fn bench_process_transaction(bencher: &mut Bencher) {

    // Create transactions between unrelated parties.
    let transactions: Vec<_> = (0..4096)
        .into_par_iter()
        .map(|i| {
        .into_iter()
        .map(|_| {
            // Seed the 'from' account.
            let rando0 = Keypair::new();
            let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
            assert!(bank.process_transaction(&tx).is_ok());
            let tx = Transaction::system_move(
                &mint.keypair(),
                rando0.pubkey(),
                10_000,
                bank.last_id(),
                0,
            );
            assert_eq!(bank.process_transaction(&tx), Ok(()));

            // Seed the 'to' account and a cell for its signature.
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);

            let rando1 = Keypair::new();
            let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
            assert!(bank.process_transaction(&tx).is_ok());
            let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, bank.last_id(), 0);
            assert_eq!(bank.process_transaction(&tx), Ok(()));

            // Finally, return the transaction to the benchmark.
            tx
        })
        .collect();
        }).collect();

    let mut id = bank.last_id();

    for _ in 0..(MAX_ENTRY_IDS - 1) {
        bank.register_entry_id(&id);
        id = hash(&id.as_ref())
    }

    bencher.iter(|| {
        // Since benchmarker runs this multiple times, we need to clear the signatures.
        bank.clear_signatures();
        let results = bank.process_transactions(transactions.clone());
        let results = bank.process_transactions(&transactions);
        assert!(results.iter().all(Result::is_ok));
    })
}
@@ -1,95 +1,42 @@
#![feature(test)]
extern crate bincode;
extern crate rand;
extern crate rayon;
extern crate solana;
extern crate solana_sdk;
extern crate test;

use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::bank::{Bank, MAX_ENTRY_IDS};
use solana::banking_stage::{BankingStage, NUM_THREADS};
use solana::entry::Entry;
use solana::hash::hash;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{Keypair, KeypairUtil};
use solana::packet::to_packets_chunked;
use solana::signature::{KeypairUtil, Signature};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana_sdk::pubkey::Pubkey;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::time::Duration;
use test::Bencher;

// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{Keypair, KeypairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// fn bench_process_transactions(_bencher: &mut Bencher) {
//     let mint = Mint::new(100_000_000);
//     let bank = Bank::new(&mint);
//     // Create transactions between unrelated parties.
//     let txs = 100_000;
//     let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//     let transactions: Vec<_> = (0..txs)
//         .into_par_iter()
//         .map(|i| {
//             // Seed the 'to' account and a cell for its signature.
//             let dummy_id = i % (MAX_ENTRY_IDS as i32);
//             let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//             {
//                 let mut last_ids = last_ids.lock().unwrap();
//                 if !last_ids.contains(&last_id) {
//                     last_ids.insert(last_id);
//                     bank.register_entry_id(&last_id);
//                 }
//             }
//
//             // Seed the 'from' account.
//             let rando0 = Keypair::new();
//             let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             let rando1 = Keypair::new();
//             let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             // Finally, return a transaction that's unique
//             Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//         })
//         .collect();
//
//     let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//     let now = Instant::now();
//     assert!(banking_stage.process_transactions(transactions).is_ok());
//     let duration = now.elapsed();
//     let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//     let tps = txs as f64 / sec;
//
//     // Ensure that all transactions were successfully logged.
//     drop(banking_stage.historian_input);
//     let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//     assert_eq!(entries.len(), 1);
//     assert_eq!(entries[0].transactions.len(), txs as usize);
//
//     println!("{} tps", tps);
// }

fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {
fn check_txs(receiver: &Receiver<Vec<Entry>>, ref_tx_count: usize) {
    let mut total = 0;
    loop {
        let signal = receiver.recv().unwrap();
        if let Signal::Transactions(transactions) = signal {
            total += transactions.len();
            if total >= ref_tx_count {
                break;
        let entries = receiver.recv_timeout(Duration::new(1, 0));
        if let Ok(entries) = entries {
            for entry in &entries {
                total += entry.transactions.len();
            }
        } else {
            assert!(false);
            break;
        }
        if total >= ref_tx_count {
            break;
        }
    }
    assert_eq!(total, ref_tx_count);

@@ -97,116 +44,185 @@ fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {

#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let txes = 1000 * NUM_THREADS;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);
    let num_dst_accounts = 8 * 1024;
    let num_src_accounts = 8 * 1024;

    let srckeys: Vec<_> = (0..num_src_accounts).map(|_| Keypair::new()).collect();
    let dstkeys: Vec<_> = (0..num_dst_accounts)
        .map(|_| Keypair::new().pubkey())
        .collect();

    let transactions: Vec<_> = (0..tx)
        .map(|i| {
            Transaction::new(
                &srckeys[i % num_src_accounts],
                dstkeys[i % num_dst_accounts],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();
    let bank = Arc::new(Bank::new(&mint));
    let dummy = Transaction::system_move(
        &mint.keypair(),
        mint.keypair().pubkey(),
        1,
        mint.last_id(),
        0,
    );
    let transactions: Vec<_> = (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            new.signature = Signature::new(&sig[0..64]);
            new
        }).collect();
    // fund all the accounts
    transactions.iter().for_each(|tx| {
        let fund = Transaction::system_move(
            &mint.keypair(),
            tx.account_keys[0],
            mint_total / txes as i64,
            mint.last_id(),
            0,
        );
        assert!(bank.process_transaction(&fund).is_ok());
    });
    //sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    //sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 192)
        .into_iter()
        .map(|x| {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        }).collect();
    let (_stage, signal_receiver) = BankingStage::new(
        &bank,
        verified_receiver,
        Default::default(),
        &mint.last_id(),
        0,
        None,
    );

    let setup_transactions: Vec<_> = (0..num_src_accounts)
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                srckeys[i].pubkey(),
                mint_total / num_src_accounts as i64,
                mint.last_id(),
            )
        })
        .collect();
    let mut id = mint.last_id();
    for _ in 0..MAX_ENTRY_IDS {
        id = hash(&id.as_ref());
        bank.register_entry_id(&id);
    }

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));

        let verified_setup: Vec<_> =
            to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();

        verified_sender.send(verified_setup).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, num_src_accounts);

        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();

        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
        // make sure the tx last id is still registered
        if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
            bank.register_entry_id(&mint.last_id());
        }
        for v in verified.chunks(verified.len() / NUM_THREADS) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes);
        bank.clear_signatures();
    });
}

#[bench]
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint = Mint::new(1_000_000_000_000);
    let mut pubkeys = Vec::new();
    let num_keys = 8;
    for _ in 0..num_keys {
        pubkeys.push(Keypair::new().pubkey());
    }

    let transactions: Vec<_> = (0..tx)
        .into_par_iter()
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                pubkeys[i % num_keys],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
    let progs = 5;
    let txes = 1000 * NUM_THREADS;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();
    let bank = Arc::new(Bank::new(&mint));
    let dummy = Transaction::system_move(
        &mint.keypair(),
        mint.keypair().pubkey(),
        1,
        mint.last_id(),
        0,
    );
    let transactions: Vec<_> = (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            let prog = new.instructions[0].clone();
            for i in 1..progs {
                //generate programs that spend to random keys
                let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
                let to_key = Pubkey::new(&to[0..32]);
                new.account_keys.push(to_key);
                assert_eq!(new.account_keys.len(), i + 2);
                new.instructions.push(prog.clone());
                assert_eq!(new.instructions.len(), i + 1);
                new.instructions[i].accounts[1] = 1 + i as u8;
                assert_eq!(new.key(i, 1), Some(&to_key));
                assert_eq!(
                    new.account_keys[new.instructions[i].accounts[1] as usize],
                    to_key
                );
            }
            assert_eq!(new.instructions.len(), progs);
            new.signature = Signature::new(&sig[0..64]);
            new
        }).collect();
    transactions.iter().for_each(|tx| {
        let fund = Transaction::system_move(
            &mint.keypair(),
            tx.account_keys[0],
            mint_total / txes as i64,
            mint.last_id(),
            0,
        );
        assert!(bank.process_transaction(&fund).is_ok());
    });
    //sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    //sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
        .into_iter()
        .map(|x| {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        }).collect();
    let (_stage, signal_receiver) = BankingStage::new(
        &bank,
        verified_receiver,
        Default::default(),
        &mint.last_id(),
        0,
        None,
    );

    let mut id = mint.last_id();
    for _ in 0..MAX_ENTRY_IDS {
        id = hash(&id.as_ref());
        bank.register_entry_id(&id);
    }

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));
        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
        // make sure the transactions are still valid
        if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
            bank.register_entry_id(&mint.last_id());
        }
        for v in verified.chunks(verified.len() / NUM_THREADS) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes);
        bank.clear_signatures();
    });
}
29 benches/chacha.rs Normal file

@@ -0,0 +1,29 @@
#![feature(test)]

extern crate solana;
extern crate test;

use solana::chacha::chacha_cbc_encrypt_files;
use std::fs::remove_file;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use test::Bencher;

#[bench]
fn bench_chacha_encrypt(bench: &mut Bencher) {
    let in_path = Path::new("bench_chacha_encrypt_file_input.txt");
    let out_path = Path::new("bench_chacha_encrypt_file_output.txt.enc");
    {
        let mut in_file = File::create(in_path).unwrap();
        for _ in 0..1024 {
            in_file.write("123456foobar".as_bytes()).unwrap();
        }
    }
    bench.iter(move || {
        chacha_cbc_encrypt_files(in_path, out_path, "thetestkey".to_string()).unwrap();
    });

    remove_file(in_path).unwrap();
    remove_file(out_path).unwrap();
}
@@ -4,8 +4,8 @@ extern crate test;

use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use test::Bencher;

@@ -14,13 +14,12 @@ fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
    let zero = Hash::default();
    let one = hash(&zero.as_ref());
    let keypair = Keypair::new();
    let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
    let tx0 = Transaction::system_move(&keypair, keypair.pubkey(), 1, one, 0);
    let transactions = vec![tx0; 10];
    let entries = next_entries(&zero, 1, transactions);

    let blob_recycler = BlobRecycler::default();
    bencher.iter(|| {
        let blobs = entries.to_blobs(&blob_recycler);
        let blobs = entries.to_blobs();
        assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap(), entries);
    });
}
@@ -4,9 +4,9 @@ extern crate rayon;
extern crate solana;
extern crate test;

use solana::packet::{to_packets, PacketRecycler};
use solana::packet::to_packets;
use solana::sigverify;
use solana::transaction::test_tx;
use solana::system_transaction::test_tx;
use test::Bencher;

#[bench]
@@ -14,8 +14,7 @@ fn bench_sigverify(bencher: &mut Bencher) {
    let tx = test_tx();

    // generate packet vector
    let packet_recycler = PacketRecycler::default();
    let batches = to_packets(&packet_recycler, &vec![tx; 128]);
    let batches = to_packets(&vec![tx; 128]);

    // verify packets
    bencher.iter(|| {
34 build.rs

@@ -1,8 +1,8 @@
use std::env;
use std::fs;
use std::process::Command;

fn main() {
    println!("cargo:rerun-if-changed=target/perf-libs");
    println!("cargo:rerun-if-changed=build.rs");

    // Ensure target/perf-libs/ exists. It's been observed that

@@ -14,20 +14,48 @@ fn main() {
        }
    });

    let bpf_c = !env::var("CARGO_FEATURE_BPF_C").is_err();
    let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
    let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
    let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();

    if cuda || erasure {
    if bpf_c {
        let out_dir = "OUT_DIR=../../../target/".to_string()
            + &env::var("PROFILE").unwrap()
            + &"/bpf".to_string();

        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/bpf.mk");
        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/inc/solana_sdk.h");
        println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
        println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
        println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
        println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
        let status = Command::new("make")
            .current_dir("programs/bpf/c")
            .arg("all")
            .arg(&out_dir)
            .status()
            .expect("Failed to build C-based BPF programs");
        assert!(status.success());
    }
    if chacha || cuda || erasure {
        println!("cargo:rerun-if-changed=target/perf-libs");
        println!("cargo:rustc-link-search=native=target/perf-libs");
    }
    if chacha {
        println!("cargo:rerun-if-changed=target/perf-libs/libcpu-crypt.a");
    }
    if cuda {
        println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
        println!("cargo:rerun-if-changed=target/perf-libs/libcuda-crypt.a");
        println!("cargo:rustc-link-lib=static=cuda-crypt");
        println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
        println!("cargo:rustc-link-lib=dylib=cudart");
        println!("cargo:rustc-link-lib=dylib=cuda");
        println!("cargo:rustc-link-lib=dylib=cudadevrt");
    }
    if erasure {
        println!("cargo:rerun-if-changed=target/perf-libs/libgf_complete.so");
        println!("cargo:rerun-if-changed=target/perf-libs/libJerasure.so");
        println!("cargo:rustc-link-lib=dylib=Jerasure");
        println!("cargo:rustc-link-lib=dylib=gf_complete");
    }
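The build.rs changes above key off the `CARGO_FEATURE_*` environment variables that cargo sets for whichever features are enabled, so the extra compile and link steps are driven entirely by feature flags chosen at build time. A hedged sketch of how those flags would be exercised (feature names come from the `[features]` table in Cargo.toml; the CUDA variant assumes `./fetch-perf-libs.sh` has populated target/perf-libs as the README describes):

```bash
# Plain build: none of the optional blocks in build.rs run
cargo build --release

# CUDA build: links cuda-crypt and the CUDA runtime via the cargo: directives above
./fetch-perf-libs.sh
cargo build --release --features=cuda

# C BPF programs plus chacha: runs `make all` under programs/bpf/c and links cpu-crypt
cargo build --release --features=bpf_c,chacha
```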
@@ -29,4 +29,4 @@ maybe_cargo_install() {
maybe_cargo_install audit tree

_ cargo tree
_ cargo audit || true
_ cargo audit
16 ci/buildkite-secondary.yml Normal file

@@ -0,0 +1,16 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 40
    name: "snap [public]"
  - command: "ci/docker-solana/build.sh"
    timeout_in_minutes: 20
    name: "docker-solana"
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - command: "ci/publish-bpf-sdk.sh"
    timeout_in_minutes: 5
    name: "publish bpf sdk"
  - command: "ci/publish-solana-tar.sh"
    timeout_in_minutes: 15
    name: "publish solana release tar"
@@ -1,4 +0,0 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 40
    name: "snap [public]"
@@ -1,18 +1,18 @@
steps:
  - command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-stable.sh"
  - command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh"
    name: "stable [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 30
  # - command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-bench.sh"
  #   name: "bench [public]"
  #   env:
  #     CARGO_TARGET_CACHE_NAME: "nightly"
  #   timeout_in_minutes: 30
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-bench.sh"
    name: "bench [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"
    timeout_in_minutes: 30
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-09-03 ci/test-nightly.sh || true"
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-nightly.sh"
    name: "nightly [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"

@@ -24,21 +24,19 @@ steps:
    timeout_in_minutes: 20
    agents:
      - "queue=cuda"
  - command: "ci/test-large-network.sh || true"
    name: "large-network [public] [ignored]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 20
    agents:
      - "queue=large"
  # TODO: Fix and re-enable test-large-network.sh
  # - command: "ci/test-large-network.sh || true"
  #   name: "large-network [public] [ignored]"
  #   env:
  #     CARGO_TARGET_CACHE_NAME: "stable"
  #   timeout_in_minutes: 20
  #   agents:
  #     - "queue=large"
  - command: "ci/pr-snap.sh"
    timeout_in_minutes: 20
    name: "snap [public]"
  - wait
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - trigger: "solana-snap"
  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
    build:
16 ci/crate-version.sh Executable file

@@ -0,0 +1,16 @@
#!/bin/bash -e
#
# Outputs the current crate version
#

cd "$(dirname "$0")"/..

while read -r name equals value _; do
  if [[ $name = version && $equals = = ]]; then
    echo "${value//\"/}"
    exit 0
  fi
done < <(cat Cargo.toml)

echo Unable to locate version in Cargo.toml 1>&2
exit 1
@@ -7,11 +7,18 @@ usage() {
  echo a CI-appropriate environment.
  echo
  echo "--nopull Skip the dockerhub image update"
  echo "--shell Skip command and enter an interactive shell"
  echo
}

cd "$(dirname "$0")/.."

INTERACTIVE=false
if [[ $1 = --shell ]]; then
  INTERACTIVE=true
  shift
fi

NOPULL=false
if [[ $1 = --nopull ]]; then
  NOPULL=true

@@ -64,5 +71,15 @@ ARGS+=(
  --env SNAPCRAFT_CREDENTIALS_KEY
)

if $INTERACTIVE; then
  if [[ -n $1 ]]; then
    echo
    echo "Note: '$*' ignored due to --shell argument"
    echo
  fi
  set -x
  exec docker run --interactive --tty "${ARGS[@]}" "$IMAGE" bash
fi

set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
@@ -4,7 +4,6 @@ ARG date
RUN set -x && \
    rustup install nightly-$date && \
    rustup default nightly-$date && \
    rustup component add clippy-preview --toolchain=nightly-$date && \
    rustc --version && \
    cargo --version && \
    cargo +nightly-$date install cargo-cov
@@ -1,23 +1,26 @@
# Note: when the rust version (1.28) is changed also modify
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.28
FROM rust:1.30.1

RUN set -x && \
    apt update && \
    apt-get install apt-transport-https && \
    echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
    echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main > /etc/apt/sources.list.d/llvm.list && \
    echo deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-7 main > /etc/apt/sources.list.d/llvm.list && \
    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
    wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
    apt update && \
    apt install -y \
      buildkite-agent \
      cmake \
      llvm-6.0 \
      lcov \
      libclang-common-7-dev \
      llvm-7 \
      rsync \
      sudo \
      && \
    rustup component add rustfmt-preview && \
    rustup component add clippy-preview && \
    rm -rf /var/lib/apt/lists/* && \
    rustc --version && \
    cargo --version
1 ci/docker-solana/.gitignore vendored Normal file

@@ -0,0 +1 @@
cargo-install/
13 ci/docker-solana/Dockerfile Normal file

@@ -0,0 +1,13 @@
FROM debian:stretch

# JSON RPC port
EXPOSE 8899/tcp

# Install libssl
RUN apt update && \
    apt-get install -y libssl-dev && \
    rm -rf /var/lib/apt/lists/*

COPY usr/bin /usr/bin/
ENTRYPOINT [ "/usr/bin/solana-entrypoint.sh" ]
CMD [""]
17 ci/docker-solana/README.md Normal file

@@ -0,0 +1,17 @@
## Minimal Solana Docker image
This image is automatically updated by CI

https://hub.docker.com/r/solanalabs/solana/

### Usage:
Run the latest beta image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:beta
```

Run the latest edge image:
```bash
$ docker run --rm -p 8899:8899 solanalabs/solana:edge
```

Port *8899* is the JSON RPC port, which is used by clients to communicate with the network.
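For completeness, a quick way to check that a container started from this image is answering on the exposed RPC port; the JSON RPC method name used here is an assumption about the RPC API of this release era and is not part of the diff:

```bash
# Start the container in the background, mapping the JSON RPC port
docker run --rm -d -p 8899:8899 --name solana solanalabs/solana:beta

# Hypothetical smoke test against the exposed JSON RPC port
curl http://localhost:8899 -X POST -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}'
```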
39 ci/docker-solana/build.sh Executable file

@@ -0,0 +1,39 @@
#!/bin/bash -ex

cd "$(dirname "$0")"
eval "$(../channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi

if [[ -z $CHANNEL ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi

rm -rf usr/
../docker-run.sh solanalabs/rust:1.30.0 \
  cargo install --path . --root ci/docker-solana/usr
cp -f entrypoint.sh usr/bin/solana-entrypoint.sh
../../scripts/install-native-programs.sh usr/bin/

docker build -t solanalabs/solana:$CHANNEL .

maybeEcho=
if [[ -z $CI ]]; then
  echo "Not CI, skipping |docker push|"
  maybeEcho="echo"
else
  (
    set +x
    if [[ -n $DOCKER_PASSWORD && -n $DOCKER_USERNAME ]]; then
      echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
    fi
  )
fi
$maybeEcho docker push solanalabs/solana:$CHANNEL
23 ci/docker-solana/entrypoint.sh Executable file

@@ -0,0 +1,23 @@
#!/bin/bash -ex

export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1

solana-keygen -o /config/leader-keypair.json
solana-keygen -o /config/drone-keypair.json

solana-genesis --tokens=1000000000 --ledger /ledger < /config/drone-keypair.json
solana-fullnode-config --keypair=/config/leader-keypair.json -l > /config/leader-config.json

solana-drone --keypair /config/drone-keypair.json --network 127.0.0.1:8001 &
drone=$!
solana-fullnode --identity /config/leader-config.json --ledger /ledger/ &
fullnode=$!

abort() {
  kill "$drone" "$fullnode"
}

trap abort SIGINT SIGTERM
wait "$fullnode"
kill "$drone" "$fullnode"
@@ -73,7 +73,7 @@ echo "--- Node count"
  set -x
  client_id=/tmp/client-id.json-$$
  $solana_keygen -o $client_id
  $solana_bench_tps --identity $client_id --num-nodes 3 --converge-only
  $solana_bench_tps --identity $client_id --num-nodes 3 --reject-extra-nodes --converge-only
  rm -rf $client_id
) || flag_error

36 ci/publish-bpf-sdk.sh Executable file

@@ -0,0 +1,36 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

version=$(./ci/crate-version.sh)

echo --- Creating tarball
(
  set -x
  rm -rf bpf-sdk/
  mkdir bpf-sdk/
  (
    echo "$version"
    git rev-parse HEAD
  ) > bpf-sdk/version.txt

  cp -ra programs/bpf/c/sdk/* bpf-sdk/

  tar jvcf bpf-sdk.tar.bz2 bpf-sdk/
)


echo --- AWS S3 Store

set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
  rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
  wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
  tar zxf s3cmd-2.0.1.tar.gz
fi

python ./s3cmd-2.0.1/s3cmd --acl-public put bpf-sdk.tar.bz2 \
  s3://solana-sdk/"$version"/bpf-sdk.tar.bz2

exit 0
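Because the tarball above is uploaded with a public ACL, downstream scripts could fetch it by crate version. The https URL shape below is an assumption about the bucket's public endpoint (standard S3 naming), not something stated in the script, and the version value is a placeholder:

```bash
version=0.10.3   # placeholder; matches the output of ci/crate-version.sh
wget "https://solana-sdk.s3.amazonaws.com/$version/bpf-sdk.tar.bz2"
tar jxf bpf-sdk.tar.bz2   # unpacks into bpf-sdk/
```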
@@ -2,7 +2,7 @@

cd "$(dirname "$0")/.."

if [[ -z "$BUILDKITE_TAG" ]]; then
if [[ -z "$BUILDKITE_TAG" && -z "$TRIGGERED_BUILDKITE_TAG" ]]; then
  # Skip publish if this is not a tagged release
  exit 0
fi

@@ -12,8 +12,18 @@ if [[ -z "$CRATES_IO_TOKEN" ]]; then
  exit 1
fi

# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
  bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
maybePublish="echo Publish skipped"
if [[ -n $CI ]]; then
  maybePublish="cargo publish --token $CRATES_IO_TOKEN"
fi

# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
for Cargo_toml in {sdk,programs/native/{bpf_loader,lua_loader,noop},.}/Cargo.toml; do
  # TODO: Ensure the published version matches the contents of BUILDKITE_TAG
  (
    set -x
    ci/docker-run.sh rust bash -exc "cd $(dirname "$Cargo_toml"); cargo package; $maybePublish"
  )
done

exit 0
71 ci/publish-solana-tar.sh Executable file

@@ -0,0 +1,71 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
  DRYRUN="echo"
  CHANNEL=unknown
fi

eval "$(ci/channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi

if [[ -n "$BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$BUILDKITE_TAG
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
else
  CHANNEL_OR_TAG=$CHANNEL
fi

if [[ -z $CHANNEL_OR_TAG ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi


echo --- Creating tarball
(
  set -x
  rm -rf solana-release/
  mkdir solana-release/
  (
    echo "$CHANNEL_OR_TAG"
    git rev-parse HEAD
  ) > solana-release/version.txt

  cargo install --root solana-release
  ./scripts/install-native-programs.sh solana-release/bin
  ./fetch-perf-libs.sh
  cargo install --features=cuda --root solana-release-cuda
  cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda

  tar jvcf solana-release.tar.bz2 solana-release/
)

echo --- AWS S3 Store
if [[ -z $DRYRUN ]]; then
  (
    set -x
    if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
      rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
      $DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
      $DRYRUN tar zxf s3cmd-2.0.1.tar.gz
    fi

    $DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
      s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
  )
else
  echo Skipped due to DRYRUN
fi
exit 0
23
ci/snap.sh
@ -2,6 +2,13 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
if ! ci/version-check.sh stable; then
|
||||
# This job doesn't run within a container; try once to upgrade tooling on a
# version check failure
|
||||
rustup install stable
|
||||
ci/version-check.sh stable
|
||||
fi
|
||||
|
||||
DRYRUN=
|
||||
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
|
||||
DRYRUN="echo"
|
||||
@ -10,14 +17,14 @@ fi
|
||||
eval "$(ci/channel-info.sh)"
|
||||
|
||||
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
|
||||
SNAP_CHANNEL=stable
|
||||
CHANNEL=stable
|
||||
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
|
||||
SNAP_CHANNEL=edge
|
||||
CHANNEL=edge
|
||||
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
|
||||
SNAP_CHANNEL=beta
|
||||
CHANNEL=beta
|
||||
fi
|
||||
|
||||
if [[ -z $SNAP_CHANNEL ]]; then
|
||||
if [[ -z $CHANNEL ]]; then
|
||||
echo Unable to determine channel to publish into, exiting.
|
||||
exit 0
|
||||
fi
|
||||
@ -51,11 +58,13 @@ if [[ ! -x /usr/bin/multilog ]]; then
|
||||
sudo apt-get install -y daemontools
|
||||
fi
|
||||
|
||||
echo --- build: $SNAP_CHANNEL channel
|
||||
echo --- build: $CHANNEL channel
|
||||
snapcraft
|
||||
|
||||
source ci/upload_ci_artifact.sh
|
||||
upload_ci_artifact solana_*.snap
|
||||
|
||||
echo --- publish: $SNAP_CHANNEL channel
|
||||
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
|
||||
if [[ -z $DO_NOT_PUBLISH_SNAP ]]; then
|
||||
echo --- publish: $CHANNEL channel
|
||||
$DRYRUN snapcraft push solana_*.snap --release $CHANNEL
|
||||
fi
|
||||
|
18
ci/solana-testnet.yml
Executable file
@ -0,0 +1,18 @@
|
||||
steps:
|
||||
- command: "ci/snap.sh"
|
||||
label: "create snap"
|
||||
|
||||
- wait
|
||||
|
||||
- command: "ci/testnet-automation.sh"
|
||||
label: "run testnet"
|
||||
agents:
|
||||
- "queue=testnet-deploy"
|
||||
|
||||
- wait: ~
|
||||
continue_on_failure: true
|
||||
|
||||
- command: "ci/testnet-automation-cleanup.sh"
|
||||
label: "delete testnet"
|
||||
agents:
|
||||
- "queue=testnet-deploy"
|
@ -2,6 +2,11 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
source ci/upload_ci_artifact.sh
|
||||
|
||||
eval "$(ci/channel-info.sh)"
|
||||
|
||||
ci/version-check.sh nightly
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
@ -10,4 +15,19 @@ _() {
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo bench --features=unstable --verbose
|
||||
set -o pipefail
|
||||
|
||||
UPLOAD_METRICS=""
|
||||
TARGET_BRANCH=$BUILDKITE_BRANCH
|
||||
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
|
||||
TARGET_BRANCH=$EDGE_CHANNEL
|
||||
else
|
||||
UPLOAD_METRICS="upload"
|
||||
fi
|
||||
|
||||
BENCH_FILE=bench_output.log
|
||||
BENCH_ARTIFACT=current_bench_results.log
|
||||
_ cargo bench --features=unstable --verbose -- -Z unstable-options --format=json | tee "$BENCH_FILE"
|
||||
_ cargo run --release --bin solana-upload-perf -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" >"$BENCH_ARTIFACT"
|
||||
|
||||
upload_ci_artifact "$BENCH_ARTIFACT"
|
||||
|
@ -1,6 +1,7 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
source ci/upload_ci_artifact.sh
|
||||
|
||||
ci/version-check.sh nightly
|
||||
export RUST_BACKTRACE=1
|
||||
@ -10,24 +11,50 @@ _() {
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo build --verbose --features unstable
|
||||
_ cargo test --verbose --features=unstable
|
||||
# Uncomment this to run the nightly test suite
|
||||
# _ cargo test --verbose --features=unstable
|
||||
|
||||
# TODO: Re-enable warnings-as-errors after clippy offers a way to not warn on unscoped lint names.
|
||||
#_ cargo clippy -- --deny=warnings
|
||||
_ cargo clippy
|
||||
maybe_cargo_install() {
  for cmd in "$@"; do
    set +e
    cargo "$cmd" --help > /dev/null 2>&1
    declare exitcode=$?
    set -e
    if [[ $exitcode -eq 101 ]]; then
      _ cargo install cargo-"$cmd"
    fi
  done
}
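# Annotation, not part of the original script: `cargo <cmd> --help` exits with
# code 101 when the subcommand is unknown, so maybe_cargo_install only runs
# `cargo install cargo-<cmd>` for subcommands that are actually missing, as in
# the `maybe_cargo_install cov` call below.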
|
||||
exit 0
|
||||
maybe_cargo_install cov
|
||||
|
||||
# Coverage disabled (see issue #433)
|
||||
_ cargo cov test
|
||||
# Generate coverage data and report via unit-test suite.
|
||||
_ cargo cov clean
|
||||
_ cargo cov test --lib
|
||||
_ cargo cov report
|
||||
|
||||
echo --- Coverage report:
|
||||
ls -l target/cov/report/index.html
|
||||
# Generate a coverage report with grcov via lcov.
|
||||
if [[ ! -f ./grcov ]]; then
|
||||
uname=$(uname | tr '[:upper:]' '[:lower:]')
|
||||
uname_m=$(uname -m | tr '[:upper:]' '[:lower:]')
|
||||
name=grcov-${uname}-${uname_m}.tar.bz2
|
||||
_ wget "https://github.com/mozilla/grcov/releases/download/v0.2.3/${name}"
|
||||
_ tar -xjf "${name}"
|
||||
fi
|
||||
_ ./grcov . -t lcov > lcov.info
|
||||
_ genhtml -o target/cov/report-lcov --show-details --highlight --ignore-errors source --legend lcov.info
|
||||
|
||||
# Upload to tarballs to buildkite.
|
||||
_ cd target/cov && tar -cjf cov-report.tar.bz2 report/* && cd -
|
||||
_ upload_ci_artifact "target/cov/cov-report.tar.bz2"
|
||||
|
||||
_ cd target/cov && tar -cjf lcov-report.tar.bz2 report-lcov/* && cd -
|
||||
_ upload_ci_artifact "target/cov/lcov-report.tar.bz2"
|
||||
|
||||
if [[ -z "$CODECOV_TOKEN" ]]; then
|
||||
echo CODECOV_TOKEN undefined
|
||||
else
|
||||
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
|
||||
true
|
||||
# TODO: Why doesn't codecov grok our lcov files?
|
||||
#bash <(curl -s https://codecov.io/bash) -X gcov
|
||||
fi
|
||||
|
@ -9,6 +9,7 @@ if ! ci/version-check.sh stable; then
|
||||
ci/version-check.sh stable
|
||||
fi
|
||||
export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
export LD_LIBRARY_PATH=$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
|
||||
@ -19,7 +20,15 @@ _() {
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo test --features=cuda,erasure
|
||||
FEATURES=cuda,erasure,chacha
|
||||
_ cargo test --verbose --features="$FEATURES" --lib
|
||||
|
||||
# Run integration tests serially
|
||||
for test in tests/*.rs; do
|
||||
test=${test##*/} # basename x
|
||||
test=${test%.rs} # basename x .rs
|
||||
_ cargo test --verbose --jobs=1 --features="$FEATURES" --test="$test"
|
||||
done
|
||||
|
||||
echo --- ci/localnet-sanity.sh
|
||||
(
|
||||
|
@ -4,6 +4,7 @@ cd "$(dirname "$0")/.."
|
||||
|
||||
ci/version-check.sh stable
|
||||
export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
|
||||
_() {
|
||||
echo "--- $*"
|
||||
@ -12,7 +13,25 @@ _() {
|
||||
|
||||
_ cargo fmt -- --check
|
||||
_ cargo build --verbose
|
||||
_ cargo test --verbose
|
||||
_ cargo test --verbose --lib
|
||||
_ cargo clippy -- --deny=warnings
|
||||
|
||||
# Run integration tests serially
|
||||
for test in tests/*.rs; do
|
||||
test=${test##*/} # basename x
|
||||
test=${test%.rs} # basename x .rs
|
||||
_ cargo test --verbose --jobs=1 --test="$test"
|
||||
done
|
||||
|
||||
# Run native program's tests
|
||||
for program in programs/native/*; do
|
||||
echo --- "$program"
|
||||
(
|
||||
set -x
|
||||
cd "$program"
|
||||
cargo test --verbose
|
||||
)
|
||||
done
|
||||
|
||||
echo --- ci/localnet-sanity.sh
|
||||
(
|
||||
@ -22,4 +41,4 @@ echo --- ci/localnet-sanity.sh
|
||||
USE_INSTALL=1 ci/localnet-sanity.sh
|
||||
)
|
||||
|
||||
_ ci/audit.sh || true
|
||||
_ ci/audit.sh
|
||||
|
9
ci/testnet-automation-cleanup.sh
Executable file
@ -0,0 +1,9 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
echo --- find testnet configuration
|
||||
net/gce.sh config -p testnet-automation
|
||||
|
||||
echo --- delete testnet
|
||||
net/gce.sh delete -p testnet-automation
|
7
ci/testnet-automation-json-parser.py
Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env python
import sys, json

data=json.load(sys.stdin)
print [\
  ([result['series'][0]['columns'][1].encode(), result['series'][0]['values'][0][1]]) \
  for result in data['results']]

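As a usage sketch (illustrative, not part of this diff), ci/testnet-automation.sh below pipes the body of an InfluxDB `/query` response into this parser, which prints each result's first data-column name and value:

```bash
# Hypothetical stand-alone invocation mirroring the curl pipeline used in
# ci/testnet-automation.sh; the credentials and query are placeholders.
curl -G "https://metrics.solana.com:8086/query?u=$INFLUX_USERNAME&p=$INFLUX_PASSWORD" \
  --data-urlencode "db=$INFLUX_DATABASE" \
  --data-urlencode 'q=SELECT round(mean("duration_ms")) as "mean_finality" FROM "testnet-automation"."autogen"."leader-finality" WHERE time > now() - 300s' |
  python ci/testnet-automation-json-parser.py
```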
80
ci/testnet-automation.sh
Executable file
@ -0,0 +1,80 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
echo --- downloading snap from build artifacts
|
||||
buildkite-agent artifact download "solana_*.snap" .
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
source ci/upload_ci_artifact.sh
|
||||
|
||||
[[ -n $ITERATION_WAIT ]] || ITERATION_WAIT=300
|
||||
[[ -n $NUMBER_OF_NODES ]] || NUMBER_OF_NODES="10 25 50 100"
|
||||
[[ -n $LEADER_CPU_MACHINE_TYPE ]] ||
|
||||
LEADER_CPU_MACHINE_TYPE="n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100"
|
||||
[[ -n $CLIENT_COUNT ]] || CLIENT_COUNT=2
|
||||
[[ -n $TESTNET_TAG ]] || TESTNET_TAG=testnet-automation
|
||||
[[ -n $TESTNET_ZONE ]] || TESTNET_ZONE=us-west1-b
|
||||
|
||||
launchTestnet() {
|
||||
declare nodeCount=$1
|
||||
echo --- setup "$nodeCount" node test
|
||||
net/gce.sh create \
|
||||
-n "$nodeCount" -c "$CLIENT_COUNT" \
|
||||
-G "$LEADER_CPU_MACHINE_TYPE" \
|
||||
-p "$TESTNET_TAG" -z "$TESTNET_ZONE"
|
||||
|
||||
echo --- configure database
|
||||
net/init-metrics.sh -e
|
||||
|
||||
echo --- start "$nodeCount" node test
|
||||
net/net.sh start -o noValidatorSanity -S solana_*.snap
|
||||
|
||||
echo --- wait "$ITERATION_WAIT" seconds to complete test
|
||||
sleep "$ITERATION_WAIT"
|
||||
|
||||
declare q_mean_tps='
|
||||
SELECT round(mean("sum_count")) AS "mean_tps" FROM (
|
||||
SELECT sum("count") AS "sum_count"
|
||||
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
|
||||
WHERE time > now() - 300s GROUP BY time(1s)
|
||||
)'
|
||||
|
||||
declare q_max_tps='
|
||||
SELECT round(max("sum_count")) AS "max_tps" FROM (
|
||||
SELECT sum("count") AS "sum_count"
|
||||
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
|
||||
WHERE time > now() - 300s GROUP BY time(1s)
|
||||
)'
|
||||
|
||||
declare q_mean_finality='
|
||||
SELECT round(mean("duration_ms")) as "mean_finality"
|
||||
FROM "testnet-automation"."autogen"."leader-finality"
|
||||
WHERE time > now() - 300s'
|
||||
|
||||
declare q_max_finality='
|
||||
SELECT round(max("duration_ms")) as "max_finality"
|
||||
FROM "testnet-automation"."autogen"."leader-finality"
|
||||
WHERE time > now() - 300s'
|
||||
|
||||
declare q_99th_finality='
|
||||
SELECT round(percentile("duration_ms", 99)) as "99th_finality"
|
||||
FROM "testnet-automation"."autogen"."leader-finality"
|
||||
WHERE time > now() - 300s'
|
||||
|
||||
curl -G "https://metrics.solana.com:8086/query?u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
|
||||
--data-urlencode "db=$INFLUX_DATABASE" \
|
||||
--data-urlencode "q=$q_mean_tps;$q_max_tps;$q_mean_finality;$q_max_finality;$q_99th_finality" |
|
||||
python ci/testnet-automation-json-parser.py >>TPS"$nodeCount".log
|
||||
|
||||
upload_ci_artifact TPS"$nodeCount".log
|
||||
}
|
||||
|
||||
# This is needed because Buildkite doesn't let us define an array of numbers.
# The array is defined as a space-separated string of numbers
|
||||
# shellcheck disable=SC2206
|
||||
nodes_count_array=($NUMBER_OF_NODES)
|
||||
|
||||
for n in "${nodes_count_array[@]}"; do
|
||||
launchTestnet "$n"
|
||||
done
|
@ -4,11 +4,15 @@ cd "$(dirname "$0")"/..
|
||||
|
||||
zone=
|
||||
leaderAddress=
|
||||
leaderMachineType=
|
||||
clientNodeCount=0
|
||||
validatorNodeCount=10
|
||||
publicNetwork=false
|
||||
snapChannel=edge
|
||||
tarChannelOrTag=edge
|
||||
delete=false
|
||||
enableGpu=false
|
||||
useTarReleaseChannel=false
|
||||
|
||||
usage() {
|
||||
exitcode=0
|
||||
@ -17,19 +21,26 @@ usage() {
|
||||
echo "Error: $*"
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [name] [zone] [options...]
|
||||
usage: $0 [name] [cloud] [zone] [options...]
|
||||
|
||||
Deploys a CD testnet
|
||||
|
||||
name - name of the network
|
||||
zone - GCE to deploy the network into
|
||||
cloud - cloud provider to use (gce, ec2)
|
||||
zone - cloud provider zone to deploy the network into
|
||||
|
||||
options:
|
||||
-s edge|beta|stable - Deploy the specified Snap release channel
|
||||
(default: $snapChannel)
|
||||
-t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
|
||||
specified release channel (edge|beta|stable) or release tag
|
||||
(vX.Y.Z)
|
||||
(default: $tarChannelOrTag)
|
||||
-n [number] - Number of validator nodes (default: $validatorNodeCount)
|
||||
-c [number] - Number of client nodes (default: $clientNodeCount)
|
||||
-P - Use public network IP addresses (default: $publicNetwork)
|
||||
-G - Enable GPU, and set count/type of GPUs to use (e.g. n1-standard-16 --accelerator count=4,type=nvidia-tesla-k80)
|
||||
-g - Enable GPU (default: $enableGpu)
|
||||
-a [address] - Set the leader node's external IP address to this GCE address
|
||||
-d - Delete the network
|
||||
|
||||
@ -40,12 +51,14 @@ EOF
|
||||
}
|
||||
|
||||
netName=$1
|
||||
zone=$2
|
||||
cloudProvider=$2
|
||||
zone=$3
|
||||
[[ -n $netName ]] || usage
|
||||
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
|
||||
[[ -n $zone ]] || usage "Zone not specified"
|
||||
shift 2
|
||||
shift 3
|
||||
|
||||
while getopts "h?p:Pn:c:s:a:d" opt; do
|
||||
while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
|
||||
case $opt in
|
||||
h | \?)
|
||||
usage
|
||||
@ -69,6 +82,24 @@ while getopts "h?p:Pn:c:s:a:d" opt; do
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
t)
|
||||
case $OPTARG in
|
||||
edge|beta|stable|v*)
|
||||
tarChannelOrTag=$OPTARG
|
||||
useTarReleaseChannel=true
|
||||
;;
|
||||
*)
|
||||
usage "Invalid release channel: $OPTARG"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
g)
|
||||
enableGpu=true
|
||||
;;
|
||||
G)
|
||||
enableGpu=true
|
||||
leaderMachineType=$OPTARG
|
||||
;;
|
||||
a)
|
||||
leaderAddress=$OPTARG
|
||||
;;
|
||||
@ -82,32 +113,55 @@ while getopts "h?p:Pn:c:s:a:d" opt; do
|
||||
done
|
||||
|
||||
|
||||
gce_create_args=(
|
||||
create_args=(
|
||||
-a "$leaderAddress"
|
||||
-c "$clientNodeCount"
|
||||
-n "$validatorNodeCount"
|
||||
-g
|
||||
-p "$netName"
|
||||
-z "$zone"
|
||||
)
|
||||
|
||||
if $enableGpu; then
|
||||
if [[ -z $leaderMachineType ]]; then
|
||||
create_args+=(-g)
|
||||
else
|
||||
create_args+=(-G "$leaderMachineType")
|
||||
fi
|
||||
fi
|
||||
|
||||
if $publicNetwork; then
|
||||
gce_create_args+=(-P)
|
||||
create_args+=(-P)
|
||||
fi
|
||||
|
||||
set -x
|
||||
|
||||
echo --- gce.sh delete
|
||||
time net/gce.sh delete -p "$netName"
|
||||
echo "--- $cloudProvider.sh delete"
|
||||
time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
|
||||
if $delete; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo --- gce.sh create
|
||||
time net/gce.sh create "${gce_create_args[@]}"
|
||||
echo "--- $cloudProvider.sh create"
|
||||
time net/"$cloudProvider".sh create "${create_args[@]}"
|
||||
net/init-metrics.sh -e
|
||||
|
||||
echo --- net.sh start
|
||||
time net/net.sh start -s "$snapChannel"
|
||||
|
||||
maybeRejectExtraNodes=
|
||||
if ! $publicNetwork; then
|
||||
maybeRejectExtraNodes="-o rejectExtraNodes"
|
||||
fi
|
||||
maybeNoValidatorSanity=
|
||||
if [[ -n $NO_VALIDATOR_SANITY ]]; then
|
||||
maybeNoValidatorSanity="-o noValidatorSanity"
|
||||
fi
|
||||
maybeNoLedgerVerify=
|
||||
if [[ -n $NO_LEDGER_VERIFY ]]; then
|
||||
maybeNoLedgerVerify="-o noLedgerVerify"
|
||||
fi
|
||||
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
|
||||
if $useTarReleaseChannel; then
|
||||
time net/net.sh start -t "$tarChannelOrTag" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
|
||||
else
|
||||
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
|
||||
fi
|
||||
exit 0
|
||||
|
359
ci/testnet-manager.sh
Executable file
@ -0,0 +1,359 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")"/..
|
||||
|
||||
if [[ -z $BUILDKITE ]]; then
|
||||
echo BUILDKITE not defined
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z $SOLANA_METRICS_PARTIAL_CONFIG ]]; then
|
||||
echo SOLANA_METRICS_PARTIAL_CONFIG not defined
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z $TESTNET ]]; then
|
||||
TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
|
||||
fi
|
||||
|
||||
if [[ -z $TESTNET_OP ]]; then
|
||||
TESTNET_OP=$(buildkite-agent meta-data get "testnet-operation" --default "")
|
||||
fi
|
||||
|
||||
if [[ -z $TESTNET || -z $TESTNET_OP ]]; then
|
||||
(
|
||||
cat <<EOF
|
||||
steps:
|
||||
- block: "Manage Testnet"
|
||||
fields:
|
||||
- select: "Network"
|
||||
key: "testnet"
|
||||
options:
|
||||
- label: "testnet"
|
||||
value: "testnet"
|
||||
- label: "testnet-perf"
|
||||
value: "testnet-perf"
|
||||
- label: "testnet-master"
|
||||
value: "testnet-master"
|
||||
- label: "testnet-master-perf"
|
||||
value: "testnet-master-perf"
|
||||
- label: "testnet-edge"
|
||||
value: "testnet-edge"
|
||||
- label: "testnet-edge-perf"
|
||||
value: "testnet-edge-perf"
|
||||
- label: "testnet-beta"
|
||||
value: "testnet-beta"
|
||||
- label: "testnet-beta-perf"
|
||||
value: "testnet-beta-perf"
|
||||
- select: "Operation"
|
||||
key: "testnet-operation"
|
||||
default: "sanity-or-restart"
|
||||
options:
|
||||
- label: "Sanity check. Restart network on failure"
|
||||
value: "sanity-or-restart"
|
||||
- label: "Start (or restart) the network"
|
||||
value: "start"
|
||||
- label: "Stop the network"
|
||||
value: "stop"
|
||||
- label: "Sanity check only"
|
||||
value: "sanity"
|
||||
- command: "ci/$(basename "$0")"
|
||||
agents:
|
||||
- "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"
|
||||
EOF
|
||||
) | buildkite-agent pipeline upload
|
||||
exit 0
|
||||
fi
|
||||
|
||||
export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
|
||||
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
|
||||
|
||||
ci/channel-info.sh
|
||||
eval "$(ci/channel-info.sh)"
|
||||
|
||||
case $TESTNET in
|
||||
testnet-edge|testnet-edge-perf|testnet-master|testnet-master-perf)
|
||||
CHANNEL_OR_TAG=edge
|
||||
CHANNEL_BRANCH=$EDGE_CHANNEL
|
||||
;;
|
||||
testnet-beta|testnet-beta-perf)
|
||||
CHANNEL_OR_TAG=beta
|
||||
CHANNEL_BRANCH=$BETA_CHANNEL
|
||||
;;
|
||||
testnet|testnet-perf)
|
||||
if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
|
||||
CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
|
||||
else
|
||||
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
|
||||
fi
|
||||
CHANNEL_BRANCH=$BETA_CHANNEL
|
||||
;;
|
||||
*)
|
||||
echo "Error: Invalid TESTNET=$TESTNET"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
|
||||
(
|
||||
cat <<EOF
|
||||
steps:
|
||||
- trigger: "$BUILDKITE_PIPELINE_SLUG"
|
||||
async: true
|
||||
build:
|
||||
message: "$BUILDKITE_MESSAGE"
|
||||
branch: "$CHANNEL_BRANCH"
|
||||
env:
|
||||
TESTNET: "$TESTNET"
|
||||
TESTNET_OP: "$TESTNET_OP"
|
||||
EOF
|
||||
) | buildkite-agent pipeline upload
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
||||
sanity() {
|
||||
echo "--- sanity $TESTNET"
|
||||
case $TESTNET in
|
||||
testnet-edge)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
|
||||
)
|
||||
;;
|
||||
testnet-edge-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export REJECT_EXTRA_NODES=1
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-sanity.sh edge-perf-testnet-solana-com ec2 us-west-2b
|
||||
)
|
||||
;;
|
||||
testnet-beta)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
|
||||
)
|
||||
;;
|
||||
testnet-beta-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export REJECT_EXTRA_NODES=1
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-sanity.sh beta-perf-testnet-solana-com ec2 us-west-2b
|
||||
)
|
||||
;;
|
||||
testnet-master)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-sanity.sh master-testnet-solana-com gce us-west1-b
|
||||
)
|
||||
;;
|
||||
testnet-master-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export REJECT_EXTRA_NODES=1
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-sanity.sh master-perf-testnet-solana-com gce us-west1-b
|
||||
)
|
||||
;;
|
||||
testnet)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
#ci/testnet-sanity.sh testnet-solana-com gce us-east1-c
|
||||
ci/testnet-sanity.sh testnet-solana-com ec2 us-west-1a
|
||||
)
|
||||
;;
|
||||
testnet-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export REJECT_EXTRA_NODES=1
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
#ci/testnet-sanity.sh perf-testnet-solana-com ec2 us-east-1a
|
||||
ci/testnet-sanity.sh perf-testnet-solana-com gce us-west1-b
|
||||
)
|
||||
;;
|
||||
*)
|
||||
echo "Error: Invalid TESTNET=$TESTNET"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
|
||||
start() {
|
||||
declare maybeDelete=$1
|
||||
if [[ -z $maybeDelete ]]; then
|
||||
echo "--- start $TESTNET"
|
||||
else
|
||||
echo "--- stop $TESTNET"
|
||||
fi
|
||||
|
||||
case $TESTNET in
|
||||
testnet-edge)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
|
||||
-s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0ccd4f2239886fa94 \
|
||||
${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
testnet-edge-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-deploy.sh edge-perf-testnet-solana-com ec2 us-west-2b \
|
||||
-g -t "$CHANNEL_OR_TAG" -c 2 \
|
||||
${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
testnet-beta)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
|
||||
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0f286cf8a0771ce35 \
|
||||
${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
testnet-beta-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-deploy.sh beta-perf-testnet-solana-com ec2 us-west-2b \
|
||||
-g -t "$CHANNEL_OR_TAG" -c 2 \
|
||||
${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
testnet-master)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-deploy.sh master-testnet-solana-com gce us-west1-b \
|
||||
-s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a master-testnet-solana-com \
|
||||
${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
testnet-master-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-deploy.sh master-perf-testnet-solana-com gce us-west1-b \
|
||||
-G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
|
||||
-t "$CHANNEL_OR_TAG" -c 2 \
|
||||
${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
testnet)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
#ci/testnet-deploy.sh testnet-solana-com gce us-east1-c \
|
||||
# -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
|
||||
# ${maybeDelete:+-d}
|
||||
ci/testnet-deploy.sh testnet-solana-com ec2 us-west-1a \
|
||||
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0fa502bf95f6f18b2 \
|
||||
${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
testnet-perf)
|
||||
# shellcheck disable=2030
|
||||
# shellcheck disable=2031
|
||||
(
|
||||
set -ex
|
||||
export NO_LEDGER_VERIFY=1
|
||||
export NO_VALIDATOR_SANITY=1
|
||||
ci/testnet-deploy.sh perf-testnet-solana-com gce us-west1-b \
|
||||
-G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
|
||||
-t "$CHANNEL_OR_TAG" -c 2 \
|
||||
${maybeDelete:+-d}
|
||||
#ci/testnet-deploy.sh perf-testnet-solana-com ec2 us-east-1a \
|
||||
# -g \
|
||||
# -t "$CHANNEL_OR_TAG" -c 2 \
|
||||
# ${maybeDelete:+-d}
|
||||
)
|
||||
;;
|
||||
*)
|
||||
echo "Error: Invalid TESTNET=$TESTNET"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
stop() {
|
||||
start delete
|
||||
}
|
||||
|
||||
case $TESTNET_OP in
|
||||
sanity)
|
||||
sanity
|
||||
;;
|
||||
start)
|
||||
start
|
||||
;;
|
||||
stop)
|
||||
stop
|
||||
;;
|
||||
sanity-or-restart)
|
||||
if sanity; then
|
||||
echo Pass
|
||||
else
|
||||
echo "Sanity failed, restarting the network"
|
||||
echo "^^^ +++"
|
||||
start
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
echo --- fin
|
||||
exit 0
|
@ -9,11 +9,13 @@ usage() {
|
||||
echo "Error: $*"
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [name]
|
||||
usage: $0 [name] [cloud] [zone]
|
||||
|
||||
Sanity check a CD testnet
|
||||
|
||||
name - name of the network
|
||||
cloud - cloud provider to use (gce, ec2)
|
||||
zone - cloud provider zone of the network
|
||||
|
||||
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
|
||||
metrics
|
||||
@ -22,15 +24,20 @@ EOF
|
||||
}
|
||||
|
||||
netName=$1
|
||||
cloudProvider=$2
|
||||
zone=$3
|
||||
[[ -n $netName ]] || usage ""
|
||||
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
|
||||
[[ -n $zone ]] || usage "Zone not specified"
|
||||
|
||||
set -x
|
||||
echo --- gce.sh config
|
||||
net/gce.sh config -p "$netName"
|
||||
echo "--- $cloudProvider.sh config"
|
||||
net/"$cloudProvider".sh config -p "$netName" -z "$zone"
|
||||
net/init-metrics.sh -e
|
||||
echo --- net.sh sanity
|
||||
net/net.sh sanity \
|
||||
${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
|
||||
${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
|
||||
${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
|
||||
|
||||
exit 0
|
||||
|
@ -19,12 +19,12 @@ require() {
|
||||
|
||||
case ${1:-stable} in
|
||||
nightly)
|
||||
require rustc 1.30.[0-9]+-nightly
|
||||
require cargo 1.29.[0-9]+-nightly
|
||||
require rustc 1.31.[0-9]+-nightly
|
||||
require cargo 1.31.[0-9]+-nightly
|
||||
;;
|
||||
stable)
|
||||
require rustc 1.28.[0-9]+
|
||||
require cargo 1.28.[0-9]+
|
||||
require rustc 1.30.[0-9]+
|
||||
require cargo 1.30.[0-9]+
|
||||
;;
|
||||
*)
|
||||
echo Error: unknown argument: "$1"
|
||||
|
167
doc/json-rpc.md
@ -5,22 +5,37 @@ Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.o
|
||||
|
||||
To interact with a Solana node inside a JavaScript application, use the [solana-web3.js](https://github.com/solana-labs/solana-web3.js) library, which gives a convenient interface for the RPC methods.
|
||||
|
||||
RPC Endpoint
|
||||
RPC HTTP Endpoint
|
||||
---
|
||||
|
||||
**Default port:** 8899
|
||||
**Default port:** 8899
|
||||
e.g. http://localhost:8899, http://192.168.1.88:8899
|
||||
|
||||
RPC PubSub WebSocket Endpoint
|
||||
---
|
||||
|
||||
**Default port:** 8900
|
||||
e.g. ws://localhost:8900, ws://192.168.1.88:8900
|
||||
|
||||
|
||||
Methods
|
||||
---
|
||||
|
||||
* [confirmTransaction](#confirmtransaction)
|
||||
* [getAddress](#getaddress)
|
||||
* [getBalance](#getbalance)
|
||||
* [getAccountInfo](#getaccountinfo)
|
||||
* [getLastId](#getlastid)
|
||||
* [getSignatureStatus](#getsignaturestatus)
|
||||
* [getTransactionCount](#gettransactioncount)
|
||||
* [requestAirdrop](#requestairdrop)
|
||||
* [sendTransaction](#sendtransaction)
|
||||
* [startSubscriptionChannel](#startsubscriptionchannel)
|
||||
|
||||
* [Subscription Websocket](#subscription-websocket)
|
||||
* [accountSubscribe](#accountsubscribe)
|
||||
* [accountUnsubscribe](#accountunsubscribe)
|
||||
* [signatureSubscribe](#signaturesubscribe)
|
||||
* [signatureUnsubscribe](#signatureunsubscribe)
|
||||
|
||||
Request Formatting
|
||||
---
|
||||
@ -96,6 +111,30 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
|
||||
|
||||
---
|
||||
|
||||
### getAccountInfo
|
||||
Returns all information associated with the account of the provided Pubkey
|
||||
|
||||
##### Parameters:
|
||||
* `string` - Pubkey of account to query, as base-58 encoded string
|
||||
|
||||
##### Results:
|
||||
The result field will be a JSON object with the following sub fields:
|
||||
|
||||
* `tokens`, number of tokens assigned to this account, as a signed 64-bit integer
|
||||
* `program_id`, array of 32 bytes representing the program this account has been assigned to
|
||||
* `userdata`, array of bytes representing any userdata associated with the account
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["FVxxngPx368XvMCoeskdd6U8cZJFsfa1BEtGWqyAxRj4"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### getLastId
|
||||
Returns the last entry ID from the ledger
|
||||
|
||||
@ -116,6 +155,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
|
||||
---
|
||||
|
||||
### getSignatureStatus
|
||||
Returns the status of a given signature. This method is similar to
|
||||
[confirmTransaction](#confirmtransaction) but provides more resolution for error
|
||||
events.
|
||||
|
||||
##### Parameters:
|
||||
* `string` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
|
||||
##### Results:
|
||||
* `string` - Transaction status:
|
||||
* `Confirmed` - Transaction was successful
|
||||
* `SignatureNotFound` - Unknown transaction
|
||||
* `ProgramRuntimeError` - An error occurred in the program that processed this Transaction
|
||||
* `AccountInUse` - Another Transaction had a write lock on one of the Accounts specified in this Transaction. The Transaction may succeed if retried
|
||||
* `GenericFailure` - Some other error occurred. **Note**: In the future new Transaction statuses may be added to this list. It's safe to assume that all new statuses will be more specific error conditions that previously presented as `GenericFailure`
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"SignatureNotFound","id":1}
|
||||
```
|
||||
|
||||
---
|
||||
### getTransactionCount
|
||||
Returns the current Transaction count from the ledger
|
||||
|
||||
@ -176,3 +241,99 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Subscription Websocket
|
||||
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/` (see the example session after this list):
- Submit subscription requests to the websocket using the methods below
- Multiple subscriptions may be active at once
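A minimal example session, assuming the default PubSub port of 8900 and the third-party `wscat` WebSocket client (any WebSocket client works):

```bash
$ wscat -c ws://localhost:8900
# Paste a subscription request, e.g.:
#   {"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
# The node answers with the subscription id:
#   {"jsonrpc": "2.0","result": 0,"id": 1}
```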
|
||||
---
|
||||
|
||||
### accountSubscribe
|
||||
Subscribe to an account to receive notifications when the userdata for a given account public key changes
|
||||
|
||||
##### Parameters:
|
||||
* `string` - account Pubkey, as base-58 encoded string
|
||||
|
||||
##### Results:
|
||||
* `integer` - Subscription id (needed to unsubscribe)
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": 0,"id": 1}
|
||||
```
|
||||
|
||||
##### Notification Format:
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### accountUnsubscribe
|
||||
Unsubscribe from account userdata change notifications
|
||||
|
||||
##### Parameters:
|
||||
* `integer` - id of account Subscription to cancel
|
||||
|
||||
##### Results:
|
||||
* `bool` - unsubscribe success message
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"accountUnsubscribe", "params":[0]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": true,"id": 1}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### signatureSubscribe
|
||||
Subscribe to a transaction signature to receive notification when the transaction is confirmed
|
||||
On `signatureNotification`, the subscription is automatically cancelled
|
||||
|
||||
##### Parameters:
|
||||
* `string` - Transaction Signature, as base-58 encoded string
|
||||
|
||||
##### Results:
|
||||
* `integer` - subscription id (needed to unsubscribe)
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": 0,"id": 1}
|
||||
```
|
||||
|
||||
##### Notification Format:
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### signatureUnsubscribe
|
||||
Unsubscribe from signature confirmation notifications

##### Parameters:
* `integer` - id of signature subscription to cancel
|
||||
|
||||
##### Results:
|
||||
* `bool` - unsubscribe success message
|
||||
|
||||
##### Example:
|
||||
```bash
|
||||
// Request
|
||||
{"jsonrpc":"2.0", "id":1, "method":"signatureUnsubscribe", "params":[0]}
|
||||
|
||||
// Result
|
||||
{"jsonrpc": "2.0","result": true,"id": 1}
|
||||
```
|
||||
|
@ -1,28 +1,40 @@
|
||||
# TestNet debugging info
|
||||
|
||||
Currently we have two testnets, 'perf' and 'master', both on the master branch of the solana repo. Deploys happen
|
||||
at the top of every hour with the latest code. 'perf' has more cores for the client machine to flood the network
|
||||
with transactions until failure.
|
||||
Currently we have three testnets:
|
||||
* `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
|
||||
* `testnet-perf` - private beta channel testnet with clients trying to flood the network
|
||||
with transactions until failure. Runs 24/7
|
||||
* `testnet-master` - public edge channel testnet accessible via master.testnet.solana.com. Runs 24/7
|
||||
* `testnet-master-perf` - private edge channel testnet with clients trying to flood the network
|
||||
with transactions until failure. Runs on weekday mornings for a couple hours
|
||||
|
||||
## Deploy process
|
||||
|
||||
They are deployed with the `ci/testnet-deploy.sh` script. There is a scheduled buildkite job which runs to do the deploy,
|
||||
look at `testnet-deploy` to see the agent which ran it and the logs. There is also a manual job to do the deploy manually..
|
||||
Validators are selected based on their machine name and everyone gets the binaries installed from snap.
|
||||
They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
|
||||
buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
|
||||
Each testnet can be manually manipulated from buildkite as well. The `-perf`
|
||||
testnets use a release tarball while the non`-perf` builds use the snap build
|
||||
(we've observed that the snap build runs slower than a tarball but this has yet
|
||||
to be root caused).
|
||||
|
||||
## Where are the testnet logs?
|
||||
|
||||
For the client they are put in `/tmp/solana`; for validators and leaders they are in `/var/snap/solana/current/`.
|
||||
You can also see the backtrace of the client by ssh'ing into the client node and doing:
|
||||
|
||||
Attach to the testnet first by running one of:
|
||||
```bash
|
||||
$ sudo -u testnet-deploy
|
||||
$ tmux attach -t solana
|
||||
$ net/gce.sh config testnet-solana-com
|
||||
$ net/gce.sh config master-testnet-solana-com
|
||||
$ net/gce.sh config perf-testnet-solana-com
|
||||
```
|
||||
|
||||
## How do I reset the testnet?
|
||||
Then run:
|
||||
```bash
|
||||
$ net/ssh.sh
|
||||
```
|
||||
for log location details
|
||||
|
||||
Through buildkite.
|
||||
## How do I reset the testnet?
|
||||
Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
|
||||
and when prompted select the desired testnet
|
||||
|
||||
## How can I scale the tx generation rate?
|
||||
|
||||
@ -32,13 +44,9 @@ variable `RAYON_NUM_THREADS=<xx>`
|
||||
|
||||
## How can I test a change on the testnet?
|
||||
|
||||
Currently, a merged PR is the only way to test a change on the testnet.
|
||||
Currently, a merged PR is the only way to test a change on the testnet. But you
|
||||
can run your own testnet using the scripts in the `net/` directory.
|
||||
|
||||
## Adjusting the number of clients or validators on the testnet
|
||||
Edit `ci/testnet-manager.sh`
|
||||
|
||||
1. Go to the [GCP Instance Group](https://console.cloud.google.com/compute/instanceGroups/list?project=principal-lane-200702) tab
|
||||
2. Find the client or validator instance group you'd like to adjust
|
||||
3. Edit it (pencil icon), change the "Number of instances", then click "Save" button
|
||||
4. Refresh until the change to number of instances has been executed
|
||||
5. Click the "New Build" button on the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/)
|
||||
buildkite job to initiate a redeploy of the network with the updated instance count.
|
||||
|
@ -15,7 +15,7 @@ mkdir -p target/perf-libs
|
||||
cd target/perf-libs
|
||||
(
|
||||
set -x
|
||||
curl https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
|
||||
curl https://solana-perf.s3.amazonaws.com/v0.10.3/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
|
||||
)
|
||||
|
||||
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
|
||||
|
@ -18,4 +18,8 @@ usage() {
|
||||
exit 1
|
||||
}
|
||||
|
||||
$solana_bench_tps "$@"
|
||||
if [[ -z $1 ]]; then # default behavior
  $solana_bench_tps --identity config-private/client-id.json --network 127.0.0.1:8001 --duration 90
else
  $solana_bench_tps "$@"
fi
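# Annotation, not part of the original script: with this change, invoking the
# client wrapper with no arguments starts a default 90-second bench-tps run
# against a local leader, while any explicit arguments are passed straight
# through to solana-bench-tps, e.g. (hypothetical):
#   ./client.sh                                   # default local run
#   ./client.sh --network testnet.solana.com:8001 --duration 30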
|
@ -8,9 +8,9 @@
|
||||
#
|
||||
|
||||
rsync=rsync
|
||||
leader_logger="cat"
|
||||
validator_logger="cat"
|
||||
drone_logger="cat"
|
||||
leader_logger="tee leader.log"
|
||||
validator_logger="tee validator.log"
|
||||
drone_logger="tee drone.log"
|
||||
|
||||
if [[ $(uname) != Linux ]]; then
|
||||
# Protect against unsupported configurations to prevent non-obvious errors
|
||||
@ -28,13 +28,7 @@ fi
|
||||
if [[ -d $SNAP ]]; then # Running inside a Linux Snap?
|
||||
solana_program() {
|
||||
declare program="$1"
|
||||
if [[ "$program" = wallet || "$program" = bench-tps ]]; then
|
||||
# TODO: Merge wallet.sh/client.sh functionality into
|
||||
# solana-wallet/solana-demo-client proper and remove this special case
|
||||
printf "%s/bin/solana-%s" "$SNAP" "$program"
|
||||
else
|
||||
printf "%s/command-%s.wrapper" "$SNAP" "$program"
|
||||
fi
|
||||
printf "%s/command-%s.wrapper" "$SNAP" "$program"
|
||||
}
|
||||
rsync="$SNAP"/bin/rsync
|
||||
multilog="$SNAP/bin/multilog t s16777215 n200"
|
||||
@ -55,8 +49,6 @@ elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
|
||||
declare program="$1"
|
||||
printf "solana-%s" "$program"
|
||||
}
|
||||
# CUDA was/wasn't selected at build time, can't affect CUDA state here
|
||||
unset SOLANA_CUDA
|
||||
else
|
||||
solana_program() {
|
||||
declare program="$1"
|
||||
@ -110,10 +102,16 @@ tune_networking() {
|
||||
# test the existence of the sysctls before trying to set them
|
||||
# go ahead and return true and don't exit if these calls fail
|
||||
sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
|
||||
sudo sysctl -w net.core.rmem_max=67108864 1>/dev/null 2>/dev/null
|
||||
sudo sysctl -w net.core.rmem_max=1610612736 1>/dev/null 2>/dev/null
|
||||
|
||||
sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
|
||||
sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
|
||||
sudo sysctl -w net.core.rmem_default=1610612736 1>/dev/null 2>/dev/null
|
||||
|
||||
sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
|
||||
sudo sysctl -w net.core.wmem_max=1610612736 1>/dev/null 2>/dev/null
|
||||
|
||||
sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
|
||||
sudo sysctl -w net.core.wmem_default=1610612736 1>/dev/null 2>/dev/null
|
||||
) || true
|
||||
fi
|
||||
|
||||
|
@ -34,6 +34,7 @@ ip_address_arg=-l
|
||||
num_tokens=1000000000
|
||||
node_type_leader=true
|
||||
node_type_validator=true
|
||||
node_type_client=true
|
||||
while getopts "h?n:lpt:" opt; do
|
||||
case $opt in
|
||||
h|\?)
|
||||
@ -55,10 +56,17 @@ while getopts "h?n:lpt:" opt; do
|
||||
leader)
|
||||
node_type_leader=true
|
||||
node_type_validator=false
|
||||
node_type_client=false
|
||||
;;
|
||||
validator)
|
||||
node_type_leader=false
|
||||
node_type_validator=true
|
||||
node_type_client=false
|
||||
;;
|
||||
client)
|
||||
node_type_leader=false
|
||||
node_type_validator=false
|
||||
node_type_client=true
|
||||
;;
|
||||
*)
|
||||
usage "Error: unknown node type: $node_type"
|
||||
@ -74,13 +82,19 @@ done
|
||||
|
||||
set -e
|
||||
|
||||
if $node_type_leader; then
|
||||
for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_PRIVATE_DIR"; do
|
||||
echo "Cleaning $i"
|
||||
rm -rvf "$i"
|
||||
mkdir -p "$i"
|
||||
done
|
||||
for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_VALIDATOR_DIR" "$SOLANA_CONFIG_PRIVATE_DIR"; do
|
||||
echo "Cleaning $i"
|
||||
rm -rvf "$i"
|
||||
mkdir -p "$i"
|
||||
done
|
||||
|
||||
if $node_type_client; then
|
||||
client_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/client-id.json
|
||||
$solana_keygen -o "$client_id_path"
|
||||
ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"/
|
||||
fi
|
||||
|
||||
if $node_type_leader; then
|
||||
leader_address_args=("$ip_address_arg")
|
||||
leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
|
||||
mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
|
||||
@ -102,11 +116,6 @@ fi
|
||||
|
||||
|
||||
if $node_type_validator; then
|
||||
echo "Cleaning $SOLANA_CONFIG_VALIDATOR_DIR"
|
||||
rm -rvf "$SOLANA_CONFIG_VALIDATOR_DIR"
|
||||
mkdir -p "$SOLANA_CONFIG_VALIDATOR_DIR"
|
||||
|
||||
|
||||
validator_address_args=("$ip_address_arg" -b 9000)
|
||||
validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
|
||||
|
||||
|
@ -1,50 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Runs solana-wallet against the specified network
|
||||
#
|
||||
# usage: $0 <rsync network path to solana repo on leader machine>"
|
||||
#
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
# shellcheck source=scripts/oom-score-adj.sh
|
||||
source "$here"/../scripts/oom-score-adj.sh
|
||||
|
||||
# if $1 isn't host:path, something.com, or a valid local path
|
||||
if [[ ${1%:} != "$1" || "$1" =~ [^.]\.[^.] || -d $1 ]]; then
|
||||
leader=$1 # interpret
|
||||
shift
|
||||
else
|
||||
if [[ -d "$SNAP" ]]; then
|
||||
leader=testnet.solana.com # Default to testnet when running as a Snap
|
||||
else
|
||||
leader=$here/.. # Default to local solana repo
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$1" = "reset" ]]; then
|
||||
echo Wallet resetting
|
||||
rm -rf "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
rsync_leader_url=$(rsync_url "$leader")
|
||||
|
||||
set -e
|
||||
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
|
||||
echo "Fetching leader configuration from $rsync_leader_url"
|
||||
$rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
|
||||
fi
|
||||
|
||||
client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
|
||||
if [[ ! -r $client_id_path ]]; then
|
||||
echo "Generating client identity: $client_id_path"
|
||||
$solana_keygen -o "$client_id_path"
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
|
||||
exec $solana_wallet \
|
||||
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" --timeout 10 "$@"
|
@ -5,15 +5,30 @@ intended to be both dev and CD friendly.
|
||||
|
||||
### User Account Prerequisites
|
||||
|
||||
Log in to GCP with:
|
||||
GCP and AWS are supported.
|
||||
|
||||
#### GCP
|
||||
First authenticate with
|
||||
```bash
|
||||
$ gcloud auth login
|
||||
```
|
||||
|
||||
Also ensure that `$(whoami)` is the name of an InfluxDB user account with enough
|
||||
access to create a new database.
|
||||
#### AWS
|
||||
Obtain your credentials from the AWS IAM Console and configure the AWS CLI with
|
||||
```bash
|
||||
$ aws configure
|
||||
```
|
||||
More information on AWS CLI configuration can be found [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-quick-configuration)
|
||||
|
||||
### Metrics configuration
|
||||
Ensure that `$(whoami)` is the name of an InfluxDB user account with enough
|
||||
access to create a new InfluxDB database. Ask mvines@ for help if needed.
|
||||
|
||||
## Quick Start
|
||||
|
||||
NOTE: This example uses GCP. If you are using AWS, replace `./gce.sh` with
|
||||
`./ec2.sh` in the commands.
|
||||
|
||||
```bash
|
||||
$ cd net/
|
||||
$ ./gce.sh create -n 5 -c 1 #<-- Create a GCE testnet with 5 validators, 1 client (billing starts here)
|
||||
@ -32,6 +47,10 @@ network over public IP addresses:
|
||||
```bash
|
||||
$ ./gce.sh create -P ...
|
||||
```
|
||||
or
|
||||
```bash
|
||||
$ ./ec2.sh create -P ...
|
||||
```
|
||||
|
||||
### Deploying a Snap-based network
|
||||
To deploy the latest pre-built `edge` channel Snap (i.e., latest from the `master`
|
||||
@ -46,6 +65,10 @@ First ensure the network instances are created with GPU enabled:
|
||||
```bash
|
||||
$ ./gce.sh create -g ...
|
||||
```
|
||||
or
|
||||
```bash
|
||||
$ ./ec2.sh create -g ...
|
||||
```
|
||||
|
||||
If deploying a Snap-based network nothing further is required, as GPU presence
|
||||
is detected at runtime and the CUDA build is auto selected.
|
||||
@ -58,9 +81,20 @@ $ ./net.sh start -f "cuda,erasure"
|
||||
|
||||
### How to interact with a CD testnet deployed by ci/testnet-deploy.sh
|
||||
|
||||
**AWS-Specific Extra Setup**: Follow the steps in `scripts/add-solana-user-authorized_keys.sh`,
|
||||
then redeploy the testnet before continuing in this section.
|
||||
|
||||
Taking **master-testnet-solana-com** as an example, configure your workspace for
|
||||
the testnet using:
|
||||
```
|
||||
```bash
|
||||
$ ./gce.sh config -p master-testnet-solana-com
|
||||
$ ./ssh.sh # <-- Details on how to ssh into any testnet node
|
||||
```
|
||||
or
|
||||
```bash
|
||||
$ ./ec2.sh config -p master-testnet-solana-com
|
||||
```
|
||||
|
||||
Then run the following for details on how to ssh into any testnet node
|
||||
```bash
|
||||
$ ./ssh.sh
|
||||
```
|
||||
|
1
net/ec2.sh
Symbolic link
@ -0,0 +1 @@
|
||||
gce.sh
|
297
net/gce.sh
@ -1,27 +1,47 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=net/scripts/gcloud.sh
|
||||
source "$here"/scripts/gcloud.sh
|
||||
# shellcheck source=net/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
cloudProvider=$(basename "$0" .sh)
|
||||
bootDiskType=""
|
||||
case $cloudProvider in
|
||||
gce)
|
||||
# shellcheck source=net/scripts/gce-provider.sh
|
||||
source "$here"/scripts/gce-provider.sh
|
||||
|
||||
cpuLeaderMachineType=n1-standard-16
|
||||
gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
|
||||
leaderMachineType=$cpuLeaderMachineType
|
||||
validatorMachineType=n1-standard-16
|
||||
clientMachineType=n1-standard-16
|
||||
;;
|
||||
ec2)
|
||||
# shellcheck source=net/scripts/ec2-provider.sh
|
||||
source "$here"/scripts/ec2-provider.sh
|
||||
|
||||
cpuLeaderMachineType=m4.4xlarge
|
||||
gpuLeaderMachineType=p2.xlarge
|
||||
leaderMachineType=$cpuLeaderMachineType
|
||||
validatorMachineType=m4.2xlarge
|
||||
clientMachineType=m4.2xlarge
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown cloud provider: $cloudProvider"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
prefix=testnet-dev-${USER//[^A-Za-z0-9]/}
|
||||
validatorNodeCount=5
|
||||
clientNodeCount=1
|
||||
leaderBootDiskSize=1TB
|
||||
leaderMachineType=n1-standard-16
|
||||
leaderAccelerator=
|
||||
validatorMachineType=n1-standard-4
|
||||
validatorBootDiskSize=$leaderBootDiskSize
|
||||
validatorAccelerator=
|
||||
clientMachineType=n1-standard-16
|
||||
clientBootDiskSize=40GB
|
||||
clientAccelerator=
|
||||
leaderBootDiskSizeInGb=1000
|
||||
validatorBootDiskSizeInGb=$leaderBootDiskSizeInGb
|
||||
clientBootDiskSizeInGb=75
|
||||
|
||||
imageName="ubuntu-16-04-cuda-9-2-new"
|
||||
publicNetwork=false
|
||||
zone="us-west1-b"
|
||||
enableGpu=false
|
||||
leaderAddress=
|
||||
|
||||
usage() {
|
||||
@ -33,7 +53,7 @@ usage() {
|
||||
cat <<EOF
|
||||
usage: $0 [create|config|delete] [common options] [command-specific options]
|
||||
|
||||
Configure a GCE-based testnet
|
||||
Manage testnet instances
|
||||
|
||||
create - create a new testnet (implies 'config')
|
||||
config - configure the testnet and write a config file describing it
|
||||
@ -47,10 +67,15 @@ Configure a GCE-based testnet
|
||||
-n [number] - Number of validator nodes (default: $validatorNodeCount)
|
||||
-c [number] - Number of client nodes (default: $clientNodeCount)
|
||||
-P - Use public network IP addresses (default: $publicNetwork)
|
||||
-z [zone] - GCP Zone for the nodes (default: $zone)
|
||||
-i [imageName] - Existing image on GCE (default: $imageName)
|
||||
-g - Enable GPU
|
||||
-a [address] - Set the leader node's external IP address to this GCE address
|
||||
-z [zone] - Zone for the nodes (default: $zone)
|
||||
-g - Enable GPU (default: $enableGpu)
|
||||
-G - Enable GPU, and set count/type of GPUs to use (e.g. $cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80)
|
||||
-a [address] - Set the leader node's external IP address to this value.
|
||||
For GCE, [address] is the "name" of the desired External
|
||||
IP Address.
|
||||
For EC2, [address] is the "allocation ID" of the desired
|
||||
Elastic IP.
|
||||
-d [disk-type] - Specify a boot disk type (default: none). Use pd-ssd to get an SSD on GCE.
|
||||
|
||||
config-specific options:
|
||||
none
|
||||
@ -68,7 +93,7 @@ command=$1
|
||||
shift
|
||||
[[ $command = create || $command = config || $command = delete ]] || usage "Invalid command: $command"
|
||||
|
||||
while getopts "h?p:Pi:n:c:z:ga:" opt; do
|
||||
while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
|
||||
case $opt in
|
||||
h | \?)
|
||||
usage
|
||||
@ -80,9 +105,6 @@ while getopts "h?p:Pi:n:c:z:ga:" opt; do
|
||||
P)
|
||||
publicNetwork=true
|
||||
;;
|
||||
i)
|
||||
imageName=$OPTARG
|
||||
;;
|
||||
n)
|
||||
validatorNodeCount=$OPTARG
|
||||
;;
|
||||
@ -90,23 +112,101 @@ while getopts "h?p:Pi:n:c:z:ga:" opt; do
|
||||
clientNodeCount=$OPTARG
|
||||
;;
|
||||
z)
|
||||
zone=$OPTARG
|
||||
cloud_SetZone "$OPTARG"
|
||||
;;
|
||||
g)
|
||||
leaderAccelerator="count=4,type=nvidia-tesla-k80"
|
||||
enableGpu=true
|
||||
leaderMachineType=$gpuLeaderMachineType
|
||||
;;
|
||||
G)
|
||||
enableGpu=true
|
||||
leaderMachineType="$OPTARG"
|
||||
;;
|
||||
a)
|
||||
leaderAddress=$OPTARG
|
||||
;;
|
||||
d)
|
||||
bootDiskType=$OPTARG
|
||||
;;
|
||||
*)
|
||||
usage "Error: unhandled option: $opt"
|
||||
usage "unhandled option: $opt"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND - 1))
|
||||
|
||||
[[ -z $1 ]] || usage "Unexpected argument: $1"
|
||||
sshPrivateKey="$netConfigDir/id_$prefix"
|
||||
if [[ $cloudProvider = ec2 ]]; then
|
||||
# EC2 keys can't be retrieved from running instances like GCE keys can, so save
# EC2 keys in the user's home directory so |./ec2.sh config| can at least be
# used on the same host that ran |./ec2.sh create|.
|
||||
sshPrivateKey="$HOME/.ssh/solana-net-id_$prefix"
|
||||
else
|
||||
sshPrivateKey="$netConfigDir/id_$prefix"
|
||||
fi
|
||||
|
||||
case $cloudProvider in
|
||||
gce)
|
||||
if $enableGpu; then
|
||||
# TODO: GPU image is still 16.04-based pending resolution of
|
||||
# https://github.com/solana-labs/solana/issues/1702
|
||||
imageName="ubuntu-16-04-cuda-9-2-new"
|
||||
else
|
||||
imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
|
||||
fi
|
||||
;;
|
||||
ec2)
|
||||
# Deep Learning AMI (Ubuntu 16.04-based)
|
||||
case $region in # (region global variable is set by cloud_SetZone)
|
||||
us-east-1)
|
||||
imageName="ami-047daf3f2b162fc35"
|
||||
;;
|
||||
us-west-1)
|
||||
imageName="ami-08c8c7c4a57a6106d"
|
||||
;;
|
||||
us-west-2)
|
||||
imageName="ami-0b63040ee445728bf"
|
||||
;;
|
||||
*)
|
||||
usage "Unsupported region: $region"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown cloud provider: $cloudProvider"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# cloud_ForEachInstance [cmd] [extra args to cmd]
|
||||
#
|
||||
# Execute a command for each element in the `instances` array
|
||||
#
|
||||
# cmd - The command to execute on each instance
#       The command will receive arguments followed by any
#       additional arguments supplied to cloud_ForEachInstance:
#         name      - name of the instance
#         publicIp  - The public IP address of this instance
#         privateIp - The private IP address of this instance
#         count     - Monotonically increasing count for each
#                     invocation of cmd, starting at 1
#         ...       - Extra args to cmd
|
||||
#
|
||||
#
|
||||
cloud_ForEachInstance() {
  declare cmd="$1"
  shift
  [[ -n $cmd ]] || { echo cloud_ForEachInstance: cmd not specified; exit 1; }

  declare count=1
  for info in "${instances[@]}"; do
    declare name publicIp privateIp
    IFS=: read -r name publicIp privateIp < <(echo "$info")

    eval "$cmd" "$name" "$publicIp" "$privateIp" "$count" "$@"
    count=$((count + 1))
  done
}
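# Usage sketch, not part of the original script: the callback receives the
# per-instance fields first and any extra arguments last, mirroring how
# recordInstanceIp below is invoked with an array name as its extra argument:
#   printInstance() {
#     declare name="$1" publicIp="$2" privateIp="$3" count="$4"
#     echo "$count: $name public=$publicIp private=$privateIp"
#   }
#   cloud_ForEachInstance printInstance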
|
||||
prepareInstancesAndWriteConfigFile() {
|
||||
$metricsWriteDatapoint "testnet-deploy net-config-begin=1"
|
||||
@ -122,10 +222,10 @@ EOF
|
||||
|
||||
recordInstanceIp() {
|
||||
declare name="$1"
|
||||
declare publicIp="$3"
|
||||
declare privateIp="$4"
|
||||
declare publicIp="$2"
|
||||
declare privateIp="$3"
|
||||
|
||||
declare arrayName="$6"
|
||||
declare arrayName="$5"
|
||||
|
||||
echo "$arrayName+=($publicIp) # $name" >> "$configFile"
|
||||
if [[ $arrayName = "leaderIp" ]]; then
|
||||
@ -139,121 +239,138 @@ EOF
|
||||
|
||||
waitForStartupComplete() {
|
||||
declare name="$1"
|
||||
declare publicIp="$3"
|
||||
declare publicIp="$2"
|
||||
|
||||
echo "Waiting for $name to finish booting..."
|
||||
(
|
||||
for i in $(seq 1 30); do
|
||||
if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.gce-startup-complete"); then
|
||||
break
|
||||
set -x +e
|
||||
for i in $(seq 1 60); do
|
||||
timeout 20s ssh "${sshOptions[@]}" "$publicIp" "ls -l /.instance-startup-complete"
|
||||
ret=$?
|
||||
if [[ $ret -eq 0 ]]; then
|
||||
exit 0
|
||||
fi
|
||||
sleep 2
|
||||
echo "Retry $i..."
|
||||
done
|
||||
echo "$name failed to boot."
|
||||
exit 1
|
||||
)
|
||||
echo "$name has booted."
|
||||
}
|
||||
|
||||
echo "Looking for leader instance..."
|
||||
gcloud_FindInstances "name=$prefix-leader" show
|
||||
cloud_FindInstance "$prefix-leader"
|
||||
[[ ${#instances[@]} -eq 1 ]] || {
|
||||
echo "Unable to find leader"
|
||||
exit 1
|
||||
}
|
||||
|
||||
echo "Fetching $sshPrivateKey from $leaderName"
|
||||
(
|
||||
rm -rf "$sshPrivateKey"{,pub}
|
||||
|
||||
declare leaderName
|
||||
declare leaderZone
|
||||
declare leaderIp
|
||||
IFS=: read -r leaderName leaderZone leaderIp _ < <(echo "${instances[0]}")
|
||||
IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")
|
||||
|
||||
set -x
|
||||
# Try to ping the machine first.
|
||||
timeout 90s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
|
||||
|
||||
# Try to ping the machine first. There can be a delay between when the
|
||||
# instance is reported as RUNNING and when it's reachable over the network
|
||||
timeout 30s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
|
||||
if [[ ! -r $sshPrivateKey ]]; then
|
||||
echo "Fetching $sshPrivateKey from $leaderName"
|
||||
|
||||
# Try to scp in a couple times, sshd may not yet be up even though the
|
||||
# machine can be pinged...
|
||||
set -o pipefail
|
||||
for i in $(seq 1 10); do
|
||||
if gcloud compute scp --zone "$leaderZone" \
|
||||
"$leaderName:/solana-id_ecdsa" "$sshPrivateKey"; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
echo "Retry $i..."
|
||||
done
|
||||
# Try to scp in a couple times, sshd may not yet be up even though the
|
||||
# machine can be pinged...
|
||||
set -x -o pipefail
|
||||
for i in $(seq 1 30); do
|
||||
if cloud_FetchFile "$leaderName" "$leaderIp" /solana-id_ecdsa "$sshPrivateKey"; then
|
||||
break
|
||||
fi
|
||||
|
||||
chmod 400 "$sshPrivateKey"
|
||||
sleep 1
|
||||
echo "Retry $i..."
|
||||
done
|
||||
|
||||
chmod 400 "$sshPrivateKey"
|
||||
ls -l "$sshPrivateKey"
|
||||
fi
|
||||
)
|
||||
|
||||
echo "leaderIp=()" >> "$configFile"
|
||||
gcloud_ForEachInstance recordInstanceIp leaderIp
|
||||
gcloud_ForEachInstance waitForStartupComplete
|
||||
cloud_ForEachInstance recordInstanceIp leaderIp
|
||||
cloud_ForEachInstance waitForStartupComplete
|
||||
|
||||
echo "Looking for validator instances..."
|
||||
gcloud_FindInstances "name~^$prefix-validator" show
|
||||
cloud_FindInstances "$prefix-validator"
|
||||
[[ ${#instances[@]} -gt 0 ]] || {
|
||||
echo "Unable to find validators"
|
||||
exit 1
|
||||
}
|
||||
echo "validatorIpList=()" >> "$configFile"
|
||||
gcloud_ForEachInstance recordInstanceIp validatorIpList
|
||||
gcloud_ForEachInstance waitForStartupComplete
|
||||
cloud_ForEachInstance recordInstanceIp validatorIpList
|
||||
cloud_ForEachInstance waitForStartupComplete
|
||||
|
||||
echo "clientIpList=()" >> "$configFile"
|
||||
echo "Looking for client instances..."
|
||||
gcloud_FindInstances "name~^$prefix-client" show
|
||||
cloud_FindInstances "$prefix-client"
|
||||
[[ ${#instances[@]} -eq 0 ]] || {
|
||||
gcloud_ForEachInstance recordInstanceIp clientIpList
|
||||
gcloud_ForEachInstance waitForStartupComplete
|
||||
cloud_ForEachInstance recordInstanceIp clientIpList
|
||||
cloud_ForEachInstance waitForStartupComplete
|
||||
}
|
||||
|
||||
echo "Wrote $configFile"
|
||||
$metricsWriteDatapoint "testnet-deploy net-config-complete=1"
|
||||
}
|
||||
|
||||
case $command in
|
||||
delete)
|
||||
delete() {
|
||||
$metricsWriteDatapoint "testnet-deploy net-delete-begin=1"
|
||||
|
||||
# Delete the leader node first to prevent unusual metrics on the dashboard
|
||||
# during shutdown.
|
||||
# TODO: It would be better to fully cut-off metrics reporting before any
|
||||
# instances are deleted.
|
||||
for filter in "^$prefix-leader" "^$prefix-"; do
|
||||
gcloud_FindInstances "name~$filter"
|
||||
for filter in "$prefix-leader" "$prefix-"; do
|
||||
echo "Searching for instances: $filter"
|
||||
cloud_FindInstances "$filter"
|
||||
|
||||
if [[ ${#instances[@]} -eq 0 ]]; then
|
||||
echo "No instances found matching '$filter'"
|
||||
else
|
||||
gcloud_DeleteInstances true
|
||||
cloud_DeleteInstances true
|
||||
fi
|
||||
done
|
||||
rm -f "$configFile"
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-delete-complete=1"
|
||||
|
||||
}
|
||||
|
||||
case $command in
|
||||
delete)
|
||||
delete
|
||||
;;
|
||||
|
||||
create)
|
||||
[[ -n $validatorNodeCount ]] || usage "Need number of nodes"
|
||||
if [[ $validatorNodeCount -le 0 ]]; then
|
||||
usage "One or more validator nodes is required"
|
||||
fi
|
||||
|
||||
delete
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-create-begin=1"
|
||||
|
||||
rm -rf "$sshPrivateKey"{,.pub}
|
||||
ssh-keygen -t ecdsa -N '' -f "$sshPrivateKey"
|
||||
|
||||
# Note: using rsa because |aws ec2 import-key-pair| seems to fail for ecdsa
|
||||
ssh-keygen -t rsa -N '' -f "$sshPrivateKey"
|
||||
|
||||
printNetworkInfo() {
|
||||
cat <<EOF
|
||||
========================================================================================
|
||||
|
||||
Network composition:
|
||||
Leader = $leaderMachineType (GPU=${leaderAccelerator:-none})
|
||||
Validators = $validatorNodeCount x $validatorMachineType (GPU=${validatorAccelerator:-none})
|
||||
Client(s) = $clientNodeCount x $clientMachineType (GPU=${clientAccelerator:-none})
|
||||
Leader = $leaderMachineType (GPU=$enableGpu)
|
||||
Validators = $validatorNodeCount x $validatorMachineType
|
||||
Client(s) = $clientNodeCount x $clientMachineType
|
||||
|
||||
========================================================================================
|
||||
|
||||
@ -261,7 +378,7 @@ EOF
|
||||
}
|
||||
printNetworkInfo
|
||||
|
||||
declare startupScript="$netConfigDir"/gce-startup-script.sh
|
||||
declare startupScript="$netConfigDir"/instance-startup-script.sh
|
||||
cat > "$startupScript" <<EOF
|
||||
#!/bin/bash -ex
|
||||
# autogenerated at $(date)
|
||||
@ -270,11 +387,12 @@ cat > /etc/motd <<EOM
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
This instance has not been fully configured.
|
||||
See "startup-script" log messages in /var/log/syslog for status:
|
||||
$ sudo cat /var/log/syslog | grep startup-script
|
||||
|
||||
See startup script log messages in /var/log/syslog for status:
|
||||
$ sudo cat /var/log/syslog | egrep \\(startup-script\\|cloud-init\)
|
||||
|
||||
To block until setup is complete, run:
|
||||
$ until [[ -f /.gce-startup-complete ]]; do sleep 1; done
|
||||
$ until [[ -f /.instance-startup-complete ]]; do sleep 1; done
|
||||
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
EOM
|
||||
@ -296,31 +414,36 @@ $(
|
||||
cat \
|
||||
disable-background-upgrades.sh \
|
||||
create-solana-user.sh \
|
||||
add-solana-user-authorized_keys.sh \
|
||||
install-earlyoom.sh \
|
||||
install-rsync.sh \
|
||||
install-libssl-compatability.sh \
|
||||
install-rsync.sh \
|
||||
network-config.sh \
|
||||
remove-docker-interface.sh \
|
||||
update-default-cuda.sh \
|
||||
|
||||
)
|
||||
|
||||
cat > /etc/motd <<EOM
|
||||
$(printNetworkInfo)
|
||||
EOM
|
||||
|
||||
touch /.gce-startup-complete
|
||||
touch /.instance-startup-complete
|
||||
|
||||
EOF
|
||||
|
||||
gcloud_CreateInstances "$prefix-leader" 1 "$zone" \
|
||||
"$imageName" "$leaderMachineType" "$leaderBootDiskSize" "$leaderAccelerator" \
|
||||
"$startupScript" "$leaderAddress"
|
||||
cloud_CreateInstances "$prefix" "$prefix-leader" 1 \
|
||||
"$imageName" "$leaderMachineType" "$leaderBootDiskSizeInGb" \
|
||||
"$startupScript" "$leaderAddress" "$bootDiskType"
|
||||
|
||||
gcloud_CreateInstances "$prefix-validator" "$validatorNodeCount" "$zone" \
|
||||
"$imageName" "$validatorMachineType" "$validatorBootDiskSize" "$validatorAccelerator" \
|
||||
"$startupScript" ""
|
||||
cloud_CreateInstances "$prefix" "$prefix-validator" "$validatorNodeCount" \
|
||||
"$imageName" "$validatorMachineType" "$validatorBootDiskSizeInGb" \
|
||||
"$startupScript" "" "$bootDiskType"
|
||||
|
||||
if [[ $clientNodeCount -gt 0 ]]; then
|
||||
gcloud_CreateInstances "$prefix-client" "$clientNodeCount" "$zone" \
|
||||
"$imageName" "$clientMachineType" "$clientBootDiskSize" "$clientAccelerator" \
|
||||
"$startupScript" ""
|
||||
cloud_CreateInstances "$prefix" "$prefix-client" "$clientNodeCount" \
|
||||
"$imageName" "$clientMachineType" "$clientBootDiskSizeInGb" \
|
||||
"$startupScript" "" "$bootDiskType"
|
||||
fi
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-create-complete=1"
|
||||
|
91
net/net.sh
@ -23,10 +23,14 @@ Operate a configured testnet
|
||||
restart - Shortcut for stop then start
|
||||
|
||||
start-specific options:
|
||||
-S [snapFilename] - Deploy the specified Snap file
|
||||
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
|
||||
-f [cargoFeatures] - List of |cargo --feaures=| to activate
|
||||
(ignored if -s or -S is specified)
|
||||
-S [snapFilename] - Deploy the specified Snap file
|
||||
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
|
||||
-T [tarFilename] - Deploy the specified release tarball
|
||||
-t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
|
||||
specified release channel (edge|beta|stable) or release tag
|
||||
(vX.Y.Z)
|
||||
-f [cargoFeatures] - List of |cargo --features=| to activate
|
||||
(ignored if -s or -S is specified)
|
||||
|
||||
Note: if RUST_LOG is set in the environment it will be propagated into the
|
||||
network nodes.
|
||||
@ -34,6 +38,7 @@ Operate a configured testnet
|
||||
sanity/start-specific options:
|
||||
-o noLedgerVerify - Skip ledger verification
|
||||
-o noValidatorSanity - Skip validator sanity
|
||||
-o rejectExtraNodes - Require the exact number of nodes
|
||||
|
||||
stop-specific options:
|
||||
none
|
||||
@ -43,6 +48,7 @@ EOF
|
||||
}
|
||||
|
||||
snapChannel=
|
||||
releaseChannel=
|
||||
snapFilename=
|
||||
deployMethod=local
|
||||
sanityExtraArgs=
|
||||
@ -52,7 +58,7 @@ command=$1
|
||||
[[ -n $command ]] || usage
|
||||
shift
|
||||
|
||||
while getopts "h?S:s:o:f:" opt; do
|
||||
while getopts "h?S:s:T:t:o:f:" opt; do
|
||||
case $opt in
|
||||
h | \?)
|
||||
usage
|
||||
@ -73,12 +79,28 @@ while getopts "h?S:s:o:f:" opt; do
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
T)
|
||||
tarballFilename=$OPTARG
|
||||
[[ -f $tarballFilename ]] || usage "Tarball not readable: $tarballFilename"
|
||||
deployMethod=tar
|
||||
;;
|
||||
t)
|
||||
case $OPTARG in
|
||||
edge|beta|stable|v*)
|
||||
releaseChannel=$OPTARG
|
||||
deployMethod=tar
|
||||
;;
|
||||
*)
|
||||
usage "Invalid release channel: $OPTARG"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
f)
|
||||
cargoFeatures=$OPTARG
|
||||
;;
|
||||
o)
|
||||
case $OPTARG in
|
||||
noLedgerVerify|noValidatorSanity)
|
||||
noLedgerVerify|noValidatorSanity|rejectExtraNodes)
|
||||
sanityExtraArgs="$sanityExtraArgs -o $OPTARG"
|
||||
;;
|
||||
*)
|
||||
@ -109,6 +131,7 @@ build() {
|
||||
set -x
|
||||
rm -rf farf
|
||||
$MAYBE_DOCKER cargo install --features="$cargoFeatures" --root farf
|
||||
./scripts/install-native-programs.sh farf/
|
||||
)
|
||||
echo "Build took $SECONDS seconds"
|
||||
}
|
||||
@ -137,6 +160,9 @@ startLeader() {
|
||||
snap)
|
||||
rsync -vPrc -e "ssh ${sshOptions[*]}" "$snapFilename" "$ipAddress:~/solana/solana.snap"
|
||||
;;
|
||||
tar)
|
||||
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
|
||||
;;
|
||||
local)
|
||||
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
|
||||
;;
|
||||
@ -158,7 +184,7 @@ startValidator() {
|
||||
declare ipAddress=$1
|
||||
declare logFile="$netLogDir/validator-$ipAddress.log"
|
||||
|
||||
echo "--- Starting validator: $leaderIp"
|
||||
echo "--- Starting validator: $ipAddress"
|
||||
echo "start log: $logFile"
|
||||
(
|
||||
set -x
|
||||
@ -180,7 +206,7 @@ startClient() {
|
||||
set -x
|
||||
startCommon "$ipAddress"
|
||||
ssh "${sshOptions[@]}" -f "$ipAddress" \
|
||||
"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp $expectedNodeCount \"$RUST_LOG\""
|
||||
"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \"$RUST_LOG\""
|
||||
) >> "$logFile" 2>&1 || {
|
||||
cat "$logFile"
|
||||
echo "^^^ +++"
|
||||
@ -195,10 +221,11 @@ sanity() {
|
||||
echo "--- Sanity"
|
||||
$metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"
|
||||
|
||||
declare host=$leaderIp # TODO: maybe use ${validatorIpList[0]} ?
|
||||
(
|
||||
set -x
|
||||
# shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
|
||||
ssh "${sshOptions[@]}" "$leaderIp" \
|
||||
ssh "${sshOptions[@]}" "$host" \
|
||||
"./solana/net/remote/remote-sanity.sh $sanityExtraArgs"
|
||||
) || ok=false
|
||||
|
||||
@ -218,13 +245,17 @@ start() {
|
||||
set -ex;
|
||||
apt-get -qq update;
|
||||
apt-get -qq -y install snapd;
|
||||
snap download --channel=$snapChannel solana;
|
||||
until snap download --channel=$snapChannel solana; do
|
||||
sleep 1;
|
||||
done
|
||||
"
|
||||
)
|
||||
else
|
||||
(
|
||||
cd "$SOLANA_ROOT"
|
||||
snap download --channel="$snapChannel" solana
|
||||
until snap download --channel="$snapChannel" solana; do
|
||||
sleep 1
|
||||
done
|
||||
)
|
||||
fi
|
||||
snapFilename="$(echo "$SOLANA_ROOT"/solana_*.snap)"
|
||||
@ -234,6 +265,17 @@ start() {
|
||||
}
|
||||
fi
|
||||
;;
|
||||
tar)
|
||||
if [[ -n $releaseChannel ]]; then
|
||||
rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
|
||||
cd "$SOLANA_ROOT"
|
||||
|
||||
set -x
|
||||
curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
|
||||
tarballFilename=solana-release.tar.bz2
|
||||
fi
|
||||
tar jxvf "$tarballFilename"
|
||||
;;
|
||||
local)
|
||||
build
|
||||
;;
|
||||
@ -253,8 +295,14 @@ start() {
|
||||
|
||||
SECONDS=0
|
||||
pids=()
|
||||
loopCount=0
|
||||
for ipAddress in "${validatorIpList[@]}"; do
|
||||
startValidator "$ipAddress"
|
||||
|
||||
# Stagger validator startup time. If too many validators
|
||||
# boot up simultaneously, the leader node gets more rsync requests
|
||||
# from the validators than it can handle.
|
||||
((loopCount++ % 2 == 0)) && sleep 2
|
||||
done
|
||||
|
||||
for pid in "${pids[@]}"; do
|
||||
@ -279,15 +327,28 @@ start() {
|
||||
clientDeployTime=$SECONDS
|
||||
$metricsWriteDatapoint "testnet-deploy net-start-complete=1"
|
||||
|
||||
if [[ $deployMethod = "snap" ]]; then
|
||||
declare networkVersion=unknown
|
||||
declare networkVersion=unknown
|
||||
case $deployMethod in
|
||||
snap)
|
||||
IFS=\ read -r _ networkVersion _ < <(
|
||||
ssh "${sshOptions[@]}" "$leaderIp" \
|
||||
"snap info solana | grep \"^installed:\""
|
||||
)
|
||||
networkVersion=${networkVersion/0+git./}
|
||||
$metricsWriteDatapoint "testnet-deploy version=\"$networkVersion\""
|
||||
fi
|
||||
;;
|
||||
tar)
|
||||
networkVersion="$(
|
||||
tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown"
|
||||
)"
|
||||
;;
|
||||
local)
|
||||
networkVersion="$(git rev-parse HEAD || echo local-unknown)"
|
||||
;;
|
||||
*)
|
||||
usage "Internal error: invalid deployMethod: $deployMethod"
|
||||
;;
|
||||
esac
|
||||
$metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""
|
||||
|
||||
echo
|
||||
echo "+++ Deployment Successful"
|
||||
|
@ -6,8 +6,7 @@ echo "$(date) | $0 $*" > client.log
|
||||
|
||||
deployMethod="$1"
|
||||
entrypointIp="$2"
|
||||
numNodes="$3"
|
||||
RUST_LOG="$4"
|
||||
RUST_LOG="$3"
|
||||
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
|
||||
|
||||
missing() {
|
||||
@ -17,7 +16,6 @@ missing() {
|
||||
|
||||
[[ -n $deployMethod ]] || missing deployMethod
|
||||
[[ -n $entrypointIp ]] || missing entrypointIp
|
||||
[[ -n $numNodes ]] || missing numNodes
|
||||
|
||||
source net/common.sh
|
||||
loadConfigFile
|
||||
@ -35,7 +33,7 @@ snap)
|
||||
solana_bench_tps=/snap/bin/solana.bench-tps
|
||||
solana_keygen=/snap/bin/solana.keygen
|
||||
;;
|
||||
local)
|
||||
local|tar)
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
export USE_INSTALL=1
|
||||
export SOLANA_DEFAULT_METRICS_RATE=1
|
||||
@ -58,8 +56,7 @@ clientCommand="\
|
||||
$solana_bench_tps \
|
||||
--network $entrypointIp:8001 \
|
||||
--identity client.json \
|
||||
--num-nodes $numNodes \
|
||||
--duration 600 \
|
||||
--duration 7500 \
|
||||
--sustained \
|
||||
--threads $threadCount \
|
||||
"
|
||||
|
@ -35,7 +35,6 @@ else
|
||||
setupArgs="-l"
|
||||
fi
|
||||
|
||||
|
||||
case $deployMethod in
|
||||
snap)
|
||||
SECONDS=0
|
||||
@ -43,12 +42,13 @@ snap)
|
||||
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/solana/solana.snap" .
|
||||
sudo snap install solana.snap --devmode --dangerous
|
||||
|
||||
# shellcheck disable=SC2089
|
||||
commonNodeConfig="\
|
||||
leader-ip=$entrypointIp \
|
||||
leader-ip=\"$entrypointIp\" \
|
||||
default-metrics-rate=1 \
|
||||
metrics-config=$SOLANA_METRICS_CONFIG \
|
||||
rust-log=$RUST_LOG \
|
||||
setup-args=$setupArgs \
|
||||
metrics-config=\"$SOLANA_METRICS_CONFIG\" \
|
||||
rust-log=\"$RUST_LOG\" \
|
||||
setup-args=\"$setupArgs\" \
|
||||
"
|
||||
|
||||
if [[ -e /dev/nvidia0 ]]; then
|
||||
@ -67,7 +67,7 @@ snap)
|
||||
logmarker="solana deploy $(date)/$RANDOM"
|
||||
logger "$logmarker"
|
||||
|
||||
# shellcheck disable=SC2086 # Don't want to double quote "$nodeConfig"
|
||||
# shellcheck disable=SC2086,SC2090 # Don't want to double quote "$nodeConfig"
|
||||
sudo snap set solana $nodeConfig
|
||||
snap info solana
|
||||
sudo snap get solana
|
||||
@ -77,20 +77,25 @@ snap)
|
||||
|
||||
echo "Succeeded in ${SECONDS} seconds"
|
||||
;;
|
||||
local)
|
||||
local|tar)
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
export USE_INSTALL=1
|
||||
export RUST_LOG
|
||||
export SOLANA_DEFAULT_METRICS_RATE=1
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
export LD_LIBRARY_PATH="$PWD/target/perf-libs:$LD_LIBRARY_PATH"
|
||||
export LD_LIBRARY_PATH="$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
|
||||
echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
|
||||
|
||||
scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
|
||||
scripts/net-stats.sh > net-stats.log 2>&1 &
|
||||
|
||||
case $nodeType in
|
||||
leader)
|
||||
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
|
||||
echo Selecting solana-fullnode-cuda
|
||||
export SOLANA_CUDA=1
|
||||
fi
|
||||
./multinode-demo/setup.sh -t leader $setupArgs
|
||||
./multinode-demo/drone.sh > drone.log 2>&1 &
|
||||
./multinode-demo/leader.sh > leader.log 2>&1 &
|
||||
@ -98,6 +103,11 @@ local)
|
||||
validator)
|
||||
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/
|
||||
|
||||
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
|
||||
echo Selecting solana-fullnode-cuda
|
||||
export SOLANA_CUDA=1
|
||||
fi
|
||||
|
||||
./multinode-demo/setup.sh -t validator $setupArgs
|
||||
./multinode-demo/validator.sh "$entrypointIp":~/solana "$entrypointIp:8001" >validator.log 2>&1 &
|
||||
;;
|
||||
|
@ -27,6 +27,7 @@ missing() {
|
||||
|
||||
ledgerVerify=true
|
||||
validatorSanity=true
|
||||
rejectExtraNodes=false
|
||||
while [[ $1 = -o ]]; do
|
||||
opt="$2"
|
||||
shift 2
|
||||
@ -37,6 +38,9 @@ while [[ $1 = -o ]]; do
|
||||
noValidatorSanity)
|
||||
validatorSanity=false
|
||||
;;
|
||||
rejectExtraNodes)
|
||||
rejectExtraNodes=true
|
||||
;;
|
||||
*)
|
||||
echo "Error: unknown option: $opt"
|
||||
exit 1
|
||||
@ -61,7 +65,7 @@ snap)
|
||||
client_id=~/snap/solana/current/config/client-id.json
|
||||
|
||||
;;
|
||||
local)
|
||||
local|tar)
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
export USE_INSTALL=1
|
||||
entrypointRsyncUrl="$entrypointIp:~/solana"
|
||||
@ -82,14 +86,25 @@ esac
|
||||
echo "--- $entrypointIp: wallet sanity"
|
||||
(
|
||||
set -x
|
||||
scripts/wallet-sanity.sh "$entrypointRsyncUrl"
|
||||
scripts/wallet-sanity.sh "$entrypointIp:8001"
|
||||
)
|
||||
|
||||
echo "+++ $entrypointIp: node count ($numNodes expected)"
|
||||
(
|
||||
set -x
|
||||
$solana_keygen -o "$client_id"
|
||||
$solana_bench_tps --network "$entrypointIp:8001" --identity "$client_id" --num-nodes "$numNodes" --converge-only
|
||||
|
||||
maybeRejectExtraNodes=
|
||||
if $rejectExtraNodes; then
|
||||
maybeRejectExtraNodes="--reject-extra-nodes"
|
||||
fi
|
||||
|
||||
$solana_bench_tps \
|
||||
--network "$entrypointIp:8001" \
|
||||
--identity "$client_id" \
|
||||
--num-nodes "$numNodes" \
|
||||
$maybeRejectExtraNodes \
|
||||
--converge-only
|
||||
)
|
||||
|
||||
echo "--- $entrypointIp: verify ledger"
|
||||
|
20
net/scripts/add-solana-user-authorized_keys.sh
Executable file
@ -0,0 +1,20 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
[[ -d /home/solana/.ssh ]] || exit 1
|
||||
|
||||
# /solana-authorized_keys contains the public keys for users that should
|
||||
# automatically be granted access to ALL testnets.
|
||||
#
|
||||
# To add an entry into this list:
|
||||
# 1. Run: ssh-keygen -t ecdsa -N '' -f ~/.ssh/id-solana-testnet
|
||||
# 2. Inline ~/.ssh/id-solana-testnet.pub below
|
||||
cat > /solana-authorized_keys <<EOF
|
||||
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc=
|
||||
EOF
|
||||
|
||||
sudo -u solana bash -c "
|
||||
cat /solana-authorized_keys >> /home/solana/.ssh/authorized_keys
|
||||
"
|
240
net/scripts/ec2-provider.sh
Normal file
@ -0,0 +1,240 @@
|
||||
# |source| this file
|
||||
#
|
||||
# Utilities for working with EC2 instances
|
||||
#
|
||||
|
||||
zone=
|
||||
region=
|
||||
|
||||
cloud_SetZone() {
|
||||
zone="$1"
|
||||
# AWS region is zone with the last character removed
|
||||
region="${zone:0:$((${#zone} - 1))}"
|
||||
}
|
||||
|
||||
# Set the default zone
|
||||
cloud_SetZone "us-east-1b"
|
||||
|
||||
# sshPrivateKey should be globally defined whenever this function is called.
|
||||
#
|
||||
# TODO: Remove usage of the sshPrivateKey global
|
||||
__cloud_SshPrivateKeyCheck() {
|
||||
# shellcheck disable=SC2154
|
||||
if [[ -z $sshPrivateKey ]]; then
|
||||
echo Error: sshPrivateKey not defined
|
||||
exit 1
|
||||
fi
|
||||
if [[ ! -r $sshPrivateKey ]]; then
|
||||
echo "Error: file is not readable: $sshPrivateKey"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# __cloud_FindInstances
|
||||
#
|
||||
# Find instances with name matching the specified pattern.
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# filter - The instances to filter on
|
||||
#
|
||||
# examples:
|
||||
# $ __cloud_FindInstances "exact-machine-name"
|
||||
# $ __cloud_FindInstances "all-machines-with-a-common-machine-prefix*"
|
||||
#
|
||||
__cloud_FindInstances() {
|
||||
declare filter="$1"
|
||||
|
||||
instances=()
|
||||
declare name publicIp privateIp
|
||||
while read -r name publicIp privateIp; do
|
||||
printf "%-30s | publicIp=%-16s privateIp=%s\n" "$name" "$publicIp" "$privateIp"
|
||||
instances+=("$name:$publicIp:$privateIp")
|
||||
done < <(aws ec2 describe-instances \
|
||||
--region "$region" \
|
||||
--filters \
|
||||
"Name=tag:name,Values=$filter" \
|
||||
"Name=instance-state-name,Values=pending,running" \
|
||||
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress]" \
|
||||
--output text
|
||||
)
|
||||
}
|
||||
|
||||
#
|
||||
# cloud_FindInstances [namePrefix]
|
||||
#
|
||||
# Find instances with names matching the specified prefix
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# namePrefix - The instance name prefix to look for
|
||||
#
|
||||
# examples:
|
||||
# $ cloud_FindInstances all-machines-with-a-common-machine-prefix
|
||||
#
|
||||
cloud_FindInstances() {
|
||||
declare namePrefix="$1"
|
||||
__cloud_FindInstances "$namePrefix*"
|
||||
}
|
||||
|
||||
#
|
||||
# cloud_FindInstance [name]
|
||||
#
|
||||
# Find an instance with a name matching the exact pattern.
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# name - The instance name to look for
|
||||
#
|
||||
# examples:
|
||||
# $ cloud_FindInstance exact-machine-name
|
||||
#
|
||||
cloud_FindInstance() {
|
||||
declare name="$1"
|
||||
__cloud_FindInstances "$name"
|
||||
}
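# Usage sketch (the "testnet-abc" prefix is illustrative): after a find, each
# `instances` entry is a "name:public IP:private IP" triple that can be split
# with IFS, just as the caller does for each instance.
#
#   cloud_FindInstances "testnet-abc-validator"   # matches testnet-abc-validator*
#   for info in "${instances[@]}"; do
#     IFS=: read -r name publicIp privateIp < <(echo "$info")
#     echo "$name -> $publicIp ($privateIp)"
#   done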
|
||||
|
||||
|
||||
#
|
||||
# cloud_CreateInstances [networkName] [namePrefix] [numNodes] [imageName]
|
||||
# [machineType] [bootDiskSize] [startupScript] [address]
|
||||
#
|
||||
# Creates one or more identical instances.
|
||||
#
|
||||
# networkName - unique name of this testnet
|
||||
# namePrefix - unique string to prefix all the instance names with
|
||||
# numNodes - number of instances to create
|
||||
# imageName - Disk image for the instances
|
||||
# machineType - GCE machine type
|
||||
# bootDiskSize - Optional size of the boot disk in GB
|
||||
# startupScript - Optional startup script to execute when the instance boots
|
||||
# address - Optional EC2 Elastic IP allocation ID to associate with the
|
||||
# instance. Requires that |numNodes| = 1 and that the address
|
||||
# has been allocated in the AWS region that is hosting |zone|
|
||||
#
|
||||
# Tip: use cloud_FindInstances to locate the instances once this function
|
||||
# returns
|
||||
cloud_CreateInstances() {
|
||||
declare networkName="$1"
|
||||
declare namePrefix="$2"
|
||||
declare numNodes="$3"
|
||||
declare imageName="$4"
|
||||
declare machineType="$5"
|
||||
declare optionalBootDiskSize="$6"
|
||||
declare optionalStartupScript="$7"
|
||||
declare optionalAddress="$8"
|
||||
|
||||
__cloud_SshPrivateKeyCheck
|
||||
(
|
||||
set -x
|
||||
aws ec2 delete-key-pair --region "$region" --key-name "$networkName"
|
||||
aws ec2 import-key-pair --region "$region" --key-name "$networkName" \
|
||||
--public-key-material file://"${sshPrivateKey}".pub
|
||||
)
|
||||
|
||||
declare -a args
|
||||
args=(
|
||||
--key-name "$networkName"
|
||||
--count "$numNodes"
|
||||
--region "$region"
|
||||
--placement "AvailabilityZone=$zone"
|
||||
--security-groups testnet
|
||||
--image-id "$imageName"
|
||||
--instance-type "$machineType"
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=name,Value=$namePrefix}]"
|
||||
)
|
||||
if [[ -n $optionalBootDiskSize ]]; then
|
||||
args+=(
|
||||
--block-device-mapping "[{\"DeviceName\": \"/dev/sda1\", \"Ebs\": { \"VolumeSize\": $optionalBootDiskSize }}]"
|
||||
)
|
||||
fi
|
||||
if [[ -n $optionalStartupScript ]]; then
|
||||
args+=(
|
||||
--user-data "file://$optionalStartupScript"
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ -n $optionalAddress ]]; then
|
||||
[[ $numNodes = 1 ]] || {
|
||||
echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
aws ec2 run-instances "${args[@]}"
|
||||
)
|
||||
|
||||
if [[ -n $optionalAddress ]]; then
|
||||
cloud_FindInstance "$namePrefix"
|
||||
if [[ ${#instances[@]} -ne 1 ]]; then
|
||||
echo "Failed to find newly created instance: $namePrefix"
|
||||
fi
|
||||
|
||||
declare instanceId
|
||||
IFS=: read -r instanceId _ < <(echo "${instances[0]}")
|
||||
(
|
||||
set -x
|
||||
# TODO: Poll that the instance has moved to the 'running' state instead of
|
||||
# blindly sleeping for 30 seconds...
|
||||
sleep 30
|
||||
aws ec2 associate-address \
|
||||
--instance-id "$instanceId" \
|
||||
--region "$region" \
|
||||
--allocation-id "$optionalAddress"
|
||||
)
|
||||
fi
|
||||
}
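# Usage sketch (machine type and disk size are illustrative values only):
# create a single leader instance from the us-east-1 Deep Learning AMI listed
# by the caller.
#
#   cloud_CreateInstances "$prefix" "$prefix-leader" 1 \
#     "ami-047daf3f2b162fc35" "m4.2xlarge" 100 \
#     "$startupScript" "$leaderAddress"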
|
||||
|
||||
#
|
||||
# cloud_DeleteInstances
|
||||
#
|
||||
# Deletes all the instances listed in the `instances` array
|
||||
#
|
||||
cloud_DeleteInstances() {
|
||||
if [[ ${#instances[@]} -eq 0 ]]; then
|
||||
echo No instances to delete
|
||||
return
|
||||
fi
|
||||
declare names=("${instances[@]/:*/}")
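# Note on the expansion above (added for clarity): "${instances[@]/:*/}"
# strips everything from the first ":" onward in each entry, so an entry like
# "i-0abc1234:54.1.2.3:10.0.0.5" is reduced to the instance id "i-0abc1234".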
|
||||
(
|
||||
set -x
|
||||
aws ec2 terminate-instances --region "$region" --instance-ids "${names[@]}"
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# cloud_FetchFile [instanceName] [publicIp] [remoteFile] [localFile]
|
||||
#
|
||||
# Fetch a file from the given instance. This function uses a cloud-specific
|
||||
# mechanism to fetch the file
|
||||
#
|
||||
cloud_FetchFile() {
|
||||
# shellcheck disable=SC2034 # instanceName is unused
|
||||
declare instanceName="$1"
|
||||
declare publicIp="$2"
|
||||
declare remoteFile="$3"
|
||||
declare localFile="$4"
|
||||
|
||||
__cloud_SshPrivateKeyCheck
|
||||
(
|
||||
set -x
|
||||
scp \
|
||||
-o "StrictHostKeyChecking=no" \
|
||||
-o "UserKnownHostsFile=/dev/null" \
|
||||
-o "User=solana" \
|
||||
-o "IdentityFile=$sshPrivateKey" \
|
||||
-o "LogLevel=ERROR" \
|
||||
-F /dev/null \
|
||||
"solana@$publicIp:$remoteFile" "$localFile"
|
||||
)
|
||||
}
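# Usage sketch, mirroring how the caller fetches the shared SSH key from the
# leader instance:
#
#   cloud_FetchFile "$leaderName" "$leaderIp" /solana-id_ecdsa "$sshPrivateKey"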
|
204
net/scripts/gce-provider.sh
Normal file
@ -0,0 +1,204 @@
|
||||
# |source| this file
|
||||
#
|
||||
# Utilities for working with GCE instances
|
||||
#
|
||||
|
||||
# Default zone
|
||||
zone="us-west1-b"
|
||||
cloud_SetZone() {
|
||||
zone="$1"
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# __cloud_FindInstances
|
||||
#
|
||||
# Find instances matching the specified pattern.
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:zone:public IP:private IP"
|
||||
#
|
||||
# filter - The instances to filter on
|
||||
#
|
||||
# examples:
|
||||
# $ __cloud_FindInstances "name=exact-machine-name"
|
||||
# $ __cloud_FindInstances "name~^all-machines-with-a-common-machine-prefix"
|
||||
#
|
||||
__cloud_FindInstances() {
|
||||
declare filter="$1"
|
||||
instances=()
|
||||
|
||||
declare name zone publicIp privateIp status
|
||||
while read -r name publicIp privateIp status; do
|
||||
printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"
|
||||
|
||||
instances+=("$name:$publicIp:$privateIp")
|
||||
done < <(gcloud compute instances list \
|
||||
--filter "$filter" \
|
||||
--format 'value(name,networkInterfaces[0].accessConfigs[0].natIP,networkInterfaces[0].networkIP,status)')
|
||||
}
|
||||
#
|
||||
# cloud_FindInstances [namePrefix]
|
||||
#
|
||||
# Find instances with names matching the specified prefix
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# namePrefix - The instance name prefix to look for
|
||||
#
|
||||
# examples:
|
||||
# $ cloud_FindInstances all-machines-with-a-common-machine-prefix
|
||||
#
|
||||
cloud_FindInstances() {
|
||||
declare namePrefix="$1"
|
||||
__cloud_FindInstances "name~^$namePrefix"
|
||||
}
|
||||
|
||||
#
|
||||
# cloud_FindInstance [name]
|
||||
#
|
||||
# Find an instance with a name matching the exact pattern.
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# name - The instance name to look for
|
||||
#
|
||||
# examples:
|
||||
# $ cloud_FindInstance exact-machine-name
|
||||
#
|
||||
cloud_FindInstance() {
|
||||
declare name="$1"
|
||||
__cloud_FindInstances "name=$name"
|
||||
}
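# Usage sketch: both helpers delegate to __cloud_FindInstances with a gcloud
# filter expression; the prefix form uses a regex match and the exact form
# uses equality.
#
#   cloud_FindInstances "$prefix-validator"   # filter: name~^$prefix-validator
#   cloud_FindInstance "$prefix-leader"       # filter: name=$prefix-leader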
|
||||
|
||||
#
|
||||
# cloud_CreateInstances [networkName] [namePrefix] [numNodes] [imageName]
|
||||
# [machineType] [bootDiskSize] [startupScript]
|
||||
# [address] [bootDiskType]
|
||||
#
|
||||
# Creates one or more identical instances.
|
||||
#
|
||||
# networkName - unique name of this testnet
|
||||
# namePrefix - unique string to prefix all the instance names with
|
||||
# numNodes - number of instances to create
|
||||
# imageName - Disk image for the instances
|
||||
# machineType - GCE machine type. Note that this may also include an
|
||||
# `--accelerator=` or other |gcloud compute instances create|
|
||||
# options
|
||||
# bootDiskSize - Optional size of the boot disk in GB
|
||||
# bootDiskType - Optional boot disk type
|
||||
# startupScript - Optional startup script to execute when the instance boots
|
||||
# address - Optional name of the GCE static IP address to attach to the
|
||||
# instance. Requires that |numNodes| = 1 and that addressName
|
||||
# has been provisioned in the GCE region that is hosting `$zone`
|
||||
#
|
||||
# Tip: use cloud_FindInstances to locate the instances once this function
|
||||
# returns
|
||||
cloud_CreateInstances() {
|
||||
declare networkName="$1"
|
||||
declare namePrefix="$2"
|
||||
declare numNodes="$3"
|
||||
declare imageName="$4"
|
||||
declare machineType="$5"
|
||||
declare optionalBootDiskSize="$6"
|
||||
declare optionalStartupScript="$7"
|
||||
declare optionalAddress="$8"
|
||||
declare optionalBootDiskType="$9"
|
||||
|
||||
declare nodes
|
||||
if [[ $numNodes = 1 ]]; then
|
||||
nodes=("$namePrefix")
|
||||
else
|
||||
read -ra nodes <<<$(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes")
|
||||
fi
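# Naming sketch (prefix is illustrative): with namePrefix="testnet-validator"
# and numNodes=12, the format string becomes "testnet-validator%02g", so
# nodes=(testnet-validator01 ... testnet-validator12).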
|
||||
|
||||
declare -a args
|
||||
args=(
|
||||
--zone "$zone"
|
||||
--tags testnet
|
||||
--metadata "testnet=$networkName"
|
||||
--image "$imageName"
|
||||
--maintenance-policy TERMINATE
|
||||
--no-restart-on-failure
|
||||
)
|
||||
|
||||
# shellcheck disable=SC2206 # Do not want to quote $imageName as it may contain extra args
|
||||
args+=(--image $imageName)
|
||||
|
||||
# shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
|
||||
args+=(--machine-type $machineType)
|
||||
if [[ -n $optionalBootDiskSize ]]; then
|
||||
args+=(
|
||||
--boot-disk-size "${optionalBootDiskSize}GB"
|
||||
)
|
||||
fi
|
||||
if [[ -n $optionalStartupScript ]]; then
|
||||
args+=(
|
||||
--metadata-from-file "startup-script=$optionalStartupScript"
|
||||
)
|
||||
fi
|
||||
if [[ -n $optionalBootDiskType ]]; then
|
||||
args+=(
|
||||
--boot-disk-type "${optionalBootDiskType}"
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ -n $optionalAddress ]]; then
|
||||
[[ $numNodes = 1 ]] || {
|
||||
echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
|
||||
exit 1
|
||||
}
|
||||
args+=(
|
||||
--address "$optionalAddress"
|
||||
)
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
|
||||
)
|
||||
}
|
||||
|
||||
#
|
||||
# cloud_DeleteInstances
|
||||
#
|
||||
# Deletes all the instances listed in the `instances` array
|
||||
#
|
||||
cloud_DeleteInstances() {
|
||||
if [[ ${#instances[@]} -eq 0 ]]; then
|
||||
echo No instances to delete
|
||||
return
|
||||
fi
|
||||
declare names=("${instances[@]/:*/}")
|
||||
|
||||
(
|
||||
set -x
|
||||
gcloud beta compute instances delete --zone "$zone" --quiet "${names[@]}"
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# cloud_FetchFile [instanceName] [publicIp] [remoteFile] [localFile]
|
||||
#
|
||||
# Fetch a file from the given instance. This function uses a cloud-specific
|
||||
# mechanism to fetch the file
|
||||
#
|
||||
cloud_FetchFile() {
|
||||
declare instanceName="$1"
|
||||
# shellcheck disable=SC2034 # publicIp is unused
|
||||
declare publicIp="$2"
|
||||
declare remoteFile="$3"
|
||||
declare localFile="$4"
|
||||
|
||||
(
|
||||
set -x
|
||||
gcloud compute scp --zone "$zone" "$instanceName:$remoteFile" "$localFile"
|
||||
)
|
||||
}
|
@ -1,187 +0,0 @@
|
||||
# |source| this file
|
||||
#
|
||||
# Utilities for working with gcloud
|
||||
#
|
||||
|
||||
|
||||
#
|
||||
# gcloud_FindInstances [filter] [options]
|
||||
#
|
||||
# Find instances matching the specified pattern.
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:zone:public IP:private IP"
|
||||
#
|
||||
# filter - The instances to filter on
|
||||
# options - If set to the string "show", the list of instances will be echoed
|
||||
# to stdout
|
||||
#
|
||||
# examples:
|
||||
# $ gcloud_FindInstances "name=exact-machine-name"
|
||||
# $ gcloud_FindInstances "name~^all-machines-with-a-common-machine-prefix"
|
||||
#
|
||||
gcloud_FindInstances() {
|
||||
declare filter="$1"
|
||||
declare options="$2"
|
||||
instances=()
|
||||
|
||||
declare name zone publicIp privateIp status
|
||||
while read -r name zone publicIp privateIp status; do
|
||||
if [[ $status != RUNNING ]]; then
|
||||
echo "Warning: $name is not RUNNING, ignoring it."
|
||||
continue
|
||||
fi
|
||||
if [[ $options = show ]]; then
|
||||
printf "%-30s | %-16s publicIp=%-16s privateIp=%s\n" "$name" "$zone" "$publicIp" "$privateIp"
|
||||
fi
|
||||
|
||||
instances+=("$name:$zone:$publicIp:$privateIp")
|
||||
done < <(gcloud compute instances list \
|
||||
--filter="$filter" \
|
||||
--format 'value(name,zone,networkInterfaces[0].accessConfigs[0].natIP,networkInterfaces[0].networkIP,status)')
|
||||
}
|
||||
|
||||
#
|
||||
# gcloud_ForEachInstance [cmd] [extra args to cmd]
|
||||
#
|
||||
# Execute a command for each element in the `instances` array
|
||||
#
|
||||
# cmd - The command to execute on each instance
|
||||
# The command will receive arguments followed by any
|
||||
# additionl arguments supplied to gcloud_ForEachInstance:
|
||||
# name - name of the instance
|
||||
# zone - zone the instance is located in
|
||||
# publicIp - The public IP address of this instance
|
||||
# privateIp - The priate IP address of this instance
|
||||
# count - Monotonically increasing count for each
|
||||
# invocation of cmd, starting at 1
|
||||
# ... - Extra args to cmd..
|
||||
#
|
||||
#
|
||||
gcloud_ForEachInstance() {
|
||||
declare cmd="$1"
|
||||
shift
|
||||
[[ -n $cmd ]] || { echo gcloud_ForEachInstance: cmd not specified; exit 1; }
|
||||
|
||||
declare count=1
|
||||
for info in "${instances[@]}"; do
|
||||
declare name zone publicIp privateIp
|
||||
IFS=: read -r name zone publicIp privateIp < <(echo "$info")
|
||||
|
||||
eval "$cmd" "$name" "$zone" "$publicIp" "$privateIp" "$count" "$@"
|
||||
count=$((count + 1))
|
||||
done
|
||||
}
|
||||
|
||||
#
|
||||
# gcloud_CreateInstances [namePrefix] [numNodes] [zone] [imageName]
|
||||
# [machineType] [bootDiskSize] [accelerator]
|
||||
# [startupScript] [address]
|
||||
#
|
||||
# Creates one more identical instances.
|
||||
#
|
||||
# namePrefix - unique string to prefix all the instance names with
|
||||
# numNodes - number of instances to create
|
||||
# zone - zone to create the instances in
|
||||
# imageName - Disk image for the instances
|
||||
# machineType - GCE machine type
|
||||
# bootDiskSize - Optional disk of the boot disk
|
||||
# accelerator - Optional accelerator to attach to the instance(s), see
|
||||
# eg, request 4 K80 GPUs with "count=4,type=nvidia-tesla-k80"
|
||||
# startupScript - Optional startup script to execute when the instance boots
|
||||
# address - Optional name of the GCE static IP address to attach to the
|
||||
# instance. Requires that |numNodes| = 1 and that addressName
|
||||
# has been provisioned in the GCE region that is hosting |zone|
|
||||
#
|
||||
# Tip: use gcloud_FindInstances to locate the instances once this function
|
||||
# returns
|
||||
gcloud_CreateInstances() {
|
||||
declare namePrefix="$1"
|
||||
declare numNodes="$2"
|
||||
declare zone="$3"
|
||||
declare imageName="$4"
|
||||
declare machineType="$5"
|
||||
declare optionalBootDiskSize="$6"
|
||||
declare optionalAccelerator="$7"
|
||||
declare optionalStartupScript="$8"
|
||||
declare optionalAddress="$9"
|
||||
|
||||
declare nodes
|
||||
if [[ $numNodes = 1 ]]; then
|
||||
nodes=("$namePrefix")
|
||||
else
|
||||
read -ra nodes <<<$(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes")
|
||||
fi
|
||||
|
||||
declare -a args
|
||||
args=(
|
||||
"--zone=$zone"
|
||||
"--tags=testnet"
|
||||
"--image=$imageName"
|
||||
"--machine-type=$machineType"
|
||||
)
|
||||
if [[ -n $optionalBootDiskSize ]]; then
|
||||
args+=(
|
||||
"--boot-disk-size=$optionalBootDiskSize"
|
||||
)
|
||||
fi
|
||||
if [[ -n $optionalAccelerator ]]; then
|
||||
args+=(
|
||||
"--accelerator=$optionalAccelerator"
|
||||
--maintenance-policy TERMINATE
|
||||
--restart-on-failure
|
||||
)
|
||||
fi
|
||||
if [[ -n $optionalStartupScript ]]; then
|
||||
args+=(
|
||||
--metadata-from-file "startup-script=$optionalStartupScript"
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ -n $optionalAddress ]]; then
|
||||
[[ $numNodes = 1 ]] || {
|
||||
echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
|
||||
exit 1
|
||||
}
|
||||
args+=(
|
||||
"--address=$optionalAddress"
|
||||
)
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
|
||||
)
|
||||
}
|
||||
|
||||
#
|
||||
# gcloud_DeleteInstances [yes]
|
||||
#
|
||||
# Deletes all the instances listed in the `instances` array
|
||||
#
|
||||
# If yes = "true", skip the delete confirmation
|
||||
#
|
||||
gcloud_DeleteInstances() {
|
||||
declare maybeQuiet=
|
||||
if [[ $1 = true ]]; then
|
||||
maybeQuiet=--quiet
|
||||
fi
|
||||
|
||||
if [[ ${#instances[0]} -eq 0 ]]; then
|
||||
echo No instances to delete
|
||||
return
|
||||
fi
|
||||
declare names=("${instances[@]/:*/}")
|
||||
|
||||
# Assume all instances are in the same zone
|
||||
# TODO: One day this assumption will be invalid
|
||||
declare zone
|
||||
IFS=: read -r _ zone _ < <(echo "${instances[0]}")
|
||||
|
||||
(
|
||||
set -x
|
||||
gcloud beta compute instances delete --zone "$zone" $maybeQuiet "${names[@]}"
|
||||
)
|
||||
}
|
||||
|
25
net/scripts/install-docker.sh
Executable file
@ -0,0 +1,25 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
apt-get update
|
||||
apt-get install -y \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
curl \
|
||||
software-properties-common \
|
||||
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
|
||||
add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
|
||||
apt-get update
|
||||
apt-get install -y docker-ce
|
||||
docker run hello-world
|
||||
|
||||
# Grant the solana user access to docker
|
||||
if id solana; then
|
||||
addgroup solana docker
|
||||
fi
|
@ -13,8 +13,8 @@ sysctl -w kernel.sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
|
||||
if command -v earlyoom; then
|
||||
systemctl status earlyoom
|
||||
else
|
||||
wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
|
||||
apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb
|
||||
wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.1-*_amd64.deb' -e robots=off -nd
|
||||
apt install --quiet --yes ./earlyoom_1.1-*_amd64.deb
|
||||
|
||||
cat > earlyoom <<OOM
|
||||
# use the kernel OOM killer, trigger at 20% available RAM,
|
||||
|
11
net/scripts/network-config.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash -ex
|
||||
#
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
sudo sysctl -w net.core.rmem_default=1610612736
|
||||
sudo sysctl -w net.core.rmem_max=1610612736
|
||||
|
||||
sudo sysctl -w net.core.wmem_default=1610612736
|
||||
sudo sysctl -w net.core.wmem_max=1610612736
|
11
net/scripts/remove-docker-interface.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash -ex
|
||||
#
|
||||
# Some instances have docker running, and the docker0 network interface
|
||||
# confuses gossip, causing airdrops to fail. As a workaround for now, simply
|
||||
# remove the docker0 interface
|
||||
#
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
ip link delete docker0 || true
|
9
net/scripts/update-default-cuda.sh
Executable file
@ -0,0 +1,9 @@
|
||||
#!/bin/bash -ex
|
||||
#
|
||||
# Updates the default cuda symlink to the supported version
|
||||
#
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
ln -sfT /usr/local/cuda-9.2 /usr/local/cuda
|
1
programs/bpf/c/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
/out/
|
1
programs/bpf/c/makefile
Normal file
@ -0,0 +1 @@
|
||||
include sdk/bpf.mk
|
63
programs/bpf/c/sdk/README.md
Normal file
@ -0,0 +1,63 @@
|
||||
|
||||
## Prerequisites
|
||||
|
||||
## LLVM / clang 7.0.0
|
||||
http://releases.llvm.org/download.html
|
||||
|
||||
### Linux Ubuntu 16.04 (xenial)
|
||||
```
|
||||
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
$ sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install -y clang-7
|
||||
```
|
||||
|
||||
### Linux Ubuntu 14.04 (trusty)
|
||||
```
|
||||
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
$ sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main"
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install -y clang-7
|
||||
```
|
||||
|
||||
### macOS
|
||||
The following depends on Homebrew; instructions on how to install Homebrew are at https://brew.sh
|
||||
|
||||
Once Homebrew is installed, ensure the latest llvm is installed:
|
||||
```
|
||||
$ brew update # <- ensure your brew is up to date
|
||||
$ brew install llvm # <- should output “Warning: llvm 7.0.0 is already installed and up-to-date”
|
||||
$ brew --prefix llvm # <- should output “/usr/local/opt/llvm”
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
### Quick start
|
||||
To get started create a `makefile` containing:
|
||||
```make
|
||||
include path/to/bpf.mk
|
||||
```
|
||||
and `src/program.c` containing:
|
||||
```c
|
||||
#include <solana_sdk.h>
|
||||
|
||||
bool entrypoint(const uint8_t *input) {
|
||||
SolKeyedAccounts ka[1];
|
||||
const uint8_t *data;
|
||||
uint64_t data_len;
|
||||
|
||||
if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
|
||||
return false;
|
||||
}
|
||||
sol_log_params(1, ka, data, data_len);
|
||||
return true;
|
||||
}
|
||||
```
|
||||
|
||||
Then run `make` to build `out/program.o`.
|
||||
Run `make help` for more details.
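For example, assuming the program above lives in `src/program.c` (file name is
illustrative), a typical session might look like:
```
$ make program        # compile src/program.c into out/program.o via clang and llc
$ make dump_program   # disassemble out/program.o with llvm-objdump
$ make clean          # remove the out/ directory
```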
|
||||
|
||||
### Limitations
|
||||
* Programs must be fully contained within a single .c file
|
||||
* No libc is available but `solana_sdk.h` provides a minimal set of
|
||||
primitives.
|
115
programs/bpf/c/sdk/bpf.mk
Normal file
@ -0,0 +1,115 @@
|
||||
|
||||
all:
|
||||
.PHONY: help all clean
|
||||
|
||||
ifneq ($(V),1)
|
||||
_@ :=@
|
||||
endif
|
||||
|
||||
INC_DIRS ?=
|
||||
SRC_DIR ?= ./src
|
||||
OUT_DIR ?= ./out
|
||||
|
||||
OS=$(shell uname)
|
||||
ifeq ($(OS),Darwin)
|
||||
LLVM_DIR ?= $(shell brew --prefix llvm)
|
||||
endif
|
||||
|
||||
ifdef LLVM_DIR
|
||||
CC := $(LLVM_DIR)/bin/clang
|
||||
LLC := $(LLVM_DIR)/bin/llc
|
||||
OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump
|
||||
else
|
||||
CC := clang-7
|
||||
LLC := llc-7
|
||||
OBJ_DUMP := llvm-objdump-7
|
||||
endif
|
||||
|
||||
SYSTEM_INC_DIRS := -isystem $(dir $(lastword $(MAKEFILE_LIST)))inc
|
||||
|
||||
CC_FLAGS := \
|
||||
-Werror \
|
||||
-target bpf \
|
||||
-O2 \
|
||||
-emit-llvm \
|
||||
-fno-builtin \
|
||||
|
||||
LLC_FLAGS := \
|
||||
-march=bpf \
|
||||
-filetype=obj \
|
||||
|
||||
OBJ_DUMP_FLAGS := \
|
||||
-color \
|
||||
-source \
|
||||
-disassemble \
|
||||
|
||||
help:
|
||||
@echo 'BPF Program makefile'
|
||||
@echo ''
|
||||
@echo 'This makefile will build BPF Programs from C source files into ELFs'
|
||||
@echo ''
|
||||
@echo 'Assumptions:'
|
||||
@echo ' - Programs are a single .c source file (may include headers)'
|
||||
@echo ' - Programs are located in the source directory: $(SRC_DIR)'
|
||||
@echo ' - Programs are named by their basename (eg. file name:foo.c -> program name:foo)'
|
||||
@echo ' - Output files will be placed in the directory: $(OUT_DIR)'
|
||||
@echo ''
|
||||
@echo 'User settings'
|
||||
@echo ' - The following settings are overridable on the command line, default values shown:'
|
||||
@echo ' - Show commands while building:'
|
||||
@echo ' V=1'
|
||||
@echo ' - List of include directories:'
|
||||
@echo ' INC_DIRS=$(INC_DIRS)'
|
||||
@echo ' - List of system include directories:'
|
||||
@echo ' SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)'
|
||||
@echo ' - Location of source files:'
|
||||
@echo ' SRC_DIR=$(SRC_DIR)'
|
||||
@echo ' - Location to place output files:'
|
||||
@echo ' OUT_DIR=$(OUT_DIR)'
|
||||
@echo ' - Location of LLVM:'
|
||||
@echo ' LLVM_DIR=$(LLVM_DIR)'
|
||||
@echo ''
|
||||
@echo 'Usage:'
|
||||
@echo ' - make help - This help message'
|
||||
@echo ' - make all - Builds all the programs in the directory: $(SRC_DIR)'
|
||||
@echo ' - make clean - Cleans all programs'
|
||||
@echo ' - make dump_<program name> - Dumps the contents of the program to stdout'
|
||||
@echo ' - make <program name> - Build a single program by name'
|
||||
@echo ''
|
||||
@echo 'Available programs:'
|
||||
$(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n))
|
||||
@echo ''
|
||||
@echo 'Example:'
|
||||
@echo ' - Assuming a program named foo (src/foo.c)'
|
||||
@echo ' - make foo'
|
||||
@echo ' - make dump_foo'
|
||||
|
||||
.PRECIOUS: $(OUT_DIR)/%.bc
|
||||
$(OUT_DIR)/%.bc: $(SRC_DIR)/%.c
|
||||
@echo "[cc] $@ ($<)"
|
||||
$(_@)mkdir -p $(OUT_DIR)
|
||||
$(_@)$(CC) $(CC_FLAGS) $(SYSTEM_INC_DIRS) $(INC_DIRS) -o $@ -c $< -MD -MF $(@:.bc=.d)
|
||||
|
||||
.PRECIOUS: $(OUT_DIR)/%.o
|
||||
$(OUT_DIR)/%.o: $(OUT_DIR)/%.bc
|
||||
@echo "[llc] $@ ($<)"
|
||||
$(_@)$(LLC) $(LLC_FLAGS) -o $@ $<
|
||||
|
||||
-include $(wildcard $(OUT_DIR)/*.d)
|
||||
|
||||
PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*.c)))
|
||||
|
||||
define \n
|
||||
|
||||
|
||||
endef
|
||||
|
||||
all: $(PROGRAM_NAMES)
|
||||
|
||||
%: $(addprefix $(OUT_DIR)/, %.o) ;
|
||||
|
||||
dump_%: %
|
||||
$(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .o, $<))
|
||||
|
||||
clean:
|
||||
rm -rf $(OUT_DIR)
|
298
programs/bpf/c/sdk/inc/solana_sdk.h
Normal file
@ -0,0 +1,298 @@
|
||||
#pragma once
|
||||
/**
|
||||
* @brief Solana C-based BPF program utility functions and types
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Numeric types
|
||||
*/
|
||||
#ifndef __LP64__
|
||||
#error LP64 data model required
|
||||
#endif
|
||||
|
||||
typedef signed char int8_t;
|
||||
typedef unsigned char uint8_t;
|
||||
typedef signed short int16_t;
|
||||
typedef unsigned short uint16_t;
|
||||
typedef signed int int32_t;
|
||||
typedef unsigned int uint32_t;
|
||||
typedef signed long int int64_t;
|
||||
typedef unsigned long int uint64_t;
|
||||
|
||||
/**
|
||||
* NULL
|
||||
*/
|
||||
#define NULL 0
|
||||
|
||||
/**
|
||||
* Boolean type
|
||||
*/
|
||||
typedef enum { false = 0, true } bool;
|
||||
|
||||
/**
|
||||
* Helper function that prints a string to stdout
|
||||
*/
|
||||
extern void sol_log(const char*);
|
||||
|
||||
/**
|
||||
* Helper function that prints 64-bit values represented in hexadecimal
|
||||
* to stdout
|
||||
*/
|
||||
extern void sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
|
||||
|
||||
/**
|
||||
* Prefix for all BPF functions
|
||||
*
|
||||
* This prefix should be used for functions in order to facilitate
|
||||
* interoperability with BPF representation
|
||||
*/
|
||||
#define SOL_FN_PREFIX __attribute__((always_inline)) static
|
||||
|
||||
/**
|
||||
* Size of Public key in bytes
|
||||
*/
|
||||
#define SIZE_PUBKEY 32
|
||||
|
||||
/**
|
||||
* Public key
|
||||
*/
|
||||
typedef struct {
|
||||
uint8_t x[SIZE_PUBKEY];
|
||||
} SolPubkey;
|
||||
|
||||
/**
|
||||
* Compares two public keys
|
||||
*
|
||||
* @param one First public key
|
||||
* @param two Second public key
|
||||
* @return true if the same
|
||||
*/
|
||||
SOL_FN_PREFIX bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) {
|
||||
for (int i = 0; i < sizeof(*one); i++) {
|
||||
if (one->x[i] != two->x[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Keyed Accounts
|
||||
*/
|
||||
typedef struct {
|
||||
SolPubkey *key; /** Public Key of the account owner */
|
||||
int64_t *tokens; /** Number of tokens owned by this account */
|
||||
uint64_t userdata_len; /** Length of userdata in bytes */
|
||||
uint8_t *userdata; /** On-chain data owned by this account */
|
||||
SolPubkey *program_id; /** Program that owns this account */
|
||||
} SolKeyedAccounts;
|
||||
|
||||
/**
|
||||
* Copies memory
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_memcpy(void *dst, const void *src, int len) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
*((uint8_t *)dst + i) = *((const uint8_t *)src + i);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compares memory
|
||||
*/
|
||||
SOL_FN_PREFIX int sol_memcmp(const void *s1, const void *s2, int n) {
|
||||
for (int i = 0; i < n; i++) {
|
||||
uint8_t diff = *((uint8_t *)s1 + i) - *((const uint8_t *)s2 + i);
|
||||
if (diff) {
|
||||
return diff;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the number of elements in an array
|
||||
*/
|
||||
#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
|
||||
|
||||
/**
|
||||
* Panics
|
||||
*
|
||||
* Prints the line number where the panic occurred and then causes
|
||||
* the BPF VM to immediately halt execution. No accounts' userdata are updated
|
||||
*/
|
||||
#define sol_panic() _sol_panic(__LINE__)
|
||||
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
|
||||
sol_log_64(0xFF, 0xFF, 0xFF, 0xFF, line);
|
||||
uint8_t *pv = (uint8_t *)1;
|
||||
*pv = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Asserts
|
||||
*/
|
||||
#define sol_assert(expr) \
|
||||
if (!(expr)) { \
|
||||
_sol_panic(__LINE__); \
|
||||
}
|
||||
|
||||
/**
|
||||
* De-serializes the input parameters into usable types
|
||||
*
|
||||
* Use this function to deserialize the buffer passed to the program entrypoint
|
||||
* into usable types. This function does not perform copy deserialization,
|
||||
* instead it populates the pointers and lengths in SolKeyedAccounts and data so
|
||||
* that any modification to tokens or account data takes place on the original
|
||||
* buffer. Doing so also eliminates the need to serialize back into the buffer
|
||||
* at program end.
|
||||
*
|
||||
* @param input Source buffer containing serialized input parameters
|
||||
* @param ka Pointer to an array of SolKeyedAccounts to deserialize into
|
||||
* @param ka_len Number of SolKeyedAccounts entries in `ka`
|
||||
* @param ka_len_out If NULL, fill exactly `ka_len` accounts or fail.
|
||||
* If not NULL, fill up to `ka_len` accounts and return the
|
||||
* number of filled accounts in `ka_len_out`.
|
||||
* @param data On return, a pointer to the instruction data
|
||||
* @param data_len On return, the length in bytes of the instruction data
|
||||
* @return Boolean true if successful
|
||||
*/
|
||||
SOL_FN_PREFIX bool sol_deserialize(
|
||||
const uint8_t *input,
|
||||
SolKeyedAccounts *ka,
|
||||
uint64_t ka_len,
|
||||
uint64_t *ka_len_out,
|
||||
const uint8_t **data,
|
||||
uint64_t *data_len
|
||||
) {
|
||||
|
||||
|
||||
if (ka_len_out == NULL) {
|
||||
if (ka_len != *(uint64_t *) input) {
|
||||
return false;
|
||||
}
|
||||
ka_len = *(uint64_t *) input;
|
||||
} else {
|
||||
if (ka_len > *(uint64_t *) input) {
|
||||
ka_len = *(uint64_t *) input;
|
||||
}
|
||||
*ka_len_out = ka_len;
|
||||
}
|
||||
|
||||
input += sizeof(uint64_t);
|
||||
for (int i = 0; i < ka_len; i++) {
|
||||
// key
|
||||
ka[i].key = (SolPubkey *) input;
|
||||
input += sizeof(SolPubkey);
|
||||
|
||||
// tokens
|
||||
ka[i].tokens = (int64_t *) input;
|
||||
input += sizeof(int64_t);
|
||||
|
||||
// account userdata
|
||||
ka[i].userdata_len = *(uint64_t *) input;
|
||||
input += sizeof(uint64_t);
|
||||
ka[i].userdata = (uint8_t *) input;
|
||||
input += ka[i].userdata_len;
|
||||
|
||||
// program_id
|
||||
ka[i].program_id = (SolPubkey *) input;
|
||||
input += sizeof(SolPubkey);
|
||||
}
|
||||
|
||||
// input data
|
||||
*data_len = *(uint64_t *) input;
|
||||
input += sizeof(uint64_t);
|
||||
*data = input;
|
||||
|
||||
return true;
|
||||
}
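/**
 * Usage sketch (illustrative, not part of the SDK): when the number of
 * accounts is not known at compile time, pass a non-NULL `ka_len_out` and
 * size `ka` for the maximum expected:
 *
 *   uint64_t num_ka;
 *   if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), &num_ka,
 *                        &data, &data_len)) {
 *     return false;
 *   }
 *
 * On success `num_ka` reports how many entries of `ka` were populated.
 */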
|
||||
|
||||
/**
|
||||
* Debugging utilities
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Prints the hexadecimal representation of a public key
|
||||
*
|
||||
* @param key The public key to print
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_log_key(const SolPubkey *key) {
|
||||
for (int j = 0; j < sizeof(*key); j++) {
|
||||
sol_log_64(0, 0, 0, j, key->x[j]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prints the hexadecimal representation of an array
|
||||
*
|
||||
* @param array The array to print
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_log_array(const uint8_t *array, int len) {
|
||||
for (int j = 0; j < len; j++) {
|
||||
sol_log_64(0, 0, 0, j, array[j]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prints the hexadecimal representation of the program's input parameters
|
||||
*
|
||||
* @param num_ka Number of SolKeyedAccounts to print
|
||||
* @param ka A pointer to an array of SolKeyedAccounts to print
|
||||
* @param data A pointer to the instruction data to print
|
||||
* @param data_len The length in bytes of the instruction data
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_log_params(
|
||||
uint64_t num_ka,
|
||||
const SolKeyedAccounts *ka,
|
||||
const uint8_t *data,
|
||||
uint64_t data_len
|
||||
) {
|
||||
sol_log_64(0, 0, 0, 0, num_ka);
|
||||
for (int i = 0; i < num_ka; i++) {
|
||||
sol_log_key(ka[i].key);
|
||||
sol_log_64(0, 0, 0, 0, *ka[i].tokens);
|
||||
sol_log_array(ka[i].userdata, ka[i].userdata_len);
|
||||
sol_log_key(ka[i].program_id);
|
||||
}
|
||||
sol_log_array(data, data_len);
|
||||
}
|
||||
|
||||
/**@}*/
|
||||
|
||||
/**
|
||||
* Program entrypoint
|
||||
* @{
|
||||
*
|
||||
* The following is an example of a simple program that prints the input
|
||||
* parameters it received:
|
||||
*
|
||||
* bool entrypoint(const uint8_t *input) {
|
||||
* SolKeyedAccounts ka[1];
|
||||
* const uint8_t *data;
|
||||
* uint64_t data_len;
|
||||
*
|
||||
* if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
|
||||
* return false;
|
||||
* }
|
||||
* sol_log_params(1, ka, data, data_len);
|
||||
* return true;
|
||||
* }
|
||||
*/
|
||||
|
||||
/**
|
||||
* Program entrypoint signature
|
||||
*
|
||||
* @param input An array containing serialized input parameters
|
||||
* @return true if successful
|
||||
*/
|
||||
extern bool entrypoint(const uint8_t *input);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
/**@}*/
|
32
programs/bpf/c/src/move_funds.c
Normal file
@ -0,0 +1,32 @@
|
||||
/**
|
||||
* @brief Example C-based BPF program that moves funds from one account to
|
||||
* another
|
||||
*/
|
||||
|
||||
#include <solana_sdk.h>
|
||||
|
||||
/**
|
||||
* Number of SolKeyedAccounts expected. The program should bail if an
|
||||
* unexpected number of accounts are passed to the program's entrypoint
|
||||
*/
|
||||
#define NUM_KA 3
|
||||
|
||||
extern bool entrypoint(const uint8_t *input) {
|
||||
SolKeyedAccounts ka[NUM_KA];
|
||||
const uint8_t *data;
|
||||
uint64_t data_len;
|
||||
|
||||
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
int64_t tokens = *(int64_t *)data;
|
||||
if (*ka[0].tokens >= tokens) {
|
||||
*ka[0].tokens -= tokens;
|
||||
*ka[2].tokens += tokens;
|
||||
// sol_log_64(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
|
||||
} else {
|
||||
// sol_log_64(0, 0, 0xFF, *ka[0].tokens, tokens);
|
||||
}
|
||||
return true;
|
||||
}
|
35
programs/bpf/c/src/noop.c
Normal file
@ -0,0 +1,35 @@
|
||||
/**
|
||||
* @brief Example C-based BPF program that prints out the parameters
|
||||
* passed to it
|
||||
*/
|
||||
|
||||
#include <solana_sdk.h>
|
||||
|
||||
/**
|
||||
* Number of SolKeyedAccounts expected. The program should bail if an
|
||||
* unexpected number of accounts are passed to the program's entrypoint
|
||||
*/
|
||||
#define NUM_KA 1
|
||||
|
||||
extern bool entrypoint(const uint8_t *input) {
|
||||
SolKeyedAccounts ka[NUM_KA];
|
||||
const uint8_t *data;
|
||||
uint64_t data_len;
|
||||
|
||||
sol_log("noop");
|
||||
|
||||
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
|
||||
return false;
|
||||
}
|
||||
sol_log_params(NUM_KA, ka, data, data_len);
|
||||
|
||||
sol_assert(sizeof(int8_t) == 1);
|
||||
sol_assert(sizeof(uint8_t) == 1);
|
||||
sol_assert(sizeof(int16_t) == 2);
|
||||
sol_assert(sizeof(uint16_t) == 2);
|
||||
sol_assert(sizeof(int32_t) == 4);
|
||||
sol_assert(sizeof(uint32_t) == 4);
|
||||
sol_assert(sizeof(int64_t) == 8);
|
||||
sol_assert(sizeof(uint64_t) == 8);
|
||||
return true;
|
||||
}
|
11
programs/bpf/rust/noop/Cargo.toml
Normal file
@ -0,0 +1,11 @@
|
||||
[package]
|
||||
name = "solana-bpf-noop"
|
||||
version = "0.10.3"
|
||||
description = "Solana BPF noop program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
rbpf = "0.1.0"
|
||||
solana-sdk = { path = "../../../../sdk", version = "0.10.3" }
|
10
programs/bpf/rust/noop/build.sh
Executable file
@ -0,0 +1,10 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
# TODO building release flavor with rust produces a bunch of output .bc files
|
||||
INTERDIR=../../../target/release
|
||||
OUTDIR="${1:-../../../target/debug/}"
|
||||
mkdir -p "$OUTDIR"
|
||||
# cargo +nightly rustc --release -- -C panic=abort --emit=llvm-ir
|
||||
cargo +nightly rustc --release -- -C panic=abort --emit=llvm-bc
|
||||
cp "$INTERDIR"/deps/noop_rust-*.bc "$OUTDIR"/noop_rust.bc
|
||||
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -o "$OUTDIR"/noop_rust.o "$OUTDIR"/noop_rust.bc
|
3
programs/bpf/rust/noop/dump.sh
Executable file
@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
|
||||
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble target/release/noop_rust.o
|
15
programs/bpf/rust/noop/src/lib.rs
Normal file
@ -0,0 +1,15 @@
|
||||
extern crate rbpf;
|
||||
|
||||
use std::mem::transmute;
|
||||
|
||||
#[no_mangle]
|
||||
#[link_section = ".text,entrypoint"] // TODO platform independent needed
|
||||
pub extern "C" fn entrypoint(_raw: *mut u8) {
|
||||
let bpf_func_trace_printk = unsafe {
|
||||
transmute::<u64, extern "C" fn(u64, u64, u64, u64, u64)>(
|
||||
rbpf::helpers::BPF_TRACE_PRINTK_IDX as u64,
|
||||
)
|
||||
};
|
||||
|
||||
bpf_func_trace_printk(0, 0, 1, 2, 3);
|
||||
}
|
24
programs/native/bpf_loader/Cargo.toml
Normal file
@ -0,0 +1,24 @@
|
||||
[package]
|
||||
name = "solana-bpfloader"
|
||||
version = "0.10.3"
|
||||
description = "Solana BPF Loader"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.0.0"
|
||||
byteorder = "1.2.1"
|
||||
elf = "0.0.10"
|
||||
env_logger = "0.5.12"
|
||||
libc = "0.2.43"
|
||||
log = "0.4.2"
|
||||
solana_rbpf = "0.1.3"
|
||||
serde = "1.0.27"
|
||||
serde_derive = "1.0.27"
|
||||
solana-sdk = { path = "../../../sdk", version = "0.10.3" }
|
||||
|
||||
[lib]
|
||||
name = "solana_bpf_loader"
|
||||
crate-type = ["cdylib"]
|
||||
|
324
programs/native/bpf_loader/src/bpf_verifier.rs
Normal file
@ -0,0 +1,324 @@
|
||||
use solana_rbpf::ebpf;
|
||||
use std::io::{Error, ErrorKind};
|
||||
|
||||
fn reject<S: AsRef<str>>(msg: S) -> Result<(), Error> {
|
||||
let full_msg = format!("[Verifier] Error: {}", msg.as_ref());
|
||||
Err(Error::new(ErrorKind::Other, full_msg))
|
||||
}
|
||||
|
||||
fn check_prog_len(prog: &[u8]) -> Result<(), Error> {
|
||||
if prog.len() % ebpf::INSN_SIZE != 0 {
|
||||
reject(format!(
|
||||
"eBPF program length must be a multiple of {:?} octets",
|
||||
ebpf::INSN_SIZE
|
||||
))?;
|
||||
}
|
||||
if prog.len() > ebpf::PROG_MAX_SIZE {
|
||||
reject(format!(
|
||||
"eBPF program length limited to {:?}, here {:?}",
|
||||
ebpf::PROG_MAX_INSNS,
|
||||
prog.len() / ebpf::INSN_SIZE
|
||||
))?;
|
||||
}
|
||||
|
||||
if prog.is_empty() {
|
||||
reject("No program set, call prog_set() to load one".to_string())?;
|
||||
}
|
||||
|
||||
// TODO BPF program may deterministically exit even if the last
|
||||
// instruction in the block is not an exit (might be earlier and jumped to)
|
||||
// TODO need to validate more intelligently
|
||||
// let last_insn = ebpf::get_insn(prog, (prog.len() / ebpf::INSN_SIZE) - 1);
|
||||
// if last_insn.opc != ebpf::EXIT {
|
||||
// reject("program does not end with “EXIT” instruction".to_string())?;
|
||||
// }
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_imm_nonzero(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Error> {
|
||||
if insn.imm == 0 {
|
||||
reject(format!("division by 0 (insn #{:?})", insn_ptr))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_imm_endian(insn: &ebpf::Insn, insn_ptr: usize) -> Result<(), Error> {
|
||||
match insn.imm {
|
||||
16 | 32 | 64 => Ok(()),
|
||||
_ => reject(format!(
|
||||
"unsupported argument for LE/BE (insn #{:?})",
|
||||
insn_ptr
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn check_load_dw(prog: &[u8], insn_ptr: usize) -> Result<(), Error> {
|
||||
// We know we can reach next insn since we enforce an EXIT insn at the end of program, while
|
||||
// this function should be called only for LD_DW insn, that cannot be last in program.
|
||||
let next_insn = ebpf::get_insn(prog, insn_ptr + 1);
|
||||
if next_insn.opc != 0 {
|
||||
reject(format!(
|
||||
"incomplete LD_DW instruction (insn #{:?})",
|
||||
insn_ptr
|
||||
))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_jmp_offset(prog: &[u8], insn_ptr: usize) -> Result<(), Error> {
|
||||
let insn = ebpf::get_insn(prog, insn_ptr);
|
||||
if insn.off == -1 {
|
||||
reject(format!("infinite loop (insn #{:?})", insn_ptr))?;
|
||||
}
|
||||
|
||||
let dst_insn_ptr = insn_ptr as isize + 1 + insn.off as isize;
|
||||
if dst_insn_ptr < 0 || dst_insn_ptr as usize >= (prog.len() / ebpf::INSN_SIZE) {
|
||||
reject(format!(
|
||||
"jump out of code to #{:?} (insn #{:?})",
|
||||
dst_insn_ptr, insn_ptr
|
||||
))?;
|
||||
}
|
||||
|
||||
let dst_insn = ebpf::get_insn(prog, dst_insn_ptr as usize);
|
||||
if dst_insn.opc == 0 {
|
||||
reject(format!(
|
||||
"jump to middle of LD_DW at #{:?} (insn #{:?})",
|
||||
dst_insn_ptr, insn_ptr
|
||||
))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_registers(insn: &ebpf::Insn, store: bool, insn_ptr: usize) -> Result<(), Error> {
|
||||
if insn.src > 10 {
|
||||
reject(format!("invalid source register (insn #{:?})", insn_ptr))?;
|
||||
}
|
||||
|
||||
match (insn.dst, store) {
|
||||
(0...9, _) | (10, true) => Ok(()),
|
||||
(10, false) => reject(format!(
|
||||
"cannot write into register r10 (insn #{:?})",
|
||||
insn_ptr
|
||||
)),
|
||||
(_, _) => reject(format!(
|
||||
"invalid destination register (insn #{:?})",
|
||||
insn_ptr
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check(prog: &[u8]) -> Result<(), Error> {
|
||||
check_prog_len(prog)?;
|
||||
|
||||
let mut insn_ptr: usize = 0;
|
||||
while insn_ptr * ebpf::INSN_SIZE < prog.len() {
|
||||
let insn = ebpf::get_insn(prog, insn_ptr);
|
||||
let mut store = false;
|
||||
|
||||
match insn.opc {
|
||||
// BPF_LD class
|
||||
ebpf::LD_ABS_B => {}
|
||||
ebpf::LD_ABS_H => {}
|
||||
ebpf::LD_ABS_W => {}
|
||||
ebpf::LD_ABS_DW => {}
|
||||
ebpf::LD_IND_B => {}
|
||||
ebpf::LD_IND_H => {}
|
||||
ebpf::LD_IND_W => {}
|
||||
ebpf::LD_IND_DW => {}
|
||||
|
||||
ebpf::LD_DW_IMM => {
|
||||
store = true;
|
||||
check_load_dw(prog, insn_ptr)?;
|
||||
insn_ptr += 1;
|
||||
}
|
||||
|
||||
// BPF_LDX class
|
||||
ebpf::LD_B_REG => {}
|
||||
ebpf::LD_H_REG => {}
|
||||
ebpf::LD_W_REG => {}
|
||||
ebpf::LD_DW_REG => {}
|
||||
|
||||
// BPF_ST class
|
||||
ebpf::ST_B_IMM => store = true,
|
||||
ebpf::ST_H_IMM => store = true,
|
||||
ebpf::ST_W_IMM => store = true,
|
||||
ebpf::ST_DW_IMM => store = true,
|
||||
|
||||
// BPF_STX class
|
||||
ebpf::ST_B_REG => store = true,
|
||||
ebpf::ST_H_REG => store = true,
|
||||
ebpf::ST_W_REG => store = true,
|
||||
ebpf::ST_DW_REG => store = true,
|
||||
ebpf::ST_W_XADD => {
|
||||
unimplemented!();
|
||||
}
|
||||
ebpf::ST_DW_XADD => {
|
||||
unimplemented!();
|
||||
}
|
||||
|
||||
// BPF_ALU class
|
||||
ebpf::ADD32_IMM => {}
|
||||
ebpf::ADD32_REG => {}
|
||||
ebpf::SUB32_IMM => {}
|
||||
ebpf::SUB32_REG => {}
|
||||
ebpf::MUL32_IMM => {}
|
||||
ebpf::MUL32_REG => {}
|
||||
ebpf::DIV32_IMM => {
|
||||
check_imm_nonzero(&insn, insn_ptr)?;
|
||||
}
|
||||
ebpf::DIV32_REG => {}
|
||||
ebpf::OR32_IMM => {}
|
||||
ebpf::OR32_REG => {}
|
||||
ebpf::AND32_IMM => {}
|
||||
ebpf::AND32_REG => {}
|
||||
ebpf::LSH32_IMM => {}
|
||||
ebpf::LSH32_REG => {}
|
||||
ebpf::RSH32_IMM => {}
|
||||
ebpf::RSH32_REG => {}
|
||||
ebpf::NEG32 => {}
|
||||
ebpf::MOD32_IMM => {
|
||||
check_imm_nonzero(&insn, insn_ptr)?;
|
||||
}
|
||||
ebpf::MOD32_REG => {}
|
||||
ebpf::XOR32_IMM => {}
|
||||
ebpf::XOR32_REG => {}
|
||||
ebpf::MOV32_IMM => {}
|
||||
ebpf::MOV32_REG => {}
|
||||
ebpf::ARSH32_IMM => {}
|
||||
ebpf::ARSH32_REG => {}
|
||||
ebpf::LE => {
|
||||
check_imm_endian(&insn, insn_ptr)?;
|
||||
}
|
||||
ebpf::BE => {
|
||||
check_imm_endian(&insn, insn_ptr)?;
|
||||
}
|
||||
|
||||
// BPF_ALU64 class
|
||||
ebpf::ADD64_IMM => {}
|
||||
ebpf::ADD64_REG => {}
|
||||
ebpf::SUB64_IMM => {}
|
||||
ebpf::SUB64_REG => {}
|
||||
ebpf::MUL64_IMM => {
|
||||
check_imm_nonzero(&insn, insn_ptr)?;
|
||||
}
|
||||
ebpf::MUL64_REG => {}
|
||||
ebpf::DIV64_IMM => {
|
||||
check_imm_nonzero(&insn, insn_ptr)?;
|
||||
}
|
||||
ebpf::DIV64_REG => {}
|
||||
ebpf::OR64_IMM => {}
|
||||
ebpf::OR64_REG => {}
|
||||
ebpf::AND64_IMM => {}
|
||||
ebpf::AND64_REG => {}
|
||||
ebpf::LSH64_IMM => {}
|
||||
ebpf::LSH64_REG => {}
|
||||
ebpf::RSH64_IMM => {}
|
||||
ebpf::RSH64_REG => {}
|
||||
ebpf::NEG64 => {}
|
||||
ebpf::MOD64_IMM => {}
|
||||
ebpf::MOD64_REG => {}
|
||||
ebpf::XOR64_IMM => {}
|
||||
ebpf::XOR64_REG => {}
|
||||
ebpf::MOV64_IMM => {}
|
||||
ebpf::MOV64_REG => {}
|
||||
ebpf::ARSH64_IMM => {}
|
||||
ebpf::ARSH64_REG => {}
|
||||
|
||||
// BPF_JMP class
|
||||
ebpf::JA => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JEQ_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JEQ_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JGT_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JGT_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JGE_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JGE_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JLT_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JLT_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JLE_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JLE_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSET_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSET_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JNE_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JNE_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSGT_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSGT_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSGE_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSGE_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSLT_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSLT_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSLE_IMM => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::JSLE_REG => {
|
||||
check_jmp_offset(prog, insn_ptr)?;
|
||||
}
|
||||
ebpf::CALL => {}
|
||||
ebpf::TAIL_CALL => unimplemented!(),
|
||||
ebpf::EXIT => {}
|
||||
|
||||
_ => {
|
||||
reject(format!(
|
||||
"unknown eBPF opcode {:#2x} (insn #{:?})",
|
||||
insn.opc, insn_ptr
|
||||
))?;
|
||||
}
|
||||
}
|
||||
|
||||
check_registers(&insn, store, insn_ptr)?;
|
||||
|
||||
insn_ptr += 1;
|
||||
}
|
||||
|
||||
// insn_ptr should now be equal to number of instructions.
|
||||
if insn_ptr != prog.len() / ebpf::INSN_SIZE {
|
||||
reject(format!("jumped out of code to #{:?}", insn_ptr))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
230
programs/native/bpf_loader/src/lib.rs
Normal file
@ -0,0 +1,230 @@
|
||||
pub mod bpf_verifier;
|
||||
|
||||
extern crate bincode;
|
||||
extern crate byteorder;
|
||||
extern crate env_logger;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate libc;
|
||||
extern crate solana_rbpf;
|
||||
extern crate solana_sdk;
|
||||
|
||||
use bincode::deserialize;
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use libc::c_char;
|
||||
use solana_rbpf::EbpfVmRaw;
|
||||
use solana_sdk::account::KeyedAccount;
|
||||
use solana_sdk::loader_instruction::LoaderInstruction;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::ffi::CStr;
|
||||
use std::io::prelude::*;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::mem;
|
||||
use std::sync::{Once, ONCE_INIT};
|
||||
|
||||
// TODO use rbpf's disassemble
|
||||
#[allow(dead_code)]
|
||||
fn dump_program(key: &Pubkey, prog: &[u8]) {
|
||||
let mut eight_bytes: Vec<u8> = Vec::new();
|
||||
info!("BPF Program: {:?}", key);
|
||||
for i in prog.iter() {
eight_bytes.push(*i);
// Dump one full 8-byte eBPF instruction per log line.
if eight_bytes.len() >= 8 {
info!("{:02X?}", eight_bytes);
eight_bytes.clear();
}
}
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
pub fn helper_sol_log_verify(
|
||||
addr: u64,
|
||||
unused2: u64,
|
||||
unused3: u64,
|
||||
unused4: u64,
|
||||
unused5: u64,
|
||||
ro_regions: &[&[u8]],
|
||||
unused7: &[&[u8]],
|
||||
) -> Result<(), Error> {
|
||||
for region in ro_regions.iter() {
|
||||
if region.as_ptr() as u64 <= addr
|
||||
&& addr as u64 <= region.as_ptr() as u64 + region.len() as u64
|
||||
{
|
||||
let c_buf: *const c_char = addr as *const c_char;
|
||||
let max_size = (region.as_ptr() as u64 + region.len() as u64) - addr;
|
||||
unsafe {
|
||||
for i in 0..max_size {
|
||||
if std::ptr::read(c_buf.offset(i as isize)) == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
return Err(Error::new(ErrorKind::Other, "Error, Unterminated string"));
|
||||
}
|
||||
}
|
||||
Err(Error::new(
|
||||
ErrorKind::Other,
|
||||
"Error: Load segfault, bad string pointer",
|
||||
))
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
pub fn helper_sol_log(addr: u64, unused2: u64, unused3: u64, unused4: u64, unused5: u64) -> u64 {
|
||||
let c_buf: *const c_char = addr as *const c_char;
|
||||
let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
|
||||
match c_str.to_str() {
|
||||
Ok(slice) => info!("sol_log: {:?}", slice),
|
||||
Err(e) => warn!("Error: Cannot print invalid string"),
|
||||
};
|
||||
0
|
||||
}
|
||||
|
||||
pub fn helper_sol_log_u64(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
|
||||
info!(
|
||||
"sol_log_u64: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
|
||||
arg1, arg2, arg3, arg4, arg5
|
||||
);
|
||||
0
|
||||
}
|
||||
|
||||
fn create_vm(prog: &[u8]) -> Result<EbpfVmRaw, Error> {
|
||||
let mut vm = EbpfVmRaw::new(None)?;
|
||||
vm.set_verifier(bpf_verifier::check)?;
|
||||
vm.set_max_instruction_count(36000)?; // 36000 is a wag, need to tune
|
||||
vm.set_elf(&prog)?;
|
||||
vm.register_helper_ex("sol_log", Some(helper_sol_log_verify), helper_sol_log)?;
|
||||
vm.register_helper_ex("sol_log_64", None, helper_sol_log_u64)?;
|
||||
Ok(vm)
|
||||
}
|
||||
|
||||
fn serialize_parameters(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> Vec<u8> {
|
||||
assert_eq!(32, mem::size_of::<Pubkey>());
|
||||
|
||||
let mut v: Vec<u8> = Vec::new();
|
||||
v.write_u64::<LittleEndian>(keyed_accounts.len() as u64)
|
||||
.unwrap();
|
||||
for info in keyed_accounts.iter_mut() {
|
||||
v.write_all(info.key.as_ref()).unwrap();
|
||||
v.write_i64::<LittleEndian>(info.account.tokens).unwrap();
|
||||
v.write_u64::<LittleEndian>(info.account.userdata.len() as u64)
|
||||
.unwrap();
|
||||
v.write_all(&info.account.userdata).unwrap();
|
||||
v.write_all(info.account.program_id.as_ref()).unwrap();
|
||||
}
|
||||
v.write_u64::<LittleEndian>(data.len() as u64).unwrap();
|
||||
v.write_all(data).unwrap();
|
||||
v
|
||||
}
|
||||
|
||||
fn deserialize_parameters(keyed_accounts: &mut [KeyedAccount], buffer: &[u8]) {
|
||||
assert_eq!(32, mem::size_of::<Pubkey>());
|
||||
|
||||
let mut start = mem::size_of::<u64>();
|
||||
for info in keyed_accounts.iter_mut() {
|
||||
start += mem::size_of::<Pubkey>(); // skip pubkey
|
||||
info.account.tokens = LittleEndian::read_i64(&buffer[start..]);
|
||||
|
||||
start += mem::size_of::<u64>() // skip tokens
|
||||
+ mem::size_of::<u64>(); // skip length tag
|
||||
let end = start + info.account.userdata.len();
|
||||
info.account.userdata.clone_from_slice(&buffer[start..end]);
|
||||
|
||||
start += info.account.userdata.len() // skip userdata
|
||||
+ mem::size_of::<Pubkey>(); // skip program_id
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -> bool {
|
||||
static INIT: Once = ONCE_INIT;
|
||||
INIT.call_once(|| {
|
||||
// env_logger can only be initialized once
|
||||
env_logger::init();
|
||||
});
|
||||
|
||||
if keyed_accounts[0].account.executable {
|
||||
let prog = keyed_accounts[0].account.userdata.clone();
|
||||
trace!("Call BPF, {} instructions", prog.len() / 8);
|
||||
//dump_program(keyed_accounts[0].key, &prog);
|
||||
let mut vm = match create_vm(&prog) {
|
||||
Ok(vm) => vm,
|
||||
Err(e) => {
|
||||
warn!("create_vm failed: {}", e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
let mut v = serialize_parameters(&mut keyed_accounts[1..], &tx_data);
|
||||
match vm.execute_program(v.as_mut_slice()) {
|
||||
Ok(status) => if 0 == status {
|
||||
return false;
|
||||
},
|
||||
Err(e) => {
|
||||
warn!("execute_program failed: {}", e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
deserialize_parameters(&mut keyed_accounts[1..], &v);
|
||||
trace!(
|
||||
"BPF program executed {} instructions",
|
||||
vm.get_last_instruction_count()
|
||||
);
|
||||
} else if let Ok(instruction) = deserialize(tx_data) {
|
||||
match instruction {
|
||||
LoaderInstruction::Write { offset, bytes } => {
|
||||
let offset = offset as usize;
|
||||
let len = bytes.len();
|
||||
debug!("Write: offset={} length={}", offset, len);
|
||||
if keyed_accounts[0].account.userdata.len() < offset + len {
|
||||
warn!(
|
||||
"Write overflow: {} < {}",
|
||||
keyed_accounts[0].account.userdata.len(),
|
||||
offset + len
|
||||
);
|
||||
return false;
|
||||
}
|
||||
keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes);
|
||||
}
|
||||
LoaderInstruction::Finalize => {
|
||||
keyed_accounts[0].account.executable = true;
|
||||
info!("Finalize: account {:?}", keyed_accounts[0].key);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("Invalid program transaction: {:?}", tx_data);
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_rbpf::helpers;
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "Error: Execution exceeded maximum number of instructions")]
|
||||
fn test_non_terminating_program() {
|
||||
#[rustfmt::skip]
|
||||
let prog = &[
|
||||
0xb7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r6 = 0
|
||||
0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = 0
|
||||
0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r2 = 0
|
||||
0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
|
||||
0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r4 = 0
|
||||
0xbf, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r5 = r6
|
||||
0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call 6
|
||||
0x07, 0x06, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // r6 + 1
|
||||
0x05, 0x00, 0xf8, 0xff, 0x00, 0x00, 0x00, 0x00, // goto -8
|
||||
0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
|
||||
];
|
||||
let input = &mut [0x00];
|
||||
|
||||
let mut vm = EbpfVmRaw::new(None).unwrap();
|
||||
vm.set_verifier(bpf_verifier::check).unwrap();
|
||||
vm.set_max_instruction_count(36000).unwrap(); // 36000 is a wag, need to tune
|
||||
vm.set_program(prog).unwrap();
|
||||
vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helpers::bpf_trace_printf)
|
||||
.unwrap();
|
||||
vm.execute_program(input).unwrap();
|
||||
}
|
||||
}
|
24
programs/native/lua_loader/Cargo.toml
Normal file
@ -0,0 +1,24 @@
|
||||
[package]
|
||||
name = "solana-lualoader"
|
||||
version = "0.10.3"
|
||||
description = "Solana Lua Loader"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.0.0"
|
||||
env_logger = "0.5.12"
|
||||
log = "0.4.2"
|
||||
rlua = "0.15.2"
|
||||
serde = "1.0.27"
|
||||
serde_derive = "1.0.27"
|
||||
solana-sdk = { path = "../../../sdk", version = "0.10.3" }
|
||||
|
||||
[dev-dependencies]
|
||||
bincode = "1.0.0"
|
||||
|
||||
[lib]
|
||||
name = "solana_lua_loader"
|
||||
crate-type = ["cdylib"]
|
||||
|
50
programs/native/lua_loader/multisig.lua
Normal file
@ -0,0 +1,50 @@
|
||||
-- M-N Multisig. Pass in a table "{m=M, n=N, tokens=T}" where M is the number
|
||||
-- of signatures required, and N is a list of the pubkeys identifying
|
||||
-- those signatures. Once M of len(N) signatures are collected, tokens T
|
||||
-- are subtracted from account 1 and given to account 4. Note that unlike
|
||||
-- Rust, Lua is one-based and that account 1 is the first account.
|
||||
|
||||
function find(t, x)
|
||||
for i, v in pairs(t) do
|
||||
if v == x then
|
||||
return i
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
function deserialize(bytes)
|
||||
return load("return" .. bytes)()
|
||||
end
|
||||
|
||||
local from_account,
|
||||
serialize_account,
|
||||
state_account,
|
||||
to_account = table.unpack(accounts)
|
||||
|
||||
local serialize = load(serialize_account.userdata)().serialize
|
||||
|
||||
if #state_account.userdata == 0 then
|
||||
local cfg = deserialize(data)
|
||||
state_account.userdata = serialize(cfg, nil, "s")
|
||||
return
|
||||
end
|
||||
|
||||
local cfg = deserialize(state_account.userdata)
|
||||
local key = deserialize(data)
|
||||
|
||||
local i = find(cfg.n, key)
|
||||
if i == nil then
|
||||
return
|
||||
end
|
||||
|
||||
table.remove(cfg.n, i)
|
||||
cfg.m = cfg.m - 1
|
||||
state_account.userdata = serialize(cfg, nil, "s")
|
||||
|
||||
if cfg.m == 0 then
|
||||
from_account.tokens = from_account.tokens - cfg.tokens
|
||||
to_account.tokens = to_account.tokens + cfg.tokens
|
||||
|
||||
-- End of game.
|
||||
state_account.tokens = 0
|
||||
end
|
174
programs/native/lua_loader/serialize.lua
Normal file
@ -0,0 +1,174 @@
|
||||
----------------------------------------------------------------
|
||||
-- serialize.lua
|
||||
--
|
||||
-- Exports:
|
||||
--
|
||||
-- orderedPairs : deterministically ordered version of pairs()
|
||||
--
|
||||
-- serialize : convert Lua value to string in Lua syntax
|
||||
--
|
||||
----------------------------------------------------------------
|
||||
|
||||
|
||||
-- orderedPairs: iterate over table elements in deterministic order. First,
|
||||
-- array elements are returned, then remaining elements sorted by the key's
|
||||
-- type and value.
|
||||
|
||||
-- compare any two Lua values, establishing a complete ordering
|
||||
local function ltAny(a,b)
|
||||
local ta, tb = type(a), type(b)
|
||||
if ta ~= tb then
|
||||
return ta < tb
|
||||
end
|
||||
if ta == "string" or ta == "number" then
|
||||
return a < b
|
||||
end
|
||||
return tostring(a) < tostring(b)
|
||||
end
|
||||
|
||||
local inext = ipairs{}
|
||||
|
||||
local function orderedPairs(t)
|
||||
local keys = {}
|
||||
local keyIndex = 1
|
||||
local counting = true
|
||||
|
||||
local function _next(seen, s)
|
||||
local v
|
||||
|
||||
if counting then
|
||||
-- return next array index
|
||||
s, v = inext(t, s)
|
||||
if s ~= nil then
|
||||
seen[s] = true
|
||||
return s,v
|
||||
end
|
||||
counting = false
|
||||
|
||||
-- construct sorted unseen keys
|
||||
for k,v in pairs(t) do
|
||||
if not seen[k] then
|
||||
table.insert(keys, k)
|
||||
end
|
||||
end
|
||||
table.sort(keys, ltAny)
|
||||
end
|
||||
|
||||
-- return next unseen table element
|
||||
s = keys[keyIndex]
|
||||
if s ~= nil then
|
||||
keyIndex = keyIndex + 1
|
||||
v = t[s]
|
||||
end
|
||||
return s, v
|
||||
end
|
||||
|
||||
return _next, {}, 0
|
||||
end
|
||||
|
||||
|
||||
-- avoid 'nan', 'inf', and '-inf'
|
||||
local numtostring = {
|
||||
[tostring(-1/0)] = "-1/0",
|
||||
[tostring(1/0)] = "1/0",
|
||||
[tostring(0/0)] = "0/0"
|
||||
}
|
||||
|
||||
setmetatable(numtostring, { __index = function (t, k) return k end })
|
||||
|
||||
-- serialize: Serialize a Lua data structure
|
||||
--
|
||||
-- x = value to serialize
|
||||
-- out = function to be called repeatedly with strings, or
|
||||
-- table into which strings should be inserted, or
|
||||
-- nil => return a string
|
||||
-- iter = function to iterate over table elements, or
|
||||
-- "s" to sort elements by key, or
|
||||
-- nil for default (fastest)
|
||||
--
|
||||
-- Notes:
|
||||
-- * Does not support self-referential data structures.
|
||||
-- * Does not optimize for repeated sub-expressions.
|
||||
-- * Does not preserve topology; only values.
|
||||
-- * Does not handle types other than nil, number, boolean, string, table
|
||||
--
|
||||
local function serialize(x, out, iter)
|
||||
local visited = {}
|
||||
local iter = iter=="s" and orderedPairs or iter or pairs
|
||||
assert(type(iter) == "function")
|
||||
|
||||
local function _serialize(x)
|
||||
if type(x) == "string" then
|
||||
|
||||
out(string.format("%q", x))
|
||||
|
||||
elseif type(x) == "number" then
|
||||
|
||||
out(numtostring[tostring(x)])
|
||||
|
||||
elseif type(x) == "boolean" or
|
||||
type(x) == "nil" then
|
||||
|
||||
out(tostring(x))
|
||||
|
||||
elseif type(x) == "table" then
|
||||
|
||||
if visited[x] then
|
||||
error("serialize: recursive structure")
|
||||
end
|
||||
visited[x] = true
|
||||
local first, nextIndex = true, 1
|
||||
|
||||
out "{"
|
||||
|
||||
for k,v in iter(x) do
|
||||
if first then
|
||||
first = false
|
||||
else
|
||||
out ","
|
||||
end
|
||||
if k == nextIndex then
|
||||
nextIndex = nextIndex + 1
|
||||
else
|
||||
if type(k) == "string" and k:match("^[%a_][%w_]*$") then
|
||||
out(k.."=")
|
||||
else
|
||||
out "["
|
||||
_serialize(k)
|
||||
out "]="
|
||||
end
|
||||
end
|
||||
_serialize(v)
|
||||
end
|
||||
|
||||
out "}"
|
||||
visited[x] = false
|
||||
else
|
||||
error("serialize: unsupported type")
|
||||
end
|
||||
end
|
||||
|
||||
local result
|
||||
if not out then
|
||||
result = {}
|
||||
out = result
|
||||
end
|
||||
|
||||
if type(out) == "table" then
|
||||
local t = out
|
||||
function out(s)
|
||||
table.insert(t,s)
|
||||
end
|
||||
end
|
||||
|
||||
_serialize(x)
|
||||
|
||||
if result then
|
||||
return table.concat(result)
|
||||
end
|
||||
end
|
||||
|
||||
return {
|
||||
orderedPairs = orderedPairs,
|
||||
serialize = serialize
|
||||
}
|
298
programs/native/lua_loader/src/lib.rs
Normal file
@ -0,0 +1,298 @@
|
||||
extern crate bincode;
|
||||
extern crate env_logger;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate rlua;
|
||||
extern crate solana_sdk;
|
||||
|
||||
use bincode::deserialize;
|
||||
use rlua::{Lua, Result, Table};
|
||||
use solana_sdk::account::KeyedAccount;
|
||||
use solana_sdk::loader_instruction::LoaderInstruction;
|
||||
use std::str;
|
||||
use std::sync::{Once, ONCE_INIT};
|
||||
|
||||
/// Make KeyAccount values available to Lua.
|
||||
fn set_accounts(lua: &Lua, name: &str, keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||
let accounts = lua.create_table()?;
|
||||
for (i, keyed_account) in keyed_accounts.iter().enumerate() {
|
||||
let account = lua.create_table()?;
|
||||
account.set("key", keyed_account.key.to_string())?;
|
||||
account.set("tokens", keyed_account.account.tokens)?;
|
||||
let data_str = lua.create_string(&keyed_account.account.userdata)?;
|
||||
account.set("userdata", data_str)?;
|
||||
accounts.set(i + 1, account)?;
|
||||
}
|
||||
let globals = lua.globals();
|
||||
globals.set(name, accounts)
|
||||
}
|
||||
|
||||
/// Commit the new KeyedAccount values.
|
||||
fn update_accounts(lua: &Lua, name: &str, keyed_accounts: &mut [KeyedAccount]) -> Result<()> {
|
||||
let globals = lua.globals();
|
||||
let accounts: Table = globals.get(name)?;
|
||||
for (i, keyed_account) in keyed_accounts.into_iter().enumerate() {
|
||||
let account: Table = accounts.get(i + 1)?;
|
||||
keyed_account.account.tokens = account.get("tokens")?;
|
||||
let data_str: rlua::String = account.get("userdata")?;
|
||||
keyed_account.account.userdata = data_str.as_bytes().to_vec();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_lua(keyed_accounts: &mut [KeyedAccount], code: &str, data: &[u8]) -> Result<()> {
|
||||
let lua = Lua::new();
|
||||
let globals = lua.globals();
|
||||
let data_str = lua.create_string(data)?;
|
||||
globals.set("data", data_str)?;
|
||||
|
||||
set_accounts(&lua, "accounts", keyed_accounts)?;
|
||||
lua.exec::<_, ()>(code, None)?;
|
||||
update_accounts(&lua, "accounts", keyed_accounts)
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -> bool {
|
||||
static INIT: Once = ONCE_INIT;
|
||||
INIT.call_once(|| {
|
||||
// env_logger can only be initialized once
|
||||
env_logger::init();
|
||||
});
|
||||
|
||||
if keyed_accounts[0].account.executable {
|
||||
let code = keyed_accounts[0].account.userdata.clone();
|
||||
let code = str::from_utf8(&code).unwrap();
|
||||
match run_lua(&mut keyed_accounts[1..], &code, tx_data) {
|
||||
Ok(()) => {
|
||||
trace!("Lua success");
|
||||
return true;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Lua Error: {:#?}", e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else if let Ok(instruction) = deserialize(tx_data) {
|
||||
match instruction {
|
||||
LoaderInstruction::Write { offset, bytes } => {
|
||||
let offset = offset as usize;
|
||||
let len = bytes.len();
|
||||
trace!("LuaLoader::Write offset {} length {:?}", offset, len);
|
||||
if keyed_accounts[0].account.userdata.len() < offset + len {
|
||||
warn!(
|
||||
"Write overflow {} < {}",
|
||||
keyed_accounts[0].account.userdata.len(),
|
||||
offset + len
|
||||
);
|
||||
return false;
|
||||
}
|
||||
keyed_accounts[0].account.userdata[offset..offset + len].copy_from_slice(&bytes);
|
||||
}
|
||||
|
||||
LoaderInstruction::Finalize => {
|
||||
keyed_accounts[0].account.executable = true;
|
||||
trace!("LuaLoader::Finalize prog: {:?}", keyed_accounts[0].key);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("Invalid program transaction: {:?}", tx_data);
|
||||
return false;
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
extern crate bincode;
|
||||
|
||||
use self::bincode::serialize;
|
||||
use super::*;
|
||||
use solana_sdk::account::{create_keyed_accounts, Account};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn test_update_accounts() -> Result<()> {
|
||||
let mut accounts = [(Pubkey::default(), Account::default())];
|
||||
let mut keyed_accounts = create_keyed_accounts(&mut accounts);
|
||||
let lua = Lua::new();
|
||||
set_accounts(&lua, "xs", &keyed_accounts)?;
|
||||
keyed_accounts[0].account.tokens = 42;
|
||||
keyed_accounts[0].account.userdata = vec![];
|
||||
update_accounts(&lua, "xs", &mut keyed_accounts)?;
|
||||
|
||||
// Ensure update_accounts() overwrites the local value 42.
|
||||
assert_eq!(keyed_accounts[0].account.tokens, 0);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_credit_with_lua() -> Result<()> {
|
||||
let code = r#"accounts[1].tokens = accounts[1].tokens + 1"#;
|
||||
let mut accounts = [(Pubkey::default(), Account::default())];
|
||||
run_lua(&mut create_keyed_accounts(&mut accounts), code, &[])?;
|
||||
assert_eq!(accounts[0].1.tokens, 1);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_with_lua() {
|
||||
let code = r#"accounts[1].tokens += 1"#;
|
||||
let mut accounts = [(Pubkey::default(), Account::default())];
|
||||
assert!(run_lua(&mut create_keyed_accounts(&mut accounts), code, &[]).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_move_funds_with_lua_via_process() {
|
||||
let userdata = r#"
|
||||
local tokens, _ = string.unpack("I", data)
|
||||
accounts[1].tokens = accounts[1].tokens - tokens
|
||||
accounts[2].tokens = accounts[2].tokens + tokens
|
||||
"#.as_bytes()
|
||||
.to_vec();
|
||||
|
||||
let alice_pubkey = Pubkey::default();
|
||||
let bob_pubkey = Pubkey::default();
|
||||
let program_id = Pubkey::default();
|
||||
|
||||
let mut accounts = [
|
||||
(
|
||||
Pubkey::default(),
|
||||
Account {
|
||||
tokens: 1,
|
||||
userdata,
|
||||
program_id,
|
||||
executable: true,
|
||||
loader_program_id: Pubkey::default(),
|
||||
},
|
||||
),
|
||||
(alice_pubkey, Account::new(100, 0, program_id)),
|
||||
(bob_pubkey, Account::new(1, 0, program_id)),
|
||||
];
|
||||
let data = serialize(&10u64).unwrap();
|
||||
process(&mut create_keyed_accounts(&mut accounts), &data);
|
||||
assert_eq!(accounts[1].1.tokens, 90);
|
||||
assert_eq!(accounts[2].1.tokens, 11);
|
||||
|
||||
process(&mut create_keyed_accounts(&mut accounts), &data);
|
||||
assert_eq!(accounts[1].1.tokens, 80);
|
||||
assert_eq!(accounts[2].1.tokens, 21);
|
||||
}
|
||||
|
||||
fn read_test_file(name: &str) -> Vec<u8> {
|
||||
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
path.push(name);
|
||||
let mut file = File::open(path).unwrap();
|
||||
let mut contents = vec![];
|
||||
file.read_to_end(&mut contents).unwrap();
|
||||
contents
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_load_lua_library() {
|
||||
let userdata = r#"
|
||||
local serialize = load(accounts[2].userdata)().serialize
|
||||
accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s")
|
||||
"#.as_bytes()
|
||||
.to_vec();
|
||||
let program_id = Pubkey::default();
|
||||
let program_account = Account {
|
||||
tokens: 1,
|
||||
userdata,
|
||||
program_id,
|
||||
executable: true,
|
||||
loader_program_id: Pubkey::default(),
|
||||
};
|
||||
let alice_account = Account::new(100, 0, program_id);
|
||||
let serialize_account = Account {
|
||||
tokens: 100,
|
||||
userdata: read_test_file("serialize.lua"),
|
||||
program_id,
|
||||
executable: false,
|
||||
loader_program_id: Pubkey::default(),
|
||||
};
|
||||
let mut accounts = [
|
||||
(Pubkey::default(), program_account),
|
||||
(Pubkey::default(), alice_account),
|
||||
(Pubkey::default(), serialize_account),
|
||||
(Pubkey::default(), Account::new(1, 0, program_id)),
|
||||
];
|
||||
let mut keyed_accounts = create_keyed_accounts(&mut accounts);
|
||||
process(&mut keyed_accounts, &[]);
|
||||
// Verify deterministic ordering of a serialized Lua table.
|
||||
assert_eq!(
|
||||
str::from_utf8(&keyed_accounts[3].account.userdata).unwrap(),
|
||||
"{a=1,b=2,c=3}"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lua_multisig() {
|
||||
let program_id = Pubkey::default();
|
||||
|
||||
let alice_pubkey = Pubkey::new(&[0; 32]);
|
||||
let serialize_pubkey = Pubkey::new(&[1; 32]);
|
||||
let state_pubkey = Pubkey::new(&[2; 32]);
|
||||
let bob_pubkey = Pubkey::new(&[3; 32]);
|
||||
let carol_pubkey = Pubkey::new(&[4; 32]);
|
||||
let dan_pubkey = Pubkey::new(&[5; 32]);
|
||||
let erin_pubkey = Pubkey::new(&[6; 32]);
|
||||
|
||||
let program_account = Account {
|
||||
tokens: 1,
|
||||
userdata: read_test_file("multisig.lua"),
|
||||
program_id,
|
||||
executable: true,
|
||||
loader_program_id: Pubkey::default(),
|
||||
};
|
||||
|
||||
let alice_account = Account {
|
||||
tokens: 100,
|
||||
userdata: Vec::new(),
|
||||
program_id,
|
||||
executable: true,
|
||||
loader_program_id: Pubkey::default(),
|
||||
};
|
||||
|
||||
let serialize_account = Account {
|
||||
tokens: 100,
|
||||
userdata: read_test_file("serialize.lua"),
|
||||
program_id,
|
||||
executable: true,
|
||||
loader_program_id: Pubkey::default(),
|
||||
};
|
||||
|
||||
let mut accounts = [
|
||||
(Pubkey::default(), program_account), // Account holding the program
|
||||
(alice_pubkey, alice_account), // The payer
|
||||
(serialize_pubkey, serialize_account), // Where the serialize library is stored.
|
||||
(state_pubkey, Account::new(1, 0, program_id)), // Where program state is stored.
|
||||
(bob_pubkey, Account::new(1, 0, program_id)), // The payee once M signatures are collected.
|
||||
];
|
||||
let mut keyed_accounts = create_keyed_accounts(&mut accounts);
|
||||
|
||||
let data = format!(
|
||||
r#"{{m=2, n={{"{}","{}","{}"}}, tokens=100}}"#,
|
||||
carol_pubkey, dan_pubkey, erin_pubkey
|
||||
).as_bytes()
|
||||
.to_vec();
|
||||
|
||||
process(&mut keyed_accounts, &data);
|
||||
assert_eq!(keyed_accounts[4].account.tokens, 1);
|
||||
|
||||
let data = format!(r#""{}""#, carol_pubkey).into_bytes();
|
||||
process(&mut keyed_accounts, &data);
|
||||
assert_eq!(keyed_accounts[4].account.tokens, 1);
|
||||
|
||||
let data = format!(r#""{}""#, dan_pubkey).into_bytes();
|
||||
process(&mut keyed_accounts, &data);
|
||||
assert_eq!(keyed_accounts[4].account.tokens, 101); // Pay day!
|
||||
|
||||
let data = format!(r#""{}""#, erin_pubkey).into_bytes();
|
||||
process(&mut keyed_accounts, &data);
|
||||
assert_eq!(keyed_accounts[4].account.tokens, 101); // No change!
|
||||
}
|
||||
}
|
15
programs/native/noop/Cargo.toml
Normal file
@ -0,0 +1,15 @@
|
||||
[package]
|
||||
name = "solana-noop"
|
||||
version = "0.10.3"
|
||||
description = "Solana noop program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../../../sdk", version = "0.10.3" }
|
||||
|
||||
[lib]
|
||||
name = "noop"
|
||||
crate-type = ["cdylib"]
|
||||
|
10
programs/native/noop/src/lib.rs
Normal file
@ -0,0 +1,10 @@
|
||||
extern crate solana_sdk;
|
||||
|
||||
use solana_sdk::account::KeyedAccount;
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> bool {
|
||||
println!("noop: keyed_accounts: {:#?}", keyed_accounts);
|
||||
println!("noop: data: {:?}", data);
|
||||
true
|
||||
}
|
@ -1,10 +1,17 @@
|
||||
# Smart Contracts Engine
|
||||
|
||||
The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.
|
||||
The goal of this RFC is to define a set of constraints for APIs and the smart contract runtime such that we can execute our contracts safely on massively parallel hardware such as a GPU.
|
||||
|
||||
## Version
|
||||
|
||||
version 0.2
|
||||
Version 0.3
|
||||
|
||||
## Definitions
|
||||
|
||||
* Transaction - an atomic operation with multiple instructions. All Instructions must complete successfully for the transaction to be committed.
|
||||
* Instruction - a call to a program that modifies Account token balances and Account specific userdata state. A single transaction may have multiple Instructions with different Accounts and Programs.
|
||||
* Program - Programs are code that modifies Account token balances and Account specific userdata state.
|
||||
* Account - A single instance of state. Accounts are looked up by account Pubkeys and are associated with a Program's Pubkey.
|
||||
|
||||
## Toolchain Stack
|
||||
|
||||
@ -39,173 +46,136 @@ In Figure 1 an untrusted client, creates a program in the front-end language of
|
||||
|
||||
## Runtime
|
||||
|
||||
The goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. The goal is to execute as many contracts as possible in parallel, and have them pass or fail without a destructive state change.
|
||||
The goal with the runtime is to have a general-purpose execution environment that is highly parallelizable. To achieve this goal the runtime forces each Instruction to specify all of its memory dependencies up front, and therefore a single Instruction cannot cause a dynamic memory allocation. An explicit `SystemProgram::CreateAccount` Instruction is the only way to allocate new memory in the engine. A Transaction may compose multiple Instructions, including `SystemProgram::CreateAccount`, into a single atomic sequence, which allows memory allocation to achieve a result similar to dynamic allocation.
|
||||
|
||||
|
||||
### State
|
||||
|
||||
State is addressed by an account which is at the moment simply the Pubkey. Our goal is to eliminate memory allocation from within the smart contract itself. Thus the client of the contract provides all the state that is necessary for the contract to execute in the transaction itself. The runtime interacts with the contract through a state transition function, which takes a mapping of [(Pubkey,State)] and returns [(Pubkey, State')]. The State is an opaque type to the runtime, a `Vec<u8>`, the contents of which the contract has full control over.
|
||||
State is addressed by an Account which is at the moment simply the Pubkey. Our goal is to eliminate memory allocation from within the program itself. Thus the client of the program provides all the state that is necessary for the program to execute in the transaction itself. The runtime interacts with the program through an entry point with a well defined interface. The userdata stored in an Account is an opaque type to the runtime, a `Vec<u8>`, the contents of which the program code has full control over.
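As an illustration of that opacity (a sketch, not part of the runtime or SDK; the state layout and names here are hypothetical), a program is free to encode whatever it wants into the userdata bytes, for example with bincode:

```
extern crate bincode;
#[macro_use]
extern crate serde_derive;

/// Hypothetical program-defined state; the runtime only ever sees raw bytes.
#[derive(Serialize, Deserialize, Default)]
struct CounterState {
    count: u64,
}

/// Decode the opaque userdata, update it, and write it back in place.
fn bump(userdata: &mut Vec<u8>) {
    let mut state: CounterState = bincode::deserialize(&userdata[..]).unwrap_or_default();
    state.count += 1;
    *userdata = bincode::serialize(&state).unwrap();
}
```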
|
||||
|
||||
### Call Structure
|
||||
### Transaction structure
|
||||
```
|
||||
/// Call definition
|
||||
/// Signed portion
|
||||
/// An atomic transaction
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct CallData {
|
||||
/// Each Pubkey in this vector is mapped to a corresponding `Page` that is loaded for contract execution
|
||||
/// In a simple pay transaction `key[0]` is the token owner's key and `key[1]` is the recipient's key.
|
||||
pub keys: Vec<Pubkey>,
|
||||
pub struct Transaction {
|
||||
/// A digital signature of `account_keys`, `program_ids`, `last_id`, `fee` and `instructions`, signed by `Pubkey`.
|
||||
pub signature: Signature,
|
||||
|
||||
/// The Pubkeys that are required to have a proof. The proofs are a `Vec<Signature>` which is encoded alongside this data structure
|
||||
/// Each Signature signs the `required_proofs` vector as well as the `keys` vectors. The transaction is valid if and only if all
|
||||
/// the required signatures are present and the public key vector is unchanged between signatures.
|
||||
pub required_proofs: Vec<u8>,
|
||||
/// The `Pubkeys` that this transaction's instructions operate on. The meaning of each key is
|
||||
/// program-specific.
|
||||
/// * account_keys[0] - Typically this is the `caller` public key. `signature` is verified with account_keys[0].
|
||||
/// In the future which key pays the fee and which keys have signatures would be configurable.
|
||||
/// * account_keys[1] - Typically this is the program context or the recipient of the tokens
|
||||
pub account_keys: Vec<Pubkey>,
|
||||
|
||||
/// PoH data
|
||||
/// last PoH hash observed by the sender
|
||||
/// The ID of a recent ledger entry.
|
||||
pub last_id: Hash,
|
||||
|
||||
/// Program
|
||||
/// The address of the program we want to call. ContractId is just a Pubkey that is the address of the loaded code that will execute this Call.
|
||||
pub contract_id: ContractId,
|
||||
/// OS scheduling fee
|
||||
/// The number of tokens paid for processing and storage of this transaction.
|
||||
pub fee: i64,
|
||||
/// struct version to prevent duplicate spends
|
||||
/// Calls with a version <= Page.version are rejected
|
||||
pub version: u64,
|
||||
/// method to call in the contract
|
||||
pub method: u8,
|
||||
/// usedata in bytes
|
||||
pub userdata: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Call {
|
||||
/// Signatures and Keys
|
||||
/// (signature, key index)
|
||||
/// This vector contains a tuple of signatures, and the key index the signature is for
|
||||
/// proofs[0] is always key[0]
|
||||
pub proofs: Vec<Signature>,
|
||||
pub data: CallData,
|
||||
/// Keys identifying programs in the instructions vector.
|
||||
pub program_ids: Vec<Pubkey>,
|
||||
/// Programs that will be executed in sequence and committed in one atomic transaction if all
|
||||
/// succeed.
|
||||
pub instructions: Vec<Instruction>,
|
||||
}
|
||||
```
|
||||
|
||||
At its core, this is just a set of Pubkeys and Signatures with a bit of metadata. The contract Pubkey routes this transaction into that contract's entry point. `version` is used for dropping retransmitted requests.
|
||||
The Transaction structure specifies a list of Pubkeys and signatures for those keys, and a sequential list of instructions that will operate over the states associated with the `account_keys`. For the transaction to be committed, all the instructions must execute successfully; if any aborts, the whole transaction fails to commit.
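The `Instruction` type referenced above lives in the SDK and is not shown in this diff; roughly, each entry carries an index selecting a program, the accounts it touches, and opaque userdata (a sketch, field names are illustrative):

```
pub struct Instruction {
    /// Index into `Transaction::program_ids` selecting the program to run.
    pub program_ids_index: u8,
    /// Opaque, program-specific input bytes.
    pub userdata: Vec<u8>,
    /// Indices into `Transaction::account_keys` naming the accounts this
    /// instruction reads or writes.
    pub accounts: Vec<u8>,
}
```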
|
||||
|
||||
Contracts should be able to read any state that is part of runtime, but only write to state that the contract allocated.
|
||||
### Account structure
|
||||
Accounts maintain token state as well as program specific memory.
|
||||
```
|
||||
/// An Account with userdata that is stored on chain
|
||||
pub struct Account {
|
||||
/// tokens in the account
|
||||
pub tokens: i64,
|
||||
/// user data
|
||||
/// A transaction can write to its userdata
|
||||
pub userdata: Vec<u8>,
|
||||
/// program id this Account belongs to
|
||||
pub program_id: Pubkey,
|
||||
}
|
||||
```
|
||||
|
||||
### Execution
|
||||
# Transaction Engine
|
||||
|
||||
Calls batched and processed in a pipeline
|
||||
At its core, the engine looks up all the Pubkeys, maps them to accounts, and routes them to the `program_id` entry point.
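A rough sketch of that dispatch (assuming the `Transaction` and `Account` structs above and the `process_transaction` entry point shown in the Entry Point section below; the function name `run_transaction` is illustrative):

```
fn run_transaction(tx: &Transaction, loaded: &mut Vec<Account>) -> Result<()> {
    // `loaded` holds one Account per entry of `tx.account_keys`, in order.
    for pix in 0..tx.instructions.len() {
        // Hand every loaded account to the program selected by this
        // instruction's program_id; abort the whole transaction on failure.
        let mut refs: Vec<&mut Account> = loaded.iter_mut().collect();
        process_transaction(tx, pix, &mut refs)?;
    }
    Ok(())
}
```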
|
||||
|
||||
## Execution
|
||||
|
||||
Transactions are batched and processed in a pipeline
|
||||
|
||||
```
|
||||
+-----------+ +-------------+ +--------------+ +--------------------+
|
||||
| sigverify |--->| lock memory |--->| validate fee |--->| allocate new pages |--->
|
||||
| sigverify |--->| lock memory |--->| validate fee |--->| allocate accounts |--->
|
||||
+-----------+ +-------------+ +--------------+ +--------------------+
|
||||
|
||||
+------------+ +---------+ +--------------+ +--------------+
|
||||
--->| load pages |--->| execute |--->|unlock memory |--->| commit pages |
|
||||
+------------+ +---------+ +--------------+ +--------------+
|
||||
+------------+ +---------+ +--------------+ +--------------+
|
||||
--->| load data |--->| execute |--->| commit data |-->|unlock memory |
|
||||
+------------+ +---------+ +--------------+ +--------------+
|
||||
|
||||
```
|
||||
|
||||
At the `execute` stage, the loaded pages have no data dependencies, so all the contracts can be executed in parallel.
|
||||
## Memory Management
|
||||
```
|
||||
pub struct Page {
|
||||
/// key that indexes this page
|
||||
/// prove ownership of this key to spend from this Page
|
||||
owner: Pubkey,
|
||||
/// contract that owns this page
|
||||
/// contract can write to the data that is in `memory` vector
|
||||
contract: Pubkey,
|
||||
/// balance that belongs to owner
|
||||
balance: u64,
|
||||
/// version of the structure, public for testing
|
||||
version: u64,
|
||||
/// hash of the page data
|
||||
memhash: Hash,
|
||||
/// The following could be in a separate structure
|
||||
memory: Vec<u8>,
|
||||
}
|
||||
```
|
||||
At the `execute` stage, the loaded pages have no data dependencies, so all the programs can be executed in parallel.
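Since each batch entry holds exclusive locks on its accounts by this stage, the execute stage can simply fan out over the batch; a sketch (using rayon purely for illustration, not the actual scheduler, and reusing `run_transaction` from the sketch above):

```
extern crate rayon;
use rayon::prelude::*;

/// Each element pairs a transaction with the accounts it has already locked
/// and loaded, so no two elements alias the same account memory.
fn execute_stage(batch: &mut Vec<(Transaction, Vec<Account>)>) {
    batch.par_iter_mut().for_each(|(tx, accounts)| {
        // Failures are recorded per transaction; they never poison the batch.
        let _ = run_transaction(tx, accounts);
    });
}
```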
|
||||
|
||||
The guarantee that runtime enforces:
|
||||
1. The contract code is the only code that will modify the contents of `memory`
|
||||
2. Total balances on all the pages are equal before and after execution of a call
|
||||
3. Balances of each of the pages not owned by the contract must be equal to or greater after the call than before the call.
|
||||
The runtime enforces the following rules:
|
||||
|
||||
1. The `program_id` code is the only code that will modify the contents of `Account::userdata` of Accounts that have been assigned to it. This means that upon assignment the userdata vector is guaranteed to be zeroed.
|
||||
2. Total balances across all the accounts are equal before and after execution of a Transaction (see the checking sketch after this list).
|
||||
3. Balances of each of the accounts not assigned to `program_id` must be equal to or greater after the Transaction than before the transaction.
|
||||
4. All Instructions in the Transaction executed without a failure.
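Rules 2 and 3 lend themselves to a mechanical check by snapshotting balances around the call; a sketch over the `Account` struct above (it assumes `Pubkey` supports equality comparison):

```
/// Returns true if token conservation held: the total is unchanged and no
/// account the program does not own lost tokens.
fn balances_ok(program_id: &Pubkey, before: &[Account], after: &[Account]) -> bool {
    let total = |accs: &[Account]| accs.iter().map(|a| a.tokens).sum::<i64>();
    if total(before) != total(after) {
        return false;
    }
    before
        .iter()
        .zip(after.iter())
        .all(|(b, a)| a.program_id == *program_id || a.tokens >= b.tokens)
}
```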
|
||||
|
||||
## Entry Point
|
||||
Execution of the contract involves mapping the contract's public key to an entry point which takes a pointer to the transaction, and an array of loaded pages.
|
||||
Execution of the program involves mapping the Program's public key to an entry point which takes a pointer to the transaction, and an array of loaded pages.
|
||||
|
||||
```
|
||||
// Find the method
|
||||
match (tx.contract, tx.method) {
|
||||
// system interface
|
||||
// everyone has the same reallocate
|
||||
(_, 0) => system_0_realloc(&tx, &mut call_pages),
|
||||
(_, 1) => system_1_assign(&tx, &mut call_pages),
|
||||
// contract methods
|
||||
(DEFAULT_CONTRACT, 128) => default_contract_128_move_funds(&tx, &mut call_pages),
|
||||
(contract, method) => //...
|
||||
pub fn process_transaction(
|
||||
tx: &Transaction,
|
||||
pix: usize,
|
||||
accounts: &mut [&mut Account],
|
||||
) -> Result<()>;
|
||||
```
|
||||
|
||||
The first 127 methods are reserved for the system interface, which implements allocation and assignment of memory. The rest, including the contract for moving funds are implemented by the contract itself.
|
||||
|
||||
## System Interface
|
||||
```
|
||||
/// SYSTEM interface, same for every contract, methods 0 to 127
|
||||
/// method 0
|
||||
/// reallocate
|
||||
/// spend the funds from the call to the first recipient's
|
||||
pub fn system_0_realloc(call: &Call, pages: &mut Vec<Page>) {
|
||||
if call.contract == DEFAULT_CONTRACT {
|
||||
let size: u64 = deserialize(&call.userdata).unwrap();
|
||||
pages[0].memory.resize(size as usize, 0u8);
|
||||
}
|
||||
pub enum SystemProgram {
|
||||
/// Create a new account
|
||||
/// * Transaction::keys[0] - source
|
||||
/// * Transaction::keys[1] - new account key
|
||||
/// * tokens - number of tokens to transfer to the new account
|
||||
/// * space - memory to allocate if greater then zero
|
||||
/// * program_id - the program id of the new account
|
||||
CreateAccount {
|
||||
tokens: i64,
|
||||
space: u64,
|
||||
program_id: Pubkey,
|
||||
},
|
||||
/// Assign account to a program
|
||||
/// * Transaction::keys[0] - account to assign
|
||||
Assign { program_id: Pubkey },
|
||||
/// Move tokens
|
||||
/// * Transaction::keys[0] - source
|
||||
/// * Transaction::keys[1] - destination
|
||||
Move { tokens: i64 },
|
||||
}
|
||||
/// method 1
|
||||
/// assign
|
||||
/// assign the page to a contract
|
||||
pub fn system_1_assign(call: &Call, pages: &mut Vec<Page>) {
|
||||
let contract = deserialize(&call.userdata).unwrap();
|
||||
if call.contract == DEFAULT_CONTRACT {
|
||||
pages[0].contract = contract;
|
||||
//zero out the memory in pages[0].memory
|
||||
//Contracts need to own the state of that data otherwise a use could fabricate the state and
|
||||
//manipulate the contract
|
||||
pages[0].memory.clear();
|
||||
}
|
||||
}
|
||||
```
|
||||
The first method resizes the memory that is assosciated with the callers page. The second system call assignes the page to the contract. Both methods check if the current contract is 0, otherwise the method does nothing and the caller spent their fees.
|
||||
|
||||
This ensures that when memory is assigned to the contract the initial state of all the bytes is 0, and the contract itself is the only thing that can modify that state.
|
||||
|
||||
## Simplest contract

```
/// DEFAULT_CONTRACT interface
/// All contracts start with 128
/// method 128
/// move_funds
/// spend the funds from the call to the first recipient's
pub fn default_contract_128_move_funds(call: &Call, pages: &mut Vec<Page>) {
    let amount: u64 = deserialize(&call.userdata).unwrap();
    if pages[0].balance >= amount {
        pages[0].balance -= amount;
        pages[1].balance += amount;
    }
}
```

This simply moves the amount from page[0], which is the caller's page, to page[1], which is the recipient's page.

The interface is best described by the `Instruction::userdata` that the user encodes.

* `CreateAccount` - This allows the user to create and assign an Account to a Program.
* `Assign` - allows the user to assign an existing account to a `Program`.
* `Move` - moves tokens between `Account`s that are associated with the `SystemProgram`. This cannot be used to move tokens of other `Account`s; Programs need to implement their own version of Move. (A sketch of encoding these variants follows.)
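
To make the encoding concrete, here is a minimal sketch of building the `Instruction::userdata` for a `Move`. It assumes the `SystemProgram` enum above derives `Serialize` and that `serialize` is the same bincode-style helper used in the earlier snippets; `encode_move` is an invented name.

```
// Hypothetical: encode a SystemProgram instruction into Instruction::userdata.
fn encode_move(tokens: i64) -> Vec<u8> {
    let instruction = SystemProgram::Move { tokens };
    // the runtime later decodes this from Instruction::userdata
    serialize(&instruction).unwrap()
}
```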

## Notes

1. There is no dynamic memory allocation.
2. Persistent Memory is allocated to a Key with ownership
3. Contracts can `call` to update key owned state
4. `call` is just a *syscall* that does a cryptographic check of memory ownership
5. Kernel guarantees that when memory is assigned to the contract its state is 0
6. Kernel guarantees that the contract is the only thing that can modify memory that is assigned to it
7. Kernel guarantees that the contract can only spend tokens that are in pages that are assigned to it
8. Kernel guarantees the balances belonging to pages are balanced before and after the call

1. There is no dynamic memory allocation. Clients need to call the `SystemProgram` to create memory before passing it to another program. This Instruction can be composed into a single Transaction with the call to the program itself.
2. Runtime guarantees that when memory is assigned to the `Program` it is zero initialized.
3. Runtime guarantees that the `Program`'s code is the only thing that can modify memory that is assigned to it.
4. Runtime guarantees that the `Program` can only spend tokens that are in `Account`s that are assigned to it.
5. Runtime guarantees the balances belonging to `Account`s are balanced before and after the transaction.
6. Runtime guarantees that multiple instructions all executed successfully when a transaction is committed.

# Future Work

* Continuations and Signals for long running Transactions. https://github.com/solana-labs/solana/issues/1485

@ -1,32 +1,42 @@

# Consensus

VERY WIP

The goal of this RFC is to define the consensus algorithm used in Solana. This proposal covers a Proof of Stake (PoS) algorithm that leverages Proof of History (PoH). PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time among partitions.

The goal of this RFC is to define the consensus algorithm used in Solana. This proposal covers a Proof of Stake algorithm that leverages Proof of History. PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time between partitions.

## Version

version 0.1
version 0.4

## Basic Design Idea

Nodes on the network can be "up" or "down". A node indicates it is up either by voting as a validator or by generating a PoH stream as the designated leader. Consensus is reached when a supermajority + 1 of the staked nodes have voted on the state of the network at a particular PoH tick count.

Nodes take turns being leader and generating the PoH that encodes state changes. The network can tolerate loss of connection to any leader by synthesizing what the leader ***would have generated*** had it been connected but not ingesting any state changes. The complexity of forks is thereby limited to a "there/not-there" skip list of branches that may arise on leader rotation period boundaries.

## Message Flow

1. Transactions are ingested at the leader.
2. Leader filters for valid transactions
3. Leader executes valid transactions on its state
4. Leader packages transactions into blobs
5. Leader transmits blobs to validator nodes.
1. Transactions are ingested at the current leader.
2. Leader filters for valid transactions.
3. Leader executes valid transactions on its state.
4. Leader packages transactions into entries based off the longest observed PoH branch.
5. Leader transmits the entries to validator nodes (in signed blobs)
   a. The set of supermajority + `M` by stake weight of nodes is rotated in round robin fashion.
6. Validators retransmit blobs to peers in their set and to further downstream nodes.
   b. The PoH stream includes ticks; empty entries that indicate liveness of the leader and the passage of time on the network.
   c. A leader's stream begins with the tick entries necessary to complete the PoH back to that node's most recently observed prior leader period.
6. Validators retransmit entries to peers in their set and to further downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. Validators transmit votes to the leader.
   a. Votes are signatures of the hash of the computed state.
10. Leader executes the votes as any other transaction and broadcasts them out to the network
11. Validators observe their votes, and all the votes from the network.
12. Validators continue voting if the supermajority of stake is observed in the vote for the same hash.
9. At specific times, i.e. specific PoH tick counts, validators transmit votes to the leader.
   a. Votes are signatures of the hash of the computed state at that PoH tick count
10. Leader executes the votes as any other transaction and broadcasts them to the network
    a. The leader votes at that same height once a majority of stake is represented on the PoH stream *(open question: do leaders vote?)*
11. Validators observe their votes and all the votes from the network.
12. Validators vote on the longest chain of periods that contains their vote.

Supermajority is defined as `2/3rds + 1` vote of the PoS stakes.
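
As an illustration of the `2/3rds + 1` threshold, a minimal sketch (the helper name and the integer arithmetic are assumptions) that decides whether observed votes reach supermajority by stake weight:

```
// Hypothetical helper: does the observed voting stake reach supermajority?
fn is_supermajority(voted_stake: u64, total_stake: u64) -> bool {
    voted_stake >= (total_stake * 2) / 3 + 1
}
// e.g. with total_stake = 90 the threshold is 61, so 60 voted stake is not enough.
```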

## Staking

Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.

@ -43,7 +53,7 @@ CreateStake(
)
```

Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain amount of validation votes.
Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain number of validation voting rounds.

## Validation Votes

@ -63,7 +73,7 @@ Validators `stake` some of their spendable sol into a staking account. The stak

```
Slash(Validate(
    PoH count,
    PoH tick count,
    PoH hash,
    stake public key,
    ...
@ -75,48 +85,14 @@ When the `Slash` vote is processed, validators should lookup `PoH hash` at `PoH

## Leader Slashing

TBD. The goal of this is to discourage leaders from generating multiple PoH streams.
The goal is to discourage leaders from generating multiple PoH streams. When this occurs, the network adopts ticks for that leader's period. Leaders can be slashed for generating multiple conflicting PoH streams during their period.

## Validation Vote Contract

The goal of this contract is to simulate the economic cost of mining on a shorter branch.

1. With my signature I am certifying that I computed `state hash` at `PoH count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH count` + `N` is reached by the PoH stream.
1. With my signature I am certifying that I computed `state hash` at `PoH tick count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH tick count` + `N` is reached by the PoH stream (lockout period). (A sketch of this lockout check follows the list.)
3. I will not vote for any other branch below `PoH count`.
   a. if there are other votes not present in this PoH history the validator may need to `cancel` them before creating this vote.
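
A minimal sketch of the lockout rule in item 2 above; the `Vote` struct and its field names are hypothetical, and the lockout length is the `N` the vote committed to:

```
// Hypothetical representation of a prior vote and its lockout.
struct Vote {
    poh_tick_count: u64, // tick count whose state hash the vote certified
    lockout: u64,        // the `N` counts the validator committed to
}

// A validator may only vote on a branch that excludes `vote` once the PoH
// stream has advanced past the vote's lockout.
fn can_switch_branch(current_tick_count: u64, vote: &Vote) -> bool {
    current_tick_count >= vote.poh_tick_count + vote.lockout
}
```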

## Leader Seed Generation

Leader selection is decided via a random seed. The process is as follows:

1. Periodically at a specific `PoH count` select the first vote signatures that create a supermajority from the previous round.
2. Append them together.
3. Hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts, where M > N

## Leader Ranking and Rotation

Leaders transmit for a count of `T`. When `T` is reached all the validators should switch to the next ranked leader. To rank leaders, the supermajority + `M` nodes are shuffled using the above calculated random seed.

TBD: define a ranking for critical partitions without a node from the supermajority + `M` set.

## Partition selection

Validators should select the first branch to reach finality, or the highest ranking leader.

## Examples

### Small Partition
1. Network partition M occurs for 10% of the nodes
2. The larger partition K, with 90% of the stake weight continues to operate as normal
3. M cycles through the ranks until one of them is leader.
4. M validators observe 10% of the vote pool, finality is not reached
5. M and K re-connect.
6. M validators cancel their votes on K which are below K's `PoH count`

### Leader Timeout
1. Next rank node observes a timeout.
2. Nodes receiving both PoH streams pick the higher rank node.
3. Step 2 causes a partition, since nodes can only vote for one leader.
4. Partition is resolved just like in the [Small Partition](#small-partition)
4. Each vote on a branch increases the lockout for all prior votes on that branch according to a network-specified function.

@ -52,3 +52,4 @@ Our solution to this is to force the clients to continue using the same identity

* Replicator clients fish for lazy validators by submitting fake proofs that they can prove are fake.
* Replication identities are just symmetric encryption keys, the number of them on the network is our storage replication target. Many more client identities can exist than replicator identities, so an unlimited number of clients can provide proofs of the same replicator identity.
* To defend against Sybil client identities that try to store the same block we force the clients to store for multiple rounds before receiving a reward.
* Validators should also get rewarded for validating submitted storage proofs as incentive for storing the ledger. They can only validate proofs if they are storing that slice of the ledger.

108
rfcs/rfc-004-leader-rotation.md
Normal file
@ -0,0 +1,108 @@

# Leader Rotation

The goal of this RFC is to define how leader nodes are rotated in Solana, how rotation may cause forks to arise, and how the network converges in response.

## Version

version 0.1

## Leader Seed Generation

Leader selection is decided via a random seed. The process is as follows:

1. Periodically at a specific `PoH tick count` select the first vote signatures that create a supermajority from the previous voting round.
2. Append them together.
3. Hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts, `M` leader periods, where M > N. (A sketch of this derivation follows the list.)
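
A minimal sketch of this derivation; the `sha2` crate and the fixed 32-byte output are assumptions standing in for the network's actual PoH hash function, and the vote-signature selection is assumed to have happened already:

```
// Hypothetical seed derivation: append the supermajority vote signatures and
// hash the result N times, similar to PoH itself. Assumes the sha2 crate.
use sha2::{Digest, Sha256};

fn leader_seed(vote_signatures: &[Vec<u8>], n: u64) -> [u8; 32] {
    // steps 1-2: append the selected signatures together
    let mut seed: Vec<u8> = vote_signatures.concat();
    // step 3: hash the string for N counts
    for _ in 0..n {
        seed = Sha256::digest(&seed).as_slice().to_vec();
    }
    // step 4: the resulting hash is the random seed
    let mut out = [0u8; 32];
    out.copy_from_slice(&seed);
    out
}
```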

## Leader Rotation

1. The leader is chosen via a random seed generated from stake weights and votes (the leader schedule)
2. The leader is rotated every `T` PoH ticks (leader period), according to the leader schedule
3. The schedule is applicable for `M` voting rounds

Leaders transmit for a count of `T` PoH ticks. When `T` is reached all the validators should switch to the next scheduled leader. To schedule leaders, the supermajority + `M` nodes are shuffled using the above calculated random seed.

All `T` ticks must be observed from the current leader for that part of PoH to be accepted by the network. If `T` ticks (and any intervening transactions) are not observed, the network optimistically fills in the `T` ticks, and continues with PoH from the next leader.

## Partitions, Forks

Forks can arise at PoH tick counts that correspond to leader rotations, because leader nodes may or may not have observed the previous leader's data. These empty ticks are generated by all nodes in the network at a network-specified rate of hashes per tick, `Z`.

There are only two possible versions of the PoH during a voting period: PoH with `T` ticks and entries generated by the current leader, or PoH with just ticks. The "just ticks" version of the PoH can be thought of as a virtual ledger, one that all nodes in the network can derive from the last tick in the previous period.

Validators can ignore forks at other points (e.g. from the wrong leader), or slash the leader responsible for the fork.

Validators vote on the longest chain that contains their previous vote, or a longer chain if the lockout on their previous vote has expired.

#### Validator's View

##### Time Progression
The diagram below represents a validator's view of the PoH stream with possible forks over time. L1, L2, etc. are leader periods, and `E`s represent entries from that leader during that leader's period. The 'x's represent ticks only, and time flows downwards in the diagram.


```
time  +----+                                      validator action
  |   | L1 |                 E(L1)
  |   |----|                 /   \                 vote(E(L2))
  |   | L2 |            E(L2)     x
  |   |----|            /  \     / \               vote(E(L2))
  |   | L3 |       E(L3)    x  E(L3)'  x
  |   |----|       / \     / \   / \   / \         slash(L3)
  |   | L4 |      x   x  E(L4)  x   x x   x  x
  V   |----|      |   |    |    |   | |   |  |     vote(E(L4))
  V   | L5 |     xx  xx   xx  E(L5) xx xx xx xx
  V   +----+                                       hang on to E(L4) and E(L5) for more...
```

Note that an `E` appearing on 2 branches at the same period is a slashable condition, so a validator observing `E(L3)` and `E(L3)'` can slash L3 and safely choose `x` for that period. Once a validator observes a supermajority vote on any branch, other branches can be discarded below that tick count. For any period, validators need only consider a single "has entries" chain or a "ticks only" chain.

##### Time Division

It's useful to consider leader rotation over PoH tick count as time division of the job of encoding state for the network. The following table presents the above tree of forks as a time-divided ledger.

leader period | L1    | L2    | L3    | L4    | L5
--------------|-------|-------|-------|-------|------
data          | E(L1) | E(L2) | E(L3) | E(L4) | E(L5)
ticks to prev |       |       |       | x     | xx

Note that only data from leader L3 will be accepted during leader period L3. Data from L3 may include "catchup" ticks back to a period other than L2 if L3 did not observe L2's data. L4 and L5's transmissions include the "ticks to prev" PoH entries.

This arrangement of the network data streams permits nodes to save exactly this to the ledger for replay, restart, and checkpoints.

#### Leader's View

When a new leader begins a period, it must first transmit any PoH (ticks) required to link the new period with the most recently observed and voted period.

## Examples

### Small Partition
1. Network partition M occurs for 10% of the nodes
2. The larger partition K, with 90% of the stake weight continues to operate as normal
3. M cycles through the ranks until one of them is leader, generating ticks for periods where the leader is in K.
4. M validators observe 10% of the vote pool, finality is not reached.
5. M and K re-connect.
6. M validators cancel their votes on M, which has not reached finality, and re-cast on K (after their vote lockout on M).

### Leader Timeout
1. Next rank leader node B observes a timeout from current leader A, fills in A's period with virtual ticks and starts sending out entries.
2. Nodes observing both streams keep track of the forks, waiting for:
   a. their vote on leader A to expire in order to be able to vote on B
   b. a supermajority on A's period
3. If (a) occurs, leader B's period is filled with ticks; if (b) occurs, A's period is filled with ticks.
4. Partition is resolved just like in the [Small Partition](#small-partition)

## Network Variables

`M` - number of nodes outside the supermajority to whom leaders broadcast their PoH for validation

`N` - number of voting rounds for which a leader schedule is considered before a new leader schedule is used

`T` - number of PoH ticks per leader period (also voting period)

`Z` - number of hashes per PoH tick
@ -1,77 +0,0 @@

Two players want to play tic-tac-toe with each other on Solana.

The tic-tac-toe program has already been provisioned on the network, and the program author has advertised the following information to potential gamers:
* `tictactoe_publickey` - the program's public key
* `tictactoe_gamestate_size` - the number of bytes needed to maintain the game state

The game state is a well-documented data structure consisting of:
- Player 1's public key
- Player 2's public key
- Game status. An 8-bit value where:
  * 0 = game uninitialized
  * 1 = Player 1's turn
  * 2 = Player 2's turn
  * 3 = Player 1 won
  * 4 = Player 2 won
- Current board configuration. A 3x3 character array containing the values '\0', 'X' or 'O'. (A sketch of this layout follows the list.)
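
The bullets above translate naturally into a fixed-size layout. Below is a hypothetical Rust sketch of that game state; the field names, the `#[repr(C)]` choice, and the 32-byte key size are illustrative assumptions, not the tic-tac-toe program's actual code:

```
// Hypothetical layout of the game state described above.
#[repr(C)]
pub struct GameState {
    pub player1: [u8; 32],   // Player 1's public key
    pub player2: [u8; 32],   // Player 2's public key
    pub status: u8,          // 0 = uninitialized, 1/2 = player's turn, 3/4 = winner
    pub board: [[u8; 3]; 3], // each cell is '\0', b'X' or b'O'
}
// tictactoe_gamestate_size would then be std::mem::size_of::<GameState>().
```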

### Game Setup

1. Two players want to start a game. Player 2 sends Player 1 their public key, `player2_publickey`, off-chain (IM, email, etc).

2. Player 1 creates a new keypair to represent the game state, `(gamestate_publickey, gamestate_privatekey)`.

3. Player 1 issues an allocate_memory transaction, assigning that memory page to the tic-tac-toe program. The `memory_fee` is used to *rent* the memory page for the duration of the game and is subtracted from the current account balance of Player 1:
```
allocate_memory(gamestate_publickey, tictactoe_publickey, tictactoe_gamestate_size, memory_fee)
```

4. Game state is then initialized by issuing a *new* call transaction to the tic-tac-toe program. This transaction is signed by `gamestate_privatekey`, known only to Player 1.
```
call(tictactoe_publickey, gamestate_publickey, 'new', player1_publickey, player2_publickey)
```

5. Once the game is initialized, Player 1 shares `gamestate_publickey` with Player 2 off-chain (IM, email, etc).

Note that it's likely each player would prefer to generate a game-specific keypair rather than sharing their primary public key (`player1_publickey`, `player2_publickey`) with each other and the tic-tac-toe program.

### Game Play

Both players poll the network, via a **TBD off-chain RPC API**, to read the current game state from the `gamestate_publickey` memory page.

When the *Game status* field indicates it's their turn, the player issues a *move* call transaction passing in the board position (1..9) that they want to mark as X or O:
```
call(tictactoe_publickey, gamestate_publickey, 'move', position)
```
The program will reject the transaction if it was not signed by the player whose turn it is.

The outcome of the *move* call is also observed by polling the current game state via the **TBD off-chain RPC API**.

### Game Cancellation

At any time Player 1 may conclude the game by issuing:
```
call(tictactoe_publickey, gamestate_publickey, 'abort')
```
causing any remaining *rent* tokens assigned to the `gamestate_publickey` page to be transferred back to Player 1 by the tic-tac-toe program. Lastly, the network recognizes the empty account and frees the `gamestate_publickey` memory page.

132
rfcs/rfc-006-budget-contract-language.md
Normal file
@ -0,0 +1,132 @@

### Wallet CLI

The general form is:
```
$ solana-wallet [common-options] [command] [command-specific options]
```
`common-options` include:
* `--fee xyz` - Transaction fee (0 by default)
* `--output file` - Write the raw Transaction to a file instead of sending it

`command` variants:
* `pay`
* `cancel`
* `send-signature`
* `send-timestamp`

#### Unconditional Immediate Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123

// Return
<TX_SIGNATURE>
```

#### Post-Dated Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
    --after 2018-12-24T23:59:00 --require-timestamp-from <PUBKEY>

// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```
*`require-timestamp-from` is optional. If not provided, the transaction will expect a timestamp signed by this wallet's secret key*

#### Authorized Transfer
A third party must send a signature to unlock the tokens.
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
    --require-signature-from <PUBKEY>

// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```

#### Post-Dated and Authorized Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
    --after 2018-12-24T23:59 --require-timestamp-from <PUBKEY> \
    --require-signature-from <PUBKEY>

// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```

#### Multiple Witnesses
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
    --require-signature-from <PUBKEY> \
    --require-signature-from <PUBKEY>

// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```

#### Cancelable Transfer
```sh
// Command
$ solana-wallet pay <PUBKEY> 123 \
    --require-signature-from <PUBKEY> \
    --cancelable

// Return
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
```

#### Cancel Transfer
```sh
// Command
$ solana-wallet cancel <PROCESS_ID>

// Return
<TX_SIGNATURE>
```

#### Send Signature
```sh
// Command
$ solana-wallet send-signature <PUBKEY> <PROCESS_ID>

// Return
<TX_SIGNATURE>
```

#### Indicate Elapsed Time

Use the current system time:
```sh
// Command
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID>

// Return
<TX_SIGNATURE>
```

Or specify some other arbitrary timestamp:
```sh
// Command
$ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00

// Return
<TX_SIGNATURE>
```

### Deploy program
```
// Command
$ solana-wallet deploy <PATH>

// Return
<PROGRAM_ID>
```

## Javascript solana-web3.js Interface

*TBD, but will look similar to what the Wallet CLI offers wrapped up in a Javascript object*

98
scripts/increment-cargo-version.sh
Executable file
@ -0,0 +1,98 @@

#!/bin/bash -e

usage() {
  cat <<EOF
usage: $0 [major|minor|patch|-preXYZ]

Increments the Cargo.toml version.
A minor version increment is the default
EOF
  exit 0
}

here="$(dirname "$0")"
cd "$here"/..
source ci/semver_bash/semver.sh

readCargoVariable() {
  declare variable="$1"
  declare Cargo_toml="$2"

  while read -r name equals value _; do
    if [[ $name = "$variable" && $equals = = ]]; then
      echo "${value//\"/}"
      return
    fi
  done < <(cat "$Cargo_toml")
  echo "Unable to locate $variable in $Cargo_toml" 1>&2
}

# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
Cargo_tomls="$(find . -name Cargo.toml)"

# Collect the name of all the internal crates
crates=()
for Cargo_toml in $Cargo_tomls; do
  crates+=("$(readCargoVariable name "$Cargo_toml")")
done

# Read the current version
MAJOR=0
MINOR=0
PATCH=0
SPECIAL=""
semverParseInto "$(readCargoVariable version ./Cargo.toml)" MAJOR MINOR PATCH SPECIAL
[[ -n $MAJOR ]] || usage

currentVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
SPECIAL=""

# Figure out what to increment
case ${1:-minor} in
patch)
  PATCH=$((PATCH + 1))
  ;;
major)
  MAJOR=$((MAJOR + 1))
  ;;
minor)
  MINOR=$((MINOR + 1))
  ;;
-*)
  if [[ $1 =~ ^-[A-Za-z0-9]*$ ]]; then
    SPECIAL="$1"
  else
    echo "Error: Unsupported characters found in $1"
    exit 1
  fi
  ;;
*)
  echo "Error: unknown argument: $1"
  usage
  ;;
esac

newVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"

# Update all the Cargo.toml files
for Cargo_toml in $Cargo_tomls; do
  # Set new crate version
  (
    set -x
    sed -i "$Cargo_toml" -e "s/^version = \"[^\"]*\"$/version = \"$newVersion\"/"
  )

  # Fix up the version references to other internal crates
  for crate in "${crates[@]}"; do
    (
      set -x
      sed -i "$Cargo_toml" -e "
        s/^$crate = .*path = \"\([^\"]*\)\".*\$/$crate = \{ path = \"\1\", version = \"$newVersion\" \}/
      "
    )
  done
done

echo "$currentVersion -> $newVersion"

exit 0

29
scripts/install-native-programs.sh
Executable file
@ -0,0 +1,29 @@

#!/bin/bash -e
#
# Installs native programs as |cargo install| doesn't know about them
#

here=$(dirname "$0")
SOLANA_ROOT="$(cd "$here"/..; pwd)"

installDir=$1
variant=${2:-release}

if [[ -z $installDir ]]; then
  echo Install directory not specified
  exit 1
fi

if [[ ! -d $installDir ]]; then
  echo "Not a directory: $installDir"
  exit 1
fi

for dir in "$SOLANA_ROOT"/programs/native/*; do
  for program in echo "$SOLANA_ROOT"/target/"$variant"/deps/lib{,solana_}"$(basename "$dir")".{so,dylib,dll}; do
    if [[ -f $program ]]; then
      cp -v "$program" "$installDir"
    fi
  done
done

@ -10,18 +10,28 @@ cd "$(dirname "$0")"
# shellcheck source=scripts/configure-metrics.sh
source configure-metrics.sh

packets_sent=0
packets_sent_diff=0
packets_received=0
packets_received_diff=0
receive_errors=0
receive_errors_diff=0
rcvbuf_errors=0
rcvbuf_errors_diff=0
in_octets=0
in_octets_diff=0
out_octets=0
out_octets_diff=0

update_netstat() {
  declare net_stat
  net_stat=$(netstat -suna)

  declare stats
  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /packets sent/ {tmp_var = $1} END { print tmp_var }')
  packets_sent_diff=$((stats - packets_sent))
  packets_sent="$stats"

  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /packets received/ {tmp_var = $1} END { print tmp_var }')
  packets_received_diff=$((stats - packets_received))
  packets_received="$stats"
@ -33,13 +43,21 @@ update_netstat() {
  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /RcvbufErrors/ {tmp_var = $2} END { print tmp_var }')
  rcvbuf_errors_diff=$((stats - rcvbuf_errors))
  rcvbuf_errors="$stats"

  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /InOctets/ {tmp_var = $2} END { print tmp_var }')
  in_octets_diff=$((stats - in_octets))
  in_octets="$stats"

  stats=$(echo "$net_stat" | awk 'BEGIN {tmp_var = 0} /OutOctets/ {tmp_var = $2} END { print tmp_var }')
  out_octets_diff=$((stats - out_octets))
  out_octets="$stats"
}

update_netstat

while true; do
  update_netstat
  report="packets_received=$packets_received_diff,receive_errors=$receive_errors_diff,rcvbuf_errors=$rcvbuf_errors_diff"
  report="packets_sent=$packets_sent_diff,packets_received=$packets_received_diff,receive_errors=$receive_errors_diff,rcvbuf_errors=$rcvbuf_errors_diff,in_octets=$in_octets_diff,out_octets=$out_octets_diff"

  echo "$report"
  ./metrics-write-datapoint.sh "net-stats,hostname=$HOSTNAME $report"

@ -5,12 +5,13 @@

cd "$(dirname "$0")"/..

if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge wallet.sh functionality into solana-wallet proper and
  # remove this USE_SNAP case
  wallet="solana.wallet $1"
# shellcheck source=multinode-demo/common.sh
source multinode-demo/common.sh

if [[ -z $1 ]]; then # no network argument, use default
  entrypoint=()
else
  wallet="multinode-demo/wallet.sh $1"
  entrypoint=(-n "$1")
fi

# Tokens transferred to this address are lost forever...
@ -19,7 +20,7 @@ garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq
check_balance_output() {
  declare expected_output="$1"
  exec 42>&1
  output=$($wallet balance | tee >(cat - >&42))
  output=$($solana_wallet "${entrypoint[@]}" balance | tee >(cat - >&42))
  if [[ ! "$output" =~ $expected_output ]]; then
    echo "Balance is incorrect. Expected: $expected_output"
    exit 1
@ -28,18 +29,36 @@ check_balance_output() {

pay_and_confirm() {
  exec 42>&1
  signature=$($wallet pay "$@" | tee >(cat - >&42))
  $wallet confirm "$signature"
  signature=$($solana_wallet "${entrypoint[@]}" pay "$@" | tee >(cat - >&42))
  $solana_wallet "${entrypoint[@]}" confirm "$signature"
}

$wallet reset
$wallet address
leader_readiness=false
timeout=60
while [[ $timeout -gt 0 ]]; do
  expected_output="Leader ready"
  exec 42>&1
  output=$($solana_wallet "${entrypoint[@]}" get-transaction-count | tee >(cat - >&42))
  if [[ $output -gt 0 ]]; then
    leader_readiness=true
    break
  fi
  sleep 2
  (( timeout=timeout-2 ))
done
if ! "$leader_readiness"; then
  echo "Timed out waiting for leader"
  exit 1
fi

$solana_keygen
$solana_wallet "${entrypoint[@]}" address
check_balance_output "No account found" "Your balance is: 0"
$wallet airdrop --tokens 60
$solana_wallet "${entrypoint[@]}" airdrop 60
check_balance_output "Your balance is: 60"
$wallet airdrop --tokens 40
$solana_wallet "${entrypoint[@]}" airdrop 40
check_balance_output "Your balance is: 100"
pay_and_confirm --to $garbage_address --tokens 99
pay_and_confirm $garbage_address 99
check_balance_output "Your balance is: 1"

echo PASS

16
sdk/Cargo.toml
Normal file
@ -0,0 +1,16 @@

[package]
name = "solana-sdk"
version = "0.10.3"
description = "Solana SDK"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"

[dependencies]
bincode = "1.0.0"
bs58 = "0.2.0"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"

56
sdk/src/account.rs
Normal file
@ -0,0 +1,56 @@

use pubkey::Pubkey;

/// An Account with userdata that is stored on chain
#[repr(C)]
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct Account {
    /// tokens in the account
    pub tokens: i64,
    /// user data
    /// A transaction can write to its userdata
    pub userdata: Vec<u8>,
    /// contract id this contract belongs to
    pub program_id: Pubkey,

    /// this account contains a program (and is strictly read-only)
    pub executable: bool,

    /// the loader for this program (Pubkey::default() for no loader)
    pub loader_program_id: Pubkey,
}

impl Account {
    // TODO do we want to add executable and leader_program_id even though they should always be false/default?
    pub fn new(tokens: i64, space: usize, program_id: Pubkey) -> Account {
        Account {
            tokens,
            userdata: vec![0u8; space],
            program_id,
            executable: false,
            loader_program_id: Pubkey::default(),
        }
    }
}

#[repr(C)]
#[derive(Debug)]
pub struct KeyedAccount<'a> {
    pub key: &'a Pubkey,
    pub account: &'a mut Account,
}

impl<'a> From<(&'a Pubkey, &'a mut Account)> for KeyedAccount<'a> {
    fn from((key, account): (&'a Pubkey, &'a mut Account)) -> Self {
        KeyedAccount { key, account }
    }
}

impl<'a> From<&'a mut (Pubkey, Account)> for KeyedAccount<'a> {
    fn from((key, account): &'a mut (Pubkey, Account)) -> Self {
        KeyedAccount { key, account }
    }
}

pub fn create_keyed_accounts(accounts: &mut [(Pubkey, Account)]) -> Vec<KeyedAccount> {
    accounts.iter_mut().map(Into::into).collect()
}
8
sdk/src/lib.rs
Normal file
@ -0,0 +1,8 @@

pub mod account;
pub mod loader_instruction;
pub mod pubkey;
extern crate bincode;
extern crate bs58;
extern crate generic_array;
#[macro_use]
extern crate serde_derive;
18
sdk/src/loader_instruction.rs
Normal file
@ -0,0 +1,18 @@

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum LoaderInstruction {
    /// Write program data into an account
    ///
    /// * key[0] - the account to write into.
    ///
    /// The transaction must be signed by key[0]
    Write { offset: u32, bytes: Vec<u8> },

    /// Finalize an account loaded with program data for execution.
    /// The exact preparation steps are loader specific, but on success the loader must set the
    /// executable bit of the Account
    ///
    /// * key[0] - the account to prepare for execution
    ///
    /// The transaction must be signed by key[0]
    Finalize,
}