# Compare commits

125 Commits
| SHA1 |
| --- |
| 14306a33e7 |
| babc3847d7 |
| 40fd1befa5 |
| 7808af9a65 |
| 3c17732826 |
| 77aee571ad |
| a01b55c580 |
| 0ecdc64302 |
| ba06082d58 |
| 08e9c1a96e |
| 9f38b86df8 |
| ca12faca9c |
| 97a0791f3f |
| 4791c7e0a7 |
| 1ba13fe180 |
| 9a30100a9c |
| aa741b3147 |
| 09db7b5b52 |
| fa9faa2cec |
| d2dc585974 |
| 6721bdde3d |
| a733873b8f |
| 7c02bbc47c |
| 16a815d2b1 |
| ddb490e2fb |
| 242d0a23fb |
| 869009243d |
| 7b61f5279c |
| 7ef0b815ec |
| 8742de789e |
| bfadd7b787 |
| 2e14bfcf4e |
| a19426f055 |
| df366017a7 |
| 7d76badd03 |
| 8047ab777c |
| 0d0a1c2919 |
| 1da90017ce |
| 0909618efa |
| 28bb7849f4 |
| 9cffd3a1ea |
| 917151ce54 |
| 6dcd127634 |
| af66edf8c0 |
| ab5b921e8f |
| 6c2843543b |
| 85f74cc537 |
| 43665115b4 |
| 156115c04c |
| a66577eb87 |
| 3345d059e8 |
| 8c8c5de779 |
| f03e971598 |
| b4a1cdceaa |
| b250d20059 |
| dc3b270410 |
| 9d5092a71c |
| a287c9e5fa |
| ee85d534f9 |
| 6e1b291c17 |
| 68f7b1ecf3 |
| 58fe5cabd6 |
| 8993c6ae24 |
| 0e56473add |
| f6b709ca48 |
| ffa1fa557b |
| e7631c85a1 |
| edeadb503f |
| d2044f2562 |
| 5703c740cf |
| 6ae20e78e2 |
| 506fc3baeb |
| 68523f4a7f |
| beae217ab9 |
| 2c8c117e3c |
| 3a1285ebe5 |
| e2660f2ac1 |
| 22eb1b977f |
| 43ef8d7bb7 |
| d9271f2d30 |
| dfbfd4d4dd |
| 9cb262ad4b |
| 73ee0cb100 |
| 9a6154beaf |
| 3f494bb91b |
| 2eb312796d |
| 3fb86662fb |
| dce31f6002 |
| 39c42a6aba |
| 9961c0ee0a |
| 3f843f21b9 |
| d07961a58b |
| b85aa9282e |
| 1cd354cf15 |
| 92cd2d09ed |
| a40122548f |
| 6e27f797bd |
| 476a585222 |
| aa74ddb6c0 |
| 95921ce129 |
| ee6d00a2fe |
| 212cbc4977 |
| a6af1ba08d |
| ee27e9e1cf |
| 4d21ee0546 |
| 493a2477b5 |
| e284af33b9 |
| f0aa14e135 |
| fb9d8dfa99 |
| 4b02bbc802 |
| 18cf660f61 |
| 376303a1eb |
| f295eb06d0 |
| f423f61d8b |
| 94b06b2cbf |
| 9b2fc8cde7 |
| d810752e86 |
| fdaad1d85b |
| 7f29c1fe23 |
| 68df9d06db |
| b60cb48c18 |
| 0fee854220 |
| 0cc7bbfe7d |
| 68834bd4c5 |
| 2df40cf9c9 |
**`.buildkite/env/README.md`** (vendored, new file, +31)

[ejson](https://github.com/Shopify/ejson) and
[ejson2env](https://github.com/Shopify/ejson2env) are used to manage access
tokens and other secrets required for CI.

#### Setup
```bash
$ sudo gem install ejson ejson2env
```
then obtain the necessary keypair and place it in `/opt/ejson/keys/`.

#### Usage
Run the following command to decrypt the secrets into the environment:
```bash
eval $(ejson2env secrets.ejson)
```

#### Managing secrets.ejson
To decrypt `secrets.ejson` for modification, run:
```bash
$ ejson decrypt secrets.ejson -o secrets_unencrypted.ejson
```

Edit, then run the following to re-encrypt the file **BEFORE COMMITTING YOUR
CHANGES**:
```bash
$ ejson encrypt secrets_unencrypted.ejson
$ mv secrets_unencrypted.ejson secrets.ejson
```
**`.buildkite/env/secrets.ejson`** (vendored, new file, +10)

```json
{
  "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
  "environment": {
    "CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]",
    "CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]",
    "INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]",
    "INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]",
    "INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]"
  }
}
```
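`ejson2env` emits `export` statements for each key under `environment`, so evaluating its output populates the calling shell; this is how the CI hook diff below consumes the file. A minimal sketch (variable names taken from the file above; the decrypted values are of course not shown here):

```bash
# Decrypt .buildkite/env/secrets.ejson (requires the keypair in /opt/ejson/keys/)
# and export CODECOV_TOKEN, CRATES_IO_TOKEN, INFLUX_* into this shell
eval "$(ejson2env .buildkite/env/secrets.ejson)"
echo "${CODECOV_TOKEN:0:4}..."   # the token is now available to subsequent CI steps
```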
```diff
@@ -1,4 +1,7 @@
-#!/bin/bash -e
+#!/usr/bin/env bash
+set -e
+
+eval "$(ejson2env .buildkite/env/secrets.ejson)"
+
 # Ensure the pattern "+++ ..." never occurs when |set -x| is set, as buildkite
 # interprets this as the start of a log group.
@@ -24,4 +27,3 @@ export PS4="++"
 set -x
 rsync -a --delete --link-dest="$d" "$d"/target .
 )
-
```
**`.buildkite/pipeline-upload.sh`** (new executable file, +20)

```bash
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
#   steps:
#    - command: "ci/buildkite-pipeline-upload.sh"
#

set -e
cd "$(dirname "$0")"/..

buildkite-agent pipeline upload ci/buildkite.yml

if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
  # Add helpful link back to the corresponding Github Pull Request
  buildkite-agent annotate --style "info" \
    "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
fi
```
**`.gitignore`** (vendored, 1 changed line)

```diff
@@ -1,4 +1,3 @@
-Cargo.lock
 /target/
 
 **/*.rs.bk
```
**`Cargo.lock`** (generated, new file, +2464) — file diff suppressed because it is too large.
**`Cargo.toml`** (14 changed lines)

```diff
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.10.0-pre2"
+version = "0.10.5"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 readme = "README.md"
@@ -80,7 +80,7 @@ env_logger = "0.5.12"
 generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
 getopts = "0.2"
 hex-literal = "0.1.1"
-influx_db_client = "0.3.4"
+influx_db_client = "0.3.6"
 solana-jsonrpc-core = "0.3.0"
 solana-jsonrpc-http-server = "0.3.0"
 solana-jsonrpc-macros = "0.3.0"
@@ -104,14 +104,14 @@ serde_cbor = "0.9.0"
 serde_derive = "1.0.27"
 serde_json = "1.0.10"
 socket2 = "0.3.8"
-solana-sdk = { path = "sdk", version = "0.10.0-pre2" }
+solana-sdk = { path = "sdk", version = "0.10.5" }
 sys-info = "0.5.6"
 tokio = "0.1"
 tokio-codec = "0.1"
 untrusted = "0.6.2"
-solana-noop = { path = "programs/native/noop", version = "0.10.0-pre2" }
+solana-noop = { path = "programs/native/noop", version = "0.10.5" }
-solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.0-pre2" }
+solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.5" }
-solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.0-pre2" }
+solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.5" }
 
 [[bench]]
 name = "bank"
@@ -139,5 +139,5 @@ members = [
     "programs/native/noop",
     "programs/native/bpf_loader",
     "programs/native/lua_loader",
-    "programs/bpf/noop_rust",
+    "programs/bpf/rust/noop",
 ]
```
**`build.rs`** (50 changed lines)

```diff
@@ -8,7 +8,7 @@ fn main() {
     // Ensure target/perf-libs/ exists. It's been observed that
     // a cargo:rerun-if-changed= directive with a non-existent
     // directory triggers a rebuild on every |cargo build| invocation
-    fs::create_dir("target/perf-libs").unwrap_or_else(|err| {
+    fs::create_dir_all("target/perf-libs").unwrap_or_else(|err| {
         if err.kind() != std::io::ErrorKind::AlreadyExists {
             panic!("Unable to create target/perf-libs: {:?}", err);
         }
@@ -20,44 +20,22 @@ fn main() {
     let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();
 
     if bpf_c {
-        let out_dir = "target/".to_string() + &env::var("PROFILE").unwrap();
+        let out_dir = "OUT_DIR=../../../target/".to_string()
+            + &env::var("PROFILE").unwrap()
+            + &"/bpf".to_string();
 
-        println!("cargo:rerun-if-changed=programs/bpf/noop_c/build.sh");
-        println!("cargo:rerun-if-changed=programs/bpf/noop_c/src/noop.c");
-        println!("cargo:warning=(not a warning) Compiling noop_c");
-        let status = Command::new("programs/bpf/noop_c/build.sh")
+        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/bpf.mk");
+        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/inc/solana_sdk.h");
+        println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
+        println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
+        println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
+        println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
+        let status = Command::new("make")
+            .current_dir("programs/bpf/c")
+            .arg("all")
             .arg(&out_dir)
             .status()
-            .expect("Failed to call noop_c build script");
-        assert!(status.success());
-
-        println!("cargo:rerun-if-changed=programs/bpf/move_funds_c/build.sh");
-        println!("cargo:rerun-if-changed=programs/bpf/move_funds_c/src/move_funds.c");
-        println!("cargo:warning=(not a warning) Compiling move_funds_c");
-        let status = Command::new("programs/bpf/move_funds_c/build.sh")
-            .arg(&out_dir)
-            .status()
-            .expect("Failed to call move_funds_c build script");
-        assert!(status.success());
-
-        println!("cargo:rerun-if-changed=programs/bpf/tictactoe_c/build.sh");
-        println!("cargo:rerun-if-changed=programs/bpf/tictactoe_c/src/tictactoe.c");
-        println!("cargo:warning=(not a warning) Compiling tictactoe_c");
-        let status = Command::new("programs/bpf/tictactoe_c/build.sh")
-            .arg(&out_dir)
-            .status()
-            .expect("Failed to call tictactoe_c build script");
-        assert!(status.success());
-
-        println!("cargo:rerun-if-changed=programs/bpf/tictactoe_dashboard_c/build.sh");
-        println!(
-            "cargo:rerun-if-changed=programs/bpf/tictactoe_dashboard_c/src/tictactoe_dashboard.c"
-        );
-        println!("cargo:warning=(not a warning) Compiling tictactoe_dashboard_c");
-        let status = Command::new("programs/bpf/tictactoe_dashboard_c/build.sh")
-            .arg(&out_dir)
-            .status()
-            .expect("Failed to call tictactoe_dashboard_c build script");
+            .expect("Failed to build C-based BPF programs");
         assert!(status.success());
     }
     if chacha || cuda || erasure {
```
```diff
@@ -8,3 +8,9 @@ steps:
   - command: "ci/publish-crate.sh"
     timeout_in_minutes: 20
     name: "publish crate [public]"
+  - command: "ci/publish-bpf-sdk.sh"
+    timeout_in_minutes: 5
+    name: "publish bpf sdk"
+  - command: "ci/publish-solana-tar.sh"
+    timeout_in_minutes: 15
+    name: "publish solana release tar"
```
```diff
@@ -1,5 +1,5 @@
 steps:
-  - command: "ci/docker-run.sh solanalabs/rust:1.30.0 ci/test-stable.sh"
+  - command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh"
    name: "stable [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
@@ -36,7 +36,7 @@ steps:
    timeout_in_minutes: 20
    name: "snap [public]"
  - wait
-  - trigger: "solana-snap"
+  - trigger: "solana-secondary"
    branches: "!pull/*"
    async: true
    build:
```
**`ci/crate-version.sh`** (new executable file, +16)

```bash
#!/bin/bash -e
#
# Outputs the current crate version
#

cd "$(dirname "$0")"/..

while read -r name equals value _; do
  if [[ $name = version && $equals = = ]]; then
    echo "${value//\"/}"
    exit 0
  fi
done < <(cat Cargo.toml)

echo Unable to locate version in Cargo.toml 1>&2
exit 1
```
|
|||||||
# Note: when the rust version is changed also modify
|
# Note: when the rust version is changed also modify
|
||||||
# ci/buildkite.yml to pick up the new image tag
|
# ci/buildkite.yml to pick up the new image tag
|
||||||
FROM rust:1.30.0
|
FROM rust:1.30.1
|
||||||
|
|
||||||
RUN set -x && \
|
RUN set -x && \
|
||||||
apt update && \
|
apt update && \
|
||||||
|
**`ci/publish-bpf-sdk.sh`** (new executable file, +36)

```bash
#!/bin/bash -e

cd "$(dirname "$0")/.."

version=$(./ci/crate-version.sh)

echo --- Creating tarball
(
  set -x
  rm -rf bpf-sdk/
  mkdir bpf-sdk/
  (
    echo "$version"
    git rev-parse HEAD
  ) > bpf-sdk/version.txt

  cp -ra programs/bpf/c/sdk/* bpf-sdk/

  tar jvcf bpf-sdk.tar.bz2 bpf-sdk/
)

echo --- AWS S3 Store

set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
  rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
  wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
  tar zxf s3cmd-2.0.1.tar.gz
fi

python ./s3cmd-2.0.1/s3cmd --acl-public put bpf-sdk.tar.bz2 \
  s3://solana-sdk/"$version"/bpf-sdk.tar.bz2

exit 0
```
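Since the tarball is uploaded with `--acl-public`, consumers should be able to fetch it anonymously. A sketch of the download side, assuming the conventional `https://<bucket>.s3.amazonaws.com/<key>` URL layout and the `0.10.5` version from this diff:

```bash
# Hypothetical consumer: fetch and unpack a published BPF SDK
wget https://solana-sdk.s3.amazonaws.com/0.10.5/bpf-sdk.tar.bz2
tar jxf bpf-sdk.tar.bz2   # unpacks into bpf-sdk/ (version.txt plus the SDK contents)
```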
```diff
@@ -18,7 +18,7 @@ if [[ -n $CI ]]; then
 fi
 
 # shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
-for Cargo_toml in {.,sdk,programs/native/{bpf_loader,lua_loader,noop}}/Cargo.toml; do
+for Cargo_toml in {sdk,programs/native/{bpf_loader,lua_loader,noop},.}/Cargo.toml; do
   # TODO: Ensure the published version matches the contents of BUILDKITE_TAG
   (
     set -x
```
**`ci/publish-metrics-dashboard.sh`** (new executable file, +73)

```bash
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

if [[ -z $BUILDKITE ]]; then
  echo BUILDKITE not defined
  exit 1
fi

if [[ -z $CHANNEL ]]; then
  CHANNEL=$(buildkite-agent meta-data get "channel" --default "")
fi

if [[ -z $CHANNEL ]]; then
  (
    cat <<EOF
steps:
  - block: "Select Dashboard"
    fields:
      - select: "Channel"
        key: "channel"
        options:
          - label: "stable"
            value: "stable"
          - label: "edge"
            value: "edge"
          - label: "beta"
            value: "beta"
  - command: "ci/$(basename "$0")"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

ci/channel-info.sh
eval "$(ci/channel-info.sh)"

case $CHANNEL in
edge)
  CHANNEL_BRANCH=$EDGE_CHANNEL
  ;;
beta)
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
stable)
  CHANNEL_BRANCH=$STABLE_CHANNEL
  ;;
*)
  echo "Error: Invalid CHANNEL=$CHANNEL"
  exit 1
  ;;
esac

if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
  (
    cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      branch: "$CHANNEL_BRANCH"
      env:
        CHANNEL: "$CHANNEL"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

set -x
exec metrics/publish-metrics-dashboard.sh "$CHANNEL"
```
**`ci/publish-solana-tar.sh`** (new executable file, +71)

```bash
#!/bin/bash -e

cd "$(dirname "$0")/.."

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
  DRYRUN="echo"
  CHANNEL=unknown
fi

eval "$(ci/channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi

if [[ -n "$BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$BUILDKITE_TAG
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
else
  CHANNEL_OR_TAG=$CHANNEL
fi

if [[ -z $CHANNEL_OR_TAG ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi

echo --- Creating tarball
(
  set -x
  rm -rf solana-release/
  mkdir solana-release/
  (
    echo "$CHANNEL_OR_TAG"
    git rev-parse HEAD
  ) > solana-release/version.txt

  cargo install --root solana-release
  ./scripts/install-native-programs.sh solana-release/bin
  ./fetch-perf-libs.sh
  cargo install --features=cuda --root solana-release-cuda
  cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda

  tar jvcf solana-release.tar.bz2 solana-release/
)

echo --- AWS S3 Store
if [[ -z $DRYRUN ]]; then
  (
    set -x
    if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
      rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
      $DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
      $DRYRUN tar zxf s3cmd-2.0.1.tar.gz
    fi

    $DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
      s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
  )
else
  echo Skipped due to DRYRUN
fi
exit 0
```
```diff
@@ -23,6 +23,16 @@ for test in tests/*.rs; do
   _ cargo test --verbose --jobs=1 --test="$test"
 done
 
+# Run native program's tests
+for program in programs/native/*; do
+  echo --- "$program"
+  (
+    set -x
+    cd "$program"
+    cargo test --verbose
+  )
+done
+
 echo --- ci/localnet-sanity.sh
 (
   set -x
```
```diff
@@ -9,8 +9,10 @@ clientNodeCount=0
 validatorNodeCount=10
 publicNetwork=false
 snapChannel=edge
+tarChannelOrTag=edge
 delete=false
 enableGpu=false
+useTarReleaseChannel=false
 
 usage() {
   exitcode=0
@@ -19,16 +21,21 @@ usage() {
     echo "Error: $*"
   fi
   cat <<EOF
-usage: $0 [name] [zone] [options...]
+usage: $0 [name] [cloud] [zone] [options...]
 
 Deploys a CD testnet
 
 name  - name of the network
-zone  - zone to deploy the network into
+cloud - cloud provider to use (gce, ec2)
+zone  - cloud provider zone to deploy the network into
 
 options:
  -s edge|beta|stable         - Deploy the specified Snap release channel
                                (default: $snapChannel)
+ -t edge|beta|stable|vX.Y.Z  - Deploy the latest tarball release for the
+                               specified release channel (edge|beta|stable) or release tag
+                               (vX.Y.Z)
+                               (default: $tarChannelOrTag)
  -n [number]                 - Number of validator nodes (default: $validatorNodeCount)
  -c [number]                 - Number of client nodes (default: $clientNodeCount)
  -P                          - Use public network IP addresses (default: $publicNetwork)
@@ -44,12 +51,14 @@ EOF
 }
 
 netName=$1
-zone=$2
+cloudProvider=$2
+zone=$3
 [[ -n $netName ]] || usage
+[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
 [[ -n $zone ]] || usage "Zone not specified"
-shift 2
+shift 3
 
-while getopts "h?p:Pn:c:s:gG:a:d" opt; do
+while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
   case $opt in
   h | \?)
     usage
@@ -73,6 +82,17 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
     ;;
     esac
     ;;
+  t)
+    case $OPTARG in
+    edge|beta|stable|v*)
+      tarChannelOrTag=$OPTARG
+      useTarReleaseChannel=true
+      ;;
+    *)
+      usage "Invalid release channel: $OPTARG"
+      ;;
+    esac
+    ;;
   g)
     enableGpu=true
     ;;
@@ -93,7 +113,7 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
 done
 
 
-gce_create_args=(
+create_args=(
   -a "$leaderAddress"
   -c "$clientNodeCount"
   -n "$validatorNodeCount"
@@ -103,26 +123,26 @@ gce_create_args=(
 
 if $enableGpu; then
   if [[ -z $leaderMachineType ]]; then
-    gce_create_args+=(-g)
+    create_args+=(-g)
   else
-    gce_create_args+=(-G "$leaderMachineType")
+    create_args+=(-G "$leaderMachineType")
   fi
 fi
 
 if $publicNetwork; then
-  gce_create_args+=(-P)
+  create_args+=(-P)
 fi
 
 set -x
 
-echo --- gce.sh delete
-time net/gce.sh delete -z "$zone" -p "$netName"
+echo "--- $cloudProvider.sh delete"
+time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
 if $delete; then
   exit 0
 fi
 
-echo --- gce.sh create
-time net/gce.sh create "${gce_create_args[@]}"
+echo "--- $cloudProvider.sh create"
+time net/"$cloudProvider".sh create "${create_args[@]}"
 net/init-metrics.sh -e
 
 echo --- net.sh start
@@ -130,7 +150,18 @@ maybeRejectExtraNodes=
 if ! $publicNetwork; then
   maybeRejectExtraNodes="-o rejectExtraNodes"
 fi
+maybeNoValidatorSanity=
+if [[ -n $NO_VALIDATOR_SANITY ]]; then
+  maybeNoValidatorSanity="-o noValidatorSanity"
+fi
+maybeNoLedgerVerify=
+if [[ -n $NO_LEDGER_VERIFY ]]; then
+  maybeNoLedgerVerify="-o noLedgerVerify"
+fi
 # shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
-time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes
+if $useTarReleaseChannel; then
+  time net/net.sh start -t "$tarChannelOrTag" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
+else
+  time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
+fi
 exit 0
```
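The invocations in `ci/testnet-manager.sh` below exercise exactly this interface. A representative call in the new `[name] [cloud] [zone]` argument order (the values here are illustrative):

```bash
# Redeploy a 3-validator, 0-client testnet on EC2 from the edge tarball channel
ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
  -t edge -n 3 -c 0 -P
```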
**`ci/testnet-manager.sh`** (new executable file, +360)

```bash
#!/bin/bash -e

cd "$(dirname "$0")"/..

if [[ -z $BUILDKITE ]]; then
  echo BUILDKITE not defined
  exit 1
fi

if [[ -z $SOLANA_METRICS_PARTIAL_CONFIG ]]; then
  echo SOLANA_METRICS_PARTIAL_CONFIG not defined
  exit 1
fi

if [[ -z $TESTNET ]]; then
  TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
fi
if [[ -z $TESTNET_OP ]]; then
  TESTNET_OP=$(buildkite-agent meta-data get "testnet-operation" --default "")
fi

if [[ -z $TESTNET || -z $TESTNET_OP ]]; then
  (
    cat <<EOF
steps:
  - block: "Manage Testnet"
    fields:
      - select: "Network"
        key: "testnet"
        options:
          - label: "testnet"
            value: "testnet"
          - label: "testnet-perf"
            value: "testnet-perf"
          - label: "testnet-master"
            value: "testnet-master"
          - label: "testnet-master-perf"
            value: "testnet-master-perf"
          - label: "testnet-edge"
            value: "testnet-edge"
          - label: "testnet-edge-perf"
            value: "testnet-edge-perf"
          - label: "testnet-beta"
            value: "testnet-beta"
          - label: "testnet-beta-perf"
            value: "testnet-beta-perf"
      - select: "Operation"
        key: "testnet-operation"
        default: "sanity-or-restart"
        options:
          - label: "Sanity check. Restart network on failure"
            value: "sanity-or-restart"
          - label: "Start (or restart) the network"
            value: "start"
          - label: "Stop the network"
            value: "stop"
          - label: "Sanity check only"
            value: "sanity"
  - command: "ci/$(basename "$0")"
    agents:
      - "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"

ci/channel-info.sh
eval "$(ci/channel-info.sh)"

case $TESTNET in
testnet-edge|testnet-edge-perf|testnet-master|testnet-master-perf)
  CHANNEL_OR_TAG=edge
  CHANNEL_BRANCH=$EDGE_CHANNEL
  ;;
testnet-beta|testnet-beta-perf)
  CHANNEL_OR_TAG=beta
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
testnet|testnet-perf)
  if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
    CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$BETA_CHANNEL
  else
    CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$STABLE_CHANNEL
  fi
  ;;
*)
  echo "Error: Invalid TESTNET=$TESTNET"
  exit 1
  ;;
esac

if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
  (
    cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      branch: "$CHANNEL_BRANCH"
      env:
        TESTNET: "$TESTNET"
        TESTNET_OP: "$TESTNET_OP"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi


sanity() {
  echo "--- sanity $TESTNET"
  case $TESTNET in
  testnet-edge)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-edge-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh edge-perf-testnet-solana-com ec2 us-west-2b
    )
    ;;
  testnet-beta)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-beta-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh beta-perf-testnet-solana-com ec2 us-west-2b
    )
    ;;
  testnet-master)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh master-testnet-solana-com gce us-west1-b
    )
    ;;
  testnet-master-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh master-perf-testnet-solana-com gce us-west1-b
    )
    ;;
  testnet)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-sanity.sh testnet-solana-com gce us-east1-c
      ci/testnet-sanity.sh testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-sanity.sh perf-testnet-solana-com ec2 us-east-1a
      ci/testnet-sanity.sh perf-testnet-solana-com gce us-west1-b
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}


start() {
  declare maybeDelete=$1
  if [[ -z $maybeDelete ]]; then
    echo "--- start $TESTNET"
  else
    echo "--- stop $TESTNET"
  fi

  case $TESTNET in
  testnet-edge)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
        -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0ccd4f2239886fa94 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-edge-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh edge-perf-testnet-solana-com ec2 us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-beta)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0f286cf8a0771ce35 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-beta-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh beta-perf-testnet-solana-com ec2 us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-master)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh master-testnet-solana-com gce us-west1-b \
        -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a master-testnet-solana-com \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-master-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh master-perf-testnet-solana-com gce us-west1-b \
        -G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
        -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-deploy.sh testnet-solana-com gce us-east1-c \
      #  -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
      #  ${maybeDelete:+-d}
      ci/testnet-deploy.sh testnet-solana-com ec2 us-west-1a \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0fa502bf95f6f18b2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh perf-testnet-solana-com gce us-west1-b \
        -G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
        -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
      #ci/testnet-deploy.sh perf-testnet-solana-com ec2 us-east-1a \
      #  -g \
      #  -t "$CHANNEL_OR_TAG" -c 2 \
      #  ${maybeDelete:+-d}
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}

stop() {
  start delete
}

case $TESTNET_OP in
sanity)
  sanity
  ;;
start)
  start
  ;;
stop)
  stop
  ;;
sanity-or-restart)
  if sanity; then
    echo Pass
  else
    echo "Sanity failed, restarting the network"
    echo "^^^ +++"
    start
  fi
  ;;
esac

echo --- fin
exit 0
```
```diff
@@ -9,12 +9,13 @@ usage() {
     echo "Error: $*"
   fi
   cat <<EOF
-usage: $0 [name] [zone]
+usage: $0 [name] [cloud] [zone]
 
 Sanity check a CD testnet
 
 name  - name of the network
-zone  - zone of the network
+cloud - cloud provider to use (gce, ec2)
+zone  - cloud provider zone of the network
 
 Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
       metrics
@@ -23,16 +24,18 @@ EOF
 }
 
 netName=$1
-zone=$2
+cloudProvider=$2
+zone=$3
 [[ -n $netName ]] || usage ""
+[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
 [[ -n $zone ]] || usage "Zone not specified"
 
 set -x
-echo --- gce.sh config
-net/gce.sh config -p "$netName" -z "$zone"
+echo "--- $cloudProvider.sh config"
+timeout 5m net/"$cloudProvider".sh config -p "$netName" -z "$zone"
 net/init-metrics.sh -e
 echo --- net.sh sanity
-net/net.sh sanity \
+timeout 5m net/net.sh sanity \
   ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
   ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
   ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
```
```diff
@@ -4,14 +4,18 @@ Currently we have three testnets:
 * `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
 * `testnet-perf` - private beta channel testnet with clients trying to flood the network
   with transactions until failure. Runs 24/7
-* `testnet-master` - private edge channel testnet with clients trying to flood the network
+* `testnet-master` - public edge channel testnet accessible via master.testnet.solana.com. Runs 24/7
+* `testnet-master-perf` - private edge channel testnet with clients trying to flood the network
   with transactions until failure. Runs on weekday mornings for a couple hours
 
 ## Deploy process
 
-They are deployed with the `ci/testnet-deploy.sh` script. There is a scheduled buildkite job which runs to do the deploy,
-look at `testnet-deploy` to see the agent which ran it and the logs. There is also a manual job to do the deploy manually.
-Validators are selected based on their machine name and everyone gets the binaries installed from snap.
+They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
+buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
+Each testnet can be manually manipulated from buildkite as well. The `-perf`
+testnets use a release tarball while the non-`-perf` builds use the snap build
+(we've observed that the snap build runs slower than a tarball but this has yet
+to be root caused).
 
 ## Where are the testnet logs?
 
@@ -29,7 +33,8 @@ $ net/ssh.sh
 for log location details
 
 ## How do I reset the testnet?
-Manually trigger the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) pipeline
+Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
+and when prompted select the desired testnet
 
 ## How can I scale the tx generation rate?
 
@@ -43,5 +48,5 @@ Currently, a merged PR is the only way to test a change on the testnet. But you
 can run your own testnet using the scripts in the `net/` directory.
 
 ## Adjusting the number of clients or validators on the testnet
-Through the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) settings.
+Edit `ci/testnet-manager.sh`
```
```diff
@@ -15,7 +15,7 @@ mkdir -p target/perf-libs
 cd target/perf-libs
 (
   set -x
-  curl https://solana-perf.s3.amazonaws.com/v0.10.2/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
+  curl https://solana-perf.s3.amazonaws.com/v0.10.3/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
 )
 
 if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
```
**`metrics/README.md`** (new file, +39)

# Metrics

## Testnet Grafana Dashboard

There are three versions of the testnet dashboard, corresponding to the three
release channels:
* https://metrics.solana.com:3000/d/testnet-edge/testnet-monitor-edge
* https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta
* https://metrics.solana.com:3000/d/testnet/testnet-monitor

The dashboard for each channel is defined from the
`metrics/testnet-monitor.json` source file in the git branch associated with
that channel, and deployed by automation running `ci/publish-metrics-dashboard.sh`.

A deploy can be triggered at any time via the `New Build` button of
https://buildkite.com/solana-labs/publish-metrics-dashboard.

### Modifying a Dashboard

Dashboard updates are accomplished by modifying `metrics/testnet-monitor.json`;
**manual edits made directly in Grafana will be overwritten**.

1. Open the desired dashboard in Grafana
2. Create a development copy of the dashboard by selecting `Save As..` in the
   `Settings` menu for the dashboard
3. Edit the dashboard as desired
4. Extract the JSON Model by selecting `JSON Model` in the `Settings` menu. Copy the JSON to the clipboard
   and paste it into `metrics/testnet-monitor.json`
5. Delete your development dashboard: `Settings` => `Delete`

### Deploying a Dashboard Manually

If you need to immediately deploy a dashboard using the contents of
`metrics/testnet-monitor.json` in your local workspace:
```
$ export GRAFANA_API_TOKEN="an API key from https://metrics.solana.com:3000/org/apikeys"
$ metrics/publish-metrics-dashboard.sh (edge|beta|stable)
```
Note that automation will eventually overwrite your manual deploy.
**`metrics/adjust-dashboard-for-channel.py`** (new executable file, +69)

```python
#!/usr/bin/env python3
#
# Adjusts the testnet monitor dashboard for the specified release channel
#

import sys
import json

if len(sys.argv) != 3:
    print('Error: Dashboard or Channel not specified')
    sys.exit(1)

dashboard_json = sys.argv[1]
channel = sys.argv[2]
if channel not in ['edge', 'beta', 'stable']:
    print('Error: Unknown channel:', channel)
    sys.exit(2)

with open(dashboard_json, 'r') as read_file:
    data = json.load(read_file)

if channel == 'stable':
    # Stable dashboard only allows the user to select between the stable
    # testnet databases
    data['title'] = 'Testnet Monitor'
    data['uid'] = 'testnet'
    data['templating']['list'] = [{'allValue': None,
                                   'current': {'text': 'testnet',
                                               'value': 'testnet'},
                                   'hide': 1,
                                   'includeAll': False,
                                   'label': 'Testnet',
                                   'multi': False,
                                   'name': 'testnet',
                                   'options': [{'selected': False,
                                                'text': 'testnet',
                                                'value': 'testnet'},
                                               {'selected': True,
                                                'text': 'testnet-perf',
                                                'value': 'testnet-perf'}],
                                   'query': 'testnet,testnet-perf',
                                   'type': 'custom'}]
else:
    # Non-stable dashboard only allows the user to select between all testnet
    # databases
    data['title'] = 'Testnet Monitor ({})'.format(channel)
    data['uid'] = 'testnet-' + channel
    data['templating']['list'] = [{'allValue': None,
                                   'current': {'text': 'testnet',
                                               'value': 'testnet'},
                                   'datasource': 'Solana Metrics (read-only)',
                                   'hide': 1,
                                   'includeAll': False,
                                   'label': 'Testnet',
                                   'multi': False,
                                   'name': 'testnet',
                                   'options': [],
                                   'query': 'show databases',
                                   'refresh': 1,
                                   'regex': 'testnet.*',
                                   'sort': 1,
                                   'tagValuesQuery': '',
                                   'tags': [],
                                   'tagsQuery': '',
                                   'type': 'query',
                                   'useTags': False}]

with open(dashboard_json, 'w') as write_file:
    json.dump(data, write_file, indent=2)
```
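`metrics/publish-metrics-dashboard.sh` below drives this script automatically; run standalone it rewrites the dashboard JSON in place, e.g.:

```bash
# Retitle and retarget the dashboard JSON for the edge channel
metrics/adjust-dashboard-for-channel.py metrics/testnet-monitor.json edge
```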
**`metrics/grafcli.conf`** (new file, +15)

```ini
[grafcli]
editor = vim
mergetool = vimdiff
verbose = on
force = on

[resources]

[hosts]
metrics = on

[metrics]
type = api
url = https://metrics.solana.com:3000/api
ssl = off
```
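The `[hosts]` and `[metrics]` sections register the Grafana instance under the host alias `metrics`, so `grafcli` paths take the form `remote/metrics/<dashboard-uid>`. For example, the backup step in `publish-metrics-dashboard.sh` below exports the currently deployed stable dashboard with:

```bash
grafcli export remote/metrics/testnet-monitor testnet-monitor.json.org
```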
**`metrics/publish-metrics-dashboard.sh`** (new executable file, +71)

```bash
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")"

CHANNEL=$1
if [[ -z $CHANNEL ]]; then
  echo "usage: $0 [channel]"
  exit 1
fi

case $CHANNEL in
edge)
  DASHBOARD=testnet-monitor-edge
  ;;
beta)
  DASHBOARD=testnet-monitor-beta
  ;;
stable)
  DASHBOARD=testnet-monitor
  ;;
*)
  echo "Error: Invalid CHANNEL=$CHANNEL"
  exit 1
  ;;
esac

if [[ -z $GRAFANA_API_TOKEN ]]; then
  echo Error: GRAFANA_API_TOKEN not defined
  exit 1
fi

DASHBOARD_JSON=./testnet-monitor.json
if [[ ! -r $DASHBOARD_JSON ]]; then
  echo Error: $DASHBOARD_JSON not found
fi

(
  set -x
  ./adjust-dashboard-for-channel.py "$DASHBOARD_JSON" "$CHANNEL"
)

rm -rf venv
python3 -m venv venv
# shellcheck source=/dev/null
source venv/bin/activate

echo --- Fetch/build grafcli
(
  set -x
  git clone git@github.com:mvines/grafcli.git -b experimental-v5 venv/grafcli
  cd venv/grafcli
  python3 setup.py install
)

echo --- Take a backup of existing dashboard if possible
(
  set -x +e
  grafcli export remote/metrics/$DASHBOARD $DASHBOARD_JSON.org
  grafcli rm remote/metrics/$DASHBOARD
  :
)

echo --- Publish $DASHBOARD_JSON to $DASHBOARD
(
  set -x
  grafcli import $DASHBOARD_JSON remote/metrics
)

exit 0
```
**`metrics/testnet-monitor.json`** (new file, +5576) — file diff suppressed because it is too large.
```diff
@@ -49,8 +49,6 @@ elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
     declare program="$1"
     printf "solana-%s" "$program"
   }
-  # CUDA was/wasn't selected at build time, can't affect CUDA state here
-  unset SOLANA_CUDA
 else
   solana_program() {
     declare program="$1"
@@ -104,16 +102,16 @@ tune_networking() {
       # test the existence of the sysctls before trying to set them
       # go ahead and return true and don't exit if these calls fail
       sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
-          sudo sysctl -w net.core.rmem_max=67108864 1>/dev/null 2>/dev/null
+          sudo sysctl -w net.core.rmem_max=1610612736 1>/dev/null 2>/dev/null
 
       sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
-          sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
+          sudo sysctl -w net.core.rmem_default=1610612736 1>/dev/null 2>/dev/null
 
       sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
-          sudo sysctl -w net.core.wmem_max=67108864 1>/dev/null 2>/dev/null
+          sudo sysctl -w net.core.wmem_max=1610612736 1>/dev/null 2>/dev/null
 
       sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
-          sudo sysctl -w net.core.wmem_default=26214400 1>/dev/null 2>/dev/null
+          sudo sysctl -w net.core.wmem_default=1610612736 1>/dev/null 2>/dev/null
   ) || true
 fi
```
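The new values raise the kernel socket-buffer limits to 1.5 GiB (1610612736 bytes). To confirm a node picked them up, the settings can be read back directly (a quick check, assuming a Linux host):

```bash
$ sysctl net.core.rmem_max net.core.wmem_max
net.core.rmem_max = 1610612736
net.core.wmem_max = 1610612736
```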
68
net/gce.sh
68
net/gce.sh
@ -11,7 +11,6 @@ gce)
|
|||||||
 # shellcheck source=net/scripts/gce-provider.sh
 source "$here"/scripts/gce-provider.sh

-imageName="ubuntu-16-04-cuda-9-2-new"
 cpuLeaderMachineType=n1-standard-16
 gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
 leaderMachineType=$cpuLeaderMachineType
@@ -22,12 +21,11 @@ ec2)
 # shellcheck source=net/scripts/ec2-provider.sh
 source "$here"/scripts/ec2-provider.sh

-imageName="ami-0466e26ccc0e752c1"
 cpuLeaderMachineType=m4.4xlarge
 gpuLeaderMachineType=p2.xlarge
 leaderMachineType=$cpuLeaderMachineType
-validatorMachineType=m4.xlarge
-clientMachineType=m4.4xlarge
+validatorMachineType=m4.2xlarge
+clientMachineType=m4.2xlarge
 ;;
 *)
 echo "Error: Unknown cloud provider: $cloudProvider"
@@ -118,7 +116,7 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
 ;;
 g)
 enableGpu=true
-leaderMachineType="$gpuLeaderMachineType"
+leaderMachineType=$gpuLeaderMachineType
 ;;
 G)
 enableGpu=true
@@ -131,14 +129,53 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
 bootDiskType=$OPTARG
 ;;
 *)
-usage "Error: unhandled option: $opt"
+usage "unhandled option: $opt"
 ;;
 esac
 done
 shift $((OPTIND - 1))

 [[ -z $1 ]] || usage "Unexpected argument: $1"
-sshPrivateKey="$netConfigDir/id_$prefix"
+if [[ $cloudProvider = ec2 ]]; then
+# EC2 keys can't be retrieved from running instances like GCE keys can so save
+# EC2 keys in the user's home directory so |./ec2.sh config| can at least be
+# used on the same host that ran |./ec2.sh create| .
+sshPrivateKey="$HOME/.ssh/solana-net-id_$prefix"
+else
+sshPrivateKey="$netConfigDir/id_$prefix"
+fi
+
+case $cloudProvider in
+gce)
+if $enableGpu; then
+# TODO: GPU image is still 16.04-based pending resolution of
+# https://github.com/solana-labs/solana/issues/1702
+imageName="ubuntu-16-04-cuda-9-2-new"
+else
+imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
+fi
+;;
+ec2)
+# Deep Learning AMI (Ubuntu 16.04-based)
+case $region in # (region global variable is set by cloud_SetZone)
+us-east-1)
+imageName="ami-047daf3f2b162fc35"
+;;
+us-west-1)
+imageName="ami-08c8c7c4a57a6106d"
+;;
+us-west-2)
+imageName="ami-0b63040ee445728bf"
+;;
+*)
+usage "Unsupported region: $region"
+;;
+esac
+;;
+*)
+echo "Error: Unknown cloud provider: $cloudProvider"
+;;
+esac
+

 # cloud_ForEachInstance [cmd] [extra args to cmd]
@@ -206,13 +243,18 @@ EOF

 echo "Waiting for $name to finish booting..."
 (
-for i in $(seq 1 30); do
-if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.instance-startup-complete"); then
-break
+set -x +e
+for i in $(seq 1 60); do
+timeout 20s ssh "${sshOptions[@]}" "$publicIp" "ls -l /.instance-startup-complete"
+ret=$?
+if [[ $ret -eq 0 ]]; then
+exit 0
 fi
 sleep 2
 echo "Retry $i..."
 done
+echo "$name failed to boot."
+exit 1
 )
 echo "$name has booted."
 }
@@ -230,7 +272,7 @@ EOF
 IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")

 # Try to ping the machine first.
-timeout 60s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
+timeout 90s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"

 if [[ ! -r $sshPrivateKey ]]; then
 echo "Fetching $sshPrivateKey from $leaderName"
@@ -376,6 +418,10 @@ $(
 install-earlyoom.sh \
 install-libssl-compatability.sh \
 install-rsync.sh \
+network-config.sh \
+remove-docker-interface.sh \
+update-default-cuda.sh \
+
 )

 cat > /etc/motd <<EOM
net/net.sh (79 lines changed)
@@ -23,10 +23,14 @@ Operate a configured testnet
 restart  - Shortcut for stop then start

 start-specific options:
 -S [snapFilename]  - Deploy the specified Snap file
 -s edge|beta|stable  - Deploy the latest Snap on the specified Snap release channel
--f [cargoFeatures]  - List of |cargo --feaures=| to activate
-   (ignored if -s or -S is specified)
+-T [tarFilename]  - Deploy the specified release tarball
+-t edge|beta|stable|vX.Y.Z  - Deploy the latest tarball release for the
+   specified release channel (edge|beta|stable) or release tag
+   (vX.Y.Z)
+-f [cargoFeatures]  - List of |cargo --feaures=| to activate
+   (ignored if -s or -S is specified)

 Note: if RUST_LOG is set in the environment it will be propogated into the
 network nodes.
@@ -44,6 +48,7 @@ EOF
 }

 snapChannel=
+releaseChannel=
 snapFilename=
 deployMethod=local
 sanityExtraArgs=
@@ -53,7 +58,7 @@ command=$1
 [[ -n $command ]] || usage
 shift

-while getopts "h?S:s:o:f:" opt; do
+while getopts "h?S:s:T:t:o:f:" opt; do
 case $opt in
 h | \?)
 usage
@@ -74,6 +79,22 @@ while getopts "h?S:s:o:f:" opt; do
 ;;
 esac
 ;;
+T)
+tarballFilename=$OPTARG
+[[ -f $tarballFilename ]] || usage "Snap not readable: $tarballFilename"
+deployMethod=tar
+;;
+t)
+case $OPTARG in
+edge|beta|stable|v*)
+releaseChannel=$OPTARG
+deployMethod=tar
+;;
+*)
+usage "Invalid release channel: $OPTARG"
+;;
+esac
+;;
 f)
 cargoFeatures=$OPTARG
 ;;
@@ -139,6 +160,9 @@ startLeader() {
 snap)
 rsync -vPrc -e "ssh ${sshOptions[*]}" "$snapFilename" "$ipAddress:~/solana/solana.snap"
 ;;
+tar)
+rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
+;;
 local)
 rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
 ;;
@@ -182,7 +206,7 @@ startClient() {
 set -x
 startCommon "$ipAddress"
 ssh "${sshOptions[@]}" -f "$ipAddress" \
-"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp $expectedNodeCount \"$RUST_LOG\""
+"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \"$RUST_LOG\""
 ) >> "$logFile" 2>&1 || {
 cat "$logFile"
 echo "^^^ +++"
@@ -197,10 +221,11 @@ sanity() {
 echo "--- Sanity"
 $metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"

+declare host=$leaderIp # TODO: maybe use ${validatorIpList[0]} ?
 (
 set -x
 # shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
-ssh "${sshOptions[@]}" "$leaderIp" \
+ssh "${sshOptions[@]}" "$host" \
 "./solana/net/remote/remote-sanity.sh $sanityExtraArgs"
 ) || ok=false

@@ -220,13 +245,17 @@ start() {
 set -ex;
 apt-get -qq update;
 apt-get -qq -y install snapd;
-snap download --channel=$snapChannel solana;
+until snap download --channel=$snapChannel solana; do
+sleep 1;
+done
 "
 )
 else
 (
 cd "$SOLANA_ROOT"
-snap download --channel="$snapChannel" solana
+until snap download --channel="$snapChannel" solana; do
+sleep 1
+done
 )
 fi
 snapFilename="$(echo "$SOLANA_ROOT"/solana_*.snap)"
@@ -236,6 +265,17 @@ start() {
 }
 fi
 ;;
+tar)
+if [[ -n $releaseChannel ]]; then
+rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
+cd "$SOLANA_ROOT"
+
+set -x
+curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
+tarballFilename=solana-release.tar.bz2
+fi
+tar jxvf $tarballFilename
+;;
 local)
 build
 ;;
@@ -287,15 +327,28 @@ start() {
 clientDeployTime=$SECONDS
 $metricsWriteDatapoint "testnet-deploy net-start-complete=1"

-if [[ $deployMethod = "snap" ]]; then
-declare networkVersion=unknown
+declare networkVersion=unknown
+case $deployMethod in
+snap)
 IFS=\ read -r _ networkVersion _ < <(
 ssh "${sshOptions[@]}" "$leaderIp" \
 "snap info solana | grep \"^installed:\""
 )
 networkVersion=${networkVersion/0+git./}
-$metricsWriteDatapoint "testnet-deploy version=\"$networkVersion\""
-fi
+;;
+tar)
+networkVersion="$(
+tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown"
+)"
+;;
+local)
+networkVersion="$(git rev-parse HEAD || echo local-unknown)"
+;;
+*)
+usage "Internal error: invalid deployMethod: $deployMethod"
+;;
+esac
+$metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""

 echo
 echo "+++ Deployment Successful"
@@ -6,8 +6,7 @@ echo "$(date) | $0 $*" > client.log

 deployMethod="$1"
 entrypointIp="$2"
-numNodes="$3"
-RUST_LOG="$4"
+RUST_LOG="$3"
 export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info

 missing() {
@@ -17,7 +16,6 @@ missing() {

 [[ -n $deployMethod ]] || missing deployMethod
 [[ -n $entrypointIp ]] || missing entrypointIp
-[[ -n $numNodes ]] || missing numNodes

 source net/common.sh
 loadConfigFile
@@ -35,7 +33,7 @@ snap)
 solana_bench_tps=/snap/bin/solana.bench-tps
 solana_keygen=/snap/bin/solana.keygen
 ;;
-local)
+local|tar)
 PATH="$HOME"/.cargo/bin:"$PATH"
 export USE_INSTALL=1
 export SOLANA_DEFAULT_METRICS_RATE=1
@@ -58,8 +56,7 @@ clientCommand="\
 $solana_bench_tps \
 --network $entrypointIp:8001 \
 --identity client.json \
---num-nodes $numNodes \
---duration 600 \
+--duration 7500 \
 --sustained \
 --threads $threadCount \
 "
@@ -35,7 +35,6 @@ else
 setupArgs="-l"
 fi

-
 case $deployMethod in
 snap)
 SECONDS=0
@@ -78,20 +77,25 @@ snap)

 echo "Succeeded in ${SECONDS} seconds"
 ;;
-local)
+local|tar)
 PATH="$HOME"/.cargo/bin:"$PATH"
 export USE_INSTALL=1
 export RUST_LOG
 export SOLANA_DEFAULT_METRICS_RATE=1

 ./fetch-perf-libs.sh
-export LD_LIBRARY_PATH="$PWD/target/perf-libs:$LD_LIBRARY_PATH"
+export LD_LIBRARY_PATH="$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
+echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"

 scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
 scripts/net-stats.sh > net-stats.log 2>&1 &

 case $nodeType in
 leader)
+if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
+echo Selecting solana-fullnode-cuda
+export SOLANA_CUDA=1
+fi
 ./multinode-demo/setup.sh -t leader $setupArgs
 ./multinode-demo/drone.sh > drone.log 2>&1 &
 ./multinode-demo/leader.sh > leader.log 2>&1 &
@@ -99,6 +103,11 @@ local)
 validator)
 net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/

+if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
+echo Selecting solana-fullnode-cuda
+export SOLANA_CUDA=1
+fi
+
 ./multinode-demo/setup.sh -t validator $setupArgs
 ./multinode-demo/validator.sh "$entrypointIp":~/solana "$entrypointIp:8001" >validator.log 2>&1 &
 ;;
@@ -65,7 +65,7 @@ snap)
 client_id=~/snap/solana/current/config/client-id.json

 ;;
-local)
+local|tar)
 PATH="$HOME"/.cargo/bin:"$PATH"
 export USE_INSTALL=1
 entrypointRsyncUrl="$entrypointIp:~/solana"
@@ -31,7 +31,7 @@ __cloud_FindInstances() {

 declare name zone publicIp privateIp status
 while read -r name publicIp privateIp status; do
-printf "%-30s | publicIp=%-16s privateIp=%s staus=%s\n" "$name" "$publicIp" "$privateIp" "$status"
+printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"

 instances+=("$name:$publicIp:$privateIp")
 done < <(gcloud compute instances list \
@@ -128,6 +128,9 @@ cloud_CreateInstances() {
 --no-restart-on-failure
 )

+# shellcheck disable=SC2206 # Do not want to quote $imageName as it may contain extra args
+args+=(--image $imageName)
+
 # shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
 args+=(--machine-type $machineType)
 if [[ -n $optionalBootDiskSize ]]; then
@@ -13,8 +13,8 @@ sysctl -w kernel.sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
 if command -v earlyoom; then
 systemctl status earlyoom
 else
-wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.1-*_amd64.deb' -e robots=off -nd
-apt install --quiet --yes ./earlyoom_1.1-*_amd64.deb
+wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.2-*_amd64.deb' -e robots=off -nd
+apt install --quiet --yes ./earlyoom_1.2-*_amd64.deb

 cat > earlyoom <<OOM
 # use the kernel OOM killer, trigger at 20% available RAM,
@@ -12,7 +12,6 @@ apt-get --assume-yes install libssl-dev
 #
 # cc: https://github.com/solana-labs/solana/issues/1090
 # cc: https://packages.ubuntu.com/bionic/amd64/libssl1.1/download
-wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
-dpkg -i libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
-rm libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
+wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
+dpkg -i libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
+rm libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb

net/scripts/network-config.sh (new executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash -ex
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

sudo sysctl -w net.core.rmem_default=1610612736
sudo sysctl -w net.core.rmem_max=1610612736

sudo sysctl -w net.core.wmem_default=1610612736
sudo sysctl -w net.core.wmem_max=1610612736
net/scripts/remove-docker-interface.sh
Executable file
11
net/scripts/remove-docker-interface.sh
Executable file
@ -0,0 +1,11 @@
|
|||||||
|
#!/bin/bash -ex
|
||||||
|
#
|
||||||
|
# Some instances have docker running and docker0 network interface confuses
|
||||||
|
# gossip and airdrops fail. As a workaround for now simply remove the docker0
|
||||||
|
# interface
|
||||||
|
#
|
||||||
|
|
||||||
|
[[ $(uname) = Linux ]] || exit 1
|
||||||
|
[[ $USER = root ]] || exit 1
|
||||||
|
|
||||||
|
ip link delete docker0 || true
|
net/scripts/update-default-cuda.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash -ex
#
# Updates the default cuda symlink to the supported version
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

ln -sfT /usr/local/cuda-9.2 /usr/local/cuda
programs/bpf/c/.gitignore (new vendored file, 1 line)
@@ -0,0 +1 @@
/out/
programs/bpf/c/makefile (new file, 1 line)
@@ -0,0 +1 @@
include sdk/bpf.mk
programs/bpf/c/sdk/README.md (new file, 63 lines)
@@ -0,0 +1,63 @@

## Prerequisites

## LLVM / clang 7.0.0
http://releases.llvm.org/download.html

### Linux Ubuntu 16.04 (xenial)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```

### Linux Ubuntu 14.04 (trusty)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```

### macOS
The following depends on Homebrew, instructions on how to install Homebrew are at https://brew.sh

Once Homebrew is installed, ensure the latest llvm is installed:
```
$ brew update          # <- ensure your brew is up to date
$ brew install llvm    # <- should output “Warning: llvm 7.0.0 is already installed and up-to-date”
$ brew --prefix llvm   # <- should output “/usr/local/opt/llvm”
```

## Development

### Quick start
To get started create a `makefile` containing:
```make
include path/to/bpf.mk
```
and `src/program.c` containing:
```c
#include <solana_sdk.h>

bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[1];
  const uint8_t *data;
  uint64_t data_len;

  if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
    return false;
  }
  sol_log_params(1, ka, data, data_len);
  return true;
}
```

Then run `make` to build `out/program.o`.
Run `make help` for more details.

### Limitations
* Programs must be fully contained within a single .c file
* No libc is available but `solana_sdk.h` provides a minimal set of
  primitives.
programs/bpf/c/sdk/bpf.mk (new file, 115 lines)
@@ -0,0 +1,115 @@

all:
.PHONY: help all clean

ifneq ($(V),1)
_@ :=@
endif

INC_DIRS ?=
SRC_DIR ?= ./src
OUT_DIR ?= ./out

OS=$(shell uname)
ifeq ($(OS),Darwin)
LLVM_DIR ?= $(shell brew --prefix llvm)
endif

ifdef LLVM_DIR
CC := $(LLVM_DIR)/bin/clang
LLC := $(LLVM_DIR)/bin/llc
OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump
else
CC := clang-7
LLC := llc-7
OBJ_DUMP := llvm-objdump-7
endif

SYSTEM_INC_DIRS := -isystem $(dir $(lastword $(MAKEFILE_LIST)))inc

CC_FLAGS := \
  -Werror \
  -target bpf \
  -O2 \
  -emit-llvm \
  -fno-builtin \

LLC_FLAGS := \
  -march=bpf \
  -filetype=obj \

OBJ_DUMP_FLAGS := \
  -color \
  -source \
  -disassemble \

help:
	@echo 'BPF Program makefile'
	@echo ''
	@echo 'This makefile will build BPF Programs from C source files into ELFs'
	@echo ''
	@echo 'Assumptions:'
	@echo ' - Programs are a single .c source file (may include headers)'
	@echo ' - Programs are located in the source directory: $(SRC_DIR)'
	@echo ' - Programs are named by their basename (eg. file name:foo.c -> program name:foo)'
	@echo ' - Output files will be placed in the directory: $(OUT_DIR)'
	@echo ''
	@echo 'User settings'
	@echo ' - The following settings are overridable on the command line, default values shown:'
	@echo ' - Show commands while building:'
	@echo '   V=1'
	@echo ' - List of include directories:'
	@echo '   INC_DIRS=$(INC_DIRS)'
	@echo ' - List of system include directories:'
	@echo '   SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)'
	@echo ' - Location of source files:'
	@echo '   SRC_DIR=$(SRC_DIR)'
	@echo ' - Location to place output files:'
	@echo '   OUT_DIR=$(OUT_DIR)'
	@echo ' - Location of LLVM:'
	@echo '   LLVM_DIR=$(LLVM_DIR)'
	@echo ''
	@echo 'Usage:'
	@echo ' - make help - This help message'
	@echo ' - make all - Builds all the programs in the directory: $(SRC_DIR)'
	@echo ' - make clean - Cleans all programs'
	@echo ' - make dump_<program name> - Dumps the contents of the program to stdout'
	@echo ' - make <program name> - Build a single program by name'
	@echo ''
	@echo 'Available programs:'
	$(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n))
	@echo ''
	@echo 'Example:'
	@echo ' - Assuming a program named foo (src/foo.c)'
	@echo '   - make foo'
	@echo '   - make dump_foo'

.PRECIOUS: $(OUT_DIR)/%.bc
$(OUT_DIR)/%.bc: $(SRC_DIR)/%.c
	@echo "[cc] $@ ($<)"
	$(_@)mkdir -p $(OUT_DIR)
	$(_@)$(CC) $(CC_FLAGS) $(SYSTEM_INC_DIRS) $(INC_DIRS) -o $@ -c $< -MD -MF $(@:.bc=.d)

.PRECIOUS: $(OUT_DIR)/%.o
$(OUT_DIR)/%.o: $(OUT_DIR)/%.bc
	@echo "[llc] $@ ($<)"
	$(_@)$(LLC) $(LLC_FLAGS) -o $@ $<

-include $(wildcard $(OUT_DIR)/*.d)

PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*.c)))

define \n


endef

all: $(PROGRAM_NAMES)

%: $(addprefix $(OUT_DIR)/, %.o) ;

dump_%: %
	$(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .o, $<))

clean:
	rm -rf $(OUT_DIR)
programs/bpf/c/sdk/inc/solana_sdk.h (new file, 298 lines)
@@ -0,0 +1,298 @@
#pragma once
/**
 * @brief Solana C-based BPF program utility functions and types
 */

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Numeric types
 */
#ifndef __LP64__
#error LP64 data model required
#endif

typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed long int int64_t;
typedef unsigned long int uint64_t;

/**
 * NULL
 */
#define NULL 0

/**
 * Boolean type
 */
typedef enum { false = 0, true } bool;

/**
 * Helper function that prints a string to stdout
 */
extern void sol_log(const char*);

/**
 * Helper function that prints a 64 bit values represented in hexadecimal
 * to stdout
 */
extern void sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);

/**
 * Prefix for all BPF functions
 *
 * This prefix should be used for functions in order to facilitate
 * interoperability with BPF representation
 */
#define SOL_FN_PREFIX __attribute__((always_inline)) static

/**
 * Size of Public key in bytes
 */
#define SIZE_PUBKEY 32

/**
 * Public key
 */
typedef struct {
  uint8_t x[SIZE_PUBKEY];
} SolPubkey;

/**
 * Compares two public keys
 *
 * @param one First public key
 * @param two Second public key
 * @return true if the same
 */
SOL_FN_PREFIX bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) {
  for (int i = 0; i < sizeof(*one); i++) {
    if (one->x[i] != two->x[i]) {
      return false;
    }
  }
  return true;
}

/**
 * Keyed Accounts
 */
typedef struct {
  SolPubkey *key;        /** Public Key of the account owner */
  int64_t *tokens;       /** Number of tokens owned by this account */
  uint64_t userdata_len; /** Length of userdata in bytes */
  uint8_t *userdata;     /** On-chain data owned by this account */
  SolPubkey *program_id; /** Program that owns this account */
} SolKeyedAccounts;

/**
 * Copies memory
 */
SOL_FN_PREFIX void sol_memcpy(void *dst, const void *src, int len) {
  for (int i = 0; i < len; i++) {
    *((uint8_t *)dst + i) = *((const uint8_t *)src + i);
  }
}

/**
 * Compares memory
 */
SOL_FN_PREFIX int sol_memcmp(const void *s1, const void *s2, int n) {
  for (int i = 0; i < n; i++) {
    uint8_t diff = *((uint8_t *)s1 + i) - *((const uint8_t *)s2 + i);
    if (diff) {
      return diff;
    }
  }
  return 0;
}

/**
 * Computes the number of elements in an array
 */
#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))

/**
 * Panics
 *
 * Prints the line number where the panic occurred and then causes
 * the BPF VM to immediately halt execution. No accounts' userdata are updated
 */
#define sol_panic() _sol_panic(__LINE__)
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
  sol_log_64(0xFF, 0xFF, 0xFF, 0xFF, line);
  uint8_t *pv = (uint8_t *)1;
  *pv = 1;
}

/**
 * Asserts
 */
#define sol_assert(expr)  \
  if (!(expr)) {          \
    _sol_panic(__LINE__); \
  }

/**
 * De-serializes the input parameters into usable types
 *
 * Use this function to deserialize the buffer passed to the program entrypoint
 * into usable types. This function does not perform copy deserialization,
 * instead it populates the pointers and lengths in SolKeyedAccounts and data so
 * that any modification to tokens or account data take place on the original
 * buffer. Doing so also eliminates the need to serialize back into the buffer
 * at program end.
 *
 * @param input Source buffer containing serialized input parameters
 * @param ka Pointer to an array of SolKeyedAccounts to deserialize into
 * @param ka_len Number of SolKeyedAccounts entries in `ka`
 * @param ka_len_out If NULL, fill exactly `ka_len` accounts or fail.
 *                   If not NULL, fill up to `ka_len` accounts and return the
 *                   number of filled accounts in `ka_len_out`.
 * @param data On return, a pointer to the instruction data
 * @param data_len On return, the length in bytes of the instruction data
 * @return Boolean true if successful
 */
SOL_FN_PREFIX bool sol_deserialize(
  const uint8_t *input,
  SolKeyedAccounts *ka,
  uint64_t ka_len,
  uint64_t *ka_len_out,
  const uint8_t **data,
  uint64_t *data_len
) {
  if (ka_len_out == NULL) {
    if (ka_len != *(uint64_t *) input) {
      return false;
    }
    ka_len = *(uint64_t *) input;
  } else {
    if (ka_len > *(uint64_t *) input) {
      ka_len = *(uint64_t *) input;
    }
    *ka_len_out = ka_len;
  }

  input += sizeof(uint64_t);
  for (int i = 0; i < ka_len; i++) {
    // key
    ka[i].key = (SolPubkey *) input;
    input += sizeof(SolPubkey);

    // tokens
    ka[i].tokens = (int64_t *) input;
    input += sizeof(int64_t);

    // account userdata
    ka[i].userdata_len = *(uint64_t *) input;
    input += sizeof(uint64_t);
    ka[i].userdata = input;
    input += ka[i].userdata_len;

    // program_id
    ka[i].program_id = (SolPubkey *) input;
    input += sizeof(SolPubkey);
  }

  // input data
  *data_len = *(uint64_t *) input;
  input += sizeof(uint64_t);
  *data = input;

  return true;
}

/**
 * Debugging utilities
 * @{
 */

/**
 * Prints the hexadecimal representation of a public key
 *
 * @param key The public key to print
 */
SOL_FN_PREFIX void sol_log_key(const SolPubkey *key) {
  for (int j = 0; j < sizeof(*key); j++) {
    sol_log_64(0, 0, 0, j, key->x[j]);
  }
}

/**
 * Prints the hexadecimal representation of an array
 *
 * @param array The array to print
 */
SOL_FN_PREFIX void sol_log_array(const uint8_t *array, int len) {
  for (int j = 0; j < len; j++) {
    sol_log_64(0, 0, 0, j, array[j]);
  }
}

/**
 * Prints the hexadecimal representation of the program's input parameters
 *
 * @param num_ka Number of SolKeyedAccounts to print
 * @param ka A pointer to an array of SolKeyedAccounts to print
 * @param data A pointer to the instruction data to print
 * @param data_len The length in bytes of the instruction data
 */
SOL_FN_PREFIX void sol_log_params(
  uint64_t num_ka,
  const SolKeyedAccounts *ka,
  const uint8_t *data,
  uint64_t data_len
) {
  sol_log_64(0, 0, 0, 0, num_ka);
  for (int i = 0; i < num_ka; i++) {
    sol_log_key(ka[i].key);
    sol_log_64(0, 0, 0, 0, *ka[i].tokens);
    sol_log_array(ka[i].userdata, ka[i].userdata_len);
    sol_log_key(ka[i].program_id);
  }
  sol_log_array(data, data_len);
}

/**@}*/

/**
 * Program entrypoint
 * @{
 *
 * The following is an example of a simple program that prints the input
 * parameters it received:
 *
 * bool entrypoint(const uint8_t *input) {
 *   SolKeyedAccounts ka[1];
 *   const uint8_t *data;
 *   uint64_t data_len;
 *
 *   if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
 *     return false;
 *   }
 *   sol_log_params(1, ka, data, data_len);
 *   return true;
 * }
 */

/**
 * Program entrypoint signature
 *
 * @param input An array containing serialized input parameters
 * @return true if successful
 */
extern bool entrypoint(const uint8_t *input);

#ifdef __cplusplus
}
#endif

/**@}*/
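Of the helpers above, only the variable-count mode of `sol_deserialize` (a non-NULL `ka_len_out`) is not exercised by the bundled examples, which all pass NULL. Below is a minimal sketch of that mode, assuming only the header above; `MAX_KA` and `total_tokens` are illustrative names introduced here, not part of the SDK:

```c
#include <solana_sdk.h>

#define MAX_KA 4 /* illustrative upper bound, not an SDK name */

/* Helpers are declared with SOL_FN_PREFIX so they are force-inlined,
   since this BPF target does not support function calls. */
SOL_FN_PREFIX int64_t total_tokens(const SolKeyedAccounts *ka, uint64_t n) {
  int64_t sum = 0;
  for (uint64_t i = 0; i < n; i++) {
    sum += *ka[i].tokens;
  }
  return sum;
}

bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[MAX_KA];
  uint64_t ka_count; /* receives the number of accounts actually supplied */
  const uint8_t *data;
  uint64_t data_len;

  /* Passing &ka_count instead of NULL accepts up to MAX_KA accounts
     rather than failing on an exact-count mismatch. */
  if (!sol_deserialize(input, ka, MAX_KA, &ka_count, &data, &data_len)) {
    return false;
  }
  sol_log_64(0, 0, 0, ka_count, total_tokens(ka, ka_count));
  return true;
}
```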
programs/bpf/c/src/move_funds.c (new file, 32 lines)
@@ -0,0 +1,32 @@
/**
 * @brief Example C-based BPF program that moves funds from one account to
 * another
 */

#include <solana_sdk.h>

/**
 * Number of SolKeyedAccounts expected. The program should bail if an
 * unexpected number of accounts are passed to the program's entrypoint
 */
#define NUM_KA 3

extern bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[NUM_KA];
  const uint8_t *data;
  uint64_t data_len;

  if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
    return false;
  }

  int64_t tokens = *(int64_t *)data;
  if (*ka[0].tokens >= tokens) {
    *ka[0].tokens -= tokens;
    *ka[2].tokens += tokens;
    // sol_log_64(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
  } else {
    // sol_log_64(0, 0, 0xFF, *ka[0].tokens, tokens);
  }
  return true;
}
```
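The instruction data this program consumes is nothing more than a single 64-bit token count at offset zero of `data`. A hedged worked example of that layout (the byte values are made up for illustration):

```c
/* Illustration only: move_funds reinterprets its instruction data as one
   int64_t. On the little-endian BPF target these 8 bytes encode the value 5,
   so 5 tokens would move from account 0 to account 2 (account 1 is not
   touched by the arithmetic, but NUM_KA still requires it to be present). */
const uint8_t example_data[8] = {5, 0, 0, 0, 0, 0, 0, 0};
int64_t tokens = *(const int64_t *)example_data; /* == 5 */
```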
programs/bpf/c/src/noop.c (new file, 35 lines)
@@ -0,0 +1,35 @@
/**
 * @brief Example C-based BPF program that prints out the parameters
 * passed to it
 */

#include <solana_sdk.h>

/**
 * Number of SolKeyedAccounts expected. The program should bail if an
 * unexpected number of accounts are passed to the program's entrypoint
 */
#define NUM_KA 1

extern bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[NUM_KA];
  const uint8_t *data;
  uint64_t data_len;

  sol_log("noop");

  if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
    return false;
  }
  sol_log_params(NUM_KA, ka, data, data_len);

  sol_assert(sizeof(int8_t) == 1);
  sol_assert(sizeof(uint8_t) == 1);
  sol_assert(sizeof(int16_t) == 2);
  sol_assert(sizeof(uint16_t) == 2);
  sol_assert(sizeof(int32_t) == 4);
  sol_assert(sizeof(uint32_t) == 4);
  sol_assert(sizeof(int64_t) == 8);
  sol_assert(sizeof(uint64_t) == 8);
  return true;
}
@@ -1,9 +0,0 @@
-#!/bin/bash -ex
-
-OUTDIR="${1:-../../../target/release/}"
-THISDIR=$(dirname "$0")
-mkdir -p "$OUTDIR"
-/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/move_funds_c.bc -c "$THISDIR"/src/move_funds.c
-/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/move_funds_c.o "$OUTDIR"/move_funds_c.bc
-
-#/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/move_funds_c.o
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/move_funds_c.o
@@ -1,140 +0,0 @@
-
-//#include <stdint.h>
-//#include <stddef.h>
-
-#if 1
-// one way to define a helper function is with index as a fixed value
-#define BPF_TRACE_PRINTK_IDX 6
-static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
-#else
-// relocation is another option
-extern int sol_print(int, int, int, int, int);
-#endif
-
-typedef long long unsigned int uint64_t;
-typedef long long int int64_t;
-typedef unsigned char uint8_t;
-
-typedef enum { false = 0, true } bool;
-
-#define SIZE_PUBKEY 32
-typedef struct {
-  uint8_t x[SIZE_PUBKEY];
-} SolPubkey;
-
-typedef struct {
-  SolPubkey *key;
-  int64_t* tokens;
-  uint64_t userdata_len;
-  uint8_t *userdata;
-  SolPubkey *program_id;
-} SolKeyedAccounts;
-
-// TODO support BPF function calls rather then forcing everything to be inlined
-#define SOL_FN_PREFIX __attribute__((always_inline)) static
-
-// TODO move this to a registered helper
-SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
-  for (int i = 0; i < len; i++) {
-    *((uint8_t *)dst + i) = *((uint8_t *)src + i);
-  }
-}
-
-#define sol_panic() _sol_panic(__LINE__)
-SOL_FN_PREFIX void _sol_panic(uint64_t line) {
-  sol_print(0, 0, 0xFF, 0xFF, line);
-  char *pv = (char *)1;
-  *pv = 1;
-}
-
-SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka, SolKeyedAccounts *ka,
-                                  uint8_t **userdata, uint64_t *userdata_len) {
-  if (num_ka != *(uint64_t *)src) {
-    return 0;
-  }
-  src += sizeof(uint64_t);
-
-  // TODO fixed iteration loops ok? unrolled?
-  for (int i = 0; i < num_ka; i++) { // TODO this should end up unrolled, confirm
-    // key
-    ka[i].key = (SolPubkey *)src;
-    src += SIZE_PUBKEY;
-
-    // tokens
-    ka[i].tokens = (int64_t *)src;
-    src += sizeof(int64_t);
-
-    // account userdata
-    ka[i].userdata_len = *(uint64_t *)src;
-    src += sizeof(uint64_t);
-    ka[i].userdata = src;
-    src += ka[i].userdata_len;
-
-    // program_id
-    ka[i].program_id = (SolPubkey *)src;
-    src += SIZE_PUBKEY;
-  }
-  // tx userdata
-  *userdata_len = *(uint64_t *)src;
-  src += sizeof(uint64_t);
-  *userdata = src;
-
-  return 1;
-}
-
-
-// -- Debug --
-
-SOL_FN_PREFIX void print_key(SolPubkey *key) {
-  for (int j = 0; j < SIZE_PUBKEY; j++) {
-    sol_print(0, 0, 0, j, key->x[j]);
-  }
-}
-
-SOL_FN_PREFIX void print_userdata(uint8_t *data, int len) {
-  for (int j = 0; j < len; j++) {
-    sol_print(0, 0, 0, j, data[j]);
-  }
-}
-
-SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
-                                uint8_t *userdata, uint64_t userdata_len) {
-  sol_print(0, 0, 0, 0, num_ka);
-  for (int i = 0; i < num_ka; i++) {
-    // key
-    print_key(ka[i].key);
-
-    // tokens
-    sol_print(0, 0, 0, 0, *ka[i].tokens);
-
-    // account userdata
-    print_userdata(ka[i].userdata, ka[i].userdata_len);
-
-    // program_id
-    print_key(ka[i].program_id);
-  }
-  // tx userdata
-  print_userdata(userdata, userdata_len);
-}
-
-uint64_t entrypoint(char *buf) {
-  SolKeyedAccounts ka[3];
-  uint64_t userdata_len;
-  uint8_t *userdata;
-
-  if (1 != sol_deserialize((uint8_t *)buf, 3, ka, &userdata, &userdata_len)) {
-    return 1;
-  }
-  print_params(3, ka, userdata, userdata_len);
-
-  int64_t tokens = *(int64_t*)userdata;
-  if (*ka[0].tokens >= tokens) {
-    *ka[0].tokens -= tokens;
-    *ka[2].tokens += tokens;
-    //sol_print(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
-  } else {
-    //sol_print(0, 0, 0xFF, *ka[0].tokens, tokens);
-  }
-  return 0;
-}
@@ -1,9 +0,0 @@
-#!/bin/bash -ex
-
-OUTDIR="${1:-../../../target/release/}"
-THISDIR=$(dirname "$0")
-mkdir -p "$OUTDIR"
-/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/noop_c.bc -c "$THISDIR"/src/noop.c
-/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/noop_c.o "$OUTDIR"/noop_c.bc
-
-#/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/noop_c.o
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/noop_c.o
@@ -1,133 +0,0 @@
-
-//#include <stdint.h>
-//#include <stddef.h>
-
-#if 1
-// one way to define a helper function is with index as a fixed value
-#define BPF_TRACE_PRINTK_IDX 6
-static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
-#else
-// relocation is another option
-extern int sol_print(int, int, int, int, int);
-#endif
-
-typedef long long unsigned int uint64_t;
-typedef long long int int64_t;
-typedef unsigned char uint8_t;
-
-typedef enum { false = 0, true } bool;
-
-#define SIZE_PUBKEY 32
-typedef struct {
-  uint8_t x[SIZE_PUBKEY];
-} SolPubkey;
-
-typedef struct {
-  SolPubkey *key;
-  int64_t* tokens;
-  uint64_t userdata_len;
-  uint8_t *userdata;
-  SolPubkey *program_id;
-} SolKeyedAccounts;
-
-// TODO support BPF function calls rather then forcing everything to be inlined
-#define SOL_FN_PREFIX __attribute__((always_inline)) static
-
-// TODO move this to a registered helper
-SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
-  for (int i = 0; i < len; i++) {
-    *((uint8_t *)dst + i) = *((uint8_t *)src + i);
-  }
-}
-
-#define sol_panic() _sol_panic(__LINE__)
-SOL_FN_PREFIX void _sol_panic(uint64_t line) {
-  sol_print(0, 0, 0xFF, 0xFF, line);
-  char *pv = (char *)1;
-  *pv = 1;
-}
-
-SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka, SolKeyedAccounts *ka,
-                                  uint8_t **userdata, uint64_t *userdata_len) {
-  if (num_ka != *(uint64_t *)src) {
-    return 0;
-  }
-  src += sizeof(uint64_t);
-
-  // TODO fixed iteration loops ok? unrolled?
-  for (int i = 0; i < num_ka; i++) { // TODO this should end up unrolled, confirm
-    // key
-    ka[i].key = (SolPubkey *)src;
-    src += SIZE_PUBKEY;
-
-    // tokens
-    ka[i].tokens = (int64_t *)src;
-    src += sizeof(int64_t);
-
-    // account userdata
-    ka[i].userdata_len = *(uint64_t *)src;
-    src += sizeof(uint64_t);
-    ka[i].userdata = src;
-    src += ka[i].userdata_len;
-
-    // program_id
-    ka[i].program_id = (SolPubkey *)src;
-    src += SIZE_PUBKEY;
-  }
-  // tx userdata
-  *userdata_len = *(uint64_t *)src;
-  src += sizeof(uint64_t);
-  *userdata = src;
-
-  return 1;
-}
-
-
-// -- Debug --
-
-SOL_FN_PREFIX void print_key(SolPubkey *key) {
-  for (int j = 0; j < SIZE_PUBKEY; j++) {
-    sol_print(0, 0, 0, j, key->x[j]);
-  }
-}
-
-SOL_FN_PREFIX void print_userdata(uint8_t *data, int len) {
-  for (int j = 0; j < len; j++) {
-    sol_print(0, 0, 0, j, data[j]);
-  }
-}
-
-SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
-                                uint8_t *userdata, uint64_t userdata_len) {
-  sol_print(0, 0, 0, 0, num_ka);
-  for (int i = 0; i < num_ka; i++) {
-    // key
-    print_key(ka[i].key);
-
-    // tokens
-    sol_print(0, 0, 0, 0, *ka[i].tokens);
-
-    // account userdata
-    print_userdata(ka[i].userdata, ka[i].userdata_len);
-
-    // program_id
-    print_key(ka[i].program_id);
-  }
-  // tx userdata
-  print_userdata(userdata, userdata_len);
-}
-
-// -- Program entrypoint --
-
-uint64_t entrypoint(char *buf) {
-  SolKeyedAccounts ka[1];
-  uint64_t userdata_len;
-  uint8_t *userdata;
-
-  if (1 != sol_deserialize((uint8_t *)buf, 1, ka, &userdata, &userdata_len)) {
-    return 0;
-  }
-  print_params(1, ka, userdata, userdata_len);
-  return 1;
-}
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpf-noop"
-version = "0.10.0-pre2"
+version = "0.10.5"
 description = "Solana BPF noop program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -8,4 +8,4 @@ license = "Apache-2.0"

 [dependencies]
 rbpf = "0.1.0"
-solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
+solana-sdk = { path = "../../../../sdk", version = "0.10.5" }
@@ -1,9 +0,0 @@
-#!/bin/bash -ex
-
-OUTDIR="${1:-../../../target/release/}"
-THISDIR=$(dirname "$0")
-mkdir -p "$OUTDIR"
-/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/tictactoe_c.bc -c "$THISDIR"/src/tictactoe.c
-/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/tictactoe_c.o "$OUTDIR"/tictactoe_c.bc
-
-# /usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/tictactoe_c.o
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/tictactoe_c.o
@@ -1,373 +0,0 @@
-//#include <stdint.h>
-//#include <stddef.h>
-
-#if 1
-#define BPF_TRACE_PRINTK_IDX 6
-static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
-#else
-// relocation is another option
-extern int sol_print(int, int, int, int, int);
-#endif
-
-typedef long long unsigned int uint64_t;
-typedef long long int int64_t;
-typedef unsigned char uint8_t;
-
-typedef enum { false = 0, true } bool;
-
-// TODO support BPF function calls rather then forcing everything to be inlined
-#define SOL_FN_PREFIX __attribute__((always_inline)) static
-
-// TODO move this to a registered helper
-SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
-  for (int i = 0; i < len; i++) {
-    *((uint8_t *)dst + i) = *((uint8_t *)src + i);
-  }
-}
-
-#define sol_trace() sol_print(0, 0, 0xFF, 0xFF, (__LINE__));
-#define sol_panic() _sol_panic(__LINE__)
-SOL_FN_PREFIX void _sol_panic(uint64_t line) {
-  sol_print(0, 0, 0xFF, 0xFF, line);
-  char *pv = (char *)1;
-  *pv = 1;
-}
-
-#define SIZE_PUBKEY 32
-typedef struct {
-  uint8_t x[SIZE_PUBKEY];
-} SolPubkey;
-
-typedef struct {
-  SolPubkey *key;
-  int64_t tokens;
-  uint64_t userdata_len;
-  uint8_t *userdata;
-  SolPubkey *program_id;
-} SolKeyedAccounts;
-
-SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka,
-                                  SolKeyedAccounts *ka, uint8_t **tx_data,
-                                  uint64_t *tx_data_len) {
-  if (num_ka != *(uint64_t *)src) {
-    return 0;
-  }
-  src += sizeof(uint64_t);
-
-  // TODO fixed iteration loops ok? unrolled?
-  for (int i = 0; i < num_ka;
-       i++) { // TODO this should end up unrolled, confirm
-    // key
-    ka[i].key = (SolPubkey *)src;
-    src += SIZE_PUBKEY;
-
-    // tokens
-    ka[i].tokens = *(uint64_t *)src;
-    src += sizeof(uint64_t);
-
-    // account userdata
-    ka[i].userdata_len = *(uint64_t *)src;
-    src += sizeof(uint64_t);
-    ka[i].userdata = src;
-    src += ka[i].userdata_len;
-
-    // program_id
-    ka[i].program_id = (SolPubkey *)src;
-    src += SIZE_PUBKEY;
-  }
-  // tx userdata
-  *tx_data_len = *(uint64_t *)src;
-  src += sizeof(uint64_t);
-  *tx_data = src;
-
-  return 1;
-}
-
-// // -- Debug --
-
-SOL_FN_PREFIX void print_key(SolPubkey *key) {
-  for (int j = 0; j < SIZE_PUBKEY; j++) {
-    sol_print(0, 0, 0, j, key->x[j]);
-  }
-}
-
-SOL_FN_PREFIX void print_data(uint8_t *data, int len) {
-  for (int j = 0; j < len; j++) {
-    sol_print(0, 0, 0, j, data[j]);
-  }
-}
-
-SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
-                                uint8_t *tx_data, uint64_t tx_data_len) {
-  sol_print(0, 0, 0, 0, num_ka);
-  for (int i = 0; i < num_ka; i++) {
-    // key
-    print_key(ka[i].key);
-
-    // tokens
-    sol_print(0, 0, 0, 0, ka[i].tokens);
-
-    // account userdata
-    print_data(ka[i].userdata, ka[i].userdata_len);
-
-    // program_id
-    print_key(ka[i].program_id);
-  }
-  // tx userdata
-  print_data(tx_data, tx_data_len);
-}
-
-// -- TicTacToe --
-
-// Board Coodinates
-// | 0,0 | 1,0 | 2,0 |
-// | 0,1 | 1,1 | 2,1 |
-// | 0,2 | 1,2 | 2,2 |
-
-typedef enum {
-  Result_Ok,
-  Result_Panic,
-  Result_GameInProgress,
-  Result_InvalidArguments,
-  Result_InvalidMove,
-  Result_InvalidUserdata,
-  Result_InvalidTimestamp,
-  Result_NoGame,
-  Result_NotYourTurn,
-  Result_PlayerNotFound,
-  Result_UserdataTooSmall,
-} Result;
-
-typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;
-
-typedef enum {
-  State_Waiting,
-  State_XMove,
-  State_OMove,
-  State_XWon,
-  State_OWon,
-  State_Draw,
-} State;
-
-typedef struct {
-  // Player who initialized the game
-  SolPubkey player_x;
-  // Player who joined the game
-  SolPubkey player_o;
-  // Current state of the game
-  State state;
-  // Tracks the player moves
-  BoardItem board[9];
-  // Keep Alive for each player
-  int64_t keep_alive[2];
-} Game;
-
-typedef enum {
-  Command_Init = 0,
-  Command_Join,
-  Command_KeepAlive,
-  Command_Move,
-} Command;
-
-SOL_FN_PREFIX void game_dump_board(Game *self) {
-  sol_print(0, 0, 0x9, 0x9, 0x9);
-  sol_print(0, 0, self->board[0], self->board[1], self->board[2]);
-  sol_print(0, 0, self->board[3], self->board[4], self->board[5]);
-  sol_print(0, 0, self->board[6], self->board[7], self->board[8]);
-  sol_print(0, 0, 0x9, 0x9, 0x9);
-}
-
-SOL_FN_PREFIX void game_create(Game *self, SolPubkey *player_x) {
-  sol_memcpy(self->player_x.x, player_x, SIZE_PUBKEY);
-  // TODO self->player_o = 0;
-  self->state = State_Waiting;
-  self->keep_alive[0] = 0;
-  self->keep_alive[1] = 0;
-
-  // TODO fixed iteration loops ok? unrolled?
-  for (int i = 0; i < 9; i++) {
-    self->board[i] = BoardItem_F;
-  }
-}
-
-SOL_FN_PREFIX Result game_join(Game *self, SolPubkey *player_o,
-                               int64_t timestamp) {
-  if (self->state == State_Waiting) {
-    sol_memcpy(self->player_o.x, player_o, SIZE_PUBKEY);
-    self->state = State_XMove;
-
-    if (timestamp <= self->keep_alive[1]) {
-      return Result_InvalidTimestamp;
-    } else {
-      self->keep_alive[1] = timestamp;
-      return Result_Ok;
-    }
-  }
-  return Result_GameInProgress;
-}
-
-SOL_FN_PREFIX bool game_same(BoardItem x_or_o, BoardItem one, BoardItem two,
-                             BoardItem three) {
-  if (x_or_o == one && x_or_o == two && x_or_o == three) {
-    return true;
-  }
-  return false;
-}
-
-SOL_FN_PREFIX bool game_same_player(SolPubkey *one, SolPubkey *two) {
-  // TODO fixed iteration loops ok? unrolled?
-  for (int i = 0; i < SIZE_PUBKEY; i++) {
-    if (one->x[i] != two->x[i]) {
-      return false;
-    }
-  }
-  return true;
-}
-
-SOL_FN_PREFIX Result game_next_move(Game *self, SolPubkey *player, int x,
-                                    int y) {
-  int board_index = y * 3 + x;
-  if (board_index >= 9 || self->board[board_index] != BoardItem_F) {
-    return Result_InvalidMove;
-  }
-
-  BoardItem x_or_o;
-  State won_state;
-
-  switch (self->state) {
-  case State_XMove:
-    if (!game_same_player(player, &self->player_x)) {
-      return Result_PlayerNotFound;
-    }
-    self->state = State_OMove;
-    x_or_o = BoardItem_X;
-    won_state = State_XWon;
-    break;
-
-  case State_OMove:
-    if (!game_same_player(player, &self->player_o)) {
-      return Result_PlayerNotFound;
-    }
-    self->state = State_XMove;
-    x_or_o = BoardItem_O;
-    won_state = State_OWon;
-    break;
-
-  default:
-    return Result_NotYourTurn;
-  }
-
-  self->board[board_index] = x_or_o;
-
-  // game_dump_board(self);
-
-  bool winner =
-      // Check rows
-      game_same(x_or_o, self->board[0], self->board[1], self->board[2]) ||
-      game_same(x_or_o, self->board[3], self->board[4], self->board[5]) ||
-      game_same(x_or_o, self->board[6], self->board[7], self->board[8]) ||
-      // Check columns
-      game_same(x_or_o, self->board[0], self->board[3], self->board[6]) ||
-      game_same(x_or_o, self->board[1], self->board[4], self->board[7]) ||
-      game_same(x_or_o, self->board[2], self->board[5], self->board[8]) ||
-      // Check both diagonals
-      game_same(x_or_o, self->board[0], self->board[4], self->board[8]) ||
-      game_same(x_or_o, self->board[2], self->board[4], self->board[6]);
-
-  if (winner) {
-    self->state = won_state;
-  }
-
-  {
-    int draw = true;
-    // TODO fixed iteration loops ok? unrolled?
-    for (int i = 0; i < 9; i++) {
-      if (BoardItem_F == self->board[i]) {
-        draw = false;
-        break;
-      }
-    }
-    if (draw) {
-      self->state = State_Draw;
-    }
-  }
-  return Result_Ok;
-}
-
-SOL_FN_PREFIX Result game_keep_alive(Game *self, SolPubkey *player,
-                                     int64_t timestamp) {
-  switch (self->state) {
-  case State_Waiting:
-  case State_XMove:
-  case State_OMove:
-    if (game_same_player(player, &self->player_x)) {
-      if (timestamp <= self->keep_alive[0]) {
-        return Result_InvalidTimestamp;
-      }
-      self->keep_alive[0] = timestamp;
-    } else if (game_same_player(player, &self->player_o)) {
-      if (timestamp <= self->keep_alive[1]) {
-        return Result_InvalidTimestamp;
-      }
-      self->keep_alive[1] = timestamp;
-    } else {
-      return Result_PlayerNotFound;
-    }
-    break;
-
-  default:
-    break;
-  }
-  return Result_Ok;
-}
-
-// accounts[0] On Init must be player X, after that doesn't matter,
-//             anybody can cause a dashboard update
-// accounts[1] must be a TicTacToe state account
-// accounts[2] must be account of current player, only Pubkey is used
-uint64_t entrypoint(uint8_t *buf) {
-  SolKeyedAccounts ka[3];
-  uint64_t tx_data_len;
-  uint8_t *tx_data;
|
|
||||||
int err = 0;
|
|
||||||
|
|
||||||
if (1 != sol_deserialize(buf, 3, ka, &tx_data, &tx_data_len)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (sizeof(Game) > ka[1].userdata_len) {
|
|
||||||
sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
Game game;
|
|
||||||
sol_memcpy(&game, ka[1].userdata, sizeof(game));
|
|
||||||
|
|
||||||
Command command = *tx_data;
|
|
||||||
switch (command) {
|
|
||||||
case Command_Init:
|
|
||||||
game_create(&game, ka[2].key);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case Command_Join:
|
|
||||||
err = game_join(&game, ka[2].key, *((int64_t *)(tx_data + 4)));
|
|
||||||
break;
|
|
||||||
|
|
||||||
case Command_KeepAlive:
|
|
||||||
err = game_keep_alive(&game, ka[2].key, /*TODO*/ 0);
|
|
||||||
break;
|
|
||||||
|
|
||||||
case Command_Move:
|
|
||||||
err = game_next_move(&game, ka[2].key, tx_data[4], tx_data[5]);
|
|
||||||
break;
|
|
||||||
|
|
||||||
default:
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
sol_memcpy(ka[1].userdata, &game, sizeof(game));
|
|
||||||
sol_print(0, 0, 0, err, game.state);
|
|
||||||
if (Result_Ok != err) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
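The entrypoint above reads the command tag from byte 0 of `tx_data` and command arguments starting at byte 4: `Command_Join` takes a little-endian `int64_t` timestamp at offset 4, and `Command_Move` takes the x and y coordinates as single bytes at offsets 4 and 5. A minimal client-side encoding sketch, written in Rust for illustration and assuming a little-endian BPF target; the function names are hypothetical, not part of this change:

// Hypothetical encoders for the tic-tac-toe tx_data layout sketched above.
fn encode_init() -> Vec<u8> {
    vec![0, 0, 0, 0] // Command_Init = 0 in byte 0; bytes 1-3 are padding
}

fn encode_join(timestamp: i64) -> Vec<u8> {
    let mut data = vec![1, 0, 0, 0]; // Command_Join = 1
    data.extend_from_slice(&timestamp.to_le_bytes()); // read at tx_data + 4
    data
}

fn encode_move(x: u8, y: u8) -> Vec<u8> {
    vec![3, 0, 0, 0, x, y] // Command_Move = 3; x at tx_data[4], y at tx_data[5]
}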
@@ -1,9 +0,0 @@
#!/bin/bash -ex

OUTDIR="${1:-../../../target/release/}"
THISDIR=$(dirname "$0")
mkdir -p "$OUTDIR"
/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/tictactoe_dashboard_c.bc -c "$THISDIR"/src/tictactoe_dashboard.c
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/tictactoe_dashboard_c.o "$OUTDIR"/tictactoe_dashboard_c.bc

# /usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/tictactoe_dashboard_c.o
@@ -1,3 +0,0 @@
#!/bin/sh

/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/tictactoe_dashboard_c.o
@@ -1,236 +0,0 @@
//#include <stdint.h>
//#include <stddef.h>

#if 1
#define BPF_TRACE_PRINTK_IDX 6
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
#else
// relocation is another option
extern int sol_print(int, int, int, int, int);
#endif

typedef long long unsigned int uint64_t;
typedef long long int int64_t;
typedef unsigned int uint32_t;
typedef int int32_t;
typedef unsigned char uint8_t;

typedef enum { false = 0, true } bool;

// TODO support BPF function calls rather than forcing everything to be inlined
#define SOL_FN_PREFIX __attribute__((always_inline)) static

// TODO move this to a registered helper
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
  for (int i = 0; i < len; i++) {
    *((uint8_t *)dst + i) = *((uint8_t *)src + i);
  }
}

#define sol_trace() sol_print(0, 0, 0xFF, 0xFF, (__LINE__));
#define sol_panic() _sol_panic(__LINE__)
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
  sol_print(0, 0, 0xFF, 0xFF, line);
  char *pv = (char *)1;
  *pv = 1;
}

#define SIZE_PUBKEY 32
typedef struct {
  uint8_t x[SIZE_PUBKEY];
} SolPubkey;

SOL_FN_PREFIX bool SolPubkey_same(SolPubkey *one, SolPubkey *two) {
  for (int i = 0; i < SIZE_PUBKEY; i++) {
    if (one->x[i] != two->x[i]) {
      return false;
    }
  }
  return true;
}

typedef struct {
  SolPubkey *key;
  int64_t tokens;
  uint64_t userdata_len;
  uint8_t *userdata;
  SolPubkey *program_id;
} SolKeyedAccounts;

SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka,
                                  SolKeyedAccounts *ka, uint8_t **tx_data,
                                  uint64_t *tx_data_len) {
  if (num_ka != *(uint64_t *)src) {
    return 0;
  }
  src += sizeof(uint64_t);

  // TODO fixed iteration loops ok? unrolled?
  for (int i = 0; i < num_ka;
       i++) {  // TODO this should end up unrolled, confirm
    // key
    ka[i].key = (SolPubkey *)src;
    src += SIZE_PUBKEY;

    // tokens
    ka[i].tokens = *(uint64_t *)src;
    src += sizeof(uint64_t);

    // account userdata
    ka[i].userdata_len = *(uint64_t *)src;
    src += sizeof(uint64_t);
    ka[i].userdata = src;
    src += ka[i].userdata_len;

    // program_id
    ka[i].program_id = (SolPubkey *)src;
    src += SIZE_PUBKEY;
  }
  // tx userdata
  *tx_data_len = *(uint64_t *)src;
  src += sizeof(uint64_t);
  *tx_data = src;

  return 1;
}

// -- Debug --

SOL_FN_PREFIX void print_key(SolPubkey *key) {
  for (int j = 0; j < SIZE_PUBKEY; j++) {
    sol_print(0, 0, 0, j, key->x[j]);
  }
}

SOL_FN_PREFIX void print_data(uint8_t *data, int len) {
  for (int j = 0; j < len; j++) {
    sol_print(0, 0, 0, j, data[j]);
  }
}

SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
                                uint8_t *tx_data, uint64_t tx_data_len) {
  sol_print(0, 0, 0, 0, num_ka);
  for (int i = 0; i < num_ka; i++) {
    // key
    print_key(ka[i].key);

    // tokens
    sol_print(0, 0, 0, 0, ka[i].tokens);

    // account userdata
    print_data(ka[i].userdata, ka[i].userdata_len);

    // program_id
    print_key(ka[i].program_id);
  }
  // tx userdata
  print_data(tx_data, tx_data_len);
}

// -- TicTacToe Dashboard --

// TODO put this in a common place for both tictactoe and tictactoe_dashboard
typedef enum {
  State_Waiting,
  State_XMove,
  State_OMove,
  State_XWon,
  State_OWon,
  State_Draw,
} State;

// TODO put this in a common place for both tictactoe and tictactoe_dashboard
typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;

// TODO put this in a common place for both tictactoe and tictactoe_dashboard
typedef struct {
  SolPubkey player_x;
  SolPubkey player_o;
  State state;
  BoardItem board[9];
  int64_t keep_alive[2];
} Game;

#define MAX_GAMES_TRACKED 5

typedef struct {
  // Latest pending game
  SolPubkey pending;
  // Last N completed games (0 is the latest)
  SolPubkey completed[MAX_GAMES_TRACKED];
  // Index into completed pointing to latest game completed
  uint32_t latest_game;
  // Total number of completed games
  uint32_t total;
} Dashboard;

SOL_FN_PREFIX bool update(Dashboard *self, Game *game, SolPubkey *game_pubkey) {
  switch (game->state) {
    case State_Waiting:
      sol_memcpy(&self->pending, game_pubkey, SIZE_PUBKEY);
      break;
    case State_XMove:
    case State_OMove:
      // Nothing to do. In-progress games are not managed by the dashboard
      break;
    case State_XWon:
    case State_OWon:
    case State_Draw:
      for (int i = 0; i < MAX_GAMES_TRACKED; i++) {
        if (SolPubkey_same(&self->completed[i], game_pubkey)) {
          // TODO: Once the PoH height is exposed to programs, it could be
          // used to ensure that old games are not being re-added and causing
          // total to increment incorrectly.
          return false;
        }
      }
      self->total += 1;
      self->latest_game = (self->latest_game + 1) % MAX_GAMES_TRACKED;
      sol_memcpy(self->completed[self->latest_game].x, game_pubkey,
                 SIZE_PUBKEY);
      break;

    default:
      break;
  }
  return true;
}

// accounts[0] doesn't matter, anybody can cause a dashboard update
// accounts[1] must be a Dashboard account
// accounts[2] must be a Game account
uint64_t entrypoint(uint8_t *buf) {
  SolKeyedAccounts ka[3];
  uint64_t tx_data_len;
  uint8_t *tx_data;
  int err = 0;

  if (1 != sol_deserialize(buf, 3, ka, &tx_data, &tx_data_len)) {
    return false;
  }

  // TODO check dashboard and game program ids (how to check now that they are
  //      not known values)
  // TODO check validity of dashboard and game structures contents
  if (sizeof(Dashboard) > ka[1].userdata_len) {
    sol_print(0, 0, 0xFF, sizeof(Dashboard), ka[1].userdata_len);
    return false;
  }
  Dashboard dashboard;
  sol_memcpy(&dashboard, ka[1].userdata, sizeof(dashboard));

  if (sizeof(Game) > ka[2].userdata_len) {
    sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
    return false;
  }
  Game game;
  sol_memcpy(&game, ka[2].userdata, sizeof(game));
  if (true != update(&dashboard, &game, ka[2].key)) {
    return false;
  }

  sol_memcpy(ka[1].userdata, &dashboard, sizeof(dashboard));
  return true;
}
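`sol_deserialize` above fixes the wire layout of the program input: a count of keyed accounts, then for each account its 32-byte pubkey, 8-byte token balance, length-prefixed userdata, and 32-byte owning program id, followed by the length-prefixed transaction userdata. A sketch of the matching serializer, written in Rust purely for illustration (the struct and function names here are assumptions, not an API from this change):

/// Builds the flat input buffer that sol_deserialize above walks.
/// Illustrative only; all names are hypothetical.
struct KeyedAccountIn<'a> {
    key: [u8; 32],
    tokens: u64,
    userdata: &'a [u8],
    program_id: [u8; 32],
}

fn serialize_input(accounts: &[KeyedAccountIn], tx_data: &[u8]) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.extend_from_slice(&(accounts.len() as u64).to_le_bytes()); // num_ka
    for a in accounts {
        buf.extend_from_slice(&a.key); // 32-byte pubkey
        buf.extend_from_slice(&a.tokens.to_le_bytes()); // 8-byte balance
        buf.extend_from_slice(&(a.userdata.len() as u64).to_le_bytes());
        buf.extend_from_slice(a.userdata); // account userdata
        buf.extend_from_slice(&a.program_id); // 32-byte program id
    }
    buf.extend_from_slice(&(tx_data.len() as u64).to_le_bytes());
    buf.extend_from_slice(tx_data); // transaction userdata
    buf
}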
@@ -1,6 +1,6 @@
 [package]
 name = "solana-bpfloader"
-version = "0.10.0-pre2"
+version = "0.10.5"
 description = "Solana BPF Loader"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ elf = "0.0.10"
 env_logger = "0.5.12"
 libc = "0.2.43"
 log = "0.4.2"
-rbpf = "0.1.0"
+solana_rbpf = "0.1.3"
 serde = "1.0.27"
 serde_derive = "1.0.27"
-solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
+solana-sdk = { path = "../../../sdk", version = "0.10.5" }

 [lib]
-name = "bpf_loader"
+name = "solana_bpf_loader"
 crate-type = ["cdylib"]
@@ -1,4 +1,4 @@
-use rbpf::ebpf;
+use solana_rbpf::ebpf;
 use std::io::{Error, ErrorKind};

 fn reject<S: AsRef<str>>(msg: S) -> Result<(), Error> {
@@ -5,37 +5,31 @@ extern crate byteorder;
 extern crate env_logger;
 #[macro_use]
 extern crate log;
-extern crate rbpf;
+extern crate libc;
+extern crate solana_rbpf;
 extern crate solana_sdk;

 use bincode::deserialize;
 use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
+use libc::c_char;
+use solana_rbpf::EbpfVmRaw;
 use solana_sdk::account::KeyedAccount;
 use solana_sdk::loader_instruction::LoaderInstruction;
 use solana_sdk::pubkey::Pubkey;
+use std::ffi::CStr;
 use std::io::prelude::*;
-use std::io::Error;
+use std::io::{Error, ErrorKind};
 use std::mem;
 use std::sync::{Once, ONCE_INIT};

-fn create_vm(prog: &[u8]) -> Result<rbpf::EbpfVmRaw, Error> {
-    let mut vm = rbpf::EbpfVmRaw::new(None)?;
-    vm.set_verifier(bpf_verifier::check)?;
-    vm.set_program(&prog)?;
-    vm.register_helper(
-        rbpf::helpers::BPF_TRACE_PRINTK_IDX,
-        rbpf::helpers::bpf_trace_printf,
-    )?;
-    Ok(vm)
-}
-
+// TODO use rbpf's disassemble
 #[allow(dead_code)]
 fn dump_program(key: &Pubkey, prog: &[u8]) {
     let mut eight_bytes: Vec<u8> = Vec::new();
-    println!("BPF Program: {:?}", key);
+    info!("BPF Program: {:?}", key);
     for i in prog.iter() {
         if eight_bytes.len() >= 7 {
-            println!("{:02X?}", eight_bytes);
+            info!("{:02X?}", eight_bytes);
             eight_bytes.clear();
         } else {
             eight_bytes.push(i.clone());
@@ -43,6 +37,67 @@ fn dump_program(key: &Pubkey, prog: &[u8]) {
         }
     }
 }
+
+#[allow(unused_variables)]
+pub fn helper_sol_log_verify(
+    addr: u64,
+    unused2: u64,
+    unused3: u64,
+    unused4: u64,
+    unused5: u64,
+    ro_regions: &[&[u8]],
+    unused7: &[&[u8]],
+) -> Result<(), Error> {
+    for region in ro_regions.iter() {
+        if region.as_ptr() as u64 <= addr
+            && addr as u64 <= region.as_ptr() as u64 + region.len() as u64
+        {
+            let c_buf: *const c_char = addr as *const c_char;
+            let max_size = (region.as_ptr() as u64 + region.len() as u64) - addr;
+            unsafe {
+                for i in 0..max_size {
+                    if std::ptr::read(c_buf.offset(i as isize)) == 0 {
+                        return Ok(());
+                    }
+                }
+            }
+            return Err(Error::new(ErrorKind::Other, "Error, Unterminated string"));
+        }
+    }
+    Err(Error::new(
+        ErrorKind::Other,
+        "Error: Load segfault, bad string pointer",
+    ))
+}
+
+#[allow(unused_variables)]
+pub fn helper_sol_log(addr: u64, unused2: u64, unused3: u64, unused4: u64, unused5: u64) -> u64 {
+    let c_buf: *const c_char = addr as *const c_char;
+    let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
+    match c_str.to_str() {
+        Ok(slice) => info!("sol_log: {:?}", slice),
+        Err(e) => warn!("Error: Cannot print invalid string"),
+    };
+    0
+}
+
+pub fn helper_sol_log_u64(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
+    info!(
+        "sol_log_u64: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
+        arg1, arg2, arg3, arg4, arg5
+    );
+    0
+}
+
+fn create_vm(prog: &[u8]) -> Result<EbpfVmRaw, Error> {
+    let mut vm = EbpfVmRaw::new(None)?;
+    vm.set_verifier(bpf_verifier::check)?;
+    vm.set_max_instruction_count(36000)?; // 36000 is a wag, need to tune
+    vm.set_elf(&prog)?;
+    vm.register_helper_ex("sol_log", Some(helper_sol_log_verify), helper_sol_log)?;
+    vm.register_helper_ex("sol_log_64", None, helper_sol_log_u64)?;
+    Ok(vm)
+}

 fn serialize_parameters(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> Vec<u8> {
     assert_eq!(32, mem::size_of::<Pubkey>());

@@ -90,12 +145,12 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -

     if keyed_accounts[0].account.executable {
         let prog = keyed_accounts[0].account.userdata.clone();
-        trace!("Call BPF, {} Instructions", prog.len() / 8);
+        trace!("Call BPF, {} instructions", prog.len() / 8);
         //dump_program(keyed_accounts[0].key, &prog);
-        let vm = match create_vm(&prog) {
+        let mut vm = match create_vm(&prog) {
             Ok(vm) => vm,
             Err(e) => {
-                warn!("{}", e);
+                warn!("create_vm failed: {}", e);
                 return false;
             }
         };
@@ -105,20 +160,24 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
                 return false;
             },
             Err(e) => {
-                warn!("{}", e);
+                warn!("execute_program failed: {}", e);
                 return false;
             }
         }
         deserialize_parameters(&mut keyed_accounts[1..], &v);
+        trace!(
+            "BPF program executed {} instructions",
+            vm.get_last_instruction_count()
+        );
     } else if let Ok(instruction) = deserialize(tx_data) {
         match instruction {
             LoaderInstruction::Write { offset, bytes } => {
                 let offset = offset as usize;
                 let len = bytes.len();
-                trace!("BpfLoader::Write offset {} length {:?}", offset, len);
+                debug!("Write: offset={} length={}", offset, len);
                 if keyed_accounts[0].account.userdata.len() < offset + len {
-                    println!(
-                        "Overflow {} < {}",
+                    warn!(
+                        "Write overflow: {} < {}",
                         keyed_accounts[0].account.userdata.len(),
                         offset + len
                     );
@@ -128,7 +187,7 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
             }
             LoaderInstruction::Finalize => {
                 keyed_accounts[0].account.executable = true;
-                trace!("BPfLoader::Finalize prog: {:?}", keyed_accounts[0].key);
+                info!("Finalize: account {:?}", keyed_accounts[0].key);
             }
         }
     } else {
@@ -136,3 +195,36 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
     }
     true
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use solana_rbpf::helpers;
+
+    #[test]
+    #[should_panic(expected = "Error: Execution exceeded maximum number of instructions")]
+    fn test_non_terminating_program() {
+        #[rustfmt::skip]
+        let prog = &[
+            0xb7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r6 = 0
+            0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = 0
+            0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r2 = 0
+            0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
+            0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r4 = 0
+            0xbf, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r5 = r6
+            0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call 6
+            0x07, 0x06, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // r6 + 1
+            0x05, 0x00, 0xf8, 0xff, 0x00, 0x00, 0x00, 0x00, // goto -8
+            0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
+        ];
+        let input = &mut [0x00];
+
+        let mut vm = EbpfVmRaw::new(None).unwrap();
+        vm.set_verifier(bpf_verifier::check).unwrap();
+        vm.set_max_instruction_count(36000).unwrap(); // 36000 is a wag, need to tune
+        vm.set_program(prog).unwrap();
+        vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helpers::bpf_trace_printf)
+            .unwrap();
+        vm.execute_program(input).unwrap();
+    }
+}
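The new `create_vm` path caps runaway programs with the instruction-count budget that `test_non_terminating_program` exercises. A minimal standalone harness, assuming the `solana_rbpf` 0.1.3 API shown in the diff above (it mirrors `create_vm` but omits the verifier and helper registration):

use solana_rbpf::EbpfVmRaw;
use std::io::Error;

// Load an ELF, cap execution at the loader's budget, and run it.
fn run_capped(elf: &[u8], input: &mut [u8]) -> Result<u64, Error> {
    let mut vm = EbpfVmRaw::new(None)?;
    vm.set_max_instruction_count(36_000)?; // same budget as create_vm above
    vm.set_elf(elf)?;
    vm.execute_program(input)
}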
@@ -1,6 +1,6 @@
 [package]
 name = "solana-lualoader"
-version = "0.10.0-pre2"
+version = "0.10.5"
 description = "Solana Lua Loader"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ log = "0.4.2"
 rlua = "0.15.2"
 serde = "1.0.27"
 serde_derive = "1.0.27"
-solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
+solana-sdk = { path = "../../../sdk", version = "0.10.5" }

 [dev-dependencies]
 bincode = "1.0.0"

 [lib]
-name = "lua_loader"
+name = "solana_lua_loader"
 crate-type = ["cdylib"]
@@ -79,8 +79,8 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
             let len = bytes.len();
             trace!("LuaLoader::Write offset {} length {:?}", offset, len);
             if keyed_accounts[0].account.userdata.len() < offset + len {
-                println!(
-                    "Overflow {} < {}",
+                warn!(
+                    "Write overflow {} < {}",
                     keyed_accounts[0].account.userdata.len(),
                     offset + len
                 );
@@ -147,13 +147,12 @@ mod tests {

     #[test]
     fn test_move_funds_with_lua_via_process() {
-        let bytes = r#"
+        let userdata = r#"
            local tokens, _ = string.unpack("I", data)
            accounts[1].tokens = accounts[1].tokens - tokens
            accounts[2].tokens = accounts[2].tokens + tokens
        "#.as_bytes()
            .to_vec();
-        let userdata = serialize(&LuaLoader::Bytes { bytes }).unwrap();

         let alice_pubkey = Pubkey::default();
         let bob_pubkey = Pubkey::default();
@@ -194,15 +193,12 @@ mod tests {

     #[test]
     fn test_load_lua_library() {
-        let bytes = r#"
+        let userdata = r#"
            local serialize = load(accounts[2].userdata)().serialize
            accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s")
        "#.as_bytes()
            .to_vec();
-        let userdata = serialize(&LuaLoader::Bytes { bytes }).unwrap();

         let program_id = Pubkey::default();

         let program_account = Account {
             tokens: 1,
             userdata,
@@ -210,9 +206,7 @@ mod tests {
             executable: true,
             loader_program_id: Pubkey::default(),
         };

         let alice_account = Account::new(100, 0, program_id);

         let serialize_account = Account {
             tokens: 100,
             userdata: read_test_file("serialize.lua"),
@@ -220,7 +214,6 @@ mod tests {
             executable: false,
             loader_program_id: Pubkey::default(),
         };

         let mut accounts = [
             (Pubkey::default(), program_account),
             (Pubkey::default(), alice_account),
@@ -228,9 +221,7 @@ mod tests {
             (Pubkey::default(), Account::new(1, 0, program_id)),
         ];
         let mut keyed_accounts = create_keyed_accounts(&mut accounts);

         process(&mut keyed_accounts, &[]);

         // Verify deterministic ordering of a serialized Lua table.
         assert_eq!(
             str::from_utf8(&keyed_accounts[3].account.userdata).unwrap(),
@@ -250,12 +241,9 @@ mod tests {
         let dan_pubkey = Pubkey::new(&[5; 32]);
         let erin_pubkey = Pubkey::new(&[6; 32]);

-        let userdata = serialize(&LuaLoader::Bytes {
-            bytes: read_test_file("multisig.lua"),
-        }).unwrap();
         let program_account = Account {
             tokens: 1,
-            userdata,
+            userdata: read_test_file("multisig.lua"),
             program_id,
             executable: true,
             loader_program_id: Pubkey::default(),
@@ -1,13 +1,13 @@
 [package]
 name = "solana-noop"
-version = "0.10.0-pre2"
+version = "0.10.5"
 description = "Solana noop program"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"

 [dependencies]
-solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
+solana-sdk = { path = "../../../sdk", version = "0.10.5" }

 [lib]
 name = "noop"
@@ -117,6 +117,14 @@ $ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
 <TX_SIGNATURE>
 ```

+### Deploy program
+```
+// Command
+$ solana-wallet deploy <PATH>
+
+// Return
+<PROGRAM_ID>
+```
+
 ## Javascript solana-web3.js Interface
@@ -20,7 +20,7 @@ if [[ ! -d $installDir ]]; then
 fi

 for dir in "$SOLANA_ROOT"/programs/native/*; do
-  for program in "$SOLANA_ROOT/target/$variant/deps/lib$(basename "$dir")".{so,dylib,dll}; do
+  for program in "$SOLANA_ROOT"/target/"$variant"/deps/lib{,solana_}"$(basename "$dir")".{so,dylib,dll}; do
     if [[ -f $program ]]; then
       cp -v "$program" "$installDir"
     fi
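With the loader crates renamed earlier in this change (`bpf_loader` to `solana_bpf_loader`, `lua_loader` to `solana_lua_loader`), the added `lib{,solana_}` brace expansion keeps the installer matching both the old and new shared-object names, for example `libnoop.so` as well as `libsolana_bpf_loader.so`.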
@@ -1,6 +1,6 @@
 [package]
 name = "solana-sdk"
-version = "0.10.0-pre2"
+version = "0.10.5"
 description = "Solana SDK"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -117,8 +117,8 @@ parts:
       # Build/install all other programs
       cargo install --root $SNAPCRAFT_PART_INSTALL --bins

-      # TODO: install native programs when they are ready for public use
-      #./scripts/install-native-programs.sh $SNAPCRAFT_PART_INSTALL/bin/
+      # Install native programs
+      ./scripts/install-native-programs.sh $SNAPCRAFT_PART_INSTALL/bin/

       # Install multinode-demo/
       mkdir -p $SNAPCRAFT_PART_INSTALL/multinode-demo/
190  src/bank.rs

@@ -7,6 +7,7 @@ use bincode::deserialize;
 use bincode::serialize;
 use bpf_loader;
 use budget_program::BudgetState;
+use budget_transaction::BudgetTransaction;
 use counter::Counter;
 use entry::Entry;
 use hash::{hash, Hash};
@@ -30,7 +31,7 @@ use std;
 use std::collections::{BTreeMap, HashMap, HashSet};
 use std::result;
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Mutex, RwLock};
 use std::time::Instant;
 use storage_program::StorageProgram;
 use system_program::SystemProgram;
@@ -41,7 +42,6 @@ use timing::{duration_as_us, timestamp};
 use token_program::TokenProgram;
 use tokio::prelude::Future;
 use transaction::Transaction;
-use vote_program::VoteProgram;
 use window::WINDOW_SIZE;

 /// The number of most recent `last_id` values that the bank will track the signatures
@@ -151,7 +151,7 @@ impl Default for LastIds {
 /// The state of all accounts and contracts after processing its entries.
 pub struct Bank {
     /// A map of account public keys to the balance in that account.
-    pub accounts: RwLock<HashMap<Pubkey, Account>>,
+    accounts: RwLock<HashMap<Pubkey, Account>>,

     /// set of accounts which are currently in the pipeline
     account_locks: Mutex<HashSet<Pubkey>>,
@@ -171,13 +171,6 @@ pub struct Bank {

     // Mapping of signatures to Subscriber ids and sinks to notify on confirmation
     signature_subscriptions: RwLock<HashMap<Signature, HashMap<Pubkey, Sink<RpcSignatureStatus>>>>,
-
-    /// Tracks and updates the leader schedule based on the votes and account stakes
-    /// processed by the bank
-    pub leader_scheduler: Arc<RwLock<LeaderScheduler>>,
-
-    // The number of ticks that have elapsed since genesis
-    tick_height: Mutex<u64>,
 }

 impl Default for Bank {
@@ -190,8 +183,6 @@ impl Default for Bank {
             finality_time: AtomicUsize::new(std::usize::MAX),
             account_subscriptions: RwLock::new(HashMap::new()),
             signature_subscriptions: RwLock::new(HashMap::new()),
-            leader_scheduler: Arc::new(RwLock::new(LeaderScheduler::default())),
-            tick_height: Mutex::new(0),
         }
     }
 }
@@ -622,8 +613,6 @@ impl Bank {
         {
             return Err(BankError::ProgramRuntimeError(instruction_index as u8));
         }
-    } else if VoteProgram::check_id(&tx_program_id) {
-        VoteProgram::process_transaction(&tx, instruction_index, program_accounts).is_err();
     } else {
         let mut depth = 0;
         let mut keys = Vec::new();
@@ -901,28 +890,41 @@ impl Bank {
         results
     }

-    pub fn process_entry(&self, entry: &Entry) -> Result<()> {
+    pub fn process_entry(
+        &self,
+        entry: &Entry,
+        tick_height: &mut u64,
+        leader_scheduler: &mut LeaderScheduler,
+    ) -> Result<()> {
         if !entry.is_tick() {
             for result in self.process_transactions(&entry.transactions) {
                 result?;
             }
         } else {
-            let tick_height = {
-                let mut tick_height_lock = self.tick_height.lock().unwrap();
-                *tick_height_lock += 1;
-                *tick_height_lock
-            };
-
-            self.leader_scheduler
-                .write()
-                .unwrap()
-                .update_height(tick_height, self);
+            *tick_height += 1;
             self.register_entry_id(&entry.id);
         }

+        self.process_entry_votes(entry, *tick_height, leader_scheduler);
         Ok(())
     }

+    fn process_entry_votes(
+        &self,
+        entry: &Entry,
+        tick_height: u64,
+        leader_scheduler: &mut LeaderScheduler,
+    ) {
+        for tx in &entry.transactions {
+            if tx.vote().is_some() {
+                // Update the active set in the leader scheduler
+                leader_scheduler.push_vote(*tx.from(), tick_height);
+            }
+        }
+
+        leader_scheduler.update_height(tick_height, self);
+    }
+
     /// Process an ordered list of entries, populating a circular buffer "tail"
     /// as we go.
     fn process_entries_tail(
@@ -930,6 +932,8 @@ impl Bank {
         entries: &[Entry],
         tail: &mut Vec<Entry>,
         tail_idx: &mut usize,
+        tick_height: &mut u64,
+        leader_scheduler: &mut LeaderScheduler,
     ) -> Result<u64> {
         let mut entry_count = 0;

@@ -947,7 +951,7 @@ impl Bank {
             // the leader scheduler. Next we will extract the vote tracking structure
             // out of the leader scheduler, and into the bank, and remove the leader
             // scheduler from these banking functions.
-            self.process_entry(entry)?;
+            self.process_entry(entry, tick_height, leader_scheduler)?;
         }

         Ok(entry_count)
@@ -992,7 +996,6 @@ impl Bank {
                 // if it's a tick, execute the group and register the tick
                 self.par_execute_entries(&mt_group)?;
                 self.register_entry_id(&entry.id);
-                *self.tick_height.lock().unwrap() += 1;
                 mt_group = vec![];
                 continue;
             }
@@ -1022,18 +1025,17 @@ impl Bank {
         entries: I,
         tail: &mut Vec<Entry>,
         tail_idx: &mut usize,
-    ) -> Result<u64>
+        leader_scheduler: &mut LeaderScheduler,
+    ) -> Result<(u64, u64)>
     where
         I: IntoIterator<Item = Entry>,
     {
         // Ledger verification needs to be parallelized, but we can't pull the whole
         // thing into memory. We therefore chunk it.
         let mut entry_height = *tail_idx as u64;
+        let mut tick_height = 0;
         for entry in &tail[0..*tail_idx] {
-            if entry.is_tick() {
-                *self.tick_height.lock().unwrap() += 1;
-            }
+            tick_height += entry.is_tick() as u64;
         }

         let mut id = start_hash;
@@ -1044,15 +1046,25 @@ impl Bank {
                 return Err(BankError::LedgerVerificationFailed);
             }
             id = block.last().unwrap().id;
-            let entry_count = self.process_entries_tail(&block, tail, tail_idx)?;
+            let entry_count = self.process_entries_tail(
+                &block,
+                tail,
+                tail_idx,
+                &mut tick_height,
+                leader_scheduler,
+            )?;

             entry_height += entry_count;
         }
-        Ok(entry_height)
+        Ok((tick_height, entry_height))
     }

     /// Process a full ledger.
-    pub fn process_ledger<I>(&self, entries: I) -> Result<(u64, u64, Vec<Entry>)>
+    pub fn process_ledger<I>(
+        &self,
+        entries: I,
+        leader_scheduler: &mut LeaderScheduler,
+    ) -> Result<(u64, u64, Vec<Entry>)>
     where
         I: IntoIterator<Item = Entry>,
     {
@@ -1094,14 +1106,20 @@ impl Bank {
         tail.push(entry0);
         tail.push(entry1);
         let mut tail_idx = 2;
-        let entry_height = self.process_blocks(entry1_id, entries, &mut tail, &mut tail_idx)?;
+        let (tick_height, entry_height) = self.process_blocks(
+            entry1_id,
+            entries,
+            &mut tail,
+            &mut tail_idx,
+            leader_scheduler,
+        )?;

         // check if we need to rotate tail
         if tail.len() == WINDOW_SIZE as usize {
             tail.rotate_left(tail_idx)
         }

-        Ok((*self.tick_height.lock().unwrap(), entry_height, tail))
+        Ok((tick_height, entry_height, tail))
     }

     /// Create, sign, and process a Transaction from `keypair` to `to` of
@@ -1218,16 +1236,6 @@ impl Bank {
         subscriptions.remove(pubkey).is_some()
     }

-    pub fn get_current_leader(&self) -> Option<Pubkey> {
-        let ls_lock = self.leader_scheduler.read().unwrap();
-        let tick_height = self.tick_height.lock().unwrap();
-        ls_lock.get_scheduled_leader(*tick_height)
-    }
-
-    pub fn get_tick_height(&self) -> u64 {
-        *self.tick_height.lock().unwrap()
-    }
-
     fn check_account_subscriptions(&self, pubkey: &Pubkey, account: &Account) {
         let subscriptions = self.account_subscriptions.read().unwrap();
         if let Some(hashmap) = subscriptions.get(pubkey) {
@@ -1280,6 +1288,13 @@ impl Bank {
         }
         subscriptions.remove(&signature);
     }
+
+    #[cfg(test)]
+    // Used to access accounts for things like controlling stake to control
+    // the eligible set of nodes for leader selection
+    pub fn accounts(&self) -> &RwLock<HashMap<Pubkey, Account>> {
+        &self.accounts
+    }
 }

 #[cfg(test)]
@@ -1292,6 +1307,7 @@ mod tests {
     use entry_writer::{self, EntryWriter};
     use hash::hash;
     use jsonrpc_macros::pubsub::{Subscriber, SubscriptionId};
+    use leader_scheduler::LeaderScheduler;
     use ledger;
     use logger;
     use signature::Keypair;
@@ -1624,7 +1640,8 @@ mod tests {
         let mint = Mint::new(1);
         let genesis = mint.create_entries();
         let bank = Bank::default();
-        bank.process_ledger(genesis).unwrap();
+        bank.process_ledger(genesis, &mut LeaderScheduler::default())
+            .unwrap();
         assert_eq!(bank.get_balance(&mint.pubkey()), 1);
     }

@@ -1701,7 +1718,9 @@ mod tests {
         let (ledger, pubkey) = create_sample_ledger(1);
         let (ledger, dup) = ledger.tee();
         let bank = Bank::default();
-        let (tick_height, ledger_height, tail) = bank.process_ledger(ledger).unwrap();
+        let (tick_height, ledger_height, tail) = bank
+            .process_ledger(ledger, &mut LeaderScheduler::default())
+            .unwrap();
         assert_eq!(bank.get_balance(&pubkey), 1);
         assert_eq!(ledger_height, 4);
         assert_eq!(tick_height, 2);
@@ -1723,15 +1742,17 @@ mod tests {
         // let (_, _) = bank.process_ledger(ledger).unwrap();
         // }

-        let window_size = WINDOW_SIZE as usize;
+        let window_size = 128;
         for entry_count in window_size - 3..window_size + 2 {
             let (ledger, pubkey) = create_sample_ledger(entry_count);
             let bank = Bank::default();
-            let (tick_height, ledger_height, tail) = bank.process_ledger(ledger).unwrap();
+            let (tick_height, ledger_height, tail) = bank
+                .process_ledger(ledger, &mut LeaderScheduler::default())
+                .unwrap();
             assert_eq!(bank.get_balance(&pubkey), 1);
             assert_eq!(ledger_height, entry_count as u64 + 3);
             assert_eq!(tick_height, 2);
-            assert!(tail.len() <= window_size);
+            assert!(tail.len() <= WINDOW_SIZE as usize);
             let last_entry = &tail[tail.len() - 1];
             assert_eq!(bank.last_id(), last_entry.id);
         }
@@ -1753,7 +1774,8 @@ mod tests {
         let ledger = to_file_iter(ledger);

         let bank = Bank::default();
-        bank.process_ledger(ledger).unwrap();
+        bank.process_ledger(ledger, &mut LeaderScheduler::default())
+            .unwrap();
         assert_eq!(bank.get_balance(&pubkey), 1);
     }

@@ -1764,7 +1786,8 @@ mod tests {
         let block = to_file_iter(create_sample_block_with_ticks(&mint, 1, 1));

         let bank = Bank::default();
-        bank.process_ledger(genesis.chain(block)).unwrap();
+        bank.process_ledger(genesis.chain(block), &mut LeaderScheduler::default())
+            .unwrap();
         assert_eq!(bank.get_balance(&mint.pubkey()), 1);
     }

@@ -1778,9 +1801,13 @@ mod tests {
         let ledger1 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs);

         let bank0 = Bank::default();
-        bank0.process_ledger(ledger0).unwrap();
+        bank0
+            .process_ledger(ledger0, &mut LeaderScheduler::default())
+            .unwrap();
         let bank1 = Bank::default();
-        bank1.process_ledger(ledger1).unwrap();
+        bank1
+            .process_ledger(ledger1, &mut LeaderScheduler::default())
+            .unwrap();

         let initial_state = bank0.hash_internal_state();

@@ -1873,7 +1900,7 @@ mod tests {
         let string = transport_receiver.poll();
         assert!(string.is_ok());
         if let Async::Ready(Some(response)) = string.unwrap() {
-            let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader_program_id":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
+            let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader_program_id":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"program_id":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
             assert_eq!(expected, response);
         }

@@ -2050,4 +2077,53 @@ mod tests {
             Err(BankError::AccountNotFound)
         );
     }
+
+    #[test]
+    fn test_program_ids() {
+        let system = Pubkey::new(&[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0,
+        ]);
+        let native = Pubkey::new(&[
+            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0,
+        ]);
+        let bpf = Pubkey::new(&[
+            128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0,
+        ]);
+        let budget = Pubkey::new(&[
+            129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0,
+        ]);
+        let storage = Pubkey::new(&[
+            130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0,
+        ]);
+        let token = Pubkey::new(&[
+            131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0,
+        ]);
+
+        assert_eq!(SystemProgram::id(), system);
+        assert_eq!(native_loader::id(), native);
+        assert_eq!(bpf_loader::id(), bpf);
+        assert_eq!(BudgetState::id(), budget);
+        assert_eq!(StorageProgram::id(), storage);
+        assert_eq!(TokenProgram::id(), token);
+    }
+
+    #[test]
+    fn test_program_id_uniqueness() {
+        let mut unique = HashSet::new();
+        let ids = vec![
+            SystemProgram::id(),
+            native_loader::id(),
+            bpf_loader::id(),
+            BudgetState::id(),
+            StorageProgram::id(),
+            TokenProgram::id(),
+        ];
+        assert!(ids.into_iter().all(move |id| unique.insert(id)));
+    }
 }
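The net effect of the `bank.rs` changes above is that tick counting and the `LeaderScheduler` move out of `Bank` internals and into the caller. A minimal sketch of the new calling convention, with module paths assumed from the imports in this diff:

use solana::bank::{Bank, Result};
use solana::entry::Entry;
use solana::leader_scheduler::LeaderScheduler;

// Replay entries against a bank; the caller now owns both the tick count
// and the leader scheduler instead of reading them back out of the Bank.
fn replay(bank: &Bank, entries: &[Entry]) -> Result<()> {
    let mut tick_height = 0;
    let mut leader_scheduler = LeaderScheduler::default();
    for entry in entries {
        bank.process_entry(entry, &mut tick_height, &mut leader_scheduler)?;
    }
    Ok(())
}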
@@ -17,16 +17,14 @@ use solana::logger;
 use solana::metrics::set_panic_hook;
 use solana::signature::{Keypair, KeypairUtil};
 use solana::thin_client::poll_gossip_for_leader;
-use solana::vote_program::VoteProgram;
 use solana::wallet::request_airdrop;
 use std::fs::File;
 use std::net::{Ipv4Addr, SocketAddr};
 use std::process::exit;
-use std::sync::Arc;
 use std::thread::sleep;
 use std::time::Duration;

-fn main() {
+fn main() -> () {
     logger::setup();
     set_panic_hook("fullnode");
     let matches = App::new("fullnode")
@@ -84,6 +82,7 @@ fn main() {

     // save off some stuff for airdrop
     let node_info = node.info.clone();
+    let pubkey = keypair.pubkey();

     let leader = match network {
         Some(network) => {
@@ -92,16 +91,10 @@ fn main() {
         None => node_info,
     };

-    let vote_account_keypair = Arc::new(Keypair::new());
-    let vote_account_id = vote_account_keypair.pubkey();
-    let keypair = Arc::new(keypair);
-    let pubkey = keypair.pubkey();
-
     let mut fullnode = Fullnode::new(
         node,
         ledger_path,
-        keypair.clone(),
-        vote_account_keypair,
+        keypair,
         network,
         false,
         LeaderScheduler::from_bootstrap_leader(leader.id),
@@ -136,49 +129,6 @@ fn main() {
         }
     }

-    // Create the vote account
-    loop {
-        let last_id = client.get_last_id();
-        if client
-            .create_vote_account(&keypair, vote_account_id, &last_id, 1)
-            .is_err()
-        {
-            sleep(Duration::from_secs(2));
-            continue;
-        }
-
-        let balance = client.poll_get_balance(&vote_account_id).unwrap_or(0);
-
-        if balance > 0 {
-            break;
-        }
-
-        sleep(Duration::from_secs(2));
-    }
-
-    // Register the vote account to this node
-    loop {
-        let last_id = client.get_last_id();
-        if client
-            .register_vote_account(&keypair, vote_account_id, &last_id)
-            .is_err()
-        {
-            sleep(Duration::from_secs(2));
-            continue;
-        }
-
-        let account_user_data = client.get_account_userdata(&vote_account_id);
-        if let Ok(Some(account_user_data)) = account_user_data {
-            if let Ok(vote_state) = VoteProgram::deserialize(&account_user_data) {
-                if vote_state.node_id == pubkey {
-                    break;
-                }
-            }
-        }
-
-        sleep(Duration::from_secs(2));
-    }
-
     loop {
         let status = fullnode.handle_role_transition();
         match status {
@@ -5,6 +5,7 @@ extern crate solana;
 
 use clap::{App, Arg, SubCommand};
 use solana::bank::Bank;
+use solana::leader_scheduler::LeaderScheduler;
 use solana::ledger::{read_ledger, verify_ledger};
 use solana::logger;
 use std::io::{stdout, Write};
@@ -115,7 +116,7 @@ fn main() {
     };
 
     let genesis = genesis.take(2).map(|e| e.unwrap());
-    if let Err(e) = bank.process_ledger(genesis) {
+    if let Err(e) = bank.process_ledger(genesis, &mut LeaderScheduler::default()) {
         eprintln!("verify failed at genesis err: {:?}", e);
         if !matches.is_present("continue") {
             exit(1);
@@ -141,7 +142,10 @@ fn main() {
             }
             last_id = entry.id;
 
-            if let Err(e) = bank.process_entry(&entry) {
+            let mut tick_height = 0;
+            let mut leader_scheduler = LeaderScheduler::default();
+            if let Err(e) = bank.process_entry(&entry, &mut tick_height, &mut leader_scheduler)
+            {
                 eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
                 if !matches.is_present("continue") {
                     exit(1);
@@ -3,8 +3,11 @@ use native_loader;
 use solana_sdk::account::Account;
 use solana_sdk::pubkey::Pubkey;
 
-pub const BPF_LOADER_PROGRAM_ID: [u8; 32] = [6u8; 32];
-pub const BPF_LOADER_NAME: &str = "bpf_loader";
+const BPF_LOADER_NAME: &str = "solana_bpf_loader";
+const BPF_LOADER_PROGRAM_ID: [u8; 32] = [
+    128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0,
+];
 
 pub fn id() -> Pubkey {
     Pubkey::new(&BPF_LOADER_PROGRAM_ID)
@@ -1,12 +1,15 @@
 use budget::Budget;
 use chrono::prelude::{DateTime, Utc};
 
-/// A smart contract.
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
-pub struct Contract {
-    /// The number of tokens allocated to the `Budget` and any transaction fees.
-    pub tokens: i64,
-    pub budget: Budget,
+pub struct Vote {
+    /// We send some gossip specific membership information through the vote to shortcut
+    /// liveness voting
+    /// The version of the ClusterInfo struct that the last_id of this network voted with
+    pub version: u64,
+    /// The version of the ClusterInfo struct that has the same network configuration as this one
+    pub contact_info_version: u64,
+    // TODO: add signature of the state here as well
 }
 
 /// An instruction to progress the smart contract.
@@ -21,4 +24,7 @@ pub enum Instruction {
     /// Tell the budget that the `NewBudget` with `Signature` has been
     /// signed by the containing transaction's `Pubkey`.
     ApplySignature,
+
+    /// Vote for a PoH that is equal to the lastid of this transaction
+    NewVote(Vote),
 }
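The new `NewVote(Vote)` variant rides inside a transaction's userdata as bincode-encoded `Instruction` bytes (see `budget_new_vote` in the budget transaction hunks below). A hedged round-trip sketch with local stand-in types rather than the crate's own; it assumes `serde` (with the derive feature) and `bincode` as dependencies:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Vote {
    version: u64,
    contact_info_version: u64,
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum Instruction {
    ApplySignature,
    NewVote(Vote),
}

fn main() {
    let ix = Instruction::NewVote(Vote { version: 1, contact_info_version: 0 });
    // Encode as it would be placed into a transaction's userdata...
    let userdata = bincode::serialize(&ix).expect("serialize instruction");
    // ...and decode it back the way an instruction accessor would.
    let decoded: Instruction = bincode::deserialize(&userdata).expect("deserialize");
    assert_eq!(ix, decoded);
}
```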
@@ -30,9 +30,11 @@ pub struct BudgetState {
     pub pending_budget: Option<Budget>,
 }
 
-pub const BUDGET_PROGRAM_ID: [u8; 32] = [
-    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+const BUDGET_PROGRAM_ID: [u8; 32] = [
+    129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0,
 ];
 
 impl BudgetState {
     fn is_pending(&self) -> bool {
         self.pending_budget != None
@@ -172,6 +174,11 @@ impl BudgetState {
                     Err(BudgetError::UninitializedContract)
                 }
             }
+            Instruction::NewVote(_vote) => {
+                // TODO: move vote instruction into a different contract
+                trace!("GOT VOTE! last_id={}", tx.last_id);
+                Ok(())
+            }
         }
     }
     fn serialize(&self, output: &mut [u8]) -> Result<(), BudgetError> {
@@ -2,7 +2,7 @@
 
 use bincode::{deserialize, serialize};
 use budget::{Budget, Condition};
-use budget_instruction::Instruction;
+use budget_instruction::{Instruction, Vote};
 use budget_program::BudgetState;
 use chrono::prelude::*;
 use hash::Hash;
@@ -38,6 +38,8 @@ pub trait BudgetTransaction {
         last_id: Hash,
     ) -> Self;
 
+    fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self;
+
     fn budget_new_on_date(
         from_keypair: &Keypair,
         to: Pubkey,
@@ -59,6 +61,8 @@ pub trait BudgetTransaction {
         last_id: Hash,
     ) -> Self;
 
+    fn vote(&self) -> Option<(Pubkey, Vote, Hash)>;
+
     fn instruction(&self, program_index: usize) -> Option<Instruction>;
     fn system_instruction(&self, program_index: usize) -> Option<SystemProgram>;
 
@@ -149,6 +153,12 @@ impl BudgetTransaction for Transaction {
         )
     }
 
+    fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self {
+        let instruction = Instruction::NewVote(vote);
+        let userdata = serialize(&instruction).expect("serialize instruction");
+        Self::new(from_keypair, &[], BudgetState::id(), userdata, last_id, fee)
+    }
+
     /// Create and sign a postdated Transaction. Used for unit-testing.
     fn budget_new_on_date(
         from_keypair: &Keypair,
@@ -209,6 +219,16 @@ impl BudgetTransaction for Transaction {
         )
     }
 
+    fn vote(&self) -> Option<(Pubkey, Vote, Hash)> {
+        if self.instructions.len() > 1 {
+            None
+        } else if let Some(Instruction::NewVote(vote)) = self.instruction(0) {
+            Some((self.account_keys[0], vote, self.last_id))
+        } else {
+            None
+        }
+    }
+
     fn instruction(&self, instruction_index: usize) -> Option<Instruction> {
         deserialize(&self.userdata(instruction_index)).ok()
     }
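The `vote()` accessor added above treats a transaction as a vote only when it carries at most one instruction and that instruction decodes to `NewVote`. A standalone restatement of that filter, with deliberately simplified stand-in types (the real method returns the signer's `Pubkey`, the `Vote`, and the transaction's `last_id`):

```rust
#[derive(Clone, Debug, PartialEq)]
enum Instruction {
    ApplySignature,
    NewVote(u64), // payload simplified to just the vote version
}

struct Transaction {
    account_keys: Vec<[u8; 32]>,
    instructions: Vec<Instruction>,
}

// Mirrors the logic above: multi-instruction transactions never count as votes.
fn vote(tx: &Transaction) -> Option<([u8; 32], u64)> {
    if tx.instructions.len() > 1 {
        return None;
    }
    match tx.instructions.first() {
        Some(Instruction::NewVote(version)) => Some((tx.account_keys[0], *version)),
        _ => None,
    }
}

fn main() {
    let tx = Transaction {
        account_keys: vec![[7u8; 32]],
        instructions: vec![Instruction::NewVote(3)],
    };
    assert_eq!(vote(&tx), Some(([7u8; 32], 3)));

    let not_a_vote = Transaction {
        account_keys: vec![[8u8; 32]],
        instructions: vec![Instruction::ApplySignature],
    };
    assert_eq!(vote(&not_a_vote), None);
}
```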
@@ -13,6 +13,7 @@
 //!
 //! Bank needs to provide an interface for us to query the stake weight
 use bincode::{deserialize, serialize, serialized_size};
+use budget_instruction::Vote;
 use choose_gossip_peer_strategy::{ChooseGossipPeerStrategy, ChooseWeightedPeerStrategy};
 use counter::Counter;
 use hash::Hash;
@@ -337,6 +338,47 @@ impl ClusterInfo {
         self.external_liveness.get(key)
     }
 
+    pub fn insert_vote(&mut self, pubkey: &Pubkey, v: &Vote, last_id: Hash) {
+        if self.table.get(pubkey).is_none() {
+            warn!("{}: VOTE for unknown id: {}", self.id, pubkey);
+            return;
+        }
+        if v.contact_info_version > self.table[pubkey].contact_info.version {
+            warn!(
+                "{}: VOTE for new address version from: {} ours: {} vote: {:?}",
+                self.id, pubkey, self.table[pubkey].contact_info.version, v,
+            );
+            return;
+        }
+        if *pubkey == self.my_data().leader_id {
+            info!("{}: LEADER_VOTED! {}", self.id, pubkey);
+            inc_new_counter_info!("cluster_info-insert_vote-leader_voted", 1);
+        }
+
+        if v.version <= self.table[pubkey].version {
+            debug!("{}: VOTE for old version: {}", self.id, pubkey);
+            self.update_liveness(*pubkey);
+            return;
+        } else {
+            let mut data = self.table[pubkey].clone();
+            data.version = v.version;
+            data.ledger_state.last_id = last_id;
+
+            debug!("{}: INSERTING VOTE! for {}", self.id, data.id);
+            self.update_liveness(data.id);
+            self.insert(&data);
+        }
+    }
+    pub fn insert_votes(&mut self, votes: &[(Pubkey, Vote, Hash)]) {
+        inc_new_counter_info!("cluster_info-vote-count", votes.len());
+        if !votes.is_empty() {
+            info!("{}: INSERTING VOTES {}", self.id, votes.len());
+        }
+        for v in votes {
+            self.insert_vote(&v.0, &v.1, v.2);
+        }
+    }
+
     pub fn insert(&mut self, v: &NodeInfo) -> usize {
         // TODO check that last_verified types are always increasing
         // update the peer table
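`insert_vote` applies three gates, in order: unknown sender, contact info newer than anything we hold, and a version we have already seen (which still refreshes liveness before bailing out). A distilled sketch of just that accept/reject decision, with a plain struct standing in for the gossip table entry:

```rust
struct Entry {
    version: u64,
    contact_info_version: u64,
}

fn should_accept(entry: Option<&Entry>, vote_version: u64, vote_contact_info_version: u64) -> bool {
    match entry {
        // Unknown id: there is nothing to update.
        None => false,
        // The voter claims newer addresses than we know; drop until gossip catches up.
        Some(e) if vote_contact_info_version > e.contact_info_version => false,
        // Stale or duplicate vote; the real code still bumps liveness here.
        Some(e) if vote_version <= e.version => false,
        Some(_) => true,
    }
}

fn main() {
    let e = Entry { version: 0, contact_info_version: 0 };
    assert!(!should_accept(None, 1, 0));
    assert!(!should_accept(Some(&e), 1, 1)); // newer addresses than ours
    assert!(!should_accept(Some(&e), 0, 0)); // version we already hold
    assert!(should_accept(Some(&e), 1, 0)); // fresh vote, known addresses
}
```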
@@ -413,7 +455,6 @@ impl ClusterInfo {
                 if *id == leader_id {
                     info!("{}: PURGE LEADER {}", self.id, id,);
                     inc_new_counter_info!("cluster_info-purge-purged_leader", 1, 1);
-                    self.set_leader(Pubkey::default());
                 }
             }
         }
@@ -497,7 +538,7 @@ impl ClusterInfo {
         );
 
         // Make sure the next leader in line knows about the entries before his slot in the leader
-        // rotation so they can initiate repairs if necessary
+        // rotation so he can initiate repairs if necessary
         {
             let ls_lock = leader_scheduler.read().unwrap();
             let next_leader_height = ls_lock.max_height_for_leader(tick_height);
@@ -782,6 +823,22 @@ impl ClusterInfo {
         Ok((v.contact_info.ncp, req))
     }
 
+    pub fn new_vote(&mut self, last_id: Hash) -> Result<(Vote, SocketAddr)> {
+        let mut me = self.my_data().clone();
+        let leader = self
+            .leader_data()
+            .ok_or(ClusterInfoError::NoLeader)?
+            .clone();
+        me.version += 1;
+        me.ledger_state.last_id = last_id;
+        let vote = Vote {
+            version: me.version,
+            contact_info_version: me.contact_info.version,
+        };
+        self.insert(&me);
+        Ok((vote, leader.contact_info.tpu))
+    }
+
     /// At random pick a node and try to get updated changes from them
     fn run_gossip(obj: &Arc<RwLock<Self>>, blob_sender: &BlobSender) -> Result<()> {
         //TODO we need to keep track of stakes and weight the selection by stake size
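`new_vote` bumps the node's own entry version, stamps the latest ledger id, re-inserts the entry into the gossip table, and addresses the vote to the current leader's TPU port. A simplified rehearsal of that sequence with local stand-in types; the missing-leader error path (`ClusterInfoError::NoLeader`) is omitted here:

```rust
#[derive(Clone, Debug, PartialEq)]
struct NodeInfo {
    version: u64,
    last_id: u64, // stand-in for the ledger's Hash type
}

#[derive(Debug, PartialEq)]
struct Vote {
    version: u64,
}

// Returns the vote plus the updated self-entry the caller must re-insert into
// the gossip table, mirroring the me.version += 1 / self.insert(&me) pair above.
fn new_vote(me: &NodeInfo, last_id: u64) -> (Vote, NodeInfo) {
    let mut me = me.clone();
    me.version += 1;
    me.last_id = last_id;
    let vote = Vote { version: me.version };
    (vote, me)
}

fn main() {
    let me = NodeInfo { version: 1, last_id: 0 };
    let (vote, updated) = new_vote(&me, 42);
    assert_eq!(vote, Vote { version: 2 });
    assert_eq!(updated, NodeInfo { version: 2, last_id: 42 });
}
```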
@@ -1330,6 +1387,7 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
 #[cfg(test)]
 mod tests {
     use bincode::serialize;
+    use budget_instruction::Vote;
     use cluster_info::{
         ClusterInfo, ClusterInfoError, Node, NodeInfo, Protocol, FULLNODE_PORT_RANGE,
         GOSSIP_PURGE_MILLIS, GOSSIP_SLEEP_MILLIS, MIN_TABLE_SIZE,
@@ -1377,6 +1435,62 @@ mod tests {
         assert_eq!(cluster_info.table[&d.id].version, 3);
         assert!(liveness < cluster_info.alive[&d.id]);
     }
+    #[test]
+    fn test_new_vote() {
+        let d = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
+        assert_eq!(d.version, 0);
+        let mut cluster_info = ClusterInfo::new(d.clone()).unwrap();
+        assert_eq!(cluster_info.table[&d.id].version, 0);
+        let leader = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.2:1235"));
+        assert_ne!(d.id, leader.id);
+        assert_matches!(
+            cluster_info.new_vote(Hash::default()).err(),
+            Some(Error::ClusterInfoError(ClusterInfoError::NoLeader))
+        );
+        cluster_info.insert(&leader);
+        assert_matches!(
+            cluster_info.new_vote(Hash::default()).err(),
+            Some(Error::ClusterInfoError(ClusterInfoError::NoLeader))
+        );
+        cluster_info.set_leader(leader.id);
+        assert_eq!(cluster_info.table[&d.id].version, 1);
+        let v = Vote {
+            version: 2, //version should increase when we vote
+            contact_info_version: 0,
+        };
+        let expected = (v, cluster_info.table[&leader.id].contact_info.tpu);
+        assert_eq!(cluster_info.new_vote(Hash::default()).unwrap(), expected);
+    }
+
+    #[test]
+    fn test_insert_vote() {
+        let d = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
+        assert_eq!(d.version, 0);
+        let mut cluster_info = ClusterInfo::new(d.clone()).unwrap();
+        assert_eq!(cluster_info.table[&d.id].version, 0);
+        let vote_same_version = Vote {
+            version: d.version,
+            contact_info_version: 0,
+        };
+        cluster_info.insert_vote(&d.id, &vote_same_version, Hash::default());
+        assert_eq!(cluster_info.table[&d.id].version, 0);
+
+        let vote_new_version_new_addrs = Vote {
+            version: d.version + 1,
+            contact_info_version: 1,
+        };
+        cluster_info.insert_vote(&d.id, &vote_new_version_new_addrs, Hash::default());
+        //should be dropped since the address is newer then we know
+        assert_eq!(cluster_info.table[&d.id].version, 0);
+
+        let vote_new_version_old_addrs = Vote {
+            version: d.version + 1,
+            contact_info_version: 0,
+        };
+        cluster_info.insert_vote(&d.id, &vote_new_version_old_addrs, Hash::default());
+        //should be accepted, since the update is for the same address field as the one we know
+        assert_eq!(cluster_info.table[&d.id].version, 1);
+    }
     fn sorted(ls: &Vec<(NodeInfo, u64)>) -> Vec<(NodeInfo, u64)> {
         let mut copy: Vec<_> = ls.iter().cloned().collect();
         copy.sort_by(|x, y| x.0.id.cmp(&y.0.id));
@@ -1667,7 +1781,7 @@ mod tests {
         let len = cluster_info.table.len() as u64;
         cluster_info.purge(now + GOSSIP_PURGE_MILLIS + 1);
         assert_eq!(len as usize - 1, cluster_info.table.len());
-        assert_eq!(cluster_info.my_data().leader_id, Pubkey::default());
+        assert_eq!(cluster_info.my_data().leader_id, nxt.id);
         assert!(cluster_info.leader_data().is_none());
     }
 
src/drone.rs (15 changes)
@@ -235,7 +235,6 @@ mod tests {
     use signature::{Keypair, KeypairUtil};
     use std::fs::remove_dir_all;
     use std::net::{SocketAddr, UdpSocket};
-    use std::sync::{Arc, RwLock};
     use std::time::Duration;
     use thin_client::ThinClient;
 
@@ -314,24 +313,18 @@ mod tests {
         const TPS_BATCH: i64 = 5_000_000;
 
         logger::setup();
-        let leader_keypair = Arc::new(Keypair::new());
+        let leader_keypair = Keypair::new();
         let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
 
         let alice = Mint::new(10_000_000);
-        let mut bank = Bank::new(&alice);
-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader.info.id,
-        )));
-        bank.leader_scheduler = leader_scheduler;
+        let bank = Bank::new(&alice);
         let bob_pubkey = Keypair::new().pubkey();
         let carlos_pubkey = Keypair::new().pubkey();
         let leader_data = leader.info.clone();
         let ledger_path = get_tmp_ledger_path("send_airdrop");
 
-        let vote_account_keypair = Arc::new(Keypair::new());
         let server = Fullnode::new_with_bank(
             leader_keypair,
-            vote_account_keypair,
             bank,
             0,
             0,
@@ -340,6 +333,7 @@ mod tests {
             None,
             &ledger_path,
             false,
+            LeaderScheduler::from_bootstrap_leader(leader_data.id),
             Some(0),
         );
 
@@ -374,14 +368,13 @@ mod tests {
         // restart the leader, drone should find the new one at the same gossip port
         server.close().unwrap();
 
-        let leader_keypair = Arc::new(Keypair::new());
+        let leader_keypair = Keypair::new();
         let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
         let leader_data = leader.info.clone();
         let server = Fullnode::new(
             leader,
             &ledger_path,
             leader_keypair,
-            Arc::new(Keypair::new()),
             None,
             false,
             LeaderScheduler::from_bootstrap_leader(leader_data.id),
src/fullnode.rs (292 changes)
@@ -85,12 +85,12 @@ pub enum FullnodeReturnType {
 
 pub struct Fullnode {
     pub node_role: Option<NodeRole>,
+    pub leader_scheduler: Arc<RwLock<LeaderScheduler>>,
     keypair: Arc<Keypair>,
-    vote_account_keypair: Arc<Keypair>,
     exit: Arc<AtomicBool>,
     rpu: Option<Rpu>,
-    rpc_service: Option<JsonRpcService>,
-    rpc_pubsub_service: Option<PubSubService>,
+    rpc_service: JsonRpcService,
+    rpc_pubsub_service: PubSubService,
     ncp: Ncp,
     bank: Arc<Bank>,
     cluster_info: Arc<RwLock<ClusterInfo>>,
@@ -104,7 +104,6 @@ pub struct Fullnode {
     broadcast_socket: UdpSocket,
     requests_socket: UdpSocket,
     respond_socket: UdpSocket,
-    rpc_port: Option<u16>,
 }
 
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -133,17 +132,14 @@ impl Fullnode {
     pub fn new(
         node: Node,
         ledger_path: &str,
-        keypair: Arc<Keypair>,
-        vote_account_keypair: Arc<Keypair>,
+        keypair: Keypair,
         leader_addr: Option<SocketAddr>,
         sigverify_disabled: bool,
-        leader_scheduler: LeaderScheduler,
+        mut leader_scheduler: LeaderScheduler,
     ) -> Self {
-        let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
-
         info!("creating bank...");
         let (bank, tick_height, entry_height, ledger_tail) =
-            Self::new_bank_from_ledger(ledger_path, leader_scheduler);
+            Self::new_bank_from_ledger(ledger_path, &mut leader_scheduler);
 
         info!("creating networking stack...");
         let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();
@@ -158,7 +154,6 @@ impl Fullnode {
         let leader_info = leader_addr.map(|i| NodeInfo::new_entry_point(&i));
         let server = Self::new_with_bank(
             keypair,
-            vote_account_keypair,
             bank,
             tick_height,
             entry_height,
@@ -167,6 +162,7 @@ impl Fullnode {
             leader_info.as_ref(),
             ledger_path,
             sigverify_disabled,
+            leader_scheduler,
             None,
         );
 
@@ -240,8 +236,7 @@ impl Fullnode {
     /// ```
     #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
     pub fn new_with_bank(
-        keypair: Arc<Keypair>,
-        vote_account_keypair: Arc<Keypair>,
+        keypair: Keypair,
         bank: Bank,
         tick_height: u64,
         entry_height: u64,
@@ -250,6 +245,7 @@ impl Fullnode {
         bootstrap_leader_info_option: Option<&NodeInfo>,
         ledger_path: &str,
         sigverify_disabled: bool,
+        leader_scheduler: LeaderScheduler,
         rpc_port: Option<u16>,
     ) -> Self {
         let exit = Arc::new(AtomicBool::new(false));
@@ -278,8 +274,21 @@ impl Fullnode {
             ClusterInfo::new(node.info).expect("ClusterInfo::new"),
         ));
 
-        let (rpc_service, rpc_pubsub_service) =
-            Self::startup_rpc_services(rpc_port, &bank, &cluster_info);
+        // Use custom RPC port, if provided (`Some(port)`)
+        // RPC port may be any open port on the node
+        // If rpc_port == `None`, node will listen on the default RPC_PORT from Rpc module
+        // If rpc_port == `Some(0)`, node will dynamically choose any open port for both
+        // Rpc and RpcPubsub serivces. Useful for tests.
+        let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
+        // TODO: The RPC service assumes that there is a drone running on the leader
+        // Drone location/id will need to be handled a different way as soon as leader rotation begins
+        let rpc_service = JsonRpcService::new(&bank, &cluster_info, rpc_addr, exit.clone());
+
+        let rpc_pubsub_addr = SocketAddr::new(
+            IpAddr::V4(Ipv4Addr::from(0)),
+            rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 }),
+        );
+        let rpc_pubsub_service = PubSubService::new(&bank, rpc_pubsub_addr, exit.clone());
 
         let ncp = Ncp::new(
             &cluster_info,
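The pubsub address computed above hides an edge case worth spelling out: with no port configured the service binds `RPC_PORT + 1`; an explicit port gets `+ 1`; and an explicit `0` stays `0`, so the OS picks a free port for both services (useful in tests). A sketch of just that rule — the `8899` default below is an assumption for illustration; the real constant lives in the rpc module:

```rust
const RPC_PORT: u16 = 8899; // assumed default for this sketch

fn pubsub_port(rpc_port: Option<u16>) -> u16 {
    // Same map_or expression as in the hunk above.
    rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 })
}

fn main() {
    assert_eq!(pubsub_port(None), 8900); // default RPC port + 1
    assert_eq!(pubsub_port(Some(9000)), 9001); // explicit port + 1
    assert_eq!(pubsub_port(Some(0)), 0); // 0 means: let the OS choose
}
```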
@@ -289,6 +298,9 @@ impl Fullnode {
             exit.clone(),
         );
 
+        let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
+        let keypair = Arc::new(keypair);
+
         // Insert the bootstrap leader info, should only be None if this node
         // is the bootstrap leader
         if let Some(bootstrap_leader_info) = bootstrap_leader_info_option {
@@ -296,8 +308,10 @@ impl Fullnode {
         }
 
         // Get the scheduled leader
-        let scheduled_leader = bank
-            .get_current_leader()
+        let scheduled_leader = leader_scheduler
+            .read()
+            .unwrap()
+            .get_scheduled_leader(tick_height)
             .expect("Leader not known after processing bank");
 
         cluster_info.write().unwrap().set_leader(scheduled_leader);
@@ -305,8 +319,8 @@ impl Fullnode {
             // Start in validator mode.
             let tvu = Tvu::new(
                 keypair.clone(),
-                vote_account_keypair.clone(),
                 &bank,
+                tick_height,
                 entry_height,
                 cluster_info.clone(),
                 shared_window.clone(),
@@ -324,17 +338,20 @@ impl Fullnode {
                     .try_clone()
                     .expect("Failed to clone retransmit socket"),
                 Some(ledger_path),
+                leader_scheduler.clone(),
             );
             let validator_state = ValidatorServices::new(tvu);
             Some(NodeRole::Validator(validator_state))
         } else {
             let max_tick_height = {
-                let ls_lock = bank.leader_scheduler.read().unwrap();
+                let ls_lock = leader_scheduler.read().unwrap();
                 ls_lock.max_height_for_leader(tick_height)
             };
             // Start in leader mode.
             let (tpu, entry_receiver, tpu_exit) = Tpu::new(
+                keypair.clone(),
                 &bank,
+                &cluster_info,
                 Default::default(),
                 node.sockets
                     .transaction
@@ -357,7 +374,7 @@ impl Fullnode {
                 shared_window.clone(),
                 entry_height,
                 entry_receiver,
-                bank.leader_scheduler.clone(),
+                leader_scheduler.clone(),
                 tick_height,
                 tpu_exit,
             );
@@ -367,15 +384,14 @@ impl Fullnode {
 
         Fullnode {
             keypair,
-            vote_account_keypair,
             cluster_info,
             shared_window,
             bank,
             sigverify_disabled,
             rpu,
             ncp,
-            rpc_service: Some(rpc_service),
-            rpc_pubsub_service: Some(rpc_pubsub_service),
+            rpc_service,
+            rpc_pubsub_service,
             node_role,
             ledger_path: ledger_path.to_owned(),
             exit,
@@ -386,50 +402,27 @@ impl Fullnode {
             broadcast_socket: node.sockets.broadcast,
             requests_socket: node.sockets.requests,
             respond_socket: node.sockets.respond,
-            rpc_port,
+            leader_scheduler,
         }
     }
 
     fn leader_to_validator(&mut self) -> Result<()> {
-        // Close down any services that could have a reference to the bank
-        if self.rpu.is_some() {
-            let old_rpu = self.rpu.take().unwrap();
-            old_rpu.close()?;
-        }
-
-        if self.rpc_service.is_some() {
-            let old_rpc_service = self.rpc_service.take().unwrap();
-            old_rpc_service.close()?;
-        }
-
-        if self.rpc_pubsub_service.is_some() {
-            let old_rpc_pubsub_service = self.rpc_pubsub_service.take().unwrap();
-            old_rpc_pubsub_service.close()?;
-        }
-
-        // Correctness check: Ensure that references to the bank and leader scheduler are no
-        // longer held by any running thread
-        let mut new_leader_scheduler = self.bank.leader_scheduler.read().unwrap().clone();
-
-        // Clear the leader scheduler
-        new_leader_scheduler.reset();
-
-        let (new_bank, scheduled_leader, tick_height, entry_height, last_entry_id) = {
+        let (scheduled_leader, tick_height, entry_height, last_entry_id) = {
+            let mut ls_lock = self.leader_scheduler.write().unwrap();
+            // Clear the leader scheduler
+            ls_lock.reset();
+
             // TODO: We can avoid building the bank again once RecordStage is
             // integrated with BankingStage
-            let (new_bank, tick_height, entry_height, ledger_tail) = Self::new_bank_from_ledger(
-                &self.ledger_path,
-                Arc::new(RwLock::new(new_leader_scheduler)),
-            );
+            let (bank, tick_height, entry_height, ledger_tail) =
+                Self::new_bank_from_ledger(&self.ledger_path, &mut *ls_lock);
 
-            let new_bank = Arc::new(new_bank);
-            let scheduled_leader = new_bank
-                .get_current_leader()
-                .expect("Scheduled leader should exist after rebuilding bank");
+            self.bank = Arc::new(bank);
 
             (
-                new_bank,
-                scheduled_leader,
+                ls_lock
+                    .get_scheduled_leader(entry_height)
+                    .expect("Scheduled leader should exist after rebuilding bank"),
                 tick_height,
                 entry_height,
                 ledger_tail
@@ -444,23 +437,21 @@ impl Fullnode {
             .unwrap()
             .set_leader(scheduled_leader);
 
-        // Spin up new versions of all the services that relied on the bank, passing in the
-        // new bank
-        self.rpu = Some(Rpu::new(
-            &new_bank,
-            self.requests_socket
-                .try_clone()
-                .expect("Failed to clone requests socket"),
-            self.respond_socket
-                .try_clone()
-                .expect("Failed to clone respond socket"),
-        ));
-
-        let (rpc_service, rpc_pubsub_service) =
-            Self::startup_rpc_services(self.rpc_port, &new_bank, &self.cluster_info);
-        self.rpc_service = Some(rpc_service);
-        self.rpc_pubsub_service = Some(rpc_pubsub_service);
-        self.bank = new_bank;
+        // Make a new RPU to serve requests out of the new bank we've created
+        // instead of the old one
+        if self.rpu.is_some() {
+            let old_rpu = self.rpu.take().unwrap();
+            old_rpu.close()?;
+            self.rpu = Some(Rpu::new(
+                &self.bank,
+                self.requests_socket
+                    .try_clone()
+                    .expect("Failed to clone requests socket"),
+                self.respond_socket
+                    .try_clone()
+                    .expect("Failed to clone respond socket"),
+            ));
+        }
 
         // In the rare case that the leader exited on a multiple of seed_rotation_interval
         // when the new leader schedule was being generated, and there are no other validators
@@ -468,31 +459,32 @@ impl Fullnode {
         // check for that
         if scheduled_leader == self.keypair.pubkey() {
             self.validator_to_leader(tick_height, entry_height, last_entry_id);
-            Ok(())
-        } else {
-            let tvu = Tvu::new(
-                self.keypair.clone(),
-                self.vote_account_keypair.clone(),
-                &self.bank,
-                entry_height,
-                self.cluster_info.clone(),
-                self.shared_window.clone(),
-                self.replicate_socket
-                    .iter()
-                    .map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
-                    .collect(),
-                self.repair_socket
-                    .try_clone()
-                    .expect("Failed to clone repair socket"),
-                self.retransmit_socket
-                    .try_clone()
-                    .expect("Failed to clone retransmit socket"),
-                Some(&self.ledger_path),
-            );
-            let validator_state = ValidatorServices::new(tvu);
-            self.node_role = Some(NodeRole::Validator(validator_state));
-            Ok(())
+            return Ok(());
         }
+
+        let tvu = Tvu::new(
+            self.keypair.clone(),
+            &self.bank,
+            tick_height,
+            entry_height,
+            self.cluster_info.clone(),
+            self.shared_window.clone(),
+            self.replicate_socket
+                .iter()
+                .map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
+                .collect(),
+            self.repair_socket
+                .try_clone()
+                .expect("Failed to clone repair socket"),
+            self.retransmit_socket
+                .try_clone()
+                .expect("Failed to clone retransmit socket"),
+            Some(&self.ledger_path),
+            self.leader_scheduler.clone(),
+        );
+        let validator_state = ValidatorServices::new(tvu);
+        self.node_role = Some(NodeRole::Validator(validator_state));
+        Ok(())
     }
 
     fn validator_to_leader(&mut self, tick_height: u64, entry_height: u64, last_entry_id: Hash) {
@@ -502,12 +494,14 @@ impl Fullnode {
             .set_leader(self.keypair.pubkey());
 
         let max_tick_height = {
-            let ls_lock = self.bank.leader_scheduler.read().unwrap();
+            let ls_lock = self.leader_scheduler.read().unwrap();
             ls_lock.max_height_for_leader(tick_height)
         };
 
         let (tpu, blob_receiver, tpu_exit) = Tpu::new(
+            self.keypair.clone(),
             &self.bank,
+            &self.cluster_info,
             Default::default(),
             self.transaction_sockets
                 .iter()
@@ -531,7 +525,7 @@ impl Fullnode {
             self.shared_window.clone(),
             entry_height,
             blob_receiver,
-            self.bank.leader_scheduler.clone(),
+            self.leader_scheduler.clone(),
             tick_height,
             tpu_exit,
         );
@@ -575,12 +569,6 @@ impl Fullnode {
         if let Some(ref rpu) = self.rpu {
            rpu.exit();
         }
-        if let Some(ref rpc_service) = self.rpc_service {
-            rpc_service.exit();
-        }
-        if let Some(ref rpc_pubsub_service) = self.rpc_pubsub_service {
-            rpc_pubsub_service.exit();
-        }
         match self.node_role {
             Some(NodeRole::Leader(ref leader_services)) => leader_services.exit(),
             Some(NodeRole::Validator(ref validator_services)) => validator_services.exit(),
@@ -595,50 +583,21 @@ impl Fullnode {
 
     pub fn new_bank_from_ledger(
         ledger_path: &str,
-        leader_scheduler: Arc<RwLock<LeaderScheduler>>,
+        leader_scheduler: &mut LeaderScheduler,
     ) -> (Bank, u64, u64, Vec<Entry>) {
-        let mut bank = Bank::new_with_builtin_programs();
-        bank.leader_scheduler = leader_scheduler;
+        let bank = Bank::new_with_builtin_programs();
         let entries = read_ledger(ledger_path, true).expect("opening ledger");
         let entries = entries
             .map(|e| e.unwrap_or_else(|err| panic!("failed to parse entry. error: {}", err)));
         info!("processing ledger...");
-        let (tick_height, entry_height, ledger_tail) =
-            bank.process_ledger(entries).expect("process_ledger");
+        let (tick_height, entry_height, ledger_tail) = bank
+            .process_ledger(entries, leader_scheduler)
+            .expect("process_ledger");
         // entry_height is the network-wide agreed height of the ledger.
         // initialize it from the input ledger
         info!("processed {} ledger...", entry_height);
         (bank, tick_height, entry_height, ledger_tail)
     }
 
-    pub fn get_leader_scheduler(&self) -> &Arc<RwLock<LeaderScheduler>> {
-        &self.bank.leader_scheduler
-    }
-
-    fn startup_rpc_services(
-        rpc_port: Option<u16>,
-        bank: &Arc<Bank>,
-        cluster_info: &Arc<RwLock<ClusterInfo>>,
-    ) -> (JsonRpcService, PubSubService) {
-        // Use custom RPC port, if provided (`Some(port)`)
-        // RPC port may be any open port on the node
-        // If rpc_port == `None`, node will listen on the default RPC_PORT from Rpc module
-        // If rpc_port == `Some(0)`, node will dynamically choose any open port for both
-        // Rpc and RpcPubsub serivces. Useful for tests.
-
-        let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
-        let rpc_pubsub_addr = SocketAddr::new(
-            IpAddr::V4(Ipv4Addr::from(0)),
-            rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 }),
-        );
-
-        // TODO: The RPC service assumes that there is a drone running on the leader
-        // Drone location/id will need to be handled a different way as soon as leader rotation begins
-        (
-            JsonRpcService::new(bank, cluster_info, rpc_addr),
-            PubSubService::new(bank, rpc_pubsub_addr),
-        )
-    }
 }
 
 impl Service for Fullnode {
@@ -648,14 +607,9 @@ impl Service for Fullnode {
         if let Some(rpu) = self.rpu {
             rpu.join()?;
         }
-        if let Some(rpc_service) = self.rpc_service {
-            rpc_service.join()?;
-        }
-        if let Some(rpc_pubsub_service) = self.rpc_pubsub_service {
-            rpc_pubsub_service.join()?;
-        }
 
         self.ncp.join()?;
+        self.rpc_service.join()?;
+        self.rpc_pubsub_service.join()?;
 
         match self.node_role {
             Some(NodeRole::Validator(validator_service)) => {
@@ -689,7 +643,7 @@ mod tests {
     use std::fs::remove_dir_all;
     use std::net::UdpSocket;
     use std::sync::mpsc::channel;
-    use std::sync::{Arc, RwLock};
+    use std::sync::Arc;
     use streamer::responder;
 
     #[test]
@@ -697,19 +651,13 @@ mod tests {
         let keypair = Keypair::new();
         let tn = Node::new_localhost_with_pubkey(keypair.pubkey());
         let (mint, validator_ledger_path) = create_tmp_genesis("validator_exit", 10_000);
-        let mut bank = Bank::new(&mint);
+        let bank = Bank::new(&mint);
         let entry = tn.info.clone();
         let genesis_entries = &mint.create_entries();
         let entry_height = genesis_entries.len() as u64;
 
-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            entry.id,
-        )));
-        bank.leader_scheduler = leader_scheduler;
-
         let v = Fullnode::new_with_bank(
-            Arc::new(keypair),
-            Arc::new(Keypair::new()),
+            keypair,
             bank,
             0,
             entry_height,
@@ -718,6 +666,7 @@ mod tests {
             Some(&entry),
             &validator_ledger_path,
             false,
+            LeaderScheduler::from_bootstrap_leader(entry.id),
             Some(0),
         );
         v.close().unwrap();
@@ -734,20 +683,13 @@ mod tests {
             let (mint, validator_ledger_path) =
                 create_tmp_genesis(&format!("validator_parallel_exit_{}", i), 10_000);
             ledger_paths.push(validator_ledger_path.clone());
-            let mut bank = Bank::new(&mint);
+            let bank = Bank::new(&mint);
             let entry = tn.info.clone();
 
             let genesis_entries = &mint.create_entries();
             let entry_height = genesis_entries.len() as u64;
 
-            let leader_scheduler = Arc::new(RwLock::new(
-                LeaderScheduler::from_bootstrap_leader(entry.id),
-            ));
-            bank.leader_scheduler = leader_scheduler;
-
             Fullnode::new_with_bank(
-                Arc::new(keypair),
-                Arc::new(Keypair::new()),
+                keypair,
                 bank,
                 0,
                 entry_height,
@@ -756,6 +698,7 @@ mod tests {
                 Some(&entry),
                 &validator_ledger_path,
                 false,
+                LeaderScheduler::from_bootstrap_leader(entry.id),
                 Some(0),
             )
         }).collect();
@@ -814,8 +757,7 @@ mod tests {
         let mut bootstrap_leader = Fullnode::new(
             bootstrap_leader_node,
             &bootstrap_leader_ledger_path,
-            Arc::new(bootstrap_leader_keypair),
-            Arc::new(Keypair::new()),
+            bootstrap_leader_keypair,
             Some(bootstrap_leader_info.contact_info.ncp),
             false,
             LeaderScheduler::new(&leader_scheduler_config),
@@ -841,7 +783,7 @@ mod tests {
     #[test]
     fn test_wrong_role_transition() {
         // Create the leader node information
-        let bootstrap_leader_keypair = Arc::new(Keypair::new());
+        let bootstrap_leader_keypair = Keypair::new();
         let bootstrap_leader_node =
             Node::new_localhost_with_pubkey(bootstrap_leader_keypair.pubkey());
         let bootstrap_leader_info = bootstrap_leader_node.info.clone();
@@ -863,7 +805,7 @@ mod tests {
         // Write the entries to the ledger that will cause leader rotation
         // after the bootstrap height
         let mut ledger_writer = LedgerWriter::open(&bootstrap_leader_ledger_path, false).unwrap();
-        let (active_set_entries, validator_vote_account_keypair) = make_active_set_entries(
+        let active_set_entries = make_active_set_entries(
            &validator_keypair,
            &mint.keypair(),
            &last_id,
@@ -895,12 +837,10 @@ mod tests {
         );
 
         // Test that a node knows to transition to a validator based on parsing the ledger
-        let leader_vote_account_keypair = Arc::new(Keypair::new());
         let bootstrap_leader = Fullnode::new(
             bootstrap_leader_node,
             &bootstrap_leader_ledger_path,
             bootstrap_leader_keypair,
-            leader_vote_account_keypair,
             Some(bootstrap_leader_info.contact_info.ncp),
             false,
             LeaderScheduler::new(&leader_scheduler_config),
@@ -917,8 +857,7 @@ mod tests {
         let validator = Fullnode::new(
             validator_node,
             &bootstrap_leader_ledger_path,
-            Arc::new(validator_keypair),
-            Arc::new(validator_vote_account_keypair),
+            validator_keypair,
             Some(bootstrap_leader_info.contact_info.ncp),
             false,
             LeaderScheduler::new(&leader_scheduler_config),
@@ -966,7 +905,7 @@ mod tests {
         //
         // 2) A vote from the validator
         let mut ledger_writer = LedgerWriter::open(&validator_ledger_path, false).unwrap();
-        let (active_set_entries, validator_vote_account_keypair) =
+        let active_set_entries =
             make_active_set_entries(&validator_keypair, &mint.keypair(), &last_id, &last_id, 0);
         let initial_tick_height = genesis_entries
             .iter()
@@ -994,8 +933,7 @@ mod tests {
         let mut validator = Fullnode::new(
             validator_node,
             &validator_ledger_path,
-            Arc::new(validator_keypair),
-            Arc::new(validator_vote_account_keypair),
+            validator_keypair,
             Some(leader_ncp),
             false,
             LeaderScheduler::new(&leader_scheduler_config),
@@ -1055,7 +993,7 @@ mod tests {
         // transitioned after tick_height = bootstrap_height.
         let (_, tick_height, entry_height, _) = Fullnode::new_bank_from_ledger(
             &validator_ledger_path,
-            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
+            &mut LeaderScheduler::new(&leader_scheduler_config),
         );
 
         assert_eq!(tick_height, bootstrap_height);
|
@ -4,24 +4,82 @@
|
|||||||
use bank::Bank;
|
use bank::Bank;
|
||||||
|
|
||||||
use bincode::serialize;
|
use bincode::serialize;
|
||||||
|
use budget_instruction::Vote;
|
||||||
|
use budget_transaction::BudgetTransaction;
|
||||||
use byteorder::{LittleEndian, ReadBytesExt};
|
use byteorder::{LittleEndian, ReadBytesExt};
|
||||||
use entry::Entry;
|
use entry::Entry;
|
||||||
use hash::{hash, Hash};
|
use hash::{hash, Hash};
|
||||||
use ledger::create_ticks;
|
use ledger::create_ticks;
|
||||||
use signature::{Keypair, KeypairUtil};
|
use signature::{Keypair, KeypairUtil};
|
||||||
|
#[cfg(test)]
|
||||||
|
use solana_sdk::account::Account;
|
||||||
use solana_sdk::pubkey::Pubkey;
|
use solana_sdk::pubkey::Pubkey;
|
||||||
use std::collections::HashSet;
|
use std::collections::HashMap;
|
||||||
use std::io::Cursor;
|
use std::io::Cursor;
|
||||||
use system_transaction::SystemTransaction;
|
use system_transaction::SystemTransaction;
|
||||||
use transaction::Transaction;
|
use transaction::Transaction;
|
||||||
use vote_program::{Vote, VoteProgram};
|
|
||||||
use vote_transaction::VoteTransaction;
|
|
||||||
|
|
||||||
pub const DEFAULT_BOOTSTRAP_HEIGHT: u64 = 1000;
|
pub const DEFAULT_BOOTSTRAP_HEIGHT: u64 = 1000;
|
||||||
pub const DEFAULT_LEADER_ROTATION_INTERVAL: u64 = 100;
|
pub const DEFAULT_LEADER_ROTATION_INTERVAL: u64 = 100;
|
||||||
pub const DEFAULT_SEED_ROTATION_INTERVAL: u64 = 1000;
|
pub const DEFAULT_SEED_ROTATION_INTERVAL: u64 = 1000;
|
||||||
pub const DEFAULT_ACTIVE_WINDOW_LENGTH: u64 = 1000;
|
pub const DEFAULT_ACTIVE_WINDOW_LENGTH: u64 = 1000;
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct ActiveValidators {
|
||||||
|
// Map from validator id to the last PoH height at which they voted,
|
||||||
|
pub active_validators: HashMap<Pubkey, u64>,
|
||||||
|
pub active_window_length: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ActiveValidators {
|
||||||
|
pub fn new(active_window_length_option: Option<u64>) -> Self {
|
||||||
|
let mut active_window_length = DEFAULT_ACTIVE_WINDOW_LENGTH;
|
||||||
|
if let Some(input) = active_window_length_option {
|
||||||
|
active_window_length = input;
|
||||||
|
}
|
||||||
|
|
||||||
|
ActiveValidators {
|
||||||
|
active_validators: HashMap::new(),
|
||||||
|
active_window_length,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finds all the active voters who have voted in the range
|
||||||
|
// (height - active_window_length, height], and removes
|
||||||
|
// anybody who hasn't voted in that range from the map
|
||||||
|
pub fn get_active_set(&mut self, height: u64) -> Vec<Pubkey> {
|
||||||
|
// Don't filter anything if height is less than the
|
||||||
|
// size of the active window. Otherwise, calculate the acceptable
|
||||||
|
// window and filter the active_validators
|
||||||
|
|
||||||
|
// Note: height == 0 will only be included for all
|
||||||
|
// height < self.active_window_length
|
||||||
|
let upper_bound = height;
|
||||||
|
if height >= self.active_window_length {
|
||||||
|
let lower_bound = height - self.active_window_length;
|
||||||
|
self.active_validators
|
||||||
|
.retain(|_, height| *height > lower_bound);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.active_validators
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(k, v)| if *v <= upper_bound { Some(*k) } else { None })
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push a vote for a validator with id == "id" who voted at PoH height == "height"
|
||||||
|
pub fn push_vote(&mut self, id: Pubkey, height: u64) -> () {
|
||||||
|
let old_height = self.active_validators.entry(id).or_insert(height);
|
||||||
|
if height > *old_height {
|
||||||
|
*old_height = height;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reset(&mut self) -> () {
|
||||||
|
self.active_validators.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub struct LeaderSchedulerConfig {
|
pub struct LeaderSchedulerConfig {
|
||||||
// The first leader who will bootstrap the network
|
// The first leader who will bootstrap the network
|
||||||
pub bootstrap_leader: Pubkey,
|
pub bootstrap_leader: Pubkey,
|
||||||
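The `ActiveValidators` window logic added above is easy to rehearse in isolation: `push_vote` keeps the highest height seen per validator, and `get_active_set` evicts anyone whose last vote fell out of the trailing window, then reports voters at or below the query height. A self-contained sketch with `u8` ids standing in for `Pubkey` (sorted output added only for deterministic asserts):

```rust
use std::collections::HashMap;

struct ActiveValidators {
    active_validators: HashMap<u8, u64>,
    active_window_length: u64,
}

impl ActiveValidators {
    // Keep only the highest vote height seen per validator.
    fn push_vote(&mut self, id: u8, height: u64) {
        let old = self.active_validators.entry(id).or_insert(height);
        if height > *old {
            *old = height;
        }
    }

    // Active set = voters with a vote in (height - window, height].
    fn get_active_set(&mut self, height: u64) -> Vec<u8> {
        if height >= self.active_window_length {
            let lower_bound = height - self.active_window_length;
            self.active_validators.retain(|_, h| *h > lower_bound);
        }
        let mut ids: Vec<u8> = self
            .active_validators
            .iter()
            .filter_map(|(k, v)| if *v <= height { Some(*k) } else { None })
            .collect();
        ids.sort();
        ids
    }
}

fn main() {
    let mut av = ActiveValidators {
        active_validators: HashMap::new(),
        active_window_length: 10,
    };
    av.push_vote(1, 5);
    av.push_vote(2, 12);
    assert_eq!(av.get_active_set(14), vec![1, 2]); // both voted within (4, 14]
    assert_eq!(av.get_active_set(20), vec![2]); // validator 1 aged out of (10, 20]
}
```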
@@ -61,7 +119,7 @@ impl LeaderSchedulerConfig {
     }
 }
 
-#[derive(Clone, Debug)]
+#[derive(Debug)]
 pub struct LeaderScheduler {
     // Set to true if we want the default implementation of the LeaderScheduler,
     // where ony the bootstrap leader is used
@@ -81,13 +139,12 @@ pub struct LeaderScheduler {
     // the leader rotation process begins to pick future leaders
     pub bootstrap_height: u64,
 
+    // Maintain the set of active validators
+    pub active_validators: ActiveValidators,
+
     // The last height at which the seed + schedule was generated
     pub last_seed_height: Option<u64>,
 
-    // The length of time in ticks for which a vote qualifies a candidate for leader
-    // selection
-    pub active_window_length: u64,
-
     // Round-robin ordering for the validators
     leader_schedule: Vec<Pubkey>,
 
@@ -136,11 +193,6 @@ impl LeaderScheduler {
             seed_rotation_interval = input;
         }
 
-        let mut active_window_length = DEFAULT_ACTIVE_WINDOW_LENGTH;
-        if let Some(input) = config.active_window_length_option {
-            active_window_length = input;
-        }
-
         // Enforced invariants
         assert!(seed_rotation_interval >= leader_rotation_interval);
         assert!(bootstrap_height > 0);
@@ -148,13 +200,13 @@ impl LeaderScheduler {
 
         LeaderScheduler {
             use_only_bootstrap_leader: false,
+            active_validators: ActiveValidators::new(config.active_window_length_option),
             leader_rotation_interval,
             seed_rotation_interval,
             leader_schedule: Vec::new(),
             last_seed_height: None,
             bootstrap_leader: config.bootstrap_leader,
             bootstrap_height,
-            active_window_length,
             seed: 0,
         }
     }
@@ -228,6 +280,15 @@ impl LeaderScheduler {
 
     pub fn reset(&mut self) {
         self.last_seed_height = None;
+        self.active_validators.reset();
+    }
+
+    pub fn push_vote(&mut self, id: Pubkey, height: u64) {
+        if self.use_only_bootstrap_leader {
+            return;
+        }
+
+        self.active_validators.push_vote(id, height);
     }
 
     pub fn update_height(&mut self, height: u64, bank: &Bank) {
@ -282,34 +343,8 @@ impl LeaderScheduler {
|
|||||||
Some(self.leader_schedule[validator_index])
|
Some(self.leader_schedule[validator_index])
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: We use a HashSet for now because a single validator could potentially register
|
fn get_active_set(&mut self, height: u64) -> Vec<Pubkey> {
|
||||||
// multiple vote account. Once that is no longer possible (see the TODO in vote_program.rs,
|
self.active_validators.get_active_set(height)
|
||||||
// process_transaction(), case VoteInstruction::RegisterAccount), we can use a vector.
|
|
||||||
fn get_active_set(&mut self, height: u64, bank: &Bank) -> HashSet<Pubkey> {
|
|
||||||
let upper_bound = height;
|
|
||||||
let lower_bound = height.saturating_sub(self.active_window_length);
|
|
||||||
|
|
||||||
{
|
|
||||||
let bank_accounts = &*bank.accounts.read().unwrap();
|
|
||||||
|
|
||||||
bank_accounts
|
|
||||||
.values()
|
|
||||||
.filter_map(|account| {
|
|
||||||
if VoteProgram::check_id(&account.program_id) {
|
|
||||||
if let Ok(vote_state) = VoteProgram::deserialize(&account.userdata) {
|
|
||||||
return vote_state
|
|
||||||
.votes
|
|
||||||
.back()
|
|
||||||
.filter(|vote| {
|
|
||||||
vote.tick_height > lower_bound
|
|
||||||
&& vote.tick_height <= upper_bound
|
|
||||||
}).map(|_| vote_state.node_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
None
|
|
||||||
}).collect()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Called every seed_rotation_interval entries, generates the leader schedule
|
// Called every seed_rotation_interval entries, generates the leader schedule
|
||||||
@ -319,8 +354,8 @@ impl LeaderScheduler {
|
|||||||
assert!((height - self.bootstrap_height) % self.seed_rotation_interval == 0);
|
assert!((height - self.bootstrap_height) % self.seed_rotation_interval == 0);
|
||||||
let seed = Self::calculate_seed(height);
|
let seed = Self::calculate_seed(height);
|
||||||
self.seed = seed;
|
self.seed = seed;
|
||||||
let active_set = self.get_active_set(height, &bank);
|
let active_set = self.get_active_set(height);
|
||||||
let ranked_active_set = Self::rank_active_set(bank, active_set.iter());
|
let ranked_active_set = Self::rank_active_set(bank, &active_set[..]);
|
||||||
|
|
||||||
// Handle case where there are no active validators with
|
// Handle case where there are no active validators with
|
||||||
// non-zero stake. In this case, use the bootstrap leader for
|
// non-zero stake. In this case, use the bootstrap leader for
|
||||||
@ -382,11 +417,9 @@ impl LeaderScheduler {
|
|||||||
bank.get_balance(id)
|
bank.get_balance(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn rank_active_set<'a, I>(bank: &Bank, active: I) -> Vec<(&'a Pubkey, u64)>
|
fn rank_active_set<'a>(bank: &Bank, active: &'a [Pubkey]) -> Vec<(&'a Pubkey, u64)> {
|
||||||
where
|
|
||||||
I: Iterator<Item = &'a Pubkey>,
|
|
||||||
{
|
|
||||||
let mut active_accounts: Vec<(&'a Pubkey, u64)> = active
|
let mut active_accounts: Vec<(&'a Pubkey, u64)> = active
|
||||||
|
.iter()
|
||||||
.filter_map(|pk| {
|
.filter_map(|pk| {
|
||||||
let stake = Self::get_stake(pk, bank);
|
let stake = Self::get_stake(pk, bank);
|
||||||
if stake > 0 {
|
if stake > 0 {
|
||||||
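Note: `rank_active_set` now takes a plain slice instead of a generic iterator, which simplifies both the signature and its call sites. A hedged sketch of the ranking shape — `balance_of` stands in for `Bank::get_balance`, and the descending-stake sort with a pubkey tie-break is an assumption drawn from what the tests further down assert:

```rust
// Stand-in pubkey type so the sketch is self-contained.
type Pubkey = [u8; 32];

fn rank_active_set<'a>(
    balance_of: impl Fn(&Pubkey) -> u64,
    active: &'a [Pubkey],
) -> Vec<(&'a Pubkey, u64)> {
    let mut ranked: Vec<(&'a Pubkey, u64)> = active
        .iter()
        .filter_map(|pk| {
            let stake = balance_of(pk);
            // Zero-stake validators never make the schedule.
            if stake > 0 {
                Some((pk, stake))
            } else {
                None
            }
        }).collect();
    // Assumed ordering: highest stake first, ties broken by pubkey order.
    ranked.sort_by(|(pk1, s1), (pk2, s2)| s2.cmp(s1).then(pk1.cmp(pk2)));
    ranked
}

fn main() {
    let active = [[3u8; 32], [1u8; 32], [2u8; 32]];
    let ranked = rank_active_set(|pk| u64::from(pk[0] % 2), &active);
    // Only the odd-stake keys survive, tied at stake 1, ordered by pubkey.
    assert_eq!(ranked, vec![(&[1u8; 32], 1), (&[3u8; 32], 1)]);
}
```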
@@ -445,6 +478,24 @@ impl Default for LeaderScheduler {
     }
 }
 
+// Remove all candiates for leader selection from the active set by clearing the bank,
+// and then set a single new candidate who will be eligible starting at height = vote_height
+// by adding one new account to the bank
+#[cfg(test)]
+pub fn set_new_leader(bank: &Bank, leader_scheduler: &mut LeaderScheduler, vote_height: u64) {
+    // Set the scheduled next leader to some other node
+    let new_leader_keypair = Keypair::new();
+    let new_leader_id = new_leader_keypair.pubkey();
+    leader_scheduler.push_vote(new_leader_id, vote_height);
+    let dummy_id = Keypair::new().pubkey();
+    let new_account = Account::new(1, 10, dummy_id.clone());
+
+    // Remove the previous acounts from the active set
+    let mut accounts = bank.accounts().write().unwrap();
+    accounts.clear();
+    accounts.insert(new_leader_id, new_account);
+}
+
 // Create two entries so that the node with keypair == active_keypair
 // is in the active set for leader selection:
 // 1) Give the node a nonzero number of tokens,
@@ -455,107 +506,50 @@ pub fn make_active_set_entries(
     last_entry_id: &Hash,
     last_tick_id: &Hash,
     num_ending_ticks: usize,
-) -> (Vec<Entry>, Keypair) {
+) -> Vec<Entry> {
     // 1) Create transfer token entry
     let transfer_tx =
-        Transaction::system_new(&token_source, active_keypair.pubkey(), 2, *last_tick_id);
+        Transaction::system_new(&token_source, active_keypair.pubkey(), 1, *last_tick_id);
     let transfer_entry = Entry::new(last_entry_id, 1, vec![transfer_tx]);
     let mut last_entry_id = transfer_entry.id;
 
-    // 2) Create the vote account
-    let vote_account = Keypair::new();
-    let create_vote_account_tx =
-        Transaction::vote_account_new(active_keypair, vote_account.pubkey(), *last_tick_id, 1);
-    let create_vote_account_entry = Entry::new(&last_entry_id, 1, vec![create_vote_account_tx]);
-    last_entry_id = create_vote_account_entry.id;
-
-    // 3) Register the vote account
-    let register_vote_account_tx =
-        Transaction::vote_account_register(active_keypair, vote_account.pubkey(), *last_tick_id, 0);
-
-    let register_vote_account_entry = Entry::new(&last_entry_id, 1, vec![register_vote_account_tx]);
-    last_entry_id = register_vote_account_entry.id;
-
-    // 4) Create vote entry
-    let vote = Vote { tick_height: 1 };
-    let vote_tx = Transaction::vote_new(&vote_account, vote, *last_tick_id, 0);
+    // 2) Create vote entry
+    let vote = Vote {
+        version: 0,
+        contact_info_version: 0,
+    };
+    let vote_tx = Transaction::budget_new_vote(&active_keypair, vote, *last_tick_id, 0);
     let vote_entry = Entry::new(&last_entry_id, 1, vec![vote_tx]);
     last_entry_id = vote_entry.id;
 
-    // 5) Create the ending empty ticks
-    let mut txs = vec![
-        transfer_entry,
-        create_vote_account_entry,
-        register_vote_account_entry,
-        vote_entry,
-    ];
+    // 3) Create the ending empty ticks
+    let mut txs = vec![transfer_entry, vote_entry];
    let empty_ticks = create_ticks(num_ending_ticks, last_entry_id);
     txs.extend(empty_ticks);
-    (txs, vote_account)
+    txs
 }
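Note: `make_active_set_entries` builds a chain, so ordering matters — the vote entry's PoH id must be derived from the transfer entry's id. A toy model of that chaining, with `Entry` and `next_id` as simplified stand-ins for the crate's real types:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Debug, PartialEq)]
struct Entry {
    id: u64,
    label: &'static str,
}

// Toy stand-in for the PoH hash chain.
fn next_id(prev_id: u64, label: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    prev_id.hash(&mut hasher);
    label.hash(&mut hasher);
    hasher.finish()
}

fn make_active_set_entries(last_entry_id: u64) -> Vec<Entry> {
    // 1) give the node a nonzero number of tokens
    let transfer = Entry { id: next_id(last_entry_id, "transfer"), label: "transfer" };
    // 2) vote, chained off the transfer entry's id
    let vote = Entry { id: next_id(transfer.id, "vote"), label: "vote" };
    vec![transfer, vote]
}

fn main() {
    let entries = make_active_set_entries(42);
    assert_eq!(entries.len(), 2);
    // The last id is what any trailing ticks must chain from.
    let _last_id = entries.last().unwrap().id;
}
```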
 
 #[cfg(test)]
 mod tests {
     use bank::Bank;
-    use hash::Hash;
     use leader_scheduler::{
-        LeaderScheduler, LeaderSchedulerConfig, DEFAULT_BOOTSTRAP_HEIGHT,
-        DEFAULT_LEADER_ROTATION_INTERVAL, DEFAULT_SEED_ROTATION_INTERVAL,
+        ActiveValidators, LeaderScheduler, LeaderSchedulerConfig, DEFAULT_ACTIVE_WINDOW_LENGTH,
+        DEFAULT_BOOTSTRAP_HEIGHT, DEFAULT_LEADER_ROTATION_INTERVAL, DEFAULT_SEED_ROTATION_INTERVAL,
     };
     use mint::Mint;
-    use result::Result;
     use signature::{Keypair, KeypairUtil};
     use solana_sdk::pubkey::Pubkey;
     use std::collections::HashSet;
-    use std::hash::Hash as StdHash;
+    use std::hash::Hash;
     use std::iter::FromIterator;
-    use transaction::Transaction;
-    use vote_program::Vote;
-    use vote_transaction::VoteTransaction;
 
     fn to_hashset_owned<T>(slice: &[T]) -> HashSet<T>
     where
-        T: Eq + StdHash + Clone,
+        T: Eq + Hash + Clone,
     {
         HashSet::from_iter(slice.iter().cloned())
     }
 
-    fn push_vote(vote_account: &Keypair, bank: &Bank, height: u64, last_id: Hash) {
-        let vote = Vote {
-            tick_height: height,
-        };
-
-        let new_vote_tx = Transaction::vote_new(vote_account, vote, last_id, 0);
-
-        bank.process_transaction(&new_vote_tx).unwrap();
-    }
-
-    fn create_vote_account(
-        node_keypair: &Keypair,
-        bank: &Bank,
-        num_tokens: i64,
-        last_id: Hash,
-    ) -> Result<Keypair> {
-        let new_vote_account = Keypair::new();
-
-        // Create the new vote account
-        let tx = Transaction::vote_account_new(
-            node_keypair,
-            new_vote_account.pubkey(),
-            last_id,
-            num_tokens,
-        );
-        bank.process_transaction(&tx)?;
-
-        // Register the vote account to the validator
-        let tx =
-            Transaction::vote_account_register(node_keypair, new_vote_account.pubkey(), last_id, 0);
-        bank.process_transaction(&tx)?;
-
-        Ok(new_vote_account)
-    }
-
     fn run_scheduler_test(
         num_validators: usize,
         bootstrap_height: u64,
@@ -578,11 +572,7 @@ mod tests {
         let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
 
         // Create the bank and validators, which are inserted in order of account balance
-        let num_vote_account_tokens = 1;
-        let mint = Mint::new(
-            (((num_validators + 1) / 2) * (num_validators + 1)
-                + num_vote_account_tokens * num_validators) as i64,
-        );
+        let mint = Mint::new((((num_validators + 1) / 2) * (num_validators + 1)) as i64);
         let bank = Bank::new(&mint);
         let mut validators = vec![];
         let last_id = mint
@@ -594,24 +584,11 @@ mod tests {
             let new_validator = Keypair::new();
             let new_pubkey = new_validator.pubkey();
             validators.push(new_pubkey);
-            // Give the validator some tokens
-            bank.transfer(
-                (i + 1 + num_vote_account_tokens) as i64,
-                &mint.keypair(),
-                new_pubkey,
-                last_id,
-            ).unwrap();
-
-            // Create a vote account
-            let new_vote_account = create_vote_account(
-                &new_validator,
-                &bank,
-                num_vote_account_tokens as i64,
-                mint.last_id(),
-            ).unwrap();
             // Vote to make the validator part of the active set for the entire test
             // (we made the active_window_length large enough at the beginning of the test)
-            push_vote(&new_vote_account, &bank, 1, mint.last_id());
+            leader_scheduler.push_vote(new_pubkey, 1);
+            bank.transfer((i + 1) as i64, &mint.keypair(), new_pubkey, last_id)
+                .unwrap();
         }
 
         // The scheduled leader during the bootstrapping period (assuming a seed + schedule
@@ -689,9 +666,6 @@ mod tests {
     fn test_active_set() {
         let leader_id = Keypair::new().pubkey();
         let active_window_length = 1000;
-        let mint = Mint::new(10000);
-        let bank = Bank::new(&mint);
-
         let leader_scheduler_config = LeaderSchedulerConfig::new(
             leader_id,
             Some(100),
@@ -707,60 +681,40 @@ mod tests {
         let num_old_ids = 20;
         let mut old_ids = HashSet::new();
         for _ in 0..num_old_ids {
-            let new_keypair = Keypair::new();
-            let pk = new_keypair.pubkey();
-            old_ids.insert(pk.clone());
-
-            // Give the account some stake
-            bank.transfer(5, &mint.keypair(), pk, mint.last_id())
-                .unwrap();
-
-            // Create a vote account
-            let new_vote_account =
-                create_vote_account(&new_keypair, &bank, 1, mint.last_id()).unwrap();
-
-            // Push a vote for the account
-            push_vote(&new_vote_account, &bank, start_height, mint.last_id());
+            let pk = Keypair::new().pubkey();
+            old_ids.insert(pk);
+            leader_scheduler.push_vote(pk, start_height);
         }
 
         // Insert a bunch of votes at height "start_height + active_window_length"
         let num_new_ids = 10;
         let mut new_ids = HashSet::new();
         for _ in 0..num_new_ids {
-            let new_keypair = Keypair::new();
-            let pk = new_keypair.pubkey();
+            let pk = Keypair::new().pubkey();
             new_ids.insert(pk);
-            // Give the account some stake
-            bank.transfer(5, &mint.keypair(), pk, mint.last_id())
-                .unwrap();
-
-            // Create a vote account
-            let new_vote_account =
-                create_vote_account(&new_keypair, &bank, 1, mint.last_id()).unwrap();
-
-            push_vote(
-                &new_vote_account,
-                &bank,
-                start_height + active_window_length,
-                mint.last_id(),
-            );
+            leader_scheduler.push_vote(pk, start_height + active_window_length);
         }
 
         // Queries for the active set
-        let result =
-            leader_scheduler.get_active_set(active_window_length + start_height - 1, &bank);
-        assert_eq!(result, old_ids);
+        let result = leader_scheduler.get_active_set(active_window_length + start_height - 1);
+        assert_eq!(result.len(), num_old_ids);
+        let result_set = to_hashset_owned(&result);
+        assert_eq!(result_set, old_ids);
 
-        let result = leader_scheduler.get_active_set(active_window_length + start_height, &bank);
-        assert_eq!(result, new_ids);
+        let result = leader_scheduler.get_active_set(active_window_length + start_height);
+        assert_eq!(result.len(), num_new_ids);
+        let result_set = to_hashset_owned(&result);
+        assert_eq!(result_set, new_ids);
 
-        let result =
-            leader_scheduler.get_active_set(2 * active_window_length + start_height - 1, &bank);
-        assert_eq!(result, new_ids);
+        let result = leader_scheduler.get_active_set(2 * active_window_length + start_height - 1);
+        assert_eq!(result.len(), num_new_ids);
+        let result_set = to_hashset_owned(&result);
+        assert_eq!(result_set, new_ids);
 
-        let result =
-            leader_scheduler.get_active_set(2 * active_window_length + start_height, &bank);
-        assert!(result.is_empty());
+        let result = leader_scheduler.get_active_set(2 * active_window_length + start_height);
+        assert_eq!(result.len(), 0);
+        let result_set = to_hashset_owned(&result);
+        assert!(result_set.is_empty());
     }
 
     #[test]
@@ -800,7 +754,7 @@ mod tests {
         }
 
         let validators_pk: Vec<Pubkey> = validators.iter().map(Keypair::pubkey).collect();
-        let result = LeaderScheduler::rank_active_set(&bank, validators_pk.iter());
+        let result = LeaderScheduler::rank_active_set(&bank, &validators_pk[..]);
 
         assert_eq!(result.len(), validators.len());
 
@@ -830,7 +784,7 @@ mod tests {
             .chain(new_validators.iter())
             .map(Keypair::pubkey)
             .collect();
-        let result = LeaderScheduler::rank_active_set(&bank, all_validators.iter());
+        let result = LeaderScheduler::rank_active_set(&bank, &all_validators[..]);
         assert_eq!(result.len(), new_validators.len());
 
         for (i, (pk, balance)) in result.into_iter().enumerate() {
@@ -856,7 +810,7 @@ mod tests {
                 .unwrap();
         }
 
-        let result = LeaderScheduler::rank_active_set(&bank, tied_validators_pk.iter());
+        let result = LeaderScheduler::rank_active_set(&bank, &tied_validators_pk[..]);
         let mut sorted: Vec<&Pubkey> = tied_validators_pk.iter().map(|x| x).collect();
         sorted.sort_by(|pk1, pk2| pk1.cmp(pk2));
         assert_eq!(result.len(), tied_validators_pk.len());
@@ -968,7 +922,6 @@ mod tests {
     #[test]
     fn test_scheduler_active_window() {
         let num_validators = 10;
-        let num_vote_account_tokens = 1;
         // Set up the LeaderScheduler struct
         let bootstrap_leader_id = Keypair::new().pubkey();
         let bootstrap_height = 500;
@@ -990,10 +943,7 @@ mod tests {
         let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
 
         // Create the bank and validators
-        let mint = Mint::new(
-            ((((num_validators + 1) / 2) * (num_validators + 1))
-                + (num_vote_account_tokens * num_validators)) as i64,
-        );
+        let mint = Mint::new((((num_validators + 1) / 2) * (num_validators + 1)) as i64);
         let bank = Bank::new(&mint);
         let mut validators = vec![];
         let last_id = mint
@@ -1005,29 +955,10 @@ mod tests {
             let new_validator = Keypair::new();
             let new_pubkey = new_validator.pubkey();
             validators.push(new_pubkey);
-            // Give the validator some tokens
-            bank.transfer(
-                (i + 1 + num_vote_account_tokens) as i64,
-                &mint.keypair(),
-                new_pubkey,
-                last_id,
-            ).unwrap();
-
-            // Create a vote account
-            let new_vote_account = create_vote_account(
-                &new_validator,
-                &bank,
-                num_vote_account_tokens as i64,
-                mint.last_id(),
-            ).unwrap();
-
             // Vote at height i * active_window_length for validator i
-            push_vote(
-                &new_vote_account,
-                &bank,
-                i * active_window_length + bootstrap_height,
-                mint.last_id(),
-            );
+            leader_scheduler.push_vote(new_pubkey, i * active_window_length + bootstrap_height);
+            bank.transfer((i + 1) as i64, &mint.keypair(), new_pubkey, last_id)
+                .unwrap();
         }
 
         // Generate schedule every active_window_length entries and check that
@@ -1048,12 +979,8 @@ mod tests {
 
     #[test]
     fn test_multiple_vote() {
-        let leader_keypair = Keypair::new();
-        let leader_id = leader_keypair.pubkey();
+        let leader_id = Keypair::new().pubkey();
         let active_window_length = 1000;
-        let mint = Mint::new(10000);
-        let bank = Bank::new(&mint);
-
         let leader_scheduler_config = LeaderSchedulerConfig::new(
             leader_id,
             Some(100),
@@ -1064,38 +991,18 @@ mod tests {
 
         let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
 
-        // Give the node some tokens
-        bank.transfer(5, &mint.keypair(), leader_id, bank.last_id())
-            .unwrap();
-
-        // Check that a node that votes twice in a row will get included in the active
+        // Check that a validator that votes twice in a row will get included in the active
         // window
         let initial_vote_height = 1;
 
-        // Create a vote account
-        let new_vote_account =
-            create_vote_account(&leader_keypair, &bank, 1, mint.last_id()).unwrap();
-
         // Vote twice
-        push_vote(
-            &new_vote_account,
-            &bank,
-            initial_vote_height,
-            mint.last_id(),
-        );
-        push_vote(
-            &new_vote_account,
-            &bank,
-            initial_vote_height + 1,
-            mint.last_id(),
-        );
+        leader_scheduler.push_vote(leader_id, initial_vote_height);
+        leader_scheduler.push_vote(leader_id, initial_vote_height + 1);
+        let result = leader_scheduler.get_active_set(initial_vote_height + active_window_length);
+        assert_eq!(result, vec![leader_id]);
 
         let result =
-            leader_scheduler.get_active_set(initial_vote_height + active_window_length, &bank);
-        assert_eq!(result, to_hashset_owned(&vec![leader_id]));
-        let result =
-            leader_scheduler.get_active_set(initial_vote_height + active_window_length + 1, &bank);
-        assert!(result.is_empty());
+            leader_scheduler.get_active_set(initial_vote_height + active_window_length + 1);
+        assert_eq!(result, vec![]);
     }
 
     #[test]
@@ -1156,6 +1063,13 @@ mod tests {
             DEFAULT_SEED_ROTATION_INTERVAL
        );
 
+        // Check defaults for ActiveValidators
+        let active_validators = ActiveValidators::new(None);
+        assert_eq!(
+            active_validators.active_window_length,
+            DEFAULT_ACTIVE_WINDOW_LENGTH
+        );
+
         // Check actual arguments for LeaderScheduler
         let bootstrap_height = 500;
         let leader_rotation_interval = 100;
@@ -1182,11 +1096,14 @@ mod tests {
             leader_scheduler.seed_rotation_interval,
             seed_rotation_interval
         );
 
+        // Check actual arguments for ActiveValidators
+        let active_validators = ActiveValidators::new(Some(active_window_length));
+        assert_eq!(active_validators.active_window_length, active_window_length);
     }
 
     fn run_consecutive_leader_test(num_slots_per_epoch: u64, add_validator: bool) {
-        let bootstrap_leader_keypair = Keypair::new();
-        let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
+        let bootstrap_leader_id = Keypair::new().pubkey();
         let bootstrap_height = 500;
         let leader_rotation_interval = 100;
         let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;
@@ -1213,20 +1130,11 @@ mod tests {
         let initial_vote_height = 1;
 
         // Create and add validator to the active set
-        let validator_keypair = Keypair::new();
-        let validator_id = validator_keypair.pubkey();
+        let validator_id = Keypair::new().pubkey();
         if add_validator {
-            bank.transfer(5, &mint.keypair(), validator_id, last_id)
+            leader_scheduler.push_vote(validator_id, initial_vote_height);
+            bank.transfer(1, &mint.keypair(), validator_id, last_id)
                 .unwrap();
-            // Create a vote account
-            let new_vote_account =
-                create_vote_account(&validator_keypair, &bank, 1, mint.last_id()).unwrap();
-            push_vote(
-                &new_vote_account,
-                &bank,
-                initial_vote_height,
-                mint.last_id(),
-            );
         }
 
         // Make sure the bootstrap leader, not the validator, is picked again on next slot
@@ -1243,29 +1151,10 @@ mod tests {
             }
         };
 
-        let vote_account_tokens = 1;
-        bank.transfer(
-            leader_stake + vote_account_tokens,
-            &mint.keypair(),
-            bootstrap_leader_id,
-            last_id,
-        ).unwrap();
-
-        // Create a vote account
-        let new_vote_account = create_vote_account(
-            &bootstrap_leader_keypair,
-            &bank,
-            vote_account_tokens,
-            mint.last_id(),
-        ).unwrap();
-
         // Add leader to the active set
-        push_vote(
-            &new_vote_account,
-            &bank,
-            initial_vote_height,
-            mint.last_id(),
-        );
+        leader_scheduler.push_vote(bootstrap_leader_id, initial_vote_height);
+        bank.transfer(leader_stake, &mint.keypair(), bootstrap_leader_id, last_id)
+            .unwrap();
 
         leader_scheduler.generate_schedule(bootstrap_height, &bank);
 
@@ -1293,8 +1182,7 @@ mod tests {
 
     #[test]
     fn test_max_height_for_leader() {
-        let bootstrap_leader_keypair = Keypair::new();
-        let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
+        let bootstrap_leader_id = Keypair::new().pubkey();
         let bootstrap_height = 500;
         let leader_rotation_interval = 100;
         let seed_rotation_interval = 2 * leader_rotation_interval;
@@ -1366,34 +1254,15 @@ mod tests {
         // Now test when the active set > 1 node
 
         // Create and add validator to the active set
-        let validator_keypair = Keypair::new();
-        let validator_id = validator_keypair.pubkey();
-        // Create a vote account for the validator
-        bank.transfer(5, &mint.keypair(), validator_id, last_id)
+        let validator_id = Keypair::new().pubkey();
+        leader_scheduler.push_vote(validator_id, initial_vote_height);
+        bank.transfer(1, &mint.keypair(), validator_id, last_id)
             .unwrap();
-        let new_validator_vote_account =
-            create_vote_account(&validator_keypair, &bank, 1, mint.last_id()).unwrap();
-        push_vote(
-            &new_validator_vote_account,
-            &bank,
-            initial_vote_height,
-            mint.last_id(),
-        );
-
-        // Create a vote account for the leader
-        bank.transfer(5, &mint.keypair(), bootstrap_leader_id, last_id)
-            .unwrap();
-        let new_leader_vote_account =
-            create_vote_account(&bootstrap_leader_keypair, &bank, 1, mint.last_id()).unwrap();
 
         // Add leader to the active set
-        push_vote(
-            &new_leader_vote_account,
-            &bank,
-            initial_vote_height,
-            mint.last_id(),
-        );
+        leader_scheduler.push_vote(bootstrap_leader_id, initial_vote_height);
+        bank.transfer(1, &mint.keypair(), bootstrap_leader_id, last_id)
+            .unwrap();
 
         // Generate the schedule
         leader_scheduler.generate_schedule(bootstrap_height, &bank);
159 src/leader_vote_stage.rs Normal file
@@ -0,0 +1,159 @@
+//! The `leader_vote_stage` module implements the TPU's vote stage. It
+//! computes and notes the votes for the entries, and then sends the
+//! Entry to its output channel.
+
+use bank::Bank;
+use cluster_info::ClusterInfo;
+use counter::Counter;
+use entry::Entry;
+use ledger::Block;
+use log::Level;
+use result::{Error, Result};
+use service::Service;
+use signature::Keypair;
+use std::net::UdpSocket;
+use std::sync::atomic::AtomicUsize;
+use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
+use std::sync::{Arc, RwLock};
+use std::thread::{self, Builder, JoinHandle};
+use std::time::{Duration, Instant};
+use streamer::responder;
+use timing::duration_as_ms;
+use vote_stage::send_leader_vote;
+
+pub struct LeaderVoteStage {
+    thread_hdls: Vec<JoinHandle<()>>,
+    vote_thread: JoinHandle<()>,
+}
+
+impl LeaderVoteStage {
+    /// Process any Entry items that have been published by the RecordStage.
+    /// continuosly send entries out
+    pub fn compute_vote_and_send_entries(
+        cluster_info: &Arc<RwLock<ClusterInfo>>,
+        entry_sender: &Sender<Vec<Entry>>,
+        entry_receiver: &Receiver<Vec<Entry>>,
+    ) -> Result<()> {
+        let mut ventries = Vec::new();
+        let mut received_entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
+        let now = Instant::now();
+        let mut num_new_entries = 0;
+
+        loop {
+            num_new_entries += received_entries.len();
+            ventries.push(received_entries);
+
+            if let Ok(n) = entry_receiver.try_recv() {
+                received_entries = n;
+            } else {
+                break;
+            }
+        }
+        inc_new_counter_info!("leader_vote_stage-entries_received", num_new_entries);
+        debug!("leader_vote_stage entries: {}", num_new_entries);
+
+        for entries in ventries {
+            let votes = &entries.votes();
+            cluster_info.write().unwrap().insert_votes(&votes);
+
+            inc_new_counter_info!("leader_vote_stage-write_entries", entries.len());
+
+            //TODO(anatoly): real stake based voting needs to change this
+            //leader simply votes if the current set of validators have voted
+            //on a valid last id
+
+            trace!("New entries? {}", entries.len());
+            if !entries.is_empty() {
+                inc_new_counter_info!("leader_vote_stage-recv_vote", votes.len());
+                inc_new_counter_info!("leader_vote_stage-entries_sent", entries.len());
+                trace!("broadcasting {}", entries.len());
+                entry_sender.send(entries)?;
+            }
+        }
+        inc_new_counter_info!(
+            "leader_vote_stage-time_ms",
+            duration_as_ms(&now.elapsed()) as usize
+        );
+
+        Ok(())
+    }
+
+    /// Create a new LeaderVoteStage for voting and broadcasting entries.
+    pub fn new(
+        keypair: Arc<Keypair>,
+        bank: Arc<Bank>,
+        cluster_info: Arc<RwLock<ClusterInfo>>,
+        entry_receiver: Receiver<Vec<Entry>>,
+    ) -> (Self, Receiver<Vec<Entry>>) {
+        let (vote_blob_sender, vote_blob_receiver) = channel();
+        let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
+        let t_responder = responder(
+            "leader_vote_stage_vote_sender",
+            Arc::new(send),
+            vote_blob_receiver,
+        );
+        let (entry_sender, entry_receiver_forward) = channel();
+
+        let vote_thread = Builder::new()
+            .name("solana-writer".to_string())
+            .spawn(move || {
+                let mut last_vote = 0;
+                let mut last_valid_validator_timestamp = 0;
+                let id = cluster_info.read().unwrap().id;
+                loop {
+                    if let Err(e) = Self::compute_vote_and_send_entries(
+                        &cluster_info,
+                        &entry_sender,
+                        &entry_receiver,
+                    ) {
+                        match e {
+                            Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
+                                break;
+                            }
+                            Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
+                            _ => {
+                                inc_new_counter_info!(
+                                    "leader_vote_stage-compute_vote_and_send_entries-error",
+                                    1
+                                );
+                                error!("{:?}", e);
+                            }
+                        }
+                    };
+                    if let Err(e) = send_leader_vote(
+                        &id,
+                        &keypair,
+                        &bank,
+                        &cluster_info,
+                        &vote_blob_sender,
+                        &mut last_vote,
+                        &mut last_valid_validator_timestamp,
+                    ) {
+                        inc_new_counter_info!("leader_vote_stage-leader_vote-error", 1);
+                        error!("{:?}", e);
+                    }
+                }
+            }).unwrap();
+
+        let thread_hdls = vec![t_responder];
+        (
+            LeaderVoteStage {
+                vote_thread,
+                thread_hdls,
+            },
+            entry_receiver_forward,
+        )
+    }
+}
+
+impl Service for LeaderVoteStage {
+    type JoinReturnType = ();
+
+    fn join(self) -> thread::Result<()> {
+        for thread_hdl in self.thread_hdls {
+            thread_hdl.join()?;
+        }
+
+        self.vote_thread.join()
+    }
+}
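Note: `compute_vote_and_send_entries` blocks for at most a second on the first batch and then drains whatever is already queued without blocking again, so one pass can cover many entry batches. The coalescing pattern in isolation, using only the standard library:

```rust
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::time::Duration;

// Block for the first batch, then drain the channel non-blockingly.
fn coalesce<T>(rx: &Receiver<Vec<T>>) -> Result<Vec<Vec<T>>, RecvTimeoutError> {
    let mut batches = Vec::new();
    let mut batch = rx.recv_timeout(Duration::new(1, 0))?;
    loop {
        batches.push(batch);
        match rx.try_recv() {
            Ok(next) => batch = next,
            Err(_) => break, // nothing else queued right now
        }
    }
    Ok(batches)
}

fn main() {
    let (tx, rx) = channel();
    tx.send(vec![1, 2]).unwrap();
    tx.send(vec![3]).unwrap();
    let batches = coalesce(&rx).unwrap();
    assert_eq!(batches.len(), 2);
}
```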
@@ -3,7 +3,7 @@
 //! access read to a persistent file-based ledger.
 
 use bincode::{self, deserialize, deserialize_from, serialize_into, serialized_size};
-#[cfg(test)]
+use budget_instruction::Vote;
 use budget_transaction::BudgetTransaction;
 #[cfg(test)]
 use chrono::prelude::Utc;
@@ -25,8 +25,6 @@ use std::mem::size_of;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::path::Path;
 use transaction::Transaction;
-use vote_program::Vote;
-use vote_transaction::VoteTransaction;
 use window::WINDOW_SIZE;
 
 //
@@ -498,7 +496,7 @@ impl Block for [Entry] {
             entry
                 .transactions
                 .iter()
-                .flat_map(VoteTransaction::get_votes)
+                .filter_map(BudgetTransaction::vote)
         }).collect()
     }
 }
@@ -686,6 +684,7 @@ pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
 mod tests {
     use super::*;
     use bincode::serialized_size;
+    use budget_instruction::Vote;
     use budget_transaction::BudgetTransaction;
     use entry::{next_entry, Entry};
     use hash::hash;
@@ -694,7 +693,6 @@ mod tests {
     use std;
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
     use transaction::Transaction;
-    use vote_program::Vote;
 
     #[test]
     fn test_verify_slice() {
@@ -716,8 +714,15 @@ mod tests {
         let zero = Hash::default();
         let one = hash(&zero.as_ref());
         let keypair = Keypair::new();
-        let vote_account = Keypair::new();
-        let tx0 = Transaction::vote_new(&vote_account, Vote { tick_height: 1 }, one, 1);
+        let tx0 = Transaction::budget_new_vote(
+            &keypair,
+            Vote {
+                version: 0,
+                contact_info_version: 1,
+            },
+            one,
+            1,
+        );
         let tx1 = Transaction::budget_new_timestamp(
             &keypair,
             keypair.pubkey(),
@@ -767,8 +772,15 @@ mod tests {
         let id = Hash::default();
         let next_id = hash(&id.as_ref());
         let keypair = Keypair::new();
-        let vote_account = Keypair::new();
-        let tx_small = Transaction::vote_new(&vote_account, Vote { tick_height: 1 }, next_id, 2);
+        let tx_small = Transaction::budget_new_vote(
+            &keypair,
+            Vote {
+                version: 0,
+                contact_info_version: 2,
+            },
+            next_id,
+            2,
+        );
         let tx_large = Transaction::budget_new(&keypair, keypair.pubkey(), 1, next_id);
 
         let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
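Note on the `Block::votes()` change above: with votes carried by budget transactions, each transaction yields at most one vote, so the `flat_map` over `VoteTransaction::get_votes` collapses to a `filter_map`. A stand-alone sketch, with `Tx` and `Vote` as simplified stand-ins for the crate's types:

```rust
#[derive(Clone, Debug, PartialEq)]
struct Vote {
    version: u64,
    contact_info_version: u64,
}

struct Tx {
    vote: Option<Vote>,
}

// filter_map keeps at most one vote per transaction.
fn votes(transactions: &[Tx]) -> Vec<Vote> {
    transactions
        .iter()
        .filter_map(|tx| tx.vote.clone())
        .collect()
}

fn main() {
    let txs = vec![
        Tx { vote: None },
        Tx {
            vote: Some(Vote {
                version: 0,
                contact_info_version: 1,
            }),
        },
    ];
    assert_eq!(votes(&txs).len(), 1);
}
```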
@@ -35,6 +35,7 @@ pub mod fetch_stage;
 pub mod fullnode;
 pub mod hash;
 pub mod leader_scheduler;
+pub mod leader_vote_stage;
 pub mod ledger;
 pub mod ledger_write_stage;
 pub mod loader_transaction;
@@ -79,9 +80,7 @@ pub mod token_program;
 pub mod tpu;
 pub mod transaction;
 pub mod tvu;
-pub mod vote_program;
 pub mod vote_stage;
-pub mod vote_transaction;
 pub mod wallet;
 pub mod window;
 pub mod window_service;
@@ -40,7 +40,9 @@ fn create_path(name: &str) -> PathBuf {
     )
 }
 
-const NATIVE_LOADER_PROGRAM_ID: [u8; 32] = [2u8; 32];
+const NATIVE_LOADER_PROGRAM_ID: [u8; 32] = [
+    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+];
 
 // All native programs export a symbol named process()
 const ENTRYPOINT: &str = "process";
@@ -68,16 +70,21 @@ pub fn process_transaction(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8])
         trace!("Call native {:?}", name);
         let path = create_path(&name);
         // TODO linux tls bug can cause crash on dlclose(), workaround by never unloading
-        let library = Library::open(Some(path), libc::RTLD_NODELETE | libc::RTLD_NOW).unwrap();
-        unsafe {
+        match Library::open(Some(&path), libc::RTLD_NODELETE | libc::RTLD_NOW) {
+            Ok(library) => unsafe {
                 let entrypoint: Symbol<Entrypoint> = match library.get(ENTRYPOINT.as_bytes()) {
                     Ok(s) => s,
                     Err(e) => {
                         warn!("{:?}: Unable to find {:?} in program", e, ENTRYPOINT);
                         return false;
                     }
                 };
                 return entrypoint(&mut keyed_accounts[1..], tx_data);
+            },
+            Err(e) => {
+                warn!("Unable to load: {:?}", e);
+                return false;
+            }
         }
     } else if let Ok(instruction) = deserialize(tx_data) {
         match instruction {
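Note: the point of the native-loader change above is that `.unwrap()` on the dynamic-library load becomes a `match`, so an unloadable native program logs a warning and fails the transaction instead of panicking the fullnode. The shape of that refactor, with `load_library` standing in for the real libloading call:

```rust
struct Library;

// Hypothetical loader; real code opens a shared object from disk.
fn load_library(path: &str) -> Result<Library, String> {
    if path.ends_with(".so") {
        Ok(Library)
    } else {
        Err(format!("cannot open {}", path))
    }
}

fn process_transaction(path: &str) -> bool {
    match load_library(path) {
        Ok(_library) => {
            // resolve the "process" entrypoint and invoke it here
            true
        }
        Err(e) => {
            eprintln!("Unable to load: {:?}", e);
            false // fail the transaction, don't panic the node
        }
    }
}

fn main() {
    assert!(process_transaction("token_program.so"));
    assert!(!process_transaction("missing"));
}
```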
@@ -6,6 +6,8 @@ use counter::Counter;
 use entry::{EntryReceiver, EntrySender};
 use hash::Hash;
 use influx_db_client as influxdb;
+use leader_scheduler::LeaderScheduler;
+use ledger::Block;
 use log::Level;
 use metrics;
 use result::{Error, Result};
@@ -57,10 +59,11 @@ impl ReplicateStage {
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         window_receiver: &EntryReceiver,
         keypair: &Arc<Keypair>,
-        vote_account_keypair: &Arc<Keypair>,
         vote_blob_sender: Option<&BlobSender>,
         ledger_entry_sender: &EntrySender,
+        tick_height: &mut u64,
         entry_height: &mut u64,
+        leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
     ) -> Result<Hash> {
         let timer = Duration::new(1, 0);
         //coalesce all the available entries into a single vote
@@ -78,23 +81,37 @@ impl ReplicateStage {
         let mut res = Ok(());
         let last_entry_id = {
             let mut num_entries_to_write = entries.len();
-            let current_leader = bank
-                .get_current_leader()
-                .expect("Scheduled leader id should never be unknown while processing entries");
             for (i, entry) in entries.iter().enumerate() {
-                res = bank.process_entry(&entry);
-                let my_id = keypair.pubkey();
-                let scheduled_leader = bank
-                    .get_current_leader()
-                    .expect("Scheduled leader id should never be unknown while processing entries");
-
-                // TODO: Remove this soon once we boot the leader from ClusterInfo
-                if scheduled_leader != current_leader {
-                    cluster_info.write().unwrap().set_leader(scheduled_leader);
-                }
-                if my_id == scheduled_leader {
-                    num_entries_to_write = i + 1;
-                    break;
+                // max_tick_height is the PoH height at which the next leader rotation will
+                // happen. The leader should send an entry such that the total PoH is equal
+                // to max_tick_height - guard.
+                // TODO: Introduce a "guard" for the end of transmission periods, the guard
+                // is assumed to be zero for now.
+                let max_tick_height = {
+                    let ls_lock = leader_scheduler.read().unwrap();
+                    ls_lock.max_height_for_leader(*tick_height)
+                };
+
+                res = bank.process_entry(
+                    &entry,
+                    tick_height,
+                    &mut *leader_scheduler.write().unwrap(),
+                );
+
+                // Will run only if leader_scheduler.use_only_bootstrap_leader is false
+                if let Some(max_tick_height) = max_tick_height {
+                    let ls_lock = leader_scheduler.read().unwrap();
+                    if *tick_height == max_tick_height {
+                        let my_id = keypair.pubkey();
+                        let scheduled_leader = ls_lock.get_scheduled_leader(*tick_height).expect(
+                            "Scheduled leader id should never be unknown while processing entries",
+                        );
+                        cluster_info.write().unwrap().set_leader(scheduled_leader);
+                        if my_id == scheduled_leader {
+                            num_entries_to_write = i + 1;
+                            break;
+                        }
+                    }
                 }
             }
 
             if res.is_err() {
@@ -117,9 +134,11 @@ impl ReplicateStage {
         };
 
         if let Some(sender) = vote_blob_sender {
-            send_validator_vote(bank, vote_account_keypair, &cluster_info, sender)?;
+            send_validator_vote(bank, keypair, cluster_info, sender)?;
         }
 
+        cluster_info.write().unwrap().insert_votes(&entries.votes());
+
         inc_new_counter_info!(
             "replicate-transactions",
             entries.iter().map(|x| x.transactions.len()).sum()
@@ -141,12 +160,13 @@ impl ReplicateStage {
 
     pub fn new(
         keypair: Arc<Keypair>,
-        vote_account_keypair: Arc<Keypair>,
         bank: Arc<Bank>,
         cluster_info: Arc<RwLock<ClusterInfo>>,
         window_receiver: EntryReceiver,
         exit: Arc<AtomicBool>,
+        tick_height: u64,
         entry_height: u64,
+        leader_scheduler: Arc<RwLock<LeaderScheduler>>,
     ) -> (Self, EntryReceiver) {
         let (vote_blob_sender, vote_blob_receiver) = channel();
         let (ledger_entry_sender, ledger_entry_receiver) = channel();
@@ -162,15 +182,17 @@ impl ReplicateStage {
                 let now = Instant::now();
                 let mut next_vote_secs = 1;
                 let mut entry_height_ = entry_height;
+                let mut tick_height_ = tick_height;
                 let mut last_entry_id = None;
                 loop {
-                    let leader_id =
-                        bank.get_current_leader()
-                            .expect("Scheduled leader id should never be unknown at this point");
+                    let leader_id = leader_scheduler
+                        .read()
+                        .unwrap()
+                        .get_scheduled_leader(tick_height_)
+                        .expect("Scheduled leader id should never be unknown at this point");
                     if leader_id == keypair.pubkey() {
                         return Some(ReplicateStageReturnType::LeaderRotation(
-                            bank.get_tick_height(),
+                            tick_height_,
                             entry_height_,
                             // We should never start the TPU / this stage on an exact entry that causes leader
                             // rotation (Fullnode should automatically transition on startup if it detects
@@ -193,10 +215,11 @@ impl ReplicateStage {
                         &cluster_info,
                         &window_receiver,
                         &keypair,
-                        &vote_account_keypair,
                         vote_sender,
                         &ledger_entry_sender,
+                        &mut tick_height_,
                         &mut entry_height_,
+                        &leader_scheduler,
                     ) {
                         Err(Error::RecvTimeoutError(RecvTimeoutError::Disconnected)) => break,
                         Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
@@ -271,7 +294,7 @@ mod test {
         // 1) Give the validator a nonzero number of tokens 2) A vote from the validator .
         // This will cause leader rotation after the bootstrap height
         let mut ledger_writer = LedgerWriter::open(&my_ledger_path, false).unwrap();
-        let (active_set_entries, vote_account_keypair) =
+        let active_set_entries =
            make_active_set_entries(&my_keypair, &mint.keypair(), &last_id, &last_id, 0);
         last_id = active_set_entries.last().unwrap().id;
         let initial_tick_height = genesis_entries
@@ -296,23 +319,26 @@ mod test {
             Some(bootstrap_height),
         );
 
-        let leader_scheduler =
-            Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config)));
+        let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
 
         // Set up the bank
-        let (bank, _, _, _) = Fullnode::new_bank_from_ledger(&my_ledger_path, leader_scheduler);
+        let (bank, _, _, _) =
+            Fullnode::new_bank_from_ledger(&my_ledger_path, &mut leader_scheduler);
+
+        let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
 
         // Set up the replicate stage
         let (entry_sender, entry_receiver) = channel();
         let exit = Arc::new(AtomicBool::new(false));
         let (replicate_stage, _ledger_writer_recv) = ReplicateStage::new(
             Arc::new(my_keypair),
-            Arc::new(vote_account_keypair),
             Arc::new(bank),
             Arc::new(RwLock::new(cluster_info_me)),
             entry_receiver,
             exit.clone(),
+            initial_tick_height,
             initial_entry_len,
+            leader_scheduler.clone(),
         );
 
         // Send enough ticks to trigger leader rotation
@@ -349,6 +375,13 @@ mod test {
 
         assert_eq!(exit.load(Ordering::Relaxed), true);
 
+        // Check ledger height is correct
+        let mut leader_scheduler = Arc::try_unwrap(leader_scheduler)
+            .expect("Multiple references to this RwLock still exist")
+            .into_inner()
+            .expect("RwLock for LeaderScheduler is still locked");
+
+        leader_scheduler.reset();
         let _ignored = remove_dir_all(&my_ledger_path);
     }
 }
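Note: in the reworked `process_entries` above, the `LeaderScheduler` read guard is confined to an inner block so it is dropped before `process_entry` takes the write lock on the same `RwLock`; holding both at once would self-deadlock. A minimal sketch of that lock discipline (a `u64` payload stands in for the real scheduler):

```rust
use std::sync::RwLock;

fn process_one(leader_scheduler: &RwLock<u64>, tick_height: u64) -> u64 {
    // Scope the read guard so it is released before the write below.
    let max_tick_height = {
        let ls = leader_scheduler.read().unwrap();
        *ls + 100
    }; // read guard dropped here

    // Now the write lock can be taken safely.
    *leader_scheduler.write().unwrap() = tick_height;

    max_tick_height
}

fn main() {
    let ls = RwLock::new(500);
    let max = process_one(&ls, 600);
    assert_eq!(max, 600);
    assert_eq!(*ls.read().unwrap(), 600);
}
```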
@ -198,16 +198,14 @@ mod tests {
|
|||||||
let (mint, leader_ledger_path) = create_tmp_genesis(leader_ledger_path, 100);
|
let (mint, leader_ledger_path) = create_tmp_genesis(leader_ledger_path, 100);
|
||||||
|
|
||||||
info!("starting leader node");
|
info!("starting leader node");
|
||||||
let leader_keypair = Arc::new(Keypair::new());
|
let leader_keypair = Keypair::new();
|
||||||
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
||||||
let network_addr = leader_node.sockets.gossip.local_addr().unwrap();
|
let network_addr = leader_node.sockets.gossip.local_addr().unwrap();
|
||||||
let leader_info = leader_node.info.clone();
|
let leader_info = leader_node.info.clone();
|
||||||
let vote_account_keypair = Arc::new(Keypair::new());
|
|
||||||
let leader = Fullnode::new(
|
let leader = Fullnode::new(
|
||||||
leader_node,
|
leader_node,
|
||||||
&leader_ledger_path,
|
&leader_ledger_path,
|
||||||
leader_keypair,
|
leader_keypair,
|
||||||
vote_account_keypair,
|
|
||||||
None,
|
None,
|
||||||
false,
|
false,
|
||||||
LeaderScheduler::from_bootstrap_leader(leader_info.id),
|
LeaderScheduler::from_bootstrap_leader(leader_info.id),
|
||||||
@@ -25,31 +25,31 @@ impl RequestProcessor {
             Request::GetAccount { key } => {
                 let account = self.bank.get_account(&key);
                 let rsp = (Response::Account { key, account }, rsp_addr);
-                info!("Response::Account {:?}", rsp);
+                debug!("Response::Account {:?}", rsp);
                 Some(rsp)
             }
             Request::GetLastId => {
                 let id = self.bank.last_id();
                 let rsp = (Response::LastId { id }, rsp_addr);
-                info!("Response::LastId {:?}", rsp);
+                debug!("Response::LastId {:?}", rsp);
                 Some(rsp)
             }
             Request::GetTransactionCount => {
                 let transaction_count = self.bank.transaction_count() as u64;
                 let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
-                info!("Response::TransactionCount {:?}", rsp);
+                debug!("Response::TransactionCount {:?}", rsp);
                 Some(rsp)
             }
             Request::GetSignature { signature } => {
                 let signature_status = self.bank.has_signature(&signature);
                 let rsp = (Response::SignatureStatus { signature_status }, rsp_addr);
-                info!("Response::Signature {:?}", rsp);
+                debug!("Response::Signature {:?}", rsp);
                 Some(rsp)
             }
             Request::GetFinality => {
                 let time = self.bank.finality();
                 let rsp = (Response::Finality { time }, rsp_addr);
-                info!("Response::Finality {:?}", rsp);
+                debug!("Response::Finality {:?}", rsp);
                 Some(rsp)
             }
         }
@@ -60,7 +60,7 @@ impl RequestStage {
 
         let blobs = to_blobs(rsps)?;
         if !blobs.is_empty() {
-            info!("process: sending blobs: {}", blobs.len());
+            debug!("process: sending blobs: {}", blobs.len());
             //don't wake up the other side if there is nothing
             blob_sender.send(blobs)?;
         }
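These hunks demote per-request response logging from `info!` to `debug!`, so steady-state request traffic stays quiet unless verbose logging is switched on. A small sketch of that level split, assuming the `log` facade with an `env_logger` backend (not this crate's actual modules):

```rust
use log::{debug, info};

// Hot path: logged at debug, visible only under e.g. RUST_LOG=debug.
fn handle_request(n: u64) -> u64 {
    let rsp = n + 1;
    debug!("Response::Example {:?}", rsp);
    rsp
}

fn main() {
    env_logger::init();
    info!("service started"); // one-off lifecycle events stay at info
    let _ = handle_request(41);
}
```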
@@ -10,7 +10,6 @@ use poh_recorder;
 use serde_json;
 use std;
 use std::any::Any;
-use vote_stage;
 
 #[derive(Debug)]
 pub enum Error {
@@ -28,7 +27,6 @@ pub enum Error {
     ErasureError(erasure::ErasureError),
     SendError,
     PohRecorderError(poh_recorder::PohRecorderError),
-    VoteError(vote_stage::VoteError),
 }
 
 pub type Result<T> = std::result::Result<T, Error>;
@@ -102,11 +100,6 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
         Error::PohRecorderError(e)
     }
 }
-impl std::convert::From<vote_stage::VoteError> for Error {
-    fn from(e: vote_stage::VoteError) -> Error {
-        Error::VoteError(e)
-    }
-}
 
 #[cfg(test)]
 mod tests {
33 src/rpc.rs
@@ -28,7 +28,6 @@ pub const RPC_PORT: u16 = 8899;
 
 pub struct JsonRpcService {
     thread_hdl: JoinHandle<()>,
-    exit: Arc<AtomicBool>,
 }
 
 impl JsonRpcService {
@@ -36,12 +35,11 @@ impl JsonRpcService {
         bank: &Arc<Bank>,
         cluster_info: &Arc<RwLock<ClusterInfo>>,
         rpc_addr: SocketAddr,
+        exit: Arc<AtomicBool>,
     ) -> Self {
-        let exit = Arc::new(AtomicBool::new(false));
         let request_processor = JsonRpcRequestProcessor::new(bank.clone());
         let info = cluster_info.clone();
         let exit_pubsub = exit.clone();
-        let exit_ = exit.clone();
         let thread_hdl = Builder::new()
             .name("solana-jsonrpc".to_string())
             .spawn(move || {
@@ -64,23 +62,14 @@ impl JsonRpcService {
                     warn!("JSON RPC service unavailable: unable to bind to RPC port {}. \nMake sure this port is not already in use by another application", rpc_addr.port());
                     return;
                 }
-                while !exit_.load(Ordering::Relaxed) {
+                while !exit.load(Ordering::Relaxed) {
                     sleep(Duration::from_millis(100));
                 }
                 server.unwrap().close();
                 ()
             })
             .unwrap();
-        JsonRpcService { thread_hdl, exit }
-    }
-
-    pub fn exit(&self) {
-        self.exit.store(true, Ordering::Relaxed);
-    }
-
-    pub fn close(self) -> thread::Result<()> {
-        self.exit();
-        self.join()
+        JsonRpcService { thread_hdl }
     }
 }
 
@@ -390,7 +379,8 @@ mod tests {
             ClusterInfo::new(NodeInfo::new_unspecified()).unwrap(),
         ));
         let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 24680);
-        let rpc_service = JsonRpcService::new(&Arc::new(bank), &cluster_info, rpc_addr);
+        let exit = Arc::new(AtomicBool::new(false));
+        let rpc_service = JsonRpcService::new(&Arc::new(bank), &cluster_info, rpc_addr, exit);
         let thread = rpc_service.thread_hdl.thread();
         assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
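The `JsonRpcService` change inverts ownership of the shutdown flag: instead of allocating its own `exit` and exposing `exit()`/`close()` methods, the service now borrows the caller's `Arc<AtomicBool>`, so one flip can stop several services at once. A std-only sketch of that pattern, with illustrative names:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use std::time::Duration;

struct Service {
    thread_hdl: JoinHandle<()>,
}

impl Service {
    // The caller owns the exit flag; the service only polls it.
    fn new(exit: Arc<AtomicBool>) -> Self {
        let thread_hdl = thread::spawn(move || {
            while !exit.load(Ordering::Relaxed) {
                thread::sleep(Duration::from_millis(100));
            }
        });
        Service { thread_hdl }
    }
}

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let svc_a = Service::new(exit.clone());
    let svc_b = Service::new(exit.clone());
    exit.store(true, Ordering::Relaxed); // one store stops every service
    svc_a.thread_hdl.join().unwrap();
    svc_b.thread_hdl.join().unwrap();
}
```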
@@ -596,11 +586,11 @@ mod tests {
 
     #[test]
     fn test_rpc_send_tx() {
-        let leader_keypair = Arc::new(Keypair::new());
+        let leader_keypair = Keypair::new();
         let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
 
         let alice = Mint::new(10_000_000);
-        let mut bank = Bank::new(&alice);
+        let bank = Bank::new(&alice);
         let bob_pubkey = Keypair::new().pubkey();
         let leader_data = leader.info.clone();
         let ledger_path = create_tmp_ledger_with_mint("rpc_send_tx", &alice);
@@ -612,16 +602,8 @@ mod tests {
 
         let genesis_entries = &alice.create_entries();
         let entry_height = genesis_entries.len() as u64;
 
-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader_data.id,
-        )));
-        bank.leader_scheduler = leader_scheduler;
-
-        let vote_account_keypair = Arc::new(Keypair::new());
         let server = Fullnode::new_with_bank(
             leader_keypair,
-            vote_account_keypair,
             bank,
             0,
             entry_height,
@@ -630,6 +612,7 @@ mod tests {
             None,
             &ledger_path,
             false,
+            LeaderScheduler::from_bootstrap_leader(leader_data.id),
             Some(rpc_port),
         );
         sleep(Duration::from_millis(900));
@@ -27,7 +27,6 @@ pub enum ClientState {
 
 pub struct PubSubService {
     thread_hdl: JoinHandle<()>,
-    exit: Arc<AtomicBool>,
 }
 
 impl Service for PubSubService {
@@ -39,10 +38,8 @@ impl Service for PubSubService {
 }
 
 impl PubSubService {
-    pub fn new(bank: &Arc<Bank>, pubsub_addr: SocketAddr) -> Self {
+    pub fn new(bank: &Arc<Bank>, pubsub_addr: SocketAddr, exit: Arc<AtomicBool>) -> Self {
         let rpc = RpcSolPubSubImpl::new(JsonRpcRequestProcessor::new(bank.clone()), bank.clone());
-        let exit = Arc::new(AtomicBool::new(false));
-        let exit_ = exit.clone();
         let thread_hdl = Builder::new()
             .name("solana-pubsub".to_string())
             .spawn(move || {
@@ -63,23 +60,14 @@ impl PubSubService {
                     warn!("Pubsub service unavailable: unable to bind to port {}. \nMake sure this port is not already in use by another application", pubsub_addr.port());
                     return;
                 }
-                while !exit_.load(Ordering::Relaxed) {
+                while !exit.load(Ordering::Relaxed) {
                     sleep(Duration::from_millis(100));
                 }
                 server.unwrap().close();
                 ()
             })
             .unwrap();
-        PubSubService { thread_hdl, exit }
-    }
-
-    pub fn exit(&self) {
-        self.exit.store(true, Ordering::Relaxed);
-    }
-
-    pub fn close(self) -> thread::Result<()> {
-        self.exit();
-        self.join()
+        PubSubService { thread_hdl }
     }
 }
 
@@ -139,7 +127,6 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
         subscriber: pubsub::Subscriber<Account>,
         pubkey_str: String,
     ) {
-        info!("account_subscribe");
         let pubkey_vec = bs58::decode(pubkey_str).into_vec().unwrap();
         if pubkey_vec.len() != mem::size_of::<Pubkey>() {
             subscriber
@@ -154,6 +141,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
 
         let id = self.uid.fetch_add(1, atomic::Ordering::SeqCst);
         let sub_id = SubscriptionId::Number(id as u64);
+        info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
         let sink = subscriber.assign_id(sub_id.clone()).unwrap();
         let bank_sub_id = Keypair::new().pubkey();
         self.account_subscriptions
@@ -166,7 +154,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
     }
 
     fn account_unsubscribe(&self, id: SubscriptionId) -> Result<bool> {
-        info!("account_unsubscribe");
+        info!("account_unsubscribe: id={:?}", id);
         if let Some((bank_sub_id, pubkey)) = self.account_subscriptions.write().unwrap().remove(&id)
         {
             self.bank.remove_account_subscription(&bank_sub_id, &pubkey);
@@ -261,7 +249,8 @@ mod tests {
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
-        let pubsub_service = PubSubService::new(&Arc::new(bank), pubsub_addr);
+        let exit = Arc::new(AtomicBool::new(false));
+        let pubsub_service = PubSubService::new(&Arc::new(bank), pubsub_addr, exit);
         let thread = pubsub_service.thread_hdl.thread();
         assert_eq!(thread.name().unwrap(), "solana-pubsub");
     }
@@ -17,7 +17,10 @@ pub enum StorageError {
     InvalidUserData,
 }
 
-pub const STORAGE_PROGRAM_ID: [u8; 32] = [1u8; 32];
+const STORAGE_PROGRAM_ID: [u8; 32] = [
+    130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0,
+];
 
 impl StorageProgram {
     pub fn check_id(program_id: &Pubkey) -> bool {
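The storage program's id moves from the placeholder `[1u8; 32]` to a dedicated byte pattern (first byte 130), matching the fixed-id convention used by the other built-in programs. A sketch of that convention; `Pubkey` here is a stand-in newtype, not the crate's type:

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Pubkey([u8; 32]);

// Each built-in program claims a fixed, well-known 32-byte id.
const STORAGE_PROGRAM_ID: [u8; 32] = [
    130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0,
];

fn id() -> Pubkey {
    Pubkey(STORAGE_PROGRAM_ID)
}

// Dispatch compares an instruction's program id byte-for-byte.
fn check_id(program_id: &Pubkey) -> bool {
    program_id.0 == STORAGE_PROGRAM_ID
}

fn main() {
    assert!(check_id(&id()));
    assert!(!check_id(&Pubkey([0u8; 32])));
}
```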
@@ -261,7 +261,7 @@ mod test {
     #[test]
     fn test_sdk_serialize() {
         let keypair = Keypair::new();
-        use budget_program::BUDGET_PROGRAM_ID;
+        use budget_program::BudgetState;
 
         // CreateAccount
         let tx = Transaction::system_create(
@@ -270,14 +270,14 @@ mod test {
             Hash::default(),
             111,
             222,
-            Pubkey::new(&BUDGET_PROGRAM_ID),
+            BudgetState::id(),
             0,
         );
 
         assert_eq!(
             tx.userdata(0).to_vec(),
             vec![
-                0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 222, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
+                0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 222, 0, 0, 0, 0, 0, 0, 0, 129, 0, 0, 0, 0, 0,
                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
             ]
         );
@@ -302,17 +302,12 @@ mod test {
         );
 
         // Assign
-        let tx = Transaction::system_assign(
-            &keypair,
-            Hash::default(),
-            Pubkey::new(&BUDGET_PROGRAM_ID),
-            0,
-        );
+        let tx = Transaction::system_assign(&keypair, Hash::default(), BudgetState::id(), 0);
         assert_eq!(
             tx.userdata(0).to_vec(),
             vec![
-                1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0
+                1, 0, 0, 0, 129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                0, 0, 0, 0, 0, 0, 0, 0, 0
             ]
         );
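Note the expected byte vectors change only where the program id begins (offset 20 for `CreateAccount`, offset 4 for `Assign`), which is consistent with a little-endian, bincode-style encoding of the instruction. A hedged sketch reproducing that layout, assuming the `serde` and `bincode` crates; the enum is illustrative, not the crate's actual system-program type:

```rust
use serde::Serialize;

#[derive(Serialize)]
enum SystemInstruction {
    CreateAccount {
        tokens: u64,
        space: u64,
        program_id: [u8; 32],
    },
}

fn main() {
    let mut budget_id = [0u8; 32];
    budget_id[0] = 129; // hypothetical first byte of the budget program id

    let ix = SystemInstruction::CreateAccount {
        tokens: 111,
        space: 222,
        program_id: budget_id,
    };
    let bytes = bincode::serialize(&ix).unwrap();

    // 4-byte variant tag, two little-endian u64s, then the 32-byte id:
    assert_eq!(&bytes[..4], &[0u8, 0, 0, 0]);
    assert_eq!(bytes[4], 111); // tokens
    assert_eq!(bytes[12], 222); // space
    assert_eq!(bytes[20], 129); // first byte of program_id
}
```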
@@ -26,7 +26,6 @@ use std::time::Instant;
 use system_transaction::SystemTransaction;
 use timing;
 use transaction::Transaction;
-use vote_transaction::VoteTransaction;
 
 use influx_db_client as influxdb;
 use metrics;
@@ -149,29 +148,6 @@ impl ThinClient {
         ))
     }
 
-    pub fn create_vote_account(
-        &self,
-        node_keypair: &Keypair,
-        vote_account_id: Pubkey,
-        last_id: &Hash,
-        num_tokens: i64,
-    ) -> io::Result<Signature> {
-        let tx =
-            Transaction::vote_account_new(&node_keypair, vote_account_id, *last_id, num_tokens);
-        self.transfer_signed(&tx)
-    }
-
-    /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
-    pub fn register_vote_account(
-        &self,
-        node_keypair: &Keypair,
-        vote_account_id: Pubkey,
-        last_id: &Hash,
-    ) -> io::Result<Signature> {
-        let tx = Transaction::vote_account_register(node_keypair, vote_account_id, *last_id, 0);
-        self.transfer_signed(&tx)
-    }
-
     /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
     pub fn transfer(
         &self,
@@ -194,24 +170,6 @@ impl ThinClient {
         result
     }
 
-    pub fn get_account_userdata(&mut self, pubkey: &Pubkey) -> io::Result<Option<Vec<u8>>> {
-        let req = Request::GetAccount { key: *pubkey };
-        let data = serialize(&req).expect("serialize GetAccount in pub fn get_account_userdata");
-        self.requests_socket
-            .send_to(&data, &self.requests_addr)
-            .expect("buffer error in pub fn get_account_userdata");
-
-        loop {
-            let resp = self.recv_response()?;
-            trace!("recv_response {:?}", resp);
-            if let Response::Account { key, account } = resp {
-                if key == *pubkey {
-                    return Ok(account.map(|account| account.userdata));
-                }
-            }
-        }
-    }
-
     /// Request the balance of the user holding `pubkey`. This method blocks
     /// until the server sends a response. If the response packet is dropped
     /// by the network, this method will hang indefinitely.
|
|||||||
#[ignore]
|
#[ignore]
|
||||||
fn test_thin_client() {
|
fn test_thin_client() {
|
||||||
logger::setup();
|
logger::setup();
|
||||||
let leader_keypair = Arc::new(Keypair::new());
|
let leader_keypair = Keypair::new();
|
||||||
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
||||||
let leader_data = leader.info.clone();
|
let leader_data = leader.info.clone();
|
||||||
|
|
||||||
let alice = Mint::new(10_000);
|
let alice = Mint::new(10_000);
|
||||||
let mut bank = Bank::new(&alice);
|
let bank = Bank::new(&alice);
|
||||||
let bob_pubkey = Keypair::new().pubkey();
|
let bob_pubkey = Keypair::new().pubkey();
|
||||||
let ledger_path = create_tmp_ledger_with_mint("thin_client", &alice);
|
let ledger_path = create_tmp_ledger_with_mint("thin_client", &alice);
|
||||||
|
|
||||||
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
|
|
||||||
leader_data.id,
|
|
||||||
)));
|
|
||||||
bank.leader_scheduler = leader_scheduler;
|
|
||||||
let vote_account_keypair = Arc::new(Keypair::new());
|
|
||||||
let server = Fullnode::new_with_bank(
|
let server = Fullnode::new_with_bank(
|
||||||
leader_keypair,
|
leader_keypair,
|
||||||
vote_account_keypair,
|
|
||||||
bank,
|
bank,
|
||||||
0,
|
0,
|
||||||
0,
|
0,
|
||||||
@ -513,6 +465,7 @@ mod tests {
|
|||||||
None,
|
None,
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
false,
|
false,
|
||||||
|
LeaderScheduler::from_bootstrap_leader(leader_data.id),
|
||||||
Some(0),
|
Some(0),
|
||||||
);
|
);
|
||||||
sleep(Duration::from_millis(900));
|
sleep(Duration::from_millis(900));
|
||||||
@ -542,22 +495,16 @@ mod tests {
|
|||||||
#[ignore]
|
#[ignore]
|
||||||
fn test_bad_sig() {
|
fn test_bad_sig() {
|
||||||
logger::setup();
|
logger::setup();
|
||||||
let leader_keypair = Arc::new(Keypair::new());
|
let leader_keypair = Keypair::new();
|
||||||
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
||||||
let alice = Mint::new(10_000);
|
let alice = Mint::new(10_000);
|
||||||
let mut bank = Bank::new(&alice);
|
let bank = Bank::new(&alice);
|
||||||
let bob_pubkey = Keypair::new().pubkey();
|
let bob_pubkey = Keypair::new().pubkey();
|
||||||
let leader_data = leader.info.clone();
|
let leader_data = leader.info.clone();
|
||||||
let ledger_path = create_tmp_ledger_with_mint("bad_sig", &alice);
|
let ledger_path = create_tmp_ledger_with_mint("bad_sig", &alice);
|
||||||
|
|
||||||
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
|
|
||||||
leader_data.id,
|
|
||||||
)));
|
|
||||||
bank.leader_scheduler = leader_scheduler;
|
|
||||||
let vote_account_keypair = Arc::new(Keypair::new());
|
|
||||||
let server = Fullnode::new_with_bank(
|
let server = Fullnode::new_with_bank(
|
||||||
leader_keypair,
|
leader_keypair,
|
||||||
vote_account_keypair,
|
|
||||||
bank,
|
bank,
|
||||||
0,
|
0,
|
||||||
0,
|
0,
|
||||||
@ -566,6 +513,7 @@ mod tests {
|
|||||||
None,
|
None,
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
false,
|
false,
|
||||||
|
LeaderScheduler::from_bootstrap_leader(leader_data.id),
|
||||||
Some(0),
|
Some(0),
|
||||||
);
|
);
|
||||||
//TODO: remove this sleep, or add a retry so CI is stable
|
//TODO: remove this sleep, or add a retry so CI is stable
|
||||||
@ -608,25 +556,18 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_client_check_signature() {
|
fn test_client_check_signature() {
|
||||||
logger::setup();
|
logger::setup();
|
||||||
let leader_keypair = Arc::new(Keypair::new());
|
let leader_keypair = Keypair::new();
|
||||||
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
||||||
let alice = Mint::new(10_000);
|
let alice = Mint::new(10_000);
|
||||||
let mut bank = Bank::new(&alice);
|
let bank = Bank::new(&alice);
|
||||||
let bob_pubkey = Keypair::new().pubkey();
|
let bob_pubkey = Keypair::new().pubkey();
|
||||||
let leader_data = leader.info.clone();
|
let leader_data = leader.info.clone();
|
||||||
let ledger_path = create_tmp_ledger_with_mint("client_check_signature", &alice);
|
let ledger_path = create_tmp_ledger_with_mint("client_check_signature", &alice);
|
||||||
|
|
||||||
let genesis_entries = &alice.create_entries();
|
let genesis_entries = &alice.create_entries();
|
||||||
let entry_height = genesis_entries.len() as u64;
|
let entry_height = genesis_entries.len() as u64;
|
||||||
|
|
||||||
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
|
|
||||||
leader_data.id,
|
|
||||||
)));
|
|
||||||
bank.leader_scheduler = leader_scheduler;
|
|
||||||
let vote_account_keypair = Arc::new(Keypair::new());
|
|
||||||
let server = Fullnode::new_with_bank(
|
let server = Fullnode::new_with_bank(
|
||||||
leader_keypair,
|
leader_keypair,
|
||||||
vote_account_keypair,
|
|
||||||
bank,
|
bank,
|
||||||
0,
|
0,
|
||||||
entry_height,
|
entry_height,
|
||||||
@ -635,6 +576,7 @@ mod tests {
|
|||||||
None,
|
None,
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
false,
|
false,
|
||||||
|
LeaderScheduler::from_bootstrap_leader(leader_data.id),
|
||||||
Some(0),
|
Some(0),
|
||||||
);
|
);
|
||||||
sleep(Duration::from_millis(300));
|
sleep(Duration::from_millis(300));
|
||||||
@ -678,25 +620,18 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_zero_balance_after_nonzero() {
|
fn test_zero_balance_after_nonzero() {
|
||||||
logger::setup();
|
logger::setup();
|
||||||
let leader_keypair = Arc::new(Keypair::new());
|
let leader_keypair = Keypair::new();
|
||||||
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
||||||
let alice = Mint::new(10_000);
|
let alice = Mint::new(10_000);
|
||||||
let mut bank = Bank::new(&alice);
|
let bank = Bank::new(&alice);
|
||||||
let bob_keypair = Keypair::new();
|
let bob_keypair = Keypair::new();
|
||||||
let leader_data = leader.info.clone();
|
let leader_data = leader.info.clone();
|
||||||
let ledger_path = create_tmp_ledger_with_mint("zero_balance_check", &alice);
|
let ledger_path = create_tmp_ledger_with_mint("zero_balance_check", &alice);
|
||||||
|
|
||||||
let genesis_entries = &alice.create_entries();
|
let genesis_entries = &alice.create_entries();
|
||||||
let entry_height = genesis_entries.len() as u64;
|
let entry_height = genesis_entries.len() as u64;
|
||||||
|
|
||||||
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
|
|
||||||
leader_data.id,
|
|
||||||
)));
|
|
||||||
bank.leader_scheduler = leader_scheduler;
|
|
||||||
let vote_account_keypair = Arc::new(Keypair::new());
|
|
||||||
let server = Fullnode::new_with_bank(
|
let server = Fullnode::new_with_bank(
|
||||||
leader_keypair,
|
leader_keypair,
|
||||||
vote_account_keypair,
|
|
||||||
bank,
|
bank,
|
||||||
0,
|
0,
|
||||||
entry_height,
|
entry_height,
|
||||||
@ -705,6 +640,7 @@ mod tests {
|
|||||||
None,
|
None,
|
||||||
&ledger_path,
|
&ledger_path,
|
||||||
false,
|
false,
|
||||||
|
LeaderScheduler::from_bootstrap_leader(leader_data.id),
|
||||||
Some(0),
|
Some(0),
|
||||||
);
|
);
|
||||||
sleep(Duration::from_millis(900));
|
sleep(Duration::from_millis(900));
|
||||||
|
@@ -105,8 +105,9 @@ impl Default for TokenProgram {
     }
 }
 
-pub const TOKEN_PROGRAM_ID: [u8; 32] = [
-    5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+const TOKEN_PROGRAM_ID: [u8; 32] = [
+    131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0,
 ];
 
 impl TokenProgram {
22 src/tpu.rs
@@ -27,18 +27,21 @@
 
 use bank::Bank;
 use banking_stage::{BankingStage, BankingStageReturnType};
+use cluster_info::ClusterInfo;
 use entry::Entry;
 use fetch_stage::FetchStage;
 use hash::Hash;
+use leader_vote_stage::LeaderVoteStage;
 use ledger_write_stage::LedgerWriteStage;
 use poh_service::Config;
 use service::Service;
+use signature::Keypair;
 use sigverify_stage::SigVerifyStage;
 use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::mpsc::Receiver;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::thread;
 
 pub enum TpuReturnType {
@@ -49,6 +52,7 @@ pub struct Tpu {
     fetch_stage: FetchStage,
     sigverify_stage: SigVerifyStage,
     banking_stage: BankingStage,
+    leader_vote_stage: LeaderVoteStage,
     ledger_write_stage: LedgerWriteStage,
     exit: Arc<AtomicBool>,
 }
@@ -56,7 +60,9 @@ pub struct Tpu {
 impl Tpu {
     #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
     pub fn new(
+        keypair: Arc<Keypair>,
         bank: &Arc<Bank>,
+        cluster_info: &Arc<RwLock<ClusterInfo>>,
         tick_duration: Config,
         transactions_sockets: Vec<UdpSocket>,
         ledger_path: &str,
@@ -81,21 +87,28 @@ impl Tpu {
             max_tick_height,
         );
 
+        let (leader_vote_stage, ledger_entry_receiver) =
+            LeaderVoteStage::new(keypair, bank.clone(), cluster_info.clone(), entry_receiver);
+
         let (ledger_entry_sender, entry_forwarder) = channel();
-        let ledger_write_stage =
-            LedgerWriteStage::new(Some(ledger_path), entry_receiver, Some(ledger_entry_sender));
+        let ledger_write_stage = LedgerWriteStage::new(
+            Some(ledger_path),
+            ledger_entry_receiver,
+            Some(ledger_entry_sender),
+        );
 
         let tpu = Tpu {
             fetch_stage,
             sigverify_stage,
             banking_stage,
+            leader_vote_stage,
             ledger_write_stage,
             exit: exit.clone(),
         };
         (tpu, entry_forwarder, exit)
     }
 
-    pub fn exit(&self) {
+    pub fn exit(&self) -> () {
         self.exit.store(true, Ordering::Relaxed);
     }
 
@@ -115,6 +128,7 @@ impl Service for Tpu {
     fn join(self) -> thread::Result<(Option<TpuReturnType>)> {
         self.fetch_stage.join()?;
         self.sigverify_stage.join()?;
+        self.leader_vote_stage.join()?;
         self.ledger_write_stage.join()?;
         match self.banking_stage.join()? {
             Some(BankingStageReturnType::LeaderRotation) => Ok(Some(TpuReturnType::LeaderRotation)),
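With the new `LeaderVoteStage` in place, `Tpu::join` drains its stages front-to-back and only then inspects `BankingStage`'s return value. A std-only sketch of that join-in-order pattern, with illustrative stage handles in place of the real stage types:

```rust
use std::thread::{self, JoinHandle};

enum StageReturn {
    LeaderRotation,
}

struct Pipeline {
    fetch: JoinHandle<()>,
    verify: JoinHandle<()>,
    banking: JoinHandle<Option<StageReturn>>,
}

impl Pipeline {
    fn join(self) -> thread::Result<Option<StageReturn>> {
        self.fetch.join()?; // upstream stages first, so channels drain in order
        self.verify.join()?;
        self.banking.join() // the last stage reports why the pipeline stopped
    }
}

fn main() {
    let p = Pipeline {
        fetch: thread::spawn(|| {}),
        verify: thread::spawn(|| {}),
        banking: thread::spawn(|| Some(StageReturn::LeaderRotation)),
    };
    match p.join().unwrap() {
        Some(StageReturn::LeaderRotation) => println!("rotating leader"),
        None => println!("clean shutdown"),
    }
}
```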
26 src/tvu.rs
@@ -40,6 +40,7 @@ use bank::Bank;
 use blob_fetch_stage::BlobFetchStage;
 use cluster_info::ClusterInfo;
 use hash::Hash;
+use leader_scheduler::LeaderScheduler;
 use ledger_write_stage::LedgerWriteStage;
 use replicate_stage::{ReplicateStage, ReplicateStageReturnType};
 use retransmit_stage::RetransmitStage;
@@ -79,8 +80,8 @@ impl Tvu {
     #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
     pub fn new(
         keypair: Arc<Keypair>,
-        vote_account_keypair: Arc<Keypair>,
         bank: &Arc<Bank>,
+        tick_height: u64,
         entry_height: u64,
         cluster_info: Arc<RwLock<ClusterInfo>>,
         window: SharedWindow,
@@ -88,6 +89,7 @@ impl Tvu {
         repair_socket: UdpSocket,
         retransmit_socket: UdpSocket,
         ledger_path: Option<&str>,
+        leader_scheduler: Arc<RwLock<LeaderScheduler>>,
     ) -> Self {
         let exit = Arc::new(AtomicBool::new(false));
 
@@ -103,22 +105,23 @@ impl Tvu {
         let (retransmit_stage, blob_window_receiver) = RetransmitStage::new(
             &cluster_info,
             window,
-            bank.get_tick_height(),
+            tick_height,
             entry_height,
             Arc::new(retransmit_socket),
             repair_socket,
             blob_fetch_receiver,
-            bank.leader_scheduler.clone(),
+            leader_scheduler.clone(),
         );
 
         let (replicate_stage, ledger_entry_receiver) = ReplicateStage::new(
             keypair,
-            vote_account_keypair,
             bank.clone(),
             cluster_info,
             blob_window_receiver,
             exit.clone(),
+            tick_height,
             entry_height,
+            leader_scheduler,
         );
 
         let ledger_write_stage = LedgerWriteStage::new(ledger_path, ledger_entry_receiver, None);
@@ -136,7 +139,7 @@ impl Tvu {
         self.exit.load(Ordering::Relaxed)
     }
 
-    pub fn exit(&self) {
+    pub fn exit(&self) -> () {
         self.exit.store(true, Ordering::Relaxed);
     }
 
@@ -252,12 +255,7 @@ pub mod tests {
         let starting_balance = 10_000;
         let mint = Mint::new(starting_balance);
         let replicate_addr = target1.info.contact_info.tvu;
-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader_id,
-        )));
-        let mut bank = Bank::new(&mint);
-        bank.leader_scheduler = leader_scheduler;
-        let bank = Arc::new(bank);
+        let bank = Arc::new(Bank::new(&mint));
 
         //start cluster_info1
         let mut cluster_info1 = ClusterInfo::new(target1.info.clone()).expect("ClusterInfo::new");
@@ -266,18 +264,20 @@ pub mod tests {
         let cref1 = Arc::new(RwLock::new(cluster_info1));
         let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone());
 
-        let vote_account_keypair = Arc::new(Keypair::new());
         let tvu = Tvu::new(
             Arc::new(target1_keypair),
-            vote_account_keypair,
             &bank,
             0,
+            0,
             cref1,
             dr_1.1,
             target1.sockets.replicate,
             target1.sockets.repair,
             target1.sockets.retransmit,
             None,
+            Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
+                leader_id,
+            ))),
         );
 
         let mut alice_ref_balance = starting_balance;
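`Tvu::new` now takes one `Arc<RwLock<LeaderScheduler>>` and clones it into both the retransmit and replicate stages, so every consumer reads the same schedule instead of each stage reaching into the bank. A sketch of that sharing pattern with stand-in types:

```rust
use std::sync::{Arc, RwLock};
use std::thread::{self, JoinHandle};

#[derive(Default)]
struct LeaderScheduler {
    current_leader: u64,
}

fn spawn_stage(name: &'static str, sched: Arc<RwLock<LeaderScheduler>>) -> JoinHandle<()> {
    thread::spawn(move || {
        // Stages take read locks; writers (e.g. on rotation) take the write lock.
        let leader = sched.read().unwrap().current_leader;
        println!("{} sees leader {}", name, leader);
    })
}

fn main() {
    let scheduler = Arc::new(RwLock::new(LeaderScheduler::default()));
    scheduler.write().unwrap().current_leader = 7;

    let retransmit = spawn_stage("retransmit", scheduler.clone());
    let replicate = spawn_stage("replicate", scheduler); // last handle moves in

    retransmit.join().unwrap();
    replicate.join().unwrap();
}
```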
Some files were not shown because too many files have changed in this diff.