Compare commits
125 Commits
SHA1

14306a33e7
babc3847d7
40fd1befa5
7808af9a65
3c17732826
77aee571ad
a01b55c580
0ecdc64302
ba06082d58
08e9c1a96e
9f38b86df8
ca12faca9c
97a0791f3f
4791c7e0a7
1ba13fe180
9a30100a9c
aa741b3147
09db7b5b52
fa9faa2cec
d2dc585974
6721bdde3d
a733873b8f
7c02bbc47c
16a815d2b1
ddb490e2fb
242d0a23fb
869009243d
7b61f5279c
7ef0b815ec
8742de789e
bfadd7b787
2e14bfcf4e
a19426f055
df366017a7
7d76badd03
8047ab777c
0d0a1c2919
1da90017ce
0909618efa
28bb7849f4
9cffd3a1ea
917151ce54
6dcd127634
af66edf8c0
ab5b921e8f
6c2843543b
85f74cc537
43665115b4
156115c04c
a66577eb87
3345d059e8
8c8c5de779
f03e971598
b4a1cdceaa
b250d20059
dc3b270410
9d5092a71c
a287c9e5fa
ee85d534f9
6e1b291c17
68f7b1ecf3
58fe5cabd6
8993c6ae24
0e56473add
f6b709ca48
ffa1fa557b
e7631c85a1
edeadb503f
d2044f2562
5703c740cf
6ae20e78e2
506fc3baeb
68523f4a7f
beae217ab9
2c8c117e3c
3a1285ebe5
e2660f2ac1
22eb1b977f
43ef8d7bb7
d9271f2d30
dfbfd4d4dd
9cb262ad4b
73ee0cb100
9a6154beaf
3f494bb91b
2eb312796d
3fb86662fb
dce31f6002
39c42a6aba
9961c0ee0a
3f843f21b9
d07961a58b
b85aa9282e
1cd354cf15
92cd2d09ed
a40122548f
6e27f797bd
476a585222
aa74ddb6c0
95921ce129
ee6d00a2fe
212cbc4977
a6af1ba08d
ee27e9e1cf
4d21ee0546
493a2477b5
e284af33b9
f0aa14e135
fb9d8dfa99
4b02bbc802
18cf660f61
376303a1eb
f295eb06d0
f423f61d8b
94b06b2cbf
9b2fc8cde7
d810752e86
fdaad1d85b
7f29c1fe23
68df9d06db
b60cb48c18
0fee854220
0cc7bbfe7d
68834bd4c5
2df40cf9c9
.buildkite/env/README.md (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@

[ejson](https://github.com/Shopify/ejson) and
[ejson2env](https://github.com/Shopify/ejson2env) are used to manage access
tokens and other secrets required for CI.

#### Setup
```bash
$ sudo gem install ejson ejson2env
```

then obtain the necessary keypair and place it in `/opt/ejson/keys/`.

#### Usage
Run the following command to decrypt the secrets into the environment:
```bash
eval $(ejson2env secrets.ejson)
```

#### Managing secrets.ejson
To decrypt `secrets.ejson` for modification, run:
```bash
$ ejson decrypt secrets.ejson -o secrets_unencrypted.ejson
```

Edit, then run the following to re-encrypt the file **BEFORE COMMITTING YOUR
CHANGES**:
```bash
$ ejson encrypt secrets_unencrypted.ejson
$ mv secrets_unencrypted.ejson secrets.ejson
```
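As an aside on what that `eval` consumes: `ejson2env` prints `export` statements for each key under `environment` once the private key in `/opt/ejson/keys/` is available. A rough sketch (hypothetical output; the real values are the decrypted secrets):

```bash
# Approximately what `ejson2env secrets.ejson` emits for the file below:
export CODECOV_TOKEN='<decrypted value>'
export CRATES_IO_TOKEN='<decrypted value>'
export INFLUX_DATABASE='<decrypted value>'
export INFLUX_USERNAME='<decrypted value>'
export INFLUX_PASSWORD='<decrypted value>'
```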
.buildkite/env/secrets.ejson (vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
{
  "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
  "environment": {
    "CODECOV_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:EzVa4Gpj2Qn5OhZQlVfGFchuROgupvnW:CbWc6sNh1GCrAbrncxDjW00zUAD/Sa+ccg7CFSz8Ua6LnCYnSddTBxJWcJEbEs0MrjuZRQ==]",
    "CRATES_IO_TOKEN": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:qF7QrUM8j+19mptcE1YS71CqmrCM13Ah:TZCatJeT1egCHiufE6cGFC1VsdJkKaaqV6QKWkEsMPBKvOAdaZbbVz9Kl+lGnIsF]",
    "INFLUX_DATABASE": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:PetD/4c/EbkQmFEcK21g3cBBAPwFqHEw:wvYmDZRajy2WngVFs9AlwyHk]",
    "INFLUX_USERNAME": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:WcnqZdmDFtJJ01Zu5LbeGgbYGfRzBdFc:a7c5zDDtCOu5L1Qd2NKkxT6kljyBcbck]",
    "INFLUX_PASSWORD": "EJ[1:Kqnm+k1Z4p8nr7GqMczXnzh6azTk39tj3bAbCKPitUc=:LIZgP9Tp9yE9OlpV8iogmLOI7iW7SiU3:x0nYdT1A6sxu+O+MMLIN19d2t6rrK1qJ3+HnoWG3PDodsXjz06YJWQKU/mx6saqH+QbGtGV5mk0=]"
  }
}
@@ -1,4 +1,7 @@
-#!/bin/bash -e
+#!/usr/bin/env bash
+set -e

+eval "$(ejson2env .buildkite/env/secrets.ejson)"
+
 # Ensure the pattern "+++ ..." never occurs when |set -x| is set, as buildkite
 # interprets this as the start of a log group.

@@ -24,4 +27,3 @@ export PS4="++"
 set -x
 rsync -a --delete --link-dest="$d" "$d"/target .
 )
-
.buildkite/pipeline-upload.sh (new executable file, 20 lines)

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
#
# This script is used to upload the full buildkite pipeline. The steps defined
# in the buildkite UI should simply be:
#
#   steps:
#    - command: "ci/buildkite-pipeline-upload.sh"
#

set -e
cd "$(dirname "$0")"/..

buildkite-agent pipeline upload ci/buildkite.yml

if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
  # Add helpful link back to the corresponding Github Pull Request
  buildkite-agent annotate --style "info" \
    "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
fi
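A hedged note on the branch check above: Buildkite's GitHub integration typically names pull-request builds with a `pull/...` branch (the exact form, e.g. `pull/1234/head`, depends on pipeline settings and is assumed here), which is what the regex keys on:

```bash
# Hypothetical PR build; BUILDKITE_BRANCH naming is an assumption
BUILDKITE_BRANCH="pull/1234/head"
if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
  echo "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
fi
# prints: Github Pull Request: https://github.com/solana-labs/solana/pull/1234/head
```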
.gitignore (vendored, 1 line changed)

@@ -1,4 +1,3 @@
-Cargo.lock
 /target/

 **/*.rs.bk
Cargo.lock (generated, new file, 2464 lines)

File diff suppressed because it is too large.
Cargo.toml (14 lines changed)

@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.10.0-pre2"
+version = "0.10.5"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 readme = "README.md"

@@ -80,7 +80,7 @@ env_logger = "0.5.12"
 generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
 getopts = "0.2"
 hex-literal = "0.1.1"
-influx_db_client = "0.3.4"
+influx_db_client = "0.3.6"
 solana-jsonrpc-core = "0.3.0"
 solana-jsonrpc-http-server = "0.3.0"
 solana-jsonrpc-macros = "0.3.0"

@@ -104,14 +104,14 @@ serde_cbor = "0.9.0"
 serde_derive = "1.0.27"
 serde_json = "1.0.10"
 socket2 = "0.3.8"
-solana-sdk = { path = "sdk", version = "0.10.0-pre2" }
+solana-sdk = { path = "sdk", version = "0.10.5" }
 sys-info = "0.5.6"
 tokio = "0.1"
 tokio-codec = "0.1"
 untrusted = "0.6.2"
-solana-noop = { path = "programs/native/noop", version = "0.10.0-pre2" }
-solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.0-pre2" }
-solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.0-pre2" }
+solana-noop = { path = "programs/native/noop", version = "0.10.5" }
+solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.5" }
+solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.5" }

 [[bench]]
 name = "bank"

@@ -139,5 +139,5 @@ members = [
     "programs/native/noop",
     "programs/native/bpf_loader",
     "programs/native/lua_loader",
-    "programs/bpf/noop_rust",
+    "programs/bpf/rust/noop",
 ]
build.rs (50 lines changed)

@@ -8,7 +8,7 @@ fn main() {
     // Ensure target/perf-libs/ exists. It's been observed that
     // a cargo:rerun-if-changed= directive with a non-existent
     // directory triggers a rebuild on every |cargo build| invocation
-    fs::create_dir("target/perf-libs").unwrap_or_else(|err| {
+    fs::create_dir_all("target/perf-libs").unwrap_or_else(|err| {
         if err.kind() != std::io::ErrorKind::AlreadyExists {
             panic!("Unable to create target/perf-libs: {:?}", err);
         }

@@ -20,44 +20,22 @@ fn main() {
     let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();

     if bpf_c {
-        let out_dir = "target/".to_string() + &env::var("PROFILE").unwrap();
+        let out_dir = "OUT_DIR=../../../target/".to_string()
+            + &env::var("PROFILE").unwrap()
+            + &"/bpf".to_string();

-        println!("cargo:rerun-if-changed=programs/bpf/noop_c/build.sh");
-        println!("cargo:rerun-if-changed=programs/bpf/noop_c/src/noop.c");
-        println!("cargo:warning=(not a warning) Compiling noop_c");
-        let status = Command::new("programs/bpf/noop_c/build.sh")
+        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/bpf.mk");
+        println!("cargo:rerun-if-changed=programs/bpf/c/sdk/inc/solana_sdk.h");
+        println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
+        println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
+        println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
+        println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
+        let status = Command::new("make")
+            .current_dir("programs/bpf/c")
+            .arg("all")
             .arg(&out_dir)
             .status()
-            .expect("Failed to call noop_c build script");
-        assert!(status.success());
-
-        println!("cargo:rerun-if-changed=programs/bpf/move_funds_c/build.sh");
-        println!("cargo:rerun-if-changed=programs/bpf/move_funds_c/src/move_funds.c");
-        println!("cargo:warning=(not a warning) Compiling move_funds_c");
-        let status = Command::new("programs/bpf/move_funds_c/build.sh")
-            .arg(&out_dir)
-            .status()
-            .expect("Failed to call move_funds_c build script");
-        assert!(status.success());
-
-        println!("cargo:rerun-if-changed=programs/bpf/tictactoe_c/build.sh");
-        println!("cargo:rerun-if-changed=programs/bpf/tictactoe_c/src/tictactoe.c");
-        println!("cargo:warning=(not a warning) Compiling tictactoe_c");
-        let status = Command::new("programs/bpf/tictactoe_c/build.sh")
-            .arg(&out_dir)
-            .status()
-            .expect("Failed to call tictactoe_c build script");
-        assert!(status.success());
-
-        println!("cargo:rerun-if-changed=programs/bpf/tictactoe_dashboard_c/build.sh");
-        println!(
-            "cargo:rerun-if-changed=programs/bpf/tictactoe_dashboard_c/src/tictactoe_dashboard.c"
-        );
-        println!("cargo:warning=(not a warning) Compiling tictactoe_dashboard_c");
-        let status = Command::new("programs/bpf/tictactoe_dashboard_c/build.sh")
-            .arg(&out_dir)
-            .status()
-            .expect("Failed to call tictactoe_dashboard_c build script");
+            .expect("Failed to build C-based BPF programs");
         assert!(status.success());
     }
     if chacha || cuda || erasure {
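For orientation, the new `Command::new("make")` invocation is roughly this shell equivalent (a sketch; the `OUT_DIR=` value here assumes `PROFILE=release`):

```bash
# Approximate shell equivalent of the replaced per-program build scripts:
cd programs/bpf/c
make all OUT_DIR=../../../target/release/bpf
```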
@@ -8,3 +8,9 @@ steps:
   - command: "ci/publish-crate.sh"
     timeout_in_minutes: 20
     name: "publish crate [public]"
+  - command: "ci/publish-bpf-sdk.sh"
+    timeout_in_minutes: 5
+    name: "publish bpf sdk"
+  - command: "ci/publish-solana-tar.sh"
+    timeout_in_minutes: 15
+    name: "publish solana release tar"
@@ -1,5 +1,5 @@
 steps:
-  - command: "ci/docker-run.sh solanalabs/rust:1.30.0 ci/test-stable.sh"
+  - command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh"
     name: "stable [public]"
     env:
       CARGO_TARGET_CACHE_NAME: "stable"

@@ -36,7 +36,7 @@ steps:
     timeout_in_minutes: 20
     name: "snap [public]"
   - wait
-  - trigger: "solana-snap"
+  - trigger: "solana-secondary"
     branches: "!pull/*"
     async: true
     build:
ci/crate-version.sh (new executable file, 16 lines)

@@ -0,0 +1,16 @@
#!/bin/bash -e
#
# Outputs the current crate version
#

cd "$(dirname "$0")"/..

while read -r name equals value _; do
  if [[ $name = version && $equals = = ]]; then
    echo "${value//\"/}"
    exit 0
  fi
done < <(cat Cargo.toml)

echo Unable to locate version in Cargo.toml 1>&2
exit 1
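As a quick illustration (assuming a checkout whose Cargo.toml declares `version = "0.10.5"`, as this compare does), the script prints the version with the surrounding quotes stripped:

```bash
$ ci/crate-version.sh
0.10.5
```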
@@ -1,6 +1,6 @@
 # Note: when the rust version is changed also modify
 # ci/buildkite.yml to pick up the new image tag
-FROM rust:1.30.0
+FROM rust:1.30.1

 RUN set -x && \
     apt update && \
ci/publish-bpf-sdk.sh (new executable file, 36 lines)

@@ -0,0 +1,36 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

version=$(./ci/crate-version.sh)

echo --- Creating tarball
(
  set -x
  rm -rf bpf-sdk/
  mkdir bpf-sdk/
  (
    echo "$version"
    git rev-parse HEAD
  ) > bpf-sdk/version.txt

  cp -ra programs/bpf/c/sdk/* bpf-sdk/

  tar jvcf bpf-sdk.tar.bz2 bpf-sdk/
)


echo --- AWS S3 Store

set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
  rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
  wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
  tar zxf s3cmd-2.0.1.tar.gz
fi

python ./s3cmd-2.0.1/s3cmd --acl-public put bpf-sdk.tar.bz2 \
  s3://solana-sdk/"$version"/bpf-sdk.tar.bz2

exit 0
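The `version.txt` written above pairs the crate version with the exact commit, so a published SDK is traceable. A sketch of its contents (placeholder SHA; the real value comes from `git rev-parse HEAD`):

```bash
$ cat bpf-sdk/version.txt
0.10.5
<40-character commit SHA from `git rev-parse HEAD`>
```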
@@ -18,7 +18,7 @@ if [[ -n $CI ]]; then
 fi

 # shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
-for Cargo_toml in {.,sdk,programs/native/{bpf_loader,lua_loader,noop}}/Cargo.toml; do
+for Cargo_toml in {sdk,programs/native/{bpf_loader,lua_loader,noop},.}/Cargo.toml; do
   # TODO: Ensure the published version matches the contents of BUILDKITE_TAG
   (
     set -x
ci/publish-metrics-dashboard.sh (new executable file, 73 lines)

@@ -0,0 +1,73 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")/.."

if [[ -z $BUILDKITE ]]; then
  echo BUILDKITE not defined
  exit 1
fi

if [[ -z $CHANNEL ]]; then
  CHANNEL=$(buildkite-agent meta-data get "channel" --default "")
fi

if [[ -z $CHANNEL ]]; then
  (
    cat <<EOF
steps:
  - block: "Select Dashboard"
    fields:
      - select: "Channel"
        key: "channel"
        options:
          - label: "stable"
            value: "stable"
          - label: "edge"
            value: "edge"
          - label: "beta"
            value: "beta"
  - command: "ci/$(basename "$0")"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi


ci/channel-info.sh
eval "$(ci/channel-info.sh)"

case $CHANNEL in
edge)
  CHANNEL_BRANCH=$EDGE_CHANNEL
  ;;
beta)
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
stable)
  CHANNEL_BRANCH=$STABLE_CHANNEL
  ;;
*)
  echo "Error: Invalid CHANNEL=$CHANNEL"
  exit 1
  ;;
esac

if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
  (
    cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      branch: "$CHANNEL_BRANCH"
      env:
        CHANNEL: "$CHANNEL"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

set -x
exec metrics/publish-metrics-dashboard.sh "$CHANNEL"
ci/publish-solana-tar.sh (new executable file, 71 lines)

@@ -0,0 +1,71 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
  DRYRUN="echo"
  CHANNEL=unknown
fi

eval "$(ci/channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi

if [[ -n "$BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$BUILDKITE_TAG
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
else
  CHANNEL_OR_TAG=$CHANNEL
fi

if [[ -z $CHANNEL_OR_TAG ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi


echo --- Creating tarball
(
  set -x
  rm -rf solana-release/
  mkdir solana-release/
  (
    echo "$CHANNEL_OR_TAG"
    git rev-parse HEAD
  ) > solana-release/version.txt

  cargo install --root solana-release
  ./scripts/install-native-programs.sh solana-release/bin
  ./fetch-perf-libs.sh
  cargo install --features=cuda --root solana-release-cuda
  cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda

  tar jvcf solana-release.tar.bz2 solana-release/
)

echo --- AWS S3 Store
if [[ -z $DRYRUN ]]; then
  (
    set -x
    if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
      rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
      $DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
      $DRYRUN tar zxf s3cmd-2.0.1.tar.gz
    fi

    $DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
      s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
  )
else
  echo Skipped due to DRYRUN
fi
exit 0
@@ -23,6 +23,16 @@ for test in tests/*.rs; do
   _ cargo test --verbose --jobs=1 --test="$test"
 done

+# Run the native programs' tests
+for program in programs/native/*; do
+  echo --- "$program"
+  (
+    set -x
+    cd "$program"
+    cargo test --verbose
+  )
+done
+
 echo --- ci/localnet-sanity.sh
 (
   set -x
@@ -9,8 +9,10 @@ clientNodeCount=0
 validatorNodeCount=10
 publicNetwork=false
 snapChannel=edge
+tarChannelOrTag=edge
 delete=false
 enableGpu=false
+useTarReleaseChannel=false

 usage() {
   exitcode=0

@@ -19,16 +21,21 @@ usage() {
     echo "Error: $*"
   fi
   cat <<EOF
-usage: $0 [name] [zone] [options...]
+usage: $0 [name] [cloud] [zone] [options...]

 Deploys a CD testnet

   name  - name of the network
-  zone  - zone to deploy the network into
+  cloud - cloud provider to use (gce, ec2)
+  zone  - cloud provider zone to deploy the network into

   options:
    -s edge|beta|stable         - Deploy the specified Snap release channel
                                  (default: $snapChannel)
+   -t edge|beta|stable|vX.Y.Z  - Deploy the latest tarball release for the
+                                 specified release channel (edge|beta|stable) or release tag
+                                 (vX.Y.Z)
+                                 (default: $tarChannelOrTag)
    -n [number]                 - Number of validator nodes (default: $validatorNodeCount)
    -c [number]                 - Number of client nodes (default: $clientNodeCount)
    -P                          - Use public network IP addresses (default: $publicNetwork)

@@ -44,12 +51,14 @@ EOF
 }

 netName=$1
-zone=$2
+cloudProvider=$2
+zone=$3
 [[ -n $netName ]] || usage
+[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
 [[ -n $zone ]] || usage "Zone not specified"
-shift 2
+shift 3

-while getopts "h?p:Pn:c:s:gG:a:d" opt; do
+while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
   case $opt in
   h | \?)
     usage

@@ -73,6 +82,17 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
       ;;
     esac
     ;;
+  t)
+    case $OPTARG in
+    edge|beta|stable|v*)
+      tarChannelOrTag=$OPTARG
+      useTarReleaseChannel=true
+      ;;
+    *)
+      usage "Invalid release channel: $OPTARG"
+      ;;
+    esac
+    ;;
   g)
     enableGpu=true
     ;;

@@ -93,7 +113,7 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
 done


-gce_create_args=(
+create_args=(
   -a "$leaderAddress"
   -c "$clientNodeCount"
   -n "$validatorNodeCount"

@@ -103,26 +123,26 @@ gce_create_args=(

 if $enableGpu; then
   if [[ -z $leaderMachineType ]]; then
-    gce_create_args+=(-g)
+    create_args+=(-g)
   else
-    gce_create_args+=(-G "$leaderMachineType")
+    create_args+=(-G "$leaderMachineType")
   fi
 fi

 if $publicNetwork; then
-  gce_create_args+=(-P)
+  create_args+=(-P)
 fi

 set -x

-echo --- gce.sh delete
-time net/gce.sh delete -z "$zone" -p "$netName"
+echo "--- $cloudProvider.sh delete"
+time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
 if $delete; then
   exit 0
 fi

-echo --- gce.sh create
-time net/gce.sh create "${gce_create_args[@]}"
+echo "--- $cloudProvider.sh create"
+time net/"$cloudProvider".sh create "${create_args[@]}"
 net/init-metrics.sh -e

 echo --- net.sh start

@@ -130,7 +150,18 @@ maybeRejectExtraNodes=
 if ! $publicNetwork; then
   maybeRejectExtraNodes="-o rejectExtraNodes"
 fi
+maybeNoValidatorSanity=
+if [[ -n $NO_VALIDATOR_SANITY ]]; then
+  maybeNoValidatorSanity="-o noValidatorSanity"
+fi
+maybeNoLedgerVerify=
+if [[ -n $NO_LEDGER_VERIFY ]]; then
+  maybeNoLedgerVerify="-o noLedgerVerify"
+fi
 # shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
-time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes
-
+if $useTarReleaseChannel; then
+  time net/net.sh start -t "$tarChannelOrTag" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
+else
+  time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
+fi
 exit 0
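With the new calling convention the cloud provider is a positional argument between the network name and the zone, so an invocation looks like this (an illustrative example assembled from the flags above; the host name mirrors those used in ci/testnet-manager.sh below):

```bash
# Deploy a 3-validator, 0-client testnet from the edge tarball channel on EC2
ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a -t edge -n 3 -c 0 -P
```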
ci/testnet-manager.sh (new executable file, 360 lines)

@@ -0,0 +1,360 @@
#!/bin/bash -e

cd "$(dirname "$0")"/..

if [[ -z $BUILDKITE ]]; then
  echo BUILDKITE not defined
  exit 1
fi

if [[ -z $SOLANA_METRICS_PARTIAL_CONFIG ]]; then
  echo SOLANA_METRICS_PARTIAL_CONFIG not defined
  exit 1
fi

if [[ -z $TESTNET ]]; then
  TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
fi

if [[ -z $TESTNET_OP ]]; then
  TESTNET_OP=$(buildkite-agent meta-data get "testnet-operation" --default "")
fi

if [[ -z $TESTNET || -z $TESTNET_OP ]]; then
  (
    cat <<EOF
steps:
  - block: "Manage Testnet"
    fields:
      - select: "Network"
        key: "testnet"
        options:
          - label: "testnet"
            value: "testnet"
          - label: "testnet-perf"
            value: "testnet-perf"
          - label: "testnet-master"
            value: "testnet-master"
          - label: "testnet-master-perf"
            value: "testnet-master-perf"
          - label: "testnet-edge"
            value: "testnet-edge"
          - label: "testnet-edge-perf"
            value: "testnet-edge-perf"
          - label: "testnet-beta"
            value: "testnet-beta"
          - label: "testnet-beta-perf"
            value: "testnet-beta-perf"
      - select: "Operation"
        key: "testnet-operation"
        default: "sanity-or-restart"
        options:
          - label: "Sanity check. Restart network on failure"
            value: "sanity-or-restart"
          - label: "Start (or restart) the network"
            value: "start"
          - label: "Stop the network"
            value: "stop"
          - label: "Sanity check only"
            value: "sanity"
  - command: "ci/$(basename "$0")"
    agents:
      - "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi

export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"

ci/channel-info.sh
eval "$(ci/channel-info.sh)"

case $TESTNET in
testnet-edge|testnet-edge-perf|testnet-master|testnet-master-perf)
  CHANNEL_OR_TAG=edge
  CHANNEL_BRANCH=$EDGE_CHANNEL
  ;;
testnet-beta|testnet-beta-perf)
  CHANNEL_OR_TAG=beta
  CHANNEL_BRANCH=$BETA_CHANNEL
  ;;
testnet|testnet-perf)
  if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
    CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$BETA_CHANNEL
  else
    CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
    CHANNEL_BRANCH=$STABLE_CHANNEL
  fi
  ;;
*)
  echo "Error: Invalid TESTNET=$TESTNET"
  exit 1
  ;;
esac

if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
  (
    cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      branch: "$CHANNEL_BRANCH"
      env:
        TESTNET: "$TESTNET"
        TESTNET_OP: "$TESTNET_OP"
EOF
  ) | buildkite-agent pipeline upload
  exit 0
fi


sanity() {
  echo "--- sanity $TESTNET"
  case $TESTNET in
  testnet-edge)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-edge-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh edge-perf-testnet-solana-com ec2 us-west-2b
    )
    ;;
  testnet-beta)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-beta-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh beta-perf-testnet-solana-com ec2 us-west-2b
    )
    ;;
  testnet-master)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh master-testnet-solana-com gce us-west1-b
    )
    ;;
  testnet-master-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-sanity.sh master-perf-testnet-solana-com gce us-west1-b
    )
    ;;
  testnet)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-sanity.sh testnet-solana-com gce us-east1-c
      ci/testnet-sanity.sh testnet-solana-com ec2 us-west-1a
    )
    ;;
  testnet-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export REJECT_EXTRA_NODES=1
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-sanity.sh perf-testnet-solana-com ec2 us-east-1a
      ci/testnet-sanity.sh perf-testnet-solana-com gce us-west1-b
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}


start() {
  declare maybeDelete=$1
  if [[ -z $maybeDelete ]]; then
    echo "--- start $TESTNET"
  else
    echo "--- stop $TESTNET"
  fi

  case $TESTNET in
  testnet-edge)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
        -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0ccd4f2239886fa94 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-edge-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh edge-perf-testnet-solana-com ec2 us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-beta)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0f286cf8a0771ce35 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-beta-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh beta-perf-testnet-solana-com ec2 us-west-2b \
        -g -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-master)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh master-testnet-solana-com gce us-west1-b \
        -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a master-testnet-solana-com \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-master-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh master-perf-testnet-solana-com gce us-west1-b \
        -G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
        -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      #ci/testnet-deploy.sh testnet-solana-com gce us-east1-c \
      #  -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
      #  ${maybeDelete:+-d}
      ci/testnet-deploy.sh testnet-solana-com ec2 us-west-1a \
        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0fa502bf95f6f18b2 \
        ${maybeDelete:+-d}
    )
    ;;
  testnet-perf)
    # shellcheck disable=2030
    # shellcheck disable=2031
    (
      set -ex
      export NO_LEDGER_VERIFY=1
      export NO_VALIDATOR_SANITY=1
      ci/testnet-deploy.sh perf-testnet-solana-com gce us-west1-b \
        -G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
        -t "$CHANNEL_OR_TAG" -c 2 \
        ${maybeDelete:+-d}
      #ci/testnet-deploy.sh perf-testnet-solana-com ec2 us-east-1a \
      #  -g \
      #  -t "$CHANNEL_OR_TAG" -c 2 \
      #  ${maybeDelete:+-d}
    )
    ;;
  *)
    echo "Error: Invalid TESTNET=$TESTNET"
    exit 1
    ;;
  esac
}

stop() {
  start delete
}

case $TESTNET_OP in
sanity)
  sanity
  ;;
start)
  start
  ;;
stop)
  stop
  ;;
sanity-or-restart)
  if sanity; then
    echo Pass
  else
    echo "Sanity failed, restarting the network"
    echo "^^^ +++"
    start
  fi
  ;;
esac

echo --- fin
exit 0
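One idiom worth noting in this script: `stop()` simply calls `start delete`, and every deploy command appends `${maybeDelete:+-d}`, which expands to `-d` only when `maybeDelete` is non-empty. A minimal bash illustration of that expansion:

```bash
maybeDelete=
echo deploy ${maybeDelete:+-d}    # prints: deploy
maybeDelete=delete
echo deploy ${maybeDelete:+-d}    # prints: deploy -d
```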
@@ -9,12 +9,13 @@ usage() {
     echo "Error: $*"
   fi
   cat <<EOF
-usage: $0 [name] [zone]
+usage: $0 [name] [cloud] [zone]

 Sanity check a CD testnet

   name  - name of the network
-  zone  - zone of the network
+  cloud - cloud provider to use (gce, ec2)
+  zone  - cloud provider zone of the network

 Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
       metrics

@@ -23,16 +24,18 @@ EOF
 }

 netName=$1
-zone=$2
+cloudProvider=$2
+zone=$3
 [[ -n $netName ]] || usage ""
+[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
 [[ -n $zone ]] || usage "Zone not specified"

 set -x
-echo --- gce.sh config
-net/gce.sh config -p "$netName" -z "$zone"
+echo "--- $cloudProvider.sh config"
+timeout 5m net/"$cloudProvider".sh config -p "$netName" -z "$zone"
 net/init-metrics.sh -e
 echo --- net.sh sanity
-net/net.sh sanity \
+timeout 5m net/net.sh sanity \
   ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
   ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
   ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
@@ -4,14 +4,18 @@ Currently we have three testnets:
 * `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
 * `testnet-perf` - private beta channel testnet with clients trying to flood the network
   with transactions until failure. Runs 24/7
-* `testnet-master` - private edge channel testnet with clients trying to flood the network
+* `testnet-master` - public edge channel testnet accessible via master.testnet.solana.com. Runs 24/7
+* `testnet-master-perf` - private edge channel testnet with clients trying to flood the network
   with transactions until failure. Runs on weekday mornings for a couple hours

 ## Deploy process

-They are deployed with the `ci/testnet-deploy.sh` script. There is a scheduled buildkite job which runs to do the deploy,
-look at `testnet-deploy` to see the agent which ran it and the logs. There is also a manual job to do the deploy manually.
-Validators are selected based on their machine name and everyone gets the binaries installed from snap.
+They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
+buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
+Each testnet can be manually manipulated from buildkite as well. The `-perf`
+testnets use a release tarball while the non`-perf` builds use the snap build
+(we've observed that the snap build runs slower than a tarball but this has yet
+to be root caused).

 ## Where are the testnet logs?

@@ -29,7 +33,8 @@ $ net/ssh.sh
 for log location details

 ## How do I reset the testnet?
-Manually trigger the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) pipeline
+Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
 and when prompted select the desired testnet

 ## How can I scale the tx generation rate?

@@ -43,5 +48,5 @@ Currently, a merged PR is the only way to test a change on the testnet. But you
 can run your own testnet using the scripts in the `net/` directory.

 ## Adjusting the number of clients or validators on the testnet
-Through the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) settings.
+Edit `ci/testnet-manager.sh`
@@ -15,7 +15,7 @@ mkdir -p target/perf-libs
 cd target/perf-libs
 (
   set -x
-  curl https://solana-perf.s3.amazonaws.com/v0.10.2/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
+  curl https://solana-perf.s3.amazonaws.com/v0.10.3/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
 )

 if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
metrics/README.md (new file, 39 lines)

@@ -0,0 +1,39 @@
# Metrics

## Testnet Grafana Dashboard

There are three versions of the testnet dashboard, corresponding to the three
release channels:
* https://metrics.solana.com:3000/d/testnet-edge/testnet-monitor-edge
* https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta
* https://metrics.solana.com:3000/d/testnet/testnet-monitor

The dashboard for each channel is defined from the
`metrics/testnet-monitor.json` source file in the git branch associated with
that channel, and deployed by automation running `ci/publish-metrics-dashboard.sh`.

A deploy can be triggered at any time via the `New Build` button of
https://buildkite.com/solana-labs/publish-metrics-dashboard.

### Modifying a Dashboard

Dashboard updates are accomplished by modifying `metrics/testnet-monitor.json`;
**manual edits made directly in Grafana will be overwritten**.

1. Open the desired dashboard in Grafana
2. Create a development copy of the dashboard by selecting `Save As..` in the
   `Settings` menu for the dashboard
3. Edit dashboard as desired
4. Extract the JSON Model by selecting `JSON Model` in the `Settings` menu. Copy the JSON to the clipboard
   and paste into `metrics/testnet-monitor.json`
5. Delete your development dashboard: `Settings` => `Delete`

### Deploying a Dashboard Manually

If you need to immediately deploy a dashboard using the contents of
`metrics/testnet-monitor.json` in your local workspace:
```
$ export GRAFANA_API_TOKEN="an API key from https://metrics.solana.com:3000/org/apikeys"
$ metrics/publish-metrics-dashboard.sh (edge|beta|stable)
```
Note that automation will eventually overwrite your manual deploy.
metrics/adjust-dashboard-for-channel.py (new executable file, 69 lines)

@@ -0,0 +1,69 @@
#!/usr/bin/env python3
#
# Adjusts the testnet monitor dashboard for the specified release channel
#

import sys
import json

if len(sys.argv) != 3:
    print('Error: Dashboard or Channel not specified')
    sys.exit(1)

dashboard_json = sys.argv[1]
channel = sys.argv[2]
if channel not in ['edge', 'beta', 'stable']:
    print('Error: Unknown channel:', channel)
    sys.exit(2)

with open(dashboard_json, 'r') as read_file:
    data = json.load(read_file)

if channel == 'stable':
    # Stable dashboard only allows the user to select between the stable
    # testnet databases
    data['title'] = 'Testnet Monitor'
    data['uid'] = 'testnet'
    data['templating']['list'] = [{'allValue': None,
                                   'current': {'text': 'testnet',
                                               'value': 'testnet'},
                                   'hide': 1,
                                   'includeAll': False,
                                   'label': 'Testnet',
                                   'multi': False,
                                   'name': 'testnet',
                                   'options': [{'selected': False,
                                                'text': 'testnet',
                                                'value': 'testnet'},
                                               {'selected': True,
                                                'text': 'testnet-perf',
                                                'value': 'testnet-perf'}],
                                   'query': 'testnet,testnet-perf',
                                   'type': 'custom'}]
else:
    # Non-stable dashboard only allows the user to select between all testnet
    # databases
    data['title'] = 'Testnet Monitor ({})'.format(channel)
    data['uid'] = 'testnet-' + channel
    data['templating']['list'] = [{'allValue': None,
                                   'current': {'text': 'testnet',
                                               'value': 'testnet'},
                                   'datasource': 'Solana Metrics (read-only)',
                                   'hide': 1,
                                   'includeAll': False,
                                   'label': 'Testnet',
                                   'multi': False,
                                   'name': 'testnet',
                                   'options': [],
                                   'query': 'show databases',
                                   'refresh': 1,
                                   'regex': 'testnet.*',
                                   'sort': 1,
                                   'tagValuesQuery': '',
                                   'tags': [],
                                   'tagsQuery': '',
                                   'type': 'query',
                                   'useTags': False}]

with open(dashboard_json, 'w') as write_file:
    json.dump(data, write_file, indent=2)
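A quick usage sketch (the file name comes from the README above; the expected title/uid follow directly from the script's `else` branch):

```bash
# Rewrite the dashboard JSON in place for the edge channel, then inspect
# the adjusted title and uid:
./adjust-dashboard-for-channel.py testnet-monitor.json edge
python3 -c "import json; d = json.load(open('testnet-monitor.json')); print(d['title'], d['uid'])"
# expected output: Testnet Monitor (edge) testnet-edge
```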
metrics/grafcli.conf (new file, 15 lines)

@@ -0,0 +1,15 @@
[grafcli]
editor = vim
mergetool = vimdiff
verbose = on
force = on

[resources]

[hosts]
metrics = on

[metrics]
type = api
url = https://metrics.solana.com:3000/api
ssl = off
metrics/publish-metrics-dashboard.sh (new executable file, 71 lines)

@@ -0,0 +1,71 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$0")"

CHANNEL=$1
if [[ -z $CHANNEL ]]; then
  echo "usage: $0 [channel]"
  exit 1
fi

case $CHANNEL in
edge)
  DASHBOARD=testnet-monitor-edge
  ;;
beta)
  DASHBOARD=testnet-monitor-beta
  ;;
stable)
  DASHBOARD=testnet-monitor
  ;;
*)
  echo "Error: Invalid CHANNEL=$CHANNEL"
  exit 1
  ;;
esac


if [[ -z $GRAFANA_API_TOKEN ]]; then
  echo Error: GRAFANA_API_TOKEN not defined
  exit 1
fi

DASHBOARD_JSON=./testnet-monitor.json
if [[ ! -r $DASHBOARD_JSON ]]; then
  echo Error: $DASHBOARD_JSON not found
fi

(
  set -x
  ./adjust-dashboard-for-channel.py "$DASHBOARD_JSON" "$CHANNEL"
)

rm -rf venv
python3 -m venv venv
# shellcheck source=/dev/null
source venv/bin/activate

echo --- Fetch/build grafcli
(
  set -x
  git clone git@github.com:mvines/grafcli.git -b experimental-v5 venv/grafcli
  cd venv/grafcli
  python3 setup.py install
)

echo --- Take a backup of existing dashboard if possible
(
  set -x +e
  grafcli export remote/metrics/$DASHBOARD $DASHBOARD_JSON.org
  grafcli rm remote/metrics/$DASHBOARD
  :
)

echo --- Publish $DASHBOARD_JSON to $DASHBOARD
(
  set -x
  grafcli import $DASHBOARD_JSON remote/metrics
)

exit 0
metrics/testnet-monitor.json (new file, 5576 lines)

File diff suppressed because it is too large.
@@ -49,8 +49,6 @@ elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
     declare program="$1"
     printf "solana-%s" "$program"
   }
-  # CUDA was/wasn't selected at build time, can't affect CUDA state here
-  unset SOLANA_CUDA
 else
   solana_program() {
     declare program="$1"

@@ -104,16 +102,16 @@ tune_networking() {
     # test the existence of the sysctls before trying to set them
     # go ahead and return true and don't exit if these calls fail
     sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.rmem_max=67108864 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.rmem_max=1610612736 1>/dev/null 2>/dev/null

     sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.rmem_default=1610612736 1>/dev/null 2>/dev/null

     sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.wmem_max=67108864 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.wmem_max=1610612736 1>/dev/null 2>/dev/null

     sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
-      sudo sysctl -w net.core.wmem_default=26214400 1>/dev/null 2>/dev/null
+      sudo sysctl -w net.core.wmem_default=1610612736 1>/dev/null 2>/dev/null
   ) || true
 fi
net/gce.sh (68 lines changed)

@@ -11,7 +11,6 @@ gce)
   # shellcheck source=net/scripts/gce-provider.sh
   source "$here"/scripts/gce-provider.sh

-  imageName="ubuntu-16-04-cuda-9-2-new"
   cpuLeaderMachineType=n1-standard-16
   gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
   leaderMachineType=$cpuLeaderMachineType

@@ -22,12 +21,11 @@ ec2)
   # shellcheck source=net/scripts/ec2-provider.sh
   source "$here"/scripts/ec2-provider.sh

-  imageName="ami-0466e26ccc0e752c1"
   cpuLeaderMachineType=m4.4xlarge
   gpuLeaderMachineType=p2.xlarge
   leaderMachineType=$cpuLeaderMachineType
-  validatorMachineType=m4.xlarge
-  clientMachineType=m4.4xlarge
+  validatorMachineType=m4.2xlarge
+  clientMachineType=m4.2xlarge
   ;;
 *)
   echo "Error: Unknown cloud provider: $cloudProvider"

@@ -118,7 +116,7 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
   ;;
 g)
   enableGpu=true
-  leaderMachineType="$gpuLeaderMachineType"
+  leaderMachineType=$gpuLeaderMachineType
   ;;
 G)
   enableGpu=true

@@ -131,14 +129,53 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
   bootDiskType=$OPTARG
   ;;
 *)
-  usage "Error: unhandled option: $opt"
+  usage "unhandled option: $opt"
   ;;
 esac
 done
 shift $((OPTIND - 1))

 [[ -z $1 ]] || usage "Unexpected argument: $1"
-sshPrivateKey="$netConfigDir/id_$prefix"
+if [[ $cloudProvider = ec2 ]]; then
+  # EC2 keys can't be retrieved from running instances like GCE keys can so save
+  # EC2 keys in the user's home directory so |./ec2.sh config| can at least be
+  # used on the same host that ran |./ec2.sh create| .
+  sshPrivateKey="$HOME/.ssh/solana-net-id_$prefix"
+else
+  sshPrivateKey="$netConfigDir/id_$prefix"
+fi
+
+case $cloudProvider in
+gce)
+  if $enableGpu; then
+    # TODO: GPU image is still 16.04-based pending resolution of
+    # https://github.com/solana-labs/solana/issues/1702
+    imageName="ubuntu-16-04-cuda-9-2-new"
+  else
+    imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
+  fi
+  ;;
+ec2)
+  # Deep Learning AMI (Ubuntu 16.04-based)
+  case $region in # (region global variable is set by cloud_SetZone)
+  us-east-1)
+    imageName="ami-047daf3f2b162fc35"
+    ;;
+  us-west-1)
+    imageName="ami-08c8c7c4a57a6106d"
+    ;;
+  us-west-2)
+    imageName="ami-0b63040ee445728bf"
+    ;;
+  *)
+    usage "Unsupported region: $region"
+    ;;
+  esac
+  ;;
+*)
+  echo "Error: Unknown cloud provider: $cloudProvider"
+  ;;
+esac


 # cloud_ForEachInstance [cmd] [extra args to cmd]

@@ -206,13 +243,18 @@ EOF

   echo "Waiting for $name to finish booting..."
   (
-    for i in $(seq 1 30); do
-      if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.instance-startup-complete"); then
-        break
+    set -x +e
+    for i in $(seq 1 60); do
+      timeout 20s ssh "${sshOptions[@]}" "$publicIp" "ls -l /.instance-startup-complete"
+      ret=$?
+      if [[ $ret -eq 0 ]]; then
+        exit 0
       fi
       sleep 2
       echo "Retry $i..."
     done
+    echo "$name failed to boot."
+    exit 1
   )
   echo "$name has booted."
 }

@@ -230,7 +272,7 @@ EOF
   IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")

   # Try to ping the machine first.
-  timeout 60s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
+  timeout 90s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"

   if [[ ! -r $sshPrivateKey ]]; then
     echo "Fetching $sshPrivateKey from $leaderName"

@@ -376,6 +418,10 @@ $(
       install-earlyoom.sh \
       install-libssl-compatability.sh \
       install-rsync.sh \
+      network-config.sh \
+      remove-docker-interface.sh \
+      update-default-cuda.sh \
+
 )

 cat > /etc/motd <<EOM
net/net.sh (79 lines changed)

@@ -23,10 +23,14 @@ Operate a configured testnet
 restart  - Shortcut for stop then start

 start-specific options:
-  -S [snapFilename]           - Deploy the specified Snap file
-  -s edge|beta|stable         - Deploy the latest Snap on the specified Snap release channel
-  -f [cargoFeatures]          - List of |cargo --features=| to activate
-                                (ignored if -s or -S is specified)
+  -S [snapFilename]           - Deploy the specified Snap file
+  -s edge|beta|stable         - Deploy the latest Snap on the specified Snap release channel
+  -T [tarFilename]            - Deploy the specified release tarball
+  -t edge|beta|stable|vX.Y.Z  - Deploy the latest tarball release for the
+                                specified release channel (edge|beta|stable) or release tag
+                                (vX.Y.Z)
+  -f [cargoFeatures]          - List of |cargo --features=| to activate
+                                (ignored if -s or -S is specified)

 Note: if RUST_LOG is set in the environment it will be propagated into the
       network nodes.

@@ -44,6 +48,7 @@ EOF
 }

 snapChannel=
+releaseChannel=
 snapFilename=
 deployMethod=local
 sanityExtraArgs=

@@ -53,7 +58,7 @@ command=$1
 [[ -n $command ]] || usage
 shift

-while getopts "h?S:s:o:f:" opt; do
+while getopts "h?S:s:T:t:o:f:" opt; do
   case $opt in
   h | \?)
     usage

@@ -74,6 +79,22 @@ while getopts "h?S:s:o:f:" opt; do
       ;;
     esac
     ;;
+  T)
+    tarballFilename=$OPTARG
+    [[ -f $tarballFilename ]] || usage "Tarball not readable: $tarballFilename"
+    deployMethod=tar
+    ;;
+  t)
+    case $OPTARG in
+    edge|beta|stable|v*)
+      releaseChannel=$OPTARG
+      deployMethod=tar
+      ;;
+    *)
+      usage "Invalid release channel: $OPTARG"
+      ;;
+    esac
+    ;;
   f)
     cargoFeatures=$OPTARG
     ;;

@@ -139,6 +160,9 @@ startLeader() {
   snap)
     rsync -vPrc -e "ssh ${sshOptions[*]}" "$snapFilename" "$ipAddress:~/solana/solana.snap"
     ;;
+  tar)
+    rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
+    ;;
   local)
     rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
     ;;

@@ -182,7 +206,7 @@ startClient() {
     set -x
     startCommon "$ipAddress"
     ssh "${sshOptions[@]}" -f "$ipAddress" \
-      "./solana/net/remote/remote-client.sh $deployMethod $entrypointIp $expectedNodeCount \"$RUST_LOG\""
+      "./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \"$RUST_LOG\""
   ) >> "$logFile" 2>&1 || {
     cat "$logFile"
     echo "^^^ +++"

@@ -197,10 +221,11 @@ sanity() {
   echo "--- Sanity"
   $metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"

+  declare host=$leaderIp # TODO: maybe use ${validatorIpList[0]} ?
   (
     set -x
     # shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
-    ssh "${sshOptions[@]}" "$leaderIp" \
+    ssh "${sshOptions[@]}" "$host" \
       "./solana/net/remote/remote-sanity.sh $sanityExtraArgs"
   ) || ok=false

@@ -220,13 +245,17 @@ start() {
         set -ex;
         apt-get -qq update;
         apt-get -qq -y install snapd;
-        snap download --channel=$snapChannel solana;
+        until snap download --channel=$snapChannel solana; do
+          sleep 1;
+        done
       "
     )
   else
     (
       cd "$SOLANA_ROOT"
-      snap download --channel="$snapChannel" solana
+      until snap download --channel="$snapChannel" solana; do
+        sleep 1
+      done
     )
   fi
   snapFilename="$(echo "$SOLANA_ROOT"/solana_*.snap)"

@@ -236,6 +265,17 @@ start() {
     }
   fi
   ;;
+tar)
+  if [[ -n $releaseChannel ]]; then
+    rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
+    cd "$SOLANA_ROOT"
+
+    set -x
+    curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
+    tarballFilename=solana-release.tar.bz2
+  fi
+  tar jxvf $tarballFilename
+  ;;
 local)
   build
   ;;

@@ -287,15 +327,28 @@ start() {
   clientDeployTime=$SECONDS
   $metricsWriteDatapoint "testnet-deploy net-start-complete=1"

-  if [[ $deployMethod = "snap" ]]; then
-    declare networkVersion=unknown
+  declare networkVersion=unknown
+  case $deployMethod in
+  snap)
     IFS=\ read -r _ networkVersion _ < <(
       ssh "${sshOptions[@]}" "$leaderIp" \
         "snap info solana | grep \"^installed:\""
     )
     networkVersion=${networkVersion/0+git./}
-    $metricsWriteDatapoint "testnet-deploy version=\"$networkVersion\""
-  fi
+    ;;
+  tar)
+    networkVersion="$(
+      tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown"
+    )"
+    ;;
+  local)
+    networkVersion="$(git rev-parse HEAD || echo local-unknown)"
+    ;;
+  *)
+    usage "Internal error: invalid deployMethod: $deployMethod"
+    ;;
+  esac
+  $metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""

   echo
   echo "+++ Deployment Successful"
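Putting the new options together, a tarball deploy of an already-configured network is a one-liner (illustrative invocations built from the usage text above):

```bash
# Start the configured testnet from the latest edge release tarball
net/net.sh start -t edge

# Or deploy a specific tagged release
net/net.sh start -t v0.10.5
```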
@@ -6,8 +6,7 @@ echo "$(date) | $0 $*" > client.log

 deployMethod="$1"
 entrypointIp="$2"
-numNodes="$3"
-RUST_LOG="$4"
+RUST_LOG="$3"
 export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info

 missing() {

@@ -17,7 +16,6 @@ missing() {

 [[ -n $deployMethod ]] || missing deployMethod
 [[ -n $entrypointIp ]] || missing entrypointIp
-[[ -n $numNodes ]] || missing numNodes

 source net/common.sh
 loadConfigFile

@@ -35,7 +33,7 @@ snap)
   solana_bench_tps=/snap/bin/solana.bench-tps
   solana_keygen=/snap/bin/solana.keygen
   ;;
-local)
+local|tar)
   PATH="$HOME"/.cargo/bin:"$PATH"
   export USE_INSTALL=1
   export SOLANA_DEFAULT_METRICS_RATE=1

@@ -58,8 +56,7 @@ clientCommand="\
 $solana_bench_tps \
   --network $entrypointIp:8001 \
   --identity client.json \
-  --num-nodes $numNodes \
-  --duration 600 \
+  --duration 7500 \
   --sustained \
   --threads $threadCount \
 "
@@ -35,7 +35,6 @@ else
   setupArgs="-l"
 fi

-
 case $deployMethod in
 snap)
   SECONDS=0

@@ -78,20 +77,25 @@ snap)

   echo "Succeeded in ${SECONDS} seconds"
   ;;
-local)
+local|tar)
   PATH="$HOME"/.cargo/bin:"$PATH"
   export USE_INSTALL=1
   export RUST_LOG
   export SOLANA_DEFAULT_METRICS_RATE=1

   ./fetch-perf-libs.sh
-  export LD_LIBRARY_PATH="$PWD/target/perf-libs:$LD_LIBRARY_PATH"
+  export LD_LIBRARY_PATH="$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
+  echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"

   scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
   scripts/net-stats.sh > net-stats.log 2>&1 &

   case $nodeType in
   leader)
+    if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
+      echo Selecting solana-fullnode-cuda
+      export SOLANA_CUDA=1
+    fi
     ./multinode-demo/setup.sh -t leader $setupArgs
     ./multinode-demo/drone.sh > drone.log 2>&1 &
     ./multinode-demo/leader.sh > leader.log 2>&1 &

@@ -99,6 +103,11 @@ local)
   validator)
     net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/

+    if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
+      echo Selecting solana-fullnode-cuda
+      export SOLANA_CUDA=1
+    fi
+
     ./multinode-demo/setup.sh -t validator $setupArgs
     ./multinode-demo/validator.sh "$entrypointIp":~/solana "$entrypointIp:8001" >validator.log 2>&1 &
     ;;
@@ -65,7 +65,7 @@ snap)
   client_id=~/snap/solana/current/config/client-id.json

   ;;
-local)
+local|tar)
   PATH="$HOME"/.cargo/bin:"$PATH"
   export USE_INSTALL=1
   entrypointRsyncUrl="$entrypointIp:~/solana"
@@ -31,7 +31,7 @@ __cloud_FindInstances() {

   declare name zone publicIp privateIp status
   while read -r name publicIp privateIp status; do
-    printf "%-30s | publicIp=%-16s privateIp=%s staus=%s\n" "$name" "$publicIp" "$privateIp" "$status"
+    printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"

     instances+=("$name:$publicIp:$privateIp")
   done < <(gcloud compute instances list \

@@ -128,6 +128,9 @@ cloud_CreateInstances() {
     --no-restart-on-failure
   )

+  # shellcheck disable=SC2206 # Do not want to quote $imageName as it may contain extra args
+  args+=(--image $imageName)
+
   # shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
   args+=(--machine-type $machineType)
   if [[ -n $optionalBootDiskSize ]]; then
@ -13,8 +13,8 @@ sysctl -w kernel.sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
|
||||
if command -v earlyoom; then
|
||||
systemctl status earlyoom
|
||||
else
|
||||
wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.1-*_amd64.deb' -e robots=off -nd
|
||||
apt install --quiet --yes ./earlyoom_1.1-*_amd64.deb
|
||||
wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.2-*_amd64.deb' -e robots=off -nd
|
||||
apt install --quiet --yes ./earlyoom_1.2-*_amd64.deb
|
||||
|
||||
cat > earlyoom <<OOM
|
||||
# use the kernel OOM killer, trigger at 20% available RAM,
|
||||
|
@ -12,7 +12,6 @@ apt-get --assume-yes install libssl-dev
|
||||
#
|
||||
# cc: https://github.com/solana-labs/solana/issues/1090
|
||||
# cc: https://packages.ubuntu.com/bionic/amd64/libssl1.1/download
|
||||
wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
|
||||
dpkg -i libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
|
||||
rm libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
|
||||
|
||||
wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
|
||||
dpkg -i libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
|
||||
rm libssl1.1_1.1.0g-2ubuntu4.3_amd64.deb
|
||||
|
11
net/scripts/network-config.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash -ex
|
||||
#
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
sudo sysctl -w net.core.rmem_default=1610612736
|
||||
sudo sysctl -w net.core.rmem_max=1610612736
|
||||
|
||||
sudo sysctl -w net.core.wmem_default=1610612736
|
||||
sudo sysctl -w net.core.wmem_max=1610612736
|
11
net/scripts/remove-docker-interface.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash -ex
|
||||
#
|
||||
# Some instances have Docker running, and the docker0 network interface
# confuses gossip, causing airdrops to fail. As a workaround, simply remove
# the docker0 interface for now
|
||||
#
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
ip link delete docker0 || true
|
9
net/scripts/update-default-cuda.sh
Executable file
@ -0,0 +1,9 @@
|
||||
#!/bin/bash -ex
|
||||
#
|
||||
# Updates the default cuda symlink to the supported version
|
||||
#
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
ln -sfT /usr/local/cuda-9.2 /usr/local/cuda
|
1
programs/bpf/c/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
/out/
|
1
programs/bpf/c/makefile
Normal file
@ -0,0 +1 @@
|
||||
include sdk/bpf.mk
|
63
programs/bpf/c/sdk/README.md
Normal file
@ -0,0 +1,63 @@
|
||||
|
||||
## Prerequisites
|
||||
|
||||
## LLVM / clang 7.0.0
|
||||
http://releases.llvm.org/download.html
|
||||
|
||||
### Linux Ubuntu 16.04 (xenial)
|
||||
```
|
||||
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
$ sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install -y clang-7
|
||||
```
|
||||
|
||||
### Linux Ubuntu 14.04 (trusty)
|
||||
```
|
||||
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
$ sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main"
|
||||
$ sudo apt-get update
|
||||
$ sudo apt-get install -y clang-7
|
||||
```
|
||||
|
||||
### macOS
|
||||
The following depends on Homebrew; instructions for installing Homebrew are at https://brew.sh
|
||||
|
||||
Once Homebrew is installed, ensure the latest llvm is installed:
|
||||
```
|
||||
$ brew update # <- ensure your brew is up to date
|
||||
$ brew install llvm # <- should output “Warning: llvm 7.0.0 is already installed and up-to-date”
|
||||
$ brew --prefix llvm # <- should output “/usr/local/opt/llvm”
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
### Quick start
|
||||
To get started create a `makefile` containing:
|
||||
```make
|
||||
include path/to/bpf.mk
|
||||
```
|
||||
and `src/program.c` containing:
|
||||
```c
|
||||
#include <solana_sdk.h>
|
||||
|
||||
bool entrypoint(const uint8_t *input) {
|
||||
SolKeyedAccounts ka[1];
|
||||
const uint8_t *data;
|
||||
uint64_t data_len;
|
||||
|
||||
if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
|
||||
return false;
|
||||
}
|
||||
sol_log_params(1, ka, data, data_len);
|
||||
return true;
|
||||
}
|
||||
```
|
||||
|
||||
Then run `make` to build `out/program.o`.
|
||||
Run `make help` for more details.
|
||||
|
||||
### Limitations
|
||||
* Programs must be fully contained within a single .c file
|
||||
* No libc is available but `solana_sdk.h` provides a minimal set of
|
||||
primitives.
|
115
programs/bpf/c/sdk/bpf.mk
Normal file
@ -0,0 +1,115 @@
|
||||
|
||||
all:
|
||||
.PHONY: help all clean
|
||||
|
||||
ifneq ($(V),1)
|
||||
_@ :=@
|
||||
endif
|
||||
|
||||
INC_DIRS ?=
|
||||
SRC_DIR ?= ./src
|
||||
OUT_DIR ?= ./out
|
||||
|
||||
OS=$(shell uname)
|
||||
ifeq ($(OS),Darwin)
|
||||
LLVM_DIR ?= $(shell brew --prefix llvm)
|
||||
endif
|
||||
|
||||
ifdef LLVM_DIR
|
||||
CC := $(LLVM_DIR)/bin/clang
|
||||
LLC := $(LLVM_DIR)/bin/llc
|
||||
OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump
|
||||
else
|
||||
CC := clang-7
|
||||
LLC := llc-7
|
||||
OBJ_DUMP := llvm-objdump-7
|
||||
endif
|
||||
|
||||
SYSTEM_INC_DIRS := -isystem $(dir $(lastword $(MAKEFILE_LIST)))inc
|
||||
|
||||
CC_FLAGS := \
|
||||
-Werror \
|
||||
-target bpf \
|
||||
-O2 \
|
||||
-emit-llvm \
|
||||
-fno-builtin \
|
||||
|
||||
LLC_FLAGS := \
|
||||
-march=bpf \
|
||||
-filetype=obj \
|
||||
|
||||
OBJ_DUMP_FLAGS := \
|
||||
-color \
|
||||
-source \
|
||||
-disassemble \
|
||||
|
||||
help:
|
||||
@echo 'BPF Program makefile'
|
||||
@echo ''
|
||||
@echo 'This makefile will build BPF Programs from C source files into ELFs'
|
||||
@echo ''
|
||||
@echo 'Assumptions:'
|
||||
@echo ' - Programs are a single .c source file (may include headers)'
|
||||
@echo ' - Programs are located in the source directory: $(SRC_DIR)'
|
||||
@echo ' - Programs are named by their basename (eg. file name:foo.c -> program name:foo)'
|
||||
@echo ' - Output files will be placed in the directory: $(OUT_DIR)'
|
||||
@echo ''
|
||||
@echo 'User settings'
|
||||
@echo ' - The following settings are overridable on the command line, default values shown:'
|
||||
@echo ' - Show commands while building:'
|
||||
@echo ' V=1'
|
||||
@echo ' - List of include directories:'
|
||||
@echo ' INC_DIRS=$(INC_DIRS)'
|
||||
@echo ' - List of system include directories:'
|
||||
@echo ' SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)'
|
||||
@echo ' - Location of source files:'
|
||||
@echo ' SRC_DIR=$(SRC_DIR)'
|
||||
@echo ' - Location to place output files:'
|
||||
@echo ' OUT_DIR=$(OUT_DIR)'
|
||||
@echo ' - Location of LLVM:'
|
||||
@echo ' LLVM_DIR=$(LLVM_DIR)'
|
||||
@echo ''
|
||||
@echo 'Usage:'
|
||||
@echo ' - make help - This help message'
|
||||
@echo ' - make all - Builds all the programs in the directory: $(SRC_DIR)'
|
||||
@echo ' - make clean - Cleans all programs'
|
||||
@echo ' - make dump_<program name> - Dumps the contents of the program to stdout'
|
||||
@echo ' - make <program name> - Build a single program by name'
|
||||
@echo ''
|
||||
@echo 'Available programs:'
|
||||
$(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n))
|
||||
@echo ''
|
||||
@echo 'Example:'
|
||||
@echo ' - Assuming a program named foo (src/foo.c)'
|
||||
@echo ' - make foo'
|
||||
@echo ' - make dump_foo'
|
||||
|
||||
.PRECIOUS: $(OUT_DIR)/%.bc
|
||||
$(OUT_DIR)/%.bc: $(SRC_DIR)/%.c
|
||||
@echo "[cc] $@ ($<)"
|
||||
$(_@)mkdir -p $(OUT_DIR)
|
||||
$(_@)$(CC) $(CC_FLAGS) $(SYSTEM_INC_DIRS) $(INC_DIRS) -o $@ -c $< -MD -MF $(@:.bc=.d)
|
||||
|
||||
.PRECIOUS: $(OUT_DIR)/%.o
|
||||
$(OUT_DIR)/%.o: $(OUT_DIR)/%.bc
|
||||
@echo "[llc] $@ ($<)"
|
||||
$(_@)$(LLC) $(LLC_FLAGS) -o $@ $<
|
||||
|
||||
-include $(wildcard $(OUT_DIR)/*.d)
|
||||
|
||||
PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*.c)))
|
||||
|
||||
define \n
|
||||
|
||||
|
||||
endef
|
||||
|
||||
all: $(PROGRAM_NAMES)
|
||||
|
||||
%: $(addprefix $(OUT_DIR)/, %.o) ;
|
||||
|
||||
dump_%: %
|
||||
$(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .o, $<))
|
||||
|
||||
clean:
|
||||
rm -rf $(OUT_DIR)
|
298
programs/bpf/c/sdk/inc/solana_sdk.h
Normal file
@ -0,0 +1,298 @@
|
||||
#pragma once
|
||||
/**
|
||||
* @brief Solana C-based BPF program utility functions and types
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Numeric types
|
||||
*/
|
||||
#ifndef __LP64__
|
||||
#error LP64 data model required
|
||||
#endif
|
||||
|
||||
typedef signed char int8_t;
|
||||
typedef unsigned char uint8_t;
|
||||
typedef signed short int16_t;
|
||||
typedef unsigned short uint16_t;
|
||||
typedef signed int int32_t;
|
||||
typedef unsigned int uint32_t;
|
||||
typedef signed long int int64_t;
|
||||
typedef unsigned long int uint64_t;
|
||||
|
||||
/**
|
||||
* NULL
|
||||
*/
|
||||
#define NULL 0
|
||||
|
||||
/**
|
||||
* Boolean type
|
||||
*/
|
||||
typedef enum { false = 0, true } bool;
|
||||
|
||||
/**
|
||||
* Helper function that prints a string to stdout
|
||||
*/
|
||||
extern void sol_log(const char*);
|
||||
|
||||
/**
|
||||
* Helper function that prints 64-bit values, represented in hexadecimal,
* to stdout
|
||||
*/
|
||||
extern void sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
|
||||
|
||||
/**
|
||||
* Prefix for all BPF functions
|
||||
*
|
||||
* This prefix should be used for helper functions so that they are always
* inlined, since BPF does not yet support program-internal function calls
|
||||
*/
|
||||
#define SOL_FN_PREFIX __attribute__((always_inline)) static
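/*
 * Editor's illustration (not part of the original SDK): any helper defined
 * with SOL_FN_PREFIX is force-inlined into the single BPF entrypoint.
 * The name example_min_u64 is hypothetical.
 */
SOL_FN_PREFIX uint64_t example_min_u64(uint64_t a, uint64_t b) {
  return a < b ? a : b;
}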
|
||||
|
||||
/**
|
||||
* Size of Public key in bytes
|
||||
*/
|
||||
#define SIZE_PUBKEY 32
|
||||
|
||||
/**
|
||||
* Public key
|
||||
*/
|
||||
typedef struct {
|
||||
uint8_t x[SIZE_PUBKEY];
|
||||
} SolPubkey;
|
||||
|
||||
/**
|
||||
* Compares two public keys
|
||||
*
|
||||
* @param one First public key
|
||||
* @param two Second public key
|
||||
* @return true if the same
|
||||
*/
|
||||
SOL_FN_PREFIX bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) {
|
||||
for (int i = 0; i < sizeof(*one); i++) {
|
||||
if (one->x[i] != two->x[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Keyed Accounts
|
||||
*/
|
||||
typedef struct {
|
||||
SolPubkey *key; /** Public Key of the account owner */
|
||||
int64_t *tokens; /** Number of tokens owned by this account */
|
||||
uint64_t userdata_len; /** Length of userdata in bytes */
|
||||
uint8_t *userdata; /** On-chain data owned by this account */
|
||||
SolPubkey *program_id; /** Program that owns this account */
|
||||
} SolKeyedAccounts;
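/*
 * Editor's sketch (assumed usage, not part of the original SDK): a common
 * check is that an account is owned by the expected program before trusting
 * its userdata. example_owned_by is a hypothetical name.
 */
SOL_FN_PREFIX bool example_owned_by(const SolKeyedAccounts *account,
                                    const SolPubkey *expected_program_id) {
  return SolPubkey_same(account->program_id, expected_program_id);
}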
|
||||
|
||||
/**
|
||||
* Copies memory
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_memcpy(void *dst, const void *src, int len) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
*((uint8_t *)dst + i) = *((const uint8_t *)src + i);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compares memory
|
||||
*/
|
||||
SOL_FN_PREFIX int sol_memcmp(const void *s1, const void *s2, int n) {
|
||||
for (int i = 0; i < n; i++) {
|
||||
int diff = *((const uint8_t *)s1 + i) - *((const uint8_t *)s2 + i);
|
||||
if (diff) {
|
||||
return diff;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes the number of elements in an array
|
||||
*/
|
||||
#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
|
||||
|
||||
/**
|
||||
* Panics
|
||||
*
|
||||
* Prints the line number where the panic occurred and then causes
|
||||
* the BPF VM to halt execution immediately. No account userdata is updated
|
||||
*/
|
||||
#define sol_panic() _sol_panic(__LINE__)
|
||||
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
|
||||
sol_log_64(0xFF, 0xFF, 0xFF, 0xFF, line);
|
||||
uint8_t *pv = (uint8_t *)1;
|
||||
*pv = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Asserts
|
||||
*/
|
||||
#define sol_assert(expr)      \
  do {                        \
    if (!(expr)) {            \
      _sol_panic(__LINE__);   \
    }                         \
  } while (0) /* do/while(0) keeps sol_assert safe inside if/else chains */
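/*
 * Editor's sketch (not part of the original SDK): sol_assert is a natural
 * fit for validating instruction data before dereferencing it; on failure
 * the VM halts via _sol_panic. example_read_amount is a hypothetical name.
 */
SOL_FN_PREFIX int64_t example_read_amount(const uint8_t *data,
                                          uint64_t data_len) {
  sol_assert(data_len >= sizeof(int64_t));
  return *(const int64_t *)data;
}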
|
||||
|
||||
/**
|
||||
* De-serializes the input parameters into usable types
|
||||
*
|
||||
* Use this function to deserialize the buffer passed to the program entrypoint
|
||||
* into usable types. This function does not perform copy deserialization;
* instead, it populates the pointers and lengths in SolKeyedAccounts and data
* so that any modification to tokens or account data takes place on the
* original buffer. Doing so also eliminates the need to serialize back into
* the buffer at program end.
|
||||
*
|
||||
* @param input Source buffer containing serialized input parameters
|
||||
* @param ka Pointer to an array of SolKeyedAccounts to deserialize into
|
||||
* @param ka_len Number of SolKeyedAccounts entries in `ka`
|
||||
* @param ka_len_out If NULL, fill exactly `ka_len` accounts or fail.
|
||||
* If not NULL, fill up to `ka_len` accounts and return the
|
||||
* number of filled accounts in `ka_len_out`.
|
||||
* @param data On return, a pointer to the instruction data
|
||||
* @param data_len On return, the length in bytes of the instruction data
|
||||
* @return Boolean true if successful
|
||||
*/
|
||||
SOL_FN_PREFIX bool sol_deserialize(
|
||||
const uint8_t *input,
|
||||
SolKeyedAccounts *ka,
|
||||
uint64_t ka_len,
|
||||
uint64_t *ka_len_out,
|
||||
const uint8_t **data,
|
||||
uint64_t *data_len
|
||||
) {
|
||||
|
||||
|
||||
if (ka_len_out == NULL) {
|
||||
if (ka_len != *(uint64_t *) input) {
|
||||
return false;
|
||||
}
|
||||
ka_len = *(uint64_t *) input;
|
||||
} else {
|
||||
if (ka_len > *(uint64_t *) input) {
|
||||
ka_len = *(uint64_t *) input;
|
||||
}
|
||||
*ka_len_out = ka_len;
|
||||
}
|
||||
|
||||
input += sizeof(uint64_t);
|
||||
for (int i = 0; i < ka_len; i++) {
|
||||
// key
|
||||
ka[i].key = (SolPubkey *) input;
|
||||
input += sizeof(SolPubkey);
|
||||
|
||||
// tokens
|
||||
ka[i].tokens = (int64_t *) input;
|
||||
input += sizeof(int64_t);
|
||||
|
||||
// account userdata
|
||||
ka[i].userdata_len = *(uint64_t *) input;
|
||||
input += sizeof(uint64_t);
|
||||
ka[i].userdata = (uint8_t *) input; /* writable in place, cast away const */
|
||||
input += ka[i].userdata_len;
|
||||
|
||||
// program_id
|
||||
ka[i].program_id = (SolPubkey *) input;
|
||||
input += sizeof(SolPubkey);
|
||||
}
|
||||
|
||||
// input data
|
||||
*data_len = *(uint64_t *) input;
|
||||
input += sizeof(uint64_t);
|
||||
*data = input;
|
||||
|
||||
return true;
|
||||
}
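/*
 * Editor's sketch (not part of the original SDK): passing a non-NULL
 * ka_len_out accepts up to SOL_ARRAY_SIZE(ka) accounts and reports how many
 * were actually provided. example_flexible_entrypoint is a hypothetical name.
 */
SOL_FN_PREFIX bool example_flexible_entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[4];
  uint64_t ka_count;
  const uint8_t *data;
  uint64_t data_len;

  if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), &ka_count,
                       &data, &data_len)) {
    return false;
  }
  sol_log_64(0, 0, 0, 0, ka_count); // log the number of accounts received
  return true;
}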
|
||||
|
||||
/**
|
||||
* Debugging utilities
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Prints the hexadecimal representation of a public key
|
||||
*
|
||||
* @param key The public key to print
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_log_key(const SolPubkey *key) {
|
||||
for (int j = 0; j < sizeof(*key); j++) {
|
||||
sol_log_64(0, 0, 0, j, key->x[j]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prints the hexadecimal representation of an array
|
||||
*
|
||||
* @param array The array to print
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_log_array(const uint8_t *array, int len) {
|
||||
for (int j = 0; j < len; j++) {
|
||||
sol_log_64(0, 0, 0, j, array[j]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prints the hexadecimal representation of the program's input parameters
|
||||
*
|
||||
* @param num_ka Numer of SolKeyedAccounts to print
|
||||
* @param ka A pointer to an array of SolKeyedAccounts to print
|
||||
* @param data A pointer to the instruction data to print
|
||||
* @param data_len The length in bytes of the instruction data
|
||||
*/
|
||||
SOL_FN_PREFIX void sol_log_params(
|
||||
uint64_t num_ka,
|
||||
const SolKeyedAccounts *ka,
|
||||
const uint8_t *data,
|
||||
uint64_t data_len
|
||||
) {
|
||||
sol_log_64(0, 0, 0, 0, num_ka);
|
||||
for (int i = 0; i < num_ka; i++) {
|
||||
sol_log_key(ka[i].key);
|
||||
sol_log_64(0, 0, 0, 0, *ka[i].tokens);
|
||||
sol_log_array(ka[i].userdata, ka[i].userdata_len);
|
||||
sol_log_key(ka[i].program_id);
|
||||
}
|
||||
sol_log_array(data, data_len);
|
||||
}
|
||||
|
||||
/**@}*/
|
||||
|
||||
/**
|
||||
* Program entrypoint
|
||||
* @{
|
||||
*
|
||||
* The following is an example of a simple program that prints the input
|
||||
* parameters it received:
|
||||
*
|
||||
* bool entrypoint(const uint8_t *input) {
|
||||
* SolKeyedAccounts ka[1];
|
||||
* const uint8_t *data;
|
||||
* uint64_t data_len;
|
||||
*
|
||||
* if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
|
||||
* return false;
|
||||
* }
|
||||
* sol_log_params(1, ka, data, data_len);
|
||||
* return true;
|
||||
* }
|
||||
*/
|
||||
|
||||
/**
|
||||
* Program entrypoint signature
|
||||
*
|
||||
* @param input An array containing serialized input parameters
|
||||
* @return true if successful
|
||||
*/
|
||||
extern bool entrypoint(const uint8_t *input);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
/**@}*/
|
32
programs/bpf/c/src/move_funds.c
Normal file
@ -0,0 +1,32 @@
|
||||
/**
|
||||
* @brief Example C-based BPF program that moves funds from one account to
|
||||
* another
|
||||
*/
|
||||
|
||||
#include <solana_sdk.h>
|
||||
|
||||
/**
|
||||
* Number of SolKeyedAccounts expected. The program should bail if an
|
||||
* unexpected number of accounts is passed to the program's entrypoint
|
||||
*/
|
||||
#define NUM_KA 3
|
||||
|
||||
extern bool entrypoint(const uint8_t *input) {
|
||||
SolKeyedAccounts ka[NUM_KA];
|
||||
const uint8_t *data;
|
||||
uint64_t data_len;
|
||||
|
||||
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (data_len < sizeof(int64_t)) { // reject short instruction data
  return false;
}
int64_t tokens = *(const int64_t *)data;
|
||||
if (*ka[0].tokens >= tokens) {
|
||||
*ka[0].tokens -= tokens;
|
||||
*ka[2].tokens += tokens;
|
||||
// sol_log_64(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
|
||||
} else {
|
||||
// sol_log_64(0, 0, 0xFF, *ka[0].tokens, tokens);
|
||||
}
|
||||
return true;
|
||||
}
|
35
programs/bpf/c/src/noop.c
Normal file
@ -0,0 +1,35 @@
|
||||
/**
|
||||
* @brief Example C-based BPF program that prints out the parameters
|
||||
* passed to it
|
||||
*/
|
||||
|
||||
#include <solana_sdk.h>
|
||||
|
||||
/**
|
||||
* Number of SolKeyedAccounts expected. The program should bail if an
|
||||
* unexpected number of accounts is passed to the program's entrypoint
|
||||
*/
|
||||
#define NUM_KA 1
|
||||
|
||||
extern bool entrypoint(const uint8_t *input) {
|
||||
SolKeyedAccounts ka[NUM_KA];
|
||||
const uint8_t *data;
|
||||
uint64_t data_len;
|
||||
|
||||
sol_log("noop");
|
||||
|
||||
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
|
||||
return false;
|
||||
}
|
||||
sol_log_params(NUM_KA, ka, data, data_len);
|
||||
|
||||
sol_assert(sizeof(int8_t) == 1);
|
||||
sol_assert(sizeof(uint8_t) == 1);
|
||||
sol_assert(sizeof(int16_t) == 2);
|
||||
sol_assert(sizeof(uint16_t) == 2);
|
||||
sol_assert(sizeof(int32_t) == 4);
|
||||
sol_assert(sizeof(uint32_t) == 4);
|
||||
sol_assert(sizeof(int64_t) == 8);
|
||||
sol_assert(sizeof(uint64_t) == 8);
|
||||
return true;
|
||||
}
|
@ -1,9 +0,0 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
OUTDIR="${1:-../../../target/release/}"
|
||||
THISDIR=$(dirname "$0")
|
||||
mkdir -p "$OUTDIR"
|
||||
/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/move_funds_c.bc -c "$THISDIR"/src/move_funds.c
|
||||
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/move_funds_c.o "$OUTDIR"/move_funds_c.bc
|
||||
|
||||
#/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/move_funds_c.o
|
@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/move_funds_c.o
|
@ -1,140 +0,0 @@
|
||||
|
||||
//#include <stdint.h>
|
||||
//#include <stddef.h>
|
||||
|
||||
#if 1
|
||||
// one way to define a helper function is with index as a fixed value
|
||||
#define BPF_TRACE_PRINTK_IDX 6
|
||||
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
|
||||
#else
|
||||
// relocation is another option
|
||||
extern int sol_print(int, int, int, int, int);
|
||||
#endif
|
||||
|
||||
typedef long long unsigned int uint64_t;
|
||||
typedef long long int int64_t;
|
||||
typedef unsigned char uint8_t;
|
||||
|
||||
typedef enum { false = 0, true } bool;
|
||||
|
||||
#define SIZE_PUBKEY 32
|
||||
typedef struct {
|
||||
uint8_t x[SIZE_PUBKEY];
|
||||
} SolPubkey;
|
||||
|
||||
typedef struct {
|
||||
SolPubkey *key;
|
||||
int64_t* tokens;
|
||||
uint64_t userdata_len;
|
||||
uint8_t *userdata;
|
||||
SolPubkey *program_id;
|
||||
} SolKeyedAccounts;
|
||||
|
||||
// TODO support BPF function calls rather than forcing everything to be inlined
|
||||
#define SOL_FN_PREFIX __attribute__((always_inline)) static
|
||||
|
||||
// TODO move this to a registered helper
|
||||
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
|
||||
}
|
||||
}
|
||||
|
||||
#define sol_panic() _sol_panic(__LINE__)
|
||||
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
|
||||
sol_print(0, 0, 0xFF, 0xFF, line);
|
||||
char *pv = (char *)1;
|
||||
*pv = 1;
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka, SolKeyedAccounts *ka,
|
||||
uint8_t **userdata, uint64_t *userdata_len) {
|
||||
if (num_ka != *(uint64_t *)src) {
|
||||
return 0;
|
||||
}
|
||||
src += sizeof(uint64_t);
|
||||
|
||||
// TODO fixed iteration loops ok? unrolled?
|
||||
for (int i = 0; i < num_ka; i++) { // TODO this should end up unrolled, confirm
|
||||
// key
|
||||
ka[i].key = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
|
||||
// tokens
|
||||
ka[i].tokens = (int64_t *)src;
|
||||
src += sizeof(int64_t);
|
||||
|
||||
// account userdata
|
||||
ka[i].userdata_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
ka[i].userdata = src;
|
||||
src += ka[i].userdata_len;
|
||||
|
||||
// program_id
|
||||
ka[i].program_id = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
}
|
||||
// tx userdata
|
||||
*userdata_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
*userdata = src;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
// -- Debug --
|
||||
|
||||
SOL_FN_PREFIX void print_key(SolPubkey *key) {
|
||||
for (int j = 0; j < SIZE_PUBKEY; j++) {
|
||||
sol_print(0, 0, 0, j, key->x[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_userdata(uint8_t *data, int len) {
|
||||
for (int j = 0; j < len; j++) {
|
||||
sol_print(0, 0, 0, j, data[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
|
||||
uint8_t *userdata, uint64_t userdata_len) {
|
||||
sol_print(0, 0, 0, 0, num_ka);
|
||||
for (int i = 0; i < num_ka; i++) {
|
||||
// key
|
||||
print_key(ka[i].key);
|
||||
|
||||
// tokens
|
||||
sol_print(0, 0, 0, 0, *ka[i].tokens);
|
||||
|
||||
// account userdata
|
||||
print_userdata(ka[i].userdata, ka[i].userdata_len);
|
||||
|
||||
// program_id
|
||||
print_key(ka[i].program_id);
|
||||
}
|
||||
// tx userdata
|
||||
print_userdata(userdata, userdata_len);
|
||||
}
|
||||
|
||||
uint64_t entrypoint(char *buf) {
|
||||
SolKeyedAccounts ka[3];
|
||||
uint64_t userdata_len;
|
||||
uint8_t *userdata;
|
||||
|
||||
if (1 != sol_deserialize((uint8_t *)buf, 3, ka, &userdata, &userdata_len)) {
|
||||
return 1;
|
||||
}
|
||||
print_params(3, ka, userdata, userdata_len);
|
||||
|
||||
int64_t tokens = *(int64_t*)userdata;
|
||||
if (*ka[0].tokens >= tokens) {
|
||||
*ka[0].tokens -= tokens;
|
||||
*ka[2].tokens += tokens;
|
||||
//sol_print(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
|
||||
} else {
|
||||
//sol_print(0, 0, 0xFF, *ka[0].tokens, tokens);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1,9 +0,0 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
OUTDIR="${1:-../../../target/release/}"
|
||||
THISDIR=$(dirname "$0")
|
||||
mkdir -p "$OUTDIR"
|
||||
/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/noop_c.bc -c "$THISDIR"/src/noop.c
|
||||
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/noop_c.o "$OUTDIR"/noop_c.bc
|
||||
|
||||
#/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/noop_c.o
|
@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/noop_c.o
|
@ -1,133 +0,0 @@
|
||||
|
||||
//#include <stdint.h>
|
||||
//#include <stddef.h>
|
||||
|
||||
#if 1
|
||||
// one way to define a helper function is with index as a fixed value
|
||||
#define BPF_TRACE_PRINTK_IDX 6
|
||||
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
|
||||
#else
|
||||
// relocation is another option
|
||||
extern int sol_print(int, int, int, int, int);
|
||||
#endif
|
||||
|
||||
typedef long long unsigned int uint64_t;
|
||||
typedef long long int int64_t;
|
||||
typedef unsigned char uint8_t;
|
||||
|
||||
typedef enum { false = 0, true } bool;
|
||||
|
||||
#define SIZE_PUBKEY 32
|
||||
typedef struct {
|
||||
uint8_t x[SIZE_PUBKEY];
|
||||
} SolPubkey;
|
||||
|
||||
typedef struct {
|
||||
SolPubkey *key;
|
||||
int64_t* tokens;
|
||||
uint64_t userdata_len;
|
||||
uint8_t *userdata;
|
||||
SolPubkey *program_id;
|
||||
} SolKeyedAccounts;
|
||||
|
||||
// TODO support BPF function calls rather than forcing everything to be inlined
|
||||
#define SOL_FN_PREFIX __attribute__((always_inline)) static
|
||||
|
||||
// TODO move this to a registered helper
|
||||
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
|
||||
}
|
||||
}
|
||||
|
||||
#define sol_panic() _sol_panic(__LINE__)
|
||||
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
|
||||
sol_print(0, 0, 0xFF, 0xFF, line);
|
||||
char *pv = (char *)1;
|
||||
*pv = 1;
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka, SolKeyedAccounts *ka,
|
||||
uint8_t **userdata, uint64_t *userdata_len) {
|
||||
if (num_ka != *(uint64_t *)src) {
|
||||
return 0;
|
||||
}
|
||||
src += sizeof(uint64_t);
|
||||
|
||||
// TODO fixed iteration loops ok? unrolled?
|
||||
for (int i = 0; i < num_ka; i++) { // TODO this should end up unrolled, confirm
|
||||
// key
|
||||
ka[i].key = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
|
||||
// tokens
|
||||
ka[i].tokens = (int64_t *)src;
|
||||
src += sizeof(int64_t);
|
||||
|
||||
// account userdata
|
||||
ka[i].userdata_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
ka[i].userdata = src;
|
||||
src += ka[i].userdata_len;
|
||||
|
||||
// program_id
|
||||
ka[i].program_id = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
}
|
||||
// tx userdata
|
||||
*userdata_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
*userdata = src;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
// -- Debug --
|
||||
|
||||
SOL_FN_PREFIX void print_key(SolPubkey *key) {
|
||||
for (int j = 0; j < SIZE_PUBKEY; j++) {
|
||||
sol_print(0, 0, 0, j, key->x[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_userdata(uint8_t *data, int len) {
|
||||
for (int j = 0; j < len; j++) {
|
||||
sol_print(0, 0, 0, j, data[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
|
||||
uint8_t *userdata, uint64_t userdata_len) {
|
||||
sol_print(0, 0, 0, 0, num_ka);
|
||||
for (int i = 0; i < num_ka; i++) {
|
||||
// key
|
||||
print_key(ka[i].key);
|
||||
|
||||
// tokens
|
||||
sol_print(0, 0, 0, 0, *ka[i].tokens);
|
||||
|
||||
// account userdata
|
||||
print_userdata(ka[i].userdata, ka[i].userdata_len);
|
||||
|
||||
// program_id
|
||||
print_key(ka[i].program_id);
|
||||
}
|
||||
// tx userdata
|
||||
print_userdata(userdata, userdata_len);
|
||||
}
|
||||
|
||||
// -- Program entrypoint --
|
||||
|
||||
uint64_t entrypoint(char *buf) {
|
||||
SolKeyedAccounts ka[1];
|
||||
uint64_t userdata_len;
|
||||
uint8_t *userdata;
|
||||
|
||||
if (1 != sol_deserialize((uint8_t *)buf, 1, ka, &userdata, &userdata_len)) {
|
||||
return 0;
|
||||
}
|
||||
print_params(1, ka, userdata, userdata_len);
|
||||
return 1;
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-bpf-noop"
|
||||
version = "0.10.0-pre2"
|
||||
version = "0.10.5"
|
||||
description = "Solana BPF noop program"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -8,4 +8,4 @@ license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
rbpf = "0.1.0"
|
||||
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
|
||||
solana-sdk = { path = "../../../../sdk", version = "0.10.5" }
|
@ -1,9 +0,0 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
OUTDIR="${1:-../../../target/release/}"
|
||||
THISDIR=$(dirname "$0")
|
||||
mkdir -p "$OUTDIR"
|
||||
/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/tictactoe_c.bc -c "$THISDIR"/src/tictactoe.c
|
||||
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/tictactoe_c.o "$OUTDIR"/tictactoe_c.bc
|
||||
|
||||
# /usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/tictactoe_c.o
|
@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/tictactoe_c.o
|
@ -1,373 +0,0 @@
|
||||
//#include <stdint.h>
|
||||
//#include <stddef.h>
|
||||
|
||||
#if 1
|
||||
#define BPF_TRACE_PRINTK_IDX 6
|
||||
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
|
||||
#else
|
||||
// relocation is another option
|
||||
extern int sol_print(int, int, int, int, int);
|
||||
#endif
|
||||
|
||||
typedef long long unsigned int uint64_t;
|
||||
typedef long long int int64_t;
|
||||
typedef unsigned char uint8_t;
|
||||
|
||||
typedef enum { false = 0, true } bool;
|
||||
|
||||
// TODO support BPF function calls rather than forcing everything to be inlined
|
||||
#define SOL_FN_PREFIX __attribute__((always_inline)) static
|
||||
|
||||
// TODO move this to a registered helper
|
||||
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
|
||||
}
|
||||
}
|
||||
|
||||
#define sol_trace() sol_print(0, 0, 0xFF, 0xFF, (__LINE__));
|
||||
#define sol_panic() _sol_panic(__LINE__)
|
||||
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
|
||||
sol_print(0, 0, 0xFF, 0xFF, line);
|
||||
char *pv = (char *)1;
|
||||
*pv = 1;
|
||||
}
|
||||
|
||||
#define SIZE_PUBKEY 32
|
||||
typedef struct {
|
||||
uint8_t x[SIZE_PUBKEY];
|
||||
} SolPubkey;
|
||||
|
||||
typedef struct {
|
||||
SolPubkey *key;
|
||||
int64_t tokens;
|
||||
uint64_t userdata_len;
|
||||
uint8_t *userdata;
|
||||
SolPubkey *program_id;
|
||||
} SolKeyedAccounts;
|
||||
|
||||
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka,
|
||||
SolKeyedAccounts *ka, uint8_t **tx_data,
|
||||
uint64_t *tx_data_len) {
|
||||
if (num_ka != *(uint64_t *)src) {
|
||||
return 0;
|
||||
}
|
||||
src += sizeof(uint64_t);
|
||||
|
||||
// TODO fixed iteration loops ok? unrolled?
|
||||
for (int i = 0; i < num_ka;
|
||||
i++) { // TODO this should end up unrolled, confirm
|
||||
// key
|
||||
ka[i].key = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
|
||||
// tokens
|
||||
ka[i].tokens = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
|
||||
// account userdata
|
||||
ka[i].userdata_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
ka[i].userdata = src;
|
||||
src += ka[i].userdata_len;
|
||||
|
||||
// program_id
|
||||
ka[i].program_id = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
}
|
||||
// tx userdata
|
||||
*tx_data_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
*tx_data = src;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
// // -- Debug --
|
||||
|
||||
SOL_FN_PREFIX void print_key(SolPubkey *key) {
|
||||
for (int j = 0; j < SIZE_PUBKEY; j++) {
|
||||
sol_print(0, 0, 0, j, key->x[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_data(uint8_t *data, int len) {
|
||||
for (int j = 0; j < len; j++) {
|
||||
sol_print(0, 0, 0, j, data[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
|
||||
uint8_t *tx_data, uint64_t tx_data_len) {
|
||||
sol_print(0, 0, 0, 0, num_ka);
|
||||
for (int i = 0; i < num_ka; i++) {
|
||||
// key
|
||||
print_key(ka[i].key);
|
||||
|
||||
// tokens
|
||||
sol_print(0, 0, 0, 0, ka[i].tokens);
|
||||
|
||||
// account userdata
|
||||
print_data(ka[i].userdata, ka[i].userdata_len);
|
||||
|
||||
// program_id
|
||||
print_key(ka[i].program_id);
|
||||
}
|
||||
// tx userdata
|
||||
print_data(tx_data, tx_data_len);
|
||||
}
|
||||
|
||||
// -- TicTacToe --
|
||||
|
||||
// Board Coordinates
|
||||
// | 0,0 | 1,0 | 2,0 |
|
||||
// | 0,1 | 1,1 | 2,1 |
|
||||
// | 0,2 | 1,2 | 2,2 |
|
||||
|
||||
typedef enum {
|
||||
Result_Ok,
|
||||
Result_Panic,
|
||||
Result_GameInProgress,
|
||||
Result_InvalidArguments,
|
||||
Result_InvalidMove,
|
||||
Result_InvalidUserdata,
|
||||
Result_InvalidTimestamp,
|
||||
Result_NoGame,
|
||||
Result_NotYourTurn,
|
||||
Result_PlayerNotFound,
|
||||
Result_UserdataTooSmall,
|
||||
} Result;
|
||||
|
||||
typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;
|
||||
|
||||
typedef enum {
|
||||
State_Waiting,
|
||||
State_XMove,
|
||||
State_OMove,
|
||||
State_XWon,
|
||||
State_OWon,
|
||||
State_Draw,
|
||||
} State;
|
||||
|
||||
typedef struct {
|
||||
// Player who initialized the game
|
||||
SolPubkey player_x;
|
||||
// Player who joined the game
|
||||
SolPubkey player_o;
|
||||
// Current state of the game
|
||||
State state;
|
||||
// Tracks the player moves
|
||||
BoardItem board[9];
|
||||
// Keep Alive for each player
|
||||
int64_t keep_alive[2];
|
||||
} Game;
|
||||
|
||||
typedef enum {
|
||||
Command_Init = 0,
|
||||
Command_Join,
|
||||
Command_KeepAlive,
|
||||
Command_Move,
|
||||
} Command;
|
||||
|
||||
SOL_FN_PREFIX void game_dump_board(Game *self) {
|
||||
sol_print(0, 0, 0x9, 0x9, 0x9);
|
||||
sol_print(0, 0, self->board[0], self->board[1], self->board[2]);
|
||||
sol_print(0, 0, self->board[3], self->board[4], self->board[5]);
|
||||
sol_print(0, 0, self->board[6], self->board[7], self->board[8]);
|
||||
sol_print(0, 0, 0x9, 0x9, 0x9);
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void game_create(Game *self, SolPubkey *player_x) {
|
||||
sol_memcpy(self->player_x.x, player_x, SIZE_PUBKEY);
|
||||
// TODO self->player_o = 0;
|
||||
self->state = State_Waiting;
|
||||
self->keep_alive[0] = 0;
|
||||
self->keep_alive[1] = 0;
|
||||
|
||||
// TODO fixed iteration loops ok? unrolled?
|
||||
for (int i = 0; i < 9; i++) {
|
||||
self->board[i] = BoardItem_F;
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX Result game_join(Game *self, SolPubkey *player_o,
|
||||
int64_t timestamp) {
|
||||
if (self->state == State_Waiting) {
|
||||
sol_memcpy(self->player_o.x, player_o, SIZE_PUBKEY);
|
||||
self->state = State_XMove;
|
||||
|
||||
if (timestamp <= self->keep_alive[1]) {
|
||||
return Result_InvalidTimestamp;
|
||||
} else {
|
||||
self->keep_alive[1] = timestamp;
|
||||
return Result_Ok;
|
||||
}
|
||||
}
|
||||
return Result_GameInProgress;
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX bool game_same(BoardItem x_or_o, BoardItem one, BoardItem two,
|
||||
BoardItem three) {
|
||||
if (x_or_o == one && x_or_o == two && x_or_o == three) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX bool game_same_player(SolPubkey *one, SolPubkey *two) {
|
||||
// TODO fixed iteration loops ok? unrolled?
|
||||
for (int i = 0; i < SIZE_PUBKEY; i++) {
|
||||
if (one->x[i] != two->x[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX Result game_next_move(Game *self, SolPubkey *player, int x,
|
||||
int y) {
|
||||
int board_index = y * 3 + x;
|
||||
if (board_index >= 9 || self->board[board_index] != BoardItem_F) {
|
||||
return Result_InvalidMove;
|
||||
}
|
||||
|
||||
BoardItem x_or_o;
|
||||
State won_state;
|
||||
|
||||
switch (self->state) {
|
||||
case State_XMove:
|
||||
if (!game_same_player(player, &self->player_x)) {
|
||||
return Result_PlayerNotFound;
|
||||
}
|
||||
self->state = State_OMove;
|
||||
x_or_o = BoardItem_X;
|
||||
won_state = State_XWon;
|
||||
break;
|
||||
|
||||
case State_OMove:
|
||||
if (!game_same_player(player, &self->player_o)) {
|
||||
return Result_PlayerNotFound;
|
||||
}
|
||||
self->state = State_XMove;
|
||||
x_or_o = BoardItem_O;
|
||||
won_state = State_OWon;
|
||||
break;
|
||||
|
||||
default:
|
||||
return Result_NotYourTurn;
|
||||
}
|
||||
|
||||
self->board[board_index] = x_or_o;
|
||||
|
||||
// game_dump_board(self);
|
||||
|
||||
bool winner =
|
||||
// Check rows
|
||||
game_same(x_or_o, self->board[0], self->board[1], self->board[2]) ||
|
||||
game_same(x_or_o, self->board[3], self->board[4], self->board[5]) ||
|
||||
game_same(x_or_o, self->board[6], self->board[7], self->board[8]) ||
|
||||
// Check columns
|
||||
game_same(x_or_o, self->board[0], self->board[3], self->board[6]) ||
|
||||
game_same(x_or_o, self->board[1], self->board[4], self->board[7]) ||
|
||||
game_same(x_or_o, self->board[2], self->board[5], self->board[8]) ||
|
||||
// Check both diagonals
|
||||
game_same(x_or_o, self->board[0], self->board[4], self->board[8]) ||
|
||||
game_same(x_or_o, self->board[2], self->board[4], self->board[6]);
|
||||
|
||||
if (winner) {
|
||||
self->state = won_state;
|
||||
}
|
||||
|
||||
{
|
||||
int draw = true;
|
||||
// TODO fixed iteration loops ok? unrolled?
|
||||
for (int i = 0; i < 9; i++) {
|
||||
if (BoardItem_F == self->board[i]) {
|
||||
draw = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (draw) {
|
||||
self->state = State_Draw;
|
||||
}
|
||||
}
|
||||
return Result_Ok;
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX Result game_keep_alive(Game *self, SolPubkey *player,
|
||||
int64_t timestamp) {
|
||||
switch (self->state) {
|
||||
case State_Waiting:
|
||||
case State_XMove:
|
||||
case State_OMove:
|
||||
if (game_same_player(player, &self->player_x)) {
|
||||
if (timestamp <= self->keep_alive[0]) {
|
||||
return Result_InvalidTimestamp;
|
||||
}
|
||||
self->keep_alive[0] = timestamp;
|
||||
} else if (game_same_player(player, &self->player_o)) {
|
||||
if (timestamp <= self->keep_alive[1]) {
|
||||
return Result_InvalidTimestamp;
|
||||
}
|
||||
self->keep_alive[1] = timestamp;
|
||||
} else {
|
||||
return Result_PlayerNotFound;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return Result_Ok;
|
||||
}
|
||||
|
||||
// accounts[0] On Init must be player X, after that doesn't matter,
|
||||
// anybody can cause a dashboard update
|
||||
// accounts[1] must be a TicTacToe state account
|
||||
// accounts[2] must be account of current player, only Pubkey is used
|
||||
uint64_t entrypoint(uint8_t *buf) {
|
||||
SolKeyedAccounts ka[3];
|
||||
uint64_t tx_data_len;
|
||||
uint8_t *tx_data;
|
||||
int err = 0;
|
||||
|
||||
if (1 != sol_deserialize(buf, 3, ka, &tx_data, &tx_data_len)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (sizeof(Game) > ka[1].userdata_len) {
|
||||
sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
|
||||
return false;
|
||||
}
|
||||
Game game;
|
||||
sol_memcpy(&game, ka[1].userdata, sizeof(game));
|
||||
|
||||
Command command = *tx_data;
|
||||
switch (command) {
|
||||
case Command_Init:
|
||||
game_create(&game, ka[2].key);
|
||||
break;
|
||||
|
||||
case Command_Join:
|
||||
err = game_join(&game, ka[2].key, *((int64_t *)(tx_data + 4)));
|
||||
break;
|
||||
|
||||
case Command_KeepAlive:
|
||||
err = game_keep_alive(&game, ka[2].key, /*TODO*/ 0);
|
||||
break;
|
||||
|
||||
case Command_Move:
|
||||
err = game_next_move(&game, ka[2].key, tx_data[4], tx_data[5]);
|
||||
break;
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
sol_memcpy(ka[1].userdata, &game, sizeof(game));
|
||||
sol_print(0, 0, 0, err, game.state);
|
||||
if (Result_Ok != err) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
@ -1,9 +0,0 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
OUTDIR="${1:-../../../target/release/}"
|
||||
THISDIR=$(dirname "$0")
|
||||
mkdir -p "$OUTDIR"
|
||||
/usr/local/opt/llvm/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/tictactoe_dashboard_c.bc -c "$THISDIR"/src/tictactoe_dashboard.c
|
||||
/usr/local/opt/llvm/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/tictactoe_dashboard_c.o "$OUTDIR"/tictactoe_dashboard_c.bc
|
||||
|
||||
# /usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/tictactoe_dashboard_c.o
|
@ -1,3 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/tictactoe_dashboard_c.o
|
@ -1,236 +0,0 @@
|
||||
//#include <stdint.h>
|
||||
//#include <stddef.h>
|
||||
|
||||
#if 1
|
||||
#define BPF_TRACE_PRINTK_IDX 6
|
||||
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
|
||||
#else
|
||||
// relocation is another option
|
||||
extern int sol_print(int, int, int, int, int);
|
||||
#endif
|
||||
|
||||
typedef long long unsigned int uint64_t;
|
||||
typedef long long int int64_t;
|
||||
typedef long unsigned int uint32_t;
|
||||
typedef long int int32_t;
|
||||
typedef unsigned char uint8_t;
|
||||
|
||||
typedef enum { false = 0, true } bool;
|
||||
|
||||
// TODO support BPF function calls rather than forcing everything to be inlined
|
||||
#define SOL_FN_PREFIX __attribute__((always_inline)) static
|
||||
|
||||
// TODO move this to a registered helper
|
||||
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
|
||||
}
|
||||
}
|
||||
|
||||
#define sol_trace() sol_print(0, 0, 0xFF, 0xFF, (__LINE__));
|
||||
#define sol_panic() _sol_panic(__LINE__)
|
||||
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
|
||||
sol_print(0, 0, 0xFF, 0xFF, line);
|
||||
char *pv = (char *)1;
|
||||
*pv = 1;
|
||||
}
|
||||
|
||||
#define SIZE_PUBKEY 32
|
||||
typedef struct {
|
||||
uint8_t x[SIZE_PUBKEY];
|
||||
} SolPubkey;
|
||||
|
||||
SOL_FN_PREFIX bool SolPubkey_same(SolPubkey *one, SolPubkey *two) {
|
||||
for (int i = 0; i < SIZE_PUBKEY; i++) {
|
||||
if (one->x[i] != two->x[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
SolPubkey *key;
|
||||
int64_t tokens;
|
||||
uint64_t userdata_len;
|
||||
uint8_t *userdata;
|
||||
SolPubkey *program_id;
|
||||
} SolKeyedAccounts;
|
||||
|
||||
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka,
|
||||
SolKeyedAccounts *ka, uint8_t **tx_data,
|
||||
uint64_t *tx_data_len) {
|
||||
if (num_ka != *(uint64_t *)src) {
|
||||
return 0;
|
||||
}
|
||||
src += sizeof(uint64_t);
|
||||
|
||||
// TODO fixed iteration loops ok? unrolled?
|
||||
for (int i = 0; i < num_ka;
|
||||
i++) { // TODO this should end up unrolled, confirm
|
||||
// key
|
||||
ka[i].key = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
|
||||
// tokens
|
||||
ka[i].tokens = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
|
||||
// account userdata
|
||||
ka[i].userdata_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
ka[i].userdata = src;
|
||||
src += ka[i].userdata_len;
|
||||
|
||||
// program_id
|
||||
ka[i].program_id = (SolPubkey *)src;
|
||||
src += SIZE_PUBKEY;
|
||||
}
|
||||
// tx userdata
|
||||
*tx_data_len = *(uint64_t *)src;
|
||||
src += sizeof(uint64_t);
|
||||
*tx_data = src;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
// -- Debug --
|
||||
|
||||
SOL_FN_PREFIX void print_key(SolPubkey *key) {
|
||||
for (int j = 0; j < SIZE_PUBKEY; j++) {
|
||||
sol_print(0, 0, 0, j, key->x[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_data(uint8_t *data, int len) {
|
||||
for (int j = 0; j < len; j++) {
|
||||
sol_print(0, 0, 0, j, data[j]);
|
||||
}
|
||||
}
|
||||
|
||||
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
|
||||
uint8_t *tx_data, uint64_t tx_data_len) {
|
||||
sol_print(0, 0, 0, 0, num_ka);
|
||||
for (int i = 0; i < num_ka; i++) {
|
||||
// key
|
||||
print_key(ka[i].key);
|
||||
|
||||
// tokens
|
||||
sol_print(0, 0, 0, 0, ka[i].tokens);
|
||||
|
||||
// account userdata
|
||||
print_data(ka[i].userdata, ka[i].userdata_len);
|
||||
|
||||
// program_id
|
||||
print_key(ka[i].program_id);
|
||||
}
|
||||
// tx userdata
|
||||
print_data(tx_data, tx_data_len);
|
||||
}
|
||||
|
||||
// -- TicTacToe Dashboard --
|
||||
|
||||
// TODO put this in a common place for both tictactoe and tictactoe_dashboard
|
||||
typedef enum {
|
||||
State_Waiting,
|
||||
State_XMove,
|
||||
State_OMove,
|
||||
State_XWon,
|
||||
State_OWon,
|
||||
State_Draw,
|
||||
} State;
|
||||
|
||||
// TODO put this in a common place for both tictactoe and tictactoe_dashboard
|
||||
typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;
|
||||
|
||||
// TODO put this in a common place for both tictactoe and tictactoe_dashboard
|
||||
typedef struct {
|
||||
SolPubkey player_x;
|
||||
SolPubkey player_o;
|
||||
State state;
|
||||
BoardItem board[9];
|
||||
int64_t keep_alive[2];
|
||||
} Game;
|
||||
|
||||
#define MAX_GAMES_TRACKED 5
|
||||
|
||||
typedef struct {
|
||||
// Latest pending game
|
||||
SolPubkey pending;
|
||||
// Last N completed games (0 is the latest)
|
||||
SolPubkey completed[MAX_GAMES_TRACKED];
|
||||
// Index into completed pointing to latest game completed
|
||||
uint32_t latest_game;
|
||||
// Total number of completed games
|
||||
uint32_t total;
|
||||
} Dashboard;
|
||||
|
||||
SOL_FN_PREFIX bool update(Dashboard *self, Game *game, SolPubkey *game_pubkey) {
|
||||
switch (game->state) {
|
||||
case State_Waiting:
|
||||
sol_memcpy(&self->pending, game_pubkey, SIZE_PUBKEY);
|
||||
break;
|
||||
case State_XMove:
|
||||
case State_OMove:
|
||||
// Nothing to do. In progress games are not managed by the dashboard
|
||||
break;
|
||||
case State_XWon:
|
||||
case State_OWon:
|
||||
case State_Draw:
|
||||
for (int i = 0; i < MAX_GAMES_TRACKED; i++) {
|
||||
if (SolPubkey_same(&self->completed[i], game_pubkey)) {
|
||||
// TODO: Once the PoH height is exposed to programs, it could be used
// to ensure that old games are not being re-added and causing total
// to increment incorrectly.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
self->total += 1;
|
||||
self->latest_game = (self->latest_game + 1) % MAX_GAMES_TRACKED;
|
||||
sol_memcpy(self->completed[self->latest_game].x, game_pubkey,
|
||||
SIZE_PUBKEY);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// accounts[0] doesn't matter, anybody can cause a dashboard update
|
||||
// accounts[1] must be a Dashboard account
|
||||
// accounts[2] must be a Game account
|
||||
uint64_t entrypoint(uint8_t *buf) {
|
||||
SolKeyedAccounts ka[3];
|
||||
uint64_t tx_data_len;
|
||||
uint8_t *tx_data;
|
||||
int err = 0;
|
||||
|
||||
if (1 != sol_deserialize(buf, 3, ka, &tx_data, &tx_data_len)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// TODO check dashboard and game program ids (how to check now that they are
|
||||
// not known values)
|
||||
// TODO check validity of dashboard and game structures contents
|
||||
if (sizeof(Dashboard) > ka[1].userdata_len) {
|
||||
sol_print(0, 0, 0xFF, sizeof(Dashboard), ka[2].userdata_len);
|
||||
return false;
|
||||
}
|
||||
Dashboard dashboard;
|
||||
sol_memcpy(&dashboard, ka[1].userdata, sizeof(dashboard));
|
||||
|
||||
if (sizeof(Game) > ka[2].userdata_len) {
|
||||
sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
|
||||
return false;
|
||||
}
|
||||
Game game;
|
||||
sol_memcpy(&game, ka[2].userdata, sizeof(game));
|
||||
if (true != update(&dashboard, &game, ka[2].key)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
sol_memcpy(ka[1].userdata, &dashboard, sizeof(dashboard));
|
||||
return true;
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-bpfloader"
|
||||
version = "0.10.0-pre2"
|
||||
version = "0.10.5"
|
||||
description = "Solana BPF Loader"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,12 +13,12 @@ elf = "0.0.10"
|
||||
env_logger = "0.5.12"
|
||||
libc = "0.2.43"
|
||||
log = "0.4.2"
|
||||
rbpf = "0.1.0"
|
||||
solana_rbpf = "0.1.3"
|
||||
serde = "1.0.27"
|
||||
serde_derive = "1.0.27"
|
||||
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
|
||||
solana-sdk = { path = "../../../sdk", version = "0.10.5" }
|
||||
|
||||
[lib]
|
||||
name = "bpf_loader"
|
||||
name = "solana_bpf_loader"
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
use rbpf::ebpf;
|
||||
use solana_rbpf::ebpf;
|
||||
use std::io::{Error, ErrorKind};
|
||||
|
||||
fn reject<S: AsRef<str>>(msg: S) -> Result<(), Error> {
|
||||
|
@ -5,37 +5,31 @@ extern crate byteorder;
|
||||
extern crate env_logger;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate rbpf;
|
||||
extern crate libc;
|
||||
extern crate solana_rbpf;
|
||||
extern crate solana_sdk;
|
||||
|
||||
use bincode::deserialize;
|
||||
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
|
||||
use libc::c_char;
|
||||
use solana_rbpf::EbpfVmRaw;
|
||||
use solana_sdk::account::KeyedAccount;
|
||||
use solana_sdk::loader_instruction::LoaderInstruction;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::ffi::CStr;
|
||||
use std::io::prelude::*;
|
||||
use std::io::Error;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::mem;
|
||||
use std::sync::{Once, ONCE_INIT};
|
||||
|
||||
fn create_vm(prog: &[u8]) -> Result<rbpf::EbpfVmRaw, Error> {
|
||||
let mut vm = rbpf::EbpfVmRaw::new(None)?;
|
||||
vm.set_verifier(bpf_verifier::check)?;
|
||||
vm.set_program(&prog)?;
|
||||
vm.register_helper(
|
||||
rbpf::helpers::BPF_TRACE_PRINTK_IDX,
|
||||
rbpf::helpers::bpf_trace_printf,
|
||||
)?;
|
||||
Ok(vm)
|
||||
}
|
||||
|
||||
// TODO use rbpf's disassemble
|
||||
#[allow(dead_code)]
|
||||
fn dump_program(key: &Pubkey, prog: &[u8]) {
|
||||
let mut eight_bytes: Vec<u8> = Vec::new();
|
||||
println!("BPF Program: {:?}", key);
|
||||
info!("BPF Program: {:?}", key);
|
||||
for i in prog.iter() {
|
||||
if eight_bytes.len() >= 7 {
|
||||
println!("{:02X?}", eight_bytes);
|
||||
info!("{:02X?}", eight_bytes);
|
||||
eight_bytes.clear();
|
||||
} else {
|
||||
eight_bytes.push(i.clone());
|
||||
@ -43,6 +37,67 @@ fn dump_program(key: &Pubkey, prog: &[u8]) {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
pub fn helper_sol_log_verify(
|
||||
addr: u64,
|
||||
unused2: u64,
|
||||
unused3: u64,
|
||||
unused4: u64,
|
||||
unused5: u64,
|
||||
ro_regions: &[&[u8]],
|
||||
unused7: &[&[u8]],
|
||||
) -> Result<(), Error> {
|
||||
for region in ro_regions.iter() {
|
||||
if region.as_ptr() as u64 <= addr
|
||||
&& addr as u64 <= region.as_ptr() as u64 + region.len() as u64
|
||||
{
|
||||
let c_buf: *const c_char = addr as *const c_char;
|
||||
let max_size = (region.as_ptr() as u64 + region.len() as u64) - addr;
|
||||
unsafe {
|
||||
for i in 0..max_size {
|
||||
if std::ptr::read(c_buf.offset(i as isize)) == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
return Err(Error::new(ErrorKind::Other, "Error: unterminated string"));
|
||||
}
|
||||
}
|
||||
Err(Error::new(
|
||||
ErrorKind::Other,
|
||||
"Error: Load segfault, bad string pointer",
|
||||
))
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
pub fn helper_sol_log(addr: u64, unused2: u64, unused3: u64, unused4: u64, unused5: u64) -> u64 {
|
||||
let c_buf: *const c_char = addr as *const c_char;
|
||||
let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
|
||||
match c_str.to_str() {
|
||||
Ok(slice) => info!("sol_log: {:?}", slice),
|
||||
Err(e) => warn!("Error: Cannot print invalid string"),
|
||||
};
|
||||
0
|
||||
}
|
||||
|
||||
pub fn helper_sol_log_u64(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
|
||||
info!(
|
||||
"sol_log_u64: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
|
||||
arg1, arg2, arg3, arg4, arg5
|
||||
);
|
||||
0
|
||||
}
|
||||
|
||||
fn create_vm(prog: &[u8]) -> Result<EbpfVmRaw, Error> {
|
||||
let mut vm = EbpfVmRaw::new(None)?;
|
||||
vm.set_verifier(bpf_verifier::check)?;
|
||||
vm.set_max_instruction_count(36000)?; // 36000 is a wag, need to tune
|
||||
vm.set_elf(&prog)?;
|
||||
vm.register_helper_ex("sol_log", Some(helper_sol_log_verify), helper_sol_log)?;
|
||||
vm.register_helper_ex("sol_log_64", None, helper_sol_log_u64)?;
|
||||
Ok(vm)
|
||||
}
|
||||
|
||||
fn serialize_parameters(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> Vec<u8> {
    assert_eq!(32, mem::size_of::<Pubkey>());

@@ -90,12 +145,12 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -

    if keyed_accounts[0].account.executable {
        let prog = keyed_accounts[0].account.userdata.clone();
        trace!("Call BPF, {} Instructions", prog.len() / 8);
        trace!("Call BPF, {} instructions", prog.len() / 8);
        //dump_program(keyed_accounts[0].key, &prog);
        let vm = match create_vm(&prog) {
        let mut vm = match create_vm(&prog) {
            Ok(vm) => vm,
            Err(e) => {
                warn!("{}", e);
                warn!("create_vm failed: {}", e);
                return false;
            }
        };
@@ -105,20 +160,24 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
                return false;
            },
            Err(e) => {
                warn!("{}", e);
                warn!("execute_program failed: {}", e);
                return false;
            }
        }
        deserialize_parameters(&mut keyed_accounts[1..], &v);
        trace!(
            "BPF program executed {} instructions",
            vm.get_last_instruction_count()
        );
    } else if let Ok(instruction) = deserialize(tx_data) {
        match instruction {
            LoaderInstruction::Write { offset, bytes } => {
                let offset = offset as usize;
                let len = bytes.len();
                trace!("BpfLoader::Write offset {} length {:?}", offset, len);
                debug!("Write: offset={} length={}", offset, len);
                if keyed_accounts[0].account.userdata.len() < offset + len {
                    println!(
                        "Overflow {} < {}",
                    warn!(
                        "Write overflow: {} < {}",
                        keyed_accounts[0].account.userdata.len(),
                        offset + len
                    );
@@ -128,7 +187,7 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
            }
            LoaderInstruction::Finalize => {
                keyed_accounts[0].account.executable = true;
                trace!("BPfLoader::Finalize prog: {:?}", keyed_accounts[0].key);
                info!("Finalize: account {:?}", keyed_accounts[0].key);
            }
        }
    } else {
@@ -136,3 +195,36 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
    }
    true
}

#[cfg(test)]
mod tests {
    use super::*;
    use solana_rbpf::helpers;

    #[test]
    #[should_panic(expected = "Error: Execution exceeded maximum number of instructions")]
    fn test_non_terminating_program() {
        #[rustfmt::skip]
        let prog = &[
            0xb7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r6 = 0
            0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = 0
            0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r2 = 0
            0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
            0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r4 = 0
            0xbf, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r5 = r6
            0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call 6
            0x07, 0x06, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // r6 + 1
            0x05, 0x00, 0xf8, 0xff, 0x00, 0x00, 0x00, 0x00, // goto -8
            0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
        ];
        let input = &mut [0x00];

        let mut vm = EbpfVmRaw::new(None).unwrap();
        vm.set_verifier(bpf_verifier::check).unwrap();
        vm.set_max_instruction_count(36000).unwrap(); // 36000 is a guess; needs tuning
        vm.set_program(prog).unwrap();
        vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helpers::bpf_trace_printf)
            .unwrap();
        vm.execute_program(input).unwrap();
    }
}
@@ -1,6 +1,6 @@
[package]
name = "solana-lualoader"
version = "0.10.0-pre2"
version = "0.10.5"
description = "Solana Lua Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,12 +13,12 @@ log = "0.4.2"
rlua = "0.15.2"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
solana-sdk = { path = "../../../sdk", version = "0.10.5" }

[dev-dependencies]
bincode = "1.0.0"

[lib]
name = "lua_loader"
name = "solana_lua_loader"
crate-type = ["cdylib"]
@@ -79,8 +79,8 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
                let len = bytes.len();
                trace!("LuaLoader::Write offset {} length {:?}", offset, len);
                if keyed_accounts[0].account.userdata.len() < offset + len {
                    println!(
                        "Overflow {} < {}",
                    warn!(
                        "Write overflow {} < {}",
                        keyed_accounts[0].account.userdata.len(),
                        offset + len
                    );
@@ -147,13 +147,12 @@ mod tests {

    #[test]
    fn test_move_funds_with_lua_via_process() {
        let bytes = r#"
        let userdata = r#"
            local tokens, _ = string.unpack("I", data)
            accounts[1].tokens = accounts[1].tokens - tokens
            accounts[2].tokens = accounts[2].tokens + tokens
        "#.as_bytes()
        .to_vec();
        let userdata = serialize(&LuaLoader::Bytes { bytes }).unwrap();

        let alice_pubkey = Pubkey::default();
        let bob_pubkey = Pubkey::default();
@@ -194,15 +193,12 @@ mod tests {

    #[test]
    fn test_load_lua_library() {
        let bytes = r#"
        let userdata = r#"
            local serialize = load(accounts[2].userdata)().serialize
            accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s")
        "#.as_bytes()
        .to_vec();
        let userdata = serialize(&LuaLoader::Bytes { bytes }).unwrap();

        let program_id = Pubkey::default();

        let program_account = Account {
            tokens: 1,
            userdata,
@@ -210,9 +206,7 @@ mod tests {
            executable: true,
            loader_program_id: Pubkey::default(),
        };

        let alice_account = Account::new(100, 0, program_id);

        let serialize_account = Account {
            tokens: 100,
            userdata: read_test_file("serialize.lua"),
@@ -220,7 +214,6 @@ mod tests {
            executable: false,
            loader_program_id: Pubkey::default(),
        };

        let mut accounts = [
            (Pubkey::default(), program_account),
            (Pubkey::default(), alice_account),
@@ -228,9 +221,7 @@ mod tests {
            (Pubkey::default(), Account::new(1, 0, program_id)),
        ];
        let mut keyed_accounts = create_keyed_accounts(&mut accounts);

        process(&mut keyed_accounts, &[]);

        // Verify deterministic ordering of a serialized Lua table.
        assert_eq!(
            str::from_utf8(&keyed_accounts[3].account.userdata).unwrap(),
@@ -250,12 +241,9 @@ mod tests {
        let dan_pubkey = Pubkey::new(&[5; 32]);
        let erin_pubkey = Pubkey::new(&[6; 32]);

        let userdata = serialize(&LuaLoader::Bytes {
            bytes: read_test_file("multisig.lua"),
        }).unwrap();
        let program_account = Account {
            tokens: 1,
            userdata,
            userdata: read_test_file("multisig.lua"),
            program_id,
            executable: true,
            loader_program_id: Pubkey::default(),
@@ -1,13 +1,13 @@
[package]
name = "solana-noop"
version = "0.10.0-pre2"
version = "0.10.5"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"

[dependencies]
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
solana-sdk = { path = "../../../sdk", version = "0.10.5" }

[lib]
name = "noop"
@@ -117,6 +117,14 @@ $ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
<TX_SIGNATURE>
```

### Deploy program
```
// Command
$ solana-wallet deploy <PATH>

// Return
<PROGRAM_ID>
```

## Javascript solana-web3.js Interface

@@ -20,7 +20,7 @@ if [[ ! -d $installDir ]]; then
fi

for dir in "$SOLANA_ROOT"/programs/native/*; do
  for program in "$SOLANA_ROOT/target/$variant/deps/lib$(basename "$dir")".{so,dylib,dll}; do
  for program in echo "$SOLANA_ROOT"/target/"$variant"/deps/lib{,solana_}"$(basename "$dir")".{so,dylib,dll}; do
    if [[ -f $program ]]; then
      cp -v "$program" "$installDir"
    fi
@@ -1,6 +1,6 @@
[package]
name = "solana-sdk"
version = "0.10.0-pre2"
version = "0.10.5"
description = "Solana SDK"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -117,8 +117,8 @@ parts:
      # Build/install all other programs
      cargo install --root $SNAPCRAFT_PART_INSTALL --bins

      # TODO: install native programs when they are ready for public use
      #./scripts/install-native-programs.sh $SNAPCRAFT_PART_INSTALL/bin/
      # Install native programs
      ./scripts/install-native-programs.sh $SNAPCRAFT_PART_INSTALL/bin/

      # Install multinode-demo/
      mkdir -p $SNAPCRAFT_PART_INSTALL/multinode-demo/
190
src/bank.rs
@@ -7,6 +7,7 @@ use bincode::deserialize;
use bincode::serialize;
use bpf_loader;
use budget_program::BudgetState;
use budget_transaction::BudgetTransaction;
use counter::Counter;
use entry::Entry;
use hash::{hash, Hash};
@@ -30,7 +31,7 @@ use std;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::sync::{Mutex, RwLock};
use std::time::Instant;
use storage_program::StorageProgram;
use system_program::SystemProgram;
@@ -41,7 +42,6 @@ use timing::{duration_as_us, timestamp};
use token_program::TokenProgram;
use tokio::prelude::Future;
use transaction::Transaction;
use vote_program::VoteProgram;
use window::WINDOW_SIZE;

/// The number of most recent `last_id` values that the bank will track the signatures
@@ -151,7 +151,7 @@ impl Default for LastIds {
/// The state of all accounts and contracts after processing its entries.
pub struct Bank {
    /// A map of account public keys to the balance in that account.
    pub accounts: RwLock<HashMap<Pubkey, Account>>,
    accounts: RwLock<HashMap<Pubkey, Account>>,

    /// set of accounts which are currently in the pipeline
    account_locks: Mutex<HashSet<Pubkey>>,
@@ -171,13 +171,6 @@ pub struct Bank {

    // Mapping of signatures to Subscriber ids and sinks to notify on confirmation
    signature_subscriptions: RwLock<HashMap<Signature, HashMap<Pubkey, Sink<RpcSignatureStatus>>>>,

    /// Tracks and updates the leader schedule based on the votes and account stakes
    /// processed by the bank
    pub leader_scheduler: Arc<RwLock<LeaderScheduler>>,

    // The number of ticks that have elapsed since genesis
    tick_height: Mutex<u64>,
}

impl Default for Bank {
@@ -190,8 +183,6 @@ impl Default for Bank {
            finality_time: AtomicUsize::new(std::usize::MAX),
            account_subscriptions: RwLock::new(HashMap::new()),
            signature_subscriptions: RwLock::new(HashMap::new()),
            leader_scheduler: Arc::new(RwLock::new(LeaderScheduler::default())),
            tick_height: Mutex::new(0),
        }
    }
}
@@ -622,8 +613,6 @@ impl Bank {
        {
            return Err(BankError::ProgramRuntimeError(instruction_index as u8));
        }
        } else if VoteProgram::check_id(&tx_program_id) {
            VoteProgram::process_transaction(&tx, instruction_index, program_accounts).is_err();
        } else {
            let mut depth = 0;
            let mut keys = Vec::new();
@@ -901,28 +890,41 @@ impl Bank {
        results
    }

    pub fn process_entry(&self, entry: &Entry) -> Result<()> {
    pub fn process_entry(
        &self,
        entry: &Entry,
        tick_height: &mut u64,
        leader_scheduler: &mut LeaderScheduler,
    ) -> Result<()> {
        if !entry.is_tick() {
            for result in self.process_transactions(&entry.transactions) {
                result?;
            }
        } else {
            let tick_height = {
                let mut tick_height_lock = self.tick_height.lock().unwrap();
                *tick_height_lock += 1;
                *tick_height_lock
            };

            self.leader_scheduler
                .write()
                .unwrap()
                .update_height(tick_height, self);
            *tick_height += 1;
            self.register_entry_id(&entry.id);
        }

        self.process_entry_votes(entry, *tick_height, leader_scheduler);
        Ok(())
    }

    fn process_entry_votes(
        &self,
        entry: &Entry,
        tick_height: u64,
        leader_scheduler: &mut LeaderScheduler,
    ) {
        for tx in &entry.transactions {
            if tx.vote().is_some() {
                // Update the active set in the leader scheduler
                leader_scheduler.push_vote(*tx.from(), tick_height);
            }
        }

        leader_scheduler.update_height(tick_height, self);
    }
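`process_entry` now leans on the caller for both counters; the ledger-tool hunk later in this diff adopts exactly this shape. A minimal sketch of the calling convention (module paths and the `Result` alias are assumptions):

```
use solana::bank::Bank;
use solana::entry::Entry;
use solana::leader_scheduler::LeaderScheduler;

// The caller owns the tick counter and the scheduler and threads both
// through every entry: ticks bump tick_height, votes feed the
// scheduler's active set via process_entry_votes().
fn replay(bank: &Bank, entries: &[Entry]) -> solana::bank::Result<()> {
    let mut tick_height = 0;
    let mut leader_scheduler = LeaderScheduler::default();
    for entry in entries {
        bank.process_entry(entry, &mut tick_height, &mut leader_scheduler)?;
    }
    Ok(())
}
```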
    /// Process an ordered list of entries, populating a circular buffer "tail"
    /// as we go.
    fn process_entries_tail(
@@ -930,6 +932,8 @@ impl Bank {
        entries: &[Entry],
        tail: &mut Vec<Entry>,
        tail_idx: &mut usize,
        tick_height: &mut u64,
        leader_scheduler: &mut LeaderScheduler,
    ) -> Result<u64> {
        let mut entry_count = 0;

@@ -947,7 +951,7 @@ impl Bank {
            // the leader scheduler. Next we will extract the vote tracking structure
            // out of the leader scheduler, and into the bank, and remove the leader
            // scheduler from these banking functions.
            self.process_entry(entry)?;
            self.process_entry(entry, tick_height, leader_scheduler)?;
        }

        Ok(entry_count)
@@ -992,7 +996,6 @@ impl Bank {
            // if it's a tick, execute the group and register the tick
            self.par_execute_entries(&mt_group)?;
            self.register_entry_id(&entry.id);
            *self.tick_height.lock().unwrap() += 1;
            mt_group = vec![];
            continue;
        }
@@ -1022,18 +1025,17 @@ impl Bank {
        entries: I,
        tail: &mut Vec<Entry>,
        tail_idx: &mut usize,
    ) -> Result<u64>
        leader_scheduler: &mut LeaderScheduler,
    ) -> Result<(u64, u64)>
    where
        I: IntoIterator<Item = Entry>,
    {
        // Ledger verification needs to be parallelized, but we can't pull the whole
        // thing into memory. We therefore chunk it.
        let mut entry_height = *tail_idx as u64;

        let mut tick_height = 0;
        for entry in &tail[0..*tail_idx] {
            if entry.is_tick() {
                *self.tick_height.lock().unwrap() += 1;
            }
            tick_height += entry.is_tick() as u64
        }

        let mut id = start_hash;
@@ -1044,15 +1046,25 @@ impl Bank {
            return Err(BankError::LedgerVerificationFailed);
        }
        id = block.last().unwrap().id;
        let entry_count = self.process_entries_tail(&block, tail, tail_idx)?;
        let entry_count = self.process_entries_tail(
            &block,
            tail,
            tail_idx,
            &mut tick_height,
            leader_scheduler,
        )?;

        entry_height += entry_count;
    }
    Ok(entry_height)
    Ok((tick_height, entry_height))
}

/// Process a full ledger.
pub fn process_ledger<I>(&self, entries: I) -> Result<(u64, u64, Vec<Entry>)>
pub fn process_ledger<I>(
    &self,
    entries: I,
    leader_scheduler: &mut LeaderScheduler,
) -> Result<(u64, u64, Vec<Entry>)>
where
    I: IntoIterator<Item = Entry>,
{
@@ -1094,14 +1106,20 @@ impl Bank {
    tail.push(entry0);
    tail.push(entry1);
    let mut tail_idx = 2;
    let entry_height = self.process_blocks(entry1_id, entries, &mut tail, &mut tail_idx)?;
    let (tick_height, entry_height) = self.process_blocks(
        entry1_id,
        entries,
        &mut tail,
        &mut tail_idx,
        leader_scheduler,
    )?;

    // check if we need to rotate tail
    if tail.len() == WINDOW_SIZE as usize {
        tail.rotate_left(tail_idx)
    }

    Ok((*self.tick_height.lock().unwrap(), entry_height, tail))
    Ok((tick_height, entry_height, tail))
}

/// Create, sign, and process a Transaction from `keypair` to `to` of
@@ -1218,16 +1236,6 @@ impl Bank {
        subscriptions.remove(pubkey).is_some()
    }

    pub fn get_current_leader(&self) -> Option<Pubkey> {
        let ls_lock = self.leader_scheduler.read().unwrap();
        let tick_height = self.tick_height.lock().unwrap();
        ls_lock.get_scheduled_leader(*tick_height)
    }

    pub fn get_tick_height(&self) -> u64 {
        *self.tick_height.lock().unwrap()
    }

    fn check_account_subscriptions(&self, pubkey: &Pubkey, account: &Account) {
        let subscriptions = self.account_subscriptions.read().unwrap();
        if let Some(hashmap) = subscriptions.get(pubkey) {
@@ -1280,6 +1288,13 @@ impl Bank {
        }
        subscriptions.remove(&signature);
    }

    #[cfg(test)]
    // Used to access accounts for things like controlling stake to control
    // the eligible set of nodes for leader selection
    pub fn accounts(&self) -> &RwLock<HashMap<Pubkey, Account>> {
        &self.accounts
    }
}

#[cfg(test)]
@@ -1292,6 +1307,7 @@ mod tests {
    use entry_writer::{self, EntryWriter};
    use hash::hash;
    use jsonrpc_macros::pubsub::{Subscriber, SubscriptionId};
    use leader_scheduler::LeaderScheduler;
    use ledger;
    use logger;
    use signature::Keypair;
@@ -1624,7 +1640,8 @@ mod tests {
        let mint = Mint::new(1);
        let genesis = mint.create_entries();
        let bank = Bank::default();
        bank.process_ledger(genesis).unwrap();
        bank.process_ledger(genesis, &mut LeaderScheduler::default())
            .unwrap();
        assert_eq!(bank.get_balance(&mint.pubkey()), 1);
    }

@@ -1701,7 +1718,9 @@ mod tests {
        let (ledger, pubkey) = create_sample_ledger(1);
        let (ledger, dup) = ledger.tee();
        let bank = Bank::default();
        let (tick_height, ledger_height, tail) = bank.process_ledger(ledger).unwrap();
        let (tick_height, ledger_height, tail) = bank
            .process_ledger(ledger, &mut LeaderScheduler::default())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey), 1);
        assert_eq!(ledger_height, 4);
        assert_eq!(tick_height, 2);
@@ -1723,15 +1742,17 @@ mod tests {
        // let (_, _) = bank.process_ledger(ledger).unwrap();
        // }

        let window_size = WINDOW_SIZE as usize;
        let window_size = 128;
        for entry_count in window_size - 3..window_size + 2 {
            let (ledger, pubkey) = create_sample_ledger(entry_count);
            let bank = Bank::default();
            let (tick_height, ledger_height, tail) = bank.process_ledger(ledger).unwrap();
            let (tick_height, ledger_height, tail) = bank
                .process_ledger(ledger, &mut LeaderScheduler::default())
                .unwrap();
            assert_eq!(bank.get_balance(&pubkey), 1);
            assert_eq!(ledger_height, entry_count as u64 + 3);
            assert_eq!(tick_height, 2);
            assert!(tail.len() <= window_size);
            assert!(tail.len() <= WINDOW_SIZE as usize);
            let last_entry = &tail[tail.len() - 1];
            assert_eq!(bank.last_id(), last_entry.id);
        }
@@ -1753,7 +1774,8 @@ mod tests {
        let ledger = to_file_iter(ledger);

        let bank = Bank::default();
        bank.process_ledger(ledger).unwrap();
        bank.process_ledger(ledger, &mut LeaderScheduler::default())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey), 1);
    }

@@ -1764,7 +1786,8 @@ mod tests {
        let block = to_file_iter(create_sample_block_with_ticks(&mint, 1, 1));

        let bank = Bank::default();
        bank.process_ledger(genesis.chain(block)).unwrap();
        bank.process_ledger(genesis.chain(block), &mut LeaderScheduler::default())
            .unwrap();
        assert_eq!(bank.get_balance(&mint.pubkey()), 1);
    }

@@ -1778,9 +1801,13 @@ mod tests {
        let ledger1 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs);

        let bank0 = Bank::default();
        bank0.process_ledger(ledger0).unwrap();
        bank0
            .process_ledger(ledger0, &mut LeaderScheduler::default())
            .unwrap();
        let bank1 = Bank::default();
        bank1.process_ledger(ledger1).unwrap();
        bank1
            .process_ledger(ledger1, &mut LeaderScheduler::default())
            .unwrap();

        let initial_state = bank0.hash_internal_state();

@@ -1873,7 +1900,7 @@ mod tests {
        let string = transport_receiver.poll();
        assert!(string.is_ok());
        if let Async::Ready(Some(response)) = string.unwrap() {
            let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader_program_id":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
            let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader_program_id":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"program_id":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
            assert_eq!(expected, response);
        }

@@ -2050,4 +2077,53 @@ mod tests {
            Err(BankError::AccountNotFound)
        );
    }

    #[test]
    fn test_program_ids() {
        let system = Pubkey::new(&[
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ]);
        let native = Pubkey::new(&[
            1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ]);
        let bpf = Pubkey::new(&[
            128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0,
        ]);
        let budget = Pubkey::new(&[
            129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0,
        ]);
        let storage = Pubkey::new(&[
            130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0,
        ]);
        let token = Pubkey::new(&[
            131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0,
        ]);

        assert_eq!(SystemProgram::id(), system);
        assert_eq!(native_loader::id(), native);
        assert_eq!(bpf_loader::id(), bpf);
        assert_eq!(BudgetState::id(), budget);
        assert_eq!(StorageProgram::id(), storage);
        assert_eq!(TokenProgram::id(), token);
    }

    #[test]
    fn test_program_id_uniqueness() {
        let mut unique = HashSet::new();
        let ids = vec![
            SystemProgram::id(),
            native_loader::id(),
            bpf_loader::id(),
            BudgetState::id(),
            StorageProgram::id(),
            TokenProgram::id(),
        ];
        assert!(ids.into_iter().all(move |id| unique.insert(id)));
    }
}
@@ -17,16 +17,14 @@ use solana::logger;
use solana::metrics::set_panic_hook;
use solana::signature::{Keypair, KeypairUtil};
use solana::thin_client::poll_gossip_for_leader;
use solana::vote_program::VoteProgram;
use solana::wallet::request_airdrop;
use std::fs::File;
use std::net::{Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;

fn main() {
fn main() -> () {
    logger::setup();
    set_panic_hook("fullnode");
    let matches = App::new("fullnode")
@@ -84,6 +82,7 @@ fn main() {

    // save off some stuff for airdrop
    let node_info = node.info.clone();
    let pubkey = keypair.pubkey();

    let leader = match network {
        Some(network) => {
@@ -92,16 +91,10 @@ fn main() {
        None => node_info,
    };

    let vote_account_keypair = Arc::new(Keypair::new());
    let vote_account_id = vote_account_keypair.pubkey();
    let keypair = Arc::new(keypair);
    let pubkey = keypair.pubkey();

    let mut fullnode = Fullnode::new(
        node,
        ledger_path,
        keypair.clone(),
        vote_account_keypair,
        keypair,
        network,
        false,
        LeaderScheduler::from_bootstrap_leader(leader.id),
@@ -136,49 +129,6 @@ fn main() {
        }
    }

    // Create the vote account
    loop {
        let last_id = client.get_last_id();
        if client
            .create_vote_account(&keypair, vote_account_id, &last_id, 1)
            .is_err()
        {
            sleep(Duration::from_secs(2));
            continue;
        }

        let balance = client.poll_get_balance(&vote_account_id).unwrap_or(0);

        if balance > 0 {
            break;
        }

        sleep(Duration::from_secs(2));
    }

    // Register the vote account to this node
    loop {
        let last_id = client.get_last_id();
        if client
            .register_vote_account(&keypair, vote_account_id, &last_id)
            .is_err()
        {
            sleep(Duration::from_secs(2));
            continue;
        }

        let account_user_data = client.get_account_userdata(&vote_account_id);
        if let Ok(Some(account_user_data)) = account_user_data {
            if let Ok(vote_state) = VoteProgram::deserialize(&account_user_data) {
                if vote_state.node_id == pubkey {
                    break;
                }
            }
        }

        sleep(Duration::from_secs(2));
    }

    loop {
        let status = fullnode.handle_role_transition();
        match status {
@@ -5,6 +5,7 @@ extern crate solana;

use clap::{App, Arg, SubCommand};
use solana::bank::Bank;
use solana::leader_scheduler::LeaderScheduler;
use solana::ledger::{read_ledger, verify_ledger};
use solana::logger;
use std::io::{stdout, Write};
@@ -115,7 +116,7 @@ fn main() {
    };

    let genesis = genesis.take(2).map(|e| e.unwrap());
    if let Err(e) = bank.process_ledger(genesis) {
    if let Err(e) = bank.process_ledger(genesis, &mut LeaderScheduler::default()) {
        eprintln!("verify failed at genesis err: {:?}", e);
        if !matches.is_present("continue") {
            exit(1);
@@ -141,7 +142,10 @@ fn main() {
        }
        last_id = entry.id;

        if let Err(e) = bank.process_entry(&entry) {
        let mut tick_height = 0;
        let mut leader_scheduler = LeaderScheduler::default();
        if let Err(e) = bank.process_entry(&entry, &mut tick_height, &mut leader_scheduler)
        {
            eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
            if !matches.is_present("continue") {
                exit(1);

@@ -3,8 +3,11 @@ use native_loader;
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;

pub const BPF_LOADER_PROGRAM_ID: [u8; 32] = [6u8; 32];
pub const BPF_LOADER_NAME: &str = "bpf_loader";
const BPF_LOADER_NAME: &str = "solana_bpf_loader";
const BPF_LOADER_PROGRAM_ID: [u8; 32] = [
    128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0,
];

pub fn id() -> Pubkey {
    Pubkey::new(&BPF_LOADER_PROGRAM_ID)
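The loader now claims id 128, part of the fixed numbering laid out in `test_program_ids` in the bank.rs diff above (0 = system, 1 = native loader, 128 = BPF loader, 129 = budget, 130 = storage, 131 = token). A sketch of that convention; `builtin_id` is a hypothetical helper, not code from the diff:

```
use solana_sdk::pubkey::Pubkey;

// The first byte of a builtin program id acts as a small tag; the
// remaining 31 bytes stay zero.
fn builtin_id(tag: u8) -> Pubkey {
    let mut bytes = [0u8; 32];
    bytes[0] = tag;
    Pubkey::new(&bytes)
}

#[test]
fn bpf_loader_id_uses_tag_128() {
    assert_eq!(builtin_id(128), bpf_loader::id());
}
```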
@@ -1,12 +1,15 @@
use budget::Budget;
use chrono::prelude::{DateTime, Utc};

/// A smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Contract {
    /// The number of tokens allocated to the `Budget` and any transaction fees.
    pub tokens: i64,
    pub budget: Budget,
pub struct Vote {
    /// We send some gossip-specific membership information through the vote to shortcut
    /// liveness voting.
    /// The version of the ClusterInfo struct that the last_id of this network voted with
    pub version: u64,
    /// The version of the ClusterInfo struct that has the same network configuration as this one
    pub contact_info_version: u64,
    // TODO: add signature of the state here as well
}

/// An instruction to progress the smart contract.
@@ -21,4 +24,7 @@ pub enum Instruction {
    /// Tell the budget that the `NewBudget` with `Signature` has been
    /// signed by the containing transaction's `Pubkey`.
    ApplySignature,

    /// Vote for a PoH that is equal to the last_id of this transaction
    NewVote(Vote),
}
@@ -30,9 +30,11 @@ pub struct BudgetState {
    pub pending_budget: Option<Budget>,
}

pub const BUDGET_PROGRAM_ID: [u8; 32] = [
    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
const BUDGET_PROGRAM_ID: [u8; 32] = [
    129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0,
];

impl BudgetState {
    fn is_pending(&self) -> bool {
        self.pending_budget != None
@@ -172,6 +174,11 @@ impl BudgetState {
                    Err(BudgetError::UninitializedContract)
                }
            }
            Instruction::NewVote(_vote) => {
                // TODO: move vote instruction into a different contract
                trace!("GOT VOTE! last_id={}", tx.last_id);
                Ok(())
            }
        }
    }
    fn serialize(&self, output: &mut [u8]) -> Result<(), BudgetError> {
@@ -2,7 +2,7 @@

use bincode::{deserialize, serialize};
use budget::{Budget, Condition};
use budget_instruction::Instruction;
use budget_instruction::{Instruction, Vote};
use budget_program::BudgetState;
use chrono::prelude::*;
use hash::Hash;
@@ -38,6 +38,8 @@ pub trait BudgetTransaction {
        last_id: Hash,
    ) -> Self;

    fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self;

    fn budget_new_on_date(
        from_keypair: &Keypair,
        to: Pubkey,
@@ -59,6 +61,8 @@ pub trait BudgetTransaction {
        last_id: Hash,
    ) -> Self;

    fn vote(&self) -> Option<(Pubkey, Vote, Hash)>;

    fn instruction(&self, program_index: usize) -> Option<Instruction>;
    fn system_instruction(&self, program_index: usize) -> Option<SystemProgram>;

@@ -149,6 +153,12 @@ impl BudgetTransaction for Transaction {
        )
    }

    fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self {
        let instruction = Instruction::NewVote(vote);
        let userdata = serialize(&instruction).expect("serialize instruction");
        Self::new(from_keypair, &[], BudgetState::id(), userdata, last_id, fee)
    }

    /// Create and sign a postdated Transaction. Used for unit-testing.
    fn budget_new_on_date(
        from_keypair: &Keypair,
@@ -209,6 +219,16 @@ impl BudgetTransaction for Transaction {
        )
    }

    fn vote(&self) -> Option<(Pubkey, Vote, Hash)> {
        if self.instructions.len() > 1 {
            None
        } else if let Some(Instruction::NewVote(vote)) = self.instruction(0) {
            Some((self.account_keys[0], vote, self.last_id))
        } else {
            None
        }
    }

    fn instruction(&self, instruction_index: usize) -> Option<Instruction> {
        deserialize(&self.userdata(instruction_index)).ok()
    }
@@ -13,6 +13,7 @@
//!
//! Bank needs to provide an interface for us to query the stake weight
use bincode::{deserialize, serialize, serialized_size};
use budget_instruction::Vote;
use choose_gossip_peer_strategy::{ChooseGossipPeerStrategy, ChooseWeightedPeerStrategy};
use counter::Counter;
use hash::Hash;
@@ -337,6 +338,47 @@ impl ClusterInfo {
        self.external_liveness.get(key)
    }

    pub fn insert_vote(&mut self, pubkey: &Pubkey, v: &Vote, last_id: Hash) {
        if self.table.get(pubkey).is_none() {
            warn!("{}: VOTE for unknown id: {}", self.id, pubkey);
            return;
        }
        if v.contact_info_version > self.table[pubkey].contact_info.version {
            warn!(
                "{}: VOTE for new address version from: {} ours: {} vote: {:?}",
                self.id, pubkey, self.table[pubkey].contact_info.version, v,
            );
            return;
        }
        if *pubkey == self.my_data().leader_id {
            info!("{}: LEADER_VOTED! {}", self.id, pubkey);
            inc_new_counter_info!("cluster_info-insert_vote-leader_voted", 1);
        }

        if v.version <= self.table[pubkey].version {
            debug!("{}: VOTE for old version: {}", self.id, pubkey);
            self.update_liveness(*pubkey);
            return;
        } else {
            let mut data = self.table[pubkey].clone();
            data.version = v.version;
            data.ledger_state.last_id = last_id;

            debug!("{}: INSERTING VOTE! for {}", self.id, data.id);
            self.update_liveness(data.id);
            self.insert(&data);
        }
    }
    pub fn insert_votes(&mut self, votes: &[(Pubkey, Vote, Hash)]) {
        inc_new_counter_info!("cluster_info-vote-count", votes.len());
        if !votes.is_empty() {
            info!("{}: INSERTING VOTES {}", self.id, votes.len());
        }
        for v in votes {
            self.insert_vote(&v.0, &v.1, v.2);
        }
    }

    pub fn insert(&mut self, v: &NodeInfo) -> usize {
        // TODO check that last_verified types are always increasing
        // update the peer table
@@ -413,7 +455,6 @@ impl ClusterInfo {
            if *id == leader_id {
                info!("{}: PURGE LEADER {}", self.id, id,);
                inc_new_counter_info!("cluster_info-purge-purged_leader", 1, 1);
                self.set_leader(Pubkey::default());
            }
        }
    }
@@ -497,7 +538,7 @@ impl ClusterInfo {
        );

        // Make sure the next leader in line knows about the entries before his slot in the leader
        // rotation so they can initiate repairs if necessary
        // rotation so he can initiate repairs if necessary
        {
            let ls_lock = leader_scheduler.read().unwrap();
            let next_leader_height = ls_lock.max_height_for_leader(tick_height);
@@ -782,6 +823,22 @@ impl ClusterInfo {
        Ok((v.contact_info.ncp, req))
    }

    pub fn new_vote(&mut self, last_id: Hash) -> Result<(Vote, SocketAddr)> {
        let mut me = self.my_data().clone();
        let leader = self
            .leader_data()
            .ok_or(ClusterInfoError::NoLeader)?
            .clone();
        me.version += 1;
        me.ledger_state.last_id = last_id;
        let vote = Vote {
            version: me.version,
            contact_info_version: me.contact_info.version,
        };
        self.insert(&me);
        Ok((vote, leader.contact_info.tpu))
    }
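Taken together with `budget_new_vote` from the budget_transaction.rs hunk earlier, `new_vote` gives a node everything it needs to gossip a vote. A hedged sketch of the round-trip (module paths, the zero fee, and the error plumbing are assumptions, not part of the diff):

```
use bincode::serialize;
use solana::budget_transaction::BudgetTransaction;
use solana::cluster_info::ClusterInfo;
use solana::hash::Hash;
use solana::result::Result;
use solana::signature::Keypair;
use solana::transaction::Transaction;
use std::net::UdpSocket;
use std::sync::{Arc, RwLock};

// Bump our version and build a Vote, wrap it in a NewVote instruction,
// then send the signed transaction to the leader's TPU address that
// new_vote() handed back.
fn send_vote(
    cluster_info: &Arc<RwLock<ClusterInfo>>,
    keypair: &Keypair,
    last_id: Hash,
    socket: &UdpSocket,
) -> Result<()> {
    let (vote, leader_tpu) = cluster_info.write().unwrap().new_vote(last_id)?;
    let tx = Transaction::budget_new_vote(keypair, vote, last_id, 0);
    socket.send_to(&serialize(&tx)?, leader_tpu)?;
    Ok(())
}
```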
    /// At random pick a node and try to get updated changes from them
    fn run_gossip(obj: &Arc<RwLock<Self>>, blob_sender: &BlobSender) -> Result<()> {
        //TODO we need to keep track of stakes and weight the selection by stake size
@@ -1330,6 +1387,7 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
#[cfg(test)]
mod tests {
    use bincode::serialize;
    use budget_instruction::Vote;
    use cluster_info::{
        ClusterInfo, ClusterInfoError, Node, NodeInfo, Protocol, FULLNODE_PORT_RANGE,
        GOSSIP_PURGE_MILLIS, GOSSIP_SLEEP_MILLIS, MIN_TABLE_SIZE,
@@ -1377,6 +1435,62 @@ mod tests {
        assert_eq!(cluster_info.table[&d.id].version, 3);
        assert!(liveness < cluster_info.alive[&d.id]);
    }
    #[test]
    fn test_new_vote() {
        let d = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
        assert_eq!(d.version, 0);
        let mut cluster_info = ClusterInfo::new(d.clone()).unwrap();
        assert_eq!(cluster_info.table[&d.id].version, 0);
        let leader = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.2:1235"));
        assert_ne!(d.id, leader.id);
        assert_matches!(
            cluster_info.new_vote(Hash::default()).err(),
            Some(Error::ClusterInfoError(ClusterInfoError::NoLeader))
        );
        cluster_info.insert(&leader);
        assert_matches!(
            cluster_info.new_vote(Hash::default()).err(),
            Some(Error::ClusterInfoError(ClusterInfoError::NoLeader))
        );
        cluster_info.set_leader(leader.id);
        assert_eq!(cluster_info.table[&d.id].version, 1);
        let v = Vote {
            version: 2, // version should increase when we vote
            contact_info_version: 0,
        };
        let expected = (v, cluster_info.table[&leader.id].contact_info.tpu);
        assert_eq!(cluster_info.new_vote(Hash::default()).unwrap(), expected);
    }

    #[test]
    fn test_insert_vote() {
        let d = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
        assert_eq!(d.version, 0);
        let mut cluster_info = ClusterInfo::new(d.clone()).unwrap();
        assert_eq!(cluster_info.table[&d.id].version, 0);
        let vote_same_version = Vote {
            version: d.version,
            contact_info_version: 0,
        };
        cluster_info.insert_vote(&d.id, &vote_same_version, Hash::default());
        assert_eq!(cluster_info.table[&d.id].version, 0);

        let vote_new_version_new_addrs = Vote {
            version: d.version + 1,
            contact_info_version: 1,
        };
        cluster_info.insert_vote(&d.id, &vote_new_version_new_addrs, Hash::default());
        // should be dropped since the address is newer than we know
        assert_eq!(cluster_info.table[&d.id].version, 0);

        let vote_new_version_old_addrs = Vote {
            version: d.version + 1,
            contact_info_version: 0,
        };
        cluster_info.insert_vote(&d.id, &vote_new_version_old_addrs, Hash::default());
        // should be accepted, since the update is for the same address field as the one we know
        assert_eq!(cluster_info.table[&d.id].version, 1);
    }
    fn sorted(ls: &Vec<(NodeInfo, u64)>) -> Vec<(NodeInfo, u64)> {
        let mut copy: Vec<_> = ls.iter().cloned().collect();
        copy.sort_by(|x, y| x.0.id.cmp(&y.0.id));
@@ -1667,7 +1781,7 @@ mod tests {
        let len = cluster_info.table.len() as u64;
        cluster_info.purge(now + GOSSIP_PURGE_MILLIS + 1);
        assert_eq!(len as usize - 1, cluster_info.table.len());
        assert_eq!(cluster_info.my_data().leader_id, Pubkey::default());
        assert_eq!(cluster_info.my_data().leader_id, nxt.id);
        assert!(cluster_info.leader_data().is_none());
    }
15
src/drone.rs
@@ -235,7 +235,6 @@ mod tests {
    use signature::{Keypair, KeypairUtil};
    use std::fs::remove_dir_all;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::{Arc, RwLock};
    use std::time::Duration;
    use thin_client::ThinClient;

@@ -314,24 +313,18 @@ mod tests {
        const TPS_BATCH: i64 = 5_000_000;

        logger::setup();
        let leader_keypair = Arc::new(Keypair::new());
        let leader_keypair = Keypair::new();
        let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());

        let alice = Mint::new(10_000_000);
        let mut bank = Bank::new(&alice);
        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
            leader.info.id,
        )));
        bank.leader_scheduler = leader_scheduler;
        let bank = Bank::new(&alice);
        let bob_pubkey = Keypair::new().pubkey();
        let carlos_pubkey = Keypair::new().pubkey();
        let leader_data = leader.info.clone();
        let ledger_path = get_tmp_ledger_path("send_airdrop");

        let vote_account_keypair = Arc::new(Keypair::new());
        let server = Fullnode::new_with_bank(
            leader_keypair,
            vote_account_keypair,
            bank,
            0,
            0,
@@ -340,6 +333,7 @@ mod tests {
            None,
            &ledger_path,
            false,
            LeaderScheduler::from_bootstrap_leader(leader_data.id),
            Some(0),
        );

@@ -374,14 +368,13 @@ mod tests {
        // restart the leader, drone should find the new one at the same gossip port
        server.close().unwrap();

        let leader_keypair = Arc::new(Keypair::new());
        let leader_keypair = Keypair::new();
        let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
        let leader_data = leader.info.clone();
        let server = Fullnode::new(
            leader,
            &ledger_path,
            leader_keypair,
            Arc::new(Keypair::new()),
            None,
            false,
            LeaderScheduler::from_bootstrap_leader(leader_data.id),
292
src/fullnode.rs
@@ -85,12 +85,12 @@ pub enum FullnodeReturnType {

pub struct Fullnode {
    pub node_role: Option<NodeRole>,
    pub leader_scheduler: Arc<RwLock<LeaderScheduler>>,
    keypair: Arc<Keypair>,
    vote_account_keypair: Arc<Keypair>,
    exit: Arc<AtomicBool>,
    rpu: Option<Rpu>,
    rpc_service: Option<JsonRpcService>,
    rpc_pubsub_service: Option<PubSubService>,
    rpc_service: JsonRpcService,
    rpc_pubsub_service: PubSubService,
    ncp: Ncp,
    bank: Arc<Bank>,
    cluster_info: Arc<RwLock<ClusterInfo>>,
@@ -104,7 +104,6 @@ pub struct Fullnode {
    broadcast_socket: UdpSocket,
    requests_socket: UdpSocket,
    respond_socket: UdpSocket,
    rpc_port: Option<u16>,
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@@ -133,17 +132,14 @@ impl Fullnode {
    pub fn new(
        node: Node,
        ledger_path: &str,
        keypair: Arc<Keypair>,
        vote_account_keypair: Arc<Keypair>,
        keypair: Keypair,
        leader_addr: Option<SocketAddr>,
        sigverify_disabled: bool,
        leader_scheduler: LeaderScheduler,
        mut leader_scheduler: LeaderScheduler,
    ) -> Self {
        let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));

        info!("creating bank...");
        let (bank, tick_height, entry_height, ledger_tail) =
            Self::new_bank_from_ledger(ledger_path, leader_scheduler);
            Self::new_bank_from_ledger(ledger_path, &mut leader_scheduler);

        info!("creating networking stack...");
        let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();
@@ -158,7 +154,6 @@ impl Fullnode {
        let leader_info = leader_addr.map(|i| NodeInfo::new_entry_point(&i));
        let server = Self::new_with_bank(
            keypair,
            vote_account_keypair,
            bank,
            tick_height,
            entry_height,
@@ -167,6 +162,7 @@ impl Fullnode {
            leader_info.as_ref(),
            ledger_path,
            sigverify_disabled,
            leader_scheduler,
            None,
        );

@@ -240,8 +236,7 @@ impl Fullnode {
    /// ```
    #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
    pub fn new_with_bank(
        keypair: Arc<Keypair>,
        vote_account_keypair: Arc<Keypair>,
        keypair: Keypair,
        bank: Bank,
        tick_height: u64,
        entry_height: u64,
@@ -250,6 +245,7 @@ impl Fullnode {
        bootstrap_leader_info_option: Option<&NodeInfo>,
        ledger_path: &str,
        sigverify_disabled: bool,
        leader_scheduler: LeaderScheduler,
        rpc_port: Option<u16>,
    ) -> Self {
        let exit = Arc::new(AtomicBool::new(false));
@@ -278,8 +274,21 @@ impl Fullnode {
            ClusterInfo::new(node.info).expect("ClusterInfo::new"),
        ));

        let (rpc_service, rpc_pubsub_service) =
            Self::startup_rpc_services(rpc_port, &bank, &cluster_info);
        // Use custom RPC port, if provided (`Some(port)`)
        // RPC port may be any open port on the node
        // If rpc_port == `None`, node will listen on the default RPC_PORT from Rpc module
        // If rpc_port == `Some(0)`, node will dynamically choose any open port for both
        // Rpc and RpcPubsub services. Useful for tests.
        let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
        // TODO: The RPC service assumes that there is a drone running on the leader
        // Drone location/id will need to be handled a different way as soon as leader rotation begins
        let rpc_service = JsonRpcService::new(&bank, &cluster_info, rpc_addr, exit.clone());

        let rpc_pubsub_addr = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::from(0)),
            rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 }),
        );
        let rpc_pubsub_service = PubSubService::new(&bank, rpc_pubsub_addr, exit.clone());
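The port rule described in the comments above, restated as a pure function (`derive_rpc_ports` is illustrative, not part of the diff):

```
// None    -> (RPC_PORT, RPC_PORT + 1): the well-known defaults.
// Some(0) -> (0, 0): the OS picks both ports; useful for tests.
// Some(p) -> (p, p + 1): caller-chosen, with pubsub one port up.
fn derive_rpc_ports(rpc_port: Option<u16>) -> (u16, u16) {
    let rpc = rpc_port.unwrap_or(RPC_PORT);
    let pubsub = rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 });
    (rpc, pubsub)
}
```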
        let ncp = Ncp::new(
            &cluster_info,
@@ -289,6 +298,9 @@ impl Fullnode {
            exit.clone(),
        );

        let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
        let keypair = Arc::new(keypair);

        // Insert the bootstrap leader info, should only be None if this node
        // is the bootstrap leader
        if let Some(bootstrap_leader_info) = bootstrap_leader_info_option {
@@ -296,8 +308,10 @@ impl Fullnode {
        }

        // Get the scheduled leader
        let scheduled_leader = bank
            .get_current_leader()
        let scheduled_leader = leader_scheduler
            .read()
            .unwrap()
            .get_scheduled_leader(tick_height)
            .expect("Leader not known after processing bank");

        cluster_info.write().unwrap().set_leader(scheduled_leader);
@@ -305,8 +319,8 @@ impl Fullnode {
            // Start in validator mode.
            let tvu = Tvu::new(
                keypair.clone(),
                vote_account_keypair.clone(),
                &bank,
                tick_height,
                entry_height,
                cluster_info.clone(),
                shared_window.clone(),
@@ -324,17 +338,20 @@ impl Fullnode {
                    .try_clone()
                    .expect("Failed to clone retransmit socket"),
                Some(ledger_path),
                leader_scheduler.clone(),
            );
            let validator_state = ValidatorServices::new(tvu);
            Some(NodeRole::Validator(validator_state))
        } else {
            let max_tick_height = {
                let ls_lock = bank.leader_scheduler.read().unwrap();
                let ls_lock = leader_scheduler.read().unwrap();
                ls_lock.max_height_for_leader(tick_height)
            };
            // Start in leader mode.
            let (tpu, entry_receiver, tpu_exit) = Tpu::new(
                keypair.clone(),
                &bank,
                &cluster_info,
                Default::default(),
                node.sockets
                    .transaction
@@ -357,7 +374,7 @@ impl Fullnode {
                shared_window.clone(),
                entry_height,
                entry_receiver,
                bank.leader_scheduler.clone(),
                leader_scheduler.clone(),
                tick_height,
                tpu_exit,
            );
@@ -367,15 +384,14 @@ impl Fullnode {

        Fullnode {
            keypair,
            vote_account_keypair,
            cluster_info,
            shared_window,
            bank,
            sigverify_disabled,
            rpu,
            ncp,
            rpc_service: Some(rpc_service),
            rpc_pubsub_service: Some(rpc_pubsub_service),
            rpc_service,
            rpc_pubsub_service,
            node_role,
            ledger_path: ledger_path.to_owned(),
            exit,
@@ -386,50 +402,27 @@ impl Fullnode {
            broadcast_socket: node.sockets.broadcast,
            requests_socket: node.sockets.requests,
            respond_socket: node.sockets.respond,
            rpc_port,
            leader_scheduler,
        }
    }

    fn leader_to_validator(&mut self) -> Result<()> {
        // Close down any services that could have a reference to the bank
        if self.rpu.is_some() {
            let old_rpu = self.rpu.take().unwrap();
            old_rpu.close()?;
        }
        let (scheduled_leader, tick_height, entry_height, last_entry_id) = {
            let mut ls_lock = self.leader_scheduler.write().unwrap();
            // Clear the leader scheduler
            ls_lock.reset();

        if self.rpc_service.is_some() {
            let old_rpc_service = self.rpc_service.take().unwrap();
            old_rpc_service.close()?;
        }

        if self.rpc_pubsub_service.is_some() {
            let old_rpc_pubsub_service = self.rpc_pubsub_service.take().unwrap();
            old_rpc_pubsub_service.close()?;
        }

        // Correctness check: Ensure that references to the bank and leader scheduler are no
        // longer held by any running thread
        let mut new_leader_scheduler = self.bank.leader_scheduler.read().unwrap().clone();

        // Clear the leader scheduler
        new_leader_scheduler.reset();

        let (new_bank, scheduled_leader, tick_height, entry_height, last_entry_id) = {
            // TODO: We can avoid building the bank again once RecordStage is
            // integrated with BankingStage
            let (new_bank, tick_height, entry_height, ledger_tail) = Self::new_bank_from_ledger(
                &self.ledger_path,
                Arc::new(RwLock::new(new_leader_scheduler)),
            );
            let (bank, tick_height, entry_height, ledger_tail) =
                Self::new_bank_from_ledger(&self.ledger_path, &mut *ls_lock);

            let new_bank = Arc::new(new_bank);
            let scheduled_leader = new_bank
                .get_current_leader()
                .expect("Scheduled leader should exist after rebuilding bank");
            self.bank = Arc::new(bank);

            (
                new_bank,
                scheduled_leader,
                ls_lock
                    .get_scheduled_leader(entry_height)
                    .expect("Scheduled leader should exist after rebuilding bank"),
                tick_height,
                entry_height,
                ledger_tail
@@ -444,23 +437,21 @@ impl Fullnode {
            .unwrap()
            .set_leader(scheduled_leader);

        // Spin up new versions of all the services that relied on the bank, passing in the
        // new bank
        self.rpu = Some(Rpu::new(
            &new_bank,
            self.requests_socket
                .try_clone()
                .expect("Failed to clone requests socket"),
            self.respond_socket
                .try_clone()
                .expect("Failed to clone respond socket"),
        ));

        let (rpc_service, rpc_pubsub_service) =
            Self::startup_rpc_services(self.rpc_port, &new_bank, &self.cluster_info);
        self.rpc_service = Some(rpc_service);
        self.rpc_pubsub_service = Some(rpc_pubsub_service);
        self.bank = new_bank;
        // Make a new RPU to serve requests out of the new bank we've created
        // instead of the old one
        if self.rpu.is_some() {
            let old_rpu = self.rpu.take().unwrap();
            old_rpu.close()?;
            self.rpu = Some(Rpu::new(
                &self.bank,
                self.requests_socket
                    .try_clone()
                    .expect("Failed to clone requests socket"),
                self.respond_socket
                    .try_clone()
                    .expect("Failed to clone respond socket"),
            ));
        }

        // In the rare case that the leader exited on a multiple of seed_rotation_interval
        // when the new leader schedule was being generated, and there are no other validators
@@ -468,31 +459,32 @@ impl Fullnode {
        // check for that
        if scheduled_leader == self.keypair.pubkey() {
            self.validator_to_leader(tick_height, entry_height, last_entry_id);
            Ok(())
        } else {
            let tvu = Tvu::new(
                self.keypair.clone(),
                self.vote_account_keypair.clone(),
                &self.bank,
                entry_height,
                self.cluster_info.clone(),
                self.shared_window.clone(),
                self.replicate_socket
                    .iter()
                    .map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
                    .collect(),
                self.repair_socket
                    .try_clone()
                    .expect("Failed to clone repair socket"),
                self.retransmit_socket
                    .try_clone()
                    .expect("Failed to clone retransmit socket"),
                Some(&self.ledger_path),
            );
            let validator_state = ValidatorServices::new(tvu);
            self.node_role = Some(NodeRole::Validator(validator_state));
            Ok(())
            return Ok(());
        }

        let tvu = Tvu::new(
            self.keypair.clone(),
            &self.bank,
            tick_height,
            entry_height,
            self.cluster_info.clone(),
            self.shared_window.clone(),
            self.replicate_socket
                .iter()
                .map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
                .collect(),
            self.repair_socket
                .try_clone()
                .expect("Failed to clone repair socket"),
            self.retransmit_socket
                .try_clone()
                .expect("Failed to clone retransmit socket"),
            Some(&self.ledger_path),
            self.leader_scheduler.clone(),
        );
        let validator_state = ValidatorServices::new(tvu);
        self.node_role = Some(NodeRole::Validator(validator_state));
        Ok(())
    }

    fn validator_to_leader(&mut self, tick_height: u64, entry_height: u64, last_entry_id: Hash) {
@@ -502,12 +494,14 @@ impl Fullnode {
            .set_leader(self.keypair.pubkey());

        let max_tick_height = {
            let ls_lock = self.bank.leader_scheduler.read().unwrap();
            let ls_lock = self.leader_scheduler.read().unwrap();
            ls_lock.max_height_for_leader(tick_height)
        };

        let (tpu, blob_receiver, tpu_exit) = Tpu::new(
            self.keypair.clone(),
            &self.bank,
            &self.cluster_info,
            Default::default(),
            self.transaction_sockets
                .iter()
@@ -531,7 +525,7 @@ impl Fullnode {
            self.shared_window.clone(),
            entry_height,
            blob_receiver,
            self.bank.leader_scheduler.clone(),
            self.leader_scheduler.clone(),
            tick_height,
            tpu_exit,
        );
@@ -575,12 +569,6 @@ impl Fullnode {
        if let Some(ref rpu) = self.rpu {
            rpu.exit();
        }
        if let Some(ref rpc_service) = self.rpc_service {
            rpc_service.exit();
        }
        if let Some(ref rpc_pubsub_service) = self.rpc_pubsub_service {
            rpc_pubsub_service.exit();
        }
        match self.node_role {
            Some(NodeRole::Leader(ref leader_services)) => leader_services.exit(),
            Some(NodeRole::Validator(ref validator_services)) => validator_services.exit(),
@@ -595,50 +583,21 @@ impl Fullnode {

    pub fn new_bank_from_ledger(
        ledger_path: &str,
        leader_scheduler: Arc<RwLock<LeaderScheduler>>,
        leader_scheduler: &mut LeaderScheduler,
    ) -> (Bank, u64, u64, Vec<Entry>) {
        let mut bank = Bank::new_with_builtin_programs();
        bank.leader_scheduler = leader_scheduler;
        let bank = Bank::new_with_builtin_programs();
        let entries = read_ledger(ledger_path, true).expect("opening ledger");
        let entries = entries
            .map(|e| e.unwrap_or_else(|err| panic!("failed to parse entry. error: {}", err)));
        info!("processing ledger...");
        let (tick_height, entry_height, ledger_tail) =
            bank.process_ledger(entries).expect("process_ledger");
        let (tick_height, entry_height, ledger_tail) = bank
            .process_ledger(entries, leader_scheduler)
            .expect("process_ledger");
        // entry_height is the network-wide agreed height of the ledger.
        // initialize it from the input ledger
        info!("processed {} ledger...", entry_height);
        (bank, tick_height, entry_height, ledger_tail)
    }
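With the scheduler passed by `&mut`, callers construct it first and keep ownership; a hedged usage sketch (variable setup assumed):

```
// Build the scheduler up front, replay the ledger into a fresh bank,
// and only then share the scheduler with long-running services.
let mut leader_scheduler = LeaderScheduler::default();
let (bank, tick_height, entry_height, ledger_tail) =
    Fullnode::new_bank_from_ledger(ledger_path, &mut leader_scheduler);
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
```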
|
||||
pub fn get_leader_scheduler(&self) -> &Arc<RwLock<LeaderScheduler>> {
|
||||
&self.bank.leader_scheduler
|
||||
}
|
||||
|
||||
fn startup_rpc_services(
|
||||
rpc_port: Option<u16>,
|
||||
bank: &Arc<Bank>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
) -> (JsonRpcService, PubSubService) {
|
||||
// Use custom RPC port, if provided (`Some(port)`)
|
||||
// RPC port may be any open port on the node
|
||||
// If rpc_port == `None`, node will listen on the default RPC_PORT from Rpc module
|
||||
// If rpc_port == `Some(0)`, node will dynamically choose any open port for both
|
||||
// Rpc and RpcPubsub serivces. Useful for tests.
|
||||
|
||||
let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
|
||||
let rpc_pubsub_addr = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::from(0)),
|
||||
rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 }),
|
||||
);
|
||||
|
||||
// TODO: The RPC service assumes that there is a drone running on the leader
|
||||
// Drone location/id will need to be handled a different way as soon as leader rotation begins
|
||||
(
|
||||
JsonRpcService::new(bank, cluster_info, rpc_addr),
|
||||
PubSubService::new(bank, rpc_pubsub_addr),
|
||||
)
|
||||
}
|
||||
}

impl Service for Fullnode {
@ -648,14 +607,9 @@ impl Service for Fullnode {
if let Some(rpu) = self.rpu {
rpu.join()?;
}
if let Some(rpc_service) = self.rpc_service {
rpc_service.join()?;
}
if let Some(rpc_pubsub_service) = self.rpc_pubsub_service {
rpc_pubsub_service.join()?;
}

self.ncp.join()?;
self.rpc_service.join()?;
self.rpc_pubsub_service.join()?;

match self.node_role {
Some(NodeRole::Validator(validator_service)) => {
@ -689,7 +643,7 @@ mod tests {
use std::fs::remove_dir_all;
use std::net::UdpSocket;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use streamer::responder;

#[test]
@ -697,19 +651,13 @@ mod tests {
let keypair = Keypair::new();
let tn = Node::new_localhost_with_pubkey(keypair.pubkey());
let (mint, validator_ledger_path) = create_tmp_genesis("validator_exit", 10_000);
let mut bank = Bank::new(&mint);
let bank = Bank::new(&mint);
let entry = tn.info.clone();
let genesis_entries = &mint.create_entries();
let entry_height = genesis_entries.len() as u64;

let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
entry.id,
)));
bank.leader_scheduler = leader_scheduler;

let v = Fullnode::new_with_bank(
Arc::new(keypair),
Arc::new(Keypair::new()),
keypair,
bank,
0,
entry_height,
@ -718,6 +666,7 @@ mod tests {
Some(&entry),
&validator_ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(entry.id),
Some(0),
);
v.close().unwrap();
@ -734,20 +683,13 @@ mod tests {
let (mint, validator_ledger_path) =
create_tmp_genesis(&format!("validator_parallel_exit_{}", i), 10_000);
ledger_paths.push(validator_ledger_path.clone());
let mut bank = Bank::new(&mint);
let bank = Bank::new(&mint);
let entry = tn.info.clone();

let genesis_entries = &mint.create_entries();
let entry_height = genesis_entries.len() as u64;

let leader_scheduler = Arc::new(RwLock::new(
LeaderScheduler::from_bootstrap_leader(entry.id),
));
bank.leader_scheduler = leader_scheduler;

Fullnode::new_with_bank(
Arc::new(keypair),
Arc::new(Keypair::new()),
keypair,
bank,
0,
entry_height,
@ -756,6 +698,7 @@ mod tests {
Some(&entry),
&validator_ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(entry.id),
Some(0),
)
}).collect();
@ -814,8 +757,7 @@ mod tests {
let mut bootstrap_leader = Fullnode::new(
bootstrap_leader_node,
&bootstrap_leader_ledger_path,
Arc::new(bootstrap_leader_keypair),
Arc::new(Keypair::new()),
bootstrap_leader_keypair,
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -841,7 +783,7 @@ mod tests {
#[test]
fn test_wrong_role_transition() {
// Create the leader node information
let bootstrap_leader_keypair = Arc::new(Keypair::new());
let bootstrap_leader_keypair = Keypair::new();
let bootstrap_leader_node =
Node::new_localhost_with_pubkey(bootstrap_leader_keypair.pubkey());
let bootstrap_leader_info = bootstrap_leader_node.info.clone();
@ -863,7 +805,7 @@ mod tests {
// Write the entries to the ledger that will cause leader rotation
// after the bootstrap height
let mut ledger_writer = LedgerWriter::open(&bootstrap_leader_ledger_path, false).unwrap();
let (active_set_entries, validator_vote_account_keypair) = make_active_set_entries(
let active_set_entries = make_active_set_entries(
&validator_keypair,
&mint.keypair(),
&last_id,
@ -895,12 +837,10 @@ mod tests {
);

// Test that a node knows to transition to a validator based on parsing the ledger
let leader_vote_account_keypair = Arc::new(Keypair::new());
let bootstrap_leader = Fullnode::new(
bootstrap_leader_node,
&bootstrap_leader_ledger_path,
bootstrap_leader_keypair,
leader_vote_account_keypair,
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -917,8 +857,7 @@ mod tests {
let validator = Fullnode::new(
validator_node,
&bootstrap_leader_ledger_path,
Arc::new(validator_keypair),
Arc::new(validator_vote_account_keypair),
validator_keypair,
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -966,7 +905,7 @@ mod tests {
//
// 2) A vote from the validator
let mut ledger_writer = LedgerWriter::open(&validator_ledger_path, false).unwrap();
let (active_set_entries, validator_vote_account_keypair) =
let active_set_entries =
make_active_set_entries(&validator_keypair, &mint.keypair(), &last_id, &last_id, 0);
let initial_tick_height = genesis_entries
.iter()
@ -994,8 +933,7 @@ mod tests {
let mut validator = Fullnode::new(
validator_node,
&validator_ledger_path,
Arc::new(validator_keypair),
Arc::new(validator_vote_account_keypair),
validator_keypair,
Some(leader_ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1055,7 +993,7 @@ mod tests {
// transitioned after tick_height = bootstrap_height.
let (_, tick_height, entry_height, _) = Fullnode::new_bank_from_ledger(
&validator_ledger_path,
Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
&mut LeaderScheduler::new(&leader_scheduler_config),
);

assert_eq!(tick_height, bootstrap_height);

src/leader_scheduler.rs
@ -4,24 +4,82 @@
use bank::Bank;

use bincode::serialize;
use budget_instruction::Vote;
use budget_transaction::BudgetTransaction;
use byteorder::{LittleEndian, ReadBytesExt};
use entry::Entry;
use hash::{hash, Hash};
use ledger::create_ticks;
use signature::{Keypair, KeypairUtil};
#[cfg(test)]
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashSet;
use std::collections::HashMap;
use std::io::Cursor;
use system_transaction::SystemTransaction;
use transaction::Transaction;
use vote_program::{Vote, VoteProgram};
use vote_transaction::VoteTransaction;

pub const DEFAULT_BOOTSTRAP_HEIGHT: u64 = 1000;
pub const DEFAULT_LEADER_ROTATION_INTERVAL: u64 = 100;
pub const DEFAULT_SEED_ROTATION_INTERVAL: u64 = 1000;
pub const DEFAULT_ACTIVE_WINDOW_LENGTH: u64 = 1000;

#[derive(Debug)]
pub struct ActiveValidators {
// Map from validator id to the last PoH height at which they voted.
pub active_validators: HashMap<Pubkey, u64>,
pub active_window_length: u64,
}

impl ActiveValidators {
pub fn new(active_window_length_option: Option<u64>) -> Self {
let mut active_window_length = DEFAULT_ACTIVE_WINDOW_LENGTH;
if let Some(input) = active_window_length_option {
active_window_length = input;
}

ActiveValidators {
active_validators: HashMap::new(),
active_window_length,
}
}

// Finds all the active voters who have voted in the range
// (height - active_window_length, height], and removes
// anybody who hasn't voted in that range from the map
pub fn get_active_set(&mut self, height: u64) -> Vec<Pubkey> {
// Don't filter anything if height is less than the
// size of the active window. Otherwise, calculate the acceptable
// window and filter the active_validators

// Note: height == 0 will only be included for all
// height < self.active_window_length
let upper_bound = height;
if height >= self.active_window_length {
let lower_bound = height - self.active_window_length;
self.active_validators
.retain(|_, height| *height > lower_bound);
}

self.active_validators
.iter()
.filter_map(|(k, v)| if *v <= upper_bound { Some(*k) } else { None })
.collect()
}

// Push a vote for a validator with id == "id" who voted at PoH height == "height"
pub fn push_vote(&mut self, id: Pubkey, height: u64) -> () {
let old_height = self.active_validators.entry(id).or_insert(height);
if height > *old_height {
*old_height = height;
}
}

pub fn reset(&mut self) -> () {
self.active_validators.clear();
}
}
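The retain/filter pair above makes the window half-open: a vote at exactly `height - active_window_length` has aged out, while anything strictly newer still counts. A dependency-free sketch of the same arithmetic, with `u64` ids standing in for `Pubkey` (the `Window` names are illustrative, not from this module):

```rust
use std::collections::HashMap;

// Simplified stand-in for ActiveValidators: u64 ids instead of Pubkey.
struct Window {
    votes: HashMap<u64, u64>, // id -> last vote height
    len: u64,
}

impl Window {
    fn active_set(&mut self, height: u64) -> Vec<u64> {
        if height >= self.len {
            let lower = height - self.len;
            // Same semantics as the retain() above: keep only votes strictly
            // newer than the lower bound of the (height - len, height] window.
            self.votes.retain(|_, h| *h > lower);
        }
        self.votes
            .iter()
            .filter_map(|(k, v)| if *v <= height { Some(*k) } else { None })
            .collect()
    }
}

fn main() {
    let mut w = Window { votes: HashMap::new(), len: 1000 };
    w.votes.insert(1, 50);   // voted at height 50
    w.votes.insert(2, 1050); // voted at height 1050
    // At height 1049 the window is (49, 1049]: the vote at 50 still counts,
    // and the vote at 1050 is in the future, so it is not counted yet.
    assert_eq!(w.active_set(1049), vec![1]);
    // At height 1050 the window is (50, 1050]: the vote at 50 has aged out.
    assert_eq!(w.active_set(1050), vec![2]);
}
```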

pub struct LeaderSchedulerConfig {
// The first leader who will bootstrap the network
pub bootstrap_leader: Pubkey,
@ -61,7 +119,7 @@ impl LeaderSchedulerConfig {
}
}

#[derive(Clone, Debug)]
#[derive(Debug)]
pub struct LeaderScheduler {
// Set to true if we want the default implementation of the LeaderScheduler,
// where only the bootstrap leader is used
@ -81,13 +139,12 @@ pub struct LeaderScheduler {
// the leader rotation process begins to pick future leaders
pub bootstrap_height: u64,

// Maintain the set of active validators
pub active_validators: ActiveValidators,

// The last height at which the seed + schedule was generated
pub last_seed_height: Option<u64>,

// The length of time in ticks for which a vote qualifies a candidate for leader
// selection
pub active_window_length: u64,

// Round-robin ordering for the validators
leader_schedule: Vec<Pubkey>,

@ -136,11 +193,6 @@ impl LeaderScheduler {
seed_rotation_interval = input;
}

let mut active_window_length = DEFAULT_ACTIVE_WINDOW_LENGTH;
if let Some(input) = config.active_window_length_option {
active_window_length = input;
}

// Enforced invariants
assert!(seed_rotation_interval >= leader_rotation_interval);
assert!(bootstrap_height > 0);
@ -148,13 +200,13 @@ impl LeaderScheduler {

LeaderScheduler {
use_only_bootstrap_leader: false,
active_validators: ActiveValidators::new(config.active_window_length_option),
leader_rotation_interval,
seed_rotation_interval,
leader_schedule: Vec::new(),
last_seed_height: None,
bootstrap_leader: config.bootstrap_leader,
bootstrap_height,
active_window_length,
seed: 0,
}
}
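Each `*_option` field falls back to a module default before the constructor asserts its invariants. A minimal sketch of that resolution, reusing the constant values above (the trimmed `Config` struct and `resolve` helper are assumptions for illustration, not this module's API):

```rust
const DEFAULT_BOOTSTRAP_HEIGHT: u64 = 1000;
const DEFAULT_LEADER_ROTATION_INTERVAL: u64 = 100;
const DEFAULT_SEED_ROTATION_INTERVAL: u64 = 1000;

// Hypothetical, trimmed-down config: just enough fields to show the
// Option -> default resolution and the invariant checks.
struct Config {
    bootstrap_height_option: Option<u64>,
    leader_rotation_interval_option: Option<u64>,
    seed_rotation_interval_option: Option<u64>,
}

fn resolve(config: &Config) -> (u64, u64, u64) {
    let bootstrap_height = config
        .bootstrap_height_option
        .unwrap_or(DEFAULT_BOOTSTRAP_HEIGHT);
    let leader_rotation_interval = config
        .leader_rotation_interval_option
        .unwrap_or(DEFAULT_LEADER_ROTATION_INTERVAL);
    let seed_rotation_interval = config
        .seed_rotation_interval_option
        .unwrap_or(DEFAULT_SEED_ROTATION_INTERVAL);

    // Same invariants the constructor asserts above.
    assert!(seed_rotation_interval >= leader_rotation_interval);
    assert!(bootstrap_height > 0);
    (bootstrap_height, leader_rotation_interval, seed_rotation_interval)
}

fn main() {
    let cfg = Config {
        bootstrap_height_option: None,
        leader_rotation_interval_option: Some(200),
        seed_rotation_interval_option: None,
    };
    assert_eq!(resolve(&cfg), (1000, 200, 1000));
}
```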
@ -228,6 +280,15 @@ impl LeaderScheduler {

pub fn reset(&mut self) {
self.last_seed_height = None;
self.active_validators.reset();
}

pub fn push_vote(&mut self, id: Pubkey, height: u64) {
if self.use_only_bootstrap_leader {
return;
}

self.active_validators.push_vote(id, height);
}

pub fn update_height(&mut self, height: u64, bank: &Bank) {
@ -282,34 +343,8 @@ impl LeaderScheduler {
Some(self.leader_schedule[validator_index])
}

// TODO: We use a HashSet for now because a single validator could potentially register
// multiple vote accounts. Once that is no longer possible (see the TODO in vote_program.rs,
// process_transaction(), case VoteInstruction::RegisterAccount), we can use a vector.
fn get_active_set(&mut self, height: u64, bank: &Bank) -> HashSet<Pubkey> {
let upper_bound = height;
let lower_bound = height.saturating_sub(self.active_window_length);

{
let bank_accounts = &*bank.accounts.read().unwrap();

bank_accounts
.values()
.filter_map(|account| {
if VoteProgram::check_id(&account.program_id) {
if let Ok(vote_state) = VoteProgram::deserialize(&account.userdata) {
return vote_state
.votes
.back()
.filter(|vote| {
vote.tick_height > lower_bound
&& vote.tick_height <= upper_bound
}).map(|_| vote_state.node_id);
}
}

None
}).collect()
}
fn get_active_set(&mut self, height: u64) -> Vec<Pubkey> {
self.active_validators.get_active_set(height)
}

// Called every seed_rotation_interval entries, generates the leader schedule
@ -319,8 +354,8 @@ impl LeaderScheduler {
assert!((height - self.bootstrap_height) % self.seed_rotation_interval == 0);
let seed = Self::calculate_seed(height);
self.seed = seed;
let active_set = self.get_active_set(height, &bank);
let ranked_active_set = Self::rank_active_set(bank, active_set.iter());
let active_set = self.get_active_set(height);
let ranked_active_set = Self::rank_active_set(bank, &active_set[..]);

// Handle case where there are no active validators with
// non-zero stake. In this case, use the bootstrap leader for
@ -382,11 +417,9 @@ impl LeaderScheduler {
bank.get_balance(id)
}

fn rank_active_set<'a, I>(bank: &Bank, active: I) -> Vec<(&'a Pubkey, u64)>
where
I: Iterator<Item = &'a Pubkey>,
{
fn rank_active_set<'a>(bank: &Bank, active: &'a [Pubkey]) -> Vec<(&'a Pubkey, u64)> {
let mut active_accounts: Vec<(&'a Pubkey, u64)> = active
.iter()
.filter_map(|pk| {
let stake = Self::get_stake(pk, bank);
if stake > 0 {
@ -445,6 +478,24 @@ impl Default for LeaderScheduler {
}
}
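The new slice-based `rank_active_set` drops zero-stake ids and orders the rest. A self-contained sketch of that shape, with a `HashMap` standing in for the Bank and `u64` ids for `Pubkey`; the sort key shown (stake, then id) is illustrative, since the real ordering lives in the body elided from this hunk:

```rust
use std::collections::HashMap;

// Stand-ins: u64 ids instead of Pubkey, a HashMap instead of the Bank.
fn rank_active_set<'a>(stakes: &HashMap<u64, u64>, active: &'a [u64]) -> Vec<(&'a u64, u64)> {
    let mut ranked: Vec<(&'a u64, u64)> = active
        .iter()
        .filter_map(|id| {
            let stake = *stakes.get(id).unwrap_or(&0);
            // Zero-stake validators never make it into the schedule.
            if stake > 0 { Some((id, stake)) } else { None }
        })
        .collect();
    // Sort by stake, breaking ties by id, so the schedule is deterministic.
    ranked.sort_by_key(|(id, stake)| (*stake, **id));
    ranked
}

fn main() {
    let stakes: HashMap<u64, u64> = [(1, 5), (2, 0), (3, 5), (4, 9)].into_iter().collect();
    let active = [4, 3, 2, 1];
    let ranked = rank_active_set(&stakes, &active);
    let ids: Vec<u64> = ranked.iter().map(|(id, _)| **id).collect();
    assert_eq!(ids, vec![1, 3, 4]); // id 2 dropped (no stake); tie broken by id
}
```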

// Remove all candidates for leader selection from the active set by clearing the bank,
// and then set a single new candidate who will be eligible starting at height = vote_height
// by adding one new account to the bank
#[cfg(test)]
pub fn set_new_leader(bank: &Bank, leader_scheduler: &mut LeaderScheduler, vote_height: u64) {
// Set the scheduled next leader to some other node
let new_leader_keypair = Keypair::new();
let new_leader_id = new_leader_keypair.pubkey();
leader_scheduler.push_vote(new_leader_id, vote_height);
let dummy_id = Keypair::new().pubkey();
let new_account = Account::new(1, 10, dummy_id.clone());

// Remove the previous accounts from the active set
let mut accounts = bank.accounts().write().unwrap();
accounts.clear();
accounts.insert(new_leader_id, new_account);
}

// Create two entries so that the node with keypair == active_keypair
// is in the active set for leader selection:
// 1) Give the node a nonzero number of tokens,
@ -455,107 +506,50 @@ pub fn make_active_set_entries(
last_entry_id: &Hash,
last_tick_id: &Hash,
num_ending_ticks: usize,
) -> (Vec<Entry>, Keypair) {
) -> Vec<Entry> {
// 1) Create transfer token entry
let transfer_tx =
Transaction::system_new(&token_source, active_keypair.pubkey(), 2, *last_tick_id);
Transaction::system_new(&token_source, active_keypair.pubkey(), 1, *last_tick_id);
let transfer_entry = Entry::new(last_entry_id, 1, vec![transfer_tx]);
let mut last_entry_id = transfer_entry.id;

// 2) Create the vote account
let vote_account = Keypair::new();
let create_vote_account_tx =
Transaction::vote_account_new(active_keypair, vote_account.pubkey(), *last_tick_id, 1);

let create_vote_account_entry = Entry::new(&last_entry_id, 1, vec![create_vote_account_tx]);
last_entry_id = create_vote_account_entry.id;

// 3) Register the vote account
let register_vote_account_tx =
Transaction::vote_account_register(active_keypair, vote_account.pubkey(), *last_tick_id, 0);

let register_vote_account_entry = Entry::new(&last_entry_id, 1, vec![register_vote_account_tx]);
last_entry_id = register_vote_account_entry.id;

// 4) Create vote entry
let vote = Vote { tick_height: 1 };
let vote_tx = Transaction::vote_new(&vote_account, vote, *last_tick_id, 0);
// 2) Create vote entry
let vote = Vote {
version: 0,
contact_info_version: 0,
};
let vote_tx = Transaction::budget_new_vote(&active_keypair, vote, *last_tick_id, 0);
let vote_entry = Entry::new(&last_entry_id, 1, vec![vote_tx]);
last_entry_id = vote_entry.id;

// 5) Create the ending empty ticks
let mut txs = vec![
transfer_entry,
create_vote_account_entry,
register_vote_account_entry,
vote_entry,
];
// 3) Create the ending empty ticks
let mut txs = vec![transfer_entry, vote_entry];
let empty_ticks = create_ticks(num_ending_ticks, last_entry_id);
txs.extend(empty_ticks);
(txs, vote_account)
txs
}
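Each entry built above chains off the previous entry's id, which is what lets a ledger verifier check ordering by construction. A toy sketch of that chaining invariant (the hash arithmetic is a stand-in, not the real `Entry::new`):

```rust
// Toy model of the entry chaining done above: each entry derives its id from
// the previous entry's id, so order is enforced by construction.
#[derive(Debug)]
struct Entry {
    prev: u64,
    id: u64,
}

fn next_entry(prev: u64, payload: u64) -> Entry {
    // Stand-in for Entry::new(&last_entry_id, 1, txs): a real entry id is a
    // hash over the previous id and the transactions.
    Entry { prev, id: prev.wrapping_mul(31).wrapping_add(payload) }
}

fn main() {
    let transfer = next_entry(7, 100);       // 1) transfer tokens
    let vote = next_entry(transfer.id, 200); // 2) vote
    let entries = vec![transfer, vote];
    // The chain property the ledger verifier relies on:
    assert!(entries.windows(2).all(|w| w[1].prev == w[0].id));
}
```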

#[cfg(test)]
mod tests {
use bank::Bank;
use hash::Hash;
use leader_scheduler::{
LeaderScheduler, LeaderSchedulerConfig, DEFAULT_BOOTSTRAP_HEIGHT,
DEFAULT_LEADER_ROTATION_INTERVAL, DEFAULT_SEED_ROTATION_INTERVAL,
ActiveValidators, LeaderScheduler, LeaderSchedulerConfig, DEFAULT_ACTIVE_WINDOW_LENGTH,
DEFAULT_BOOTSTRAP_HEIGHT, DEFAULT_LEADER_ROTATION_INTERVAL, DEFAULT_SEED_ROTATION_INTERVAL,
};
use mint::Mint;
use result::Result;
use signature::{Keypair, KeypairUtil};
use solana_sdk::pubkey::Pubkey;
use std::collections::HashSet;
use std::hash::Hash as StdHash;
use std::hash::Hash;
use std::iter::FromIterator;
use transaction::Transaction;
use vote_program::Vote;
use vote_transaction::VoteTransaction;

fn to_hashset_owned<T>(slice: &[T]) -> HashSet<T>
where
T: Eq + StdHash + Clone,
T: Eq + Hash + Clone,
{
HashSet::from_iter(slice.iter().cloned())
}

fn push_vote(vote_account: &Keypair, bank: &Bank, height: u64, last_id: Hash) {
let vote = Vote {
tick_height: height,
};

let new_vote_tx = Transaction::vote_new(vote_account, vote, last_id, 0);

bank.process_transaction(&new_vote_tx).unwrap();
}

fn create_vote_account(
node_keypair: &Keypair,
bank: &Bank,
num_tokens: i64,
last_id: Hash,
) -> Result<Keypair> {
let new_vote_account = Keypair::new();

// Create the new vote account
let tx = Transaction::vote_account_new(
node_keypair,
new_vote_account.pubkey(),
last_id,
num_tokens,
);
bank.process_transaction(&tx)?;

// Register the vote account to the validator
let tx =
Transaction::vote_account_register(node_keypair, new_vote_account.pubkey(), last_id, 0);
bank.process_transaction(&tx)?;

Ok(new_vote_account)
}

fn run_scheduler_test(
num_validators: usize,
bootstrap_height: u64,
@ -578,11 +572,7 @@ mod tests {
let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);

// Create the bank and validators, which are inserted in order of account balance
let num_vote_account_tokens = 1;
let mint = Mint::new(
(((num_validators + 1) / 2) * (num_validators + 1)
+ num_vote_account_tokens * num_validators) as i64,
);
let mint = Mint::new((((num_validators + 1) / 2) * (num_validators + 1)) as i64);
let bank = Bank::new(&mint);
let mut validators = vec![];
let last_id = mint
@ -594,24 +584,11 @@ mod tests {
let new_validator = Keypair::new();
let new_pubkey = new_validator.pubkey();
validators.push(new_pubkey);
// Give the validator some tokens
bank.transfer(
(i + 1 + num_vote_account_tokens) as i64,
&mint.keypair(),
new_pubkey,
last_id,
).unwrap();

// Create a vote account
let new_vote_account = create_vote_account(
&new_validator,
&bank,
num_vote_account_tokens as i64,
mint.last_id(),
).unwrap();
// Vote to make the validator part of the active set for the entire test
// (we made the active_window_length large enough at the beginning of the test)
push_vote(&new_vote_account, &bank, 1, mint.last_id());
leader_scheduler.push_vote(new_pubkey, 1);
bank.transfer((i + 1) as i64, &mint.keypair(), new_pubkey, last_id)
.unwrap();
}

// The scheduled leader during the bootstrapping period (assuming a seed + schedule
@ -689,9 +666,6 @@ mod tests {
fn test_active_set() {
let leader_id = Keypair::new().pubkey();
let active_window_length = 1000;
let mint = Mint::new(10000);
let bank = Bank::new(&mint);

let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_id,
Some(100),
@ -707,60 +681,40 @@ mod tests {
let num_old_ids = 20;
let mut old_ids = HashSet::new();
for _ in 0..num_old_ids {
let new_keypair = Keypair::new();
let pk = new_keypair.pubkey();
old_ids.insert(pk.clone());

// Give the account some stake
bank.transfer(5, &mint.keypair(), pk, mint.last_id())
.unwrap();

// Create a vote account
let new_vote_account =
create_vote_account(&new_keypair, &bank, 1, mint.last_id()).unwrap();

// Push a vote for the account
push_vote(&new_vote_account, &bank, start_height, mint.last_id());
let pk = Keypair::new().pubkey();
old_ids.insert(pk);
leader_scheduler.push_vote(pk, start_height);
}

// Insert a bunch of votes at height "start_height + active_window_length"
let num_new_ids = 10;
let mut new_ids = HashSet::new();
for _ in 0..num_new_ids {
let new_keypair = Keypair::new();
let pk = new_keypair.pubkey();
let pk = Keypair::new().pubkey();
new_ids.insert(pk);
// Give the account some stake
bank.transfer(5, &mint.keypair(), pk, mint.last_id())
.unwrap();

// Create a vote account
let new_vote_account =
create_vote_account(&new_keypair, &bank, 1, mint.last_id()).unwrap();

push_vote(
&new_vote_account,
&bank,
start_height + active_window_length,
mint.last_id(),
);
leader_scheduler.push_vote(pk, start_height + active_window_length);
}

// Queries for the active set
let result =
leader_scheduler.get_active_set(active_window_length + start_height - 1, &bank);
assert_eq!(result, old_ids);
let result = leader_scheduler.get_active_set(active_window_length + start_height - 1);
assert_eq!(result.len(), num_old_ids);
let result_set = to_hashset_owned(&result);
assert_eq!(result_set, old_ids);

let result = leader_scheduler.get_active_set(active_window_length + start_height, &bank);
assert_eq!(result, new_ids);
let result = leader_scheduler.get_active_set(active_window_length + start_height);
assert_eq!(result.len(), num_new_ids);
let result_set = to_hashset_owned(&result);
assert_eq!(result_set, new_ids);

let result =
leader_scheduler.get_active_set(2 * active_window_length + start_height - 1, &bank);
assert_eq!(result, new_ids);
let result = leader_scheduler.get_active_set(2 * active_window_length + start_height - 1);
assert_eq!(result.len(), num_new_ids);
let result_set = to_hashset_owned(&result);
assert_eq!(result_set, new_ids);

let result =
leader_scheduler.get_active_set(2 * active_window_length + start_height, &bank);
assert!(result.is_empty());
let result = leader_scheduler.get_active_set(2 * active_window_length + start_height);
assert_eq!(result.len(), 0);
let result_set = to_hashset_owned(&result);
assert!(result_set.is_empty());
}

#[test]
@ -800,7 +754,7 @@ mod tests {
}

let validators_pk: Vec<Pubkey> = validators.iter().map(Keypair::pubkey).collect();
let result = LeaderScheduler::rank_active_set(&bank, validators_pk.iter());
let result = LeaderScheduler::rank_active_set(&bank, &validators_pk[..]);

assert_eq!(result.len(), validators.len());

@ -830,7 +784,7 @@ mod tests {
.chain(new_validators.iter())
.map(Keypair::pubkey)
.collect();
let result = LeaderScheduler::rank_active_set(&bank, all_validators.iter());
let result = LeaderScheduler::rank_active_set(&bank, &all_validators[..]);
assert_eq!(result.len(), new_validators.len());

for (i, (pk, balance)) in result.into_iter().enumerate() {
@ -856,7 +810,7 @@ mod tests {
.unwrap();
}

let result = LeaderScheduler::rank_active_set(&bank, tied_validators_pk.iter());
let result = LeaderScheduler::rank_active_set(&bank, &tied_validators_pk[..]);
let mut sorted: Vec<&Pubkey> = tied_validators_pk.iter().map(|x| x).collect();
sorted.sort_by(|pk1, pk2| pk1.cmp(pk2));
assert_eq!(result.len(), tied_validators_pk.len());
@ -968,7 +922,6 @@ mod tests {
#[test]
fn test_scheduler_active_window() {
let num_validators = 10;
let num_vote_account_tokens = 1;
// Set up the LeaderScheduler struct
let bootstrap_leader_id = Keypair::new().pubkey();
let bootstrap_height = 500;
@ -990,10 +943,7 @@ mod tests {
let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);

// Create the bank and validators
let mint = Mint::new(
((((num_validators + 1) / 2) * (num_validators + 1))
+ (num_vote_account_tokens * num_validators)) as i64,
);
let mint = Mint::new((((num_validators + 1) / 2) * (num_validators + 1)) as i64);
let bank = Bank::new(&mint);
let mut validators = vec![];
let last_id = mint
@ -1005,29 +955,10 @@ mod tests {
let new_validator = Keypair::new();
let new_pubkey = new_validator.pubkey();
validators.push(new_pubkey);
// Give the validator some tokens
bank.transfer(
(i + 1 + num_vote_account_tokens) as i64,
&mint.keypair(),
new_pubkey,
last_id,
).unwrap();

// Create a vote account
let new_vote_account = create_vote_account(
&new_validator,
&bank,
num_vote_account_tokens as i64,
mint.last_id(),
).unwrap();

// Vote at height i * active_window_length for validator i
push_vote(
&new_vote_account,
&bank,
i * active_window_length + bootstrap_height,
mint.last_id(),
);
leader_scheduler.push_vote(new_pubkey, i * active_window_length + bootstrap_height);
bank.transfer((i + 1) as i64, &mint.keypair(), new_pubkey, last_id)
.unwrap();
}

// Generate schedule every active_window_length entries and check that
@ -1048,12 +979,8 @@ mod tests {

#[test]
fn test_multiple_vote() {
let leader_keypair = Keypair::new();
let leader_id = leader_keypair.pubkey();
let leader_id = Keypair::new().pubkey();
let active_window_length = 1000;
let mint = Mint::new(10000);
let bank = Bank::new(&mint);

let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_id,
Some(100),
@ -1064,38 +991,18 @@ mod tests {

let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);

// Give the node some tokens
bank.transfer(5, &mint.keypair(), leader_id, bank.last_id())
.unwrap();

// Check that a node that votes twice in a row will get included in the active
// Check that a validator that votes twice in a row will get included in the active
// window
let initial_vote_height = 1;

// Create a vote account
let new_vote_account =
create_vote_account(&leader_keypair, &bank, 1, mint.last_id()).unwrap();

// Vote twice
push_vote(
&new_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
push_vote(
&new_vote_account,
&bank,
initial_vote_height + 1,
mint.last_id(),
);

leader_scheduler.push_vote(leader_id, initial_vote_height);
leader_scheduler.push_vote(leader_id, initial_vote_height + 1);
let result = leader_scheduler.get_active_set(initial_vote_height + active_window_length);
assert_eq!(result, vec![leader_id]);
let result =
leader_scheduler.get_active_set(initial_vote_height + active_window_length, &bank);
assert_eq!(result, to_hashset_owned(&vec![leader_id]));
let result =
leader_scheduler.get_active_set(initial_vote_height + active_window_length + 1, &bank);
assert!(result.is_empty());
leader_scheduler.get_active_set(initial_vote_height + active_window_length + 1);
assert_eq!(result, vec![]);
}

#[test]
@ -1156,6 +1063,13 @@ mod tests {
DEFAULT_SEED_ROTATION_INTERVAL
);

// Check defaults for ActiveValidators
let active_validators = ActiveValidators::new(None);
assert_eq!(
active_validators.active_window_length,
DEFAULT_ACTIVE_WINDOW_LENGTH
);

// Check actual arguments for LeaderScheduler
let bootstrap_height = 500;
let leader_rotation_interval = 100;
@ -1182,11 +1096,14 @@ mod tests {
leader_scheduler.seed_rotation_interval,
seed_rotation_interval
);

// Check actual arguments for ActiveValidators
let active_validators = ActiveValidators::new(Some(active_window_length));
assert_eq!(active_validators.active_window_length, active_window_length);
}

fn run_consecutive_leader_test(num_slots_per_epoch: u64, add_validator: bool) {
let bootstrap_leader_keypair = Keypair::new();
let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
let bootstrap_leader_id = Keypair::new().pubkey();
let bootstrap_height = 500;
let leader_rotation_interval = 100;
let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;
@ -1213,20 +1130,11 @@ mod tests {
let initial_vote_height = 1;

// Create and add validator to the active set
let validator_keypair = Keypair::new();
let validator_id = validator_keypair.pubkey();
let validator_id = Keypair::new().pubkey();
if add_validator {
bank.transfer(5, &mint.keypair(), validator_id, last_id)
leader_scheduler.push_vote(validator_id, initial_vote_height);
bank.transfer(1, &mint.keypair(), validator_id, last_id)
.unwrap();
// Create a vote account
let new_vote_account =
create_vote_account(&validator_keypair, &bank, 1, mint.last_id()).unwrap();
push_vote(
&new_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
}

// Make sure the bootstrap leader, not the validator, is picked again on next slot
@ -1243,29 +1151,10 @@ mod tests {
}
};

let vote_account_tokens = 1;
bank.transfer(
leader_stake + vote_account_tokens,
&mint.keypair(),
bootstrap_leader_id,
last_id,
).unwrap();

// Create a vote account
let new_vote_account = create_vote_account(
&bootstrap_leader_keypair,
&bank,
vote_account_tokens,
mint.last_id(),
).unwrap();

// Add leader to the active set
push_vote(
&new_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
leader_scheduler.push_vote(bootstrap_leader_id, initial_vote_height);
bank.transfer(leader_stake, &mint.keypair(), bootstrap_leader_id, last_id)
.unwrap();

leader_scheduler.generate_schedule(bootstrap_height, &bank);

@ -1293,8 +1182,7 @@ mod tests {

#[test]
fn test_max_height_for_leader() {
let bootstrap_leader_keypair = Keypair::new();
let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
let bootstrap_leader_id = Keypair::new().pubkey();
let bootstrap_height = 500;
let leader_rotation_interval = 100;
let seed_rotation_interval = 2 * leader_rotation_interval;
@ -1366,34 +1254,15 @@ mod tests {
// Now test when the active set > 1 node

// Create and add validator to the active set
let validator_keypair = Keypair::new();
let validator_id = validator_keypair.pubkey();

// Create a vote account for the validator
bank.transfer(5, &mint.keypair(), validator_id, last_id)
let validator_id = Keypair::new().pubkey();
leader_scheduler.push_vote(validator_id, initial_vote_height);
bank.transfer(1, &mint.keypair(), validator_id, last_id)
.unwrap();
let new_validator_vote_account =
create_vote_account(&validator_keypair, &bank, 1, mint.last_id()).unwrap();
push_vote(
&new_validator_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);

// Create a vote account for the leader
bank.transfer(5, &mint.keypair(), bootstrap_leader_id, last_id)
.unwrap();
let new_leader_vote_account =
create_vote_account(&bootstrap_leader_keypair, &bank, 1, mint.last_id()).unwrap();

// Add leader to the active set
push_vote(
&new_leader_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
leader_scheduler.push_vote(bootstrap_leader_id, initial_vote_height);
bank.transfer(1, &mint.keypair(), bootstrap_leader_id, last_id)
.unwrap();

// Generate the schedule
leader_scheduler.generate_schedule(bootstrap_height, &bank);

159 src/leader_vote_stage.rs Normal file
@ -0,0 +1,159 @@
//! The `leader_vote_stage` module implements the TPU's vote stage. It
//! computes and notes the votes for the entries, and then sends the
//! Entry to its output channel.

use bank::Bank;
use cluster_info::ClusterInfo;
use counter::Counter;
use entry::Entry;
use ledger::Block;
use log::Level;
use result::{Error, Result};
use service::Service;
use signature::Keypair;
use std::net::UdpSocket;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};
use streamer::responder;
use timing::duration_as_ms;
use vote_stage::send_leader_vote;

pub struct LeaderVoteStage {
thread_hdls: Vec<JoinHandle<()>>,
vote_thread: JoinHandle<()>,
}

impl LeaderVoteStage {
/// Process any Entry items that have been published by the RecordStage.
/// continuously send entries out
pub fn compute_vote_and_send_entries(
cluster_info: &Arc<RwLock<ClusterInfo>>,
entry_sender: &Sender<Vec<Entry>>,
entry_receiver: &Receiver<Vec<Entry>>,
) -> Result<()> {
let mut ventries = Vec::new();
let mut received_entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
let now = Instant::now();
let mut num_new_entries = 0;

loop {
num_new_entries += received_entries.len();
ventries.push(received_entries);

if let Ok(n) = entry_receiver.try_recv() {
received_entries = n;
} else {
break;
}
}
inc_new_counter_info!("leader_vote_stage-entries_received", num_new_entries);
debug!("leader_vote_stage entries: {}", num_new_entries);

for entries in ventries {
let votes = &entries.votes();
cluster_info.write().unwrap().insert_votes(&votes);

inc_new_counter_info!("leader_vote_stage-write_entries", entries.len());

//TODO(anatoly): real stake based voting needs to change this
//leader simply votes if the current set of validators have voted
//on a valid last id

trace!("New entries? {}", entries.len());
if !entries.is_empty() {
inc_new_counter_info!("leader_vote_stage-recv_vote", votes.len());
inc_new_counter_info!("leader_vote_stage-entries_sent", entries.len());
trace!("broadcasting {}", entries.len());
entry_sender.send(entries)?;
}
}
inc_new_counter_info!(
"leader_vote_stage-time_ms",
duration_as_ms(&now.elapsed()) as usize
);

Ok(())
}
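The recv_timeout-then-try_recv drain above is a common batching idiom: block for the first batch, then greedily take whatever is already queued. The idiom in isolation, using only std (the `u64` payloads are placeholders for `Vec<Entry>`):

```rust
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::time::Duration;

// Block for the first batch, then drain anything already queued,
// mirroring the loop in compute_vote_and_send_entries above.
fn drain_batches(rx: &Receiver<Vec<u64>>) -> Result<Vec<Vec<u64>>, RecvTimeoutError> {
    let mut batches = Vec::new();
    let mut batch = rx.recv_timeout(Duration::new(1, 0))?;
    loop {
        batches.push(batch);
        if let Ok(next) = rx.try_recv() {
            batch = next;
        } else {
            break;
        }
    }
    Ok(batches)
}

fn main() {
    let (tx, rx) = channel();
    tx.send(vec![1, 2]).unwrap();
    tx.send(vec![3]).unwrap();
    // Both queued batches come back from a single call.
    assert_eq!(drain_batches(&rx).unwrap().len(), 2);
}
```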

/// Create a new LeaderVoteStage for voting and broadcasting entries.
pub fn new(
keypair: Arc<Keypair>,
bank: Arc<Bank>,
cluster_info: Arc<RwLock<ClusterInfo>>,
entry_receiver: Receiver<Vec<Entry>>,
) -> (Self, Receiver<Vec<Entry>>) {
let (vote_blob_sender, vote_blob_receiver) = channel();
let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
let t_responder = responder(
"leader_vote_stage_vote_sender",
Arc::new(send),
vote_blob_receiver,
);
let (entry_sender, entry_receiver_forward) = channel();

let vote_thread = Builder::new()
.name("solana-writer".to_string())
.spawn(move || {
let mut last_vote = 0;
let mut last_valid_validator_timestamp = 0;
let id = cluster_info.read().unwrap().id;
loop {
if let Err(e) = Self::compute_vote_and_send_entries(
&cluster_info,
&entry_sender,
&entry_receiver,
) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
break;
}
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => {
inc_new_counter_info!(
"leader_vote_stage-compute_vote_and_send_entries-error",
1
);
error!("{:?}", e);
}
}
};
if let Err(e) = send_leader_vote(
&id,
&keypair,
&bank,
&cluster_info,
&vote_blob_sender,
&mut last_vote,
&mut last_valid_validator_timestamp,
) {
inc_new_counter_info!("leader_vote_stage-leader_vote-error", 1);
error!("{:?}", e);
}
}
}).unwrap();

let thread_hdls = vec![t_responder];
(
LeaderVoteStage {
vote_thread,
thread_hdls,
},
entry_receiver_forward,
)
}
}

impl Service for LeaderVoteStage {
type JoinReturnType = ();

fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}

self.vote_thread.join()
}
}
src/ledger.rs
@ -3,7 +3,7 @@
//! read access to a persistent file-based ledger.

use bincode::{self, deserialize, deserialize_from, serialize_into, serialized_size};
#[cfg(test)]
use budget_instruction::Vote;
use budget_transaction::BudgetTransaction;
#[cfg(test)]
use chrono::prelude::Utc;
@ -25,8 +25,6 @@ use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::Path;
use transaction::Transaction;
use vote_program::Vote;
use vote_transaction::VoteTransaction;
use window::WINDOW_SIZE;

//
@ -498,7 +496,7 @@ impl Block for [Entry] {
entry
.transactions
.iter()
.flat_map(VoteTransaction::get_votes)
.filter_map(BudgetTransaction::vote)
}).collect()
}
}
@ -686,6 +684,7 @@ pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
mod tests {
use super::*;
use bincode::serialized_size;
use budget_instruction::Vote;
use budget_transaction::BudgetTransaction;
use entry::{next_entry, Entry};
use hash::hash;
@ -694,7 +693,6 @@ mod tests {
use std;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use transaction::Transaction;
use vote_program::Vote;

#[test]
fn test_verify_slice() {
@ -716,8 +714,15 @@ mod tests {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let keypair = Keypair::new();
let vote_account = Keypair::new();
let tx0 = Transaction::vote_new(&vote_account, Vote { tick_height: 1 }, one, 1);
let tx0 = Transaction::budget_new_vote(
&keypair,
Vote {
version: 0,
contact_info_version: 1,
},
one,
1,
);
let tx1 = Transaction::budget_new_timestamp(
&keypair,
keypair.pubkey(),
@ -767,8 +772,15 @@ mod tests {
let id = Hash::default();
let next_id = hash(&id.as_ref());
let keypair = Keypair::new();
let vote_account = Keypair::new();
let tx_small = Transaction::vote_new(&vote_account, Vote { tick_height: 1 }, next_id, 2);
let tx_small = Transaction::budget_new_vote(
&keypair,
Vote {
version: 0,
contact_info_version: 2,
},
next_id,
2,
);
let tx_large = Transaction::budget_new(&keypair, keypair.pubkey(), 1, next_id);

let tx_small_size = serialized_size(&tx_small).unwrap() as usize;
src/lib.rs
@ -35,6 +35,7 @@ pub mod fetch_stage;
pub mod fullnode;
pub mod hash;
pub mod leader_scheduler;
pub mod leader_vote_stage;
pub mod ledger;
pub mod ledger_write_stage;
pub mod loader_transaction;
@ -79,9 +80,7 @@ pub mod token_program;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod vote_program;
pub mod vote_stage;
pub mod vote_transaction;
pub mod wallet;
pub mod window;
pub mod window_service;
src/native_loader.rs
@ -40,7 +40,9 @@ fn create_path(name: &str) -> PathBuf {
)
}

const NATIVE_LOADER_PROGRAM_ID: [u8; 32] = [2u8; 32];
const NATIVE_LOADER_PROGRAM_ID: [u8; 32] = [
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];

// All native programs export a symbol named process()
const ENTRYPOINT: &str = "process";
@ -68,16 +70,21 @@ pub fn process_transaction(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8])
trace!("Call native {:?}", name);
let path = create_path(&name);
// TODO linux tls bug can cause crash on dlclose(), workaround by never unloading
let library = Library::open(Some(path), libc::RTLD_NODELETE | libc::RTLD_NOW).unwrap();
unsafe {
let entrypoint: Symbol<Entrypoint> = match library.get(ENTRYPOINT.as_bytes()) {
Ok(s) => s,
Err(e) => {
warn!("{:?}: Unable to find {:?} in program", e, ENTRYPOINT);
return false;
}
};
return entrypoint(&mut keyed_accounts[1..], tx_data);
match Library::open(Some(&path), libc::RTLD_NODELETE | libc::RTLD_NOW) {
Ok(library) => unsafe {
let entrypoint: Symbol<Entrypoint> = match library.get(ENTRYPOINT.as_bytes()) {
Ok(s) => s,
Err(e) => {
warn!("{:?}: Unable to find {:?} in program", e, ENTRYPOINT);
return false;
}
};
return entrypoint(&mut keyed_accounts[1..], tx_data);
},
Err(e) => {
warn!("Unable to load: {:?}", e);
return false;
}
}
} else if let Ok(instruction) = deserialize(tx_data) {
match instruction {
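The new constant above is just program id 1 written into the first byte of a 32-byte array. A small sketch making that construction explicit (the `program_id` helper is hypothetical, introduced only for illustration):

```rust
// Equivalent construction of the new id: the integer n in the first byte
// of a 32-byte array, with the remaining bytes zero.
fn program_id(n: u8) -> [u8; 32] {
    let mut id = [0u8; 32];
    id[0] = n;
    id
}

const NATIVE_LOADER_PROGRAM_ID: [u8; 32] = [
    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];

fn main() {
    assert_eq!(program_id(1), NATIVE_LOADER_PROGRAM_ID);
}
```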
src/replicate_stage.rs
@ -6,6 +6,8 @@ use counter::Counter;
use entry::{EntryReceiver, EntrySender};
use hash::Hash;
use influx_db_client as influxdb;
use leader_scheduler::LeaderScheduler;
use ledger::Block;
use log::Level;
use metrics;
use result::{Error, Result};
@ -57,10 +59,11 @@ impl ReplicateStage {
cluster_info: &Arc<RwLock<ClusterInfo>>,
window_receiver: &EntryReceiver,
keypair: &Arc<Keypair>,
vote_account_keypair: &Arc<Keypair>,
vote_blob_sender: Option<&BlobSender>,
ledger_entry_sender: &EntrySender,
tick_height: &mut u64,
entry_height: &mut u64,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
) -> Result<Hash> {
let timer = Duration::new(1, 0);
//coalesce all the available entries into a single vote
@ -78,23 +81,37 @@ impl ReplicateStage {
let mut res = Ok(());
let last_entry_id = {
let mut num_entries_to_write = entries.len();
let current_leader = bank
.get_current_leader()
.expect("Scheduled leader id should never be unknown while processing entries");
for (i, entry) in entries.iter().enumerate() {
res = bank.process_entry(&entry);
let my_id = keypair.pubkey();
let scheduled_leader = bank
.get_current_leader()
.expect("Scheduled leader id should never be unknown while processing entries");
// max_tick_height is the PoH height at which the next leader rotation will
// happen. The leader should send an entry such that the total PoH is equal
// to max_tick_height - guard.
// TODO: Introduce a "guard" for the end of transmission periods, the guard
// is assumed to be zero for now.
let max_tick_height = {
let ls_lock = leader_scheduler.read().unwrap();
ls_lock.max_height_for_leader(*tick_height)
};

// TODO: Remove this soon once we boot the leader from ClusterInfo
if scheduled_leader != current_leader {
cluster_info.write().unwrap().set_leader(scheduled_leader);
}
if my_id == scheduled_leader {
num_entries_to_write = i + 1;
break;
res = bank.process_entry(
&entry,
tick_height,
&mut *leader_scheduler.write().unwrap(),
);

// Will run only if leader_scheduler.use_only_bootstrap_leader is false
if let Some(max_tick_height) = max_tick_height {
let ls_lock = leader_scheduler.read().unwrap();
if *tick_height == max_tick_height {
let my_id = keypair.pubkey();
let scheduled_leader = ls_lock.get_scheduled_leader(*tick_height).expect(
"Scheduled leader id should never be unknown while processing entries",
);
cluster_info.write().unwrap().set_leader(scheduled_leader);
if my_id == scheduled_leader {
num_entries_to_write = i + 1;
break;
}
}
}

if res.is_err() {
@ -117,9 +134,11 @@ impl ReplicateStage {
};

if let Some(sender) = vote_blob_sender {
send_validator_vote(bank, vote_account_keypair, &cluster_info, sender)?;
send_validator_vote(bank, keypair, cluster_info, sender)?;
}

cluster_info.write().unwrap().insert_votes(&entries.votes());

inc_new_counter_info!(
"replicate-transactions",
entries.iter().map(|x| x.transactions.len()).sum()
@ -141,12 +160,13 @@ impl ReplicateStage {

pub fn new(
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
bank: Arc<Bank>,
cluster_info: Arc<RwLock<ClusterInfo>>,
window_receiver: EntryReceiver,
exit: Arc<AtomicBool>,
tick_height: u64,
entry_height: u64,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
) -> (Self, EntryReceiver) {
let (vote_blob_sender, vote_blob_receiver) = channel();
let (ledger_entry_sender, ledger_entry_receiver) = channel();
@ -162,15 +182,17 @@ impl ReplicateStage {
let now = Instant::now();
let mut next_vote_secs = 1;
let mut entry_height_ = entry_height;
let mut tick_height_ = tick_height;
let mut last_entry_id = None;
loop {
let leader_id =
bank.get_current_leader()
.expect("Scheduled leader id should never be unknown at this point");

let leader_id = leader_scheduler
.read()
.unwrap()
.get_scheduled_leader(tick_height_)
.expect("Scheduled leader id should never be unknown at this point");
if leader_id == keypair.pubkey() {
return Some(ReplicateStageReturnType::LeaderRotation(
bank.get_tick_height(),
tick_height_,
entry_height_,
// We should never start the TPU / this stage on an exact entry that causes leader
// rotation (Fullnode should automatically transition on startup if it detects
@ -193,10 +215,11 @@ impl ReplicateStage {
&cluster_info,
&window_receiver,
&keypair,
&vote_account_keypair,
vote_sender,
&ledger_entry_sender,
&mut tick_height_,
&mut entry_height_,
&leader_scheduler,
) {
Err(Error::RecvTimeoutError(RecvTimeoutError::Disconnected)) => break,
Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
@ -271,7 +294,7 @@ mod test {
// 1) Give the validator a nonzero number of tokens 2) A vote from the validator.
// This will cause leader rotation after the bootstrap height
let mut ledger_writer = LedgerWriter::open(&my_ledger_path, false).unwrap();
let (active_set_entries, vote_account_keypair) =
let active_set_entries =
make_active_set_entries(&my_keypair, &mint.keypair(), &last_id, &last_id, 0);
last_id = active_set_entries.last().unwrap().id;
let initial_tick_height = genesis_entries
@ -296,23 +319,26 @@ mod test {
Some(bootstrap_height),
);

let leader_scheduler =
Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config)));
let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);

// Set up the bank
let (bank, _, _, _) = Fullnode::new_bank_from_ledger(&my_ledger_path, leader_scheduler);
let (bank, _, _, _) =
Fullnode::new_bank_from_ledger(&my_ledger_path, &mut leader_scheduler);

let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));

// Set up the replicate stage
let (entry_sender, entry_receiver) = channel();
let exit = Arc::new(AtomicBool::new(false));
let (replicate_stage, _ledger_writer_recv) = ReplicateStage::new(
Arc::new(my_keypair),
Arc::new(vote_account_keypair),
Arc::new(bank),
Arc::new(RwLock::new(cluster_info_me)),
entry_receiver,
exit.clone(),
initial_tick_height,
initial_entry_len,
leader_scheduler.clone(),
);

// Send enough ticks to trigger leader rotation
@ -349,6 +375,13 @@ mod test {

assert_eq!(exit.load(Ordering::Relaxed), true);

// Check ledger height is correct
let mut leader_scheduler = Arc::try_unwrap(leader_scheduler)
.expect("Multiple references to this RwLock still exist")
.into_inner()
.expect("RwLock for LeaderScheduler is still locked");

leader_scheduler.reset();
let _ignored = remove_dir_all(&my_ledger_path);
}
}
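The max_tick_height comments in the replicate_entries hunk above describe a cut-off: processing stops at the entry whose tick reaches the rotation boundary, and only when this node is the scheduled leader there. A toy model of that cut-off, with `u64` ids and a closure standing in for the scheduler (all names here are illustrative assumptions):

```rust
// Toy model of the rotation cut-off: walk entries, bumping tick_height per
// tick, and stop early when this node is scheduled to lead at max_tick_height.
fn entries_to_write(
    ticks: &[bool], // true = tick entry
    mut tick_height: u64,
    max_tick_height: Option<u64>,
    my_id: u64,
    scheduled_leader_at: impl Fn(u64) -> u64, // tick height -> leader id
) -> usize {
    let mut num = ticks.len();
    for (i, is_tick) in ticks.iter().enumerate() {
        if *is_tick {
            tick_height += 1;
        }
        if let Some(max) = max_tick_height {
            if tick_height == max && my_id == scheduled_leader_at(tick_height) {
                num = i + 1; // stop after this entry; the rest replay post-rotation
                break;
            }
        }
    }
    num
}

fn main() {
    // Rotation at tick 3; this node (id 7) is the next leader.
    let n = entries_to_write(&[true, true, true, true], 0, Some(3), 7, |_| 7);
    assert_eq!(n, 3);
}
```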

@ -198,16 +198,14 @@ mod tests {
let (mint, leader_ledger_path) = create_tmp_genesis(leader_ledger_path, 100);

info!("starting leader node");
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let network_addr = leader_node.sockets.gossip.local_addr().unwrap();
let leader_info = leader_node.info.clone();
let vote_account_keypair = Arc::new(Keypair::new());
let leader = Fullnode::new(
leader_node,
&leader_ledger_path,
leader_keypair,
vote_account_keypair,
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_info.id),
@ -25,31 +25,31 @@ impl RequestProcessor {
Request::GetAccount { key } => {
let account = self.bank.get_account(&key);
let rsp = (Response::Account { key, account }, rsp_addr);
info!("Response::Account {:?}", rsp);
debug!("Response::Account {:?}", rsp);
Some(rsp)
}
Request::GetLastId => {
let id = self.bank.last_id();
let rsp = (Response::LastId { id }, rsp_addr);
info!("Response::LastId {:?}", rsp);
debug!("Response::LastId {:?}", rsp);
Some(rsp)
}
Request::GetTransactionCount => {
let transaction_count = self.bank.transaction_count() as u64;
let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
info!("Response::TransactionCount {:?}", rsp);
debug!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
Request::GetSignature { signature } => {
let signature_status = self.bank.has_signature(&signature);
let rsp = (Response::SignatureStatus { signature_status }, rsp_addr);
info!("Response::Signature {:?}", rsp);
debug!("Response::Signature {:?}", rsp);
Some(rsp)
}
Request::GetFinality => {
let time = self.bank.finality();
let rsp = (Response::Finality { time }, rsp_addr);
info!("Response::Finality {:?}", rsp);
debug!("Response::Finality {:?}", rsp);
Some(rsp)
}
}
@ -60,7 +60,7 @@ impl RequestStage {

let blobs = to_blobs(rsps)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
debug!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}
src/result.rs
@ -10,7 +10,6 @@ use poh_recorder;
use serde_json;
use std;
use std::any::Any;
use vote_stage;

#[derive(Debug)]
pub enum Error {
@ -28,7 +27,6 @@ pub enum Error {
ErasureError(erasure::ErasureError),
SendError,
PohRecorderError(poh_recorder::PohRecorderError),
VoteError(vote_stage::VoteError),
}

pub type Result<T> = std::result::Result<T, Error>;
@ -102,11 +100,6 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
Error::PohRecorderError(e)
}
}
impl std::convert::From<vote_stage::VoteError> for Error {
fn from(e: vote_stage::VoteError) -> Error {
Error::VoteError(e)
}
}
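The deleted impl above is the glue that let the `?` operator convert a `vote_stage::VoteError` into this module's `Error`; removing the variant removes the conversion. The pattern in isolation, with a stand-in error type:

```rust
#[derive(Debug)]
struct VoteError; // stand-in for the removed vote_stage::VoteError

#[derive(Debug)]
enum Error {
    VoteError(VoteError),
}

impl From<VoteError> for Error {
    fn from(e: VoteError) -> Error {
        Error::VoteError(e)
    }
}

fn vote() -> Result<(), VoteError> {
    Err(VoteError)
}

// Because of the From impl, `?` converts the error type automatically.
fn caller() -> Result<(), Error> {
    vote()?;
    Ok(())
}

fn main() {
    assert!(matches!(caller(), Err(Error::VoteError(_))));
}
```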

#[cfg(test)]
mod tests {
33 src/rpc.rs
@ -28,7 +28,6 @@ pub const RPC_PORT: u16 = 8899;

pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
exit: Arc<AtomicBool>,
}

impl JsonRpcService {
@ -36,12 +35,11 @@ impl JsonRpcService {
bank: &Arc<Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
rpc_addr: SocketAddr,
exit: Arc<AtomicBool>,
) -> Self {
let exit = Arc::new(AtomicBool::new(false));
let request_processor = JsonRpcRequestProcessor::new(bank.clone());
let info = cluster_info.clone();
let exit_pubsub = exit.clone();
let exit_ = exit.clone();
let thread_hdl = Builder::new()
.name("solana-jsonrpc".to_string())
.spawn(move || {
@ -64,23 +62,14 @@ impl JsonRpcService {
warn!("JSON RPC service unavailable: unable to bind to RPC port {}. \nMake sure this port is not already in use by another application", rpc_addr.port());
return;
}
while !exit_.load(Ordering::Relaxed) {
while !exit.load(Ordering::Relaxed) {
sleep(Duration::from_millis(100));
}
server.unwrap().close();
()
})
.unwrap();
JsonRpcService { thread_hdl, exit }
}

pub fn exit(&self) {
self.exit.store(true, Ordering::Relaxed);
}

pub fn close(self) -> thread::Result<()> {
self.exit();
self.join()
JsonRpcService { thread_hdl }
}
}
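The service now receives the exit flag instead of minting its own, so a single caller-owned flag can stop several services together (which is also why the per-service exit()/close() helpers above were removed). A minimal sketch of that shared-flag shutdown pattern, using only std:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

// One caller-owned flag stops every service holding a clone of it,
// mirroring the new JsonRpcService/PubSubService signatures.
fn spawn_service(exit: Arc<AtomicBool>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        while !exit.load(Ordering::Relaxed) {
            thread::sleep(Duration::from_millis(100));
        }
    })
}

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let rpc = spawn_service(exit.clone());
    let pubsub = spawn_service(exit.clone());
    exit.store(true, Ordering::Relaxed); // a single flip shuts both down
    rpc.join().unwrap();
    pubsub.join().unwrap();
}
```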
|
||||
|
||||
@ -390,7 +379,8 @@ mod tests {
|
||||
ClusterInfo::new(NodeInfo::new_unspecified()).unwrap(),
|
||||
));
|
||||
let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 24680);
|
||||
let rpc_service = JsonRpcService::new(&Arc::new(bank), &cluster_info, rpc_addr);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let rpc_service = JsonRpcService::new(&Arc::new(bank), &cluster_info, rpc_addr, exit);
|
||||
let thread = rpc_service.thread_hdl.thread();
|
||||
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
|
||||
|
||||
@ -596,11 +586,11 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_rpc_send_tx() {
|
||||
let leader_keypair = Arc::new(Keypair::new());
|
||||
let leader_keypair = Keypair::new();
|
||||
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
|
||||
|
||||
let alice = Mint::new(10_000_000);
|
||||
let mut bank = Bank::new(&alice);
|
||||
let bank = Bank::new(&alice);
|
||||
let bob_pubkey = Keypair::new().pubkey();
|
||||
let leader_data = leader.info.clone();
|
||||
let ledger_path = create_tmp_ledger_with_mint("rpc_send_tx", &alice);
|
||||
@ -612,16 +602,8 @@ mod tests {
|
||||
|
||||
let genesis_entries = &alice.create_entries();
|
||||
let entry_height = genesis_entries.len() as u64;
|
||||
|
||||
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
|
||||
leader_data.id,
|
||||
)));
|
||||
bank.leader_scheduler = leader_scheduler;
|
||||
|
||||
let vote_account_keypair = Arc::new(Keypair::new());
|
||||
let server = Fullnode::new_with_bank(
|
||||
leader_keypair,
|
||||
vote_account_keypair,
|
||||
bank,
|
||||
0,
|
||||
entry_height,
|
||||
@ -630,6 +612,7 @@ mod tests {
|
||||
None,
|
||||
&ledger_path,
|
||||
false,
|
||||
LeaderScheduler::from_bootstrap_leader(leader_data.id),
|
||||
Some(rpc_port),
|
||||
);
|
||||
sleep(Duration::from_millis(900));
|
||||
|
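The net effect of this rpc.rs hunk is an ownership inversion: `JsonRpcService` (and `PubSubService` below) no longer create their own exit flag and expose `exit()`/`close()`; the caller allocates one `Arc<AtomicBool>` and passes clones in. A minimal sketch of the resulting shutdown pattern, using only the standard library with the service body stubbed out:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{sleep, Builder};
use std::time::Duration;

fn main() {
    // The caller now owns the exit flag.
    let exit = Arc::new(AtomicBool::new(false));

    // Each service gets its own clone, as JsonRpcService::new and
    // PubSubService::new do after this change.
    let exit_rpc = exit.clone();
    let rpc = Builder::new()
        .name("solana-jsonrpc".to_string())
        .spawn(move || {
            // stand-in for the polling loop in the diff
            while !exit_rpc.load(Ordering::Relaxed) {
                sleep(Duration::from_millis(100));
            }
        })
        .unwrap();

    // One store() shuts down every service sharing the flag.
    exit.store(true, Ordering::Relaxed);
    rpc.join().unwrap();
}
```

One flag can then stop the RPC, pubsub, and gossip threads together instead of each service being closed individually.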
@@ -27,7 +27,6 @@ pub enum ClientState {

 pub struct PubSubService {
     thread_hdl: JoinHandle<()>,
-    exit: Arc<AtomicBool>,
 }

 impl Service for PubSubService {
@@ -39,10 +38,8 @@ impl Service for PubSubService {
 }

 impl PubSubService {
-    pub fn new(bank: &Arc<Bank>, pubsub_addr: SocketAddr) -> Self {
+    pub fn new(bank: &Arc<Bank>, pubsub_addr: SocketAddr, exit: Arc<AtomicBool>) -> Self {
         let rpc = RpcSolPubSubImpl::new(JsonRpcRequestProcessor::new(bank.clone()), bank.clone());
-        let exit = Arc::new(AtomicBool::new(false));
-        let exit_ = exit.clone();
         let thread_hdl = Builder::new()
             .name("solana-pubsub".to_string())
             .spawn(move || {
@@ -63,23 +60,14 @@ impl PubSubService {
                 warn!("Pubsub service unavailable: unable to bind to port {}. \nMake sure this port is not already in use by another application", pubsub_addr.port());
                 return;
             }
-            while !exit_.load(Ordering::Relaxed) {
+            while !exit.load(Ordering::Relaxed) {
                 sleep(Duration::from_millis(100));
             }
             server.unwrap().close();
             ()
         })
         .unwrap();
-        PubSubService { thread_hdl, exit }
-    }
-
-    pub fn exit(&self) {
-        self.exit.store(true, Ordering::Relaxed);
-    }
-
-    pub fn close(self) -> thread::Result<()> {
-        self.exit();
-        self.join()
+        PubSubService { thread_hdl }
     }
 }

@@ -139,7 +127,6 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
         subscriber: pubsub::Subscriber<Account>,
         pubkey_str: String,
     ) {
-        info!("account_subscribe");
         let pubkey_vec = bs58::decode(pubkey_str).into_vec().unwrap();
         if pubkey_vec.len() != mem::size_of::<Pubkey>() {
             subscriber
@@ -154,6 +141,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {

         let id = self.uid.fetch_add(1, atomic::Ordering::SeqCst);
         let sub_id = SubscriptionId::Number(id as u64);
+        info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
         let sink = subscriber.assign_id(sub_id.clone()).unwrap();
         let bank_sub_id = Keypair::new().pubkey();
         self.account_subscriptions
@@ -166,7 +154,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
     }

     fn account_unsubscribe(&self, id: SubscriptionId) -> Result<bool> {
-        info!("account_unsubscribe");
+        info!("account_unsubscribe: id={:?}", id);
         if let Some((bank_sub_id, pubkey)) = self.account_subscriptions.write().unwrap().remove(&id)
         {
             self.bank.remove_account_subscription(&bank_sub_id, &pubkey);
@@ -261,7 +249,8 @@ mod tests {
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
-        let pubsub_service = PubSubService::new(&Arc::new(bank), pubsub_addr);
+        let exit = Arc::new(AtomicBool::new(false));
+        let pubsub_service = PubSubService::new(&Arc::new(bank), pubsub_addr, exit);
         let thread = pubsub_service.thread_hdl.thread();
         assert_eq!(thread.name().unwrap(), "solana-pubsub");
     }
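A side note on the `account_subscribe` context lines above: the pubkey arrives as a base58 string and is validated by decoded length before a subscription id is assigned. A self-contained sketch of that check, assuming the `bs58` crate and hard-coding the 32-byte size that `mem::size_of::<Pubkey>()` evaluates to:

```rust
// Cargo.toml: bs58 = "0.4" (any recent version of the crate should do)

/// Decode a base58 pubkey string, rejecting anything that is not 32 bytes.
fn parse_pubkey(pubkey_str: &str) -> Result<[u8; 32], String> {
    let pubkey_vec = bs58::decode(pubkey_str)
        .into_vec()
        .map_err(|e| format!("bad base58: {:?}", e))?;
    if pubkey_vec.len() != 32 {
        return Err(format!("expected 32 bytes, got {}", pubkey_vec.len()));
    }
    let mut pubkey = [0u8; 32];
    pubkey.copy_from_slice(&pubkey_vec);
    Ok(pubkey)
}

fn main() {
    // 32 zero bytes round-trip to a string of 32 '1' characters in base58.
    assert_eq!(
        parse_pubkey("11111111111111111111111111111111"),
        Ok([0u8; 32])
    );
    // Decodes fine but is far too short to be a pubkey.
    assert!(parse_pubkey("abc").is_err());
}
```

Note that the service code above still `unwrap()`s the decode itself; only the length check is reported back to the subscriber.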
@@ -17,7 +17,10 @@ pub enum StorageError {
     InvalidUserData,
 }

-pub const STORAGE_PROGRAM_ID: [u8; 32] = [1u8; 32];
+const STORAGE_PROGRAM_ID: [u8; 32] = [
+    130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0,
+];

 impl StorageProgram {
     pub fn check_id(program_id: &Pubkey) -> bool {
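This hunk renumbers the storage program id from `[1u8; 32]` to an id whose first byte is 130 (budget and token become 129 and 131 elsewhere in this compare), and makes the constant private so callers go through `check_id`. A hypothetical sketch of the comparison this constant feeds, written against a bare `[u8; 32]` in place of the real `Pubkey` wrapper:

```rust
// STORAGE_PROGRAM_ID copied from the hunk above; check_id is a guess at
// the comparison, not the exact source.
const STORAGE_PROGRAM_ID: [u8; 32] = [
    130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0,
];

fn check_id(program_id: &[u8; 32]) -> bool {
    program_id == &STORAGE_PROGRAM_ID
}

fn main() {
    assert!(check_id(&STORAGE_PROGRAM_ID));
    assert!(!check_id(&[0u8; 32])); // an all-zero id no longer collides
}
```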
@@ -261,7 +261,7 @@ mod test {
     #[test]
     fn test_sdk_serialize() {
         let keypair = Keypair::new();
-        use budget_program::BUDGET_PROGRAM_ID;
+        use budget_program::BudgetState;

         // CreateAccount
         let tx = Transaction::system_create(
@@ -270,14 +270,14 @@ mod test {
             Hash::default(),
             111,
             222,
-            Pubkey::new(&BUDGET_PROGRAM_ID),
+            BudgetState::id(),
             0,
         );

        assert_eq!(
            tx.userdata(0).to_vec(),
            vec![
-               0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 222, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 222, 0, 0, 0, 0, 0, 0, 0, 129, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
            ]
        );
@@ -302,17 +302,12 @@ mod test {
        );

        // Assign
-       let tx = Transaction::system_assign(
-           &keypair,
-           Hash::default(),
-           Pubkey::new(&BUDGET_PROGRAM_ID),
-           0,
-       );
+       let tx = Transaction::system_assign(&keypair, Hash::default(), BudgetState::id(), 0);
        assert_eq!(
            tx.userdata(0).to_vec(),
            vec![
-               1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-               0, 0, 0, 0, 0, 0, 0, 0
+               1, 0, 0, 0, 129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0, 0
            ]
        );

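The expected byte vectors in these tests change in exactly one position each: byte 20 of the CreateAccount userdata and byte 4 of the Assign userdata, i.e. the first byte of the embedded program id (1 before, 129 after the renumbering). A sketch of how the CreateAccount layout appears to decompose; the field types here are a guess at the bincode encoding, not taken from the source:

```rust
fn main() {
    let mut userdata = Vec::new();
    userdata.extend_from_slice(&0u32.to_le_bytes()); // instruction tag: CreateAccount
    userdata.extend_from_slice(&111i64.to_le_bytes()); // tokens
    userdata.extend_from_slice(&222u64.to_le_bytes()); // space
    let mut program_id = [0u8; 32];
    program_id[0] = 129; // first byte of the renumbered budget program id
    userdata.extend_from_slice(&program_id);

    assert_eq!(userdata.len(), 52); // 26 + 26, matching the two vec! rows above
    let expected_prefix: [u8; 26] = [
        0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 222, 0, 0, 0, 0, 0, 0, 0, 129, 0, 0, 0, 0, 0,
    ];
    assert_eq!(&userdata[..26], &expected_prefix[..]);
}
```

Under that reading, the Assign userdata is just a 4-byte tag of 1 followed by the 32-byte program id, which matches its 36-byte expected vector.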
@@ -26,7 +26,6 @@ use std::time::Instant;
 use system_transaction::SystemTransaction;
 use timing;
 use transaction::Transaction;
-use vote_transaction::VoteTransaction;

 use influx_db_client as influxdb;
 use metrics;
@@ -149,29 +148,6 @@ impl ThinClient {
         ))
     }

-    pub fn create_vote_account(
-        &self,
-        node_keypair: &Keypair,
-        vote_account_id: Pubkey,
-        last_id: &Hash,
-        num_tokens: i64,
-    ) -> io::Result<Signature> {
-        let tx =
-            Transaction::vote_account_new(&node_keypair, vote_account_id, *last_id, num_tokens);
-        self.transfer_signed(&tx)
-    }
-
-    /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
-    pub fn register_vote_account(
-        &self,
-        node_keypair: &Keypair,
-        vote_account_id: Pubkey,
-        last_id: &Hash,
-    ) -> io::Result<Signature> {
-        let tx = Transaction::vote_account_register(node_keypair, vote_account_id, *last_id, 0);
-        self.transfer_signed(&tx)
-    }
-
     /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
     pub fn transfer(
         &self,
@@ -194,24 +170,6 @@ impl ThinClient {
         result
     }

-    pub fn get_account_userdata(&mut self, pubkey: &Pubkey) -> io::Result<Option<Vec<u8>>> {
-        let req = Request::GetAccount { key: *pubkey };
-        let data = serialize(&req).expect("serialize GetAccount in pub fn get_account_userdata");
-        self.requests_socket
-            .send_to(&data, &self.requests_addr)
-            .expect("buffer error in pub fn get_account_userdata");
-
-        loop {
-            let resp = self.recv_response()?;
-            trace!("recv_response {:?}", resp);
-            if let Response::Account { key, account } = resp {
-                if key == *pubkey {
-                    return Ok(account.map(|account| account.userdata));
-                }
-            }
-        }
-    }
-
     /// Request the balance of the user holding `pubkey`. This method blocks
     /// until the server sends a response. If the response packet is dropped
     /// by the network, this method will hang indefinitely.
@@ -488,23 +446,17 @@ mod tests {
     #[ignore]
     fn test_thin_client() {
         logger::setup();
-        let leader_keypair = Arc::new(Keypair::new());
+        let leader_keypair = Keypair::new();
         let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
         let leader_data = leader.info.clone();

         let alice = Mint::new(10_000);
-        let mut bank = Bank::new(&alice);
+        let bank = Bank::new(&alice);
         let bob_pubkey = Keypair::new().pubkey();
         let ledger_path = create_tmp_ledger_with_mint("thin_client", &alice);

-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader_data.id,
-        )));
-        bank.leader_scheduler = leader_scheduler;
-        let vote_account_keypair = Arc::new(Keypair::new());
         let server = Fullnode::new_with_bank(
             leader_keypair,
-            vote_account_keypair,
             bank,
             0,
             0,
@@ -513,6 +465,7 @@ mod tests {
             None,
             &ledger_path,
             false,
+            LeaderScheduler::from_bootstrap_leader(leader_data.id),
             Some(0),
         );
         sleep(Duration::from_millis(900));
@@ -542,22 +495,16 @@ mod tests {
     #[ignore]
     fn test_bad_sig() {
         logger::setup();
-        let leader_keypair = Arc::new(Keypair::new());
+        let leader_keypair = Keypair::new();
         let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
         let alice = Mint::new(10_000);
-        let mut bank = Bank::new(&alice);
+        let bank = Bank::new(&alice);
         let bob_pubkey = Keypair::new().pubkey();
         let leader_data = leader.info.clone();
         let ledger_path = create_tmp_ledger_with_mint("bad_sig", &alice);

-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader_data.id,
-        )));
-        bank.leader_scheduler = leader_scheduler;
-        let vote_account_keypair = Arc::new(Keypair::new());
         let server = Fullnode::new_with_bank(
             leader_keypair,
-            vote_account_keypair,
             bank,
             0,
             0,
@@ -566,6 +513,7 @@ mod tests {
             None,
             &ledger_path,
             false,
+            LeaderScheduler::from_bootstrap_leader(leader_data.id),
             Some(0),
         );
         //TODO: remove this sleep, or add a retry so CI is stable
@@ -608,25 +556,18 @@ mod tests {
     #[test]
     fn test_client_check_signature() {
         logger::setup();
-        let leader_keypair = Arc::new(Keypair::new());
+        let leader_keypair = Keypair::new();
         let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
         let alice = Mint::new(10_000);
-        let mut bank = Bank::new(&alice);
+        let bank = Bank::new(&alice);
         let bob_pubkey = Keypair::new().pubkey();
         let leader_data = leader.info.clone();
         let ledger_path = create_tmp_ledger_with_mint("client_check_signature", &alice);

         let genesis_entries = &alice.create_entries();
         let entry_height = genesis_entries.len() as u64;

-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader_data.id,
-        )));
-        bank.leader_scheduler = leader_scheduler;
-        let vote_account_keypair = Arc::new(Keypair::new());
         let server = Fullnode::new_with_bank(
             leader_keypair,
-            vote_account_keypair,
             bank,
             0,
             entry_height,
@@ -635,6 +576,7 @@ mod tests {
             None,
             &ledger_path,
             false,
+            LeaderScheduler::from_bootstrap_leader(leader_data.id),
             Some(0),
         );
         sleep(Duration::from_millis(300));
@@ -678,25 +620,18 @@ mod tests {
     #[test]
     fn test_zero_balance_after_nonzero() {
         logger::setup();
-        let leader_keypair = Arc::new(Keypair::new());
+        let leader_keypair = Keypair::new();
         let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
         let alice = Mint::new(10_000);
-        let mut bank = Bank::new(&alice);
+        let bank = Bank::new(&alice);
         let bob_keypair = Keypair::new();
         let leader_data = leader.info.clone();
         let ledger_path = create_tmp_ledger_with_mint("zero_balance_check", &alice);

         let genesis_entries = &alice.create_entries();
         let entry_height = genesis_entries.len() as u64;

-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader_data.id,
-        )));
-        bank.leader_scheduler = leader_scheduler;
-        let vote_account_keypair = Arc::new(Keypair::new());
         let server = Fullnode::new_with_bank(
             leader_keypair,
-            vote_account_keypair,
             bank,
             0,
             entry_height,
@@ -705,6 +640,7 @@ mod tests {
             None,
             &ledger_path,
             false,
+            LeaderScheduler::from_bootstrap_leader(leader_data.id),
             Some(0),
         );
         sleep(Duration::from_millis(900));
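The removed `get_account_userdata` is a good illustration of ThinClient's blocking request/response shape: serialize and send one request, then loop over incoming responses until one matches the requested key, skipping everything else (and, as the surviving doc comment warns, hanging forever if the reply is dropped). A stubbed sketch of that shape with the socket transport faked out; all names and types here are illustrative, not the crate's API:

```rust
enum Response {
    Account { key: u64, userdata: Option<Vec<u8>> }, // key stands in for a Pubkey
    Other,                                           // any non-matching response kind
}

// Stand-in for ThinClient::recv_response(), which blocks on the requests socket.
fn recv_response(inbox: &mut Vec<Response>) -> Response {
    inbox.pop().unwrap_or(Response::Other)
}

// Same shape as the removed method: skip responses that are not an Account
// reply for our key, return the userdata when the matching reply arrives.
// With a real socket and a lost reply, this loop never terminates.
fn get_account_userdata(key: u64, inbox: &mut Vec<Response>) -> Option<Vec<u8>> {
    loop {
        match recv_response(inbox) {
            Response::Account { key: k, userdata } if k == key => return userdata,
            _ => continue, // not ours; keep waiting
        }
    }
}

fn main() {
    let mut inbox = vec![
        Response::Account { key: 42, userdata: Some(vec![1, 2, 3]) },
        Response::Other,
    ];
    // Pops `Other` first (skipped), then the matching account response.
    assert_eq!(get_account_userdata(42, &mut inbox), Some(vec![1, 2, 3]));
}
```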
@@ -105,8 +105,9 @@ impl Default for TokenProgram {
     }
 }

-pub const TOKEN_PROGRAM_ID: [u8; 32] = [
-    5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+const TOKEN_PROGRAM_ID: [u8; 32] = [
+    131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0,
 ];

 impl TokenProgram {
22 src/tpu.rs
@@ -27,18 +27,21 @@

 use bank::Bank;
 use banking_stage::{BankingStage, BankingStageReturnType};
+use cluster_info::ClusterInfo;
 use entry::Entry;
 use fetch_stage::FetchStage;
 use hash::Hash;
+use leader_vote_stage::LeaderVoteStage;
 use ledger_write_stage::LedgerWriteStage;
 use poh_service::Config;
 use service::Service;
+use signature::Keypair;
 use sigverify_stage::SigVerifyStage;
 use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::mpsc::Receiver;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
 use std::thread;

 pub enum TpuReturnType {
@@ -49,6 +52,7 @@ pub struct Tpu {
     fetch_stage: FetchStage,
     sigverify_stage: SigVerifyStage,
     banking_stage: BankingStage,
+    leader_vote_stage: LeaderVoteStage,
     ledger_write_stage: LedgerWriteStage,
     exit: Arc<AtomicBool>,
 }
@@ -56,7 +60,9 @@ pub struct Tpu {
 impl Tpu {
     #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
     pub fn new(
+        keypair: Arc<Keypair>,
         bank: &Arc<Bank>,
+        cluster_info: &Arc<RwLock<ClusterInfo>>,
         tick_duration: Config,
         transactions_sockets: Vec<UdpSocket>,
         ledger_path: &str,
@@ -81,21 +87,28 @@ impl Tpu {
             max_tick_height,
         );

+        let (leader_vote_stage, ledger_entry_receiver) =
+            LeaderVoteStage::new(keypair, bank.clone(), cluster_info.clone(), entry_receiver);
+
         let (ledger_entry_sender, entry_forwarder) = channel();
-        let ledger_write_stage =
-            LedgerWriteStage::new(Some(ledger_path), entry_receiver, Some(ledger_entry_sender));
+        let ledger_write_stage = LedgerWriteStage::new(
+            Some(ledger_path),
+            ledger_entry_receiver,
+            Some(ledger_entry_sender),
+        );

         let tpu = Tpu {
             fetch_stage,
             sigverify_stage,
             banking_stage,
+            leader_vote_stage,
             ledger_write_stage,
             exit: exit.clone(),
         };
         (tpu, entry_forwarder, exit)
     }

-    pub fn exit(&self) {
+    pub fn exit(&self) -> () {
         self.exit.store(true, Ordering::Relaxed);
     }

@@ -115,6 +128,7 @@ impl Service for Tpu {
     fn join(self) -> thread::Result<(Option<TpuReturnType>)> {
         self.fetch_stage.join()?;
         self.sigverify_stage.join()?;
+        self.leader_vote_stage.join()?;
         self.ledger_write_stage.join()?;
         match self.banking_stage.join()? {
             Some(BankingStageReturnType::LeaderRotation) => Ok(Some(TpuReturnType::LeaderRotation)),
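The wiring above threads mpsc channels through the pipeline: banking emits entries, the new LeaderVoteStage consumes and forwards them as `ledger_entry_receiver`, and LedgerWriteStage both persists and forwards to the caller as `entry_forwarder`. A runnable sketch of that channel topology with the stage bodies reduced to pass-through stubs (entries simplified to `u64`):

```rust
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

// Generic pass-through stage: a real stage would vote on or persist the
// entry before forwarding it downstream.
fn spawn_stage(input: Receiver<u64>, output: Sender<u64>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        for entry in input {
            if output.send(entry).is_err() {
                break; // downstream hung up
            }
        }
    })
}

fn main() {
    let (entry_sender, entry_receiver) = channel(); // banking -> vote stage
    let (vote_sender, ledger_entry_receiver) = channel(); // vote -> ledger write
    let (ledger_entry_sender, entry_forwarder) = channel(); // ledger write -> caller

    let vote_stage = spawn_stage(entry_receiver, vote_sender);
    let write_stage = spawn_stage(ledger_entry_receiver, ledger_entry_sender);

    entry_sender.send(1).unwrap();
    drop(entry_sender); // closing the head of the pipeline drains every stage

    assert_eq!(entry_forwarder.iter().collect::<Vec<_>>(), vec![1]);
    vote_stage.join().unwrap();
    write_stage.join().unwrap();
}
```

This mirrors why `Tpu::join` above joins each stage in pipeline order before interpreting the banking stage's return value.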
26 src/tvu.rs
@@ -40,6 +40,7 @@ use bank::Bank;
 use blob_fetch_stage::BlobFetchStage;
 use cluster_info::ClusterInfo;
 use hash::Hash;
+use leader_scheduler::LeaderScheduler;
 use ledger_write_stage::LedgerWriteStage;
 use replicate_stage::{ReplicateStage, ReplicateStageReturnType};
 use retransmit_stage::RetransmitStage;
@@ -79,8 +80,8 @@ impl Tvu {
     #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
     pub fn new(
         keypair: Arc<Keypair>,
+        vote_account_keypair: Arc<Keypair>,
         bank: &Arc<Bank>,
         tick_height: u64,
         entry_height: u64,
         cluster_info: Arc<RwLock<ClusterInfo>>,
         window: SharedWindow,
@@ -88,6 +89,7 @@ impl Tvu {
         repair_socket: UdpSocket,
         retransmit_socket: UdpSocket,
         ledger_path: Option<&str>,
+        leader_scheduler: Arc<RwLock<LeaderScheduler>>,
     ) -> Self {
         let exit = Arc::new(AtomicBool::new(false));

@@ -103,22 +105,23 @@ impl Tvu {
         let (retransmit_stage, blob_window_receiver) = RetransmitStage::new(
             &cluster_info,
             window,
-            bank.get_tick_height(),
+            tick_height,
             entry_height,
             Arc::new(retransmit_socket),
             repair_socket,
             blob_fetch_receiver,
-            bank.leader_scheduler.clone(),
+            leader_scheduler.clone(),
         );

         let (replicate_stage, ledger_entry_receiver) = ReplicateStage::new(
             keypair,
+            vote_account_keypair,
             bank.clone(),
             cluster_info,
             blob_window_receiver,
             exit.clone(),
+            tick_height,
             entry_height,
+            leader_scheduler,
         );

         let ledger_write_stage = LedgerWriteStage::new(ledger_path, ledger_entry_receiver, None);
@@ -136,7 +139,7 @@ impl Tvu {
         self.exit.load(Ordering::Relaxed)
     }

-    pub fn exit(&self) {
+    pub fn exit(&self) -> () {
         self.exit.store(true, Ordering::Relaxed);
     }

@@ -252,12 +255,7 @@ pub mod tests {
         let starting_balance = 10_000;
         let mint = Mint::new(starting_balance);
         let replicate_addr = target1.info.contact_info.tvu;
-        let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
-            leader_id,
-        )));
-        let mut bank = Bank::new(&mint);
-        bank.leader_scheduler = leader_scheduler;
-        let bank = Arc::new(bank);
+        let bank = Arc::new(Bank::new(&mint));

        //start cluster_info1
        let mut cluster_info1 = ClusterInfo::new(target1.info.clone()).expect("ClusterInfo::new");
@@ -266,18 +264,20 @@ pub mod tests {
        let cref1 = Arc::new(RwLock::new(cluster_info1));
        let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone());

+       let vote_account_keypair = Arc::new(Keypair::new());
        let tvu = Tvu::new(
            Arc::new(target1_keypair),
+           vote_account_keypair,
            &bank,
            0,
            0,
            cref1,
            dr_1.1,
            target1.sockets.replicate,
            target1.sockets.repair,
            target1.sockets.retransmit,
            None,
+           Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
+               leader_id,
+           ))),
        );

        let mut alice_ref_balance = starting_balance;
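Across this tvu.rs diff, stages stop reaching into the `Bank` for the tick height and leader schedule; the caller now builds one `Arc<RwLock<LeaderScheduler>>` and hands each stage a clone. A small sketch of that sharing pattern, with `LeaderScheduler` stubbed as a plain struct and the leader id reduced to a number:

```rust
use std::sync::{Arc, RwLock};
use std::thread;

struct LeaderScheduler {
    bootstrap_leader: u64, // stand-in for the leader's Pubkey
}

fn main() {
    let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler { bootstrap_leader: 7 }));

    // Each stage (retransmit, replicate, ...) gets its own handle.
    let for_stage = leader_scheduler.clone();
    let stage = thread::spawn(move || for_stage.read().unwrap().bootstrap_leader);
    assert_eq!(stage.join().unwrap(), 7);

    // The caller can still update the schedule through its own handle,
    // without the Bank mediating access.
    leader_scheduler.write().unwrap().bootstrap_leader = 8;
}
```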
Some files were not shown because too many files have changed in this diff.