Compare commits

...

61 Commits

Author SHA1 Message Date
f6b709ca48 ci: correct crates.io publishing order 2018-11-02 15:36:32 -07:00
ffa1fa557b Ship native programs in snap 2018-11-01 15:59:24 -07:00
e7631c85a1 Update bpf.mk 2018-11-01 15:25:43 -07:00
edeadb503f shell 2018-11-01 14:40:38 -07:00
d2044f2562 Find clang 7 better
If LLVM_DIR is defined, use it to locate clang.  Otherwise use brew on
macOS, and assume clang-7 otherwise
2018-11-01 09:46:47 -07:00
5703c740cf Improve clang install info for Linux 2018-11-01 09:46:47 -07:00
6ae20e78e2 Rename sol_bpf.h to solana_sdk.h 2018-10-31 23:39:59 -07:00
506fc3baeb sol_bpf.h improvements
- Define NULL
- Add sol_memcmp()
- Use sizeof() more
- Add SOL_ARRAY_SIZE
- Make sol_deserialize() more flexible
2018-10-31 23:39:59 -07:00
68523f4a7f Fix up bpf numeric types 2018-10-31 21:16:16 -07:00
beae217ab9 Remove purging of leader id from cluster info (#1677) 2018-10-31 13:09:44 -07:00
2c8c117e3c Use env variables to disable validator sanity and ledger verification (#1675) (#1676) 2018-10-31 12:54:40 -07:00
3a1285ebe5 Program may not exit (#1669)
Cap max executed instructions, report number of executed instructions
2018-10-31 11:15:08 -07:00
e2660f2ac1 Fix deps (#1672) 2018-10-31 11:14:41 -07:00
22eb1b977f Fix lua_loader tests (#1665) 2018-10-31 09:22:41 -07:00
43ef8d7bb7 SYSTEM_INC_DIRS needs immediate expansion 2018-10-31 09:22:41 -07:00
d9271f2d30 Revert inclusion change, fix doc 2018-10-31 09:22:41 -07:00
dfbfd4d4dd Fix const 2018-10-31 09:22:41 -07:00
9cb262ad4b Fix C programs 2018-10-31 09:22:41 -07:00
73ee0cb100 Run workspace member's tests (#1666)
Run workspace member's tests
2018-10-31 09:22:41 -07:00
9a6154beaf Upgrade to influx_db_client@0.3.6 2018-10-31 09:22:41 -07:00
3f494bb91b Update testnet scripts to use release tar ball (#1660) (#1664)
* Update testnet scripts to use release tar ball

* use curl instead of s3cmd
2018-10-30 18:29:07 -07:00
2eb312796d Publish a tarball of Solana release binaries (#1656) (#1658)
* Publish a tarball of solana release binaries

* included native programs in Solana release tar

* Remove PR check from publish script
2018-10-30 15:55:50 -07:00
3fb86662fb Find native program with solana_ prefix 2018-10-30 13:12:59 -07:00
dce31f6002 Improve account subscribe/unsubscribe logging 2018-10-30 12:10:25 -07:00
39c42a6aba Avoid panicking when a native library doesn't exist 2018-10-30 12:10:25 -07:00
9961c0ee0a Demote info logs 2018-10-30 12:10:25 -07:00
3f843f21b9 Add solana_ prefix to loaders so their logs appear in the default RUST_LOG config 2018-10-30 11:24:18 -07:00
d07961a58b Work around influxdb panic 2018-10-30 11:24:18 -07:00
b85aa9282e Tweak logging 2018-10-30 11:24:18 -07:00
1cd354cf15 Added a new remote node configuration script to set rmem/wmem (#1647) (#1648)
* Added a new remote node configuration script to set rmem/wmem

* Update common.sh for rmem/wmem configuration
2018-10-30 10:48:56 -07:00
92cd2d09ed Permit {INC,LLVM,OUT,SRC,SYSTEM_INC}_DIRs to be overridden 2018-10-30 07:59:22 -07:00
a40122548f Add programs/bpf/c/sdk entries 2018-10-29 20:52:34 -07:00
6e27f797bd Use NUM_KA 2018-10-29 20:52:34 -07:00
476a585222 README updates 2018-10-29 20:52:34 -07:00
aa74ddb6c0 LD -> LLC 2018-10-29 20:52:34 -07:00
95921ce129 Add extern "C" block 2018-10-29 20:52:34 -07:00
ee6d00a2fe Use #pragma once, it's widely supported
Fix up some spelling too
2018-10-29 20:52:34 -07:00
212cbc4977 Rename sol_bpf_c.h to sol_bpf.h 2018-10-29 20:52:34 -07:00
a6af1ba08d slight reformatting 2018-10-29 20:52:34 -07:00
ee27e9e1cf Apply some const 2018-10-29 20:52:34 -07:00
4d21ee0546 Include system includes in .d, remove unneeded tabs 2018-10-29 20:52:34 -07:00
493a2477b5 Tune make output 2018-10-29 19:32:20 -07:00
e284af33b9 Create programs/bpf/c/sdk/ 2018-10-29 19:10:54 -07:00
f0aa14e135 Run bench-tps for longer duration in testnet (#1638) (#1639)
- Increased to 2+ hours
2018-10-29 15:23:01 -07:00
fb9d8dfa99 Increase rmem and wmem for remote nodes in testnet (#1635) (#1637) 2018-10-29 14:36:26 -07:00
4b02bbc802 Remove unnecessary checks 2018-10-29 13:27:14 -07:00
18cf660f61 Create/publish bpf-sdk tarball 2018-10-29 13:04:20 -07:00
376303a1eb Add utility to figure the current crate version 2018-10-29 13:04:20 -07:00
f295eb06d0 Add llvm install info 2018-10-29 09:44:03 -07:00
f423f61d8b Ignore out/ 2018-10-29 09:44:03 -07:00
94b06b2cbf Use V=1 for verbosity, easier to type 2018-10-29 09:44:03 -07:00
9b2fc8cde7 Find llvm using brew on macOS 2018-10-29 09:44:03 -07:00
d810752e86 Remove VoteProgram references 2018-10-26 21:10:05 -07:00
fdaad1d85b Program_ids were overlapping (#1626)
Program_ids were overlapping
2018-10-26 21:10:05 -07:00
7f29c1fe23 Cleanup c programs (#1620)
Cleanup C programs
2018-10-26 21:10:05 -07:00
68df9d06db Bump version number to pick up fixed cuda library
Has fix for unaligned memory access in chacha_encrypt_many_sample
function.
2018-10-26 21:10:05 -07:00
b60cb48c18 Use a smaller test value for window_size
Otherwise this test takes forever to run.
2018-10-26 21:10:05 -07:00
0fee854220 Revert "Vote contract (#1552)"
This reverts commit f6c8e1a4bf.
2018-10-26 09:50:35 -07:00
0cc7bbfe7d Revert "cargo fmt"
This reverts commit 68834bd4c5.
2018-10-26 09:50:35 -07:00
68834bd4c5 cargo fmt 2018-10-25 17:24:40 -07:00
2df40cf9c9 Revert "0.10.0-pre2"
This reverts commit 48685cf766.
2018-10-25 17:20:37 -07:00
86 changed files with 2322 additions and 2086 deletions

View File

@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.10.0-pre2"
version = "0.10.0"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
@ -80,7 +80,7 @@ env_logger = "0.5.12"
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
getopts = "0.2"
hex-literal = "0.1.1"
influx_db_client = "0.3.4"
influx_db_client = "0.3.6"
solana-jsonrpc-core = "0.3.0"
solana-jsonrpc-http-server = "0.3.0"
solana-jsonrpc-macros = "0.3.0"
@ -104,14 +104,14 @@ serde_cbor = "0.9.0"
serde_derive = "1.0.27"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana-sdk = { path = "sdk", version = "0.10.0-pre2" }
solana-sdk = { path = "sdk", version = "0.10.0" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
solana-noop = { path = "programs/native/noop", version = "0.10.0-pre2" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.0-pre2" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.0-pre2" }
solana-noop = { path = "programs/native/noop", version = "0.10.0" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.0" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.0" }
[[bench]]
name = "bank"
@ -139,5 +139,5 @@ members = [
"programs/native/noop",
"programs/native/bpf_loader",
"programs/native/lua_loader",
"programs/bpf/noop_rust",
"programs/bpf/rust/noop",
]

View File

@ -20,44 +20,24 @@ fn main() {
let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();
if bpf_c {
let out_dir = "target/".to_string() + &env::var("PROFILE").unwrap();
let out_dir = "OUT_DIR=../../../target/".to_string()
+ &env::var("PROFILE").unwrap()
+ &"/bpf".to_string();
println!("cargo:rerun-if-changed=programs/bpf/noop_c/build.sh");
println!("cargo:rerun-if-changed=programs/bpf/noop_c/src/noop.c");
println!("cargo:warning=(not a warning) Compiling noop_c");
let status = Command::new("programs/bpf/noop_c/build.sh")
println!("cargo:rerun-if-changed=programs/bpf/c/sdk/bpf.mk");
println!("cargo:rerun-if-changed=programs/bpf/c/sdk/inc/solana_sdk.h");
println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
println!("cargo:rerun-if-changed=programs/bpf/c/src/tictactoe.c");
println!("cargo:rerun-if-changed=programs/bpf/c/src/tictactoe_dashboard.c");
println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
let status = Command::new("make")
.current_dir("programs/bpf/c")
.arg("all")
.arg(&out_dir)
.status()
.expect("Failed to call noop_c build script");
assert!(status.success());
println!("cargo:rerun-if-changed=programs/bpf/move_funds_c/build.sh");
println!("cargo:rerun-if-changed=programs/bpf/move_funds_c/src/move_funds.c");
println!("cargo:warning=(not a warning) Compiling move_funds_c");
let status = Command::new("programs/bpf/move_funds_c/build.sh")
.arg(&out_dir)
.status()
.expect("Failed to call move_funds_c build script");
assert!(status.success());
println!("cargo:rerun-if-changed=programs/bpf/tictactoe_c/build.sh");
println!("cargo:rerun-if-changed=programs/bpf/tictactoe_c/src/tictactoe.c");
println!("cargo:warning=(not a warning) Compiling tictactoe_c");
let status = Command::new("programs/bpf/tictactoe_c/build.sh")
.arg(&out_dir)
.status()
.expect("Failed to call tictactoe_c build script");
assert!(status.success());
println!("cargo:rerun-if-changed=programs/bpf/tictactoe_dashboard_c/build.sh");
println!(
"cargo:rerun-if-changed=programs/bpf/tictactoe_dashboard_c/src/tictactoe_dashboard.c"
);
println!("cargo:warning=(not a warning) Compiling tictactoe_dashboard_c");
let status = Command::new("programs/bpf/tictactoe_dashboard_c/build.sh")
.arg(&out_dir)
.status()
.expect("Failed to call tictactoe_dashboard_c build script");
.expect("Failed to build C-based BPF programs");
assert!(status.success());
}
if chacha || cuda || erasure {

View File

@ -8,3 +8,9 @@ steps:
- command: "ci/publish-crate.sh"
timeout_in_minutes: 20
name: "publish crate [public]"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- command: "ci/publish-solana-tar.sh"
timeout_in_minutes: 15
name: "publish solana release tar"

16
ci/crate-version.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash -e
#
# Outputs the current crate version
#
cd "$(dirname "$0")"/..

# Scan Cargo.toml for the first `version = "..."` assignment and print the
# value with the quotes stripped.  Redirect the file directly instead of
# piping through a useless `cat` subshell.
while read -r name equals value _; do
  if [[ $name = version && $equals = = ]]; then
    echo "${value//\"/}"
    exit 0
  fi
done < Cargo.toml

echo Unable to locate version in Cargo.toml 1>&2
exit 1

36
ci/publish-bpf-sdk.sh Executable file
View File

@ -0,0 +1,36 @@
#!/bin/bash -e
#
# Packages programs/bpf/c/sdk/ into bpf-sdk.tar.bz2 and uploads it to S3,
# keyed by the current crate version.
#
cd "$(dirname "$0")/.."
version=$(./ci/crate-version.sh)
echo --- Creating tarball
(
set -x
rm -rf bpf-sdk/
mkdir bpf-sdk/
# Record the crate version and git commit inside the tarball
(
echo "$version"
git rev-parse HEAD
) > bpf-sdk/version.txt
cp -ra programs/bpf/c/sdk/* bpf-sdk/
tar jvcf bpf-sdk.tar.bz2 bpf-sdk/
)
echo --- AWS S3 Store
set -x
# Fetch s3cmd on first use; the unpacked copy is reused on later runs
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
tar zxf s3cmd-2.0.1.tar.gz
fi
# Publish world-readable under the crate version
python ./s3cmd-2.0.1/s3cmd --acl-public put bpf-sdk.tar.bz2 \
s3://solana-sdk/"$version"/bpf-sdk.tar.bz2
exit 0

View File

@ -18,7 +18,7 @@ if [[ -n $CI ]]; then
fi
# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
for Cargo_toml in {.,sdk,programs/native/{bpf_loader,lua_loader,noop}}/Cargo.toml; do
for Cargo_toml in {sdk,programs/native/{bpf_loader,lua_loader,noop},.}/Cargo.toml; do
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
(
set -x

58
ci/publish-solana-tar.sh Executable file
View File

@ -0,0 +1,58 @@
#!/bin/bash -e
#
# Builds a solana-release.tar.bz2 of release binaries and uploads it to S3
# under the channel (stable/edge/beta) matching the current branch.
#
cd "$(dirname "$0")/.."
# Outside of Buildkite (no BUILDKITE_BRANCH), prefix mutating commands with
# `echo` so the script performs a dry run
DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
DRYRUN="echo"
fi
# channel-info.sh defines {STABLE,EDGE,BETA}_CHANNEL; map the current branch
# onto a release channel
eval "$(ci/channel-info.sh)"
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
# Branches that don't map to a channel are not an error; just skip publishing
if [[ -z $CHANNEL ]]; then
echo Unable to determine channel to publish into, exiting.
exit 0
fi
echo --- Creating tarball
if [[ -z $DRYRUN ]]; then
(
set -x
rm -rf solana-release/
mkdir solana-release/
# Record the channel and git commit inside the tarball
(
echo "$CHANNEL"
git rev-parse HEAD
) > solana-release/version.txt
# Build with CUDA enabled and bundle the native programs alongside
./fetch-perf-libs.sh
cargo install --features=cuda --root solana-release
./scripts/install-native-programs.sh solana-release
tar jvcf solana-release.tar.bz2 solana-release/
)
fi
echo --- AWS S3 Store
set -x
# Fetch s3cmd on first use; the unpacked copy is reused on later runs
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
$DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
$DRYRUN tar zxf s3cmd-2.0.1.tar.gz
fi
# Publish world-readable under the release channel
$DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
s3://solana-release/"$CHANNEL"/solana-release.tar.bz2
exit 0

View File

@ -23,6 +23,16 @@ for test in tests/*.rs; do
_ cargo test --verbose --jobs=1 --test="$test"
done
# Run native program's tests
for program in programs/native/*; do
echo --- "$program"
(
set -x
cd "$program"
cargo test --verbose
)
done
echo --- ci/localnet-sanity.sh
(
set -x

View File

@ -9,8 +9,10 @@ clientNodeCount=0
validatorNodeCount=10
publicNetwork=false
snapChannel=edge
releaseChannel=edge
delete=false
enableGpu=false
useReleaseChannel=false
usage() {
exitcode=0
@ -29,6 +31,8 @@ Deploys a CD testnet
options:
-s edge|beta|stable - Deploy the specified Snap release channel
(default: $snapChannel)
-t edge|beta|stable - Deploy the specified prebuilt tar from channel
(default: $releaseChannel)
-n [number] - Number of validator nodes (default: $validatorNodeCount)
-c [number] - Number of client nodes (default: $clientNodeCount)
-P - Use public network IP addresses (default: $publicNetwork)
@ -49,7 +53,7 @@ zone=$2
[[ -n $zone ]] || usage "Zone not specified"
shift 2
while getopts "h?p:Pn:c:s:gG:a:d" opt; do
while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
case $opt in
h | \?)
usage
@ -73,6 +77,17 @@ while getopts "h?p:Pn:c:s:gG:a:d" opt; do
;;
esac
;;
t)
case $OPTARG in
edge|beta|stable)
releaseChannel=$OPTARG
useReleaseChannel=true
;;
*)
usage "Invalid release channel: $OPTARG"
;;
esac
;;
g)
enableGpu=true
;;
@ -130,7 +145,18 @@ maybeRejectExtraNodes=
if ! $publicNetwork; then
maybeRejectExtraNodes="-o rejectExtraNodes"
fi
maybeNoValidatorSanity=
if [[ -n $NO_VALIDATOR_SANITY ]]; then
maybeNoValidatorSanity="-o noValidatorSanity"
fi
maybeNoLedgerVerify=
if [[ -n $NO_LEDGER_VERIFY ]]; then
maybeNoLedgerVerify="-o noLedgerVerify"
fi
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes
if ! $useReleaseChannel; then
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
else
time net/net.sh start -t "$releaseChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
fi
exit 0

View File

@ -15,7 +15,7 @@ mkdir -p target/perf-libs
cd target/perf-libs
(
set -x
curl https://solana-perf.s3.amazonaws.com/v0.10.2/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
curl https://solana-perf.s3.amazonaws.com/v0.10.3/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
)
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then

View File

@ -104,16 +104,16 @@ tune_networking() {
# test the existence of the sysctls before trying to set them
# go ahead and return true and don't exit if these calls fail
sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.rmem_max=67108864 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.rmem_max=1610612736 1>/dev/null 2>/dev/null
sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.rmem_default=1610612736 1>/dev/null 2>/dev/null
sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.wmem_max=67108864 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.wmem_max=1610612736 1>/dev/null 2>/dev/null
sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
sudo sysctl -w net.core.wmem_default=26214400 1>/dev/null 2>/dev/null
sudo sysctl -w net.core.wmem_default=1610612736 1>/dev/null 2>/dev/null
) || true
fi

View File

@ -376,6 +376,7 @@ $(
install-earlyoom.sh \
install-libssl-compatability.sh \
install-rsync.sh \
network-config.sh \
)
cat > /etc/motd <<EOM

View File

@ -25,6 +25,7 @@ Operate a configured testnet
start-specific options:
-S [snapFilename] - Deploy the specified Snap file
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
-t edge|beta|stable - Deploy the latest tarball release for the specified channel
-f [cargoFeatures] - List of |cargo --feaures=| to activate
(ignored if -s or -S is specified)
@ -44,6 +45,7 @@ EOF
}
snapChannel=
releaseChannel=
snapFilename=
deployMethod=local
sanityExtraArgs=
@ -53,7 +55,7 @@ command=$1
[[ -n $command ]] || usage
shift
while getopts "h?S:s:o:f:" opt; do
while getopts "h?S:s:t:o:f:" opt; do
case $opt in
h | \?)
usage
@ -74,6 +76,17 @@ while getopts "h?S:s:o:f:" opt; do
;;
esac
;;
t)
case $OPTARG in
edge|beta|stable)
releaseChannel=$OPTARG
deployMethod=tar
;;
*)
usage "Invalid release channel: $OPTARG"
;;
esac
;;
f)
cargoFeatures=$OPTARG
;;
@ -139,6 +152,9 @@ startLeader() {
snap)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$snapFilename" "$ipAddress:~/solana/solana.snap"
;;
tar)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
;;
local)
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
;;
@ -236,6 +252,16 @@ start() {
}
fi
;;
tar)
if [[ -n $releaseChannel ]]; then
rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
cd "$SOLANA_ROOT"
set -x
curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
tar jxvf solana-release.tar.bz2
fi
;;
local)
build
;;

View File

@ -35,7 +35,7 @@ snap)
solana_bench_tps=/snap/bin/solana.bench-tps
solana_keygen=/snap/bin/solana.keygen
;;
local)
local|tar)
PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1
export SOLANA_DEFAULT_METRICS_RATE=1
@ -59,7 +59,7 @@ clientCommand="\
--network $entrypointIp:8001 \
--identity client.json \
--num-nodes $numNodes \
--duration 600 \
--duration 7500 \
--sustained \
--threads $threadCount \
"

View File

@ -35,7 +35,6 @@ else
setupArgs="-l"
fi
case $deployMethod in
snap)
SECONDS=0
@ -78,7 +77,7 @@ snap)
echo "Succeeded in ${SECONDS} seconds"
;;
local)
local|tar)
PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1
export RUST_LOG

View File

@ -65,7 +65,7 @@ snap)
client_id=~/snap/solana/current/config/client-id.json
;;
local)
local|tar)
PATH="$HOME"/.cargo/bin:"$PATH"
export USE_INSTALL=1
entrypointRsyncUrl="$entrypointIp:~/solana"

11
net/scripts/network-config.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash -ex
#
# Raises the kernel network receive/send buffer limits (rmem/wmem) on a
# remote testnet node.  Linux-only and must run as root; exits non-zero
# otherwise so provisioning fails loudly.
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
sudo sysctl -w net.core.rmem_default=1610612736
sudo sysctl -w net.core.rmem_max=1610612736
sudo sysctl -w net.core.wmem_default=1610612736
sudo sysctl -w net.core.wmem_max=1610612736

1
programs/bpf/c/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
/out/

1
programs/bpf/c/makefile Normal file
View File

@ -0,0 +1 @@
include sdk/bpf.mk

View File

@ -0,0 +1,33 @@
## Prerequisites
### LLVM / clang 7.0.0
http://releases.llvm.org/download.html
### Linux Ubuntu 16.04 (xenial)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```
### Linux Ubuntu 14.04 (trusty)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```
### macOS
The following depends on Homebrew, instructions on how to install Homebrew are at https://brew.sh
Once Homebrew is installed, ensure the latest llvm is installed:
```
$ brew update # <- ensure your brew is up to date
$ brew install llvm # <- should output “Warning: llvm 7.0.0 is already installed and up-to-date”
$ brew --prefix llvm # <- should output “/usr/local/opt/llvm”
```

116
programs/bpf/c/sdk/bpf.mk Normal file
View File

@ -0,0 +1,116 @@
# Builds BPF programs from C sources using clang/llc.  Run `make help`
# for the available targets and user-overridable settings.
all:
.PHONY: help all clean
# Hide command echo unless V=1 is passed on the command line
ifneq ($(V),1)
_@ :=@
endif
# User-overridable directories (see `make help`)
INC_DIRS ?=
SRC_DIR ?= ./src
OUT_DIR ?= ./out
# Toolchain discovery: honor an explicit LLVM_DIR, fall back to Homebrew's
# llvm on macOS, otherwise expect the -7 suffixed binaries on the PATH
OS=$(shell uname)
ifeq ($(OS),Darwin)
LLVM_DIR ?= $(shell brew --prefix llvm)
endif
ifdef LLVM_DIR
CC := $(LLVM_DIR)/bin/clang
LLC := $(LLVM_DIR)/bin/llc
OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump
else
CC := clang-7
LLC := llc-7
OBJ_DUMP := llvm-objdump-7
endif
# The SDK's own headers live in inc/ next to this makefile
SYSTEM_INC_DIRS := -isystem $(dir $(lastword $(MAKEFILE_LIST)))inc
CC_FLAGS := \
-Werror \
-target bpf \
-O2 \
-emit-llvm \
-fno-builtin \
LLC_FLAGS := \
-march=bpf \
-filetype=obj \
-function-sections \
OBJ_DUMP_FLAGS := \
-color \
-source \
-disassemble \
help:
@echo 'BPF Program makefile'
@echo ''
@echo 'This makefile will build BPF Programs from C source files into ELFs'
@echo ''
@echo 'Assumptions:'
@echo ' - Programs are a single .c source file (may include headers)'
@echo ' - Programs are located in the source directory: $(SRC_DIR)'
@echo ' - Programs are named by their basename (eg. file name:foo.c -> program name:foo)'
@echo ' - Output files will be placed in the directory: $(OUT_DIR)'
@echo ''
@echo 'User settings'
@echo ' - The following setting are overridable on the command line, default values shown:'
@echo ' - Show commands while building:'
@echo ' V=1'
@echo ' - List of include directories:'
@echo ' INC_DIRS=$(INC_DIRS)'
@echo ' - List of system include directories:'
@echo ' SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)'
@echo ' - Location of source files:'
@echo ' SRC_DIR=$(SRC_DIR)'
@echo ' - Location to place output files:'
@echo ' OUT_DIR=$(OUT_DIR)'
@echo ' - Location of LLVM:'
@echo ' LLVM_DIR=$(LLVM_DIR)'
@echo ''
@echo 'Usage:'
@echo ' - make help - This help message'
@echo ' - make all - Builds all the programs in the directory: $(SRC_DIR)'
@echo ' - make clean - Cleans all programs'
@echo ' - make dump_<program name> - Dumps the contents of the program to stdout'
@echo ' - make <program name> - Build a single program by name'
@echo ''
@echo 'Available programs:'
$(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n))
@echo ''
@echo 'Example:'
@echo ' - Assuming a programed named foo (src/foo.c)'
@echo ' - make foo'
@echo ' - make dump_foo'
# Compile a .c source to LLVM bitcode, also emitting a .d dependency file
.PRECIOUS: $(OUT_DIR)/%.bc
$(OUT_DIR)/%.bc: $(SRC_DIR)/%.c
@echo "[cc] $@ ($<)"
$(_@)mkdir -p $(OUT_DIR)
$(_@)$(CC) $(CC_FLAGS) $(SYSTEM_INC_DIRS) $(INC_DIRS) -o $@ -c $< -MD -MF $(@:.bc=.d)
# Lower the bitcode to a BPF ELF object
.PRECIOUS: $(OUT_DIR)/%.o
$(OUT_DIR)/%.o: $(OUT_DIR)/%.bc
@echo "[llc] $@ ($<)"
$(_@)$(LLC) $(LLC_FLAGS) -o $@ $<
# Pull in compiler-generated header dependencies from earlier builds
-include $(wildcard $(OUT_DIR)/*.d)
# One program per .c file in SRC_DIR, named by its basename
PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*.c)))
define \n
endef
all: $(PROGRAM_NAMES)
%: $(addprefix $(OUT_DIR)/, %.o) ;
# dump_<name> disassembles the built program to stdout
dump_%: %
$(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .o, $<))
clean:
rm -rf $(OUT_DIR)

View File

@ -0,0 +1,319 @@
#pragma once
/**
* @brief Solana C-based BPF program utility functions and types
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* Numeric types
*/
#ifndef __LP64__
#error LP64 data model required
#endif
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed short int16_t;
typedef unsigned short uint16_t;
typedef signed int int32_t;
typedef unsigned int uint32_t;
typedef signed long int int64_t;
typedef unsigned long int uint64_t;
/**
* NULL
*/
#define NULL 0
/**
* Boolean type
*/
typedef enum { false = 0, true } bool;
/**
* Built-in helper functions
* @{
* The BPF VM makes a limited number of helper functions available to BPF
* programs. They are resolved at run-time and identified by a function index.
* Calling any of these functions results in `Call` instruction out of the
* user's BPF program.
*
* The helper functions all follow the same signature:
*
* int helper(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)
*
* The meaning of each argument and return value is dependent on the particular
* helper function being called.
*/
/**
* Helper function that prints to stdout
*
* Prints the hexadecimal representation of each parameter
*/
#define BPF_TRACE_PRINTK_IDX 6
static int (*sol_print)(
uint64_t,
uint64_t,
uint64_t,
uint64_t,
uint64_t
) = (void *)BPF_TRACE_PRINTK_IDX;
/**@}*/
/**
* Prefix for all BPF functions
*
* This prefix should be used for functions in order to facilitate
* interoperability with BPF representation
*/
#define SOL_FN_PREFIX __attribute__((always_inline)) static
/**
* Size of Public key in bytes
*/
#define SIZE_PUBKEY 32
/**
* Public key
*/
typedef struct {
uint8_t x[SIZE_PUBKEY];
} SolPubkey;
/**
 * Compares two public keys byte by byte
 *
 * @param one First public key
 * @param two Second public key
 * @return true if the same
 */
SOL_FN_PREFIX bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) {
  const uint8_t *lhs = one->x;
  const uint8_t *rhs = two->x;
  int remaining = sizeof(*one);
  while (remaining-- > 0) {
    if (*lhs++ != *rhs++) {
      return false;
    }
  }
  return true;
}
/**
 * Keyed Accounts
 */
typedef struct {
SolPubkey *key; /**< Public Key of the account owner */
int64_t *tokens; /**< Number of tokens owned by this account */
uint64_t userdata_len; /**< Length of userdata in bytes */
uint8_t *userdata; /**< On-chain data owned by this account */
SolPubkey *program_id; /**< Program that owns this account */
} SolKeyedAccounts;
/**
* Copies memory
*/
SOL_FN_PREFIX void sol_memcpy(void *dst, const void *src, int len) {
for (int i = 0; i < len; i++) {
*((uint8_t *)dst + i) = *((const uint8_t *)src + i);
}
}
/**
 * Compares memory
 *
 * @param s1 First buffer
 * @param s2 Second buffer
 * @param n Number of bytes to compare
 * @return 0 if equal; otherwise the difference between the first pair of
 *         differing bytes (negative if s1 < s2, positive if s1 > s2, as
 *         with C's memcmp)
 */
SOL_FN_PREFIX int sol_memcmp(const void *s1, const void *s2, int n) {
  for (int i = 0; i < n; i++) {
    // Compute the difference as an int: the previous uint8_t accumulator
    // wrapped modulo 256, so a negative result (s1 < s2) was reported as a
    // positive value.  Also add the missing const on the s1 access.
    int diff = *((const uint8_t *)s1 + i) - *((const uint8_t *)s2 + i);
    if (diff) {
      return diff;
    }
  }
  return 0;
}
/**
* Computes the number of elements in an array
*/
#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
/**
 * Panics
 *
 * Prints the line number where the panic occurred and then causes
 * the BPF VM to immediately halt execution. No accounts' userdata are updated
 */
#define sol_panic() _sol_panic(__LINE__)
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
// Report the panic: 0xFF sentinel words followed by the source line number
sol_print(0xFF, 0xFF, 0xFF, 0xFF, line);
// Deliberately store through an invalid address so the BPF VM faults and
// halts execution immediately
uint8_t *pv = (uint8_t *)1;
*pv = 1;
}
/**
* Asserts
*/
#define sol_assert(expr) \
if (!(expr)) { \
_sol_panic(__LINE__); \
}
/**
 * De-serializes the input parameters into usable types
 *
 * Use this function to deserialize the buffer passed to the program entrypoint
 * into usable types. This function does not perform copy deserialization,
 * instead it populates the pointers and lengths in SolKeyedAccounts and data so
 * that any modification to tokens or account data take place on the original
 * buffer. Doing so also eliminates the need to serialize back into the buffer
 * at program end.
 *
 * @param input Source buffer containing serialized input parameters
 * @param ka Pointer to an array of SolKeyedAccounts to deserialize into
 * @param ka_len Number of SolKeyedAccounts entries in `ka`
 * @param ka_len_out If NULL, fill exactly `ka_len` accounts or fail.
 *                   If not NULL, fill up to `ka_len` accounts and return the
 *                   number of filled accounts in `ka_len_out`.
 * @param data On return, a pointer to the instruction data
 * @param data_len On return, the length in bytes of the instruction data
 * @return Boolean true if successful
 */
SOL_FN_PREFIX bool sol_deserialize(
const uint8_t *input,
SolKeyedAccounts *ka,
uint64_t ka_len,
uint64_t *ka_len_out,
const uint8_t **data,
uint64_t *data_len
) {
// The buffer starts with a uint64_t count of serialized accounts
if (ka_len_out == NULL) {
// Strict mode: the serialized count must match ka_len exactly
if (ka_len != *(uint64_t *) input) {
return false;
}
ka_len = *(uint64_t *) input;
} else {
// Flexible mode: clamp to the caller-provided capacity
// NOTE(review): when the input holds MORE accounts than ka_len, the
// surplus accounts are not skipped below, so *data will point into the
// middle of account data rather than at the instruction data — confirm
// callers always size `ka` for the full account count.
if (ka_len > *(uint64_t *) input) {
ka_len = *(uint64_t *) input;
}
*ka_len_out = ka_len;
}
input += sizeof(uint64_t);
// Each account is laid out as: key, tokens, userdata_len, userdata bytes,
// program_id; the pointers below alias the original buffer (no copies)
for (int i = 0; i < ka_len; i++) {
// key
ka[i].key = (SolPubkey *) input;
input += sizeof(SolPubkey);
// tokens
ka[i].tokens = (int64_t *) input;
input += sizeof(int64_t);
// account userdata
ka[i].userdata_len = *(uint64_t *) input;
input += sizeof(uint64_t);
// NOTE(review): assignment drops the const qualifier — presumably
// intentional so programs can modify userdata in place; verify the
// compiler flags tolerate this
ka[i].userdata = input;
input += ka[i].userdata_len;
// program_id
ka[i].program_id = (SolPubkey *) input;
input += sizeof(SolPubkey);
}
// input data: a uint64_t length followed by the raw instruction bytes
*data_len = *(uint64_t *) input;
input += sizeof(uint64_t);
*data = input;
return true;
}
/**
* Debugging utilities
* @{
*/
/**
 * Prints the hexadecimal representation of a public key
 *
 * One sol_print call is emitted per byte: the byte index and its value
 *
 * @param key The public key to print
 */
SOL_FN_PREFIX void sol_print_key(const SolPubkey *key) {
  int j = 0;
  while (j < sizeof(*key)) {
    sol_print(0, 0, 0, j, key->x[j]);
    j++;
  }
}
/**
 * Prints the hexadecimal representation of an array
 *
 * One sol_print call is emitted per byte: the byte index and its value
 *
 * @param array The bytes to print
 * @param len Number of bytes in `array`
 */
SOL_FN_PREFIX void sol_print_array(const uint8_t *array, int len) {
  int j = 0;
  while (j < len) {
    sol_print(0, 0, 0, j, array[j]);
    j++;
  }
}
/**
 * Prints the hexadecimal representation of the program's input parameters
 *
 * Output order: the account count, then each account's key, tokens,
 * userdata, and program id, and finally the instruction data.
 *
 * @param num_ka Number of SolKeyedAccounts to print
 * @param ka A pointer to an array of SolKeyedAccounts to print
 * @param data A pointer to the instruction data to print
 * @param data_len The length in bytes of the instruction data
 */
SOL_FN_PREFIX void sol_print_params(
  uint64_t num_ka,
  const SolKeyedAccounts *ka,
  const uint8_t *data,
  uint64_t data_len
) {
  sol_print(0, 0, 0, 0, num_ka);
  int i = 0;
  while (i < num_ka) {
    const SolKeyedAccounts *acct = &ka[i];
    sol_print_key(acct->key);
    sol_print(0, 0, 0, 0, *acct->tokens);
    sol_print_array(acct->userdata, acct->userdata_len);
    sol_print_key(acct->program_id);
    i++;
  }
  sol_print_array(data, data_len);
}
/**@}*/
/**
* Program entrypoint
* @{
*
* The following is an example of a simple program that prints the input
* parameters it received:
*
* bool entrypoint(const uint8_t *input) {
* SolKeyedAccounts ka[1];
* uint8_t *data;
* uint64_t data_len;
*
* if (!sol_deserialize(buf, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
* return false;
* }
* print_params(1, ka, data, data_len);
* return true;
* }
*/
/**
* Program entrypoint signature
*
* @param input An array containing serialized input parameters
* @return true if successful
*/
extern bool entrypoint(const uint8_t *input);
#ifdef __cplusplus
}
#endif
/**@}*/

View File

@ -0,0 +1,32 @@
/**
* @brief Example C-based BPF program that moves funds from one account to
* another
*/
#include <solana_sdk.h>
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*/
#define NUM_KA 3
extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
int64_t tokens = *(int64_t *)data;
if (*ka[0].tokens >= tokens) {
*ka[0].tokens -= tokens;
*ka[2].tokens += tokens;
// sol_print(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
} else {
// sol_print(0, 0, 0xFF, *ka[0].tokens, tokens);
}
return true;
}

33
programs/bpf/c/src/noop.c Normal file
View File

@ -0,0 +1,33 @@
/**
* @brief Example C-based BPF program that prints out the parameters
* passed to it
*/
#include <solana_sdk.h>
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*/
#define NUM_KA 1
/**
 * Program entrypoint: prints every input parameter it received, then
 * validates the fixed-width integer typedefs supplied by solana_sdk.h.
 */
extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
// ka_len_out == NULL requests strict matching: exactly NUM_KA accounts
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
// Echo the accounts and instruction data via the sol_print helper
sol_print_params(NUM_KA, ka, data, data_len);
// Each sol_assert panics (halting the BPF VM) if a size is wrong
sol_assert(sizeof(int8_t) == 1);
sol_assert(sizeof(uint8_t) == 1);
sol_assert(sizeof(int16_t) == 2);
sol_assert(sizeof(uint16_t) == 2);
sol_assert(sizeof(int32_t) == 4);
sol_assert(sizeof(uint32_t) == 4);
sol_assert(sizeof(int64_t) == 8);
sol_assert(sizeof(uint64_t) == 8);
return true;
}

View File

@ -1,128 +1,9 @@
//#include <stdint.h>
//#include <stddef.h>
/**
* @brief TicTacToe Dashboard C-based BPF program
*/
#if 1
#define BPF_TRACE_PRINTK_IDX 6
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
#else
// relocation is another option
extern int sol_print(int, int, int, int, int);
#endif
typedef long long unsigned int uint64_t;
typedef long long int int64_t;
typedef unsigned char uint8_t;
typedef enum { false = 0, true } bool;
// TODO support BPF function calls rather then forcing everything to be inlined
#define SOL_FN_PREFIX __attribute__((always_inline)) static
// TODO move this to a registered helper
/**
 * Byte-by-byte memcpy replacement (no libc in the BPF environment).
 * NOTE(review): `src` is never written; it could be const — confirm before
 * changing the signature.
 */
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
for (int i = 0; i < len; i++) {
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
}
}
/** Logs a 0xFF,0xFF marker with the current source line */
#define sol_trace() sol_print(0, 0, 0xFF, 0xFF, (__LINE__));
/** Aborts the program, logging the current source line */
#define sol_panic() _sol_panic(__LINE__)
/* Logs the failing line, then deliberately stores to address 1 so the VM
 * faults and halts execution. */
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
sol_print(0, 0, 0xFF, 0xFF, line);
char *pv = (char *)1;
*pv = 1;
}
#define SIZE_PUBKEY 32
typedef struct {
uint8_t x[SIZE_PUBKEY];
} SolPubkey;
typedef struct {
SolPubkey *key;
int64_t tokens;
uint64_t userdata_len;
uint8_t *userdata;
SolPubkey *program_id;
} SolKeyedAccounts;
/**
 * Deserializes the loader's flat input buffer in place.
 *
 * Expected layout: u64 account count, then per account: 32-byte key,
 * u64 tokens, u64 userdata length, userdata bytes, 32-byte program id;
 * followed by u64 tx data length and the tx data bytes.
 *
 * All pointers written into `ka` alias `src`; nothing is copied.
 * Returns 1 on success, 0 if the buffer's account count != `num_ka`.
 */
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka,
SolKeyedAccounts *ka, uint8_t **tx_data,
uint64_t *tx_data_len) {
if (num_ka != *(uint64_t *)src) {
return 0;
}
src += sizeof(uint64_t);
// TODO fixed iteration loops ok? unrolled?
for (int i = 0; i < num_ka;
i++) { // TODO this should end up unrolled, confirm
// key
ka[i].key = (SolPubkey *)src;
src += SIZE_PUBKEY;
// tokens (copied by value in this variant, not aliased)
ka[i].tokens = *(uint64_t *)src;
src += sizeof(uint64_t);
// account userdata
ka[i].userdata_len = *(uint64_t *)src;
src += sizeof(uint64_t);
ka[i].userdata = src;
src += ka[i].userdata_len;
// program_id
ka[i].program_id = (SolPubkey *)src;
src += SIZE_PUBKEY;
}
// tx userdata
*tx_data_len = *(uint64_t *)src;
src += sizeof(uint64_t);
*tx_data = src;
return 1;
}
// // -- Debug --
/** Logs each byte of a public key, one sol_print per byte */
SOL_FN_PREFIX void print_key(SolPubkey *key) {
for (int j = 0; j < SIZE_PUBKEY; j++) {
sol_print(0, 0, 0, j, key->x[j]);
}
}
/** Logs each byte of an arbitrary buffer, one sol_print per byte */
SOL_FN_PREFIX void print_data(uint8_t *data, int len) {
for (int j = 0; j < len; j++) {
sol_print(0, 0, 0, j, data[j]);
}
}
/**
 * Debug helper: logs every deserialized parameter — each account's key,
 * token balance, userdata and program id, then the transaction data.
 * Verbose (one log line per byte); for debugging only.
 */
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
uint8_t *tx_data, uint64_t tx_data_len) {
sol_print(0, 0, 0, 0, num_ka);
for (int i = 0; i < num_ka; i++) {
// key
print_key(ka[i].key);
// tokens
sol_print(0, 0, 0, 0, ka[i].tokens);
// account userdata
print_data(ka[i].userdata, ka[i].userdata_len);
// program_id
print_key(ka[i].program_id);
}
// tx userdata
print_data(tx_data, tx_data_len);
}
// -- TicTacToe --
// Board Coodinates
// | 0,0 | 1,0 | 2,0 |
// | 0,1 | 1,1 | 2,1 |
// | 0,2 | 1,2 | 2,2 |
#include <solana_sdk.h>
#include "tictactoe.h"
typedef enum {
Result_Ok,
@ -138,30 +19,6 @@ typedef enum {
Result_UserdataTooSmall,
} Result;
typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;
typedef enum {
State_Waiting,
State_XMove,
State_OMove,
State_XWon,
State_OWon,
State_Draw,
} State;
typedef struct {
// Player who initialized the game
SolPubkey player_x;
// Player who joined the game
SolPubkey player_o;
// Current state of the game
State state;
// Tracks the player moves
BoardItem board[9];
// Keep Alive for each player
int64_t keep_alive[2];
} Game;
typedef enum {
Command_Init = 0,
Command_Join,
@ -170,21 +27,17 @@ typedef enum {
} Command;
SOL_FN_PREFIX void game_dump_board(Game *self) {
sol_print(0, 0, 0x9, 0x9, 0x9);
sol_print(0x9, 0x9, 0x9, 0x9, 0x9);
sol_print(0, 0, self->board[0], self->board[1], self->board[2]);
sol_print(0, 0, self->board[3], self->board[4], self->board[5]);
sol_print(0, 0, self->board[6], self->board[7], self->board[8]);
sol_print(0, 0, 0x9, 0x9, 0x9);
sol_print(0x9, 0x9, 0x9, 0x9, 0x9);
}
SOL_FN_PREFIX void game_create(Game *self, SolPubkey *player_x) {
// account memory is zero-initialized
sol_memcpy(self->player_x.x, player_x, SIZE_PUBKEY);
// TODO self->player_o = 0;
self->state = State_Waiting;
self->keep_alive[0] = 0;
self->keep_alive[1] = 0;
// TODO fixed iteration loops ok? unrolled?
for (int i = 0; i < 9; i++) {
self->board[i] = BoardItem_F;
}
@ -215,7 +68,6 @@ SOL_FN_PREFIX bool game_same(BoardItem x_or_o, BoardItem one, BoardItem two,
}
SOL_FN_PREFIX bool game_same_player(SolPubkey *one, SolPubkey *two) {
// TODO fixed iteration loops ok? unrolled?
for (int i = 0; i < SIZE_PUBKEY; i++) {
if (one->x[i] != two->x[i]) {
return false;
@ -280,7 +132,6 @@ SOL_FN_PREFIX Result game_next_move(Game *self, SolPubkey *player, int x,
{
int draw = true;
// TODO fixed iteration loops ok? unrolled?
for (int i = 0; i < 9; i++) {
if (BoardItem_F == self->board[i]) {
draw = false;
@ -321,17 +172,24 @@ SOL_FN_PREFIX Result game_keep_alive(Game *self, SolPubkey *player,
return Result_Ok;
}
// accounts[0] On Init must be player X, after that doesn't matter,
// anybody can cause a dashboard update
// accounts[1] must be a TicTacToe state account
// accounts[2] must be account of current player, only Pubkey is used
uint64_t entrypoint(uint8_t *buf) {
SolKeyedAccounts ka[3];
uint64_t tx_data_len;
uint8_t *tx_data;
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*
* accounts[0] On Init must be player X, after that doesn't matter,
* anybody can cause a dashboard update
* accounts[1] must be a TicTacToe state account
* accounts[2] must be account of current player, only Pubkey is used
*/
#define NUM_KA 3
extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
int err = 0;
if (1 != sol_deserialize(buf, 3, ka, &tx_data, &tx_data_len)) {
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
@ -342,14 +200,14 @@ uint64_t entrypoint(uint8_t *buf) {
Game game;
sol_memcpy(&game, ka[1].userdata, sizeof(game));
Command command = *tx_data;
Command command = *data;
switch (command) {
case Command_Init:
game_create(&game, ka[2].key);
break;
case Command_Join:
err = game_join(&game, ka[2].key, *((int64_t *)(tx_data + 4)));
err = game_join(&game, ka[2].key, *((int64_t *)(data + 4)));
break;
case Command_KeepAlive:
@ -357,7 +215,7 @@ uint64_t entrypoint(uint8_t *buf) {
break;
case Command_Move:
err = game_next_move(&game, ka[2].key, tx_data[4], tx_data[5]);
err = game_next_move(&game, ka[2].key, data[4], data[5]);
break;
default:

View File

@ -0,0 +1,36 @@
#ifndef TICTACTOE_H
#define TICTACTOE_H
/**
 * @brief Definitions common to tictactoe and tictactoe_dashboard
 */

/** Progress of a game, from creation through completion */
typedef enum {
State_Waiting,
State_XMove,
State_OMove,
State_XWon,
State_OWon,
State_Draw,
} State;

/** Contents of a single board square */
typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;

/**
 * Game state
 *
 * This structure is stored in the owner's account userdata
 *
 * Board Coordinates
 * | 0,0 | 1,0 | 2,0 |
 * | 0,1 | 1,1 | 2,1 |
 * | 0,2 | 1,2 | 2,2 |
 */
typedef struct {
SolPubkey player_x; /**< Player who initialized the game */
SolPubkey player_o; /**< Player who joined the game */
State state; /**< Current state of the game */
BoardItem board[9]; /**< Tracks the player moves */
int64_t keep_alive[2]; /**< Keep Alive for each player */
} Game;

#endif // TICTACTOE_H

View File

@ -0,0 +1,98 @@
/**
* @brief TicTacToe C-based BPF program
*/
#include <solana_sdk.h>
#include "tictactoe.h"
/** How many completed games the dashboard remembers */
#define MAX_GAMES_TRACKED 5
/**
 * Dashboard state
 *
 * This structure is stored in the owner's account userdata
 */
typedef struct {
SolPubkey pending; /**< Latest pending game */
SolPubkey completed[MAX_GAMES_TRACKED]; /**< Last N completed games (0 is the
latest) */
uint32_t latest_game; /**< Index into completed pointing to latest game completed */
uint32_t total; /**< Total number of completed games */
} Dashboard;
/**
 * Folds one game's state into the dashboard.
 *
 * A waiting game becomes the new `pending` entry; a finished game
 * (won/drawn) is recorded in the circular `completed` list and `total` is
 * bumped.  Returns false only when the finished game's pubkey is already
 * in `completed` (prevents double counting a re-submitted game);
 * in-progress games are ignored and return true.
 */
SOL_FN_PREFIX bool update(Dashboard *self, Game *game, SolPubkey *game_pubkey) {
switch (game->state) {
case State_Waiting:
sol_memcpy(&self->pending, game_pubkey, SIZE_PUBKEY);
break;
case State_XMove:
case State_OMove:
// Nothing to do. In progress games are not managed by the dashboard
break;
case State_XWon:
case State_OWon:
case State_Draw:
for (int i = 0; i < MAX_GAMES_TRACKED; i++) {
if (SolPubkey_same(&self->completed[i], game_pubkey)) {
// TODO: Once the PoH height is exposed to programs, it could be used
// to ensure
// that old games are not being re-added and causing total to
// increment incorrectly.
return false;
}
}
self->total += 1;
self->latest_game = (self->latest_game + 1) % MAX_GAMES_TRACKED;
sol_memcpy(self->completed[self->latest_game].x, game_pubkey,
SIZE_PUBKEY);
break;
default:
break;
}
return true;
}
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*
* accounts[0] doesn't matter, anybody can cause a dashboard update
* accounts[1] must be a Dashboard account
* accounts[2] must be a Game account
*/
#define NUM_KA 3
/**
 * Program entrypoint: applies the Game stored in account 2 to the
 * Dashboard stored in account 1 and writes the dashboard back.
 * Returns true on success, false on any validation failure.
 */
extern bool entrypoint(const uint8_t *input) {
  SolKeyedAccounts ka[NUM_KA];
  const uint8_t *data;
  uint64_t data_len;

  if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
    return false;
  }

  // TODO check dashboard and game program ids (how to check now that they are
  // not known values)
  // TODO check validity of dashboard and game structures contents

  // The dashboard state must fit in account 1's userdata
  if (sizeof(Dashboard) > ka[1].userdata_len) {
    // Bug fix: report the length of the account being checked (ka[1]),
    // not ka[2]'s
    sol_print(0, 0, 0xFF, sizeof(Dashboard), ka[1].userdata_len);
    return false;
  }
  Dashboard dashboard;
  sol_memcpy(&dashboard, ka[1].userdata, sizeof(dashboard));

  // The game state must fit in account 2's userdata
  if (sizeof(Game) > ka[2].userdata_len) {
    sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
    return false;
  }
  Game game;
  sol_memcpy(&game, ka[2].userdata, sizeof(game));

  if (true != update(&dashboard, &game, ka[2].key)) {
    return false;
  }

  sol_memcpy(ka[1].userdata, &dashboard, sizeof(dashboard));
  return true;
}

View File

@ -1,9 +0,0 @@
#!/bin/bash -ex
#
# Compiles src/move_funds.c to BPF bitcode and then to an object file.
#
# Honors LLVM_DIR so a non-default clang/llc install can be used; falls
# back to the Homebrew LLVM location this script previously hard-coded.
LLVM_DIR=${LLVM_DIR:-/usr/local/opt/llvm}

OUTDIR="${1:-../../../target/release/}"
THISDIR=$(dirname "$0")
mkdir -p "$OUTDIR"
"$LLVM_DIR"/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/move_funds_c.bc -c "$THISDIR"/src/move_funds.c
"$LLVM_DIR"/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/move_funds_c.o "$OUTDIR"/move_funds_c.bc
#"$LLVM_DIR"/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/move_funds_c.o

View File

@ -1,3 +0,0 @@
#!/bin/sh
# Disassembles the compiled move_funds BPF object for inspection
# (requires the Homebrew LLVM toolchain at this fixed path).
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/move_funds_c.o

View File

@ -1,140 +0,0 @@
//#include <stdint.h>
//#include <stddef.h>
#if 1
// one way to define a helper function is with index as a fixed value
#define BPF_TRACE_PRINTK_IDX 6
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
#else
// relocation is another option
extern int sol_print(int, int, int, int, int);
#endif
typedef long long unsigned int uint64_t;
typedef long long int int64_t;
typedef unsigned char uint8_t;
typedef enum { false = 0, true } bool;
#define SIZE_PUBKEY 32
typedef struct {
uint8_t x[SIZE_PUBKEY];
} SolPubkey;
typedef struct {
SolPubkey *key;
int64_t* tokens;
uint64_t userdata_len;
uint8_t *userdata;
SolPubkey *program_id;
} SolKeyedAccounts;
// TODO support BPF function calls rather then forcing everything to be inlined
#define SOL_FN_PREFIX __attribute__((always_inline)) static
// TODO move this to a registered helper
/**
 * Byte-by-byte memcpy replacement (no libc in the BPF environment).
 */
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
for (int i = 0; i < len; i++) {
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
}
}
/** Aborts the program, logging the current source line */
#define sol_panic() _sol_panic(__LINE__)
/* Logs the failing line, then deliberately stores to address 1 so the VM
 * faults and halts execution. */
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
sol_print(0, 0, 0xFF, 0xFF, line);
char *pv = (char *)1;
*pv = 1;
}
/**
 * Deserializes the loader's flat input buffer in place.
 *
 * Layout: u64 account count, then per account: 32-byte key, i64 tokens,
 * u64 userdata length, userdata bytes, 32-byte program id; followed by
 * u64 tx userdata length and the tx userdata bytes.
 *
 * Pointers in `ka` (including `tokens`) alias `src`, so the program can
 * modify balances in place.  Returns 1 on success, 0 if the buffer's
 * account count != `num_ka`.
 */
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka, SolKeyedAccounts *ka,
uint8_t **userdata, uint64_t *userdata_len) {
if (num_ka != *(uint64_t *)src) {
return 0;
}
src += sizeof(uint64_t);
// TODO fixed iteration loops ok? unrolled?
for (int i = 0; i < num_ka; i++) { // TODO this should end up unrolled, confirm
// key
ka[i].key = (SolPubkey *)src;
src += SIZE_PUBKEY;
// tokens (pointer into the buffer, mutable in place)
ka[i].tokens = (int64_t *)src;
src += sizeof(int64_t);
// account userdata
ka[i].userdata_len = *(uint64_t *)src;
src += sizeof(uint64_t);
ka[i].userdata = src;
src += ka[i].userdata_len;
// program_id
ka[i].program_id = (SolPubkey *)src;
src += SIZE_PUBKEY;
}
// tx userdata
*userdata_len = *(uint64_t *)src;
src += sizeof(uint64_t);
*userdata = src;
return 1;
}
// -- Debug --
/** Logs each byte of a public key, one sol_print per byte */
SOL_FN_PREFIX void print_key(SolPubkey *key) {
for (int j = 0; j < SIZE_PUBKEY; j++) {
sol_print(0, 0, 0, j, key->x[j]);
}
}
/** Logs each byte of an arbitrary buffer, one sol_print per byte */
SOL_FN_PREFIX void print_userdata(uint8_t *data, int len) {
for (int j = 0; j < len; j++) {
sol_print(0, 0, 0, j, data[j]);
}
}
/**
 * Debug helper: logs every deserialized parameter — each account's key,
 * token balance, userdata and program id, then the tx userdata.
 */
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
uint8_t *userdata, uint64_t userdata_len) {
sol_print(0, 0, 0, 0, num_ka);
for (int i = 0; i < num_ka; i++) {
// key
print_key(ka[i].key);
// tokens
sol_print(0, 0, 0, 0, *ka[i].tokens);
// account userdata
print_userdata(ka[i].userdata, ka[i].userdata_len);
// program_id
print_key(ka[i].program_id);
}
// tx userdata
print_userdata(userdata, userdata_len);
}
/**
 * Program entrypoint: moves `*(int64_t *)userdata` tokens from account 0
 * to account 2 when the balance is sufficient; otherwise does nothing.
 *
 * Returns 0 on success and 1 when deserialization fails.
 * NOTE(review): the sibling noop.c entrypoint uses the opposite
 * convention (1 = success) — confirm which value the loader treats as
 * success.
 */
uint64_t entrypoint(char *buf) {
SolKeyedAccounts ka[3];
uint64_t userdata_len;
uint8_t *userdata;
if (1 != sol_deserialize((uint8_t *)buf, 3, ka, &userdata, &userdata_len)) {
return 1;
}
print_params(3, ka, userdata, userdata_len);
int64_t tokens = *(int64_t*)userdata;
if (*ka[0].tokens >= tokens) {
*ka[0].tokens -= tokens;
*ka[2].tokens += tokens;
//sol_print(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
} else {
//sol_print(0, 0, 0xFF, *ka[0].tokens, tokens);
}
return 0;
}

View File

@ -1,9 +0,0 @@
#!/bin/bash -ex
#
# Compiles src/noop.c to BPF bitcode and then to an object file.
#
# Honors LLVM_DIR so a non-default clang/llc install can be used; falls
# back to the Homebrew LLVM location this script previously hard-coded.
LLVM_DIR=${LLVM_DIR:-/usr/local/opt/llvm}

OUTDIR="${1:-../../../target/release/}"
THISDIR=$(dirname "$0")
mkdir -p "$OUTDIR"
"$LLVM_DIR"/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/noop_c.bc -c "$THISDIR"/src/noop.c
"$LLVM_DIR"/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/noop_c.o "$OUTDIR"/noop_c.bc
#"$LLVM_DIR"/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/noop_c.o

View File

@ -1,3 +0,0 @@
#!/bin/sh
# Disassembles the compiled noop BPF object for inspection
# (requires the Homebrew LLVM toolchain at this fixed path).
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/noop_c.o

View File

@ -1,133 +0,0 @@
//#include <stdint.h>
//#include <stddef.h>
#if 1
// one way to define a helper function is with index as a fixed value
#define BPF_TRACE_PRINTK_IDX 6
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
#else
// relocation is another option
extern int sol_print(int, int, int, int, int);
#endif
typedef long long unsigned int uint64_t;
typedef long long int int64_t;
typedef unsigned char uint8_t;
typedef enum { false = 0, true } bool;
#define SIZE_PUBKEY 32
typedef struct {
uint8_t x[SIZE_PUBKEY];
} SolPubkey;
typedef struct {
SolPubkey *key;
int64_t* tokens;
uint64_t userdata_len;
uint8_t *userdata;
SolPubkey *program_id;
} SolKeyedAccounts;
// TODO support BPF function calls rather then forcing everything to be inlined
#define SOL_FN_PREFIX __attribute__((always_inline)) static
// TODO move this to a registered helper
/**
 * Byte-by-byte memcpy replacement (no libc in the BPF environment).
 */
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
for (int i = 0; i < len; i++) {
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
}
}
/** Aborts the program, logging the current source line */
#define sol_panic() _sol_panic(__LINE__)
/* Logs the failing line, then deliberately stores to address 1 so the VM
 * faults and halts execution. */
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
sol_print(0, 0, 0xFF, 0xFF, line);
char *pv = (char *)1;
*pv = 1;
}
/**
 * Deserializes the loader's flat input buffer in place.
 *
 * Layout: u64 account count, then per account: 32-byte key, i64 tokens,
 * u64 userdata length, userdata bytes, 32-byte program id; followed by
 * u64 tx userdata length and the tx userdata bytes.
 *
 * Pointers in `ka` (including `tokens`) alias `src`.  Returns 1 on
 * success, 0 if the buffer's account count != `num_ka`.
 */
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka, SolKeyedAccounts *ka,
uint8_t **userdata, uint64_t *userdata_len) {
if (num_ka != *(uint64_t *)src) {
return 0;
}
src += sizeof(uint64_t);
// TODO fixed iteration loops ok? unrolled?
for (int i = 0; i < num_ka; i++) { // TODO this should end up unrolled, confirm
// key
ka[i].key = (SolPubkey *)src;
src += SIZE_PUBKEY;
// tokens (pointer into the buffer, mutable in place)
ka[i].tokens = (int64_t *)src;
src += sizeof(int64_t);
// account userdata
ka[i].userdata_len = *(uint64_t *)src;
src += sizeof(uint64_t);
ka[i].userdata = src;
src += ka[i].userdata_len;
// program_id
ka[i].program_id = (SolPubkey *)src;
src += SIZE_PUBKEY;
}
// tx userdata
*userdata_len = *(uint64_t *)src;
src += sizeof(uint64_t);
*userdata = src;
return 1;
}
// -- Debug --
/** Logs each byte of a public key, one sol_print per byte */
SOL_FN_PREFIX void print_key(SolPubkey *key) {
for (int j = 0; j < SIZE_PUBKEY; j++) {
sol_print(0, 0, 0, j, key->x[j]);
}
}
/** Logs each byte of an arbitrary buffer, one sol_print per byte */
SOL_FN_PREFIX void print_userdata(uint8_t *data, int len) {
for (int j = 0; j < len; j++) {
sol_print(0, 0, 0, j, data[j]);
}
}
/**
 * Debug helper: logs every deserialized parameter — each account's key,
 * token balance, userdata and program id, then the tx userdata.
 */
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
uint8_t *userdata, uint64_t userdata_len) {
sol_print(0, 0, 0, 0, num_ka);
for (int i = 0; i < num_ka; i++) {
// key
print_key(ka[i].key);
// tokens
sol_print(0, 0, 0, 0, *ka[i].tokens);
// account userdata
print_userdata(ka[i].userdata, ka[i].userdata_len);
// program_id
print_key(ka[i].program_id);
}
// tx userdata
print_userdata(userdata, userdata_len);
}
// -- Program entrypoint --
/**
 * Program entrypoint: deserializes and logs its parameters, nothing more.
 *
 * Returns 1 on success and 0 when deserialization fails.
 * NOTE(review): the sibling move_funds.c entrypoint uses the opposite
 * convention (0 = success) — confirm which value the loader treats as
 * success.
 */
uint64_t entrypoint(char *buf) {
SolKeyedAccounts ka[1];
uint64_t userdata_len;
uint8_t *userdata;
if (1 != sol_deserialize((uint8_t *)buf, 1, ka, &userdata, &userdata_len)) {
return 0;
}
print_params(1, ka, userdata, userdata_len);
return 1;
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-bpf-noop"
version = "0.10.0-pre2"
version = "0.10.0"
description = "Solana BPF noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -8,4 +8,4 @@ license = "Apache-2.0"
[dependencies]
rbpf = "0.1.0"
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
solana-sdk = { path = "../../../../sdk", version = "0.10.0" }

View File

@ -1,9 +0,0 @@
#!/bin/bash -ex
#
# Compiles src/tictactoe.c to BPF bitcode and then to an object file.
#
# Honors LLVM_DIR so a non-default clang/llc install can be used; falls
# back to the Homebrew LLVM location this script previously hard-coded.
LLVM_DIR=${LLVM_DIR:-/usr/local/opt/llvm}

OUTDIR="${1:-../../../target/release/}"
THISDIR=$(dirname "$0")
mkdir -p "$OUTDIR"
"$LLVM_DIR"/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/tictactoe_c.bc -c "$THISDIR"/src/tictactoe.c
"$LLVM_DIR"/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/tictactoe_c.o "$OUTDIR"/tictactoe_c.bc
# "$LLVM_DIR"/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/tictactoe_c.o

View File

@ -1,3 +0,0 @@
#!/bin/sh
# Disassembles the compiled tictactoe BPF object for inspection
# (requires the Homebrew LLVM toolchain at this fixed path).
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/tictactoe_c.o

View File

@ -1,9 +0,0 @@
#!/bin/bash -ex
#
# Compiles src/tictactoe_dashboard.c to BPF bitcode and then to an object
# file.
#
# Honors LLVM_DIR so a non-default clang/llc install can be used; falls
# back to the Homebrew LLVM location this script previously hard-coded.
LLVM_DIR=${LLVM_DIR:-/usr/local/opt/llvm}

OUTDIR="${1:-../../../target/release/}"
THISDIR=$(dirname "$0")
mkdir -p "$OUTDIR"
"$LLVM_DIR"/bin/clang -Werror -target bpf -O2 -emit-llvm -fno-builtin -o "$OUTDIR"/tictactoe_dashboard_c.bc -c "$THISDIR"/src/tictactoe_dashboard.c
"$LLVM_DIR"/bin/llc -march=bpf -filetype=obj -function-sections -o "$OUTDIR"/tictactoe_dashboard_c.o "$OUTDIR"/tictactoe_dashboard_c.bc
# "$LLVM_DIR"/bin/llvm-objdump -color -source -disassemble "$OUTDIR"/tictactoe_dashboard_c.o

View File

@ -1,3 +0,0 @@
#!/bin/sh
# Disassembles the compiled tictactoe_dashboard BPF object for inspection
# (requires the Homebrew LLVM toolchain at this fixed path).
/usr/local/opt/llvm/bin/llvm-objdump -color -source -disassemble ../../../target/debug/tictactoe_dashboard_c.o

View File

@ -1,236 +0,0 @@
//#include <stdint.h>
//#include <stddef.h>
#if 1
#define BPF_TRACE_PRINTK_IDX 6
static int (*sol_print)(int, int, int, int, int) = (void *)BPF_TRACE_PRINTK_IDX;
#else
// relocation is another option
extern int sol_print(int, int, int, int, int);
#endif
typedef long long unsigned int uint64_t;
typedef long long int int64_t;
typedef long unsigned int uint32_t;
typedef long int int32_t;
typedef unsigned char uint8_t;
typedef enum { false = 0, true } bool;
// TODO support BPF function calls rather then forcing everything to be inlined
#define SOL_FN_PREFIX __attribute__((always_inline)) static
// TODO move this to a registered helper
/**
 * Byte-by-byte memcpy replacement (no libc in the BPF environment).
 */
SOL_FN_PREFIX void sol_memcpy(void *dst, void *src, int len) {
for (int i = 0; i < len; i++) {
*((uint8_t *)dst + i) = *((uint8_t *)src + i);
}
}
/** Logs a 0xFF,0xFF marker with the current source line */
#define sol_trace() sol_print(0, 0, 0xFF, 0xFF, (__LINE__));
/** Aborts the program, logging the current source line */
#define sol_panic() _sol_panic(__LINE__)
/* Logs the failing line, then deliberately stores to address 1 so the VM
 * faults and halts execution. */
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
sol_print(0, 0, 0xFF, 0xFF, line);
char *pv = (char *)1;
*pv = 1;
}
#define SIZE_PUBKEY 32
typedef struct {
uint8_t x[SIZE_PUBKEY];
} SolPubkey;
/** Returns true when the two public keys contain identical bytes */
SOL_FN_PREFIX bool SolPubkey_same(SolPubkey *one, SolPubkey *two) {
  uint8_t *a = one->x;
  uint8_t *b = two->x;
  for (int i = 0; i < SIZE_PUBKEY; i++) {
    if (a[i] != b[i]) {
      return false;
    }
  }
  return true;
}
typedef struct {
SolPubkey *key;
int64_t tokens;
uint64_t userdata_len;
uint8_t *userdata;
SolPubkey *program_id;
} SolKeyedAccounts;
/**
 * Deserializes the loader's flat input buffer in place.
 *
 * Layout: u64 account count, then per account: 32-byte key, u64 tokens,
 * u64 userdata length, userdata bytes, 32-byte program id; followed by
 * u64 tx data length and the tx data bytes.
 *
 * Pointers in `ka` alias `src`; nothing is copied.  Returns 1 on
 * success, 0 if the buffer's account count != `num_ka`.
 */
SOL_FN_PREFIX int sol_deserialize(uint8_t *src, uint64_t num_ka,
SolKeyedAccounts *ka, uint8_t **tx_data,
uint64_t *tx_data_len) {
if (num_ka != *(uint64_t *)src) {
return 0;
}
src += sizeof(uint64_t);
// TODO fixed iteration loops ok? unrolled?
for (int i = 0; i < num_ka;
i++) { // TODO this should end up unrolled, confirm
// key
ka[i].key = (SolPubkey *)src;
src += SIZE_PUBKEY;
// tokens (copied by value in this variant, not aliased)
ka[i].tokens = *(uint64_t *)src;
src += sizeof(uint64_t);
// account userdata
ka[i].userdata_len = *(uint64_t *)src;
src += sizeof(uint64_t);
ka[i].userdata = src;
src += ka[i].userdata_len;
// program_id
ka[i].program_id = (SolPubkey *)src;
src += SIZE_PUBKEY;
}
// tx userdata
*tx_data_len = *(uint64_t *)src;
src += sizeof(uint64_t);
*tx_data = src;
return 1;
}
// -- Debug --
/** Logs each byte of a public key, one sol_print per byte */
SOL_FN_PREFIX void print_key(SolPubkey *key) {
for (int j = 0; j < SIZE_PUBKEY; j++) {
sol_print(0, 0, 0, j, key->x[j]);
}
}
/** Logs each byte of an arbitrary buffer, one sol_print per byte */
SOL_FN_PREFIX void print_data(uint8_t *data, int len) {
for (int j = 0; j < len; j++) {
sol_print(0, 0, 0, j, data[j]);
}
}
/**
 * Debug helper: logs every deserialized parameter — each account's key,
 * token balance, userdata and program id, then the transaction data.
 */
SOL_FN_PREFIX void print_params(uint64_t num_ka, SolKeyedAccounts *ka,
uint8_t *tx_data, uint64_t tx_data_len) {
sol_print(0, 0, 0, 0, num_ka);
for (int i = 0; i < num_ka; i++) {
// key
print_key(ka[i].key);
// tokens
sol_print(0, 0, 0, 0, ka[i].tokens);
// account userdata
print_data(ka[i].userdata, ka[i].userdata_len);
// program_id
print_key(ka[i].program_id);
}
// tx userdata
print_data(tx_data, tx_data_len);
}
// -- TicTacToe Dashboard --
// TODO put this in a common place for both tictactoe and tictactoe_dashboard
typedef enum {
State_Waiting,
State_XMove,
State_OMove,
State_XWon,
State_OWon,
State_Draw,
} State;
// TODO put this in a common place for both tictactoe and tictactoe_dashboard
typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;
// TODO put this in a common place for both tictactoe and tictactoe_dashboard
typedef struct {
SolPubkey player_x;
SolPubkey player_o;
State state;
BoardItem board[9];
int64_t keep_alive[2];
} Game;
#define MAX_GAMES_TRACKED 5
typedef struct {
// Latest pending game
SolPubkey pending;
// Last N completed games (0 is the latest)
SolPubkey completed[MAX_GAMES_TRACKED];
// Index into completed pointing to latest game completed
uint32_t latest_game;
// Total number of completed games
uint32_t total;
} Dashboard;
/**
 * Folds one game's state into the dashboard.
 *
 * A waiting game becomes the new `pending` entry; a finished game
 * (won/drawn) is recorded in the circular `completed` list and `total` is
 * bumped.  Returns false only when the finished game's pubkey is already
 * in `completed` (prevents double counting a re-submitted game);
 * in-progress games are ignored and return true.
 */
SOL_FN_PREFIX bool update(Dashboard *self, Game *game, SolPubkey *game_pubkey) {
switch (game->state) {
case State_Waiting:
sol_memcpy(&self->pending, game_pubkey, SIZE_PUBKEY);
break;
case State_XMove:
case State_OMove:
// Nothing to do. In progress games are not managed by the dashboard
break;
case State_XWon:
case State_OWon:
case State_Draw:
for (int i = 0; i < MAX_GAMES_TRACKED; i++) {
if (SolPubkey_same(&self->completed[i], game_pubkey)) {
// TODO: Once the PoH height is exposed to programs, it could be used
// to ensure
// that old games are not being re-added and causing total to
// increment incorrectly.
return false;
}
}
self->total += 1;
self->latest_game = (self->latest_game + 1) % MAX_GAMES_TRACKED;
sol_memcpy(self->completed[self->latest_game].x, game_pubkey,
SIZE_PUBKEY);
break;
default:
break;
}
return true;
}
/**
 * Program entrypoint: applies the Game stored in account 2 to the
 * Dashboard stored in account 1 and writes the dashboard back.
 *
 * accounts[0] doesn't matter, anybody can cause a dashboard update
 * accounts[1] must be a Dashboard account
 * accounts[2] must be a Game account
 *
 * Returns true (nonzero) on success, false otherwise.
 */
uint64_t entrypoint(uint8_t *buf) {
  SolKeyedAccounts ka[3];
  uint64_t tx_data_len;
  uint8_t *tx_data;

  if (1 != sol_deserialize(buf, 3, ka, &tx_data, &tx_data_len)) {
    return false;
  }

  // TODO check dashboard and game program ids (how to check now that they
  // are not known values)
  // TODO check validity of dashboard and game structures contents

  // The dashboard state must fit in account 1's userdata
  if (sizeof(Dashboard) > ka[1].userdata_len) {
    // Bug fix: report the length of the account being checked (ka[1]),
    // not ka[2]'s
    sol_print(0, 0, 0xFF, sizeof(Dashboard), ka[1].userdata_len);
    return false;
  }
  Dashboard dashboard;
  sol_memcpy(&dashboard, ka[1].userdata, sizeof(dashboard));

  // The game state must fit in account 2's userdata
  if (sizeof(Game) > ka[2].userdata_len) {
    sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
    return false;
  }
  Game game;
  sol_memcpy(&game, ka[2].userdata, sizeof(game));

  if (true != update(&dashboard, &game, ka[2].key)) {
    return false;
  }

  sol_memcpy(ka[1].userdata, &dashboard, sizeof(dashboard));
  return true;
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-bpfloader"
version = "0.10.0-pre2"
version = "0.10.0"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,12 +13,12 @@ elf = "0.0.10"
env_logger = "0.5.12"
libc = "0.2.43"
log = "0.4.2"
rbpf = "0.1.0"
solana_rbpf = "0.1.2"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
solana-sdk = { path = "../../../sdk", version = "0.10.0" }
[lib]
name = "bpf_loader"
name = "solana_bpf_loader"
crate-type = ["cdylib"]

View File

@ -1,4 +1,4 @@
use rbpf::ebpf;
use solana_rbpf::ebpf;
use std::io::{Error, ErrorKind};
fn reject<S: AsRef<str>>(msg: S) -> Result<(), Error> {

View File

@ -5,11 +5,12 @@ extern crate byteorder;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate rbpf;
extern crate solana_rbpf;
extern crate solana_sdk;
use bincode::deserialize;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use solana_rbpf::{helpers, EbpfVmRaw};
use solana_sdk::account::KeyedAccount;
use solana_sdk::loader_instruction::LoaderInstruction;
use solana_sdk::pubkey::Pubkey;
@ -18,24 +19,13 @@ use std::io::Error;
use std::mem;
use std::sync::{Once, ONCE_INIT};
/// Builds an rbpf VM for `prog`: installs the Solana verifier, loads the
/// program, and registers the trace-printk helper at its well-known index.
fn create_vm(prog: &[u8]) -> Result<rbpf::EbpfVmRaw, Error> {
let mut vm = rbpf::EbpfVmRaw::new(None)?;
vm.set_verifier(bpf_verifier::check)?;
vm.set_program(&prog)?;
vm.register_helper(
rbpf::helpers::BPF_TRACE_PRINTK_IDX,
rbpf::helpers::bpf_trace_printf,
)?;
Ok(vm)
}
#[allow(dead_code)]
fn dump_program(key: &Pubkey, prog: &[u8]) {
let mut eight_bytes: Vec<u8> = Vec::new();
println!("BPF Program: {:?}", key);
info!("BPF Program: {:?}", key);
for i in prog.iter() {
if eight_bytes.len() >= 7 {
println!("{:02X?}", eight_bytes);
info!("{:02X?}", eight_bytes);
eight_bytes.clear();
} else {
eight_bytes.push(i.clone());
@ -43,6 +33,35 @@ fn dump_program(key: &Pubkey, prog: &[u8]) {
}
}
/// BPF helper mirroring `bpf_trace_printf`: logs the five arguments in hex
/// and returns the number of characters the equivalent printf would have
/// written (the format string's fixed text plus each argument's hex digit
/// count).
pub fn helper_printf(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
    info!(
        "bpf_trace_printf: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
        arg1, arg2, arg3, arg4, arg5
    );
    // Number of hex digits needed to print `x`.
    //
    // Bug fix: the previous `(x as f64).log(16.0).floor()` form loses
    // precision for large u64 values (u64 -> f64 rounds above 2^53) and can
    // be off by one just below a digit boundary.  Integer bit math is exact:
    // digits = ceil(bits_used / 4).
    let size_arg = |x: u64| {
        if x == 0 {
            1
        } else {
            (67 - u64::from(x.leading_zeros())) / 4
        }
    };
    "bpf_trace_printf: 0x, 0x, 0x, 0x, 0x\n".len() as u64
        + size_arg(arg1)
        + size_arg(arg2)
        + size_arg(arg3)
        + size_arg(arg4)
        + size_arg(arg5)
}
/// Builds a solana_rbpf VM for `prog`: installs the Solana verifier, caps
/// total executed instructions so a program cannot spin forever, loads the
/// program, and registers the local trace-printk helper.
fn create_vm(prog: &[u8]) -> Result<EbpfVmRaw, Error> {
let mut vm = EbpfVmRaw::new(None)?;
vm.set_verifier(bpf_verifier::check)?;
vm.set_max_instruction_count(36000)?; // 36000 is a wag, need to tune
vm.set_program(&prog)?;
vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helper_printf)?;
Ok(vm)
}
fn serialize_parameters(keyed_accounts: &mut [KeyedAccount], data: &[u8]) -> Vec<u8> {
assert_eq!(32, mem::size_of::<Pubkey>());
@ -90,12 +109,12 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
if keyed_accounts[0].account.executable {
let prog = keyed_accounts[0].account.userdata.clone();
trace!("Call BPF, {} Instructions", prog.len() / 8);
trace!("Call BPF, {} instructions", prog.len() / 8);
//dump_program(keyed_accounts[0].key, &prog);
let vm = match create_vm(&prog) {
let mut vm = match create_vm(&prog) {
Ok(vm) => vm,
Err(e) => {
warn!("{}", e);
warn!("create_vm failed: {}", e);
return false;
}
};
@ -105,20 +124,24 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
return false;
},
Err(e) => {
warn!("{}", e);
warn!("execute_program failed: {}", e);
return false;
}
}
deserialize_parameters(&mut keyed_accounts[1..], &v);
trace!(
"BPF program executed {} instructions",
vm.get_last_instruction_count()
);
} else if let Ok(instruction) = deserialize(tx_data) {
match instruction {
LoaderInstruction::Write { offset, bytes } => {
let offset = offset as usize;
let len = bytes.len();
trace!("BpfLoader::Write offset {} length {:?}", offset, len);
debug!("Write: offset={} length={}", offset, len);
if keyed_accounts[0].account.userdata.len() < offset + len {
println!(
"Overflow {} < {}",
warn!(
"Write overflow: {} < {}",
keyed_accounts[0].account.userdata.len(),
offset + len
);
@ -128,7 +151,7 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
}
LoaderInstruction::Finalize => {
keyed_accounts[0].account.executable = true;
trace!("BPfLoader::Finalize prog: {:?}", keyed_accounts[0].key);
info!("Finalize: account {:?}", keyed_accounts[0].key);
}
}
} else {
@ -136,3 +159,28 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
}
true
}
#[cfg(test)]
mod tests {
use super::*;
// Verifies the VM's instruction cap: the hand-assembled bytecode below
// busy-loops calling helper 6 forever, so execute_program must abort
// with the "exceeded maximum number of instructions" error instead of
// hanging.
#[test]
#[should_panic(expected = "Error: Execution exceeded maximum number of instructions")]
fn test_non_terminating_program() {
#[rustfmt::skip]
let prog = &[
0xb7, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r6 = 0
0xb7, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = 0
0xb7, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r2 = 0
0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
0xb7, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r4 = 0
0xbf, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r5 = r6
0x85, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, // call 6
0x07, 0x06, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, // r6 + 1
0x05, 0x00, 0xf8, 0xff, 0x00, 0x00, 0x00, 0x00, // goto -8
0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
];
let input = &mut [0x00];
let mut vm = create_vm(prog).unwrap();
vm.execute_program(input).unwrap();
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-lualoader"
version = "0.10.0-pre2"
version = "0.10.0"
description = "Solana Lua Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,12 +13,12 @@ log = "0.4.2"
rlua = "0.15.2"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
solana-sdk = { path = "../../../sdk", version = "0.10.0" }
[dev-dependencies]
bincode = "1.0.0"
[lib]
name = "lua_loader"
name = "solana_lua_loader"
crate-type = ["cdylib"]

View File

@ -79,8 +79,8 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
let len = bytes.len();
trace!("LuaLoader::Write offset {} length {:?}", offset, len);
if keyed_accounts[0].account.userdata.len() < offset + len {
println!(
"Overflow {} < {}",
warn!(
"Write overflow {} < {}",
keyed_accounts[0].account.userdata.len(),
offset + len
);
@ -147,13 +147,12 @@ mod tests {
#[test]
fn test_move_funds_with_lua_via_process() {
let bytes = r#"
let userdata = r#"
local tokens, _ = string.unpack("I", data)
accounts[1].tokens = accounts[1].tokens - tokens
accounts[2].tokens = accounts[2].tokens + tokens
"#.as_bytes()
.to_vec();
let userdata = serialize(&LuaLoader::Bytes { bytes }).unwrap();
let alice_pubkey = Pubkey::default();
let bob_pubkey = Pubkey::default();
@ -194,15 +193,12 @@ mod tests {
#[test]
fn test_load_lua_library() {
let bytes = r#"
let userdata = r#"
local serialize = load(accounts[2].userdata)().serialize
accounts[3].userdata = serialize({a=1, b=2, c=3}, nil, "s")
"#.as_bytes()
.to_vec();
let userdata = serialize(&LuaLoader::Bytes { bytes }).unwrap();
let program_id = Pubkey::default();
let program_account = Account {
tokens: 1,
userdata,
@ -210,9 +206,7 @@ mod tests {
executable: true,
loader_program_id: Pubkey::default(),
};
let alice_account = Account::new(100, 0, program_id);
let serialize_account = Account {
tokens: 100,
userdata: read_test_file("serialize.lua"),
@ -220,7 +214,6 @@ mod tests {
executable: false,
loader_program_id: Pubkey::default(),
};
let mut accounts = [
(Pubkey::default(), program_account),
(Pubkey::default(), alice_account),
@ -228,9 +221,7 @@ mod tests {
(Pubkey::default(), Account::new(1, 0, program_id)),
];
let mut keyed_accounts = create_keyed_accounts(&mut accounts);
process(&mut keyed_accounts, &[]);
// Verify deterministic ordering of a serialized Lua table.
assert_eq!(
str::from_utf8(&keyed_accounts[3].account.userdata).unwrap(),
@ -250,12 +241,9 @@ mod tests {
let dan_pubkey = Pubkey::new(&[5; 32]);
let erin_pubkey = Pubkey::new(&[6; 32]);
let userdata = serialize(&LuaLoader::Bytes {
bytes: read_test_file("multisig.lua"),
}).unwrap();
let program_account = Account {
tokens: 1,
userdata,
userdata: read_test_file("multisig.lua"),
program_id,
executable: true,
loader_program_id: Pubkey::default(),

View File

@ -1,13 +1,13 @@
[package]
name = "solana-noop"
version = "0.10.0-pre2"
version = "0.10.0"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
[dependencies]
solana-sdk = { path = "../../../sdk", version = "0.10.0-pre2" }
solana-sdk = { path = "../../../sdk", version = "0.10.0" }
[lib]
name = "noop"

View File

@ -20,7 +20,7 @@ if [[ ! -d $installDir ]]; then
fi
for dir in "$SOLANA_ROOT"/programs/native/*; do
for program in "$SOLANA_ROOT/target/$variant/deps/lib$(basename "$dir")".{so,dylib,dll}; do
for program in echo "$SOLANA_ROOT"/target/"$variant"/deps/lib{,solana_}"$(basename "$dir")".{so,dylib,dll}; do
if [[ -f $program ]]; then
cp -v "$program" "$installDir"
fi

View File

@ -1,6 +1,6 @@
[package]
name = "solana-sdk"
version = "0.10.0-pre2"
version = "0.10.0"
description = "Solana SDK"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -117,8 +117,8 @@ parts:
# Build/install all other programs
cargo install --root $SNAPCRAFT_PART_INSTALL --bins
# TODO: install native programs when they are ready for public use
#./scripts/install-native-programs.sh $SNAPCRAFT_PART_INSTALL/bin/
# Install native programs
./scripts/install-native-programs.sh $SNAPCRAFT_PART_INSTALL/bin/
# Install multinode-demo/
mkdir -p $SNAPCRAFT_PART_INSTALL/multinode-demo/

View File

@ -7,6 +7,7 @@ use bincode::deserialize;
use bincode::serialize;
use bpf_loader;
use budget_program::BudgetState;
use budget_transaction::BudgetTransaction;
use counter::Counter;
use entry::Entry;
use hash::{hash, Hash};
@ -30,7 +31,7 @@ use std;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::sync::{Mutex, RwLock};
use std::time::Instant;
use storage_program::StorageProgram;
use system_program::SystemProgram;
@ -41,7 +42,6 @@ use timing::{duration_as_us, timestamp};
use token_program::TokenProgram;
use tokio::prelude::Future;
use transaction::Transaction;
use vote_program::VoteProgram;
use window::WINDOW_SIZE;
/// The number of most recent `last_id` values that the bank will track the signatures
@ -151,7 +151,7 @@ impl Default for LastIds {
/// The state of all accounts and contracts after processing its entries.
pub struct Bank {
/// A map of account public keys to the balance in that account.
pub accounts: RwLock<HashMap<Pubkey, Account>>,
accounts: RwLock<HashMap<Pubkey, Account>>,
/// set of accounts which are currently in the pipeline
account_locks: Mutex<HashSet<Pubkey>>,
@ -171,13 +171,6 @@ pub struct Bank {
// Mapping of signatures to Subscriber ids and sinks to notify on confirmation
signature_subscriptions: RwLock<HashMap<Signature, HashMap<Pubkey, Sink<RpcSignatureStatus>>>>,
/// Tracks and updates the leader schedule based on the votes and account stakes
/// processed by the bank
pub leader_scheduler: Arc<RwLock<LeaderScheduler>>,
// The number of ticks that have elapsed since genesis
tick_height: Mutex<u64>,
}
impl Default for Bank {
@ -190,8 +183,6 @@ impl Default for Bank {
finality_time: AtomicUsize::new(std::usize::MAX),
account_subscriptions: RwLock::new(HashMap::new()),
signature_subscriptions: RwLock::new(HashMap::new()),
leader_scheduler: Arc::new(RwLock::new(LeaderScheduler::default())),
tick_height: Mutex::new(0),
}
}
}
@ -622,8 +613,6 @@ impl Bank {
{
return Err(BankError::ProgramRuntimeError(instruction_index as u8));
}
} else if VoteProgram::check_id(&tx_program_id) {
VoteProgram::process_transaction(&tx, instruction_index, program_accounts).is_err();
} else {
let mut depth = 0;
let mut keys = Vec::new();
@ -901,28 +890,41 @@ impl Bank {
results
}
pub fn process_entry(&self, entry: &Entry) -> Result<()> {
pub fn process_entry(
&self,
entry: &Entry,
tick_height: &mut u64,
leader_scheduler: &mut LeaderScheduler,
) -> Result<()> {
if !entry.is_tick() {
for result in self.process_transactions(&entry.transactions) {
result?;
}
} else {
let tick_height = {
let mut tick_height_lock = self.tick_height.lock().unwrap();
*tick_height_lock += 1;
*tick_height_lock
};
self.leader_scheduler
.write()
.unwrap()
.update_height(tick_height, self);
*tick_height += 1;
self.register_entry_id(&entry.id);
}
self.process_entry_votes(entry, *tick_height, leader_scheduler);
Ok(())
}
fn process_entry_votes(
&self,
entry: &Entry,
tick_height: u64,
leader_scheduler: &mut LeaderScheduler,
) {
for tx in &entry.transactions {
if tx.vote().is_some() {
// Update the active set in the leader scheduler
leader_scheduler.push_vote(*tx.from(), tick_height);
}
}
leader_scheduler.update_height(tick_height, self);
}
/// Process an ordered list of entries, populating a circular buffer "tail"
/// as we go.
fn process_entries_tail(
@ -930,6 +932,8 @@ impl Bank {
entries: &[Entry],
tail: &mut Vec<Entry>,
tail_idx: &mut usize,
tick_height: &mut u64,
leader_scheduler: &mut LeaderScheduler,
) -> Result<u64> {
let mut entry_count = 0;
@ -947,7 +951,7 @@ impl Bank {
// the leader scheduler. Next we will extract the vote tracking structure
// out of the leader scheduler, and into the bank, and remove the leader
// scheduler from these banking functions.
self.process_entry(entry)?;
self.process_entry(entry, tick_height, leader_scheduler)?;
}
Ok(entry_count)
@ -992,7 +996,6 @@ impl Bank {
// if its a tick, execute the group and register the tick
self.par_execute_entries(&mt_group)?;
self.register_entry_id(&entry.id);
*self.tick_height.lock().unwrap() += 1;
mt_group = vec![];
continue;
}
@ -1022,18 +1025,17 @@ impl Bank {
entries: I,
tail: &mut Vec<Entry>,
tail_idx: &mut usize,
) -> Result<u64>
leader_scheduler: &mut LeaderScheduler,
) -> Result<(u64, u64)>
where
I: IntoIterator<Item = Entry>,
{
// Ledger verification needs to be parallelized, but we can't pull the whole
// thing into memory. We therefore chunk it.
let mut entry_height = *tail_idx as u64;
let mut tick_height = 0;
for entry in &tail[0..*tail_idx] {
if entry.is_tick() {
*self.tick_height.lock().unwrap() += 1;
}
tick_height += entry.is_tick() as u64
}
let mut id = start_hash;
@ -1044,15 +1046,25 @@ impl Bank {
return Err(BankError::LedgerVerificationFailed);
}
id = block.last().unwrap().id;
let entry_count = self.process_entries_tail(&block, tail, tail_idx)?;
let entry_count = self.process_entries_tail(
&block,
tail,
tail_idx,
&mut tick_height,
leader_scheduler,
)?;
entry_height += entry_count;
}
Ok(entry_height)
Ok((tick_height, entry_height))
}
/// Process a full ledger.
pub fn process_ledger<I>(&self, entries: I) -> Result<(u64, u64, Vec<Entry>)>
pub fn process_ledger<I>(
&self,
entries: I,
leader_scheduler: &mut LeaderScheduler,
) -> Result<(u64, u64, Vec<Entry>)>
where
I: IntoIterator<Item = Entry>,
{
@ -1094,14 +1106,20 @@ impl Bank {
tail.push(entry0);
tail.push(entry1);
let mut tail_idx = 2;
let entry_height = self.process_blocks(entry1_id, entries, &mut tail, &mut tail_idx)?;
let (tick_height, entry_height) = self.process_blocks(
entry1_id,
entries,
&mut tail,
&mut tail_idx,
leader_scheduler,
)?;
// check if we need to rotate tail
if tail.len() == WINDOW_SIZE as usize {
tail.rotate_left(tail_idx)
}
Ok((*self.tick_height.lock().unwrap(), entry_height, tail))
Ok((tick_height, entry_height, tail))
}
/// Create, sign, and process a Transaction from `keypair` to `to` of
@ -1218,16 +1236,6 @@ impl Bank {
subscriptions.remove(pubkey).is_some()
}
pub fn get_current_leader(&self) -> Option<Pubkey> {
let ls_lock = self.leader_scheduler.read().unwrap();
let tick_height = self.tick_height.lock().unwrap();
ls_lock.get_scheduled_leader(*tick_height)
}
pub fn get_tick_height(&self) -> u64 {
*self.tick_height.lock().unwrap()
}
fn check_account_subscriptions(&self, pubkey: &Pubkey, account: &Account) {
let subscriptions = self.account_subscriptions.read().unwrap();
if let Some(hashmap) = subscriptions.get(pubkey) {
@ -1280,6 +1288,13 @@ impl Bank {
}
subscriptions.remove(&signature);
}
#[cfg(test)]
// Used to access accounts for things like controlling stake to control
// the eligible set of nodes for leader selection
pub fn accounts(&self) -> &RwLock<HashMap<Pubkey, Account>> {
&self.accounts
}
}
#[cfg(test)]
@ -1292,6 +1307,7 @@ mod tests {
use entry_writer::{self, EntryWriter};
use hash::hash;
use jsonrpc_macros::pubsub::{Subscriber, SubscriptionId};
use leader_scheduler::LeaderScheduler;
use ledger;
use logger;
use signature::Keypair;
@ -1624,7 +1640,8 @@ mod tests {
let mint = Mint::new(1);
let genesis = mint.create_entries();
let bank = Bank::default();
bank.process_ledger(genesis).unwrap();
bank.process_ledger(genesis, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
@ -1701,7 +1718,9 @@ mod tests {
let (ledger, pubkey) = create_sample_ledger(1);
let (ledger, dup) = ledger.tee();
let bank = Bank::default();
let (tick_height, ledger_height, tail) = bank.process_ledger(ledger).unwrap();
let (tick_height, ledger_height, tail) = bank
.process_ledger(ledger, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
assert_eq!(ledger_height, 4);
assert_eq!(tick_height, 2);
@ -1723,15 +1742,17 @@ mod tests {
// let (_, _) = bank.process_ledger(ledger).unwrap();
// }
let window_size = WINDOW_SIZE as usize;
let window_size = 128;
for entry_count in window_size - 3..window_size + 2 {
let (ledger, pubkey) = create_sample_ledger(entry_count);
let bank = Bank::default();
let (tick_height, ledger_height, tail) = bank.process_ledger(ledger).unwrap();
let (tick_height, ledger_height, tail) = bank
.process_ledger(ledger, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
assert_eq!(ledger_height, entry_count as u64 + 3);
assert_eq!(tick_height, 2);
assert!(tail.len() <= window_size);
assert!(tail.len() <= WINDOW_SIZE as usize);
let last_entry = &tail[tail.len() - 1];
assert_eq!(bank.last_id(), last_entry.id);
}
@ -1753,7 +1774,8 @@ mod tests {
let ledger = to_file_iter(ledger);
let bank = Bank::default();
bank.process_ledger(ledger).unwrap();
bank.process_ledger(ledger, &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&pubkey), 1);
}
@ -1764,7 +1786,8 @@ mod tests {
let block = to_file_iter(create_sample_block_with_ticks(&mint, 1, 1));
let bank = Bank::default();
bank.process_ledger(genesis.chain(block)).unwrap();
bank.process_ledger(genesis.chain(block), &mut LeaderScheduler::default())
.unwrap();
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
}
@ -1778,9 +1801,13 @@ mod tests {
let ledger1 = create_sample_ledger_with_mint_and_keypairs(&mint, &keypairs);
let bank0 = Bank::default();
bank0.process_ledger(ledger0).unwrap();
bank0
.process_ledger(ledger0, &mut LeaderScheduler::default())
.unwrap();
let bank1 = Bank::default();
bank1.process_ledger(ledger1).unwrap();
bank1
.process_ledger(ledger1, &mut LeaderScheduler::default())
.unwrap();
let initial_state = bank0.hash_internal_state();
@ -1873,7 +1900,7 @@ mod tests {
let string = transport_receiver.poll();
assert!(string.is_ok());
if let Async::Ready(Some(response)) = string.unwrap() {
let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader_program_id":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
let expected = format!(r#"{{"jsonrpc":"2.0","method":"accountNotification","params":{{"result":{{"executable":false,"loader_program_id":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"program_id":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}},"subscription":0}}}}"#);
assert_eq!(expected, response);
}
@ -2050,4 +2077,53 @@ mod tests {
Err(BankError::AccountNotFound)
);
}
#[test]
fn test_program_ids() {
let system = Pubkey::new(&[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
]);
let native = Pubkey::new(&[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
]);
let bpf = Pubkey::new(&[
128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
let budget = Pubkey::new(&[
129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
let storage = Pubkey::new(&[
130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
let token = Pubkey::new(&[
131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]);
assert_eq!(SystemProgram::id(), system);
assert_eq!(native_loader::id(), native);
assert_eq!(bpf_loader::id(), bpf);
assert_eq!(BudgetState::id(), budget);
assert_eq!(StorageProgram::id(), storage);
assert_eq!(TokenProgram::id(), token);
}
#[test]
fn test_program_id_uniqueness() {
let mut unique = HashSet::new();
let ids = vec![
SystemProgram::id(),
native_loader::id(),
bpf_loader::id(),
BudgetState::id(),
StorageProgram::id(),
TokenProgram::id(),
];
assert!(ids.into_iter().all(move |id| unique.insert(id)));
}
}

View File

@ -17,16 +17,14 @@ use solana::logger;
use solana::metrics::set_panic_hook;
use solana::signature::{Keypair, KeypairUtil};
use solana::thin_client::poll_gossip_for_leader;
use solana::vote_program::VoteProgram;
use solana::wallet::request_airdrop;
use std::fs::File;
use std::net::{Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
fn main() {
fn main() -> () {
logger::setup();
set_panic_hook("fullnode");
let matches = App::new("fullnode")
@ -84,6 +82,7 @@ fn main() {
// save off some stuff for airdrop
let node_info = node.info.clone();
let pubkey = keypair.pubkey();
let leader = match network {
Some(network) => {
@ -92,16 +91,10 @@ fn main() {
None => node_info,
};
let vote_account_keypair = Arc::new(Keypair::new());
let vote_account_id = vote_account_keypair.pubkey();
let keypair = Arc::new(keypair);
let pubkey = keypair.pubkey();
let mut fullnode = Fullnode::new(
node,
ledger_path,
keypair.clone(),
vote_account_keypair,
keypair,
network,
false,
LeaderScheduler::from_bootstrap_leader(leader.id),
@ -136,49 +129,6 @@ fn main() {
}
}
// Create the vote account
loop {
let last_id = client.get_last_id();
if client
.create_vote_account(&keypair, vote_account_id, &last_id, 1)
.is_err()
{
sleep(Duration::from_secs(2));
continue;
}
let balance = client.poll_get_balance(&vote_account_id).unwrap_or(0);
if balance > 0 {
break;
}
sleep(Duration::from_secs(2));
}
// Register the vote account to this node
loop {
let last_id = client.get_last_id();
if client
.register_vote_account(&keypair, vote_account_id, &last_id)
.is_err()
{
sleep(Duration::from_secs(2));
continue;
}
let account_user_data = client.get_account_userdata(&vote_account_id);
if let Ok(Some(account_user_data)) = account_user_data {
if let Ok(vote_state) = VoteProgram::deserialize(&account_user_data) {
if vote_state.node_id == pubkey {
break;
}
}
}
sleep(Duration::from_secs(2));
}
loop {
let status = fullnode.handle_role_transition();
match status {

View File

@ -5,6 +5,7 @@ extern crate solana;
use clap::{App, Arg, SubCommand};
use solana::bank::Bank;
use solana::leader_scheduler::LeaderScheduler;
use solana::ledger::{read_ledger, verify_ledger};
use solana::logger;
use std::io::{stdout, Write};
@ -115,7 +116,7 @@ fn main() {
};
let genesis = genesis.take(2).map(|e| e.unwrap());
if let Err(e) = bank.process_ledger(genesis) {
if let Err(e) = bank.process_ledger(genesis, &mut LeaderScheduler::default()) {
eprintln!("verify failed at genesis err: {:?}", e);
if !matches.is_present("continue") {
exit(1);
@ -141,7 +142,10 @@ fn main() {
}
last_id = entry.id;
if let Err(e) = bank.process_entry(&entry) {
let mut tick_height = 0;
let mut leader_scheduler = LeaderScheduler::default();
if let Err(e) = bank.process_entry(&entry, &mut tick_height, &mut leader_scheduler)
{
eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
if !matches.is_present("continue") {
exit(1);

View File

@ -3,8 +3,11 @@ use native_loader;
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
pub const BPF_LOADER_PROGRAM_ID: [u8; 32] = [6u8; 32];
pub const BPF_LOADER_NAME: &str = "bpf_loader";
const BPF_LOADER_NAME: &str = "solana_bpf_loader";
const BPF_LOADER_PROGRAM_ID: [u8; 32] = [
128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
pub fn id() -> Pubkey {
Pubkey::new(&BPF_LOADER_PROGRAM_ID)

View File

@ -1,12 +1,15 @@
use budget::Budget;
use chrono::prelude::{DateTime, Utc};
/// A smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Contract {
/// The number of tokens allocated to the `Budget` and any transaction fees.
pub tokens: i64,
pub budget: Budget,
pub struct Vote {
/// We send some gossip specific membership information through the vote to shortcut
/// liveness voting
/// The version of the ClusterInfo struct that the last_id of this network voted with
pub version: u64,
/// The version of the ClusterInfo struct that has the same network configuration as this one
pub contact_info_version: u64,
// TODO: add signature of the state here as well
}
/// An instruction to progress the smart contract.
@ -21,4 +24,7 @@ pub enum Instruction {
/// Tell the budget that the `NewBudget` with `Signature` has been
/// signed by the containing transaction's `Pubkey`.
ApplySignature,
/// Vote for a PoH that is equal to the lastid of this transaction
NewVote(Vote),
}

View File

@ -30,9 +30,11 @@ pub struct BudgetState {
pub pending_budget: Option<Budget>,
}
pub const BUDGET_PROGRAM_ID: [u8; 32] = [
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
const BUDGET_PROGRAM_ID: [u8; 32] = [
129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
impl BudgetState {
fn is_pending(&self) -> bool {
self.pending_budget != None
@ -172,6 +174,11 @@ impl BudgetState {
Err(BudgetError::UninitializedContract)
}
}
Instruction::NewVote(_vote) => {
// TODO: move vote instruction into a different contract
trace!("GOT VOTE! last_id={}", tx.last_id);
Ok(())
}
}
}
fn serialize(&self, output: &mut [u8]) -> Result<(), BudgetError> {

View File

@ -2,7 +2,7 @@
use bincode::{deserialize, serialize};
use budget::{Budget, Condition};
use budget_instruction::Instruction;
use budget_instruction::{Instruction, Vote};
use budget_program::BudgetState;
use chrono::prelude::*;
use hash::Hash;
@ -38,6 +38,8 @@ pub trait BudgetTransaction {
last_id: Hash,
) -> Self;
fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self;
fn budget_new_on_date(
from_keypair: &Keypair,
to: Pubkey,
@ -59,6 +61,8 @@ pub trait BudgetTransaction {
last_id: Hash,
) -> Self;
fn vote(&self) -> Option<(Pubkey, Vote, Hash)>;
fn instruction(&self, program_index: usize) -> Option<Instruction>;
fn system_instruction(&self, program_index: usize) -> Option<SystemProgram>;
@ -149,6 +153,12 @@ impl BudgetTransaction for Transaction {
)
}
fn budget_new_vote(from_keypair: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self {
let instruction = Instruction::NewVote(vote);
let userdata = serialize(&instruction).expect("serialize instruction");
Self::new(from_keypair, &[], BudgetState::id(), userdata, last_id, fee)
}
/// Create and sign a postdated Transaction. Used for unit-testing.
fn budget_new_on_date(
from_keypair: &Keypair,
@ -209,6 +219,16 @@ impl BudgetTransaction for Transaction {
)
}
fn vote(&self) -> Option<(Pubkey, Vote, Hash)> {
if self.instructions.len() > 1 {
None
} else if let Some(Instruction::NewVote(vote)) = self.instruction(0) {
Some((self.account_keys[0], vote, self.last_id))
} else {
None
}
}
fn instruction(&self, instruction_index: usize) -> Option<Instruction> {
deserialize(&self.userdata(instruction_index)).ok()
}

View File

@ -13,6 +13,7 @@
//!
//! Bank needs to provide an interface for us to query the stake weight
use bincode::{deserialize, serialize, serialized_size};
use budget_instruction::Vote;
use choose_gossip_peer_strategy::{ChooseGossipPeerStrategy, ChooseWeightedPeerStrategy};
use counter::Counter;
use hash::Hash;
@ -337,6 +338,47 @@ impl ClusterInfo {
self.external_liveness.get(key)
}
pub fn insert_vote(&mut self, pubkey: &Pubkey, v: &Vote, last_id: Hash) {
if self.table.get(pubkey).is_none() {
warn!("{}: VOTE for unknown id: {}", self.id, pubkey);
return;
}
if v.contact_info_version > self.table[pubkey].contact_info.version {
warn!(
"{}: VOTE for new address version from: {} ours: {} vote: {:?}",
self.id, pubkey, self.table[pubkey].contact_info.version, v,
);
return;
}
if *pubkey == self.my_data().leader_id {
info!("{}: LEADER_VOTED! {}", self.id, pubkey);
inc_new_counter_info!("cluster_info-insert_vote-leader_voted", 1);
}
if v.version <= self.table[pubkey].version {
debug!("{}: VOTE for old version: {}", self.id, pubkey);
self.update_liveness(*pubkey);
return;
} else {
let mut data = self.table[pubkey].clone();
data.version = v.version;
data.ledger_state.last_id = last_id;
debug!("{}: INSERTING VOTE! for {}", self.id, data.id);
self.update_liveness(data.id);
self.insert(&data);
}
}
pub fn insert_votes(&mut self, votes: &[(Pubkey, Vote, Hash)]) {
inc_new_counter_info!("cluster_info-vote-count", votes.len());
if !votes.is_empty() {
info!("{}: INSERTING VOTES {}", self.id, votes.len());
}
for v in votes {
self.insert_vote(&v.0, &v.1, v.2);
}
}
pub fn insert(&mut self, v: &NodeInfo) -> usize {
// TODO check that last_verified types are always increasing
// update the peer table
@ -413,7 +455,6 @@ impl ClusterInfo {
if *id == leader_id {
info!("{}: PURGE LEADER {}", self.id, id,);
inc_new_counter_info!("cluster_info-purge-purged_leader", 1, 1);
self.set_leader(Pubkey::default());
}
}
}
@ -497,7 +538,7 @@ impl ClusterInfo {
);
// Make sure the next leader in line knows about the entries before his slot in the leader
// rotation so they can initiate repairs if necessary
// rotation so he can initiate repairs if necessary
{
let ls_lock = leader_scheduler.read().unwrap();
let next_leader_height = ls_lock.max_height_for_leader(tick_height);
@ -782,6 +823,22 @@ impl ClusterInfo {
Ok((v.contact_info.ncp, req))
}
pub fn new_vote(&mut self, last_id: Hash) -> Result<(Vote, SocketAddr)> {
let mut me = self.my_data().clone();
let leader = self
.leader_data()
.ok_or(ClusterInfoError::NoLeader)?
.clone();
me.version += 1;
me.ledger_state.last_id = last_id;
let vote = Vote {
version: me.version,
contact_info_version: me.contact_info.version,
};
self.insert(&me);
Ok((vote, leader.contact_info.tpu))
}
/// At random pick a node and try to get updated changes from them
fn run_gossip(obj: &Arc<RwLock<Self>>, blob_sender: &BlobSender) -> Result<()> {
//TODO we need to keep track of stakes and weight the selection by stake size
@ -1330,6 +1387,7 @@ fn report_time_spent(label: &str, time: &Duration, extra: &str) {
#[cfg(test)]
mod tests {
use bincode::serialize;
use budget_instruction::Vote;
use cluster_info::{
ClusterInfo, ClusterInfoError, Node, NodeInfo, Protocol, FULLNODE_PORT_RANGE,
GOSSIP_PURGE_MILLIS, GOSSIP_SLEEP_MILLIS, MIN_TABLE_SIZE,
@ -1377,6 +1435,62 @@ mod tests {
assert_eq!(cluster_info.table[&d.id].version, 3);
assert!(liveness < cluster_info.alive[&d.id]);
}
#[test]
fn test_new_vote() {
let d = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
assert_eq!(d.version, 0);
let mut cluster_info = ClusterInfo::new(d.clone()).unwrap();
assert_eq!(cluster_info.table[&d.id].version, 0);
let leader = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.2:1235"));
assert_ne!(d.id, leader.id);
assert_matches!(
cluster_info.new_vote(Hash::default()).err(),
Some(Error::ClusterInfoError(ClusterInfoError::NoLeader))
);
cluster_info.insert(&leader);
assert_matches!(
cluster_info.new_vote(Hash::default()).err(),
Some(Error::ClusterInfoError(ClusterInfoError::NoLeader))
);
cluster_info.set_leader(leader.id);
assert_eq!(cluster_info.table[&d.id].version, 1);
let v = Vote {
version: 2, //version should increase when we vote
contact_info_version: 0,
};
let expected = (v, cluster_info.table[&leader.id].contact_info.tpu);
assert_eq!(cluster_info.new_vote(Hash::default()).unwrap(), expected);
}
#[test]
fn test_insert_vote() {
let d = NodeInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234"));
assert_eq!(d.version, 0);
let mut cluster_info = ClusterInfo::new(d.clone()).unwrap();
assert_eq!(cluster_info.table[&d.id].version, 0);
let vote_same_version = Vote {
version: d.version,
contact_info_version: 0,
};
cluster_info.insert_vote(&d.id, &vote_same_version, Hash::default());
assert_eq!(cluster_info.table[&d.id].version, 0);
let vote_new_version_new_addrs = Vote {
version: d.version + 1,
contact_info_version: 1,
};
cluster_info.insert_vote(&d.id, &vote_new_version_new_addrs, Hash::default());
//should be dropped since the address is newer then we know
assert_eq!(cluster_info.table[&d.id].version, 0);
let vote_new_version_old_addrs = Vote {
version: d.version + 1,
contact_info_version: 0,
};
cluster_info.insert_vote(&d.id, &vote_new_version_old_addrs, Hash::default());
//should be accepted, since the update is for the same address field as the one we know
assert_eq!(cluster_info.table[&d.id].version, 1);
}
fn sorted(ls: &Vec<(NodeInfo, u64)>) -> Vec<(NodeInfo, u64)> {
let mut copy: Vec<_> = ls.iter().cloned().collect();
copy.sort_by(|x, y| x.0.id.cmp(&y.0.id));
@ -1667,7 +1781,7 @@ mod tests {
let len = cluster_info.table.len() as u64;
cluster_info.purge(now + GOSSIP_PURGE_MILLIS + 1);
assert_eq!(len as usize - 1, cluster_info.table.len());
assert_eq!(cluster_info.my_data().leader_id, Pubkey::default());
assert_eq!(cluster_info.my_data().leader_id, nxt.id);
assert!(cluster_info.leader_data().is_none());
}

View File

@ -235,7 +235,6 @@ mod tests {
use signature::{Keypair, KeypairUtil};
use std::fs::remove_dir_all;
use std::net::{SocketAddr, UdpSocket};
use std::sync::{Arc, RwLock};
use std::time::Duration;
use thin_client::ThinClient;
@ -314,24 +313,18 @@ mod tests {
const TPS_BATCH: i64 = 5_000_000;
logger::setup();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let alice = Mint::new(10_000_000);
let mut bank = Bank::new(&alice);
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader.info.id,
)));
bank.leader_scheduler = leader_scheduler;
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let carlos_pubkey = Keypair::new().pubkey();
let leader_data = leader.info.clone();
let ledger_path = get_tmp_ledger_path("send_airdrop");
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
0,
@ -340,6 +333,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(0),
);
@ -374,14 +368,13 @@ mod tests {
// restart the leader, drone should find the new one at the same gossip port
server.close().unwrap();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let server = Fullnode::new(
leader,
&ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),

View File

@ -85,12 +85,12 @@ pub enum FullnodeReturnType {
pub struct Fullnode {
pub node_role: Option<NodeRole>,
pub leader_scheduler: Arc<RwLock<LeaderScheduler>>,
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
rpu: Option<Rpu>,
rpc_service: Option<JsonRpcService>,
rpc_pubsub_service: Option<PubSubService>,
rpc_service: JsonRpcService,
rpc_pubsub_service: PubSubService,
ncp: Ncp,
bank: Arc<Bank>,
cluster_info: Arc<RwLock<ClusterInfo>>,
@ -104,7 +104,6 @@ pub struct Fullnode {
broadcast_socket: UdpSocket,
requests_socket: UdpSocket,
respond_socket: UdpSocket,
rpc_port: Option<u16>,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@ -133,17 +132,14 @@ impl Fullnode {
pub fn new(
node: Node,
ledger_path: &str,
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
keypair: Keypair,
leader_addr: Option<SocketAddr>,
sigverify_disabled: bool,
leader_scheduler: LeaderScheduler,
mut leader_scheduler: LeaderScheduler,
) -> Self {
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
info!("creating bank...");
let (bank, tick_height, entry_height, ledger_tail) =
Self::new_bank_from_ledger(ledger_path, leader_scheduler);
Self::new_bank_from_ledger(ledger_path, &mut leader_scheduler);
info!("creating networking stack...");
let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();
@ -158,7 +154,6 @@ impl Fullnode {
let leader_info = leader_addr.map(|i| NodeInfo::new_entry_point(&i));
let server = Self::new_with_bank(
keypair,
vote_account_keypair,
bank,
tick_height,
entry_height,
@ -167,6 +162,7 @@ impl Fullnode {
leader_info.as_ref(),
ledger_path,
sigverify_disabled,
leader_scheduler,
None,
);
@ -240,8 +236,7 @@ impl Fullnode {
/// ```
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub fn new_with_bank(
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
keypair: Keypair,
bank: Bank,
tick_height: u64,
entry_height: u64,
@ -250,6 +245,7 @@ impl Fullnode {
bootstrap_leader_info_option: Option<&NodeInfo>,
ledger_path: &str,
sigverify_disabled: bool,
leader_scheduler: LeaderScheduler,
rpc_port: Option<u16>,
) -> Self {
let exit = Arc::new(AtomicBool::new(false));
@ -278,8 +274,21 @@ impl Fullnode {
ClusterInfo::new(node.info).expect("ClusterInfo::new"),
));
let (rpc_service, rpc_pubsub_service) =
Self::startup_rpc_services(rpc_port, &bank, &cluster_info);
// Use custom RPC port, if provided (`Some(port)`)
// RPC port may be any open port on the node
// If rpc_port == `None`, node will listen on the default RPC_PORT from Rpc module
// If rpc_port == `Some(0)`, node will dynamically choose any open port for both
// Rpc and RpcPubsub serivces. Useful for tests.
let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
// TODO: The RPC service assumes that there is a drone running on the leader
// Drone location/id will need to be handled a different way as soon as leader rotation begins
let rpc_service = JsonRpcService::new(&bank, &cluster_info, rpc_addr, exit.clone());
let rpc_pubsub_addr = SocketAddr::new(
IpAddr::V4(Ipv4Addr::from(0)),
rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 }),
);
let rpc_pubsub_service = PubSubService::new(&bank, rpc_pubsub_addr, exit.clone());
let ncp = Ncp::new(
&cluster_info,
@ -289,6 +298,9 @@ impl Fullnode {
exit.clone(),
);
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
let keypair = Arc::new(keypair);
// Insert the bootstrap leader info, should only be None if this node
// is the bootstrap leader
if let Some(bootstrap_leader_info) = bootstrap_leader_info_option {
@ -296,8 +308,10 @@ impl Fullnode {
}
// Get the scheduled leader
let scheduled_leader = bank
.get_current_leader()
let scheduled_leader = leader_scheduler
.read()
.unwrap()
.get_scheduled_leader(tick_height)
.expect("Leader not known after processing bank");
cluster_info.write().unwrap().set_leader(scheduled_leader);
@ -305,8 +319,8 @@ impl Fullnode {
// Start in validator mode.
let tvu = Tvu::new(
keypair.clone(),
vote_account_keypair.clone(),
&bank,
tick_height,
entry_height,
cluster_info.clone(),
shared_window.clone(),
@ -324,17 +338,20 @@ impl Fullnode {
.try_clone()
.expect("Failed to clone retransmit socket"),
Some(ledger_path),
leader_scheduler.clone(),
);
let validator_state = ValidatorServices::new(tvu);
Some(NodeRole::Validator(validator_state))
} else {
let max_tick_height = {
let ls_lock = bank.leader_scheduler.read().unwrap();
let ls_lock = leader_scheduler.read().unwrap();
ls_lock.max_height_for_leader(tick_height)
};
// Start in leader mode.
let (tpu, entry_receiver, tpu_exit) = Tpu::new(
keypair.clone(),
&bank,
&cluster_info,
Default::default(),
node.sockets
.transaction
@ -357,7 +374,7 @@ impl Fullnode {
shared_window.clone(),
entry_height,
entry_receiver,
bank.leader_scheduler.clone(),
leader_scheduler.clone(),
tick_height,
tpu_exit,
);
@ -367,15 +384,14 @@ impl Fullnode {
Fullnode {
keypair,
vote_account_keypair,
cluster_info,
shared_window,
bank,
sigverify_disabled,
rpu,
ncp,
rpc_service: Some(rpc_service),
rpc_pubsub_service: Some(rpc_pubsub_service),
rpc_service,
rpc_pubsub_service,
node_role,
ledger_path: ledger_path.to_owned(),
exit,
@ -386,50 +402,27 @@ impl Fullnode {
broadcast_socket: node.sockets.broadcast,
requests_socket: node.sockets.requests,
respond_socket: node.sockets.respond,
rpc_port,
leader_scheduler,
}
}
fn leader_to_validator(&mut self) -> Result<()> {
// Close down any services that could have a reference to the bank
if self.rpu.is_some() {
let old_rpu = self.rpu.take().unwrap();
old_rpu.close()?;
}
let (scheduled_leader, tick_height, entry_height, last_entry_id) = {
let mut ls_lock = self.leader_scheduler.write().unwrap();
// Clear the leader scheduler
ls_lock.reset();
if self.rpc_service.is_some() {
let old_rpc_service = self.rpc_service.take().unwrap();
old_rpc_service.close()?;
}
if self.rpc_pubsub_service.is_some() {
let old_rpc_pubsub_service = self.rpc_pubsub_service.take().unwrap();
old_rpc_pubsub_service.close()?;
}
// Correctness check: Ensure that references to the bank and leader scheduler are no
// longer held by any running thread
let mut new_leader_scheduler = self.bank.leader_scheduler.read().unwrap().clone();
// Clear the leader scheduler
new_leader_scheduler.reset();
let (new_bank, scheduled_leader, tick_height, entry_height, last_entry_id) = {
// TODO: We can avoid building the bank again once RecordStage is
// integrated with BankingStage
let (new_bank, tick_height, entry_height, ledger_tail) = Self::new_bank_from_ledger(
&self.ledger_path,
Arc::new(RwLock::new(new_leader_scheduler)),
);
let (bank, tick_height, entry_height, ledger_tail) =
Self::new_bank_from_ledger(&self.ledger_path, &mut *ls_lock);
let new_bank = Arc::new(new_bank);
let scheduled_leader = new_bank
.get_current_leader()
.expect("Scheduled leader should exist after rebuilding bank");
self.bank = Arc::new(bank);
(
new_bank,
scheduled_leader,
ls_lock
.get_scheduled_leader(entry_height)
.expect("Scheduled leader should exist after rebuilding bank"),
tick_height,
entry_height,
ledger_tail
@ -444,23 +437,21 @@ impl Fullnode {
.unwrap()
.set_leader(scheduled_leader);
// Spin up new versions of all the services that relied on the bank, passing in the
// new bank
self.rpu = Some(Rpu::new(
&new_bank,
self.requests_socket
.try_clone()
.expect("Failed to clone requests socket"),
self.respond_socket
.try_clone()
.expect("Failed to clone respond socket"),
));
let (rpc_service, rpc_pubsub_service) =
Self::startup_rpc_services(self.rpc_port, &new_bank, &self.cluster_info);
self.rpc_service = Some(rpc_service);
self.rpc_pubsub_service = Some(rpc_pubsub_service);
self.bank = new_bank;
// Make a new RPU to serve requests out of the new bank we've created
// instead of the old one
if self.rpu.is_some() {
let old_rpu = self.rpu.take().unwrap();
old_rpu.close()?;
self.rpu = Some(Rpu::new(
&self.bank,
self.requests_socket
.try_clone()
.expect("Failed to clone requests socket"),
self.respond_socket
.try_clone()
.expect("Failed to clone respond socket"),
));
}
// In the rare case that the leader exited on a multiple of seed_rotation_interval
// when the new leader schedule was being generated, and there are no other validators
@ -468,31 +459,32 @@ impl Fullnode {
// check for that
if scheduled_leader == self.keypair.pubkey() {
self.validator_to_leader(tick_height, entry_height, last_entry_id);
Ok(())
} else {
let tvu = Tvu::new(
self.keypair.clone(),
self.vote_account_keypair.clone(),
&self.bank,
entry_height,
self.cluster_info.clone(),
self.shared_window.clone(),
self.replicate_socket
.iter()
.map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
.collect(),
self.repair_socket
.try_clone()
.expect("Failed to clone repair socket"),
self.retransmit_socket
.try_clone()
.expect("Failed to clone retransmit socket"),
Some(&self.ledger_path),
);
let validator_state = ValidatorServices::new(tvu);
self.node_role = Some(NodeRole::Validator(validator_state));
Ok(())
return Ok(());
}
let tvu = Tvu::new(
self.keypair.clone(),
&self.bank,
tick_height,
entry_height,
self.cluster_info.clone(),
self.shared_window.clone(),
self.replicate_socket
.iter()
.map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
.collect(),
self.repair_socket
.try_clone()
.expect("Failed to clone repair socket"),
self.retransmit_socket
.try_clone()
.expect("Failed to clone retransmit socket"),
Some(&self.ledger_path),
self.leader_scheduler.clone(),
);
let validator_state = ValidatorServices::new(tvu);
self.node_role = Some(NodeRole::Validator(validator_state));
Ok(())
}
fn validator_to_leader(&mut self, tick_height: u64, entry_height: u64, last_entry_id: Hash) {
@ -502,12 +494,14 @@ impl Fullnode {
.set_leader(self.keypair.pubkey());
let max_tick_height = {
let ls_lock = self.bank.leader_scheduler.read().unwrap();
let ls_lock = self.leader_scheduler.read().unwrap();
ls_lock.max_height_for_leader(tick_height)
};
let (tpu, blob_receiver, tpu_exit) = Tpu::new(
self.keypair.clone(),
&self.bank,
&self.cluster_info,
Default::default(),
self.transaction_sockets
.iter()
@ -531,7 +525,7 @@ impl Fullnode {
self.shared_window.clone(),
entry_height,
blob_receiver,
self.bank.leader_scheduler.clone(),
self.leader_scheduler.clone(),
tick_height,
tpu_exit,
);
@ -575,12 +569,6 @@ impl Fullnode {
if let Some(ref rpu) = self.rpu {
rpu.exit();
}
if let Some(ref rpc_service) = self.rpc_service {
rpc_service.exit();
}
if let Some(ref rpc_pubsub_service) = self.rpc_pubsub_service {
rpc_pubsub_service.exit();
}
match self.node_role {
Some(NodeRole::Leader(ref leader_services)) => leader_services.exit(),
Some(NodeRole::Validator(ref validator_services)) => validator_services.exit(),
@ -595,50 +583,21 @@ impl Fullnode {
pub fn new_bank_from_ledger(
ledger_path: &str,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
leader_scheduler: &mut LeaderScheduler,
) -> (Bank, u64, u64, Vec<Entry>) {
let mut bank = Bank::new_with_builtin_programs();
bank.leader_scheduler = leader_scheduler;
let bank = Bank::new_with_builtin_programs();
let entries = read_ledger(ledger_path, true).expect("opening ledger");
let entries = entries
.map(|e| e.unwrap_or_else(|err| panic!("failed to parse entry. error: {}", err)));
info!("processing ledger...");
let (tick_height, entry_height, ledger_tail) =
bank.process_ledger(entries).expect("process_ledger");
let (tick_height, entry_height, ledger_tail) = bank
.process_ledger(entries, leader_scheduler)
.expect("process_ledger");
// entry_height is the network-wide agreed height of the ledger.
// initialize it from the input ledger
info!("processed {} ledger...", entry_height);
(bank, tick_height, entry_height, ledger_tail)
}
pub fn get_leader_scheduler(&self) -> &Arc<RwLock<LeaderScheduler>> {
&self.bank.leader_scheduler
}
fn startup_rpc_services(
rpc_port: Option<u16>,
bank: &Arc<Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
) -> (JsonRpcService, PubSubService) {
// Use custom RPC port, if provided (`Some(port)`)
// RPC port may be any open port on the node
// If rpc_port == `None`, node will listen on the default RPC_PORT from Rpc module
// If rpc_port == `Some(0)`, node will dynamically choose any open port for both
// Rpc and RpcPubsub services. Useful for tests.
let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::from(0)), rpc_port.unwrap_or(RPC_PORT));
let rpc_pubsub_addr = SocketAddr::new(
IpAddr::V4(Ipv4Addr::from(0)),
rpc_port.map_or(RPC_PORT + 1, |port| if port == 0 { port } else { port + 1 }),
);
// TODO: The RPC service assumes that there is a drone running on the leader
// Drone location/id will need to be handled a different way as soon as leader rotation begins
(
JsonRpcService::new(bank, cluster_info, rpc_addr),
PubSubService::new(bank, rpc_pubsub_addr),
)
}
}
impl Service for Fullnode {
@ -648,14 +607,9 @@ impl Service for Fullnode {
if let Some(rpu) = self.rpu {
rpu.join()?;
}
if let Some(rpc_service) = self.rpc_service {
rpc_service.join()?;
}
if let Some(rpc_pubsub_service) = self.rpc_pubsub_service {
rpc_pubsub_service.join()?;
}
self.ncp.join()?;
self.rpc_service.join()?;
self.rpc_pubsub_service.join()?;
match self.node_role {
Some(NodeRole::Validator(validator_service)) => {
@ -689,7 +643,7 @@ mod tests {
use std::fs::remove_dir_all;
use std::net::UdpSocket;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use streamer::responder;
#[test]
@ -697,19 +651,13 @@ mod tests {
let keypair = Keypair::new();
let tn = Node::new_localhost_with_pubkey(keypair.pubkey());
let (mint, validator_ledger_path) = create_tmp_genesis("validator_exit", 10_000);
let mut bank = Bank::new(&mint);
let bank = Bank::new(&mint);
let entry = tn.info.clone();
let genesis_entries = &mint.create_entries();
let entry_height = genesis_entries.len() as u64;
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
entry.id,
)));
bank.leader_scheduler = leader_scheduler;
let v = Fullnode::new_with_bank(
Arc::new(keypair),
Arc::new(Keypair::new()),
keypair,
bank,
0,
entry_height,
@ -718,6 +666,7 @@ mod tests {
Some(&entry),
&validator_ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(entry.id),
Some(0),
);
v.close().unwrap();
@ -734,20 +683,13 @@ mod tests {
let (mint, validator_ledger_path) =
create_tmp_genesis(&format!("validator_parallel_exit_{}", i), 10_000);
ledger_paths.push(validator_ledger_path.clone());
let mut bank = Bank::new(&mint);
let bank = Bank::new(&mint);
let entry = tn.info.clone();
let genesis_entries = &mint.create_entries();
let entry_height = genesis_entries.len() as u64;
let leader_scheduler = Arc::new(RwLock::new(
LeaderScheduler::from_bootstrap_leader(entry.id),
));
bank.leader_scheduler = leader_scheduler;
Fullnode::new_with_bank(
Arc::new(keypair),
Arc::new(Keypair::new()),
keypair,
bank,
0,
entry_height,
@ -756,6 +698,7 @@ mod tests {
Some(&entry),
&validator_ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(entry.id),
Some(0),
)
}).collect();
@ -814,8 +757,7 @@ mod tests {
let mut bootstrap_leader = Fullnode::new(
bootstrap_leader_node,
&bootstrap_leader_ledger_path,
Arc::new(bootstrap_leader_keypair),
Arc::new(Keypair::new()),
bootstrap_leader_keypair,
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -841,7 +783,7 @@ mod tests {
#[test]
fn test_wrong_role_transition() {
// Create the leader node information
let bootstrap_leader_keypair = Arc::new(Keypair::new());
let bootstrap_leader_keypair = Keypair::new();
let bootstrap_leader_node =
Node::new_localhost_with_pubkey(bootstrap_leader_keypair.pubkey());
let bootstrap_leader_info = bootstrap_leader_node.info.clone();
@ -863,7 +805,7 @@ mod tests {
// Write the entries to the ledger that will cause leader rotation
// after the bootstrap height
let mut ledger_writer = LedgerWriter::open(&bootstrap_leader_ledger_path, false).unwrap();
let (active_set_entries, validator_vote_account_keypair) = make_active_set_entries(
let active_set_entries = make_active_set_entries(
&validator_keypair,
&mint.keypair(),
&last_id,
@ -895,12 +837,10 @@ mod tests {
);
// Test that a node knows to transition to a validator based on parsing the ledger
let leader_vote_account_keypair = Arc::new(Keypair::new());
let bootstrap_leader = Fullnode::new(
bootstrap_leader_node,
&bootstrap_leader_ledger_path,
bootstrap_leader_keypair,
leader_vote_account_keypair,
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -917,8 +857,7 @@ mod tests {
let validator = Fullnode::new(
validator_node,
&bootstrap_leader_ledger_path,
Arc::new(validator_keypair),
Arc::new(validator_vote_account_keypair),
validator_keypair,
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -966,7 +905,7 @@ mod tests {
//
// 2) A vote from the validator
let mut ledger_writer = LedgerWriter::open(&validator_ledger_path, false).unwrap();
let (active_set_entries, validator_vote_account_keypair) =
let active_set_entries =
make_active_set_entries(&validator_keypair, &mint.keypair(), &last_id, &last_id, 0);
let initial_tick_height = genesis_entries
.iter()
@ -994,8 +933,7 @@ mod tests {
let mut validator = Fullnode::new(
validator_node,
&validator_ledger_path,
Arc::new(validator_keypair),
Arc::new(validator_vote_account_keypair),
validator_keypair,
Some(leader_ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1055,7 +993,7 @@ mod tests {
// transitioned after tick_height = bootstrap_height.
let (_, tick_height, entry_height, _) = Fullnode::new_bank_from_ledger(
&validator_ledger_path,
Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config))),
&mut LeaderScheduler::new(&leader_scheduler_config),
);
assert_eq!(tick_height, bootstrap_height);

View File

@ -4,24 +4,82 @@
use bank::Bank;
use bincode::serialize;
use budget_instruction::Vote;
use budget_transaction::BudgetTransaction;
use byteorder::{LittleEndian, ReadBytesExt};
use entry::Entry;
use hash::{hash, Hash};
use ledger::create_ticks;
use signature::{Keypair, KeypairUtil};
#[cfg(test)]
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use std::collections::HashSet;
use std::collections::HashMap;
use std::io::Cursor;
use system_transaction::SystemTransaction;
use transaction::Transaction;
use vote_program::{Vote, VoteProgram};
use vote_transaction::VoteTransaction;
pub const DEFAULT_BOOTSTRAP_HEIGHT: u64 = 1000;
pub const DEFAULT_LEADER_ROTATION_INTERVAL: u64 = 100;
pub const DEFAULT_SEED_ROTATION_INTERVAL: u64 = 1000;
pub const DEFAULT_ACTIVE_WINDOW_LENGTH: u64 = 1000;
#[derive(Debug)]
pub struct ActiveValidators {
// Map from validator id to the last PoH height at which they voted,
pub active_validators: HashMap<Pubkey, u64>,
pub active_window_length: u64,
}
impl ActiveValidators {
pub fn new(active_window_length_option: Option<u64>) -> Self {
let mut active_window_length = DEFAULT_ACTIVE_WINDOW_LENGTH;
if let Some(input) = active_window_length_option {
active_window_length = input;
}
ActiveValidators {
active_validators: HashMap::new(),
active_window_length,
}
}
// Finds all the active voters who have voted in the range
// (height - active_window_length, height], and removes
// anybody who hasn't voted in that range from the map
pub fn get_active_set(&mut self, height: u64) -> Vec<Pubkey> {
// Don't filter anything if height is less than the
// size of the active window. Otherwise, calculate the acceptable
// window and filter the active_validators
// Note: height == 0 will only be included for all
// height < self.active_window_length
let upper_bound = height;
if height >= self.active_window_length {
let lower_bound = height - self.active_window_length;
self.active_validators
.retain(|_, height| *height > lower_bound);
}
self.active_validators
.iter()
.filter_map(|(k, v)| if *v <= upper_bound { Some(*k) } else { None })
.collect()
}
// Push a vote for a validator with id == "id" who voted at PoH height == "height"
pub fn push_vote(&mut self, id: Pubkey, height: u64) -> () {
let old_height = self.active_validators.entry(id).or_insert(height);
if height > *old_height {
*old_height = height;
}
}
pub fn reset(&mut self) -> () {
self.active_validators.clear();
}
}
pub struct LeaderSchedulerConfig {
// The first leader who will bootstrap the network
pub bootstrap_leader: Pubkey,
@ -61,7 +119,7 @@ impl LeaderSchedulerConfig {
}
}
#[derive(Clone, Debug)]
#[derive(Debug)]
pub struct LeaderScheduler {
// Set to true if we want the default implementation of the LeaderScheduler,
// where only the bootstrap leader is used
@ -81,13 +139,12 @@ pub struct LeaderScheduler {
// the leader rotation process begins to pick future leaders
pub bootstrap_height: u64,
// Maintain the set of active validators
pub active_validators: ActiveValidators,
// The last height at which the seed + schedule was generated
pub last_seed_height: Option<u64>,
// The length of time in ticks for which a vote qualifies a candidate for leader
// selection
pub active_window_length: u64,
// Round-robin ordering for the validators
leader_schedule: Vec<Pubkey>,
@ -136,11 +193,6 @@ impl LeaderScheduler {
seed_rotation_interval = input;
}
let mut active_window_length = DEFAULT_ACTIVE_WINDOW_LENGTH;
if let Some(input) = config.active_window_length_option {
active_window_length = input;
}
// Enforced invariants
assert!(seed_rotation_interval >= leader_rotation_interval);
assert!(bootstrap_height > 0);
@ -148,13 +200,13 @@ impl LeaderScheduler {
LeaderScheduler {
use_only_bootstrap_leader: false,
active_validators: ActiveValidators::new(config.active_window_length_option),
leader_rotation_interval,
seed_rotation_interval,
leader_schedule: Vec::new(),
last_seed_height: None,
bootstrap_leader: config.bootstrap_leader,
bootstrap_height,
active_window_length,
seed: 0,
}
}
@ -228,6 +280,15 @@ impl LeaderScheduler {
pub fn reset(&mut self) {
self.last_seed_height = None;
self.active_validators.reset();
}
pub fn push_vote(&mut self, id: Pubkey, height: u64) {
if self.use_only_bootstrap_leader {
return;
}
self.active_validators.push_vote(id, height);
}
pub fn update_height(&mut self, height: u64, bank: &Bank) {
@ -282,34 +343,8 @@ impl LeaderScheduler {
Some(self.leader_schedule[validator_index])
}
// TODO: We use a HashSet for now because a single validator could potentially register
// multiple vote account. Once that is no longer possible (see the TODO in vote_program.rs,
// process_transaction(), case VoteInstruction::RegisterAccount), we can use a vector.
fn get_active_set(&mut self, height: u64, bank: &Bank) -> HashSet<Pubkey> {
let upper_bound = height;
let lower_bound = height.saturating_sub(self.active_window_length);
{
let bank_accounts = &*bank.accounts.read().unwrap();
bank_accounts
.values()
.filter_map(|account| {
if VoteProgram::check_id(&account.program_id) {
if let Ok(vote_state) = VoteProgram::deserialize(&account.userdata) {
return vote_state
.votes
.back()
.filter(|vote| {
vote.tick_height > lower_bound
&& vote.tick_height <= upper_bound
}).map(|_| vote_state.node_id);
}
}
None
}).collect()
}
fn get_active_set(&mut self, height: u64) -> Vec<Pubkey> {
self.active_validators.get_active_set(height)
}
// Called every seed_rotation_interval entries, generates the leader schedule
@ -319,8 +354,8 @@ impl LeaderScheduler {
assert!((height - self.bootstrap_height) % self.seed_rotation_interval == 0);
let seed = Self::calculate_seed(height);
self.seed = seed;
let active_set = self.get_active_set(height, &bank);
let ranked_active_set = Self::rank_active_set(bank, active_set.iter());
let active_set = self.get_active_set(height);
let ranked_active_set = Self::rank_active_set(bank, &active_set[..]);
// Handle case where there are no active validators with
// non-zero stake. In this case, use the bootstrap leader for
@ -382,11 +417,9 @@ impl LeaderScheduler {
bank.get_balance(id)
}
fn rank_active_set<'a, I>(bank: &Bank, active: I) -> Vec<(&'a Pubkey, u64)>
where
I: Iterator<Item = &'a Pubkey>,
{
fn rank_active_set<'a>(bank: &Bank, active: &'a [Pubkey]) -> Vec<(&'a Pubkey, u64)> {
let mut active_accounts: Vec<(&'a Pubkey, u64)> = active
.iter()
.filter_map(|pk| {
let stake = Self::get_stake(pk, bank);
if stake > 0 {
@ -445,6 +478,24 @@ impl Default for LeaderScheduler {
}
}
// Remove all candidates for leader selection from the active set by clearing the bank,
// and then set a single new candidate who will be eligible starting at height = vote_height
// by adding one new account to the bank
#[cfg(test)]
pub fn set_new_leader(bank: &Bank, leader_scheduler: &mut LeaderScheduler, vote_height: u64) {
// Set the scheduled next leader to some other node
let new_leader_keypair = Keypair::new();
let new_leader_id = new_leader_keypair.pubkey();
leader_scheduler.push_vote(new_leader_id, vote_height);
let dummy_id = Keypair::new().pubkey();
let new_account = Account::new(1, 10, dummy_id.clone());
// Remove the previous accounts from the active set
let mut accounts = bank.accounts().write().unwrap();
accounts.clear();
accounts.insert(new_leader_id, new_account);
}
// Create two entries so that the node with keypair == active_keypair
// is in the active set for leader selection:
// 1) Give the node a nonzero number of tokens,
@ -455,107 +506,50 @@ pub fn make_active_set_entries(
last_entry_id: &Hash,
last_tick_id: &Hash,
num_ending_ticks: usize,
) -> (Vec<Entry>, Keypair) {
) -> Vec<Entry> {
// 1) Create transfer token entry
let transfer_tx =
Transaction::system_new(&token_source, active_keypair.pubkey(), 2, *last_tick_id);
Transaction::system_new(&token_source, active_keypair.pubkey(), 1, *last_tick_id);
let transfer_entry = Entry::new(last_entry_id, 1, vec![transfer_tx]);
let mut last_entry_id = transfer_entry.id;
// 2) Create the vote account
let vote_account = Keypair::new();
let create_vote_account_tx =
Transaction::vote_account_new(active_keypair, vote_account.pubkey(), *last_tick_id, 1);
let create_vote_account_entry = Entry::new(&last_entry_id, 1, vec![create_vote_account_tx]);
last_entry_id = create_vote_account_entry.id;
// 3) Register the vote account
let register_vote_account_tx =
Transaction::vote_account_register(active_keypair, vote_account.pubkey(), *last_tick_id, 0);
let register_vote_account_entry = Entry::new(&last_entry_id, 1, vec![register_vote_account_tx]);
last_entry_id = register_vote_account_entry.id;
// 4) Create vote entry
let vote = Vote { tick_height: 1 };
let vote_tx = Transaction::vote_new(&vote_account, vote, *last_tick_id, 0);
// 2) Create vote entry
let vote = Vote {
version: 0,
contact_info_version: 0,
};
let vote_tx = Transaction::budget_new_vote(&active_keypair, vote, *last_tick_id, 0);
let vote_entry = Entry::new(&last_entry_id, 1, vec![vote_tx]);
last_entry_id = vote_entry.id;
// 5) Create the ending empty ticks
let mut txs = vec![
transfer_entry,
create_vote_account_entry,
register_vote_account_entry,
vote_entry,
];
// 3) Create the ending empty ticks
let mut txs = vec![transfer_entry, vote_entry];
let empty_ticks = create_ticks(num_ending_ticks, last_entry_id);
txs.extend(empty_ticks);
(txs, vote_account)
txs
}
#[cfg(test)]
mod tests {
use bank::Bank;
use hash::Hash;
use leader_scheduler::{
LeaderScheduler, LeaderSchedulerConfig, DEFAULT_BOOTSTRAP_HEIGHT,
DEFAULT_LEADER_ROTATION_INTERVAL, DEFAULT_SEED_ROTATION_INTERVAL,
ActiveValidators, LeaderScheduler, LeaderSchedulerConfig, DEFAULT_ACTIVE_WINDOW_LENGTH,
DEFAULT_BOOTSTRAP_HEIGHT, DEFAULT_LEADER_ROTATION_INTERVAL, DEFAULT_SEED_ROTATION_INTERVAL,
};
use mint::Mint;
use result::Result;
use signature::{Keypair, KeypairUtil};
use solana_sdk::pubkey::Pubkey;
use std::collections::HashSet;
use std::hash::Hash as StdHash;
use std::hash::Hash;
use std::iter::FromIterator;
use transaction::Transaction;
use vote_program::Vote;
use vote_transaction::VoteTransaction;
fn to_hashset_owned<T>(slice: &[T]) -> HashSet<T>
where
T: Eq + StdHash + Clone,
T: Eq + Hash + Clone,
{
HashSet::from_iter(slice.iter().cloned())
}
fn push_vote(vote_account: &Keypair, bank: &Bank, height: u64, last_id: Hash) {
let vote = Vote {
tick_height: height,
};
let new_vote_tx = Transaction::vote_new(vote_account, vote, last_id, 0);
bank.process_transaction(&new_vote_tx).unwrap();
}
fn create_vote_account(
node_keypair: &Keypair,
bank: &Bank,
num_tokens: i64,
last_id: Hash,
) -> Result<Keypair> {
let new_vote_account = Keypair::new();
// Create the new vote account
let tx = Transaction::vote_account_new(
node_keypair,
new_vote_account.pubkey(),
last_id,
num_tokens,
);
bank.process_transaction(&tx)?;
// Register the vote account to the validator
let tx =
Transaction::vote_account_register(node_keypair, new_vote_account.pubkey(), last_id, 0);
bank.process_transaction(&tx)?;
Ok(new_vote_account)
}
fn run_scheduler_test(
num_validators: usize,
bootstrap_height: u64,
@ -578,11 +572,7 @@ mod tests {
let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
// Create the bank and validators, which are inserted in order of account balance
let num_vote_account_tokens = 1;
let mint = Mint::new(
(((num_validators + 1) / 2) * (num_validators + 1)
+ num_vote_account_tokens * num_validators) as i64,
);
let mint = Mint::new((((num_validators + 1) / 2) * (num_validators + 1)) as i64);
let bank = Bank::new(&mint);
let mut validators = vec![];
let last_id = mint
@ -594,24 +584,11 @@ mod tests {
let new_validator = Keypair::new();
let new_pubkey = new_validator.pubkey();
validators.push(new_pubkey);
// Give the validator some tokens
bank.transfer(
(i + 1 + num_vote_account_tokens) as i64,
&mint.keypair(),
new_pubkey,
last_id,
).unwrap();
// Create a vote account
let new_vote_account = create_vote_account(
&new_validator,
&bank,
num_vote_account_tokens as i64,
mint.last_id(),
).unwrap();
// Vote to make the validator part of the active set for the entire test
// (we made the active_window_length large enough at the beginning of the test)
push_vote(&new_vote_account, &bank, 1, mint.last_id());
leader_scheduler.push_vote(new_pubkey, 1);
bank.transfer((i + 1) as i64, &mint.keypair(), new_pubkey, last_id)
.unwrap();
}
// The scheduled leader during the bootstrapping period (assuming a seed + schedule
@ -689,9 +666,6 @@ mod tests {
fn test_active_set() {
let leader_id = Keypair::new().pubkey();
let active_window_length = 1000;
let mint = Mint::new(10000);
let bank = Bank::new(&mint);
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_id,
Some(100),
@ -707,60 +681,40 @@ mod tests {
let num_old_ids = 20;
let mut old_ids = HashSet::new();
for _ in 0..num_old_ids {
let new_keypair = Keypair::new();
let pk = new_keypair.pubkey();
old_ids.insert(pk.clone());
// Give the account some stake
bank.transfer(5, &mint.keypair(), pk, mint.last_id())
.unwrap();
// Create a vote account
let new_vote_account =
create_vote_account(&new_keypair, &bank, 1, mint.last_id()).unwrap();
// Push a vote for the account
push_vote(&new_vote_account, &bank, start_height, mint.last_id());
let pk = Keypair::new().pubkey();
old_ids.insert(pk);
leader_scheduler.push_vote(pk, start_height);
}
// Insert a bunch of votes at height "start_height + active_window_length"
let num_new_ids = 10;
let mut new_ids = HashSet::new();
for _ in 0..num_new_ids {
let new_keypair = Keypair::new();
let pk = new_keypair.pubkey();
let pk = Keypair::new().pubkey();
new_ids.insert(pk);
// Give the account some stake
bank.transfer(5, &mint.keypair(), pk, mint.last_id())
.unwrap();
// Create a vote account
let new_vote_account =
create_vote_account(&new_keypair, &bank, 1, mint.last_id()).unwrap();
push_vote(
&new_vote_account,
&bank,
start_height + active_window_length,
mint.last_id(),
);
leader_scheduler.push_vote(pk, start_height + active_window_length);
}
// Queries for the active set
let result =
leader_scheduler.get_active_set(active_window_length + start_height - 1, &bank);
assert_eq!(result, old_ids);
let result = leader_scheduler.get_active_set(active_window_length + start_height - 1);
assert_eq!(result.len(), num_old_ids);
let result_set = to_hashset_owned(&result);
assert_eq!(result_set, old_ids);
let result = leader_scheduler.get_active_set(active_window_length + start_height, &bank);
assert_eq!(result, new_ids);
let result = leader_scheduler.get_active_set(active_window_length + start_height);
assert_eq!(result.len(), num_new_ids);
let result_set = to_hashset_owned(&result);
assert_eq!(result_set, new_ids);
let result =
leader_scheduler.get_active_set(2 * active_window_length + start_height - 1, &bank);
assert_eq!(result, new_ids);
let result = leader_scheduler.get_active_set(2 * active_window_length + start_height - 1);
assert_eq!(result.len(), num_new_ids);
let result_set = to_hashset_owned(&result);
assert_eq!(result_set, new_ids);
let result =
leader_scheduler.get_active_set(2 * active_window_length + start_height, &bank);
assert!(result.is_empty());
let result = leader_scheduler.get_active_set(2 * active_window_length + start_height);
assert_eq!(result.len(), 0);
let result_set = to_hashset_owned(&result);
assert!(result_set.is_empty());
}
#[test]
@ -800,7 +754,7 @@ mod tests {
}
let validators_pk: Vec<Pubkey> = validators.iter().map(Keypair::pubkey).collect();
let result = LeaderScheduler::rank_active_set(&bank, validators_pk.iter());
let result = LeaderScheduler::rank_active_set(&bank, &validators_pk[..]);
assert_eq!(result.len(), validators.len());
@ -830,7 +784,7 @@ mod tests {
.chain(new_validators.iter())
.map(Keypair::pubkey)
.collect();
let result = LeaderScheduler::rank_active_set(&bank, all_validators.iter());
let result = LeaderScheduler::rank_active_set(&bank, &all_validators[..]);
assert_eq!(result.len(), new_validators.len());
for (i, (pk, balance)) in result.into_iter().enumerate() {
@ -856,7 +810,7 @@ mod tests {
.unwrap();
}
let result = LeaderScheduler::rank_active_set(&bank, tied_validators_pk.iter());
let result = LeaderScheduler::rank_active_set(&bank, &tied_validators_pk[..]);
let mut sorted: Vec<&Pubkey> = tied_validators_pk.iter().map(|x| x).collect();
sorted.sort_by(|pk1, pk2| pk1.cmp(pk2));
assert_eq!(result.len(), tied_validators_pk.len());
@ -968,7 +922,6 @@ mod tests {
#[test]
fn test_scheduler_active_window() {
let num_validators = 10;
let num_vote_account_tokens = 1;
// Set up the LeaderScheduler struct
let bootstrap_leader_id = Keypair::new().pubkey();
let bootstrap_height = 500;
@ -990,10 +943,7 @@ mod tests {
let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
// Create the bank and validators
let mint = Mint::new(
((((num_validators + 1) / 2) * (num_validators + 1))
+ (num_vote_account_tokens * num_validators)) as i64,
);
let mint = Mint::new((((num_validators + 1) / 2) * (num_validators + 1)) as i64);
let bank = Bank::new(&mint);
let mut validators = vec![];
let last_id = mint
@ -1005,29 +955,10 @@ mod tests {
let new_validator = Keypair::new();
let new_pubkey = new_validator.pubkey();
validators.push(new_pubkey);
// Give the validator some tokens
bank.transfer(
(i + 1 + num_vote_account_tokens) as i64,
&mint.keypair(),
new_pubkey,
last_id,
).unwrap();
// Create a vote account
let new_vote_account = create_vote_account(
&new_validator,
&bank,
num_vote_account_tokens as i64,
mint.last_id(),
).unwrap();
// Vote at height i * active_window_length for validator i
push_vote(
&new_vote_account,
&bank,
i * active_window_length + bootstrap_height,
mint.last_id(),
);
leader_scheduler.push_vote(new_pubkey, i * active_window_length + bootstrap_height);
bank.transfer((i + 1) as i64, &mint.keypair(), new_pubkey, last_id)
.unwrap();
}
// Generate schedule every active_window_length entries and check that
@ -1048,12 +979,8 @@ mod tests {
#[test]
fn test_multiple_vote() {
let leader_keypair = Keypair::new();
let leader_id = leader_keypair.pubkey();
let leader_id = Keypair::new().pubkey();
let active_window_length = 1000;
let mint = Mint::new(10000);
let bank = Bank::new(&mint);
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_id,
Some(100),
@ -1064,38 +991,18 @@ mod tests {
let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
// Give the node some tokens
bank.transfer(5, &mint.keypair(), leader_id, bank.last_id())
.unwrap();
// Check that a node that votes twice in a row will get included in the active
// Check that a validator that votes twice in a row will get included in the active
// window
let initial_vote_height = 1;
// Create a vote account
let new_vote_account =
create_vote_account(&leader_keypair, &bank, 1, mint.last_id()).unwrap();
// Vote twice
push_vote(
&new_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
push_vote(
&new_vote_account,
&bank,
initial_vote_height + 1,
mint.last_id(),
);
leader_scheduler.push_vote(leader_id, initial_vote_height);
leader_scheduler.push_vote(leader_id, initial_vote_height + 1);
let result = leader_scheduler.get_active_set(initial_vote_height + active_window_length);
assert_eq!(result, vec![leader_id]);
let result =
leader_scheduler.get_active_set(initial_vote_height + active_window_length, &bank);
assert_eq!(result, to_hashset_owned(&vec![leader_id]));
let result =
leader_scheduler.get_active_set(initial_vote_height + active_window_length + 1, &bank);
assert!(result.is_empty());
leader_scheduler.get_active_set(initial_vote_height + active_window_length + 1);
assert_eq!(result, vec![]);
}
#[test]
@ -1156,6 +1063,13 @@ mod tests {
DEFAULT_SEED_ROTATION_INTERVAL
);
// Check defaults for ActiveValidators
let active_validators = ActiveValidators::new(None);
assert_eq!(
active_validators.active_window_length,
DEFAULT_ACTIVE_WINDOW_LENGTH
);
// Check actual arguments for LeaderScheduler
let bootstrap_height = 500;
let leader_rotation_interval = 100;
@ -1182,11 +1096,14 @@ mod tests {
leader_scheduler.seed_rotation_interval,
seed_rotation_interval
);
// Check actual arguments for ActiveValidators
let active_validators = ActiveValidators::new(Some(active_window_length));
assert_eq!(active_validators.active_window_length, active_window_length);
}
fn run_consecutive_leader_test(num_slots_per_epoch: u64, add_validator: bool) {
let bootstrap_leader_keypair = Keypair::new();
let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
let bootstrap_leader_id = Keypair::new().pubkey();
let bootstrap_height = 500;
let leader_rotation_interval = 100;
let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;
@ -1213,20 +1130,11 @@ mod tests {
let initial_vote_height = 1;
// Create and add validator to the active set
let validator_keypair = Keypair::new();
let validator_id = validator_keypair.pubkey();
let validator_id = Keypair::new().pubkey();
if add_validator {
bank.transfer(5, &mint.keypair(), validator_id, last_id)
leader_scheduler.push_vote(validator_id, initial_vote_height);
bank.transfer(1, &mint.keypair(), validator_id, last_id)
.unwrap();
// Create a vote account
let new_vote_account =
create_vote_account(&validator_keypair, &bank, 1, mint.last_id()).unwrap();
push_vote(
&new_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
}
// Make sure the bootstrap leader, not the validator, is picked again on next slot
@ -1243,29 +1151,10 @@ mod tests {
}
};
let vote_account_tokens = 1;
bank.transfer(
leader_stake + vote_account_tokens,
&mint.keypair(),
bootstrap_leader_id,
last_id,
).unwrap();
// Create a vote account
let new_vote_account = create_vote_account(
&bootstrap_leader_keypair,
&bank,
vote_account_tokens,
mint.last_id(),
).unwrap();
// Add leader to the active set
push_vote(
&new_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
leader_scheduler.push_vote(bootstrap_leader_id, initial_vote_height);
bank.transfer(leader_stake, &mint.keypair(), bootstrap_leader_id, last_id)
.unwrap();
leader_scheduler.generate_schedule(bootstrap_height, &bank);
@ -1293,8 +1182,7 @@ mod tests {
#[test]
fn test_max_height_for_leader() {
let bootstrap_leader_keypair = Keypair::new();
let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
let bootstrap_leader_id = Keypair::new().pubkey();
let bootstrap_height = 500;
let leader_rotation_interval = 100;
let seed_rotation_interval = 2 * leader_rotation_interval;
@ -1366,34 +1254,15 @@ mod tests {
// Now test when the active set > 1 node
// Create and add validator to the active set
let validator_keypair = Keypair::new();
let validator_id = validator_keypair.pubkey();
// Create a vote account for the validator
bank.transfer(5, &mint.keypair(), validator_id, last_id)
let validator_id = Keypair::new().pubkey();
leader_scheduler.push_vote(validator_id, initial_vote_height);
bank.transfer(1, &mint.keypair(), validator_id, last_id)
.unwrap();
let new_validator_vote_account =
create_vote_account(&validator_keypair, &bank, 1, mint.last_id()).unwrap();
push_vote(
&new_validator_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
// Create a vote account for the leader
bank.transfer(5, &mint.keypair(), bootstrap_leader_id, last_id)
.unwrap();
let new_leader_vote_account =
create_vote_account(&bootstrap_leader_keypair, &bank, 1, mint.last_id()).unwrap();
// Add leader to the active set
push_vote(
&new_leader_vote_account,
&bank,
initial_vote_height,
mint.last_id(),
);
leader_scheduler.push_vote(bootstrap_leader_id, initial_vote_height);
bank.transfer(1, &mint.keypair(), bootstrap_leader_id, last_id)
.unwrap();
// Generate the schedule
leader_scheduler.generate_schedule(bootstrap_height, &bank);

159
src/leader_vote_stage.rs Normal file
View File

@ -0,0 +1,159 @@
//! The `leader_vote_stage` module implements the TPU's vote stage. It
//! computes and notes the votes for the entries, and then sends the
//! Entry to its output channel.
use bank::Bank;
use cluster_info::ClusterInfo;
use counter::Counter;
use entry::Entry;
use ledger::Block;
use log::Level;
use result::{Error, Result};
use service::Service;
use signature::Keypair;
use std::net::UdpSocket;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};
use streamer::responder;
use timing::duration_as_ms;
use vote_stage::send_leader_vote;
pub struct LeaderVoteStage {
    // Join handles for auxiliary threads (currently only the UDP vote
    // responder created in `new`).
    thread_hdls: Vec<JoinHandle<()>>,
    // The main loop thread: forwards entries, records votes into gossip, and
    // periodically sends the leader's own vote.
    vote_thread: JoinHandle<()>,
}
impl LeaderVoteStage {
    /// Process any Entry items that have been published by the RecordStage.
    /// Continuously sends entries out.
    ///
    /// Blocks up to one second for the first batch on `entry_receiver`, then
    /// greedily drains any batches already queued. For each batch, the votes
    /// it contains are inserted into `cluster_info`, and non-empty batches are
    /// forwarded on `entry_sender`.
    ///
    /// # Errors
    /// Returns an error if the initial `recv_timeout` fails (timeout or
    /// disconnect) or if sending on `entry_sender` fails.
    pub fn compute_vote_and_send_entries(
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        entry_sender: &Sender<Vec<Entry>>,
        entry_receiver: &Receiver<Vec<Entry>>,
    ) -> Result<()> {
        let mut ventries = Vec::new();
        // Wait (up to 1s) for the first batch; propagate timeout/disconnect to
        // the caller, which distinguishes the two cases.
        let mut received_entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
        let now = Instant::now();
        let mut num_new_entries = 0;
        // Coalesce: drain every batch that is already queued so they are all
        // handled in this pass.
        loop {
            num_new_entries += received_entries.len();
            ventries.push(received_entries);
            if let Ok(n) = entry_receiver.try_recv() {
                received_entries = n;
            } else {
                break;
            }
        }
        inc_new_counter_info!("leader_vote_stage-entries_received", num_new_entries);
        debug!("leader_vote_stage entries: {}", num_new_entries);
        for entries in ventries {
            // Extract the votes carried by this batch and share them through
            // the gossip network.
            let votes = &entries.votes();
            cluster_info.write().unwrap().insert_votes(&votes);
            inc_new_counter_info!("leader_vote_stage-write_entries", entries.len());
            //TODO(anatoly): real stake based voting needs to change this
            //leader simply votes if the current set of validators have voted
            //on a valid last id
            trace!("New entries? {}", entries.len());
            if !entries.is_empty() {
                inc_new_counter_info!("leader_vote_stage-recv_vote", votes.len());
                inc_new_counter_info!("leader_vote_stage-entries_sent", entries.len());
                trace!("broadcasting {}", entries.len());
                // Forward the batch to the downstream consumer returned by
                // `new` (the `entry_receiver_forward` side of the channel).
                entry_sender.send(entries)?;
            }
        }
        inc_new_counter_info!(
            "leader_vote_stage-time_ms",
            duration_as_ms(&now.elapsed()) as usize
        );
        Ok(())
    }
    /// Create a new LeaderVoteStage for voting and broadcasting entries.
    ///
    /// Spawns a UDP responder thread for outgoing vote blobs and a vote-loop
    /// thread that repeatedly calls `compute_vote_and_send_entries` and
    /// `send_leader_vote`. Returns the stage together with the receiver that
    /// carries the forwarded entry batches.
    pub fn new(
        keypair: Arc<Keypair>,
        bank: Arc<Bank>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        entry_receiver: Receiver<Vec<Entry>>,
    ) -> (Self, Receiver<Vec<Entry>>) {
        // Channel handing vote blobs to the UDP responder thread.
        let (vote_blob_sender, vote_blob_receiver) = channel();
        // Bind to an ephemeral local port for sending votes.
        let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
        let t_responder = responder(
            "leader_vote_stage_vote_sender",
            Arc::new(send),
            vote_blob_receiver,
        );
        let (entry_sender, entry_receiver_forward) = channel();
        let vote_thread = Builder::new()
            .name("solana-writer".to_string())
            .spawn(move || {
                let mut last_vote = 0;
                let mut last_valid_validator_timestamp = 0;
                let id = cluster_info.read().unwrap().id;
                loop {
                    if let Err(e) = Self::compute_vote_and_send_entries(
                        &cluster_info,
                        &entry_sender,
                        &entry_receiver,
                    ) {
                        match e {
                            // Upstream hung up: terminate the loop.
                            Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => {
                                break;
                            }
                            // No entries this round; still fall through to
                            // send the leader vote below.
                            Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                            _ => {
                                inc_new_counter_info!(
                                    "leader_vote_stage-compute_vote_and_send_entries-error",
                                    1
                                );
                                error!("{:?}", e);
                            }
                        }
                    };
                    // Cast the leader's own vote; errors are logged and
                    // counted but do not stop the loop.
                    if let Err(e) = send_leader_vote(
                        &id,
                        &keypair,
                        &bank,
                        &cluster_info,
                        &vote_blob_sender,
                        &mut last_vote,
                        &mut last_valid_validator_timestamp,
                    ) {
                        inc_new_counter_info!("leader_vote_stage-leader_vote-error", 1);
                        error!("{:?}", e);
                    }
                }
            }).unwrap();
        let thread_hdls = vec![t_responder];
        (
            LeaderVoteStage {
                vote_thread,
                thread_hdls,
            },
            entry_receiver_forward,
        )
    }
}
impl Service for LeaderVoteStage {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls {
thread_hdl.join()?;
}
self.vote_thread.join()
}
}

View File

@ -3,7 +3,7 @@
//! access read to a persistent file-based ledger.
use bincode::{self, deserialize, deserialize_from, serialize_into, serialized_size};
#[cfg(test)]
use budget_instruction::Vote;
use budget_transaction::BudgetTransaction;
#[cfg(test)]
use chrono::prelude::Utc;
@ -25,8 +25,6 @@ use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::Path;
use transaction::Transaction;
use vote_program::Vote;
use vote_transaction::VoteTransaction;
use window::WINDOW_SIZE;
//
@ -498,7 +496,7 @@ impl Block for [Entry] {
entry
.transactions
.iter()
.flat_map(VoteTransaction::get_votes)
.filter_map(BudgetTransaction::vote)
}).collect()
}
}
@ -686,6 +684,7 @@ pub fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
mod tests {
use super::*;
use bincode::serialized_size;
use budget_instruction::Vote;
use budget_transaction::BudgetTransaction;
use entry::{next_entry, Entry};
use hash::hash;
@ -694,7 +693,6 @@ mod tests {
use std;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use transaction::Transaction;
use vote_program::Vote;
#[test]
fn test_verify_slice() {
@ -716,8 +714,15 @@ mod tests {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let keypair = Keypair::new();
let vote_account = Keypair::new();
let tx0 = Transaction::vote_new(&vote_account, Vote { tick_height: 1 }, one, 1);
let tx0 = Transaction::budget_new_vote(
&keypair,
Vote {
version: 0,
contact_info_version: 1,
},
one,
1,
);
let tx1 = Transaction::budget_new_timestamp(
&keypair,
keypair.pubkey(),
@ -767,8 +772,15 @@ mod tests {
let id = Hash::default();
let next_id = hash(&id.as_ref());
let keypair = Keypair::new();
let vote_account = Keypair::new();
let tx_small = Transaction::vote_new(&vote_account, Vote { tick_height: 1 }, next_id, 2);
let tx_small = Transaction::budget_new_vote(
&keypair,
Vote {
version: 0,
contact_info_version: 2,
},
next_id,
2,
);
let tx_large = Transaction::budget_new(&keypair, keypair.pubkey(), 1, next_id);
let tx_small_size = serialized_size(&tx_small).unwrap() as usize;

View File

@ -35,6 +35,7 @@ pub mod fetch_stage;
pub mod fullnode;
pub mod hash;
pub mod leader_scheduler;
pub mod leader_vote_stage;
pub mod ledger;
pub mod ledger_write_stage;
pub mod loader_transaction;
@ -79,9 +80,7 @@ pub mod token_program;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod vote_program;
pub mod vote_stage;
pub mod vote_transaction;
pub mod wallet;
pub mod window;
pub mod window_service;

View File

@ -40,7 +40,9 @@ fn create_path(name: &str) -> PathBuf {
)
}
const NATIVE_LOADER_PROGRAM_ID: [u8; 32] = [2u8; 32];
const NATIVE_LOADER_PROGRAM_ID: [u8; 32] = [
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
// All native programs export a symbol named process()
const ENTRYPOINT: &str = "process";
@ -68,16 +70,21 @@ pub fn process_transaction(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8])
trace!("Call native {:?}", name);
let path = create_path(&name);
// TODO linux tls bug can cause crash on dlclose(), workaround by never unloading
let library = Library::open(Some(path), libc::RTLD_NODELETE | libc::RTLD_NOW).unwrap();
unsafe {
let entrypoint: Symbol<Entrypoint> = match library.get(ENTRYPOINT.as_bytes()) {
Ok(s) => s,
Err(e) => {
warn!("{:?}: Unable to find {:?} in program", e, ENTRYPOINT);
return false;
}
};
return entrypoint(&mut keyed_accounts[1..], tx_data);
match Library::open(Some(&path), libc::RTLD_NODELETE | libc::RTLD_NOW) {
Ok(library) => unsafe {
let entrypoint: Symbol<Entrypoint> = match library.get(ENTRYPOINT.as_bytes()) {
Ok(s) => s,
Err(e) => {
warn!("{:?}: Unable to find {:?} in program", e, ENTRYPOINT);
return false;
}
};
return entrypoint(&mut keyed_accounts[1..], tx_data);
},
Err(e) => {
warn!("Unable to load: {:?}", e);
return false;
}
}
} else if let Ok(instruction) = deserialize(tx_data) {
match instruction {

View File

@ -6,6 +6,8 @@ use counter::Counter;
use entry::{EntryReceiver, EntrySender};
use hash::Hash;
use influx_db_client as influxdb;
use leader_scheduler::LeaderScheduler;
use ledger::Block;
use log::Level;
use metrics;
use result::{Error, Result};
@ -57,10 +59,11 @@ impl ReplicateStage {
cluster_info: &Arc<RwLock<ClusterInfo>>,
window_receiver: &EntryReceiver,
keypair: &Arc<Keypair>,
vote_account_keypair: &Arc<Keypair>,
vote_blob_sender: Option<&BlobSender>,
ledger_entry_sender: &EntrySender,
tick_height: &mut u64,
entry_height: &mut u64,
leader_scheduler: &Arc<RwLock<LeaderScheduler>>,
) -> Result<Hash> {
let timer = Duration::new(1, 0);
//coalesce all the available entries into a single vote
@ -78,23 +81,37 @@ impl ReplicateStage {
let mut res = Ok(());
let last_entry_id = {
let mut num_entries_to_write = entries.len();
let current_leader = bank
.get_current_leader()
.expect("Scheduled leader id should never be unknown while processing entries");
for (i, entry) in entries.iter().enumerate() {
res = bank.process_entry(&entry);
let my_id = keypair.pubkey();
let scheduled_leader = bank
.get_current_leader()
.expect("Scheduled leader id should never be unknown while processing entries");
// max_tick_height is the PoH height at which the next leader rotation will
// happen. The leader should send an entry such that the total PoH is equal
// to max_tick_height - guard.
// TODO: Introduce a "guard" for the end of transmission periods, the guard
// is assumed to be zero for now.
let max_tick_height = {
let ls_lock = leader_scheduler.read().unwrap();
ls_lock.max_height_for_leader(*tick_height)
};
// TODO: Remove this soon once we boot the leader from ClusterInfo
if scheduled_leader != current_leader {
cluster_info.write().unwrap().set_leader(scheduled_leader);
}
if my_id == scheduled_leader {
num_entries_to_write = i + 1;
break;
res = bank.process_entry(
&entry,
tick_height,
&mut *leader_scheduler.write().unwrap(),
);
// Will run only if leader_scheduler.use_only_bootstrap_leader is false
if let Some(max_tick_height) = max_tick_height {
let ls_lock = leader_scheduler.read().unwrap();
if *tick_height == max_tick_height {
let my_id = keypair.pubkey();
let scheduled_leader = ls_lock.get_scheduled_leader(*tick_height).expect(
"Scheduled leader id should never be unknown while processing entries",
);
cluster_info.write().unwrap().set_leader(scheduled_leader);
if my_id == scheduled_leader {
num_entries_to_write = i + 1;
break;
}
}
}
if res.is_err() {
@ -117,9 +134,11 @@ impl ReplicateStage {
};
if let Some(sender) = vote_blob_sender {
send_validator_vote(bank, vote_account_keypair, &cluster_info, sender)?;
send_validator_vote(bank, keypair, cluster_info, sender)?;
}
cluster_info.write().unwrap().insert_votes(&entries.votes());
inc_new_counter_info!(
"replicate-transactions",
entries.iter().map(|x| x.transactions.len()).sum()
@ -141,12 +160,13 @@ impl ReplicateStage {
pub fn new(
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
bank: Arc<Bank>,
cluster_info: Arc<RwLock<ClusterInfo>>,
window_receiver: EntryReceiver,
exit: Arc<AtomicBool>,
tick_height: u64,
entry_height: u64,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
) -> (Self, EntryReceiver) {
let (vote_blob_sender, vote_blob_receiver) = channel();
let (ledger_entry_sender, ledger_entry_receiver) = channel();
@ -162,15 +182,17 @@ impl ReplicateStage {
let now = Instant::now();
let mut next_vote_secs = 1;
let mut entry_height_ = entry_height;
let mut tick_height_ = tick_height;
let mut last_entry_id = None;
loop {
let leader_id =
bank.get_current_leader()
.expect("Scheduled leader id should never be unknown at this point");
let leader_id = leader_scheduler
.read()
.unwrap()
.get_scheduled_leader(tick_height_)
.expect("Scheduled leader id should never be unknown at this point");
if leader_id == keypair.pubkey() {
return Some(ReplicateStageReturnType::LeaderRotation(
bank.get_tick_height(),
tick_height_,
entry_height_,
// We should never start the TPU / this stage on an exact entry that causes leader
// rotation (Fullnode should automatically transition on startup if it detects
@ -193,10 +215,11 @@ impl ReplicateStage {
&cluster_info,
&window_receiver,
&keypair,
&vote_account_keypair,
vote_sender,
&ledger_entry_sender,
&mut tick_height_,
&mut entry_height_,
&leader_scheduler,
) {
Err(Error::RecvTimeoutError(RecvTimeoutError::Disconnected)) => break,
Err(Error::RecvTimeoutError(RecvTimeoutError::Timeout)) => (),
@ -271,7 +294,7 @@ mod test {
// 1) Give the validator a nonzero number of tokens 2) A vote from the validator .
// This will cause leader rotation after the bootstrap height
let mut ledger_writer = LedgerWriter::open(&my_ledger_path, false).unwrap();
let (active_set_entries, vote_account_keypair) =
let active_set_entries =
make_active_set_entries(&my_keypair, &mint.keypair(), &last_id, &last_id, 0);
last_id = active_set_entries.last().unwrap().id;
let initial_tick_height = genesis_entries
@ -296,23 +319,26 @@ mod test {
Some(bootstrap_height),
);
let leader_scheduler =
Arc::new(RwLock::new(LeaderScheduler::new(&leader_scheduler_config)));
let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
// Set up the bank
let (bank, _, _, _) = Fullnode::new_bank_from_ledger(&my_ledger_path, leader_scheduler);
let (bank, _, _, _) =
Fullnode::new_bank_from_ledger(&my_ledger_path, &mut leader_scheduler);
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));
// Set up the replicate stage
let (entry_sender, entry_receiver) = channel();
let exit = Arc::new(AtomicBool::new(false));
let (replicate_stage, _ledger_writer_recv) = ReplicateStage::new(
Arc::new(my_keypair),
Arc::new(vote_account_keypair),
Arc::new(bank),
Arc::new(RwLock::new(cluster_info_me)),
entry_receiver,
exit.clone(),
initial_tick_height,
initial_entry_len,
leader_scheduler.clone(),
);
// Send enough ticks to trigger leader rotation
@ -349,6 +375,13 @@ mod test {
assert_eq!(exit.load(Ordering::Relaxed), true);
// Check ledger height is correct
let mut leader_scheduler = Arc::try_unwrap(leader_scheduler)
.expect("Multiple references to this RwLock still exist")
.into_inner()
.expect("RwLock for LeaderScheduler is still locked");
leader_scheduler.reset();
let _ignored = remove_dir_all(&my_ledger_path);
}
}

View File

@ -198,16 +198,14 @@ mod tests {
let (mint, leader_ledger_path) = create_tmp_genesis(leader_ledger_path, 100);
info!("starting leader node");
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let network_addr = leader_node.sockets.gossip.local_addr().unwrap();
let leader_info = leader_node.info.clone();
let vote_account_keypair = Arc::new(Keypair::new());
let leader = Fullnode::new(
leader_node,
&leader_ledger_path,
leader_keypair,
vote_account_keypair,
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_info.id),

View File

@ -25,31 +25,31 @@ impl RequestProcessor {
Request::GetAccount { key } => {
let account = self.bank.get_account(&key);
let rsp = (Response::Account { key, account }, rsp_addr);
info!("Response::Account {:?}", rsp);
debug!("Response::Account {:?}", rsp);
Some(rsp)
}
Request::GetLastId => {
let id = self.bank.last_id();
let rsp = (Response::LastId { id }, rsp_addr);
info!("Response::LastId {:?}", rsp);
debug!("Response::LastId {:?}", rsp);
Some(rsp)
}
Request::GetTransactionCount => {
let transaction_count = self.bank.transaction_count() as u64;
let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
info!("Response::TransactionCount {:?}", rsp);
debug!("Response::TransactionCount {:?}", rsp);
Some(rsp)
}
Request::GetSignature { signature } => {
let signature_status = self.bank.has_signature(&signature);
let rsp = (Response::SignatureStatus { signature_status }, rsp_addr);
info!("Response::Signature {:?}", rsp);
debug!("Response::Signature {:?}", rsp);
Some(rsp)
}
Request::GetFinality => {
let time = self.bank.finality();
let rsp = (Response::Finality { time }, rsp_addr);
info!("Response::Finality {:?}", rsp);
debug!("Response::Finality {:?}", rsp);
Some(rsp)
}
}

View File

@ -60,7 +60,7 @@ impl RequestStage {
let blobs = to_blobs(rsps)?;
if !blobs.is_empty() {
info!("process: sending blobs: {}", blobs.len());
debug!("process: sending blobs: {}", blobs.len());
//don't wake up the other side if there is nothing
blob_sender.send(blobs)?;
}

View File

@ -10,7 +10,6 @@ use poh_recorder;
use serde_json;
use std;
use std::any::Any;
use vote_stage;
#[derive(Debug)]
pub enum Error {
@ -28,7 +27,6 @@ pub enum Error {
ErasureError(erasure::ErasureError),
SendError,
PohRecorderError(poh_recorder::PohRecorderError),
VoteError(vote_stage::VoteError),
}
pub type Result<T> = std::result::Result<T, Error>;
@ -102,11 +100,6 @@ impl std::convert::From<poh_recorder::PohRecorderError> for Error {
Error::PohRecorderError(e)
}
}
impl std::convert::From<vote_stage::VoteError> for Error {
fn from(e: vote_stage::VoteError) -> Error {
Error::VoteError(e)
}
}
#[cfg(test)]
mod tests {

View File

@ -28,7 +28,6 @@ pub const RPC_PORT: u16 = 8899;
pub struct JsonRpcService {
thread_hdl: JoinHandle<()>,
exit: Arc<AtomicBool>,
}
impl JsonRpcService {
@ -36,12 +35,11 @@ impl JsonRpcService {
bank: &Arc<Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
rpc_addr: SocketAddr,
exit: Arc<AtomicBool>,
) -> Self {
let exit = Arc::new(AtomicBool::new(false));
let request_processor = JsonRpcRequestProcessor::new(bank.clone());
let info = cluster_info.clone();
let exit_pubsub = exit.clone();
let exit_ = exit.clone();
let thread_hdl = Builder::new()
.name("solana-jsonrpc".to_string())
.spawn(move || {
@ -64,23 +62,14 @@ impl JsonRpcService {
warn!("JSON RPC service unavailable: unable to bind to RPC port {}. \nMake sure this port is not already in use by another application", rpc_addr.port());
return;
}
while !exit_.load(Ordering::Relaxed) {
while !exit.load(Ordering::Relaxed) {
sleep(Duration::from_millis(100));
}
server.unwrap().close();
()
})
.unwrap();
JsonRpcService { thread_hdl, exit }
}
pub fn exit(&self) {
self.exit.store(true, Ordering::Relaxed);
}
pub fn close(self) -> thread::Result<()> {
self.exit();
self.join()
JsonRpcService { thread_hdl }
}
}
@ -390,7 +379,8 @@ mod tests {
ClusterInfo::new(NodeInfo::new_unspecified()).unwrap(),
));
let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 24680);
let rpc_service = JsonRpcService::new(&Arc::new(bank), &cluster_info, rpc_addr);
let exit = Arc::new(AtomicBool::new(false));
let rpc_service = JsonRpcService::new(&Arc::new(bank), &cluster_info, rpc_addr, exit);
let thread = rpc_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
@ -596,11 +586,11 @@ mod tests {
#[test]
fn test_rpc_send_tx() {
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let alice = Mint::new(10_000_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_data = leader.info.clone();
let ledger_path = create_tmp_ledger_with_mint("rpc_send_tx", &alice);
@ -612,16 +602,8 @@ mod tests {
let genesis_entries = &alice.create_entries();
let entry_height = genesis_entries.len() as u64;
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
entry_height,
@ -630,6 +612,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(rpc_port),
);
sleep(Duration::from_millis(900));

View File

@ -27,7 +27,6 @@ pub enum ClientState {
pub struct PubSubService {
thread_hdl: JoinHandle<()>,
exit: Arc<AtomicBool>,
}
impl Service for PubSubService {
@ -39,10 +38,8 @@ impl Service for PubSubService {
}
impl PubSubService {
pub fn new(bank: &Arc<Bank>, pubsub_addr: SocketAddr) -> Self {
pub fn new(bank: &Arc<Bank>, pubsub_addr: SocketAddr, exit: Arc<AtomicBool>) -> Self {
let rpc = RpcSolPubSubImpl::new(JsonRpcRequestProcessor::new(bank.clone()), bank.clone());
let exit = Arc::new(AtomicBool::new(false));
let exit_ = exit.clone();
let thread_hdl = Builder::new()
.name("solana-pubsub".to_string())
.spawn(move || {
@ -63,23 +60,14 @@ impl PubSubService {
warn!("Pubsub service unavailable: unable to bind to port {}. \nMake sure this port is not already in use by another application", pubsub_addr.port());
return;
}
while !exit_.load(Ordering::Relaxed) {
while !exit.load(Ordering::Relaxed) {
sleep(Duration::from_millis(100));
}
server.unwrap().close();
()
})
.unwrap();
PubSubService { thread_hdl, exit }
}
pub fn exit(&self) {
self.exit.store(true, Ordering::Relaxed);
}
pub fn close(self) -> thread::Result<()> {
self.exit();
self.join()
PubSubService { thread_hdl }
}
}
@ -139,7 +127,6 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
subscriber: pubsub::Subscriber<Account>,
pubkey_str: String,
) {
info!("account_subscribe");
let pubkey_vec = bs58::decode(pubkey_str).into_vec().unwrap();
if pubkey_vec.len() != mem::size_of::<Pubkey>() {
subscriber
@ -154,6 +141,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
let id = self.uid.fetch_add(1, atomic::Ordering::SeqCst);
let sub_id = SubscriptionId::Number(id as u64);
info!("account_subscribe: account={:?} id={:?}", pubkey, sub_id);
let sink = subscriber.assign_id(sub_id.clone()).unwrap();
let bank_sub_id = Keypair::new().pubkey();
self.account_subscriptions
@ -166,7 +154,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
}
fn account_unsubscribe(&self, id: SubscriptionId) -> Result<bool> {
info!("account_unsubscribe");
info!("account_unsubscribe: id={:?}", id);
if let Some((bank_sub_id, pubkey)) = self.account_subscriptions.write().unwrap().remove(&id)
{
self.bank.remove_account_subscription(&bank_sub_id, &pubkey);
@ -261,7 +249,8 @@ mod tests {
let alice = Mint::new(10_000);
let bank = Bank::new(&alice);
let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
let pubsub_service = PubSubService::new(&Arc::new(bank), pubsub_addr);
let exit = Arc::new(AtomicBool::new(false));
let pubsub_service = PubSubService::new(&Arc::new(bank), pubsub_addr, exit);
let thread = pubsub_service.thread_hdl.thread();
assert_eq!(thread.name().unwrap(), "solana-pubsub");
}

View File

@ -17,7 +17,10 @@ pub enum StorageError {
InvalidUserData,
}
pub const STORAGE_PROGRAM_ID: [u8; 32] = [1u8; 32];
const STORAGE_PROGRAM_ID: [u8; 32] = [
130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
impl StorageProgram {
pub fn check_id(program_id: &Pubkey) -> bool {

View File

@ -261,7 +261,7 @@ mod test {
#[test]
fn test_sdk_serialize() {
let keypair = Keypair::new();
use budget_program::BUDGET_PROGRAM_ID;
use budget_program::BudgetState;
// CreateAccount
let tx = Transaction::system_create(
@ -270,14 +270,14 @@ mod test {
Hash::default(),
111,
222,
Pubkey::new(&BUDGET_PROGRAM_ID),
BudgetState::id(),
0,
);
assert_eq!(
tx.userdata(0).to_vec(),
vec![
0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 222, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 222, 0, 0, 0, 0, 0, 0, 0, 129, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
);
@ -302,17 +302,12 @@ mod test {
);
// Assign
let tx = Transaction::system_assign(
&keypair,
Hash::default(),
Pubkey::new(&BUDGET_PROGRAM_ID),
0,
);
let tx = Transaction::system_assign(&keypair, Hash::default(), BudgetState::id(), 0);
assert_eq!(
tx.userdata(0).to_vec(),
vec![
1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
1, 0, 0, 0, 129, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0
]
);

View File

@ -26,7 +26,6 @@ use std::time::Instant;
use system_transaction::SystemTransaction;
use timing;
use transaction::Transaction;
use vote_transaction::VoteTransaction;
use influx_db_client as influxdb;
use metrics;
@ -149,29 +148,6 @@ impl ThinClient {
))
}
pub fn create_vote_account(
&self,
node_keypair: &Keypair,
vote_account_id: Pubkey,
last_id: &Hash,
num_tokens: i64,
) -> io::Result<Signature> {
let tx =
Transaction::vote_account_new(&node_keypair, vote_account_id, *last_id, num_tokens);
self.transfer_signed(&tx)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn register_vote_account(
&self,
node_keypair: &Keypair,
vote_account_id: Pubkey,
last_id: &Hash,
) -> io::Result<Signature> {
let tx = Transaction::vote_account_register(node_keypair, vote_account_id, *last_id, 0);
self.transfer_signed(&tx)
}
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
pub fn transfer(
&self,
@ -194,24 +170,6 @@ impl ThinClient {
result
}
pub fn get_account_userdata(&mut self, pubkey: &Pubkey) -> io::Result<Option<Vec<u8>>> {
let req = Request::GetAccount { key: *pubkey };
let data = serialize(&req).expect("serialize GetAccount in pub fn get_account_userdata");
self.requests_socket
.send_to(&data, &self.requests_addr)
.expect("buffer error in pub fn get_account_userdata");
loop {
let resp = self.recv_response()?;
trace!("recv_response {:?}", resp);
if let Response::Account { key, account } = resp {
if key == *pubkey {
return Ok(account.map(|account| account.userdata));
}
}
}
}
/// Request the balance of the user holding `pubkey`. This method blocks
/// until the server sends a response. If the response packet is dropped
/// by the network, this method will hang indefinitely.
@ -488,23 +446,17 @@ mod tests {
#[ignore]
fn test_thin_client() {
logger::setup();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let alice = Mint::new(10_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let ledger_path = create_tmp_ledger_with_mint("thin_client", &alice);
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
0,
@ -513,6 +465,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(0),
);
sleep(Duration::from_millis(900));
@ -542,22 +495,16 @@ mod tests {
#[ignore]
fn test_bad_sig() {
logger::setup();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let alice = Mint::new(10_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_data = leader.info.clone();
let ledger_path = create_tmp_ledger_with_mint("bad_sig", &alice);
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
0,
@ -566,6 +513,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(0),
);
//TODO: remove this sleep, or add a retry so CI is stable
@ -608,25 +556,18 @@ mod tests {
#[test]
fn test_client_check_signature() {
logger::setup();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let alice = Mint::new(10_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_data = leader.info.clone();
let ledger_path = create_tmp_ledger_with_mint("client_check_signature", &alice);
let genesis_entries = &alice.create_entries();
let entry_height = genesis_entries.len() as u64;
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
entry_height,
@ -635,6 +576,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(0),
);
sleep(Duration::from_millis(300));
@ -678,25 +620,18 @@ mod tests {
#[test]
fn test_zero_balance_after_nonzero() {
logger::setup();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let alice = Mint::new(10_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_keypair = Keypair::new();
let leader_data = leader.info.clone();
let ledger_path = create_tmp_ledger_with_mint("zero_balance_check", &alice);
let genesis_entries = &alice.create_entries();
let entry_height = genesis_entries.len() as u64;
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
entry_height,
@ -705,6 +640,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(0),
);
sleep(Duration::from_millis(900));

View File

@ -105,8 +105,9 @@ impl Default for TokenProgram {
}
}
pub const TOKEN_PROGRAM_ID: [u8; 32] = [
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
const TOKEN_PROGRAM_ID: [u8; 32] = [
131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0,
];
impl TokenProgram {

View File

@ -27,18 +27,21 @@
use bank::Bank;
use banking_stage::{BankingStage, BankingStageReturnType};
use cluster_info::ClusterInfo;
use entry::Entry;
use fetch_stage::FetchStage;
use hash::Hash;
use leader_vote_stage::LeaderVoteStage;
use ledger_write_stage::LedgerWriteStage;
use poh_service::Config;
use service::Service;
use signature::Keypair;
use sigverify_stage::SigVerifyStage;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::mpsc::Receiver;
use std::sync::Arc;
use std::sync::{Arc, RwLock};
use std::thread;
pub enum TpuReturnType {
@ -49,6 +52,7 @@ pub struct Tpu {
fetch_stage: FetchStage,
sigverify_stage: SigVerifyStage,
banking_stage: BankingStage,
leader_vote_stage: LeaderVoteStage,
ledger_write_stage: LedgerWriteStage,
exit: Arc<AtomicBool>,
}
@ -56,7 +60,9 @@ pub struct Tpu {
impl Tpu {
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub fn new(
keypair: Arc<Keypair>,
bank: &Arc<Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
tick_duration: Config,
transactions_sockets: Vec<UdpSocket>,
ledger_path: &str,
@ -81,21 +87,28 @@ impl Tpu {
max_tick_height,
);
let (leader_vote_stage, ledger_entry_receiver) =
LeaderVoteStage::new(keypair, bank.clone(), cluster_info.clone(), entry_receiver);
let (ledger_entry_sender, entry_forwarder) = channel();
let ledger_write_stage =
LedgerWriteStage::new(Some(ledger_path), entry_receiver, Some(ledger_entry_sender));
let ledger_write_stage = LedgerWriteStage::new(
Some(ledger_path),
ledger_entry_receiver,
Some(ledger_entry_sender),
);
let tpu = Tpu {
fetch_stage,
sigverify_stage,
banking_stage,
leader_vote_stage,
ledger_write_stage,
exit: exit.clone(),
};
(tpu, entry_forwarder, exit)
}
pub fn exit(&self) {
pub fn exit(&self) -> () {
self.exit.store(true, Ordering::Relaxed);
}
@ -115,6 +128,7 @@ impl Service for Tpu {
fn join(self) -> thread::Result<(Option<TpuReturnType>)> {
self.fetch_stage.join()?;
self.sigverify_stage.join()?;
self.leader_vote_stage.join()?;
self.ledger_write_stage.join()?;
match self.banking_stage.join()? {
Some(BankingStageReturnType::LeaderRotation) => Ok(Some(TpuReturnType::LeaderRotation)),

View File

@ -40,6 +40,7 @@ use bank::Bank;
use blob_fetch_stage::BlobFetchStage;
use cluster_info::ClusterInfo;
use hash::Hash;
use leader_scheduler::LeaderScheduler;
use ledger_write_stage::LedgerWriteStage;
use replicate_stage::{ReplicateStage, ReplicateStageReturnType};
use retransmit_stage::RetransmitStage;
@ -79,8 +80,8 @@ impl Tvu {
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub fn new(
keypair: Arc<Keypair>,
vote_account_keypair: Arc<Keypair>,
bank: &Arc<Bank>,
tick_height: u64,
entry_height: u64,
cluster_info: Arc<RwLock<ClusterInfo>>,
window: SharedWindow,
@ -88,6 +89,7 @@ impl Tvu {
repair_socket: UdpSocket,
retransmit_socket: UdpSocket,
ledger_path: Option<&str>,
leader_scheduler: Arc<RwLock<LeaderScheduler>>,
) -> Self {
let exit = Arc::new(AtomicBool::new(false));
@ -103,22 +105,23 @@ impl Tvu {
let (retransmit_stage, blob_window_receiver) = RetransmitStage::new(
&cluster_info,
window,
bank.get_tick_height(),
tick_height,
entry_height,
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
bank.leader_scheduler.clone(),
leader_scheduler.clone(),
);
let (replicate_stage, ledger_entry_receiver) = ReplicateStage::new(
keypair,
vote_account_keypair,
bank.clone(),
cluster_info,
blob_window_receiver,
exit.clone(),
tick_height,
entry_height,
leader_scheduler,
);
let ledger_write_stage = LedgerWriteStage::new(ledger_path, ledger_entry_receiver, None);
@ -136,7 +139,7 @@ impl Tvu {
self.exit.load(Ordering::Relaxed)
}
pub fn exit(&self) {
pub fn exit(&self) -> () {
self.exit.store(true, Ordering::Relaxed);
}
@ -252,12 +255,7 @@ pub mod tests {
let starting_balance = 10_000;
let mint = Mint::new(starting_balance);
let replicate_addr = target1.info.contact_info.tvu;
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_id,
)));
let mut bank = Bank::new(&mint);
bank.leader_scheduler = leader_scheduler;
let bank = Arc::new(bank);
let bank = Arc::new(Bank::new(&mint));
//start cluster_info1
let mut cluster_info1 = ClusterInfo::new(target1.info.clone()).expect("ClusterInfo::new");
@ -266,18 +264,20 @@ pub mod tests {
let cref1 = Arc::new(RwLock::new(cluster_info1));
let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone());
let vote_account_keypair = Arc::new(Keypair::new());
let tvu = Tvu::new(
Arc::new(target1_keypair),
vote_account_keypair,
&bank,
0,
0,
cref1,
dr_1.1,
target1.sockets.replicate,
target1.sockets.repair,
target1.sockets.retransmit,
None,
Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_id,
))),
);
let mut alice_ref_balance = starting_balance;

View File

@ -1,151 +0,0 @@
//! Vote program
//! Receive and processes votes from validators
use bincode::{deserialize, serialize};
use byteorder::{ByteOrder, LittleEndian};
use solana_sdk::account::Account;
use solana_sdk::pubkey::Pubkey;
use std;
use std::collections::VecDeque;
use transaction::Transaction;
// Upper limit on the size of the Vote State
pub const MAX_STATE_SIZE: usize = 1024;
// Maximum number of votes to keep around
const MAX_VOTE_HISTORY: usize = 32;
/// Errors returned by the Vote program's entry points.
#[derive(Debug, PartialEq)]
pub enum Error {
    /// The transaction userdata could not be decoded into a `VoteInstruction`.
    UserdataDeserializeFailure,
    /// The accounts passed to the instruction do not satisfy its requirements.
    InvalidArguments,
    /// Stored account userdata is malformed (bad length prefix or undecodable state).
    InvalidUserdata,
    /// The destination buffer is too small to hold the serialized vote state.
    UserdataTooSmall,
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Previously every variant displayed as the literal string "error",
        // which made distinct failures indistinguishable in logs; show the
        // variant name instead (reuses the derived Debug representation).
        write!(f, "{:?}", self)
    }
}

/// Convenience alias for results produced by the Vote program.
pub type Result<T> = std::result::Result<T, Error>;
/// A validator's vote, recorded in the vote account's state.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Vote {
    // TODO: add signature of the state here as well
    /// The tick height this vote is for.
    pub tick_height: u64,
}
/// On-chain instructions accepted by the Vote program.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum VoteInstruction {
    /// Register a new "vote account" to represent a particular validator in the Vote Contract,
    /// and initialize the VoteState for this "vote account"
    /// * Transaction::keys[0] - the validator id
    /// * Transaction::keys[1] - the new "vote account" to be associated with the validator
    ///   identified by keys[0] for voting
    RegisterAccount,
    /// Record a `Vote` in the vote account's state; accounts[0] must already be
    /// assigned to the Vote program.
    NewVote(Vote),
}
/// Persistent state stored in a vote account's userdata.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct VoteProgram {
    /// Recorded votes, oldest first; capped at MAX_VOTE_HISTORY entries by
    /// `process_transaction`, which evicts from the front.
    pub votes: VecDeque<Vote>,
    /// Identity of the validator this vote account belongs to.
    pub node_id: Pubkey,
}
/// Well-known 32-byte program id of the built-in Vote program (first byte 6,
/// remainder zero); `check_id`/`id` below compare and construct pubkeys from it.
pub const VOTE_PROGRAM_ID: [u8; 32] = [
    6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
impl VoteProgram {
pub fn check_id(program_id: &Pubkey) -> bool {
program_id.as_ref() == VOTE_PROGRAM_ID
}
pub fn id() -> Pubkey {
Pubkey::new(&VOTE_PROGRAM_ID)
}
pub fn deserialize(input: &[u8]) -> Result<VoteProgram> {
let len = LittleEndian::read_u16(&input[0..2]) as usize;
if len == 0 || input.len() < len + 1 {
Err(Error::InvalidUserdata)
} else {
deserialize(&input[2..=len + 1]).map_err(|err| {
error!("Unable to deserialize vote state: {:?}", err);
Error::InvalidUserdata
})
}
}
pub fn serialize(self: &VoteProgram, output: &mut [u8]) -> Result<()> {
let self_serialized = serialize(self).unwrap();
if output.len() + 2 < self_serialized.len() {
warn!(
"{} bytes required to serialize but only have {} bytes",
self_serialized.len(),
output.len() + 2,
);
return Err(Error::UserdataTooSmall);
}
let serialized_len = self_serialized.len() as u16;
LittleEndian::write_u16(&mut output[0..2], serialized_len);
output[2..=serialized_len as usize + 1].clone_from_slice(&self_serialized);
Ok(())
}
pub fn process_transaction(
tx: &Transaction,
instruction_index: usize,
accounts: &mut [&mut Account],
) -> Result<()> {
match deserialize(tx.userdata(instruction_index)) {
Ok(VoteInstruction::RegisterAccount) => {
// TODO: a single validator could register multiple "vote accounts"
// which would clutter the "accounts" structure.
accounts[1].program_id = Self::id();
let mut vote_state = VoteProgram {
votes: VecDeque::new(),
node_id: *tx.from(),
};
vote_state.serialize(&mut accounts[1].userdata)?;
Ok(())
}
Ok(VoteInstruction::NewVote(vote)) => {
if !Self::check_id(&accounts[0].program_id) {
error!("accounts[0] is not assigned to the VOTE_PROGRAM");
Err(Error::InvalidArguments)?;
}
let mut vote_state = Self::deserialize(&accounts[0].userdata)?;
// TODO: Integrity checks
// a) Verify the vote's bank hash matches what is expected
// b) Verify vote is older than previous votes
// Only keep around the most recent MAX_VOTE_HISTORY votes
if vote_state.votes.len() == MAX_VOTE_HISTORY {
vote_state.votes.pop_front();
}
vote_state.votes.push_back(vote);
vote_state.serialize(&mut accounts[0].userdata)?;
Ok(())
}
Err(_) => {
info!(
"Invalid vote transaction userdata: {:?}",
tx.userdata(instruction_index)
);
Err(Error::UserdataDeserializeFailure)
}
}
}
}

View File

@ -2,90 +2,341 @@
use bank::Bank;
use bincode::serialize;
use budget_transaction::BudgetTransaction;
use cluster_info::ClusterInfo;
use counter::Counter;
use hash::Hash;
use influx_db_client as influxdb;
use log::Level;
use metrics;
use packet::SharedBlob;
use result::{Error, Result};
use result::Result;
use signature::Keypair;
use std::net::SocketAddr;
use solana_sdk::pubkey::Pubkey;
use std::result;
use std::sync::atomic::AtomicUsize;
use std::sync::{Arc, RwLock};
use streamer::BlobSender;
use timing;
use transaction::Transaction;
use vote_program::Vote;
use vote_transaction::VoteTransaction;
pub const VOTE_TIMEOUT_MS: u64 = 1000;
#[derive(Debug, PartialEq, Eq)]
pub enum VoteError {
enum VoteError {
NoValidLastIdsToVoteOn,
NoLeader,
LeaderInfoNotFound,
}
// TODO: Change voting to be on fixed tick intervals based on bank state
pub fn create_new_signed_vote_blob(
last_id: &Hash,
vote_account: &Keypair,
bank: &Arc<Bank>,
keypair: &Keypair,
cluster_info: &Arc<RwLock<ClusterInfo>>,
) -> Result<SharedBlob> {
let shared_blob = SharedBlob::default();
let tick_height = bank.get_tick_height();
let leader_tpu = get_leader_tpu(bank, cluster_info)?;
//TODO: doesn't seem like there is a synchronous call to get height and id
debug!("voting on {:?}", &last_id.as_ref()[..8]);
let vote = Vote { tick_height };
let tx = Transaction::vote_new(&vote_account, vote, *last_id, 0);
let (vote, addr) = {
let mut wcluster_info = cluster_info.write().unwrap();
//TODO: doesn't seem like there is a synchronous call to get height and id
debug!("voting on {:?}", &last_id.as_ref()[..8]);
wcluster_info.new_vote(*last_id)
}?;
let tx = Transaction::budget_new_vote(&keypair, vote, *last_id, 0);
{
let mut blob = shared_blob.write().unwrap();
let bytes = serialize(&tx)?;
let len = bytes.len();
blob.data[..len].copy_from_slice(&bytes);
blob.meta.set_addr(&leader_tpu);
blob.meta.set_addr(&addr);
blob.meta.size = len;
};
}
Ok(shared_blob)
}
fn get_leader_tpu(bank: &Bank, cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
let leader_id = {
if let Some(leader_id) = bank.get_current_leader() {
leader_id
} else {
return Err(Error::VoteError(VoteError::NoLeader));
}
};
fn get_last_id_to_vote_on(
id: &Pubkey,
ids: &[Hash],
bank: &Arc<Bank>,
now: u64,
last_vote: &mut u64,
last_valid_validator_timestamp: &mut u64,
) -> result::Result<(Hash, u64), VoteError> {
let mut valid_ids = bank.count_valid_ids(&ids);
let super_majority_index = (2 * ids.len()) / 3;
let rcluster_info = cluster_info.read().unwrap();
let leader_tpu = rcluster_info
.table
.get(&leader_id)
.map(|leader| leader.contact_info.tpu);
//TODO(anatoly): this isn't stake based voting
debug!(
"{}: valid_ids {}/{} {}",
id,
valid_ids.len(),
ids.len(),
super_majority_index,
);
if let Some(leader_tpu) = leader_tpu {
Ok(leader_tpu)
} else {
Err(Error::VoteError(VoteError::LeaderInfoNotFound))
metrics::submit(
influxdb::Point::new("vote_stage-peer_count")
.add_field("total_peers", influxdb::Value::Integer(ids.len() as i64))
.add_field(
"valid_peers",
influxdb::Value::Integer(valid_ids.len() as i64),
).to_owned(),
);
if valid_ids.len() > super_majority_index {
*last_vote = now;
// Sort by timestamp
valid_ids.sort_by(|a, b| a.1.cmp(&b.1));
let last_id = ids[valid_ids[super_majority_index].0];
return Ok((last_id, valid_ids[super_majority_index].1));
}
if *last_valid_validator_timestamp != 0 {
metrics::submit(
influxdb::Point::new(&"leader-finality")
.add_field(
"duration_ms",
influxdb::Value::Integer((now - *last_valid_validator_timestamp) as i64),
).to_owned(),
);
}
Err(VoteError::NoValidLastIdsToVoteOn)
}
pub fn send_leader_vote(
id: &Pubkey,
keypair: &Keypair,
bank: &Arc<Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
vote_blob_sender: &BlobSender,
last_vote: &mut u64,
last_valid_validator_timestamp: &mut u64,
) -> Result<()> {
let now = timing::timestamp();
if now - *last_vote > VOTE_TIMEOUT_MS {
let ids: Vec<_> = cluster_info.read().unwrap().valid_last_ids();
if let Ok((last_id, super_majority_timestamp)) = get_last_id_to_vote_on(
id,
&ids,
bank,
now,
last_vote,
last_valid_validator_timestamp,
) {
if let Ok(shared_blob) = create_new_signed_vote_blob(&last_id, keypair, cluster_info) {
vote_blob_sender.send(vec![shared_blob])?;
let finality_ms = now - super_majority_timestamp;
*last_valid_validator_timestamp = super_majority_timestamp;
debug!("{} leader_sent_vote finality: {} ms", id, finality_ms);
inc_new_counter_info!("vote_stage-leader_sent_vote", 1);
bank.set_finality((now - *last_valid_validator_timestamp) as usize);
metrics::submit(
influxdb::Point::new(&"leader-finality")
.add_field("duration_ms", influxdb::Value::Integer(finality_ms as i64))
.to_owned(),
);
}
}
}
Ok(())
}
pub fn send_validator_vote(
bank: &Arc<Bank>,
vote_account: &Keypair,
keypair: &Arc<Keypair>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
vote_blob_sender: &BlobSender,
) -> Result<()> {
let last_id = bank.last_id();
if let Ok(shared_blob) = create_new_signed_vote_blob(&last_id, keypair, cluster_info) {
inc_new_counter_info!("replicate-vote_sent", 1);
let shared_blob = create_new_signed_vote_blob(&last_id, vote_account, bank, cluster_info)?;
inc_new_counter_info!("replicate-vote_sent", 1);
vote_blob_sender.send(vec![shared_blob])?;
vote_blob_sender.send(vec![shared_blob])?;
}
Ok(())
}
#[cfg(test)]
pub mod tests {
use super::*;
use bank::Bank;
use bincode::deserialize;
use budget_instruction::Vote;
use cluster_info::{ClusterInfo, NodeInfo};
use entry::next_entry;
use hash::{hash, Hash};
use logger;
use mint::Mint;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::time::Duration;
use system_transaction::SystemTransaction;
use transaction::Transaction;
#[test]
fn test_send_leader_vote() {
logger::setup();
// create a mint/bank
let mint = Mint::new(1000);
let bank = Arc::new(Bank::new(&mint));
let hash0 = Hash::default();
// get a non-default hash last_id
let entry = next_entry(&hash0, 1, vec![]);
bank.register_entry_id(&entry.id);
// Create a leader
let leader_data = NodeInfo::new_with_socketaddr(&"127.0.0.1:1234".parse().unwrap());
let leader_pubkey = leader_data.id.clone();
let mut leader_cluster_info = ClusterInfo::new(leader_data).unwrap();
// give the leader some tokens
let give_leader_tokens_tx =
Transaction::system_new(&mint.keypair(), leader_pubkey.clone(), 100, entry.id);
bank.process_transaction(&give_leader_tokens_tx).unwrap();
leader_cluster_info.set_leader(leader_pubkey);
// Insert 7 agreeing validators / 3 disagreeing
// and votes for new last_id
for i in 0..10 {
let mut validator =
NodeInfo::new_with_socketaddr(&format!("127.0.0.1:234{}", i).parse().unwrap());
let vote = Vote {
version: validator.version + 1,
contact_info_version: 1,
};
if i < 7 {
validator.ledger_state.last_id = entry.id;
}
leader_cluster_info.insert(&validator);
trace!("validator id: {:?}", validator.id);
leader_cluster_info.insert_vote(&validator.id, &vote, entry.id);
}
let leader = Arc::new(RwLock::new(leader_cluster_info));
let (vote_blob_sender, vote_blob_receiver) = channel();
let mut last_vote: u64 = timing::timestamp() - VOTE_TIMEOUT_MS - 1;
let mut last_valid_validator_timestamp = 0;
let res = send_leader_vote(
&mint.pubkey(),
&mint.keypair(),
&bank,
&leader,
&vote_blob_sender,
&mut last_vote,
&mut last_valid_validator_timestamp,
);
trace!("vote result: {:?}", res);
assert!(res.is_ok());
let vote_blob = vote_blob_receiver.recv_timeout(Duration::from_millis(500));
trace!("vote_blob: {:?}", vote_blob);
// leader shouldn't vote yet, not enough votes
assert!(vote_blob.is_err());
// add two more nodes and see that it succeeds
for i in 0..2 {
let mut validator =
NodeInfo::new_with_socketaddr(&format!("127.0.0.1:234{}", i).parse().unwrap());
let vote = Vote {
version: validator.version + 1,
contact_info_version: 1,
};
validator.ledger_state.last_id = entry.id;
leader.write().unwrap().insert(&validator);
trace!("validator id: {:?}", validator.id);
leader
.write()
.unwrap()
.insert_vote(&validator.id, &vote, entry.id);
}
last_vote = timing::timestamp() - VOTE_TIMEOUT_MS - 1;
let res = send_leader_vote(
&Pubkey::default(),
&mint.keypair(),
&bank,
&leader,
&vote_blob_sender,
&mut last_vote,
&mut last_valid_validator_timestamp,
);
trace!("vote result: {:?}", res);
assert!(res.is_ok());
let vote_blob = vote_blob_receiver.recv_timeout(Duration::from_millis(500));
trace!("vote_blob: {:?}", vote_blob);
// leader should vote now
assert!(vote_blob.is_ok());
// vote should be valid
let blob = &vote_blob.unwrap()[0];
let tx = deserialize(&(blob.read().unwrap().data)).unwrap();
assert_eq!(bank.process_transaction(&tx), Ok(()));
}
#[test]
fn test_get_last_id_to_vote_on() {
logger::setup();
let mint = Mint::new(1234);
let bank = Arc::new(Bank::new(&mint));
let mut last_vote = 0;
let mut last_valid_validator_timestamp = 0;
// generate 10 last_ids, register 6 with the bank
let ids: Vec<_> = (0..10)
.map(|i| {
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
if i < 6 {
bank.register_entry_id(&last_id);
}
// sleep to get a different timestamp in the bank
sleep(Duration::from_millis(1));
last_id
}).collect();
// see that we fail to have 2/3rds consensus
assert!(
get_last_id_to_vote_on(
&Pubkey::default(),
&ids,
&bank,
0,
&mut last_vote,
&mut last_valid_validator_timestamp
).is_err()
);
// register another, see passing
bank.register_entry_id(&ids[6]);
let res = get_last_id_to_vote_on(
&Pubkey::default(),
&ids,
&bank,
0,
&mut last_vote,
&mut last_valid_validator_timestamp,
);
if let Ok((hash, timestamp)) = res {
assert!(hash == ids[6]);
assert!(timestamp != 0);
} else {
assert!(false, "get_last_id returned error!: {:?}", res);
}
}
}

View File

@ -1,85 +0,0 @@
//! The `vote_transaction` module provides functionality for creating vote transactions.
use bincode::{deserialize, serialize};
use hash::Hash;
use signature::Keypair;
use solana_sdk::pubkey::Pubkey;
use system_transaction::SystemTransaction;
use transaction::Transaction;
use vote_program::{Vote, VoteInstruction, VoteProgram, MAX_STATE_SIZE};
/// Constructors and accessors for vote-related `Transaction`s.
pub trait VoteTransaction {
    /// Builds a transaction carrying a single `NewVote` instruction, signed by
    /// `vote_account`.
    fn vote_new(vote_account: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self;
    /// Builds a system transaction that creates and funds a new vote account
    /// with MAX_STATE_SIZE bytes of userdata owned by the Vote program.
    fn vote_account_new(
        validator_id: &Keypair,
        new_vote_account_id: Pubkey,
        last_id: Hash,
        num_tokens: i64,
    ) -> Self;
    /// Builds a transaction that registers `vote_account_id` to the validator
    /// identified by `validator_id`.
    fn vote_account_register(
        validator_id: &Keypair,
        vote_account_id: Pubkey,
        last_id: Hash,
        fee: i64,
    ) -> Self;
    /// Extracts every `NewVote` carried by this transaction's Vote-program
    /// instructions.
    fn get_votes(&self) -> Vec<(Pubkey, Vote, Hash)>;
}
impl VoteTransaction for Transaction {
    /// Creates a transaction carrying a single `NewVote` instruction, signed
    /// by the vote account.
    fn vote_new(vote_account: &Keypair, vote: Vote, last_id: Hash, fee: i64) -> Self {
        let instruction = VoteInstruction::NewVote(vote);
        let userdata = serialize(&instruction).expect("serialize instruction");
        Transaction::new(vote_account, &[], VoteProgram::id(), userdata, last_id, fee)
    }

    /// Creates a system transaction that allocates a new vote account with
    /// MAX_STATE_SIZE bytes of userdata, owned by the Vote program.
    fn vote_account_new(
        validator_id: &Keypair,
        new_vote_account_id: Pubkey,
        last_id: Hash,
        num_tokens: i64,
    ) -> Self {
        Transaction::system_create(
            validator_id,
            new_vote_account_id,
            last_id,
            num_tokens,
            MAX_STATE_SIZE as u64,
            VoteProgram::id(),
            0,
        )
    }

    /// Creates a transaction that registers `vote_account_id` as the
    /// validator's vote account.
    fn vote_account_register(
        validator_id: &Keypair,
        vote_account_id: Pubkey,
        last_id: Hash,
        fee: i64,
    ) -> Self {
        let register_tx = VoteInstruction::RegisterAccount;
        let userdata = serialize(&register_tx).unwrap();
        Transaction::new(
            validator_id,
            &[vote_account_id],
            VoteProgram::id(),
            userdata,
            last_id,
            fee,
        )
    }

    /// Returns every `NewVote` carried by this transaction's Vote-program
    /// instructions, paired with the sending key and this transaction's
    /// last_id.
    fn get_votes(&self) -> Vec<(Pubkey, Vote, Hash)> {
        let mut votes = vec![];
        for i in 0..self.instructions.len() {
            let tx_program_id = self.program_id(i);
            if VoteProgram::check_id(&tx_program_id) {
                // `vote_new` serializes a bare `VoteInstruction` (and
                // `VoteProgram::process_transaction` decodes it the same way),
                // so decode a bare `VoteInstruction` here too. The previous
                // code decoded an `Option<VoteInstruction>`, whose bincode
                // layout (leading Some/None tag byte) never matches the bytes
                // written by `vote_new`, silently dropping every vote.
                if let Ok(VoteInstruction::NewVote(vote)) = deserialize(self.userdata(i)) {
                    // NOTE(review): assumes the voting key is account_keys[0]
                    // for every instruction — confirm against callers.
                    votes.push((self.account_keys[0], vote, self.last_id))
                }
            }
        }
        votes
    }
}
#[cfg(test)]
mod tests {}

View File

@ -780,7 +780,6 @@ mod tests {
use signature::{read_keypair, read_pkcs8, Keypair, KeypairUtil};
use std::fs::remove_dir_all;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
#[test]
fn test_wallet_parse_command() {
@ -1075,11 +1074,11 @@ mod tests {
#[ignore]
fn test_wallet_process_command() {
let (alice, ledger_path) = create_tmp_genesis("wallet_process_command", 10_000_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let leader_data1 = leader.info.clone();
@ -1087,14 +1086,8 @@ mod tests {
let mut config = WalletConfig::default();
let rpc_port = 12345; // Needs to be distinct known number to not conflict with other tests
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
0,
@ -1103,6 +1096,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(rpc_port),
);
sleep(Duration::from_millis(900));
@ -1158,10 +1152,10 @@ mod tests {
#[test]
fn test_wallet_request_airdrop() {
let (alice, ledger_path) = create_tmp_genesis("wallet_request_airdrop", 10_000_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
@ -1169,15 +1163,8 @@ mod tests {
let genesis_entries = &alice.create_entries();
let entry_height = genesis_entries.len() as u64;
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
entry_height,
@ -1186,6 +1173,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(rpc_port),
);
sleep(Duration::from_millis(900));
@ -1239,11 +1227,11 @@ mod tests {
#[ignore]
fn test_wallet_timestamp_tx() {
let (alice, ledger_path) = create_tmp_genesis("wallet_timestamp_tx", 10_000_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let leader_data1 = leader.info.clone();
@ -1253,14 +1241,8 @@ mod tests {
let mut config_witness = WalletConfig::default();
let rpc_port = 13579; // Needs to be distinct known number to not conflict with other tests
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
0,
@ -1269,6 +1251,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(rpc_port),
);
sleep(Duration::from_millis(900));
@ -1366,9 +1349,9 @@ mod tests {
#[ignore]
fn test_wallet_witness_tx() {
let (alice, ledger_path) = create_tmp_genesis("wallet_witness_tx", 10_000_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let leader_data1 = leader.info.clone();
@ -1378,14 +1361,8 @@ mod tests {
let mut config_witness = WalletConfig::default();
let rpc_port = 11223; // Needs to be distinct known number to not conflict with other tests
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
0,
@ -1394,6 +1371,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(rpc_port),
);
sleep(Duration::from_millis(900));
@ -1489,9 +1467,9 @@ mod tests {
#[ignore]
fn test_wallet_cancel_tx() {
let (alice, ledger_path) = create_tmp_genesis("wallet_cancel_tx", 10_000_000);
let mut bank = Bank::new(&alice);
let bank = Bank::new(&alice);
let bob_pubkey = Keypair::new().pubkey();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let leader_data1 = leader.info.clone();
@ -1501,14 +1479,8 @@ mod tests {
let mut config_witness = WalletConfig::default();
let rpc_port = 13456; // Needs to be distinct known number to not conflict with other tests
let leader_scheduler = Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader(
leader_data.id,
)));
bank.leader_scheduler = leader_scheduler;
let vote_account_keypair = Arc::new(Keypair::new());
let server = Fullnode::new_with_bank(
leader_keypair,
vote_account_keypair,
bank,
0,
0,
@ -1517,6 +1489,7 @@ mod tests {
None,
&ledger_path,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
Some(rpc_port),
);
sleep(Duration::from_millis(900));

View File

@ -127,8 +127,8 @@ impl WindowUtil for Window {
// 2) We are on the border between seed_rotation_intervals, so the
// schedule won't be known until the entry on that cusp is received
// by the replicate stage (which comes after this stage). Hence, the next
// leader at the beginning of that next epoch will not know they are the
// leader until they receive that last "cusp" entry. The leader also won't ask for repairs
// leader at the beginning of that next epoch will not know he is the
// leader until he receives that last "cusp" entry. He also won't ask for repairs
// for that entry because "is_next_leader" won't be set here. In this case,
// everybody will be blocking waiting for that "cusp" entry instead of repairing,
// until the leader hits "times" >= the max times in calculate_max_repair().

View File

@ -109,7 +109,7 @@ fn make_tiny_test_entries(start_hash: Hash, num: usize) -> Vec<Entry> {
fn test_multi_node_ledger_window() -> result::Result<()> {
logger::setup();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_pubkey = leader_keypair.pubkey().clone();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
@ -136,7 +136,6 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
leader,
&leader_ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -149,7 +148,7 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
// start up another validator from zero, converge and then check
// balances
let keypair = Arc::new(Keypair::new());
let keypair = Keypair::new();
let validator_pubkey = keypair.pubkey().clone();
let validator = Node::new_localhost_with_pubkey(keypair.pubkey());
let validator_data = validator.info.clone();
@ -157,7 +156,6 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
validator,
&zero_ledger_path,
keypair,
Arc::new(Keypair::new()),
Some(leader_data.contact_info.ncp),
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -208,7 +206,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
logger::setup();
const N: usize = 5;
trace!("test_multi_node_validator_catchup_from_zero");
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_pubkey = leader_keypair.pubkey().clone();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
@ -229,7 +227,6 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
leader,
&leader_ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -242,7 +239,7 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
let mut nodes = vec![server];
for _ in 0..N {
let keypair = Arc::new(Keypair::new());
let keypair = Keypair::new();
let validator_pubkey = keypair.pubkey().clone();
let validator = Node::new_localhost_with_pubkey(keypair.pubkey());
let ledger_path = tmp_copy_ledger(
@ -261,7 +258,6 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
validator,
&ledger_path,
keypair,
Arc::new(Keypair::new()),
Some(leader_data.contact_info.ncp),
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -292,13 +288,12 @@ fn test_multi_node_validator_catchup_from_zero() -> result::Result<()> {
success = 0;
// start up another validator from zero, converge and then check everyone's
// balances
let keypair = Arc::new(Keypair::new());
let keypair = Keypair::new();
let validator = Node::new_localhost_with_pubkey(keypair.pubkey());
let val = Fullnode::new(
validator,
&zero_ledger_path,
keypair,
Arc::new(Keypair::new()),
Some(leader_data.contact_info.ncp),
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -352,7 +347,7 @@ fn test_multi_node_basic() {
logger::setup();
const N: usize = 5;
trace!("test_multi_node_basic");
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_pubkey = leader_keypair.pubkey().clone();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
@ -365,7 +360,6 @@ fn test_multi_node_basic() {
leader,
&leader_ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -378,7 +372,7 @@ fn test_multi_node_basic() {
let mut nodes = vec![server];
for _ in 0..N {
let keypair = Arc::new(Keypair::new());
let keypair = Keypair::new();
let validator_pubkey = keypair.pubkey().clone();
let validator = Node::new_localhost_with_pubkey(keypair.pubkey());
let ledger_path = tmp_copy_ledger(&leader_ledger_path, "multi_node_basic");
@ -394,7 +388,6 @@ fn test_multi_node_basic() {
validator,
&ledger_path,
keypair,
Arc::new(Keypair::new()),
Some(leader_data.contact_info.ncp),
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -433,7 +426,7 @@ fn test_multi_node_basic() {
#[ignore]
fn test_boot_validator_from_file() -> result::Result<()> {
logger::setup();
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_pubkey = leader_keypair.pubkey();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let bob_pubkey = Keypair::new().pubkey();
@ -446,7 +439,6 @@ fn test_boot_validator_from_file() -> result::Result<()> {
leader,
&leader_ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -458,7 +450,7 @@ fn test_boot_validator_from_file() -> result::Result<()> {
send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, 500, Some(1000)).unwrap();
assert_eq!(leader_balance, 1000);
let keypair = Arc::new(Keypair::new());
let keypair = Keypair::new();
let validator = Node::new_localhost_with_pubkey(keypair.pubkey());
let validator_data = validator.info.clone();
let ledger_path = tmp_copy_ledger(&leader_ledger_path, "boot_validator_from_file");
@ -467,7 +459,6 @@ fn test_boot_validator_from_file() -> result::Result<()> {
validator,
&ledger_path,
keypair,
Arc::new(Keypair::new()),
Some(leader_data.contact_info.ncp),
false,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -486,14 +477,13 @@ fn test_boot_validator_from_file() -> result::Result<()> {
}
fn create_leader(ledger_path: &str) -> (NodeInfo, Fullnode) {
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_data = leader.info.clone();
let leader_fullnode = Fullnode::new(
leader,
&ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
None,
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
@ -542,7 +532,7 @@ fn test_leader_restart_validator_start_from_old_ledger() -> result::Result<()> {
let (leader_data, leader_fullnode) = create_leader(&ledger_path);
// start validator from old ledger
let keypair = Arc::new(Keypair::new());
let keypair = Keypair::new();
let validator = Node::new_localhost_with_pubkey(keypair.pubkey());
let validator_data = validator.info.clone();
@ -550,7 +540,6 @@ fn test_leader_restart_validator_start_from_old_ledger() -> result::Result<()> {
validator,
&stale_ledger_path,
keypair,
Arc::new(Keypair::new()),
Some(leader_data.contact_info.ncp),
false,
LeaderScheduler::from_bootstrap_leader(leader_data.id),
@ -599,7 +588,7 @@ fn test_multi_node_dynamic_network() {
Err(_) => 120,
};
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_pubkey = leader_keypair.pubkey().clone();
let leader = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let bob_pubkey = Keypair::new().pubkey();
@ -615,7 +604,6 @@ fn test_multi_node_dynamic_network() {
leader,
&leader_ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
None,
true,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -687,8 +675,7 @@ fn test_multi_node_dynamic_network() {
let val = Fullnode::new(
validator,
&ledger_path,
Arc::new(keypair),
Arc::new(Keypair::new()),
keypair,
Some(leader_data.contact_info.ncp),
true,
LeaderScheduler::from_bootstrap_leader(leader_pubkey),
@ -793,7 +780,7 @@ fn test_leader_to_validator_transition() {
let validator_keypair = Keypair::new();
// Create the leader node information
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_info = leader_node.info.clone();
@ -814,7 +801,7 @@ fn test_leader_to_validator_transition() {
// Write the bootstrap entries to the ledger that will cause leader rotation
// after the bootstrap height
let mut ledger_writer = LedgerWriter::open(&leader_ledger_path, false).unwrap();
let (bootstrap_entries, _) =
let bootstrap_entries =
make_active_set_entries(&validator_keypair, &mint.keypair(), &last_id, &last_id, 0);
ledger_writer.write_entries(&bootstrap_entries).unwrap();
@ -832,7 +819,6 @@ fn test_leader_to_validator_transition() {
leader_node,
&leader_ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
Some(leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -885,7 +871,7 @@ fn test_leader_to_validator_transition() {
_ => panic!("Expected reason for exit to be leader rotation"),
}
// Query newly transitioned validator to make sure that they have the proper balances in
// Query newly transitioned validator to make sure that he has the proper balances in
// the after the transitions
let mut leader_client = mk_client(&leader_info);
@ -897,10 +883,8 @@ fn test_leader_to_validator_transition() {
// Check the ledger to make sure it's the right height, we should've
// transitioned after tick_height == bootstrap_height
let (_, tick_height, _, _) = Fullnode::new_bank_from_ledger(
&leader_ledger_path,
Arc::new(RwLock::new(LeaderScheduler::default())),
);
let (_, tick_height, _, _) =
Fullnode::new_bank_from_ledger(&leader_ledger_path, &mut LeaderScheduler::default());
assert_eq!(tick_height, bootstrap_height);
@ -919,12 +903,12 @@ fn test_leader_validator_basic() {
let bob_pubkey = Keypair::new().pubkey();
// Create the leader node information
let leader_keypair = Arc::new(Keypair::new());
let leader_keypair = Keypair::new();
let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
let leader_info = leader_node.info.clone();
// Create the validator node information
let validator_keypair = Arc::new(Keypair::new());
let validator_keypair = Keypair::new();
let validator_node = Node::new_localhost_with_pubkey(validator_keypair.pubkey());
// Make a common mint and a genesis entry for both leader + validator ledgers
@ -947,7 +931,7 @@ fn test_leader_validator_basic() {
// Write the bootstrap entries to the ledger that will cause leader rotation
// after the bootstrap height
let mut ledger_writer = LedgerWriter::open(&leader_ledger_path, false).unwrap();
let (active_set_entries, vote_account_keypair) =
let active_set_entries =
make_active_set_entries(&validator_keypair, &mint.keypair(), &last_id, &last_id, 0);
ledger_writer.write_entries(&active_set_entries).unwrap();
@ -962,23 +946,21 @@ fn test_leader_validator_basic() {
Some(bootstrap_height),
);
// Start the validator node
let mut validator = Fullnode::new(
validator_node,
&validator_ledger_path,
validator_keypair,
Arc::new(vote_account_keypair),
Some(leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
);
// Start the leader fullnode
let mut leader = Fullnode::new(
leader_node,
&leader_ledger_path,
leader_keypair,
Arc::new(Keypair::new()),
Some(leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
);
// Start the validator node
let mut validator = Fullnode::new(
validator_node,
&validator_ledger_path,
validator_keypair,
Some(leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1017,7 +999,7 @@ fn test_leader_validator_basic() {
_ => panic!("Expected reason for exit to be leader rotation"),
}
// Query newly transitioned validator to make sure they have the proper balances
// Query newly transitioned validator to make sure that he has the proper balances
// in the bank after the transitions
let mut leader_client = mk_client(&leader_info);
@ -1089,7 +1071,7 @@ fn test_dropped_handoff_recovery() {
logger::setup();
// Create the bootstrap leader node information
let bootstrap_leader_keypair = Arc::new(Keypair::new());
let bootstrap_leader_keypair = Keypair::new();
let bootstrap_leader_node = Node::new_localhost_with_pubkey(bootstrap_leader_keypair.pubkey());
let bootstrap_leader_info = bootstrap_leader_node.info.clone();
@ -1104,17 +1086,17 @@ fn test_dropped_handoff_recovery() {
.id;
// Create the validator keypair that will be the next leader in line
let next_leader_keypair = Arc::new(Keypair::new());
let next_leader_keypair = Keypair::new();
// Create a common ledger with entries in the beginning that will add only
// the "next_leader" validator to the active set for leader election, guaranteeing
// they are the next leader after bootstrap_height
// he is the next leader after bootstrap_height
let mut ledger_paths = Vec::new();
ledger_paths.push(bootstrap_leader_ledger_path.clone());
// Make the entries to give the next_leader validator some stake so that they will be in
// Make the entries to give the next_leader validator some stake so that he will be in
// leader election active set
let (active_set_entries, vote_account_keypair) =
let active_set_entries =
make_active_set_entries(&next_leader_keypair, &mint.keypair(), &last_id, &last_id, 0);
// Write the entries
@ -1149,7 +1131,6 @@ fn test_dropped_handoff_recovery() {
bootstrap_leader_node,
&bootstrap_leader_ledger_path,
bootstrap_leader_keypair,
Arc::new(Keypair::new()),
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1159,7 +1140,7 @@ fn test_dropped_handoff_recovery() {
// Start up the validators other than the "next_leader" validator
for _ in 0..(N - 1) {
let kp = Arc::new(Keypair::new());
let kp = Keypair::new();
let validator_ledger_path = tmp_copy_ledger(
&bootstrap_leader_ledger_path,
"test_dropped_handoff_recovery",
@ -1171,7 +1152,6 @@ fn test_dropped_handoff_recovery() {
validator_node,
&validator_ledger_path,
kp,
Arc::new(Keypair::new()),
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1196,7 +1176,6 @@ fn test_dropped_handoff_recovery() {
next_leader_node,
&next_leader_ledger_path,
next_leader_keypair,
Arc::new(vote_account_keypair),
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1268,11 +1247,10 @@ fn test_full_leader_validator_network() {
let mut ledger_paths = Vec::new();
ledger_paths.push(bootstrap_leader_ledger_path.clone());
let mut vote_account_keypairs = VecDeque::new();
for node_keypair in node_keypairs.iter() {
// Make entries to give each node some stake so that they will be in the
// Make entries to give the validator some stake so that he will be in
// leader election active set
let (bootstrap_entries, vote_account_keypair) = make_active_set_entries(
let bootstrap_entries = make_active_set_entries(
node_keypair,
&mint.keypair(),
&last_entry_id,
@ -1280,8 +1258,6 @@ fn test_full_leader_validator_network() {
0,
);
vote_account_keypairs.push_back(vote_account_keypair);
// Write the entries
let mut ledger_writer = LedgerWriter::open(&bootstrap_leader_ledger_path, false).unwrap();
last_entry_id = bootstrap_entries
@ -1310,8 +1286,7 @@ fn test_full_leader_validator_network() {
let bootstrap_leader = Arc::new(RwLock::new(Fullnode::new(
bootstrap_leader_node,
&bootstrap_leader_ledger_path,
Arc::new(node_keypairs.pop_front().unwrap()),
Arc::new(vote_account_keypairs.pop_front().unwrap()),
node_keypairs.pop_front().unwrap(),
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1336,8 +1311,7 @@ fn test_full_leader_validator_network() {
let validator = Arc::new(RwLock::new(Fullnode::new(
validator_node,
&validator_ledger_path,
Arc::new(kp),
Arc::new(vote_account_keypairs.pop_front().unwrap()),
kp,
Some(bootstrap_leader_info.contact_info.ncp),
false,
LeaderScheduler::new(&leader_scheduler_config),
@ -1364,7 +1338,7 @@ fn test_full_leader_validator_network() {
num_reached_target_height = 0;
for n in nodes.iter() {
let node_lock = n.read().unwrap();
let ls_lock = node_lock.get_leader_scheduler();
let ls_lock = &node_lock.leader_scheduler;
if let Some(sh) = ls_lock.read().unwrap().last_seed_height {
if sh >= target_height {
num_reached_target_height += 1;

View File

@ -22,9 +22,6 @@ use std::env;
#[cfg(feature = "bpf_c")]
use std::path::PathBuf;
/// BPF program file prefixes
#[cfg(feature = "bpf_c")]
const PLATFORM_FILE_PREFIX_BPF: &str = "";
/// BPF program file extension
#[cfg(feature = "bpf_c")]
const PLATFORM_FILE_EXTENSION_BPF: &str = "o";
@ -34,14 +31,14 @@ pub const PLATFORM_SECTION_C: &str = ".text.entrypoint";
/// Create a BPF program file name
#[cfg(feature = "bpf_c")]
fn create_bpf_path(name: &str) -> PathBuf {
let pathbuf = {
let mut pathbuf = {
let current_exe = env::current_exe().unwrap();
PathBuf::from(current_exe.parent().unwrap().parent().unwrap())
};
pathbuf.join(
PathBuf::from(PLATFORM_FILE_PREFIX_BPF.to_string() + name)
.with_extension(PLATFORM_FILE_EXTENSION_BPF),
)
pathbuf.push("bpf/");
pathbuf.push(name);
pathbuf.set_extension(PLATFORM_FILE_EXTENSION_BPF);
pathbuf
}
fn check_tx_results(bank: &Bank, tx: &Transaction, result: Vec<solana::bank::Result<()>>) {
@ -208,7 +205,7 @@ fn test_program_native_noop() {
fn test_program_lua_move_funds() {
logger::setup();
let loader = Loader::new_dynamic("lua_loader");
let loader = Loader::new_dynamic("solana_lua_loader");
let userdata = r#"
print("Lua Script!")
local tokens, _ = string.unpack("I", data)
@ -278,7 +275,7 @@ fn test_program_builtin_bpf_noop() {
let loader = Loader::new_bpf();
let program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("noop_c"))
elf::File::open_path(&create_bpf_path("noop"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
@ -307,10 +304,10 @@ fn test_program_builtin_bpf_noop() {
fn test_program_bpf_noop_c() {
logger::setup();
let loader = Loader::new_dynamic("bpf_loader");
let loader = Loader::new_dynamic("solana_bpf_loader");
let program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("noop_c"))
elf::File::open_path(&create_bpf_path("noop"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
@ -510,10 +507,10 @@ impl Dashboard {
fn test_program_bpf_tictactoe_c() {
logger::setup();
let loader = Loader::new_dynamic("bpf_loader");
let loader = Loader::new_dynamic("solana_bpf_loader");
let program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("tictactoe_c"))
elf::File::open_path(&create_bpf_path("tictactoe"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
@ -543,10 +540,10 @@ fn test_program_bpf_tictactoe_c() {
fn test_program_bpf_tictactoe_dashboard_c() {
logger::setup();
let loader = Loader::new_dynamic("bpf_loader");
let loader = Loader::new_dynamic("solana_bpf_loader");
let ttt_program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("tictactoe_c"))
elf::File::open_path(&create_bpf_path("tictactoe"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
@ -581,7 +578,7 @@ fn test_program_bpf_tictactoe_dashboard_c() {
let dashboard_program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("tictactoe_dashboard_c"))
elf::File::open_path(&create_bpf_path("tictactoe_dashboard"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()