Compare commits

...

49 Commits

Author SHA1 Message Date
9a30100a9c Create target/ if it doesn't exist yet 2018-11-09 11:52:19 -08:00
aa741b3147 v0.10.4 2018-11-09 10:29:32 -08:00
09db7b5b52 Determine network version for tar and local deploys 2018-11-09 10:27:18 -08:00
fa9faa2cec Upgrade Rust stable to 1.30.1
Fixes `cargo doc`
2018-11-09 10:25:00 -08:00
d2dc585974 Update wallet to pass full ELFs (#1738) 2018-11-08 09:03:48 -08:00
6721bdde3d v0.10.3 2018-11-07 21:39:51 -08:00
a733873b8f v0.10.2 2018-11-07 20:13:17 -08:00
7c02bbc47c Assign static IPs to {edge,beta}.testnet.solana.com 2018-11-07 20:11:53 -08:00
16a815d2b1 Install native programs in the correct location 2018-11-07 19:44:39 -08:00
ddb490e2fb Continue if docker0 is not present 2018-11-07 19:33:36 -08:00
242d0a23fb Switch testnet to AWS 2018-11-07 18:56:45 -08:00
869009243d Work around AWS key management limitation 2018-11-07 18:48:05 -08:00
7b61f5279c Switch to us-west-1a, us-west-1b is causing trouble 2018-11-07 18:22:24 -08:00
7ef0b815ec Remove docker0 interface if present 2018-11-07 17:49:57 -08:00
8742de789e Shuffle AWS regions 2018-11-07 17:49:57 -08:00
bfadd7b787 Work around AWS boot check weirdness 2018-11-07 15:47:47 -08:00
2e14bfcf4e Shuffle AWS regions 2018-11-07 15:43:56 -08:00
a19426f055 Revert "Restore testnet/testnet-perf to tip of beta channel for now"
This reverts commit 0d0a1c2919.
2018-11-07 15:43:56 -08:00
df366017a7 Invert gpu check 2018-11-07 13:50:42 -08:00
7d76badd03 Support local tarball deploys 2018-11-07 13:43:36 -08:00
8047ab777c Remove all cuda dependencies from release tarball beyond solana-fullnode-cuda 2018-11-07 13:43:24 -08:00
0d0a1c2919 Restore testnet/testnet-perf to tip of beta channel for now 2018-11-07 21:21:33 +00:00
1da90017ce Permit release tag tarballs 2018-11-07 10:33:20 -08:00
0909618efa Switch testnet/testnet-beta to tarball release 2018-11-07 10:29:53 -08:00
28bb7849f4 Fix tarball publishing for tags 2018-11-07 10:26:07 -08:00
9cffd3a1ea AWS AMIs are region specific 2018-11-07 10:04:45 -08:00
917151ce54 s/edge/beta/ 2018-11-07 08:54:44 -08:00
6dcd127634 Increase boot timeout 2018-11-07 08:32:03 -08:00
af66edf8c0 Add AWS-based nets 2018-11-07 07:52:34 -08:00
ab5b921e8f Set imageName if G 2018-11-07 07:52:29 -08:00
6c2843543b Bump EC2 validator machine type 2018-11-07 07:52:25 -08:00
85f74cc537 Upgrade GCP CPU-based testnet to 18.04 2018-11-07 07:52:19 -08:00
43665115b4 Switch testnet/testnet-perf to the latest beta or stable tag 2018-11-07 07:48:33 -08:00
156115c04c Publish release tarballs for tags 2018-11-07 07:48:30 -08:00
a66577eb87 Add support for using a release tar 2018-11-07 07:48:02 -08:00
3345d059e8 Elf relocations (#1724)
Use relocatable BPF ELFs
2018-11-06 14:28:46 -08:00
8c8c5de779 Remove unused debug trace 2018-11-06 14:19:07 -08:00
f03e971598 t 2018-11-06 14:06:07 -08:00
b4a1cdceaa Add timeout to prevent a stuck ssh 2018-11-06 14:02:27 -08:00
b250d20059 Remove node check from client start-up
If the network loses a validator or two, it's the job of the sanity
check to detect this, not the bench clients
2018-11-06 13:59:42 -08:00
dc3b270410 Remove bpf tictactoe 2018-11-06 12:17:52 -08:00
9d5092a71c Set metrics database correctly 2018-11-06 07:24:49 -08:00
a287c9e5fa Remove stray line 2018-11-05 20:53:50 -08:00
ee85d534f9 Update testnet deploy docs 2018-11-05 19:12:43 -08:00
6e1b291c17 Add testnet-manager pipeline 2018-11-05 17:35:55 -08:00
68f7b1ecf3 Rename buildkite-snap to buildkite-secondary 2018-11-05 08:48:09 -08:00
58fe5cabd6 Document BPF C program limitations 2018-11-04 12:30:05 -08:00
8993c6ae24 Try harder to snap download 2018-11-03 00:29:48 +00:00
0e56473add 0.10.1 2018-11-02 16:30:13 -07:00
35 changed files with 738 additions and 832 deletions

View File

@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.10.0"
version = "0.10.4"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
@ -104,14 +104,14 @@ serde_cbor = "0.9.0"
serde_derive = "1.0.27"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana-sdk = { path = "sdk", version = "0.10.0" }
solana-sdk = { path = "sdk", version = "0.10.4" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
untrusted = "0.6.2"
solana-noop = { path = "programs/native/noop", version = "0.10.0" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.0" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.0" }
solana-noop = { path = "programs/native/noop", version = "0.10.4" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.4" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.4" }
[[bench]]
name = "bank"

View File

@ -8,7 +8,7 @@ fn main() {
// Ensure target/perf-libs/ exists. It's been observed that
// a cargo:rerun-if-changed= directive with a non-existent
// directory triggers a rebuild on every |cargo build| invocation
fs::create_dir("target/perf-libs").unwrap_or_else(|err| {
fs::create_dir_all("target/perf-libs").unwrap_or_else(|err| {
if err.kind() != std::io::ErrorKind::AlreadyExists {
panic!("Unable to create target/perf-libs: {:?}", err);
}
@ -29,8 +29,6 @@ fn main() {
println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
println!("cargo:rerun-if-changed=programs/bpf/c/src/tictactoe.c");
println!("cargo:rerun-if-changed=programs/bpf/c/src/tictactoe_dashboard.c");
println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
let status = Command::new("make")
.current_dir("programs/bpf/c")

View File

@ -1,5 +1,5 @@
steps:
- command: "ci/docker-run.sh solanalabs/rust:1.30.0 ci/test-stable.sh"
- command: "ci/docker-run.sh solanalabs/rust:1.30.1 ci/test-stable.sh"
name: "stable [public]"
env:
CARGO_TARGET_CACHE_NAME: "stable"
@ -36,7 +36,7 @@ steps:
timeout_in_minutes: 20
name: "snap [public]"
- wait
- trigger: "solana-snap"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:

View File

@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/buildkite.yml to pick up the new image tag
FROM rust:1.30.0
FROM rust:1.30.1
RUN set -x && \
apt update && \

View File

@ -5,6 +5,7 @@ cd "$(dirname "$0")/.."
DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
DRYRUN="echo"
CHANNEL=unknown
fi
eval "$(ci/channel-info.sh)"
@ -17,42 +18,54 @@ elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
CHANNEL=beta
fi
if [[ -z $CHANNEL ]]; then
if [[ -n "$BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$BUILDKITE_TAG
elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
else
CHANNEL_OR_TAG=$CHANNEL
fi
if [[ -z $CHANNEL_OR_TAG ]]; then
echo Unable to determine channel to publish into, exiting.
exit 0
fi
echo --- Creating tarball
if [[ -z $DRYRUN ]]; then
(
set -x
rm -rf solana-release/
mkdir solana-release/
(
echo "$CHANNEL"
echo "$CHANNEL_OR_TAG"
git rev-parse HEAD
) > solana-release/version.txt
cargo install --root solana-release
./scripts/install-native-programs.sh solana-release/bin
./fetch-perf-libs.sh
cargo install --features=cuda --root solana-release
./scripts/install-native-programs.sh solana-release
cargo install --features=cuda --root solana-release-cuda
cp solana-release-cuda/bin/solana-fullnode solana-release/bin/solana-fullnode-cuda
tar jvcf solana-release.tar.bz2 solana-release/
)
fi
echo --- AWS S3 Store
if [[ -z $DRYRUN ]]; then
(
set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
$DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
$DRYRUN tar zxf s3cmd-2.0.1.tar.gz
fi
set -x
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
$DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
$DRYRUN tar zxf s3cmd-2.0.1.tar.gz
$DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
s3://solana-release/"$CHANNEL_OR_TAG"/solana-release.tar.bz2
)
else
echo Skipped due to DRYRUN
fi
$DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
s3://solana-release/"$CHANNEL"/solana-release.tar.bz2
exit 0
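
The publish script above now resolves what to publish into by preferring `$BUILDKITE_TAG`, then `$TRIGGERED_BUILDKITE_TAG` (for builds triggered from another pipeline), and finally the channel derived from the branch, exiting quietly when none apply. A minimal bash sketch of that fallback, assuming the same environment variables the script uses:

```bash
#!/usr/bin/env bash
# Sketch of the CHANNEL_OR_TAG resolution used by the publish script above.
# BUILDKITE_TAG / TRIGGERED_BUILDKITE_TAG / CHANNEL are assumed to be supplied
# by Buildkite and ci/channel-info.sh, as in the script.
resolve_channel_or_tag() {
  if [[ -n $BUILDKITE_TAG ]]; then
    echo "$BUILDKITE_TAG"             # explicit release tag, e.g. v0.10.4
  elif [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
    echo "$TRIGGERED_BUILDKITE_TAG"   # tag forwarded by a triggering pipeline
  elif [[ -n $CHANNEL ]]; then
    echo "$CHANNEL"                   # edge/beta/stable derived from the branch
  else
    return 1                          # nothing to publish into
  fi
}

if ! CHANNEL_OR_TAG=$(resolve_channel_or_tag); then
  echo "Unable to determine channel to publish into, exiting."
  exit 0
fi
echo "Publishing to s3://solana-release/$CHANNEL_OR_TAG/solana-release.tar.bz2"
```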

View File

@ -9,10 +9,10 @@ clientNodeCount=0
validatorNodeCount=10
publicNetwork=false
snapChannel=edge
releaseChannel=edge
tarChannelOrTag=edge
delete=false
enableGpu=false
useReleaseChannel=false
useTarReleaseChannel=false
usage() {
exitcode=0
@ -21,18 +21,21 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [name] [zone] [options...]
usage: $0 [name] [cloud] [zone] [options...]
Deploys a CD testnet
name - name of the network
zone - zone to deploy the network into
cloud - cloud provider to use (gce, ec2)
zone - cloud provider zone to deploy the network into
options:
-s edge|beta|stable - Deploy the specified Snap release channel
(default: $snapChannel)
-t edge|beta|stable - Deploy the specified prebuilt tar from channel
(default: $releaseChannel)
-t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
specified release channel (edge|beta|stable) or release tag
(vX.Y.Z)
(default: $tarChannelOrTag)
-n [number] - Number of validator nodes (default: $validatorNodeCount)
-c [number] - Number of client nodes (default: $clientNodeCount)
-P - Use public network IP addresses (default: $publicNetwork)
@ -48,10 +51,12 @@ EOF
}
netName=$1
zone=$2
cloudProvider=$2
zone=$3
[[ -n $netName ]] || usage
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n $zone ]] || usage "Zone not specified"
shift 2
shift 3
while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
case $opt in
@ -79,9 +84,9 @@ while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
;;
t)
case $OPTARG in
edge|beta|stable)
releaseChannel=$OPTARG
useReleaseChannel=true
edge|beta|stable|v*)
tarChannelOrTag=$OPTARG
useTarReleaseChannel=true
;;
*)
usage "Invalid release channel: $OPTARG"
@ -108,7 +113,7 @@ while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
done
gce_create_args=(
create_args=(
-a "$leaderAddress"
-c "$clientNodeCount"
-n "$validatorNodeCount"
@ -118,26 +123,26 @@ gce_create_args=(
if $enableGpu; then
if [[ -z $leaderMachineType ]]; then
gce_create_args+=(-g)
create_args+=(-g)
else
gce_create_args+=(-G "$leaderMachineType")
create_args+=(-G "$leaderMachineType")
fi
fi
if $publicNetwork; then
gce_create_args+=(-P)
create_args+=(-P)
fi
set -x
echo --- gce.sh delete
time net/gce.sh delete -z "$zone" -p "$netName"
echo "--- $cloudProvider.sh delete"
time net/"$cloudProvider".sh delete -z "$zone" -p "$netName"
if $delete; then
exit 0
fi
echo --- gce.sh create
time net/gce.sh create "${gce_create_args[@]}"
echo "--- $cloudProvider.sh create"
time net/"$cloudProvider".sh create "${create_args[@]}"
net/init-metrics.sh -e
echo --- net.sh start
@ -154,9 +159,9 @@ if [[ -n $NO_LEDGER_VERIFY ]]; then
maybeNoLedgerVerify="-o noLedgerVerify"
fi
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
if ! $useReleaseChannel; then
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
if $useTarReleaseChannel; then
time net/net.sh start -t "$tarChannelOrTag" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
else
time net/net.sh start -t "$releaseChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
fi
exit 0
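
The deploy script now takes a `cloud` argument (`gce` or `ec2`) and builds the helper-script path from it, so one flow drives either provider. A short sketch of that dispatch, assuming `net/gce.sh` and `net/ec2.sh` keep the same delete/create interface shown above:

```bash
#!/usr/bin/env bash
# Sketch of the provider dispatch used by ci/testnet-deploy.sh above; -p and -z
# are flags the net/gce.sh and net/ec2.sh scripts already accept.
netName=$1
cloudProvider=$2   # gce or ec2
zone=$3

case $cloudProvider in
gce|ec2) ;;
*) echo "Error: unsupported cloud provider: $cloudProvider"; exit 1 ;;
esac

echo "--- $cloudProvider.sh delete"
net/"$cloudProvider".sh delete -z "$zone" -p "$netName"

echo "--- $cloudProvider.sh create"
net/"$cloudProvider".sh create -z "$zone" -p "$netName"
```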

ci/testnet-manager.sh (new executable file, 359 lines)
View File

@ -0,0 +1,359 @@
#!/bin/bash -e
cd "$(dirname "$0")"/..
if [[ -z $BUILDKITE ]]; then
echo BUILDKITE not defined
exit 1
fi
if [[ -z $SOLANA_METRICS_PARTIAL_CONFIG ]]; then
echo SOLANA_METRICS_PARTIAL_CONFIG not defined
exit 1
fi
if [[ -z $TESTNET ]]; then
TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
fi
if [[ -z $TESTNET_OP ]]; then
TESTNET_OP=$(buildkite-agent meta-data get "testnet-operation" --default "")
fi
if [[ -z $TESTNET || -z $TESTNET_OP ]]; then
(
cat <<EOF
steps:
- block: "Manage Testnet"
fields:
- select: "Network"
key: "testnet"
options:
- label: "testnet"
value: "testnet"
- label: "testnet-perf"
value: "testnet-perf"
- label: "testnet-master"
value: "testnet-master"
- label: "testnet-master-perf"
value: "testnet-master-perf"
- label: "testnet-edge"
value: "testnet-edge"
- label: "testnet-edge-perf"
value: "testnet-edge-perf"
- label: "testnet-beta"
value: "testnet-beta"
- label: "testnet-beta-perf"
value: "testnet-beta-perf"
- select: "Operation"
key: "testnet-operation"
default: "sanity-or-restart"
options:
- label: "Sanity check. Restart network on failure"
value: "sanity-or-restart"
- label: "Start (or restart) the network"
value: "start"
- label: "Stop the network"
value: "stop"
- label: "Sanity check only"
value: "sanity"
- command: "ci/$(basename "$0")"
agents:
- "queue=$BUILDKITE_AGENT_META_DATA_QUEUE"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
export SOLANA_METRICS_CONFIG="db=$TESTNET,$SOLANA_METRICS_PARTIAL_CONFIG"
echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG"
ci/channel-info.sh
eval "$(ci/channel-info.sh)"
case $TESTNET in
testnet-edge|testnet-edge-perf|testnet-master|testnet-master-perf)
CHANNEL_OR_TAG=edge
CHANNEL_BRANCH=$EDGE_CHANNEL
;;
testnet-beta|testnet-beta-perf)
CHANNEL_OR_TAG=beta
CHANNEL_BRANCH=$BETA_CHANNEL
;;
testnet|testnet-perf)
if [[ -n $BETA_CHANNEL_LATEST_TAG ]]; then
CHANNEL_OR_TAG=$BETA_CHANNEL_LATEST_TAG
else
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
fi
CHANNEL_BRANCH=$BETA_CHANNEL
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
(
cat <<EOF
steps:
- trigger: "$BUILDKITE_PIPELINE_SLUG"
async: true
build:
message: "$BUILDKITE_MESSAGE"
branch: "$CHANNEL_BRANCH"
env:
TESTNET: "$TESTNET"
TESTNET_OP: "$TESTNET_OP"
EOF
) | buildkite-agent pipeline upload
exit 0
fi
sanity() {
echo "--- sanity $TESTNET"
case $TESTNET in
testnet-edge)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
)
;;
testnet-edge-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh edge-perf-testnet-solana-com ec2 us-west-2b
)
;;
testnet-beta)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
)
;;
testnet-beta-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh beta-perf-testnet-solana-com ec2 us-west-2b
)
;;
testnet-master)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh master-testnet-solana-com gce us-west1-b
)
;;
testnet-master-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-sanity.sh master-perf-testnet-solana-com gce us-west1-b
)
;;
testnet)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
#ci/testnet-sanity.sh testnet-solana-com gce us-east1-c
ci/testnet-sanity.sh testnet-solana-com ec2 us-west-1a
)
;;
testnet-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export REJECT_EXTRA_NODES=1
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
#ci/testnet-sanity.sh perf-testnet-solana-com ec2 us-east-1a
ci/testnet-sanity.sh perf-testnet-solana-com gce us-west1-b
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
}
start() {
declare maybeDelete=$1
if [[ -z $maybeDelete ]]; then
echo "--- start $TESTNET"
else
echo "--- stop $TESTNET"
fi
case $TESTNET in
testnet-edge)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh edge-testnet-solana-com ec2 us-west-1a \
-s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0ccd4f2239886fa94 \
${maybeDelete:+-d}
)
;;
testnet-edge-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh edge-perf-testnet-solana-com ec2 us-west-2b \
-g -t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
)
;;
testnet-beta)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh beta-testnet-solana-com ec2 us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0f286cf8a0771ce35 \
${maybeDelete:+-d}
)
;;
testnet-beta-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh beta-perf-testnet-solana-com ec2 us-west-2b \
-g -t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
)
;;
testnet-master)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh master-testnet-solana-com gce us-west1-b \
-s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a master-testnet-solana-com \
${maybeDelete:+-d}
)
;;
testnet-master-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh master-perf-testnet-solana-com gce us-west1-b \
-G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
-t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
)
;;
testnet)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
#ci/testnet-deploy.sh testnet-solana-com gce us-east1-c \
# -s "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a testnet-solana-com \
# ${maybeDelete:+-d}
ci/testnet-deploy.sh testnet-solana-com ec2 us-west-1a \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -P -a eipalloc-0fa502bf95f6f18b2 \
${maybeDelete:+-d}
)
;;
testnet-perf)
# shellcheck disable=2030
# shellcheck disable=2031
(
set -ex
export NO_LEDGER_VERIFY=1
export NO_VALIDATOR_SANITY=1
ci/testnet-deploy.sh perf-testnet-solana-com gce us-west1-b \
-G "n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100" \
-t "$CHANNEL_OR_TAG" -c 2 \
${maybeDelete:+-d}
#ci/testnet-deploy.sh perf-testnet-solana-com ec2 us-east-1a \
# -g \
# -t "$CHANNEL_OR_TAG" -c 2 \
# ${maybeDelete:+-d}
)
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
exit 1
;;
esac
}
stop() {
start delete
}
case $TESTNET_OP in
sanity)
sanity
;;
start)
start
;;
stop)
stop
;;
sanity-or-restart)
if sanity; then
echo Pass
else
echo "Sanity failed, restarting the network"
echo "^^^ +++"
start
fi
;;
esac
echo --- fin
exit 0
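
When its inputs are missing, ci/testnet-manager.sh above uploads a `block` step that asks for the network and operation, queues itself again, and exits; the second run then reads the answers from Buildkite meta-data. A reduced sketch of that prompt-then-rerun pattern, with only one select field shown:

```bash
#!/usr/bin/env bash
# Reduced sketch of the prompt-then-rerun pattern in ci/testnet-manager.sh:
# ask for the missing meta-data via a block step, re-queue this same script,
# exit, and let the second run pick up the answer.
TESTNET=$(buildkite-agent meta-data get "testnet" --default "")
if [[ -z $TESTNET ]]; then
  buildkite-agent pipeline upload <<EOF
steps:
  - block: "Manage Testnet"
    fields:
      - select: "Network"
        key: "testnet"
        options:
          - label: "testnet"
            value: "testnet"
  - command: "ci/$(basename "$0")"
EOF
  exit 0
fi
echo "--- operating on $TESTNET"
```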

View File

@ -9,12 +9,13 @@ usage() {
echo "Error: $*"
fi
cat <<EOF
usage: $0 [name] [zone]
usage: $0 [name] [cloud] [zone]
Sanity check a CD testnet
name - name of the network
zone - zone of the network
cloud - cloud provider to use (gce, ec2)
zone - cloud provider zone of the network
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
metrics
@ -23,13 +24,15 @@ EOF
}
netName=$1
zone=$2
cloudProvider=$2
zone=$3
[[ -n $netName ]] || usage ""
[[ -n $cloudProvider ]] || usage "Cloud provider not specified"
[[ -n $zone ]] || usage "Zone not specified"
set -x
echo --- gce.sh config
net/gce.sh config -p "$netName" -z "$zone"
echo "--- $cloudProvider.sh config"
net/"$cloudProvider".sh config -p "$netName" -z "$zone"
net/init-metrics.sh -e
echo --- net.sh sanity
net/net.sh sanity \

View File

@ -4,14 +4,18 @@ Currently we have three testnets:
* `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
* `testnet-perf` - private beta channel testnet with clients trying to flood the network
with transactions until failure. Runs 24/7
* `testnet-master` - private edge channel testnet with clients trying to flood the network
* `testnet-master` - public edge channel testnet accessible via master.testnet.solana.com. Runs 24/7
* `testnet-master-perf` - private edge channel testnet with clients trying to flood the network
with transactions until failure. Runs on weekday mornings for a couple hours
## Deploy process
They are deployed with the `ci/testnet-deploy.sh` script. There is a scheduled buildkite job which runs the deploy;
look at `testnet-deploy` to see the agent which ran it and the logs. There is also a job to trigger the deploy manually.
Validators are selected based on their machine name and everyone gets the binaries installed from snap.
They are deployed with the `ci/testnet-manager.sh` script through a list of [scheduled
buildkite jobs](https://buildkite.com/solana-labs/testnet-management/settings/schedules).
Each testnet can be manually manipulated from buildkite as well. The `-perf`
testnets use a release tarball while the non-`-perf` builds use the snap build
(we've observed that the snap build runs slower than a tarball, but this has yet
to be root-caused).
## Where are the testnet logs?
@ -29,7 +33,8 @@ $ net/ssh.sh
for log location details
## How do I reset the testnet?
Manually trigger the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) pipeline
Manually trigger the [testnet-management](https://buildkite.com/solana-labs/testnet-management) pipeline
and when prompted select the desired testnet
## How can I scale the tx generation rate?
@ -43,5 +48,5 @@ Currently, a merged PR is the only way to test a change on the testnet. But you
can run your own testnet using the scripts in the `net/` directory.
## Adjusting the number of clients or validators on the testnet
Through the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) settings.
Edit `ci/testnet-manager.sh`

View File

@ -49,8 +49,6 @@ elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
declare program="$1"
printf "solana-%s" "$program"
}
# CUDA was/wasn't selected at build time, can't affect CUDA state here
unset SOLANA_CUDA
else
solana_program() {
declare program="$1"

View File

@ -11,7 +11,6 @@ gce)
# shellcheck source=net/scripts/gce-provider.sh
source "$here"/scripts/gce-provider.sh
imageName="ubuntu-16-04-cuda-9-2-new"
cpuLeaderMachineType=n1-standard-16
gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
leaderMachineType=$cpuLeaderMachineType
@ -22,12 +21,11 @@ ec2)
# shellcheck source=net/scripts/ec2-provider.sh
source "$here"/scripts/ec2-provider.sh
imageName="ami-0466e26ccc0e752c1"
cpuLeaderMachineType=m4.4xlarge
gpuLeaderMachineType=p2.xlarge
leaderMachineType=$cpuLeaderMachineType
validatorMachineType=m4.xlarge
clientMachineType=m4.4xlarge
validatorMachineType=m4.2xlarge
clientMachineType=m4.2xlarge
;;
*)
echo "Error: Unknown cloud provider: $cloudProvider"
@ -118,7 +116,7 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
;;
g)
enableGpu=true
leaderMachineType="$gpuLeaderMachineType"
leaderMachineType=$gpuLeaderMachineType
;;
G)
enableGpu=true
@ -131,14 +129,53 @@ while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
bootDiskType=$OPTARG
;;
*)
usage "Error: unhandled option: $opt"
usage "unhandled option: $opt"
;;
esac
done
shift $((OPTIND - 1))
[[ -z $1 ]] || usage "Unexpected argument: $1"
sshPrivateKey="$netConfigDir/id_$prefix"
if [[ $cloudProvider = ec2 ]]; then
# EC2 keys can't be retrieved from running instances the way GCE keys can, so
# save EC2 keys in the user's home directory so that |./ec2.sh config| can at
# least be used on the same host that ran |./ec2.sh create|.
sshPrivateKey="$HOME/.ssh/solana-net-id_$prefix"
else
sshPrivateKey="$netConfigDir/id_$prefix"
fi
case $cloudProvider in
gce)
if $enableGpu; then
# TODO: GPU image is still 16.04-based pending resolution of
# https://github.com/solana-labs/solana/issues/1702
imageName="ubuntu-16-04-cuda-9-2-new"
else
imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
fi
;;
ec2)
# Deep Learning AMI (Ubuntu 16.04-based)
case $region in # (region global variable is set by cloud_SetZone)
us-east-1)
imageName="ami-047daf3f2b162fc35"
;;
us-west-1)
imageName="ami-08c8c7c4a57a6106d"
;;
us-west-2)
imageName="ami-0b63040ee445728bf"
;;
*)
usage "Unsupported region: $region"
;;
esac
;;
*)
echo "Error: Unknown cloud provider: $cloudProvider"
;;
esac
# cloud_ForEachInstance [cmd] [extra args to cmd]
@ -206,13 +243,18 @@ EOF
echo "Waiting for $name to finish booting..."
(
for i in $(seq 1 30); do
if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.instance-startup-complete"); then
break
set -x +e
for i in $(seq 1 60); do
timeout 20s ssh "${sshOptions[@]}" "$publicIp" "ls -l /.instance-startup-complete"
ret=$?
if [[ $ret -eq 0 ]]; then
exit 0
fi
sleep 2
echo "Retry $i..."
done
echo "$name failed to boot."
exit 1
)
echo "$name has booted."
}
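
The boot check above ("Work around AWS boot check weirdness" in the commit list) wraps each ssh probe in `timeout` and bounds the retries, so a wedged connection can no longer hang provisioning. The same pattern as a standalone sketch, assuming `sshOptions` and `publicIp` are defined as in the net script:

```bash
# Standalone sketch of the bounded boot-wait above. sshOptions and publicIp are
# assumed to be set as in the net script; the instance's startup script touches
# /.instance-startup-complete when it finishes.
wait_for_startup() {
  declare publicIp=$1
  declare i
  for i in $(seq 1 60); do
    if timeout 20s ssh "${sshOptions[@]}" "$publicIp" \
        "test -f /.instance-startup-complete"; then
      return 0
    fi
    sleep 2
    echo "Retry $i..."
  done
  echo "Instance $publicIp failed to boot."
  return 1
}
```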
@ -230,7 +272,7 @@ EOF
IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")
# Try to ping the machine first.
timeout 60s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
timeout 90s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
if [[ ! -r $sshPrivateKey ]]; then
echo "Fetching $sshPrivateKey from $leaderName"
@ -377,6 +419,9 @@ $(
install-libssl-compatability.sh \
install-rsync.sh \
network-config.sh \
remove-docker-interface.sh \
update-default-cuda.sh \
)
cat > /etc/motd <<EOM

View File

@ -23,11 +23,14 @@ Operate a configured testnet
restart - Shortcut for stop then start
start-specific options:
-S [snapFilename] - Deploy the specified Snap file
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
-t edge|beta|stable - Deploy the latest tarball release for the specified channel
-f [cargoFeatures] - List of |cargo --features=| to activate
(ignored if -s or -S is specified)
-S [snapFilename] - Deploy the specified Snap file
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
-T [tarFilename] - Deploy the specified release tarball
-t edge|beta|stable|vX.Y.Z - Deploy the latest tarball release for the
specified release channel (edge|beta|stable) or release tag
(vX.Y.Z)
-f [cargoFeatures] - List of |cargo --features=| to activate
(ignored if -s or -S is specified)
Note: if RUST_LOG is set in the environment it will be propagated into the
network nodes.
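
With the new `-T` option and the extended `-t` option above, a tarball deploy can come from a channel, a release tag, or a local file. Example start invocations sketched from the usage text (a network configuration is assumed to already exist):

```bash
# Example invocations for the tarball options described above; a network
# configuration created by net/gce.sh or net/ec2.sh is assumed to exist.
net/net.sh start -t beta                     # latest tarball published for the beta channel
net/net.sh start -t v0.10.4                  # a specific release tag
net/net.sh start -T solana-release.tar.bz2   # a locally built release tarball
```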
@ -55,7 +58,7 @@ command=$1
[[ -n $command ]] || usage
shift
while getopts "h?S:s:t:o:f:" opt; do
while getopts "h?S:s:T:t:o:f:" opt; do
case $opt in
h | \?)
usage
@ -76,9 +79,14 @@ while getopts "h?S:s:t:o:f:" opt; do
;;
esac
;;
T)
tarballFilename=$OPTARG
[[ -f $tarballFilename ]] || usage "Snap not readable: $tarballFilename"
deployMethod=tar
;;
t)
case $OPTARG in
edge|beta|stable)
edge|beta|stable|v*)
releaseChannel=$OPTARG
deployMethod=tar
;;
@ -198,7 +206,7 @@ startClient() {
set -x
startCommon "$ipAddress"
ssh "${sshOptions[@]}" -f "$ipAddress" \
"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp $expectedNodeCount \"$RUST_LOG\""
"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \"$RUST_LOG\""
) >> "$logFile" 2>&1 || {
cat "$logFile"
echo "^^^ +++"
@ -213,10 +221,11 @@ sanity() {
echo "--- Sanity"
$metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"
declare host=$leaderIp # TODO: maybe use ${validatorIpList[0]} ?
(
set -x
# shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
ssh "${sshOptions[@]}" "$leaderIp" \
ssh "${sshOptions[@]}" "$host" \
"./solana/net/remote/remote-sanity.sh $sanityExtraArgs"
) || ok=false
@ -236,13 +245,17 @@ start() {
set -ex;
apt-get -qq update;
apt-get -qq -y install snapd;
snap download --channel=$snapChannel solana;
until snap download --channel=$snapChannel solana; do
sleep 1;
done
"
)
else
(
cd "$SOLANA_ROOT"
snap download --channel="$snapChannel" solana
until snap download --channel="$snapChannel" solana; do
sleep 1
done
)
fi
snapFilename="$(echo "$SOLANA_ROOT"/solana_*.snap)"
@ -259,8 +272,9 @@ start() {
set -x
curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
tar jxvf solana-release.tar.bz2
tarballFilename=solana-release.tar.bz2
fi
tar jxvf $tarballFilename
;;
local)
build
@ -313,15 +327,28 @@ start() {
clientDeployTime=$SECONDS
$metricsWriteDatapoint "testnet-deploy net-start-complete=1"
if [[ $deployMethod = "snap" ]]; then
declare networkVersion=unknown
declare networkVersion=unknown
case $deployMethod in
snap)
IFS=\ read -r _ networkVersion _ < <(
ssh "${sshOptions[@]}" "$leaderIp" \
"snap info solana | grep \"^installed:\""
)
networkVersion=${networkVersion/0+git./}
$metricsWriteDatapoint "testnet-deploy version=\"$networkVersion\""
fi
;;
tar)
networkVersion="$(
tail -n1 "$SOLANA_ROOT"/solana-release/version.txt || echo "tar-unknown"
)"
;;
local)
networkVersion="$(git rev-parse HEAD || echo local-unknown)"
;;
*)
usage "Internal error: invalid deployMethod: $deployMethod"
;;
esac
$metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""
echo
echo "+++ Deployment Successful"

View File

@ -6,8 +6,7 @@ echo "$(date) | $0 $*" > client.log
deployMethod="$1"
entrypointIp="$2"
numNodes="$3"
RUST_LOG="$4"
RUST_LOG="$3"
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
missing() {
@ -17,7 +16,6 @@ missing() {
[[ -n $deployMethod ]] || missing deployMethod
[[ -n $entrypointIp ]] || missing entrypointIp
[[ -n $numNodes ]] || missing numNodes
source net/common.sh
loadConfigFile
@ -58,7 +56,6 @@ clientCommand="\
$solana_bench_tps \
--network $entrypointIp:8001 \
--identity client.json \
--num-nodes $numNodes \
--duration 7500 \
--sustained \
--threads $threadCount \

View File

@ -84,13 +84,18 @@ local|tar)
export SOLANA_DEFAULT_METRICS_RATE=1
./fetch-perf-libs.sh
export LD_LIBRARY_PATH="$PWD/target/perf-libs:$LD_LIBRARY_PATH"
export LD_LIBRARY_PATH="$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
scripts/net-stats.sh > net-stats.log 2>&1 &
case $nodeType in
leader)
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
echo Selecting solana-fullnode-cuda
export SOLANA_CUDA=1
fi
./multinode-demo/setup.sh -t leader $setupArgs
./multinode-demo/drone.sh > drone.log 2>&1 &
./multinode-demo/leader.sh > leader.log 2>&1 &
@ -98,6 +103,11 @@ local|tar)
validator)
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
echo Selecting solana-fullnode-cuda
export SOLANA_CUDA=1
fi
./multinode-demo/setup.sh -t validator $setupArgs
./multinode-demo/validator.sh "$entrypointIp":~/solana "$entrypointIp:8001" >validator.log 2>&1 &
;;
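
The node bootstrap above only switches to the CUDA fullnode when the instance actually exposes a GPU and the CUDA binary was shipped, and it now puts the CUDA runtime on the library path up front. A condensed sketch with paths as in the script; the downstream `multinode-demo` scripts are assumed to switch binaries on `SOLANA_CUDA`:

```bash
# Condensed sketch of the GPU auto-selection above; paths follow the script.
export LD_LIBRARY_PATH="$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH"
if [[ -e /dev/nvidia0 && -x ~/.cargo/bin/solana-fullnode-cuda ]]; then
  echo Selecting solana-fullnode-cuda
  export SOLANA_CUDA=1   # downstream scripts are assumed to switch on this flag
fi
```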

View File

@ -31,7 +31,7 @@ __cloud_FindInstances() {
declare name zone publicIp privateIp status
while read -r name publicIp privateIp status; do
printf "%-30s | publicIp=%-16s privateIp=%s staus=%s\n" "$name" "$publicIp" "$privateIp" "$status"
printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"
instances+=("$name:$publicIp:$privateIp")
done < <(gcloud compute instances list \
@ -128,6 +128,9 @@ cloud_CreateInstances() {
--no-restart-on-failure
)
# shellcheck disable=SC2206 # Do not want to quote $imageName as it may contain extra args
args+=(--image $imageName)
# shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
args+=(--machine-type $machineType)
if [[ -n $optionalBootDiskSize ]]; then
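
The two `args+=` lines above are deliberately left unquoted (see the shellcheck waivers) so that a value such as the 18.04 image can carry an extra `--image-project` flag along with the image name. A small sketch of how that expansion behaves, using the image value from elsewhere in this change:

```bash
# Sketch of the intentional word-splitting above: the unquoted expansion turns
# one variable into several gcloud arguments.
imageName="ubuntu-1804-bionic-v20181029 --image-project ubuntu-os-cloud"
args=()
# shellcheck disable=SC2206 # intentional: $imageName may carry extra args
args+=(--image $imageName)
printf '%s\n' "${args[@]}"   # prints four separate arguments
```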

View File

@ -0,0 +1,11 @@
#!/bin/bash -ex
#
# Some instances have Docker running, and the docker0 network interface
# confuses gossip, causing airdrops to fail. As a workaround for now, simply
# remove the docker0 interface
#
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
ip link delete docker0 || true

View File

@ -0,0 +1,9 @@
#!/bin/bash -ex
#
# Updates the default cuda symlink to the supported version
#
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
ln -sfT /usr/local/cuda-9.2 /usr/local/cuda

View File

@ -21,7 +21,6 @@ $ sudo apt-get install -y clang-7
```
### macOS
The following depends on Homebrew; instructions for installing Homebrew are at https://brew.sh
Once Homebrew is installed, ensure the latest llvm is installed:
@ -31,3 +30,34 @@ $ brew install llvm # <- should output “Warning: llvm 7.0.0 is already instal
$ brew --prefix llvm # <- should output “/usr/local/opt/llvm”
```
## Development
### Quick start
To get started create a `makefile` containing:
```make
include path/to/bpf.mk
```
and `src/program.c` containing:
```c
#include <solana_sdk.h>
bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[1];
uint8_t *data;
uint64_t data_len;
if (!sol_deserialize(input, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
return false;
}
sol_log_params(1, ka, data, data_len);
return true;
}
```
Then run `make` to build `out/program.o`.
Run `make help` for more details.
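
To make the quick start concrete, here is a hypothetical build session; `path/to/bpf.mk` is the placeholder from the makefile snippet above and must point at the SDK's `bpf.mk`:

```bash
# Hypothetical quick-start session; path/to/bpf.mk is a placeholder that must
# point at the SDK's bpf.mk, as noted in the text above.
mkdir -p src
printf 'include path/to/bpf.mk\n' > makefile
"$EDITOR" src/program.c   # paste the entrypoint shown above
make                      # builds out/program.o
make help                 # lists the remaining targets
```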
### Limitations
* Programs must be fully contained within a single .c file
* No libc is available but `solana_sdk.h` provides a minimal set of
primitives.

View File

@ -37,7 +37,6 @@ CC_FLAGS := \
LLC_FLAGS := \
-march=bpf \
-filetype=obj \
-function-sections \
OBJ_DUMP_FLAGS := \
-color \

View File

@ -34,36 +34,15 @@ typedef unsigned long int uint64_t;
typedef enum { false = 0, true } bool;
/**
* Built-in helper functions
* @{
* The BPF VM makes a limited number of helper functions available to BPF
* programs. They are resolved at run-time and identified by a function index.
* Calling any of these functions results in `Call` instruction out of the
* user's BPF program.
*
* The helper functions all follow the same signature:
*
* int helper(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)
*
* The meaning of each argument and return value is dependent on the particular
* helper function being called.
* Helper function that prints a string to stdout
*/
extern void sol_log(const char*);
/**
* Helper function that prints to stdout
*
* Prints the hexadecimal representation of each parameter
* Helper function that prints 64-bit values, represented in hexadecimal,
* to stdout
*/
#define BPF_TRACE_PRINTK_IDX 6
static int (*sol_print)(
uint64_t,
uint64_t,
uint64_t,
uint64_t,
uint64_t
) = (void *)BPF_TRACE_PRINTK_IDX;
/**@}*/
extern void sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
/**
* Prefix for all BPF functions
@ -147,7 +126,7 @@ SOL_FN_PREFIX int sol_memcmp(const void *s1, const void *s2, int n) {
*/
#define sol_panic() _sol_panic(__LINE__)
SOL_FN_PREFIX void _sol_panic(uint64_t line) {
sol_print(0xFF, 0xFF, 0xFF, 0xFF, line);
sol_log_64(0xFF, 0xFF, 0xFF, 0xFF, line);
uint8_t *pv = (uint8_t *)1;
*pv = 1;
}
@ -241,9 +220,9 @@ SOL_FN_PREFIX bool sol_deserialize(
*
* @param key The public key to print
*/
SOL_FN_PREFIX void sol_print_key(const SolPubkey *key) {
SOL_FN_PREFIX void sol_log_key(const SolPubkey *key) {
for (int j = 0; j < sizeof(*key); j++) {
sol_print(0, 0, 0, j, key->x[j]);
sol_log_64(0, 0, 0, j, key->x[j]);
}
}
@ -252,9 +231,9 @@ SOL_FN_PREFIX void sol_print_key(const SolPubkey *key) {
*
* @param array The array to print
*/
SOL_FN_PREFIX void sol_print_array(const uint8_t *array, int len) {
SOL_FN_PREFIX void sol_log_array(const uint8_t *array, int len) {
for (int j = 0; j < len; j++) {
sol_print(0, 0, 0, j, array[j]);
sol_log_64(0, 0, 0, j, array[j]);
}
}
@ -266,20 +245,20 @@ SOL_FN_PREFIX void sol_print_array(const uint8_t *array, int len) {
* @param data A pointer to the instruction data to print
* @param data_len The length in bytes of the instruction data
*/
SOL_FN_PREFIX void sol_print_params(
SOL_FN_PREFIX void sol_log_params(
uint64_t num_ka,
const SolKeyedAccounts *ka,
const uint8_t *data,
uint64_t data_len
) {
sol_print(0, 0, 0, 0, num_ka);
sol_log_64(0, 0, 0, 0, num_ka);
for (int i = 0; i < num_ka; i++) {
sol_print_key(ka[i].key);
sol_print(0, 0, 0, 0, *ka[i].tokens);
sol_print_array(ka[i].userdata, ka[i].userdata_len);
sol_print_key(ka[i].program_id);
sol_log_key(ka[i].key);
sol_log_64(0, 0, 0, 0, *ka[i].tokens);
sol_log_array(ka[i].userdata, ka[i].userdata_len);
sol_log_key(ka[i].program_id);
}
sol_print_array(data, data_len);
sol_log_array(data, data_len);
}
/**@}*/
@ -299,7 +278,7 @@ SOL_FN_PREFIX void sol_print_params(
* if (!sol_deserialize(buf, ka, SOL_ARRAY_SIZE(ka), NULL, &data, &data_len)) {
* return false;
* }
* print_params(1, ka, data, data_len);
* sol_log_params(1, ka, data, data_len);
* return true;
* }
*/

View File

@ -24,9 +24,9 @@ extern bool entrypoint(const uint8_t *input) {
if (*ka[0].tokens >= tokens) {
*ka[0].tokens -= tokens;
*ka[2].tokens += tokens;
// sol_print(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
// sol_log_64(0, 0, *ka[0].tokens, *ka[2].tokens, tokens);
} else {
// sol_print(0, 0, 0xFF, *ka[0].tokens, tokens);
// sol_log_64(0, 0, 0xFF, *ka[0].tokens, tokens);
}
return true;
}

View File

@ -15,11 +15,13 @@ extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
sol_log("noop");
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
sol_print_params(NUM_KA, ka, data, data_len);
sol_log_params(NUM_KA, ka, data, data_len);
sol_assert(sizeof(int8_t) == 1);
sol_assert(sizeof(uint8_t) == 1);

View File

@ -1,231 +0,0 @@
/**
* @brief TicTacToe Dashboard C-based BPF program
*/
#include <solana_sdk.h>
#include "tictactoe.h"
typedef enum {
Result_Ok,
Result_Panic,
Result_GameInProgress,
Result_InvalidArguments,
Result_InvalidMove,
Result_InvalidUserdata,
Result_InvalidTimestamp,
Result_NoGame,
Result_NotYourTurn,
Result_PlayerNotFound,
Result_UserdataTooSmall,
} Result;
typedef enum {
Command_Init = 0,
Command_Join,
Command_KeepAlive,
Command_Move,
} Command;
SOL_FN_PREFIX void game_dump_board(Game *self) {
sol_print(0x9, 0x9, 0x9, 0x9, 0x9);
sol_print(0, 0, self->board[0], self->board[1], self->board[2]);
sol_print(0, 0, self->board[3], self->board[4], self->board[5]);
sol_print(0, 0, self->board[6], self->board[7], self->board[8]);
sol_print(0x9, 0x9, 0x9, 0x9, 0x9);
}
SOL_FN_PREFIX void game_create(Game *self, SolPubkey *player_x) {
// account memory is zero-initialized
sol_memcpy(self->player_x.x, player_x, SIZE_PUBKEY);
self->state = State_Waiting;
for (int i = 0; i < 9; i++) {
self->board[i] = BoardItem_F;
}
}
SOL_FN_PREFIX Result game_join(Game *self, SolPubkey *player_o,
int64_t timestamp) {
if (self->state == State_Waiting) {
sol_memcpy(self->player_o.x, player_o, SIZE_PUBKEY);
self->state = State_XMove;
if (timestamp <= self->keep_alive[1]) {
return Result_InvalidTimestamp;
} else {
self->keep_alive[1] = timestamp;
return Result_Ok;
}
}
return Result_GameInProgress;
}
SOL_FN_PREFIX bool game_same(BoardItem x_or_o, BoardItem one, BoardItem two,
BoardItem three) {
if (x_or_o == one && x_or_o == two && x_or_o == three) {
return true;
}
return false;
}
SOL_FN_PREFIX bool game_same_player(SolPubkey *one, SolPubkey *two) {
for (int i = 0; i < SIZE_PUBKEY; i++) {
if (one->x[i] != two->x[i]) {
return false;
}
}
return true;
}
SOL_FN_PREFIX Result game_next_move(Game *self, SolPubkey *player, int x,
int y) {
int board_index = y * 3 + x;
if (board_index >= 9 || self->board[board_index] != BoardItem_F) {
return Result_InvalidMove;
}
BoardItem x_or_o;
State won_state;
switch (self->state) {
case State_XMove:
if (!game_same_player(player, &self->player_x)) {
return Result_PlayerNotFound;
}
self->state = State_OMove;
x_or_o = BoardItem_X;
won_state = State_XWon;
break;
case State_OMove:
if (!game_same_player(player, &self->player_o)) {
return Result_PlayerNotFound;
}
self->state = State_XMove;
x_or_o = BoardItem_O;
won_state = State_OWon;
break;
default:
return Result_NotYourTurn;
}
self->board[board_index] = x_or_o;
// game_dump_board(self);
bool winner =
// Check rows
game_same(x_or_o, self->board[0], self->board[1], self->board[2]) ||
game_same(x_or_o, self->board[3], self->board[4], self->board[5]) ||
game_same(x_or_o, self->board[6], self->board[7], self->board[8]) ||
// Check columns
game_same(x_or_o, self->board[0], self->board[3], self->board[6]) ||
game_same(x_or_o, self->board[1], self->board[4], self->board[7]) ||
game_same(x_or_o, self->board[2], self->board[5], self->board[8]) ||
// Check both diagonals
game_same(x_or_o, self->board[0], self->board[4], self->board[8]) ||
game_same(x_or_o, self->board[2], self->board[4], self->board[6]);
if (winner) {
self->state = won_state;
}
{
int draw = true;
for (int i = 0; i < 9; i++) {
if (BoardItem_F == self->board[i]) {
draw = false;
break;
}
}
if (draw) {
self->state = State_Draw;
}
}
return Result_Ok;
}
SOL_FN_PREFIX Result game_keep_alive(Game *self, SolPubkey *player,
int64_t timestamp) {
switch (self->state) {
case State_Waiting:
case State_XMove:
case State_OMove:
if (game_same_player(player, &self->player_x)) {
if (timestamp <= self->keep_alive[0]) {
return Result_InvalidTimestamp;
}
self->keep_alive[0] = timestamp;
} else if (game_same_player(player, &self->player_o)) {
if (timestamp <= self->keep_alive[1]) {
return Result_InvalidTimestamp;
}
self->keep_alive[1] = timestamp;
} else {
return Result_PlayerNotFound;
}
break;
default:
break;
}
return Result_Ok;
}
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*
* accounts[0] On Init must be player X, after that doesn't matter,
* anybody can cause a dashboard update
* accounts[1] must be a TicTacToe state account
* accounts[2] must be account of current player, only Pubkey is used
*/
#define NUM_KA 3
extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
int err = 0;
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
if (sizeof(Game) > ka[1].userdata_len) {
sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
return false;
}
Game game;
sol_memcpy(&game, ka[1].userdata, sizeof(game));
Command command = *data;
switch (command) {
case Command_Init:
game_create(&game, ka[2].key);
break;
case Command_Join:
err = game_join(&game, ka[2].key, *((int64_t *)(data + 4)));
break;
case Command_KeepAlive:
err = game_keep_alive(&game, ka[2].key, /*TODO*/ 0);
break;
case Command_Move:
err = game_next_move(&game, ka[2].key, data[4], data[5]);
break;
default:
return false;
}
sol_memcpy(ka[1].userdata, &game, sizeof(game));
sol_print(0, 0, 0, err, game.state);
if (Result_Ok != err) {
return false;
}
return true;
}

View File

@ -1,36 +0,0 @@
#ifndef TICTACTOE_H
#define TICTACTOE_H
/**
* @brief Definitions common to tictactoe and tictactoe_dashboard
*/
typedef enum {
State_Waiting,
State_XMove,
State_OMove,
State_XWon,
State_OWon,
State_Draw,
} State;
typedef enum { BoardItem_F, BoardItem_X, BoardItem_O } BoardItem;
/**
* Game state
*
* This structure is stored in the owner's account userdata
*
* Board Coordinates
* | 0,0 | 1,0 | 2,0 |
* | 0,1 | 1,1 | 2,1 |
* | 0,2 | 1,2 | 2,2 |
*/
typedef struct {
SolPubkey player_x; /** Player who initialized the game */
SolPubkey player_o; /** Player who joined the game */
State state; /** Current state of the game */
BoardItem board[9]; /** Tracks the player moves */
int64_t keep_alive[2]; /** Keep Alive for each player */
} Game;
#endif // TICTACTOE_H

View File

@ -1,98 +0,0 @@
/**
* @brief TicTacToe C-based BPF program
*/
#include <solana_sdk.h>
#include "tictactoe.h"
#define MAX_GAMES_TRACKED 5
/**
* Dashboard state
*
* This structure is stored in the owner's account userdata
*/
typedef struct {
SolPubkey pending; /** Latest pending game */
SolPubkey completed[MAX_GAMES_TRACKED]; /** Last N completed games (0 is the
latest) */
uint32_t latest_game; /** Index into completed pointing to latest game completed */
uint32_t total; /** Total number of completed games */
} Dashboard;
SOL_FN_PREFIX bool update(Dashboard *self, Game *game, SolPubkey *game_pubkey) {
switch (game->state) {
case State_Waiting:
sol_memcpy(&self->pending, game_pubkey, SIZE_PUBKEY);
break;
case State_XMove:
case State_OMove:
// Nothing to do. In progress games are not managed by the dashboard
break;
case State_XWon:
case State_OWon:
case State_Draw:
for (int i = 0; i < MAX_GAMES_TRACKED; i++) {
if (SolPubkey_same(&self->completed[i], game_pubkey)) {
// TODO: Once the PoH height is exposed to programs, it could be used
// to ensure
// that old games are not being re-added and causing total to
// increment incorrectly.
return false;
}
}
self->total += 1;
self->latest_game = (self->latest_game + 1) % MAX_GAMES_TRACKED;
sol_memcpy(self->completed[self->latest_game].x, game_pubkey,
SIZE_PUBKEY);
break;
default:
break;
}
return true;
}
/**
* Number of SolKeyedAccounts expected. The program should bail if an
* unexpected number of accounts are passed to the program's entrypoint
*
* accounts[0] doesn't matter, anybody can cause a dashboard update
* accounts[1] must be a Dashboard account
* accounts[2] must be a Game account
*/
#define NUM_KA 3
extern bool entrypoint(const uint8_t *input) {
SolKeyedAccounts ka[NUM_KA];
const uint8_t *data;
uint64_t data_len;
int err = 0;
if (!sol_deserialize(input, ka, NUM_KA, NULL, &data, &data_len)) {
return false;
}
// TODO check dashboard and game program ids (how to check now that they are
// not known values)
// TODO check validity of dashboard and game structures contents
if (sizeof(Dashboard) > ka[1].userdata_len) {
sol_print(0, 0, 0xFF, sizeof(Dashboard), ka[2].userdata_len);
return false;
}
Dashboard dashboard;
sol_memcpy(&dashboard, ka[1].userdata, sizeof(dashboard));
if (sizeof(Game) > ka[2].userdata_len) {
sol_print(0, 0, 0xFF, sizeof(Game), ka[2].userdata_len);
return false;
}
Game game;
sol_memcpy(&game, ka[2].userdata, sizeof(game));
if (true != update(&dashboard, &game, ka[2].key)) {
return false;
}
sol_memcpy(ka[1].userdata, &dashboard, sizeof(dashboard));
return true;
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-bpf-noop"
version = "0.10.0"
version = "0.10.4"
description = "Solana BPF noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -8,4 +8,4 @@ license = "Apache-2.0"
[dependencies]
rbpf = "0.1.0"
solana-sdk = { path = "../../../../sdk", version = "0.10.0" }
solana-sdk = { path = "../../../../sdk", version = "0.10.4" }

View File

@ -1,6 +1,6 @@
[package]
name = "solana-bpfloader"
version = "0.10.0"
version = "0.10.4"
description = "Solana BPF Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,10 +13,10 @@ elf = "0.0.10"
env_logger = "0.5.12"
libc = "0.2.43"
log = "0.4.2"
solana_rbpf = "0.1.2"
solana_rbpf = "0.1.3"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.0" }
solana-sdk = { path = "../../../sdk", version = "0.10.4" }
[lib]
name = "solana_bpf_loader"

View File

@ -5,20 +5,24 @@ extern crate byteorder;
extern crate env_logger;
#[macro_use]
extern crate log;
extern crate libc;
extern crate solana_rbpf;
extern crate solana_sdk;
use bincode::deserialize;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use solana_rbpf::{helpers, EbpfVmRaw};
use libc::c_char;
use solana_rbpf::EbpfVmRaw;
use solana_sdk::account::KeyedAccount;
use solana_sdk::loader_instruction::LoaderInstruction;
use solana_sdk::pubkey::Pubkey;
use std::ffi::CStr;
use std::io::prelude::*;
use std::io::Error;
use std::io::{Error, ErrorKind};
use std::mem;
use std::sync::{Once, ONCE_INIT};
// TODO use rbpf's disassemble
#[allow(dead_code)]
fn dump_program(key: &Pubkey, prog: &[u8]) {
let mut eight_bytes: Vec<u8> = Vec::new();
@ -33,32 +37,64 @@ fn dump_program(key: &Pubkey, prog: &[u8]) {
}
}
pub fn helper_printf(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
#[allow(unused_variables)]
pub fn helper_sol_log_verify(
addr: u64,
unused2: u64,
unused3: u64,
unused4: u64,
unused5: u64,
ro_regions: &[&[u8]],
unused7: &[&[u8]],
) -> Result<(()), Error> {
for region in ro_regions.iter() {
if region.as_ptr() as u64 <= addr
&& addr as u64 <= region.as_ptr() as u64 + region.len() as u64
{
let c_buf: *const c_char = addr as *const c_char;
let max_size = (region.as_ptr() as u64 + region.len() as u64) - addr;
unsafe {
for i in 0..max_size {
if std::ptr::read(c_buf.offset(i as isize)) == 0 {
return Ok(());
}
}
}
return Err(Error::new(ErrorKind::Other, "Error, Unterminated string"));
}
}
Err(Error::new(
ErrorKind::Other,
"Error: Load segfault, bad string pointer",
))
}
#[allow(unused_variables)]
pub fn helper_sol_log(addr: u64, unused2: u64, unused3: u64, unused4: u64, unused5: u64) -> u64 {
let c_buf: *const c_char = addr as *const c_char;
let c_str: &CStr = unsafe { CStr::from_ptr(c_buf) };
match c_str.to_str() {
Ok(slice) => info!("sol_log: {:?}", slice),
Err(e) => warn!("Error: Cannot print invalid string"),
};
0
}
pub fn helper_sol_log_u64(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 {
info!(
"bpf_trace_printf: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
"sol_log_u64: {:#x}, {:#x}, {:#x}, {:#x}, {:#x}",
arg1, arg2, arg3, arg4, arg5
);
let size_arg = |x| {
if x == 0 {
1
} else {
(x as f64).log(16.0).floor() as u64 + 1
}
};
"bpf_trace_printf: 0x, 0x, 0x, 0x, 0x\n".len() as u64
+ size_arg(arg1)
+ size_arg(arg2)
+ size_arg(arg3)
+ size_arg(arg4)
+ size_arg(arg5)
0
}
fn create_vm(prog: &[u8]) -> Result<EbpfVmRaw, Error> {
let mut vm = EbpfVmRaw::new(None)?;
vm.set_verifier(bpf_verifier::check)?;
vm.set_max_instruction_count(36000)?; // 36000 is a wag, need to tune
vm.set_program(&prog)?;
vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helper_printf)?;
vm.set_elf(&prog)?;
vm.register_helper_ex("sol_log", Some(helper_sol_log_verify), helper_sol_log)?;
vm.register_helper_ex("sol_log_64", None, helper_sol_log_u64)?;
Ok(vm)
}
@ -163,6 +199,8 @@ pub extern "C" fn process(keyed_accounts: &mut [KeyedAccount], tx_data: &[u8]) -
#[cfg(test)]
mod tests {
use super::*;
use solana_rbpf::helpers;
#[test]
#[should_panic(expected = "Error: Execution exceeded maximum number of instructions")]
fn test_non_terminating_program() {
@ -180,7 +218,13 @@ mod tests {
0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
];
let input = &mut [0x00];
let mut vm = create_vm(prog).unwrap();
let mut vm = EbpfVmRaw::new(None).unwrap();
vm.set_verifier(bpf_verifier::check).unwrap();
vm.set_max_instruction_count(36000).unwrap(); // 36000 is a wag, need to tune
vm.set_program(prog).unwrap();
vm.register_helper(helpers::BPF_TRACE_PRINTK_IDX, helpers::bpf_trace_printf)
.unwrap();
vm.execute_program(input).unwrap();
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-lualoader"
version = "0.10.0"
version = "0.10.4"
description = "Solana Lua Loader"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,7 +13,7 @@ log = "0.4.2"
rlua = "0.15.2"
serde = "1.0.27"
serde_derive = "1.0.27"
solana-sdk = { path = "../../../sdk", version = "0.10.0" }
solana-sdk = { path = "../../../sdk", version = "0.10.4" }
[dev-dependencies]
bincode = "1.0.0"

View File

@ -1,13 +1,13 @@
[package]
name = "solana-noop"
version = "0.10.0"
version = "0.10.4"
description = "Solana noop program"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
[dependencies]
solana-sdk = { path = "../../../sdk", version = "0.10.0" }
solana-sdk = { path = "../../../sdk", version = "0.10.4" }
[lib]
name = "noop"

View File

@ -117,6 +117,14 @@ $ solana-wallet send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
<TX_SIGNATURE>
```
### Deploy program
```
// Command
$ solana-wallet deploy <PATH>
// Return
<PROGRAM_ID>
```
## Javascript solana-web3.js Interface

View File

@ -1,6 +1,6 @@
[package]
name = "solana-sdk"
version = "0.10.0"
version = "0.10.4"
description = "Solana SDK"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -7,7 +7,6 @@ use chrono::prelude::*;
use clap::ArgMatches;
use cluster_info::NodeInfo;
use drone::DroneRequest;
use elf;
use fullnode::Config;
use hash::Hash;
use loader_transaction::LoaderTransaction;
@ -31,7 +30,6 @@ use std::{error, fmt, mem};
use system_transaction::SystemTransaction;
use transaction::Transaction;
const PLATFORM_SECTION_C: &str = ".text.entrypoint";
const USERDATA_CHUNK_SIZE: usize = 256;
#[derive(Debug, PartialEq)]
@ -396,16 +394,14 @@ pub fn process_command(config: &WalletConfig) -> Result<String, Box<error::Error
let last_id = get_last_id(&config)?;
let program = Keypair::new();
let program_userdata = elf::File::open_path(program_location)
let mut program_userdata = Vec::new();
File::open(program_location)
.map_err(|_| {
WalletError::DynamicProgramError("Could not parse program file".to_string())
})?.get_section(PLATFORM_SECTION_C)
.ok_or_else(|| {
WalletError::DynamicProgramError(
"Could not find entrypoint in program file".to_string(),
)
})?.data
.clone();
})?.read_to_end(&mut program_userdata)
.map_err(|_| {
WalletError::DynamicProgramError("Could not read program file".to_string())
})?;
let tx = Transaction::system_create(
&config.id,

View File

@ -1,5 +1,6 @@
extern crate bincode;
extern crate elf;
extern crate serde_derive;
extern crate solana;
extern crate solana_sdk;
@ -13,13 +14,15 @@ use solana::mint::Mint;
use solana::native_loader;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
#[cfg(feature = "bpf_c")]
use solana::tictactoe_program::Command;
use solana::transaction::Transaction;
use solana_sdk::pubkey::Pubkey;
#[cfg(feature = "bpf_c")]
use std::env;
#[cfg(feature = "bpf_c")]
use std::fs::File;
#[cfg(feature = "bpf_c")]
use std::io::Read;
#[cfg(feature = "bpf_c")]
use std::path::PathBuf;
/// BPF program file extension
@ -119,7 +122,7 @@ struct Program {
}
impl Program {
pub fn new(loader: &Loader, userdata: Vec<u8>) -> Self {
pub fn new(loader: &Loader, userdata: &Vec<u8>) -> Self {
let program = Keypair::new();
// allocate, populate, finalize and spawn program
@ -183,7 +186,7 @@ fn test_program_native_noop() {
let loader = Loader::new_native();
let name = String::from("noop");
let userdata = name.as_bytes().to_vec();
let program = Program::new(&loader, userdata);
let program = Program::new(&loader, &userdata);
// Call user program
let tx = Transaction::new(
@ -213,7 +216,7 @@ fn test_program_lua_move_funds() {
accounts[2].tokens = accounts[2].tokens + tokens
"#.as_bytes()
.to_vec();
let program = Program::new(&loader, userdata);
let program = Program::new(&loader, &userdata);
let from = Keypair::new();
let to = Keypair::new().pubkey();
@ -272,16 +275,12 @@ fn test_program_lua_move_funds() {
fn test_program_builtin_bpf_noop() {
logger::setup();
let mut file = File::open(create_bpf_path("noop")).expect("file open failed");
let mut elf = Vec::new();
file.read_to_end(&mut elf).unwrap();
let loader = Loader::new_bpf();
let program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("noop"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
.data
.clone(),
);
let program = Program::new(&loader, &elf);
// Call user program
let tx = Transaction::new(
@ -304,16 +303,12 @@ fn test_program_builtin_bpf_noop() {
fn test_program_bpf_noop_c() {
logger::setup();
let mut file = File::open(create_bpf_path("noop")).expect("file open failed");
let mut elf = Vec::new();
file.read_to_end(&mut elf).unwrap();
let loader = Loader::new_dynamic("solana_bpf_loader");
let program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("noop"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
.data
.clone(),
);
let program = Program::new(&loader, &elf);
// Call user program
let tx = Transaction::new(
@ -330,268 +325,3 @@ fn test_program_bpf_noop_c() {
loader.bank.process_transactions(&vec![tx.clone()]),
);
}
#[cfg(feature = "bpf_c")]
struct TicTacToe {
game: Keypair,
}
#[cfg(feature = "bpf_c")]
impl TicTacToe {
pub fn new(loader: &Loader, program: &Program) -> Self {
let game = Keypair::new();
// Create game account
let tx = Transaction::system_create(
&loader.mint.keypair(),
game.pubkey(),
loader.mint.last_id(),
1,
0x78, // corresponds to the C structure size
program.program.pubkey(),
0,
);
check_tx_results(
&loader.bank,
&tx,
loader.bank.process_transactions(&vec![tx.clone()]),
);
TicTacToe { game }
}
pub fn id(&self) -> Pubkey {
self.game.pubkey().clone()
}
pub fn init(&self, loader: &Loader, program: &Program, player: &Pubkey) {
let userdata = serialize(&Command::Init).unwrap();
let tx = Transaction::new(
&self.game,
&[self.game.pubkey(), *player],
program.program.pubkey(),
userdata,
loader.mint.last_id(),
0,
);
check_tx_results(
&loader.bank,
&tx,
loader.bank.process_transactions(&vec![tx.clone()]),
);
}
pub fn command(&self, loader: &Loader, program: &Program, command: Command, player: &Pubkey) {
let userdata = serialize(&command).unwrap();
let tx = Transaction::new(
&loader.mint.keypair(),
&[self.game.pubkey(), *player],
program.program.pubkey(),
userdata,
loader.mint.last_id(),
0,
);
check_tx_results(
&loader.bank,
&tx,
loader.bank.process_transactions(&vec![tx.clone()]),
);
}
pub fn get_player_x(&self, loader: &Loader) -> Vec<u8> {
loader
.bank
.get_account(&self.game.pubkey())
.unwrap()
.userdata[0..32]
.to_vec()
}
pub fn get_player_y(&self, loader: &Loader) -> Vec<u8> {
loader
.bank
.get_account(&self.game.pubkey())
.unwrap()
.userdata[32..64]
.to_vec()
}
pub fn game(&self, loader: &Loader) -> Vec<u8> {
loader
.bank
.get_account(&self.game.pubkey())
.unwrap()
.userdata[64..68]
.to_vec()
}
}
#[cfg(feature = "bpf_c")]
struct Dashboard {
dashboard: Keypair,
}
#[cfg(feature = "bpf_c")]
impl Dashboard {
pub fn new(loader: &Loader, program: &Program) -> Self {
let dashboard = Keypair::new();
// Create game account
let tx = Transaction::system_create(
&loader.mint.keypair(),
dashboard.pubkey(),
loader.mint.last_id(),
1,
0xD0, // corresponds to the C structure size
program.program.pubkey(),
0,
);
check_tx_results(
&loader.bank,
&tx,
loader.bank.process_transactions(&vec![tx.clone()]),
);
Dashboard { dashboard }
}
pub fn update(&self, loader: &Loader, program: &Program, game: &Pubkey) {
let tx = Transaction::new(
&self.dashboard,
&[self.dashboard.pubkey(), *game],
program.program.pubkey(),
vec![],
loader.mint.last_id(),
0,
);
check_tx_results(
&loader.bank,
&tx,
loader.bank.process_transactions(&vec![tx.clone()]),
);
}
pub fn get_game(&self, loader: &Loader, since_last: usize) -> Vec<u8> {
let userdata = loader
.bank
.get_account(&self.dashboard.pubkey())
.unwrap()
.userdata;
// TODO serialize
let last_game = userdata[192] as usize;
let this_game = (last_game + since_last * 4) % 5;
let start = 32 + this_game * 32;
let end = start + 32;
loader
.bank
.get_account(&self.dashboard.pubkey())
.unwrap()
.userdata[start..end]
.to_vec()
}
pub fn get_pending(&self, loader: &Loader) -> Vec<u8> {
loader
.bank
.get_account(&self.dashboard.pubkey())
.unwrap()
.userdata[0..32]
.to_vec()
}
}
#[cfg(feature = "bpf_c")]
#[test]
fn test_program_bpf_tictactoe_c() {
logger::setup();
let loader = Loader::new_dynamic("solana_bpf_loader");
let program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("tictactoe"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
.data
.clone(),
);
let player_x = Pubkey::new(&[0xA; 32]);
let player_y = Pubkey::new(&[0xB; 32]);
let ttt = TicTacToe::new(&loader, &program);
ttt.init(&loader, &program, &player_x);
ttt.command(&loader, &program, Command::Join(0xAABBCCDD), &player_y);
ttt.command(&loader, &program, Command::Move(1, 1), &player_x);
ttt.command(&loader, &program, Command::Move(0, 0), &player_y);
ttt.command(&loader, &program, Command::Move(2, 0), &player_x);
ttt.command(&loader, &program, Command::Move(0, 2), &player_y);
ttt.command(&loader, &program, Command::Move(2, 2), &player_x);
ttt.command(&loader, &program, Command::Move(0, 1), &player_y);
assert_eq!(player_x.as_ref(), &ttt.get_player_x(&loader)[..]); // validate x's key
assert_eq!(player_y.as_ref(), &ttt.get_player_y(&loader)[..]); // validate o's key
assert_eq!([4, 0, 0, 0], ttt.game(&loader)[..]); // validate that o won
}
#[cfg(feature = "bpf_c")]
#[test]
fn test_program_bpf_tictactoe_dashboard_c() {
logger::setup();
let loader = Loader::new_dynamic("solana_bpf_loader");
let ttt_program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("tictactoe"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
.data
.clone(),
);
let player_x = Pubkey::new(&[0xA; 32]);
let player_y = Pubkey::new(&[0xB; 32]);
let ttt1 = TicTacToe::new(&loader, &ttt_program);
ttt1.init(&loader, &ttt_program, &player_x);
ttt1.command(&loader, &ttt_program, Command::Join(0xAABBCCDD), &player_y);
ttt1.command(&loader, &ttt_program, Command::Move(1, 1), &player_x);
ttt1.command(&loader, &ttt_program, Command::Move(0, 0), &player_y);
ttt1.command(&loader, &ttt_program, Command::Move(2, 0), &player_x);
ttt1.command(&loader, &ttt_program, Command::Move(0, 2), &player_y);
ttt1.command(&loader, &ttt_program, Command::Move(2, 2), &player_x);
ttt1.command(&loader, &ttt_program, Command::Move(0, 1), &player_y);
let ttt2 = TicTacToe::new(&loader, &ttt_program);
ttt2.init(&loader, &ttt_program, &player_x);
ttt2.command(&loader, &ttt_program, Command::Join(0xAABBCCDD), &player_y);
ttt2.command(&loader, &ttt_program, Command::Move(1, 1), &player_x);
ttt2.command(&loader, &ttt_program, Command::Move(0, 0), &player_y);
ttt2.command(&loader, &ttt_program, Command::Move(2, 0), &player_x);
ttt2.command(&loader, &ttt_program, Command::Move(0, 2), &player_y);
ttt2.command(&loader, &ttt_program, Command::Move(2, 2), &player_x);
ttt2.command(&loader, &ttt_program, Command::Move(0, 1), &player_y);
let ttt3 = TicTacToe::new(&loader, &ttt_program);
ttt3.init(&loader, &ttt_program, &player_x);
let dashboard_program = Program::new(
&loader,
elf::File::open_path(&create_bpf_path("tictactoe_dashboard"))
.unwrap()
.get_section(PLATFORM_SECTION_C)
.unwrap()
.data
.clone(),
);
let dashboard = Dashboard::new(&loader, &dashboard_program);
dashboard.update(&loader, &dashboard_program, &ttt1.id());
dashboard.update(&loader, &dashboard_program, &ttt2.id());
dashboard.update(&loader, &dashboard_program, &ttt3.id());
assert_eq!(ttt1.id().as_ref(), &dashboard.get_game(&loader, 1)[..]);
assert_eq!(ttt2.id().as_ref(), &dashboard.get_game(&loader, 0)[..]);
assert_eq!(ttt3.id().as_ref(), &dashboard.get_pending(&loader)[..]);
}