Compare commits

10 Commits

| SHA1 |
|---|
| b4adb1c266 |
| b9b541441b |
| e510d4e272 |
| 9341e64ec7 |
| d934f94e05 |
| 59dc123fa8 |
| 0faea87c84 |
| 19137ce3f4 |
| 8bdeb2d1ed |
| d29a45266b |
@@ -1,40 +0,0 @@
-version: '{build}'
-
-branches:
-  only:
-    - master
-    - /^v[0-9.]+/
-
-cache:
-  - '%USERPROFILE%\.cargo'
-  - '%APPVEYOR_BUILD_FOLDER%\target'
-
-build_script:
-  - bash ci/publish-tarball.sh
-
-notifications:
-  - provider: Slack
-    incoming_webhook:
-      secure: GJsBey+F5apAtUm86MHVJ68Uqa6WN1SImcuIc4TsTZrDhA8K1QWUNw9FFQPybUWDyOcS5dly3kubnUqlGt9ux6Ad2efsfRIQYWv0tOVXKeY=
-    channel: ci-status
-    on_build_success: false
-    on_build_failure: true
-    on_build_status_changed: true
-
-deploy:
-  - provider: S3
-    access_key_id:
-      secure: fTbJl6JpFebR40J7cOWZ2mXBa3kIvEiXgzxAj6L3N7A=
-    secret_access_key:
-      secure: vItsBXb2rEFLvkWtVn/Rcxu5a5+2EwC+b7GsA0waJy9hXh6XuBAD0lnHd9re3g/4
-    bucket: release.solana.com
-    region: us-west-1
-    set_public: true
-
-  - provider: GitHub
-    auth_token:
-      secure: 81fEmPZ0cV1wLtNuUrcmtgxKF6ROQF1+/ft5m+fHX21z6PoeCbaNo8cTyLioWBj7
-    draft: false
-    prerelease: false
-    on:
-      appveyor_repo_tag: true

1  .buildkite/env/.gitignore  (vendored)

@@ -1 +0,0 @@
-/secrets_unencrypted.ejson

16  .buildkite/env/secrets.ejson  (vendored)

@@ -1,14 +1,12 @@
 {
   "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
   "environment": {
-    "CODECOV_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:ks2/ElgxwgxqgmFcxTHANNLmj23YH74h:U4uzRONRfiQyqy6HrPQ/e7OnBUY4HkW37R0iekkF3KJ9UGnHqT1UvwgVbDqLahtDIJ4rWw==]",
-    "CRATES_IO_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:lKMh3aLW+jyRrfS/c7yvkpB+TaPhXqLq:j0v27EbaPgwRdHZAbsM0FlAnt3r9ScQrFbWJYOAZtM3qestEiByTlKpZ0eyF/823]",
-    "GITHUB_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:Ll78c3jGpYqnTwR7HJq3mNNUC7pOv9Lu:GrInO2r8MjmP5c54szkyygdsrW5KQYkDgJQUVyFEPyG8SWfchyM9Gur8RV0a+cdwuxNkHLi4U2M=]",
-    "INFLUX_DATABASE": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:IlH/ZLTXv3SwlY3TVyAPCX2KzLRY6iG3:gGmUGSU/kCfR/mTwKONaUC/X]",
-    "INFLUX_PASSWORD": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:o2qm95GU4VrrcC4OU06jjPvCwKZy/CZF:OW2ga3kLOQJvaDEdGRJ+gn3L2ckFm8AJZtv9wj/GeUIKDH2A4uBPTHsAH9PMe6zujpuHGk3qbeg=]",
-    "INFLUX_USERNAME": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:yDWW/uIHsJqOTDYskZoSx3pzoB1vztWY:2z31oTA3g0Xs9fCczGNJRcx8xf/hFCed]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:RqRaHlYUvGPNFJa6gmciaYM3tRJTURUH:q78/3GTHCN3Uqx9z4nOBjPZcO1lOazNoB/mdhGRDFsnAqVd2hU8zbKkqLrZfLlGqyD8WQOFuw5oTJR9qWg6L9LcOyj3pGL8jWF2yjgZxdtNMXnkbSrCWLooWBBLT61jYQnEwg73gT8ld3Q8EVv3T+MeSMu6FnPz+0+bqQCAGgfqksP4hsUAJGzgZu+i0tNOdlT7fxnh5KJK/yFM/CKgN2sRwEjukA9hXsffyB61g2zqzTDJxCUDLbCVrCkA/bfUk7Of/t0W5t0nK1H3oyGZEc/lRMauCknDBka3Gz11dVss2QT19WQNh0u7bHVaT/U4lepX1j9Zv]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wFDl3INEnA3EQDHRX40avqGe1OMoJxyy:6ncCRVRTIRuYI5o/gayeuWCudWvmKNYr8KEHAWeTq34a5bdcKInBdKhjmjX+wLHqsEwQ5gcyhcxy4Ri2mbuN6AHazfZOZlubQkGlyUOAIYO5D5jkbyIh40DAtjVzo1MD/0HsW9zdGOzqUKp5xJJeDsbR4F153jbxa7fvwF90Q4UQjYFTKAtExEmHtDGSJG48ToVwTabTV/OnISMIggDZBviIv2QWHvXgK07b2mUj34rHJywEDGN1nj5rITTDdUeRcB1x4BAMOe94kTFPSTaj/OszvYlGECt8rkKFqbm092qL+XLfiBaImqe/WJHRCnAj6Don]",
-    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wAh+dBuZopv6vruVOYegUcq/aBnbksT1:qIJfCfDvDWiqicMOkmbJs/0n7UJLKNmgMQaKzeQ8J7Q60YpXbtWzKVW3tS6lzlgf64m3MrPXyo1C+mWh6jkjsb18T/OfggZy1ZHM4AcsOC6/ldUkV5YtuxUQuAmd5jCuV/R7iuYY8Z66AcfAevlb+bnLpgIifdA8fh/IktOo58nZUQwZDdppAacmftsLc6Frn5Er6A6+EXpxK1nmnlmLJ4AJztqlh6X0r+JvE2O7qeoZUXrIegnkxo7Aay7I/dd8zdYpp7ICSiTEtfVN/xNIu/5QmTRU7gWoz7cPl9epq4aiEALzPOzb6KVOiRcsOg+TlFvLQ71Ik5o=]"
+    "CODECOV_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:jA0WqO70coUtF0iokRdgtCR/lF/lETAI:d/Wl8Tdl6xVh/B39cTf1DaQkomR7I/2vMhvxd1msJ++BjI2l3p2dFoGsXqWT+/os8VgiPg==]",
+    "CRATES_IO_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:2FaZ6k4RGH8luyNRaN6yeZUQDNAu2KwC:XeYe0tCAivYE0F9HEWM79mAI6kNbfYaqP7k7yY+SBDvs0341U9BdGZp7SErbHleS]",
+    "GITHUB_TOKEN": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:9kh4DGPiGDcUU7ejSFWg3gTW8nrOM09Q:b+GE07Wu6/bEnkDZcUtf48vTKAFphrCSt3tNNER9h6A+wZ80k499edw4pbDdl9kEvxB30fFwrLQ=]",
+    "INFLUX_DATABASE": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:rCHsYi0rc7dmvr1V3wEgNoaNIyr+9ClM:omjVcOqM7vwt44kJ+As4BjJL]",
+    "INFLUX_PASSWORD": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:bP5Gw1Vy66viKFKO41o2Gho998XajH/5:khkCYz2LFvkJkk7R4xY1Hfz1yU3/NENjauiUkPhXA+dmg1qOIToxEagCgIkRwyeCiYaoCR6CZyw=]",
+    "INFLUX_USERNAME": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:ZamCvza2W9/bZRGSkqDu55xNN04XKKhp:5jlmCOdFbpL7EFez41zCbLfk3ZZlfmhI]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:eSGdiZR0Qi0g7qnsI+qJ5H+/ik+H2qL3ned/cBdv/SY=:Oi2nsRxnvWnnBYsB6KwEDzLPcYgpYojU:ELbvjXkXKlgFCMES45R+fxG7Ex43WHWErjMbxZoqasxyr7GSH66hQzUWqiQSJyT4ukYrRhRC9YrsKKGkjACLU57X4EGIy9TuLgTnyBYhPnxLYStC3y/7o/MB5FCTt5wHJw3/A9p+me5+T4UmyZ7OeP21NhDUCGQcb0040VwYWS78klW2aQESJJ6wTI1xboE8/zC0vtnB/u50+LydbKEyb21r6y3OH9FYNEpSwIspWKcgpruJdQSCnDoKxP9YR1yzvk2rabss13LJNdV1Y6mQNIdP4OIFQhCs6dXT253RTl5qdZ0MruHwlp8wX4btOuYDcCoM5exr]"
   }
 }

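Aside on the rotated values above: each EJSON box has the shape `EJ[1:<encrypter public key>:<nonce>:<ciphertext>]`, which is why re-encrypting the file changes the second field on every line even though the file-level `_public_key` stays the same. A minimal sketch of splitting that envelope, assuming the standard EJSON v1 layout (the struct and function names here are illustrative, not from this repository):

```rust
// Sketch only: split an EJSON boxed value of the form
// "EJ[1:<encrypter public key>:<nonce>:<ciphertext>]" into its base64
// fields, per the EJSON v1 scheme. Names are illustrative.
#[derive(Debug, PartialEq)]
struct EjsonBox<'a> {
    schema: &'a str,
    encrypter_public: &'a str,
    nonce: &'a str,
    ciphertext: &'a str,
}

fn parse_ejson_box(value: &str) -> Option<EjsonBox<'_>> {
    // Peel off the "EJ[" ... "]" wrapper, then split the four ':'-separated
    // fields. Base64 never contains ':', so a plain split is safe.
    let inner = value.strip_prefix("EJ[")?.strip_suffix(']')?;
    let mut parts = inner.splitn(4, ':');
    Some(EjsonBox {
        schema: parts.next()?,
        encrypter_public: parts.next()?,
        nonce: parts.next()?,
        ciphertext: parts.next()?,
    })
}

fn main() {
    // Illustrative placeholder fields, not a real secret.
    let boxed = "EJ[1:pubB64=:nonceB64:cipherB64==]";
    let parsed = parse_ejson_box(boxed).expect("well-formed box");
    assert_eq!(parsed.schema, "1");
    println!("{:?}", parsed);
}
```
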
@@ -1,8 +1,6 @@
 CI_BUILD_START=$(date +%s)
 export CI_BUILD_START
 
-source ci/env.sh
-
 #
 # Kill any running docker containers, which are potentially left over from the
 # previous CI job
@@ -33,10 +31,3 @@ source ci/env.sh
     kill -9 "$victim" || true
   done
 )
-
-# HACK: These are in our docker images, need to be removed from CARGO_HOME
-# because we try to cache downloads across builds with CARGO_HOME
-# cargo lacks a facility for "system" tooling, always tries CARGO_HOME first
-cargo uninstall cargo-audit || true
-cargo uninstall svgbob_cli || true
-cargo uninstall mdbook || true

@@ -10,13 +10,7 @@
 set -e
 cd "$(dirname "$0")"/..
 
-if [[ -n $BUILDKITE_TAG ]]; then
-  buildkite-agent annotate --style info --context release-tag \
-    "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
-  buildkite-agent pipeline upload ci/buildkite-release.yml
-else
-  buildkite-agent pipeline upload ci/buildkite.yml
-fi
+buildkite-agent pipeline upload ci/buildkite.yml
 
 if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
   # Add helpful link back to the corresponding Github Pull Request

6  .gitignore  (vendored)

@@ -2,10 +2,9 @@
 /book/src/img/
 /book/src/tests.ok
 /farf/
+/metrics/scripts/lib/
 /solana-release/
-/solana-release.tar.bz2
-/solana-metrics/
-/solana-metrics.tar.bz2
+solana-release.tar.bz2
 /target/
 
 **/*.rs.bk
@@ -23,4 +22,3 @@ log-*.txt
 # intellij files
 /.idea/
 /solana.iml
-/.vscode/

45  .mergify.yml

@@ -1,45 +0,0 @@
-# Validate your changes with:
-#
-#   $ curl -F 'data=@.mergify.yml' https://gh.mergify.io/validate
-#
-# https://doc.mergify.io/
-pull_request_rules:
-  - name: remove outdated reviews
-    conditions:
-      - base=master
-    actions:
-      dismiss_reviews:
-        changes_requested: true
-  - name: set automerge label on mergify backport PRs
-    conditions:
-      - author=mergify[bot]
-      - head~=^mergify/bp/
-      - "#status-failure=0"
-    actions:
-      label:
-        add:
-          - automerge
-  - name: v0.16 backport
-    conditions:
-      - base=master
-      - label=v0.16
-    actions:
-      backport:
-        branches:
-          - v0.16
-  - name: v0.17 backport
-    conditions:
-      - base=master
-      - label=v0.17
-    actions:
-      backport:
-        branches:
-          - v0.17
-  - name: v0.18 backport
-    conditions:
-      - base=master
-      - label=v0.18
-    actions:
-      backport:
-        branches:
-          - v0.18

44  .travis.yml

@@ -1,44 +0,0 @@
-os:
-  - osx
-
-language: rust
-cache: cargo
-rust:
-  - 1.36.0
-
-install:
-  - source ci/rust-version.sh
-  - test $rust_stable = $TRAVIS_RUST_VERSION # Update .travis.yml rust version above when this fails
-
-script:
-  - source ci/env.sh
-  - ci/publish-tarball.sh
-
-branches:
-  only:
-    - master
-    - /^v\d+\.\d+(\.\d+)?(-\S*)?$/
-
-notifications:
-  slack:
-    on_success: change
-    secure: F4IjOE05MyaMOdPRL+r8qhs7jBvv4yDM3RmFKE1zNXnfUOqV4X38oQM1EI+YVsgpMQLj/pxnEB7wcTE4Bf86N6moLssEULCpvAuMVoXj4QbWdomLX+01WbFa6fLVeNQIg45NHrz2XzVBhoKOrMNnl+QI5mbR2AlS5oqsudHsXDnyLzZtd4Y5SDMdYG1zVWM01+oNNjgNfjcCGmOE/K0CnOMl6GPi3X9C34tJ19P2XT7MTDsz1/IfEF7fro2Q8DHEYL9dchJMoisXSkem5z7IDQkGzXsWdWT4NnndUvmd1MlTCE9qgoXDqRf95Qh8sB1Dz08HtvgfaosP2XjtNTfDI9BBYS15Ibw9y7PchAJE1luteNjF35EOy6OgmCLw/YpnweqfuNViBZz+yOPWXVC0kxnPIXKZ1wyH9ibeH6E4hr7a8o9SV/6SiWIlbYF+IR9jPXyTCLP/cc3sYljPWxDnhWFwFdRVIi3PbVAhVu7uWtVUO17Oc9gtGPgs/GrhOMkJfwQPXaudRJDpVZowxTX4x9kefNotlMAMRgq+Drbmgt4eEBiCNp0ITWgh17BiE1U09WS3myuduhoct85+FoVeaUkp1sxzHVtGsNQH0hcz7WcpZyOM+AwistJA/qzeEDQao5zi1eKWPbO2xAhi2rV1bDH6bPf/4lDBwLRqSiwvlWU=
-
-deploy:
-  - provider: s3
-    access_key_id: $AWS_ACCESS_KEY_ID
-    secret_access_key: $AWS_SECRET_ACCESS_KEY
-    bucket: release.solana.com
-    region: us-west-1
-    skip_cleanup: true
-    acl: public_read
-    local_dir: travis-s3-upload
-    on:
-      all_branches: true
-  - provider: releases
-    api_key: $GITHUB_TOKEN
-    skip_cleanup: true
-    file_glob: true
-    file: travis-release-upload/*
-    on:
-      tags: true

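Both deleted CI configs gate deploys on branch names that look like release tags (`/^v[0-9.]+/` in the AppVeyor file, `/^v\d+\.\d+(\.\d+)?(-\S*)?$/` in Travis). A sketch of the stricter Travis pattern as a reusable check, assuming the `regex` crate, which is not a dependency of this workspace:

```rust
// Sketch only: validate a tag against the release pattern the deleted
// .travis.yml used in `branches.only`. Assumes regex = "1".
use regex::Regex;

fn is_release_tag(tag: &str) -> bool {
    // Mirrors /^v\d+\.\d+(\.\d+)?(-\S*)?$/ from the removed config:
    // "v" + major.minor, optional patch, optional "-suffix".
    Regex::new(r"^v\d+\.\d+(\.\d+)?(-\S*)?$")
        .expect("pattern is valid")
        .is_match(tag)
}

fn main() {
    assert!(is_release_tag("v0.16.6"));
    assert!(is_release_tag("v0.17.0-rc1"));
    assert!(!is_release_tag("master"));
    assert!(!is_release_tag("v0"));
}
```
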
3952  Cargo.lock  (generated)

File diff suppressed because it is too large.

17  Cargo.toml

@@ -3,26 +3,21 @@ members = [
     "bench-exchange",
     "bench-streamer",
     "bench-tps",
-    "chacha-sys",
     "client",
     "core",
     "drone",
     "validator",
     "genesis",
-    "genesis_programs",
     "gossip",
     "install",
     "keygen",
     "kvstore",
     "ledger-tool",
     "logger",
-    "merkle-tree",
-    "measure",
     "metrics",
     "netutil",
     "programs/bpf",
-    "programs/bpf_loader_api",
-    "programs/bpf_loader_program",
+    "programs/bpf_loader",
     "programs/budget_api",
     "programs/budget_program",
     "programs/config_api",
@@ -30,13 +25,9 @@ members = [
     "programs/exchange_api",
     "programs/exchange_program",
     "programs/failure_program",
-    "programs/move_loader_api",
-    "programs/move_loader_program",
-    "programs/librapay_api",
     "programs/noop_program",
     "programs/stake_api",
     "programs/stake_program",
-    "programs/stake_tests",
     "programs/storage_api",
     "programs/storage_program",
     "programs/token_api",
@@ -47,11 +38,7 @@ members = [
     "runtime",
     "sdk",
     "upload-perf",
-    "validator-info",
     "vote-signer",
     "wallet",
 ]
-
-exclude = [
-    "programs/bpf/rust/noop",
-]
+exclude = ["programs/bpf/rust/noop"]

39  README.md

@@ -30,40 +30,6 @@ Before you jump into the code, review the online book [Solana: Blockchain Rebuil
 
 (The _latest_ development version of the online book is also [available here](https://solana-labs.github.io/book-edge/).)
 
-Release Binaries
-===
-Official release binaries are available at [Github Releases](https://github.com/solana-labs/solana/releases).
-
-Additionally we provide pre-release binaries for the latest code on the edge and
-beta channels. Note that these pre-release binaries may be less stable than an
-official release.
-
-### Edge channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/edge/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/edge/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/edge/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/edge/solana-metrics.tar.bz2)
-
-### Beta channel
-#### Linux (x86_64-unknown-linux-gnu)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-unknown-linux-gnu.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-unknown-linux-gnu) as a stand-alone executable
-#### mac OS (x86_64-apple-darwin)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-apple-darwin.tar.bz2)
-* [solana-install-init](http://release.solana.com/beta/solana-install-init-x86_64-apple-darwin) as a stand-alone executable
-#### Windows (x86_64-pc-windows-msvc)
-* [solana.tar.bz2](http://release.solana.com/beta/solana-release-x86_64-pc-windows-msvc.tar.bz2)
-* [solana-install-init.exe](http://release.solana.com/beta/solana-install-init-x86_64-pc-windows-msvc.exe) as a stand-alone executable
-#### All platforms
-* [solana-metrics.tar.bz2](http://release.solana.com.s3.amazonaws.com/beta/solana-metrics.tar.bz2)
-
 Developing
 ===
 
@@ -127,9 +93,12 @@ Remote Testnets
 
 We maintain several testnets:
 
-* `testnet` - public stable testnet accessible via testnet.solana.com. Runs 24/7
+* `testnet` - public stable testnet accessible via testnet.solana.com, with an https proxy for web apps at api.testnet.solana.com. Runs 24/7
 * `testnet-beta` - public beta channel testnet accessible via beta.testnet.solana.com. Runs 24/7
 * `testnet-edge` - public edge channel testnet accessible via edge.testnet.solana.com. Runs 24/7
+* `testnet-perf` - permissioned stable testnet running a 24/7 soak test
+* `testnet-beta-perf` - permissioned beta channel testnet running a multi-hour soak test weekday mornings
+* `testnet-edge-perf` - permissioned edge channel testnet running a multi-hour soak test weekday mornings
 
 ## Deploy process
 

33  RELEASE.md

@@ -61,7 +61,7 @@ There are three release channels that map to branches as follows:
 
 ## Release Steps
 
-### Creating a new branch from master
+### Advance the Channels
 
 #### Create the new branch
 1. Pick your branch point for release on master.
@@ -84,12 +84,6 @@ There are three release channels that map to branches as follows:
 At this point, `ci/channel-info.sh` should show your freshly cut release branch as
 "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
 
-### Update documentation
-
-Document the new recommended version by updating
-```export SOLANA_RELEASE=[new scheduled TESTNET_TAG value]```
-in book/src/testnet-participation.md on the release (beta) branch.
-
 ### Make the Release
 
 We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.
@@ -112,25 +106,6 @@ We use [github's Releases UI](https://github.com/solana-labs/solana/releases) fo
 1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the
    release branch.
 
-### Publish updated Book
-We maintain three copies of the "book" as official documentation:
-
-1) "Book" is the documentation for the latest official release. This should get manually updated whenever a new release is made. It is published here:
-https://solana-labs.github.io/book/
-
-2) "Book-edge" tracks the tip of the master branch and updates automatically.
-https://solana-labs.github.io/book-edge/
-
-3) "Book-beta" tracks the tip of the beta branch and updates automatically.
-https://solana-labs.github.io/book-beta/
-
-To manually trigger an update of the "Book", create a new job of the manual-update-book pipeline.
-Set the tag of the latest release as the PUBLISH_BOOK_TAG environment variable.
-```bash
-PUBLISH_BOOK_TAG=v0.16.6
-```
-https://buildkite.com/solana-labs/manual-update-book
-
 ### Update software on testnet.solana.com
 
 The testnet running on testnet.solana.com is set to use a fixed release tag
@@ -170,6 +145,12 @@ TESTNET_TAG=[same value as used in TESTNET_TAG in the schedules]
 TESTNET_OP=create-and-start
 ```
 
+#### Update documentation
+
+Document the new recommended version by updating
+```export SOLANA_RELEASE=[new scheduled TESTNET_TAG value]```
+in book/src/testnet-participation.md for both edge and beta channel branches.
+
 ### Alert the community
 
 Notify Discord users on #validator-support that a new release for

1  bench-exchange/.gitignore  (vendored)

@@ -1,4 +1,3 @@
 /target/
 /config/
 /config-local/
-/farf/

@@ -2,41 +2,39 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.17.2"
+version = "0.15.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
-publish = false
 
 [dependencies]
-bincode = "1.1.4"
 bs58 = "0.2.0"
 clap = "2.32.0"
-env_logger = "0.6.2"
+bincode = "1.1.4"
+env_logger = "0.6.0"
 itertools = "0.8.0"
-log = "0.4.7"
-num-derive = "0.2"
+log = "0.4.6"
 num-traits = "0.2"
+num-derive = "0.2"
 rand = "0.6.5"
-rayon = "1.1.0"
-serde = "1.0.97"
-serde_derive = "1.0.97"
-serde_json = "1.0.40"
-serde_yaml = "0.8.9"
+rayon = "1.0.3"
+serde = "1.0.91"
+serde_derive = "1.0.91"
+serde_json = "1.0.38"
 # solana-runtime = { path = "../solana/runtime"}
-solana = { path = "../core", version = "0.17.2" }
-solana-client = { path = "../client", version = "0.17.2" }
-solana-drone = { path = "../drone", version = "0.17.2" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.17.2" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.17.2" }
-solana-logger = { path = "../logger", version = "0.17.2" }
-solana-metrics = { path = "../metrics", version = "0.17.2" }
-solana-netutil = { path = "../netutil", version = "0.17.2" }
-solana-runtime = { path = "../runtime", version = "0.17.2" }
-solana-sdk = { path = "../sdk", version = "0.17.2" }
-untrusted = "0.7.0"
+solana = { path = "../core", version = "0.15.0" }
+solana-client = { path = "../client", version = "0.15.0" }
+solana-drone = { path = "../drone", version = "0.15.0" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.15.0" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.15.0" }
+solana-logger = { path = "../logger", version = "0.15.0" }
+solana-metrics = { path = "../metrics", version = "0.15.0" }
+solana-netutil = { path = "../netutil", version = "0.15.0" }
+solana-runtime = { path = "../runtime", version = "0.15.0" }
+solana-sdk = { path = "../sdk", version = "0.15.0" }
 ws = "0.8.1"
+untrusted = "0.6.2"
 
 [features]
 cuda = ["solana/cuda"]
+erasure = []

@@ -6,10 +6,10 @@ learn how to start and interact with the exchange.
 
 ### Table of Contents
 [Overview](#Overview)<br>
-[Premise](#Premise)<br>
+[Premiss](#Premiss)<br>
 [Exchange startup](#Exchange-startup)<br>
-[Order Requests](#Trade-requests)<br>
-[Order Cancellations](#Trade-cancellations)<br>
+[Trade requests](#Trade-requests)<br>
+[Trade cancellations](#Trade-cancellations)<br>
 [Trade swap](#Trade-swap)<br>
 [Exchange program operations](#Exchange-program-operations)<br>
 [Quotes and OHLCV](#Quotes-and-OHLCV)<br>
@@ -22,9 +22,9 @@ An exchange is a marketplace where one asset can be traded for another. This
 demo demonstrates one way to host an exchange on the Solana blockchain by
 emulating a currency exchange.
 
-The assets are virtual tokens held by investors who may post order requests to
+The assets are virtual tokens held by investors who may post trade requests to
 the exchange. A Swapper monitors the exchange and posts swap requests for
-matching orders. All the transactions can execute concurrently.
+matching trade orders. All the transactions can execute concurrently.
 
 ## Premise
 
@@ -59,43 +59,43 @@ matching orders. All the transactions can execute concurrently.
     ratios are represented as fixed point numbers. The fixed point scaler is
     defined in
     [exchange_state.rs](https://github.com/solana-labs/solana/blob/c2fdd1362a029dcf89c8907c562d2079d977df11/programs/exchange_api/src/exchange_state.rs#L7)
-- Order request
+- Trade request
   - A Solana transaction executed by the exchange requesting the trade of one
-    type of token for another. order requests are made up of the token pair,
+    type of token for another. Trade requests are made up of the token pair,
     the direction of the trade, quantity of the primary token, the price ratio,
     and the two token accounts to be credited/deducted. An example trade
     request looks like "T AB 5 2" which reads "Exchange 5 A tokens to B tokens
     at a price ratio of 1:2" A fulfilled trade would result in 5 A tokens
     deducted and 10 B tokens credited to the trade initiator's token accounts.
-    Successful order requests result in an order.
-- Order
-  - The result of a successful order request. orders are stored in
-    accounts owned by the submitter of the order request. They can only be
+    Successful trade requests result in a trade order.
+- Trade order
+  - The result of a successful trade request. Trade orders are stored in
+    accounts owned by the submitter of the trade request. They can only be
     canceled by their owner but can be used by anyone in a trade swap. They
-    contain the same information as the order request.
+    contain the same information as the trade request.
 - Price spread
-  - The difference between the two matching orders. The spread is the
+  - The difference between the two matching trade orders. The spread is the
     profit of the Swapper initiating the swap request.
 - Swap requirements
   - Policies that result in a successful trade swap.
 - Swap request
-  - A request to exchange tokens between to orders
+  - A request to exchange tokens between to trade orders
 - Trade swap
-  - A successful trade. A swap consists of two matching orders that meet
+  - A successful trade. A swap consists of two matching trade orders that meet
     swap requirements. A trade swap may not wholly satisfy one or both of the
-    orders in which case the orders are adjusted appropriately. As
+    trade orders in which case the trade orders are adjusted appropriately. As
     long as the swap requirements are met there will be an exchange of tokens
     between accounts. Any price spread is deposited into the Swapper's profit
     account. All trade swaps are recorded in a new account for posterity.
 - Investor
   - Individual investors who hold a number of tokens and wish to trade them on
     the exchange. Investors operate as Solana thin clients who own a set of
-    accounts containing tokens and/or order requests. Investors post
+    accounts containing tokens and/or trade requests. Investors post
     transactions to the exchange in order to request tokens and post or cancel
-    order requests.
+    trade requests.
 - Swapper
   - An agent who facilitates trading between investors. Swappers operate as
-    Solana thin clients who monitor all the orders looking for a trade
+    Solana thin clients who monitor all the trade orders looking for a trade
     match. Once found, the Swapper issues a swap request to the exchange.
     Swappers are the engine of the exchange and are rewarded for their efforts by
     accumulating the price spreads of the swaps they initiate. Swappers also
@@ -123,7 +123,7 @@ the investors that trades submitted after that point will be analyzed. <!--This
 is not ideal, and instead investors should be able to submit trades at any time,
 and the Swapper could come and go without missing a trade. One way to achieve
 this is for the Swapper to read the current state of all accounts looking for all
-open orders.-->
+open trade orders.-->
 
 Investors will initially query the exchange to discover their current balance
 for each type of token. If the investor does not already have an account for
@@ -181,19 +181,19 @@ pub enum ExchangeInstruction {
 }
 ```
 
-## Order Requests
+## Trade requests
 
 When an investor decides to exchange a token of one type for another, they
-submit a transaction to the Solana Blockchain containing an order request, which,
-if successful, is turned into an order. orders do not expire but are
-cancellable. <!-- orders should have a timestamp to enable trade
-expiration --> When an order is created, tokens are deducted from a token
-account and the order acts as an escrow. The tokens are held until the
-order is fulfilled or canceled. If the direction is `To`, then the number
+submit a transaction to the Solana Blockchain containing a trade request, which,
+if successful, is turned into a trade order. Trade orders do not expire but are
+cancellable. <!-- Trade orders should have a timestamp to enable trade
+expiration --> When a trade order is created, tokens are deducted from a token
+account and the trade order acts as an escrow. The tokens are held until the
+trade order is fulfilled or canceled. If the direction is `To`, then the number
 of `tokens` are deducted from the primary account, if `From` then `tokens`
-multiplied by `price` are deducted from the secondary account. orders are
+multiplied by `price` are deducted from the secondary account. Trade orders are
 no longer valid when the number of `tokens` goes to zero, at which point they
-can no longer be used. <!-- Could support refilling orders, so order
+can no longer be used. <!-- Could support refilling trade orders, so trade order
 accounts are refilled rather than accumulating -->
 
 ```rust
@@ -205,7 +205,7 @@ pub enum Direction {
     From,
 }
 
-pub struct OrderRequestInfo {
+pub struct TradeRequestInfo {
     /// Direction of trade
     pub direction: Direction,
 
@@ -224,7 +224,7 @@ pub struct OrderRequestInfo {
 }
 
 pub enum ExchangeInstruction {
-    /// order request
+    /// Trade request
     /// key 0 - Signer
     /// key 1 - Account in which to record the swap
     /// key 2 - Token account associated with this trade
@@ -233,7 +233,7 @@ pub enum ExchangeInstruction {
 
 /// Trade accounts are populated with this structure
 pub struct TradeOrderInfo {
-    /// Owner of the order
+    /// Owner of the trade order
     pub owner: Pubkey,
     /// Direction of the exchange
     pub direction: Direction,
@@ -252,7 +252,7 @@ pub struct TradeOrderInfo {
 }
 ```
 
-## Order cancellations
+## Trade cancellations
 
 An investor may cancel a trade at anytime, but only trades they own. If the
 cancellation is successful, any tokens held in escrow are returned to the
@@ -260,9 +260,9 @@ account from which they came.
 
 ```rust
 pub enum ExchangeInstruction {
-    /// order cancellation
+    /// Trade cancellation
     /// key 0 - Signer
-    /// key 1 -order to cancel
+    /// key 1 -Trade order to cancel
    TradeCancellation,
 }
 ```
@@ -270,14 +270,14 @@ pub enum ExchangeInstruction {
 ## Trade swaps
 
 The Swapper is monitoring the accounts assigned to the exchange program and
-building a trade-order table. The order table is used to identify
-matching orders which could be fulfilled. When a match is found the
+building a trade-order table. The trade order table is used to identify
+matching trade orders which could be fulfilled. When a match is found the
 Swapper should issue a swap request. Swap requests may not satisfy the entirety
 of either order, but the exchange will greedily fulfill it. Any leftover tokens
-in either account will keep the order valid for further swap requests in
+in either account will keep the trade order valid for further swap requests in
 the future.
 
-Matching orders are defined by the following swap requirements:
+Matching trade orders are defined by the following swap requirements:
 
 - Opposite polarity (one `To` and one `From`)
 - Operate on the same token pair
@@ -379,8 +379,8 @@ pub enum ExchangeInstruction {
     /// Trade swap request
     /// key 0 - Signer
     /// key 1 - Account in which to record the swap
-    /// key 2 - 'To' order
-    /// key 3 - `From` order
+    /// key 2 - 'To' trade order
+    /// key 3 - `From` trade order
     /// key 4 - Token account associated with the To Trade
     /// key 5 - Token account associated with From trade
     /// key 6 - Token account in which to deposit the Swappers profit from the swap.
@@ -391,9 +391,9 @@ pub enum ExchangeInstruction {
 pub struct TradeSwapInfo {
     /// Pair swapped
     pub pair: TokenPair,
-    /// `To` order
+    /// `To` trade order
     pub to_trade_order: Pubkey,
-    /// `From` order
+    /// `From` trade order
     pub from_trade_order: Pubkey,
     /// Number of primary tokens exchanged
     pub primary_tokens: u64,
@@ -424,22 +424,22 @@ pub enum ExchangeInstruction {
     /// the exchange has a limitless number of tokens it can transfer.
     TransferRequest(Token, u64),
 
-    /// order request
+    /// Trade request
     /// key 0 - Signer
     /// key 1 - Account in which to record the swap
     /// key 2 - Token account associated with this trade
     TradeRequest(TradeRequestInfo),
 
-    /// order cancellation
+    /// Trade cancellation
     /// key 0 - Signer
-    /// key 1 -order to cancel
+    /// key 1 -Trade order to cancel
     TradeCancellation,
 
     /// Trade swap request
     /// key 0 - Signer
     /// key 1 - Account in which to record the swap
-    /// key 2 - 'To' order
-    /// key 3 - `From` order
+    /// key 2 - 'To' trade order
+    /// key 3 - `From` trade order
     /// key 4 - Token account associated with the To Trade
     /// key 5 - Token account associated with From trade
     /// key 6 - Token account in which to deposit the Swappers profit from the swap.
@@ -478,3 +478,6 @@ To also see the cluster messages:
 ```bash
 $ RUST_LOG=solana_bench_exchange=info,solana=info cargo test --release -- --nocapture test_exchange_local_cluster
 ```
+
+
+

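The definitions in the README diff above describe prices as fixed-point ratios whose scaler is defined in the linked exchange_state.rs. A worked sketch of that arithmetic for the "T AB 5 2" example; the scaler value 1000 here is an assumption for illustration only, so check the linked source for the real constant:

```rust
// Worked sketch of the fixed-point price ratio described above. The real
// scaler lives in programs/exchange_api/src/exchange_state.rs (linked in
// the text); SCALER = 1_000 is assumed for illustration.
const SCALER: u64 = 1_000;

/// Secondary tokens moved for `tokens` primary tokens at a fixed-point
/// `price` ratio, where price = ratio * SCALER.
fn secondary_tokens(tokens: u64, price: u64) -> u64 {
    tokens * price / SCALER
}

fn main() {
    // "T AB 5 2": exchange 5 A tokens at a 1:2 ratio -> 10 B tokens
    // credited, matching the example in the definitions above.
    let price = 2 * SCALER;
    assert_eq!(secondary_tokens(5, price), 10);
}
```
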
@@ -20,12 +20,9 @@ use solana_sdk::system_instruction;
 use solana_sdk::timing::{duration_as_ms, duration_as_s};
 use solana_sdk::transaction::Transaction;
 use std::cmp;
-use std::collections::{HashMap, VecDeque};
-use std::fs::File;
-use std::io::prelude::*;
+use std::collections::VecDeque;
 use std::mem;
 use std::net::SocketAddr;
-use std::path::Path;
 use std::process::exit;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::mpsc::{channel, Receiver, Sender};
@@ -51,8 +48,6 @@ pub struct Config {
     pub batch_size: usize,
     pub chunk_size: usize,
     pub account_groups: usize,
-    pub client_ids_and_stake_file: String,
-    pub read_from_client_file: bool,
 }
 
 impl Default for Config {
@@ -66,38 +61,10 @@ impl Default for Config {
             batch_size: 10,
             chunk_size: 10,
             account_groups: 100,
-            client_ids_and_stake_file: String::new(),
-            read_from_client_file: false,
         }
     }
 }
 
-pub fn create_client_accounts_file(
-    client_ids_and_stake_file: &str,
-    batch_size: usize,
-    account_groups: usize,
-    fund_amount: u64,
-) {
-    let accounts_in_groups = batch_size * account_groups;
-    const NUM_KEYPAIR_GROUPS: u64 = 2;
-    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
-
-    let keypairs = generate_keypairs(total_keys);
-
-    let mut accounts = HashMap::new();
-    keypairs.iter().for_each(|keypair| {
-        accounts.insert(
-            serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
-            fund_amount,
-        );
-    });
-
-    let serialized = serde_yaml::to_string(&accounts).unwrap();
-    let path = Path::new(&client_ids_and_stake_file);
-    let mut file = File::create(path).unwrap();
-    file.write_all(&serialized.into_bytes()).unwrap();
-}
-
 pub fn do_bench_exchange<T>(clients: Vec<T>, config: Config)
 where
     T: 'static + Client + Send + Sync,
@@ -111,8 +78,6 @@ where
         batch_size,
         chunk_size,
         account_groups,
-        client_ids_and_stake_file,
-        read_from_client_file,
     } = config;
 
     info!(
@@ -127,55 +92,35 @@ where
     );
 
     let accounts_in_groups = batch_size * account_groups;
-    const NUM_KEYPAIR_GROUPS: u64 = 2;
-    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
-
-    let mut signer_keypairs = if read_from_client_file {
-        let path = Path::new(&client_ids_and_stake_file);
-        let file = File::open(path).unwrap();
-
-        let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
-        accounts
-            .into_iter()
-            .map(|(keypair, _)| {
-                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
-                Keypair::from_bytes(&bytes).unwrap()
-            })
-            .collect()
-    } else {
-        info!("Generating {:?} signer keys", total_keys);
-        generate_keypairs(total_keys)
-    };
-
-    let trader_signers: Vec<_> = signer_keypairs
-        .drain(0..accounts_in_groups)
-        .map(Arc::new)
-        .collect();
-    let swapper_signers: Vec<_> = signer_keypairs
-        .drain(0..accounts_in_groups)
-        .map(Arc::new)
-        .collect();
-
+    let exit_signal = Arc::new(AtomicBool::new(false));
     let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
     let client = clients[0].as_ref();
 
-    if !read_from_client_file {
+    const NUM_KEYPAIR_GROUPS: u64 = 4;
+    let total_keys = accounts_in_groups as u64 * NUM_KEYPAIR_GROUPS;
+    info!("Generating {:?} keys", total_keys);
+    let mut keypairs = generate_keypairs(total_keys);
+    let trader_signers: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(Arc::new)
+        .collect();
+    let swapper_signers: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(Arc::new)
+        .collect();
+    let src_pubkeys: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(|keypair| keypair.pubkey())
+        .collect();
+    let profit_pubkeys: Vec<_> = keypairs
+        .drain(0..accounts_in_groups)
+        .map(|keypair| keypair.pubkey())
+        .collect();
+
     info!("Fund trader accounts");
     fund_keys(client, &identity, &trader_signers, fund_amount);
     info!("Fund swapper accounts");
     fund_keys(client, &identity, &swapper_signers, fund_amount);
-    }
-
-    info!("Generating {:?} account keys", total_keys);
-    let mut account_keypairs = generate_keypairs(total_keys);
-    let src_pubkeys: Vec<_> = account_keypairs
-        .drain(0..accounts_in_groups)
-        .map(|keypair| keypair.pubkey())
-        .collect();
-    let profit_pubkeys: Vec<_> = account_keypairs
-        .drain(0..accounts_in_groups)
-        .map(|keypair| keypair.pubkey())
-        .collect();
 
     info!("Create {:?} source token accounts", src_pubkeys.len());
     create_token_accounts(client, &trader_signers, &src_pubkeys);
@@ -191,7 +136,6 @@ where
         transfer_delay
     );
 
-    let exit_signal = Arc::new(AtomicBool::new(false));
     let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
     let total_txs_sent_count = Arc::new(AtomicUsize::new(0));
     let s_threads: Vec<_> = (0..threads)
@@ -332,7 +276,7 @@ fn do_tx_transfers<T>(
 
 struct TradeInfo {
     trade_account: Pubkey,
-    order_info: OrderInfo,
+    order_info: TradeOrderInfo,
 }
 #[allow(clippy::too_many_arguments)]
 fn swapper<T>(
@@ -509,7 +453,7 @@ fn trader<T>(
     T: Client,
 {
     // TODO Hard coded for now
-    let pair = AssetPair::default();
+    let pair = TokenPair::AB;
     let tokens = 1;
     let price = 1000;
     let mut account_group: usize = 0;
@@ -538,7 +482,7 @@ fn trader<T>(
         } else {
            Direction::To
         };
-        let order_info = OrderInfo {
+        let order_info = TradeOrderInfo {
            /// Owner of the trade order
            owner: Pubkey::default(), // don't care
            direction,
@@ -646,20 +590,6 @@ where
    false
 }
 
-fn verify_funding_transfer<T: SyncClient + ?Sized>(
-    client: &T,
-    tx: &Transaction,
-    amount: u64,
-) -> bool {
-    for a in &tx.message().account_keys[1..] {
-        if client.get_balance(a).unwrap_or(0) >= amount {
-            return true;
-        }
-    }
-
-    false
-}
-
 pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
     let total = lamports * (dests.len() as u64 + 1);
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
@@ -717,7 +647,6 @@ pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamp
             .collect();
 
         let mut retries = 0;
-        let amount = chunk[0].1[0].1;
         while !to_fund_txs.is_empty() {
             let receivers = to_fund_txs
                 .iter()
@@ -746,7 +675,7 @@ pub fn fund_keys(client: &Client, source: &Keypair, dests: &[Arc<Keypair>], lamp
             let mut waits = 0;
             loop {
                 sleep(Duration::from_millis(200));
-                to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
+                to_fund_txs.retain(|(_, tx)| !verify_transfer(client, &tx));
                 if to_fund_txs.is_empty() {
                     break;
                 }
@@ -963,7 +892,7 @@ pub fn airdrop_lamports(client: &Client, drone_addr: &SocketAddr, id: &Keypair,
 #[cfg(test)]
 mod tests {
     use super::*;
-    use solana::gossip_service::{discover_cluster, get_multi_client};
+    use solana::gossip_service::{discover_cluster, get_clients};
     use solana::local_cluster::{ClusterConfig, LocalCluster};
     use solana::validator::ValidatorConfig;
     use solana_drone::drone::run_local_drone;
@@ -978,6 +907,7 @@ mod tests {
         solana_logger::setup();
 
        const NUM_NODES: usize = 1;
+        let validator_config = ValidatorConfig::default();
 
         let mut config = Config::default();
         config.identity = Keypair::new();
@@ -999,7 +929,7 @@ mod tests {
         let cluster = LocalCluster::new(&ClusterConfig {
             node_stakes: vec![100_000; NUM_NODES],
             cluster_lamports: 100_000_000_000_000,
-            validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
+            validator_config,
             native_instruction_processors: [solana_exchange_program!()].to_vec(),
             ..ClusterConfig::default()
         });
@@ -1022,20 +952,25 @@ mod tests {
             exit(1);
         });
 
-        let (client, num_clients) = get_multi_client(&nodes);
+        let clients = get_clients(&nodes);
 
-        info!("clients: {}", num_clients);
-        assert!(num_clients >= NUM_NODES);
+        if clients.len() < NUM_NODES {
+            error!(
+                "Error: Insufficient nodes discovered. Expecting {} or more",
+                NUM_NODES
+            );
+            exit(1);
+        }
 
         const NUM_SIGNERS: u64 = 2;
         airdrop_lamports(
-            &client,
+            &clients[0],
             &drone_addr,
             &config.identity,
             fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
         );
 
-        do_bench_exchange(vec![client], config);
+        do_bench_exchange(clients, config);
     }
 
     #[test]

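For context on the removed `create_client_accounts_file` path above: each keypair is JSON-encoded as its byte array and used as a YAML map key whose value is the funding amount, which is exactly what the deleted `read_from_client_file` branch reverses on load. A self-contained sketch of that round trip, assuming `serde_json` and `serde_yaml` are available (the helper names are illustrative):

```rust
// Sketch of the client-accounts file round trip removed above: keypair
// bytes become JSON-encoded YAML map keys, funding amount is the value.
// Assumes serde_json = "1" and serde_yaml = "0.8"; write_accounts and
// read_accounts are illustrative names, not from this repository.
use std::collections::HashMap;

fn write_accounts(keypair_bytes: &[Vec<u8>], fund_amount: u64) -> String {
    let mut accounts = HashMap::new();
    for bytes in keypair_bytes {
        // Mirrors serde_json::to_string(&keypair.to_bytes().to_vec())
        // in the removed create_client_accounts_file.
        accounts.insert(serde_json::to_string(bytes).unwrap(), fund_amount);
    }
    serde_yaml::to_string(&accounts).unwrap()
}

fn read_accounts(yaml: &str) -> Vec<Vec<u8>> {
    // Mirrors the removed read_from_client_file branch: parse the YAML
    // map, then JSON-decode each key back into keypair bytes.
    let accounts: HashMap<String, u64> = serde_yaml::from_str(yaml).unwrap();
    accounts
        .into_iter()
        .map(|(key, _)| serde_json::from_str(&key).unwrap())
        .collect()
}

fn main() {
    let keys = vec![vec![7u8; 64]];
    let yaml = write_accounts(&keys, 20_000);
    assert_eq!(read_accounts(&yaml), keys);
}
```
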
@@ -18,9 +18,6 @@ pub struct Config {
     pub batch_size: usize,
     pub chunk_size: usize,
     pub account_groups: usize,
-    pub client_ids_and_stake_file: String,
-    pub write_to_client_file: bool,
-    pub read_from_client_file: bool,
 }
 
 impl Default for Config {
@@ -37,9 +34,6 @@ impl Default for Config {
             batch_size: 100,
             chunk_size: 100,
             account_groups: 100,
-            client_ids_and_stake_file: String::new(),
-            write_to_client_file: false,
-            read_from_client_file: false,
         }
     }
 }
@@ -147,20 +141,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .default_value("10")
                 .help("Number of account groups to cycle for each batch"),
         )
-        .arg(
-            Arg::with_name("write-client-keys")
-                .long("write-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Generate client keys and stakes and write the list to YAML file"),
-        )
-        .arg(
-            Arg::with_name("read-client-keys")
-                .long("read-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Read client keys and stakes from the YAML file"),
-        )
 }
 
 pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
@@ -204,15 +184,5 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     args.account_groups = value_t!(matches.value_of("account-groups"), usize)
         .expect("Failed to parse account-groups");
 
-    if let Some(s) = matches.value_of("write-client-keys") {
-        args.write_to_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
-
-    if let Some(s) = matches.value_of("read-client-keys") {
-        assert!(!args.write_to_client_file);
-        args.read_from_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
     args
 }

@@ -6,9 +6,9 @@ pub mod order_book;
 #[macro_use]
 extern crate solana_exchange_program;
 
-use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
+use crate::bench::{airdrop_lamports, do_bench_exchange, Config};
 use log::*;
-use solana::gossip_service::{discover_cluster, get_multi_client};
+use solana::gossip_service::{discover_cluster, get_clients};
 use solana_sdk::signature::KeypairUtil;
 
 fn main() {
@@ -30,12 +30,33 @@ fn main() {
         batch_size,
         chunk_size,
         account_groups,
-        client_ids_and_stake_file,
-        write_to_client_file,
-        read_from_client_file,
         ..
     } = cli_config;
 
+    info!("Connecting to the cluster");
+    let (nodes, _replicators) =
+        discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
+            panic!("Failed to discover nodes");
+        });
+
+    let clients = get_clients(&nodes);
+
+    info!("{} nodes found", clients.len());
+    if clients.len() < num_nodes {
+        panic!("Error: Insufficient nodes discovered");
+    }
+
+    info!("Funding keypair: {}", identity.pubkey());
+
+    let accounts_in_groups = batch_size * account_groups;
+    const NUM_SIGNERS: u64 = 2;
+    airdrop_lamports(
+        &clients[0],
+        &drone_addr,
+        &identity,
+        fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
+    );
+
     let config = Config {
         identity,
         threads,
@@ -45,43 +66,7 @@ fn main() {
         batch_size,
         chunk_size,
         account_groups,
-        client_ids_and_stake_file,
-        read_from_client_file,
     };
 
-    if write_to_client_file {
-        create_client_accounts_file(
-            &config.client_ids_and_stake_file,
-            config.batch_size,
-            config.account_groups,
-            config.fund_amount,
-        );
-    } else {
-        info!("Connecting to the cluster");
-        let (nodes, _replicators) =
-            discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
-                panic!("Failed to discover nodes");
-            });
-
-        let (client, num_clients) = get_multi_client(&nodes);
-
-        info!("{} nodes found", num_clients);
-        if num_clients < num_nodes {
-            panic!("Error: Insufficient nodes discovered");
-        }
-
-        if !read_from_client_file {
-            info!("Funding keypair: {}", config.identity.pubkey());
-
-            let accounts_in_groups = batch_size * account_groups;
-            const NUM_SIGNERS: u64 = 2;
-            airdrop_lamports(
-                &client,
-                &drone_addr,
-                &config.identity,
-                fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
-            );
-        }
-        do_bench_exchange(vec![client], config);
-    }
+    do_bench_exchange(clients, config);
 }
@@ -10,7 +10,7 @@ use std::{error, fmt};
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub struct ToOrder {
     pub pubkey: Pubkey,
-    pub info: OrderInfo,
+    pub info: TradeOrderInfo,
 }
 
 impl Ord for ToOrder {
@@ -26,7 +26,7 @@ impl PartialOrd for ToOrder {
 #[derive(Clone, Debug, Eq, PartialEq)]
 pub struct FromOrder {
     pub pubkey: Pubkey,
-    pub info: OrderInfo,
+    pub info: TradeOrderInfo,
 }
 
 impl Ord for FromOrder {
@@ -95,7 +95,11 @@ impl OrderBook {
     // pub fn cancel(&mut self, pubkey: Pubkey) -> Result<(), Box<dyn error::Error>> {
     //     Ok(())
     // }
-    pub fn push(&mut self, pubkey: Pubkey, info: OrderInfo) -> Result<(), Box<dyn error::Error>> {
+    pub fn push(
+        &mut self,
+        pubkey: Pubkey,
+        info: TradeOrderInfo,
+    ) -> Result<(), Box<dyn error::Error>> {
         check_trade(info.direction, info.tokens, info.price)?;
         match info.direction {
             Direction::To => {
bench-streamer/.gitignore (vendored, 1 change)
@@ -1,2 +1 @@
 /target/
-/farf/
@@ -2,17 +2,17 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.17.2"
+version = "0.15.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 
 [dependencies]
 clap = "2.33.0"
-solana = { path = "../core", version = "0.17.2" }
-solana-logger = { path = "../logger", version = "0.17.2" }
-solana-netutil = { path = "../netutil", version = "0.17.2" }
+solana = { path = "../core", version = "0.15.0" }
+solana-logger = { path = "../logger", version = "0.15.0" }
+solana-netutil = { path = "../netutil", version = "0.15.0" }
 
 [features]
 cuda = ["solana/cuda"]
+erasure = []
@@ -1,5 +1,4 @@
 use clap::{crate_description, crate_name, crate_version, App, Arg};
-use solana::packet::PacketsRecycler;
 use solana::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
 use solana::result::Result;
 use solana::streamer::{receiver, PacketReceiver};
@@ -17,7 +16,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
     let send = UdpSocket::bind("0.0.0.0:0").unwrap();
     let mut msgs = Packets::default();
     msgs.packets.resize(10, Packet::default());
-    for w in msgs.packets.iter_mut() {
+    for w in &mut msgs.packets {
         w.meta.size = PACKET_DATA_SIZE;
         w.meta.set_addr(&addr);
     }
@@ -75,7 +74,6 @@ fn main() -> Result<()> {
 
     let mut read_channels = Vec::new();
     let mut read_threads = Vec::new();
-    let recycler = PacketsRecycler::default();
     for _ in 0..num_sockets {
         let read = solana_netutil::bind_to(port, false).unwrap();
         read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
@@ -85,13 +83,7 @@ fn main() -> Result<()> {
 
         let (s_reader, r_reader) = channel();
         read_channels.push(r_reader);
-        read_threads.push(receiver(
-            Arc::new(read),
-            &exit,
-            s_reader,
-            recycler.clone(),
-            "bench-streamer-test",
-        ));
+        read_threads.push(receiver(Arc::new(read), &exit, s_reader));
     }
 
     let t_producer1 = producer(&addr, exit.clone());
bench-tps/.gitignore (vendored, 1 change)
@@ -1,4 +1,3 @@
 /target/
 /config/
 /config-local/
-/farf/
@@ -2,33 +2,25 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.17.2"
+version = "0.15.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 
 [dependencies]
-bincode = "1.1.4"
 clap = "2.33.0"
-log = "0.4.7"
-rayon = "1.1.0"
-serde = "1.0.97"
-serde_derive = "1.0.97"
-serde_json = "1.0.40"
-serde_yaml = "0.8.9"
-solana = { path = "../core", version = "0.17.2" }
-solana-client = { path = "../client", version = "0.17.2" }
-solana-drone = { path = "../drone", version = "0.17.2" }
-solana-librapay-api = { path = "../programs/librapay_api", version = "0.17.2" }
-solana-logger = { path = "../logger", version = "0.17.2" }
-solana-metrics = { path = "../metrics", version = "0.17.2" }
-solana-measure = { path = "../measure", version = "0.17.2" }
-solana-netutil = { path = "../netutil", version = "0.17.2" }
-solana-runtime = { path = "../runtime", version = "0.17.2" }
-solana-sdk = { path = "../sdk", version = "0.17.2" }
-solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.17.2" }
-solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.17.2" }
+log = "0.4.6"
+rayon = "1.0.3"
+serde_json = "1.0.39"
+solana = { path = "../core", version = "0.15.0" }
+solana-client = { path = "../client", version = "0.15.0" }
+solana-drone = { path = "../drone", version = "0.15.0" }
+solana-logger = { path = "../logger", version = "0.15.0" }
+solana-metrics = { path = "../metrics", version = "0.15.0" }
+solana-netutil = { path = "../netutil", version = "0.15.0" }
+solana-runtime = { path = "../runtime", version = "0.15.0" }
+solana-sdk = { path = "../sdk", version = "0.15.0" }
 
 [features]
 cuda = ["solana/cuda"]
+erasure = []
@@ -1,17 +1,13 @@
 use solana_metrics;
 
-use bincode;
 use log::*;
 use rayon::prelude::*;
 use solana::gen_keys::GenKeys;
 use solana_client::perf_utils::{sample_txs, SampleStats};
 use solana_drone::drone::request_airdrop_transaction;
-use solana_librapay_api::{create_genesis, upload_mint_program, upload_payment_program};
-use solana_measure::measure::Measure;
 use solana_metrics::datapoint_info;
 use solana_sdk::client::Client;
 use solana_sdk::hash::Hash;
-use solana_sdk::pubkey::Pubkey;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::system_instruction;
 use solana_sdk::system_transaction;
@@ -21,6 +17,7 @@ use solana_sdk::transaction::Transaction;
 use std::cmp;
 use std::collections::VecDeque;
 use std::net::SocketAddr;
+use std::process::exit;
 use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
 use std::sync::{Arc, RwLock};
 use std::thread::sleep;
@@ -28,17 +25,8 @@ use std::thread::Builder;
 use std::time::Duration;
 use std::time::Instant;
 
-use solana_librapay_api::librapay_transaction;
-
-pub const MAX_SPENDS_PER_TX: u64 = 4;
-pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 128;
-
-#[derive(Debug)]
-pub enum BenchTpsError {
-    AirdropFailure,
-}
-
-pub type Result<T> = std::result::Result<T, BenchTpsError>;
+pub const MAX_SPENDS_PER_TX: usize = 4;
+pub const NUM_LAMPORTS_PER_ACCOUNT: u64 = 20;
 
 pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
 
@@ -49,7 +37,6 @@ pub struct Config {
     pub duration: Duration,
     pub tx_count: usize,
     pub sustained: bool,
-    pub use_move: bool,
 }
 
 impl Default for Config {
@@ -61,19 +48,15 @@ impl Default for Config {
             duration: Duration::new(std::u64::MAX, 0),
             tx_count: 500_000,
             sustained: false,
-            use_move: false,
         }
     }
 }
 
-type LibraKeys = (Keypair, Pubkey, Pubkey, Vec<Keypair>);
-
 pub fn do_bench_tps<T>(
     clients: Vec<T>,
     config: Config,
     gen_keypairs: Vec<Keypair>,
     keypair0_balance: u64,
-    libra_args: Option<LibraKeys>,
 ) -> u64
 where
     T: 'static + Client + Send + Sync,
@@ -85,7 +68,6 @@ where
         duration,
         tx_count,
        sustained,
-        ..
     } = config;
 
     let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
@@ -177,7 +159,6 @@ where
                 &keypairs[len..],
                 threads,
                 reclaim_lamports_back_to_source_account,
-                &libra_args,
             );
             // In sustained mode overlap the transfers with generation
             // this has higher average performance but lower peak performance
@@ -234,74 +215,6 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
     );
 }
 
-fn generate_move_txs(
-    source: &[Keypair],
-    dest: &[Keypair],
-    reclaim: bool,
-    move_keypairs: &[Keypair],
-    libra_pay_program_id: &Pubkey,
-    libra_mint_id: &Pubkey,
-    blockhash: &Hash,
-) -> Vec<(Transaction, u64)> {
-    let count = move_keypairs.len() / 2;
-    let source_move = &move_keypairs[..count];
-    let dest_move = &move_keypairs[count..];
-    let pairs: Vec<_> = if !reclaim {
-        source_move
-            .iter()
-            .zip(dest_move.iter())
-            .zip(source.iter())
-            .collect()
-    } else {
-        dest_move
-            .iter()
-            .zip(source_move.iter())
-            .zip(dest.iter())
-            .collect()
-    };
-
-    pairs
-        .par_iter()
-        .map(|((from, to), payer)| {
-            (
-                librapay_transaction::transfer(
-                    libra_pay_program_id,
-                    libra_mint_id,
-                    &payer,
-                    &from,
-                    &to.pubkey(),
-                    1,
-                    *blockhash,
-                ),
-                timestamp(),
-            )
-        })
-        .collect()
-}
-
-fn generate_system_txs(
-    source: &[Keypair],
-    dest: &[Keypair],
-    reclaim: bool,
-    blockhash: &Hash,
-) -> Vec<(Transaction, u64)> {
-    let pairs: Vec<_> = if !reclaim {
-        source.iter().zip(dest.iter()).collect()
-    } else {
-        dest.iter().zip(source.iter()).collect()
-    };
-
-    pairs
-        .par_iter()
-        .map(|(from, to)| {
-            (
-                system_transaction::create_user_account(from, &to.pubkey(), 1, *blockhash),
-                timestamp(),
-            )
-        })
-        .collect()
-}
-
 fn generate_txs(
     shared_txs: &SharedTransactions,
     blockhash: &Hash,
@@ -309,31 +222,25 @@ fn generate_txs(
     dest: &[Keypair],
     threads: usize,
     reclaim: bool,
-    libra_args: &Option<LibraKeys>,
 ) {
     let tx_count = source.len();
     println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
     let signing_start = Instant::now();
 
-    let transactions = if let Some((
-        libra_genesis_keypair,
-        libra_pay_program_id,
-        _libra_mint_program_id,
-        libra_keys,
-    )) = libra_args
-    {
-        generate_move_txs(
-            source,
-            dest,
-            reclaim,
-            &libra_keys,
-            libra_pay_program_id,
-            &libra_genesis_keypair.pubkey(),
-            blockhash,
-        )
+    let pairs: Vec<_> = if !reclaim {
+        source.iter().zip(dest.iter()).collect()
     } else {
-        generate_system_txs(source, dest, reclaim, blockhash)
+        dest.iter().zip(source.iter()).collect()
     };
+    let transactions: Vec<_> = pairs
+        .par_iter()
+        .map(|(id, keypair)| {
+            (
+                system_transaction::create_user_account(id, &keypair.pubkey(), 1, *blockhash),
+                timestamp(),
+            )
+        })
+        .collect();
 
     let duration = signing_start.elapsed();
     let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@@ -428,17 +335,10 @@ fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64)
 /// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
 /// on every iteration. This allows us to replay the transfers because the source is either empty,
 /// or full
-pub fn fund_keys<T: Client>(
-    client: &T,
-    source: &Keypair,
-    dests: &[Keypair],
-    total: u64,
-    max_fee: u64,
-    mut extra: u64,
-) {
+pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Keypair], lamports: u64) {
+    let total = lamports * dests.len() as u64;
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
     let mut notfunded: Vec<&Keypair> = dests.iter().collect();
-    let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
 
     println!("funding keys {}", dests.len());
     while !notfunded.is_empty() {
@@ -446,13 +346,12 @@ pub fn fund_keys<T: Client>(
         let mut to_fund = vec![];
         println!("creating from... {}", funded.len());
         for f in &mut funded {
-            let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
+            let max_units = cmp::min(notfunded.len(), MAX_SPENDS_PER_TX);
             if max_units == 0 {
                 break;
             }
-            let start = notfunded.len() - max_units as usize;
-            let fees = if extra > 0 { max_fee } else { 0 };
-            let per_unit = (f.1 - lamports_per_account - fees) / max_units;
+            let start = notfunded.len() - max_units;
+            let per_unit = f.1 / (max_units as u64);
             let moves: Vec<_> = notfunded[start..]
                 .iter()
                 .map(|k| (k.pubkey(), per_unit))
@@ -464,7 +363,6 @@ pub fn fund_keys<T: Client>(
             if !moves.is_empty() {
                 to_fund.push((f.0, moves));
             }
-            extra -= 1;
         }
 
         // try to transfer a "few" at a time with recent blockhash
@@ -479,10 +377,13 @@ pub fn fund_keys<T: Client>(
             let mut to_fund_txs: Vec<_> = chunk
                 .par_iter()
                 .map(|(k, m)| {
-                    let tx = Transaction::new_unsigned_instructions(
-                        system_instruction::transfer_many(&k.pubkey(), &m),
-                    );
-                    (k.clone(), tx)
+                    (
+                        k.clone(),
+                        Transaction::new_unsigned_instructions(system_instruction::transfer_many(
+                            &k.pubkey(),
+                            &m,
+                        )),
+                    )
                 })
                 .collect();
 
@@ -541,7 +442,7 @@ pub fn airdrop_lamports<T: Client>(
     drone_addr: &SocketAddr,
     id: &Keypair,
     tx_count: u64,
-) -> Result<()> {
+) {
     let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
     metrics_submit_lamport_balance(starting_balance);
     println!("starting balance {}", starting_balance);
@@ -590,10 +491,9 @@ pub fn airdrop_lamports<T: Client>(
                 current_balance,
                 starting_balance
             );
-            return Err(BenchTpsError::AirdropFailure);
+            exit(1);
         }
     }
-    Ok(())
 }
 
 fn compute_and_report_stats(
@@ -670,172 +570,31 @@ fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
     i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
 }
 
-pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
+pub fn generate_keypairs(seed_keypair: &Keypair, count: usize) -> Vec<Keypair> {
     let mut seed = [0u8; 32];
     seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
     let mut rnd = GenKeys::new(seed);
 
     let mut total_keys = 0;
-    let mut extra = 0; // This variable tracks the number of keypairs needing extra transaction fees funded
-    let mut delta = 1;
-    while total_keys < count {
-        extra += delta;
-        delta *= MAX_SPENDS_PER_TX;
-        total_keys += delta;
+    let mut target = count;
+    while target > 1 {
+        total_keys += target;
+        // Use the upper bound for this division otherwise it may not generate enough keys
+        target = (target + MAX_SPENDS_PER_TX - 1) / MAX_SPENDS_PER_TX;
     }
-    (rnd.gen_n_keypairs(total_keys), extra)
-}
-
-fn fund_move_keys<T: Client>(
-    client: &T,
-    funding_key: &Keypair,
-    keypairs: &[Keypair],
-    total: u64,
-    libra_pay_program_id: &Pubkey,
-    libra_mint_program_id: &Pubkey,
-    libra_mint_key: &Keypair,
-) {
-    let (mut blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
-
-    info!("creating the libra funding account..");
-    let libra_funding_key = Keypair::new();
-    let tx = librapay_transaction::create_account(
-        funding_key,
-        &libra_funding_key.pubkey(),
-        1,
-        blockhash,
-    );
-    let sig = client
-        .async_send_transaction(tx)
-        .expect("create_account in generate_and_fund_keypairs");
-    client.poll_for_signature(&sig).unwrap();
-
-    info!("minting to funding keypair");
-    let tx = librapay_transaction::mint_tokens(
-        &libra_mint_program_id,
-        funding_key,
-        libra_mint_key,
-        &libra_funding_key.pubkey(),
-        total,
-        blockhash,
-    );
-    let sig = client
-        .async_send_transaction(tx)
-        .expect("create_account in generate_and_fund_keypairs");
-    client.poll_for_signature(&sig).unwrap();
-
-    info!("creating move accounts.. {}", keypairs.len());
-    let create_len = 8;
-    let mut funding_time = Measure::start("funding_time");
-    for (i, keys) in keypairs.chunks(create_len).enumerate() {
-        if client.get_balance(&keys[0].pubkey()).unwrap_or(0) > 0 {
-            // already created these accounts.
-            break;
-        }
-
-        let mut tx_send = Measure::start("poll");
-
-        let pubkeys: Vec<_> = keys.iter().map(|k| k.pubkey()).collect();
-        let tx = librapay_transaction::create_accounts(funding_key, &pubkeys, 1, blockhash);
-        let ser_size = bincode::serialized_size(&tx).unwrap();
-        let sig = client
-            .async_send_transaction(tx)
-            .expect("create_account in generate_and_fund_keypairs");
-        tx_send.stop();
-        let mut poll = Measure::start("poll");
-        client.poll_for_signature(&sig).unwrap();
-        poll.stop();
-        if i % 10 == 0 {
-            blockhash = client.get_recent_blockhash().unwrap().0;
-            info!(
-                "size: {} created {} accounts of {} sig: {}us send: {}us",
-                ser_size,
-                i,
-                (keypairs.len() / create_len),
-                poll.as_us(),
-                tx_send.as_us()
-            );
-        }
-    }
-    funding_time.stop();
-    info!("funding accounts {}ms", funding_time.as_ms());
-    let mut sigs = vec![];
-    let tx_count = keypairs.len();
-    let amount = total / (tx_count as u64);
-    for (i, key) in keypairs[..tx_count].iter().enumerate() {
-        let tx = librapay_transaction::transfer(
-            libra_pay_program_id,
-            &libra_mint_key.pubkey(),
-            funding_key,
-            &libra_funding_key,
-            &key.pubkey(),
-            amount,
-            blockhash,
-        );
-
-        let sig = client
-            .async_send_transaction(tx.clone())
-            .expect("create_account in generate_and_fund_keypairs");
-
-        let mut poll_time = Measure::start("poll_start");
-        let poll_status = client.poll_for_signature(&sig);
-        poll_time.stop();
-        info!(
-            "i: {} poll: {:?} time: {}ms",
-            i,
-            poll_status,
-            poll_time.as_ms()
-        );
-
-        sigs.push((sig, key));
-
-        if i % 50 == 0 {
-            blockhash = client.get_recent_blockhash().unwrap().0;
-        }
-    }
-
-    for (i, (sig, key)) in sigs.iter().enumerate() {
-        let mut times = 0;
-        loop {
-            match client.poll_for_signature(&sig) {
-                Ok(_) => {
-                    break;
-                }
-                Err(e) => {
-                    info!("e :{:?} waiting times: {} sig: {}", e, times, sig);
-                    times += 1;
-                    sleep(Duration::from_secs(1));
-                }
-            }
-        }
-        times = 0;
-        loop {
-            let balance = librapay_transaction::get_libra_balance(client, &key.pubkey()).unwrap();
-            if balance < amount {
-                info!("i: {} balance: {} times: {}", i, balance, times);
-                times += 1;
-                sleep(Duration::from_secs(1));
-            } else {
-                break;
-            }
-        }
-        if i % 10 == 0 {
-            info!("funding {} of {}", i, tx_count);
-        }
-    }
-    info!("done..");
+    rnd.gen_n_keypairs(total_keys as u64)
 }
 
 pub fn generate_and_fund_keypairs<T: Client>(
     client: &T,
     drone_addr: Option<SocketAddr>,
-    funding_key: &Keypair,
+    funding_pubkey: &Keypair,
     tx_count: usize,
     lamports_per_account: u64,
-    use_move: bool,
-) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
+) -> (Vec<Keypair>, u64) {
     info!("Creating {} keypairs...", tx_count * 2);
-    let (mut keypairs, extra) = generate_keypairs(funding_key, tx_count as u64 * 2);
+    let mut keypairs = generate_keypairs(funding_pubkey, tx_count * 2);
 
     info!("Get lamports...");
 
     // Sample the first keypair, see if it has lamports, if so then resume.
@@ -844,76 +603,24 @@ pub fn generate_and_fund_keypairs<T: Client>(
         .get_balance(&keypairs[tx_count * 2 - 1].pubkey())
         .unwrap_or(0);
 
-    let mut move_keypairs_ret = None;
-
     if lamports_per_account > last_keypair_balance {
-        let (_blockhash, fee_calculator) = client.get_recent_blockhash().unwrap();
-        let account_desired_balance =
-            lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
-        let extra_fees = extra * fee_calculator.max_lamports_per_signature;
-        let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
-        if use_move {
-            total *= 2;
-        }
-
-        println!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
-                 last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
-                 account_desired_balance, total
-                 );
-
-        if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
-            airdrop_lamports(client, &drone_addr.unwrap(), funding_key, total)?;
-        }
-
-        if use_move {
-            let libra_genesis_keypair = create_genesis(&funding_key, client, 1_000_000);
-            let libra_mint_program_id = upload_mint_program(&funding_key, client);
-            let libra_pay_program_id = upload_payment_program(&funding_key, client);
-
-            // Generate another set of keypairs for move accounts.
-            // Still fund the solana ones which will be used for fees.
-            let seed = [0u8; 32];
-            let mut rnd = GenKeys::new(seed);
-            let move_keypairs = rnd.gen_n_keypairs(tx_count as u64 * 2);
-            fund_move_keys(
-                client,
-                funding_key,
-                &move_keypairs,
-                total / 2,
-                &libra_pay_program_id,
-                &libra_mint_program_id,
-                &libra_genesis_keypair,
-            );
-            move_keypairs_ret = Some((
-                libra_genesis_keypair,
-                libra_pay_program_id,
-                libra_mint_program_id,
-                move_keypairs,
-            ));
-
-            // Give solana keys half and move keys half the lamports.
-            total /= 2;
-        }
-
-        fund_keys(
-            client,
-            funding_key,
-            &keypairs,
-            total,
-            fee_calculator.max_lamports_per_signature,
-            extra,
-        );
+        let extra = lamports_per_account - last_keypair_balance;
+        let total = extra * (keypairs.len() as u64);
+        if client.get_balance(&funding_pubkey.pubkey()).unwrap_or(0) < total {
+            airdrop_lamports(client, &drone_addr.unwrap(), funding_pubkey, total);
+        }
+        info!("adding more lamports {}", extra);
+        fund_keys(client, funding_pubkey, &keypairs, extra);
     }
 
     // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
     keypairs.truncate(2 * tx_count);
 
-    Ok((keypairs, move_keypairs_ret, last_keypair_balance))
+    (keypairs, last_keypair_balance)
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
     use solana::cluster_info::FULLNODE_PORT_RANGE;
     use solana::local_cluster::{ClusterConfig, LocalCluster};
@@ -923,7 +630,6 @@ mod tests {
     use solana_runtime::bank::Bank;
     use solana_runtime::bank_client::BankClient;
     use solana_sdk::client::SyncClient;
-    use solana_sdk::fee_calculator::FeeCalculator;
     use solana_sdk::genesis_block::create_genesis_block;
     use std::sync::mpsc::channel;
 
@@ -942,68 +648,47 @@ mod tests {
         assert_eq!(should_switch_directions(20, 101), false);
     }
 
-    fn test_bench_tps_local_cluster(config: Config) {
+    #[test]
+    fn test_bench_tps_local_cluster() {
         solana_logger::setup();
+        let validator_config = ValidatorConfig::default();
         const NUM_NODES: usize = 1;
         let cluster = LocalCluster::new(&ClusterConfig {
            node_stakes: vec![999_990; NUM_NODES],
-           cluster_lamports: 200_000_000,
-           validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
-           native_instruction_processors: vec![solana_move_loader_program!()],
+           cluster_lamports: 2_000_000,
+           validator_config,
            ..ClusterConfig::default()
        });
 
        let drone_keypair = Keypair::new();
-       cluster.transfer(
-           &cluster.funding_keypair,
-           &drone_keypair.pubkey(),
-           100_000_000,
-       );
+       cluster.transfer(&cluster.funding_keypair, &drone_keypair.pubkey(), 1_000_000);
+
+       let (addr_sender, addr_receiver) = channel();
+       run_local_drone(drone_keypair, addr_sender, None);
+       let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
 
+       let mut config = Config::default();
+       config.tx_count = 100;
+       config.duration = Duration::from_secs(5);
+
        let client = create_client(
            (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
            FULLNODE_PORT_RANGE,
        );
 
-       let (addr_sender, addr_receiver) = channel();
-       run_local_drone(drone_keypair, addr_sender, None);
-       let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
-
        let lamports_per_account = 100;
-       let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
+       let (keypairs, _keypair_balance) = generate_and_fund_keypairs(
            &client,
            Some(drone_addr),
            &config.id,
            config.tx_count,
            lamports_per_account,
-           config.use_move,
-       )
-       .unwrap();
+       );
 
-       let total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
+       let total = do_bench_tps(vec![client], config, keypairs, 0);
        assert!(total > 100);
    }
 
-   #[test]
-   fn test_bench_tps_local_cluster_solana() {
-       let mut config = Config::default();
-       config.tx_count = 100;
-       config.duration = Duration::from_secs(10);
-
-       test_bench_tps_local_cluster(config);
-   }
-
-   #[test]
-   fn test_bench_tps_local_cluster_move() {
-       let mut config = Config::default();
-       config.tx_count = 100;
-       config.duration = Duration::from_secs(20);
-       config.use_move = true;
-
-       test_bench_tps_local_cluster(config);
-   }
-
    #[test]
    fn test_bench_tps_bank_client() {
        let (genesis_block, id) = create_genesis_block(10_000);
@@ -1015,11 +700,10 @@ mod tests {
        config.tx_count = 10;
        config.duration = Duration::from_secs(5);
 
-       let (keypairs, _move_keypairs, _keypair_balance) =
-           generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20, false)
-               .unwrap();
+       let (keypairs, _keypair_balance) =
+           generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20);
 
-       do_bench_tps(clients, config, keypairs, 0, None);
+       do_bench_tps(clients, config, keypairs, 0);
    }
 
    #[test]
@@ -1030,37 +714,12 @@ mod tests {
        let tx_count = 10;
        let lamports = 20;
 
-       let (keypairs, _move_keypairs, _keypair_balance) =
-           generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
+       let (keypairs, _keypair_balance) =
+           generate_and_fund_keypairs(&client, None, &id, tx_count, lamports);
 
        for kp in &keypairs {
-           assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
-       }
-   }
-
-   #[test]
-   fn test_bench_tps_fund_keys_with_fees() {
-       let (mut genesis_block, id) = create_genesis_block(10_000);
-       let fee_calculator = FeeCalculator::new(11);
-       genesis_block.fee_calculator = fee_calculator;
-       let bank = Bank::new(&genesis_block);
-       let client = BankClient::new(bank);
-       let tx_count = 10;
-       let lamports = 20;
-
-       let (keypairs, _move_keypairs, _keypair_balance) =
-           generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
-
-       let max_fee = client
-           .get_recent_blockhash()
-           .unwrap()
-           .1
-           .max_lamports_per_signature;
-       for kp in &keypairs {
-           assert_eq!(
-               client.get_balance(&kp.pubkey()).unwrap(),
-               lamports + max_fee
-           );
+           // TODO: This should be >= lamports, but fails at the moment
+           assert_ne!(client.get_balance(&kp.pubkey()).unwrap(), 0);
        }
    }
 }
@@ -4,7 +4,6 @@ use std::time::Duration;
 
 use clap::{crate_description, crate_name, crate_version, App, Arg, ArgMatches};
 use solana_drone::drone::DRONE_PORT;
-use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
 
 /// Holds the configuration for a single run of the benchmark
@@ -18,11 +17,6 @@ pub struct Config {
     pub tx_count: usize,
     pub thread_batch_sleep_ms: usize,
     pub sustained: bool,
-    pub client_ids_and_stake_file: String,
-    pub write_to_client_file: bool,
-    pub read_from_client_file: bool,
-    pub target_lamports_per_signature: u64,
-    pub use_move: bool,
 }
 
 impl Default for Config {
@@ -37,11 +31,6 @@ impl Default for Config {
             tx_count: 500_000,
             thread_batch_sleep_ms: 0,
             sustained: false,
-            client_ids_and_stake_file: String::new(),
-            write_to_client_file: false,
-            read_from_client_file: false,
-            target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
-            use_move: false,
         }
     }
 }
@@ -102,11 +91,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .long("sustained")
                 .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
         )
-        .arg(
-            Arg::with_name("use-move")
-                .long("use-move")
-                .help("Use Move language transactions to perform transfers."),
-        )
         .arg(
             Arg::with_name("tx_count")
                 .long("tx_count")
@@ -122,30 +106,6 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .takes_value(true)
                 .help("Per-thread-per-iteration sleep in ms"),
         )
-        .arg(
-            Arg::with_name("write-client-keys")
-                .long("write-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Generate client keys and stakes and write the list to YAML file"),
-        )
-        .arg(
-            Arg::with_name("read-client-keys")
-                .long("read-client-keys")
-                .value_name("FILENAME")
-                .takes_value(true)
-                .help("Read client keys and stakes from the YAML file"),
-        )
-        .arg(
-            Arg::with_name("target_lamports_per_signature")
-                .long("target-lamports-per-signature")
-                .value_name("LAMPORTS")
-                .takes_value(true)
-                .help(
-                    "The cost in lamports that the cluster will charge for signature \
-                     verification when the cluster is operating at target-signatures-per-slot",
-                ),
-        )
 }
 
 /// Parses a clap `ArgMatches` structure into a `Config`
@@ -203,22 +163,5 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
 
     args.sustained = matches.is_present("sustained");
 
-    if let Some(s) = matches.value_of("write-client-keys") {
-        args.write_to_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
-
-    if let Some(s) = matches.value_of("read-client-keys") {
-        assert!(!args.write_to_client_file);
-        args.read_from_client_file = true;
-        args.client_ids_and_stake_file = s.to_string();
-    }
-
-    if let Some(v) = matches.value_of("target_lamports_per_signature") {
-        args.target_lamports_per_signature = v.to_string().parse().expect("can't parse lamports");
-    }
-
-    args.use_move = matches.is_present("use-move");
-
     args
 }
@@ -1,25 +1,10 @@
-#[cfg(test)]
-#[macro_use]
-extern crate solana_move_loader_program;
-
 mod bench;
 mod cli;
 
-use crate::bench::{
-    do_bench_tps, generate_and_fund_keypairs, generate_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT,
-};
-use solana::gossip_service::{discover_cluster, get_multi_client};
-use solana_sdk::fee_calculator::FeeCalculator;
-use solana_sdk::signature::{Keypair, KeypairUtil};
-use std::collections::HashMap;
-use std::fs::File;
-use std::io::prelude::*;
-use std::path::Path;
+use crate::bench::{do_bench_tps, generate_and_fund_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT};
+use solana::gossip_service::{discover_cluster, get_clients};
 use std::process::exit;
 
-/// Number of signatures for all transactions in ~1 week at ~100K TPS
-pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
-
 fn main() {
     solana_logger::setup();
     solana_metrics::set_panic_hook("bench-tps");
@@ -37,45 +22,15 @@ fn main() {
         tx_count,
         thread_batch_sleep_ms,
         sustained,
-        client_ids_and_stake_file,
-        write_to_client_file,
-        read_from_client_file,
-        target_lamports_per_signature,
-        use_move,
     } = cli_config;
 
-    if write_to_client_file {
-        let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2);
-        let num_accounts = keypairs.len() as u64;
-        let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
-        let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
-            / num_accounts
-            + NUM_LAMPORTS_PER_ACCOUNT;
-        let mut accounts = HashMap::new();
-        keypairs.iter().for_each(|keypair| {
-            accounts.insert(
-                serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
-                num_lamports_per_account,
-            );
-        });
-
-        let serialized = serde_yaml::to_string(&accounts).unwrap();
-        let path = Path::new(&client_ids_and_stake_file);
-        let mut file = File::create(path).unwrap();
-        file.write_all(&serialized.into_bytes()).unwrap();
-        return;
-    }
-
     println!("Connecting to the cluster");
     let (nodes, _replicators) =
         discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|err| {
             eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
             exit(1);
         });
-
-    let (client, num_clients) = get_multi_client(&nodes);
-
-    if nodes.len() < num_clients {
+    if nodes.len() < num_nodes {
         eprintln!(
             "Error: Insufficient nodes discovered. Expecting {} or more",
             num_nodes
@@ -83,38 +38,15 @@ fn main() {
         exit(1);
     }
 
-    let (keypairs, move_keypairs, keypair_balance) = if read_from_client_file && !use_move {
-        let path = Path::new(&client_ids_and_stake_file);
-        let file = File::open(path).unwrap();
-
-        let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
-        let mut keypairs = vec![];
-        let mut last_balance = 0;
-
-        accounts.into_iter().for_each(|(keypair, balance)| {
-            let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
-            keypairs.push(Keypair::from_bytes(&bytes).unwrap());
-            last_balance = balance;
-        });
-        // Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
-        // This prevents the amount of storage needed for bench-tps accounts from creeping up
-        // across multiple runs.
-        keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
-        (keypairs, None, last_balance)
-    } else {
-        generate_and_fund_keypairs(
-            &client,
-            Some(drone_addr),
-            &id,
-            tx_count,
-            NUM_LAMPORTS_PER_ACCOUNT,
-            use_move,
-        )
-        .unwrap_or_else(|e| {
-            eprintln!("Error could not fund keys: {:?}", e);
-            exit(1);
-        })
-    };
+    let clients = get_clients(&nodes);
+
+    let (keypairs, keypair_balance) = generate_and_fund_keypairs(
+        &clients[0],
+        Some(drone_addr),
+        &id,
+        tx_count,
+        NUM_LAMPORTS_PER_ACCOUNT,
+    );
 
     let config = Config {
         id,
@@ -123,14 +55,7 @@ fn main() {
         duration,
         tx_count,
         sustained,
-        use_move,
     };
 
-    do_bench_tps(
-        vec![client],
-        config,
-        keypairs,
-        keypair_balance,
-        move_keypairs,
-    );
+    do_bench_tps(clients, config, keypairs, keypair_balance);
 }
@@ -1,18 +0,0 @@
-             +------------+
-             | Bank-Merkle|
-             +------------+
-                  ^    ^
-                 /      \
-  +-----------------+ +-------------+
-  | Bank-Diff-Merkle| | Block-Merkle|
-  +-----------------+ +-------------+
-           ^    ^
-          /      \
-  +------+ +--------------------------+
-  | Hash | | Previous Bank-Diff-Merkle|
-  +------+ +--------------------------+
-      ^    ^
-     /      \
-  +---------------+ +---------------+
-  | Hash(Account1)| | Hash(Account2)|
-  +---------------+ +---------------+
@@ -1,19 +0,0 @@
-              +---------------+
-              | Block-Merkle  |
-              +---------------+
-                   ^     ^
-                  /       \
-       +-------------+ +-------------+
-       | Entry-Merkle| | Entry-Merkle|
-       +-------------+ +-------------+
-            ^    ^
-           /      \
-       +-------+ +-------+
-       | Hash  | | Hash  |
-       +-------+ +-------+
-         ^      ^  ^        ^
-        /       |  |         \
-  +-----------------+ +-----------------+ +-----------------+ +---+
-  | Hash(T1, status)| | Hash(T2, status)| | Hash(T3, status)| | 0 |
-  +-----------------+ +-----------------+ +-----------------+ +---+
-
@@ -3,4 +3,4 @@
 
 cd "$(dirname "$0")"
 
-make -j"$(nproc)" test
+make -j"$(nproc)"
@@ -4,14 +4,11 @@ MD_SRCS=$(wildcard src/*.md)
 
 SVG_IMGS=$(BOB_SRCS:art/%.bob=src/img/%.svg) $(MSC_SRCS:art/%.msc=src/img/%.svg)
 
-TARGET=html/index.html
-TEST_STAMP=src/tests.ok
-
-all: $(TARGET)
-
-test: $(TEST_STAMP)
-
-open: $(TEST_STAMP)
+all: html/index.html
+
+test: src/tests.ok
+
+open: all
 	mdbook build --open
 
 watch: $(SVG_IMGS)
@@ -29,11 +26,11 @@ src/%.md: %.md
 	@mkdir -p $(@D)
 	@cp $< $@
 
-$(TEST_STAMP): $(TARGET)
+src/tests.ok: $(SVG_IMGS) $(MD_SRCS)
 	mdbook test
 	touch $@
 
-$(TARGET): $(SVG_IMGS) $(MD_SRCS)
+html/index.html: src/tests.ok
 	mdbook build
 
 clean:
@@ -5,8 +5,6 @@
 - [Terminology](terminology.md)

 - [Getting Started](getting-started.md)
-- [Testnet Participation](testnet-participation.md)
-- [Testnet Replicator](testnet-replicator.md)
 - [Example: Web Wallet](webwallet.md)

 - [Programming Model](programs.md)
@@ -18,7 +16,7 @@
 - [Leader Rotation](leader-rotation.md)
 - [Fork Generation](fork-generation.md)
 - [Managing Forks](managing-forks.md)
-- [Turbine Block Propagation](turbine-block-propagation.md)
+- [Data Plane Fanout](data-plane-fanout.md)
 - [Ledger Replication](ledger-replication.md)
 - [Secure Vote Signing](vote-signing.md)
 - [Stake Delegation and Rewards](stake-delegation-and-rewards.md)
@@ -31,11 +29,7 @@
 - [Gossip Service](gossip.md)
 - [The Runtime](runtime.md)

-- [Anatomy of a Transaction](transaction.md)
-
 - [API Reference](api-reference.md)
-- [Transaction](transaction-api.md)
-- [Instruction](instruction-api.md)
 - [Blockstreamer](blockstreamer.md)
 - [JSON RPC API](jsonrpc-api.md)
 - [JavaScript API](javascript-api.md)
@@ -59,21 +53,17 @@
 - [Economic Design MVP](ed_mvp.md)
 - [References](ed_references.md)
 - [Cluster Test Framework](cluster-test-framework.md)
+- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
+- [Deterministic Transaction Fees](transaction-fees.md)
 - [Validator](validator-proposal.md)
-- [Simple Payment and State Verification](simple-payment-and-state-verification.md)
-- [Cross-Program Invocation](cross-program-invocation.md)

 - [Implemented Design Proposals](implemented-proposals.md)
-- [Blocktree](blocktree.md)
-- [Cluster Software Installation and Updates](installer.md)
-- [Deterministic Transaction Fees](transaction-fees.md)
-- [Tower BFT](tower-bft.md)
+- [Fork Selection](fork-selection.md)
 - [Leader-to-Leader Transition](leader-leader-transition.md)
 - [Leader-to-Validator Transition](leader-validator-transition.md)
-- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
-- [Persistent Account Storage](persistent-account-storage.md)
-- [Reliable Vote Transmission](reliable-vote-transmission.md)
-- [Repair Service](repair-service.md)
+- [Testnet Participation](testnet-participation.md)
 - [Testing Programs](testing-programs.md)
-- [Credit-only Accounts](credit-only-credit-debit-accounts.md)
-- [Embedding the Move Language](embedding-move.md)
+- [Reliable Vote Transmission](reliable-vote-transmission.md)
+- [Persistent Account Storage](persistent-account-storage.md)
+- [Cluster Software Installation and Updates](installer.md)
+- [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
@@ -4,7 +4,7 @@ A validator votes on a PoH hash for two purposes. First, the vote indicates it
 believes the ledger is valid up until that point in time. Second, since many
 valid forks may exist at a given height, the vote also indicates exclusive
 support for the fork. This document describes only the former. The latter is
-described in [Tower BFT](tower-bft.md).
+described in [fork selection](fork-selection.md).

 ## Current Design

@@ -50,11 +50,12 @@ log the time since the NewBlock transaction was submitted.

 ### Finality and Payouts

-[Tower BFT](tower-bft.md) is the proposed fork selection algorithm. It proposes
-that payment to miners be postponed until the *stack* of validator votes reaches
-a certain depth, at which point rollback is not economically feasible. The vote
-program may therefore implement Tower BFT. Vote instructions would need to
-reference a global Tower account so that it can track cross-block state.
+Locktower is the proposed [fork selection](fork-selection.md) algorithm. It
+proposes that payment to miners be postponed until the *stack* of validator
+votes reaches a certain depth, at which point rollback is not economically
+feasible. The vote program may therefore implement locktower. Vote instructions
+would need to reference a global locktower account so that it can track
+cross-block state.

 ## Challenges

@@ -1,111 +0,0 @@
# Cross-Program Invocation

## Problem

In today's implementation a client can create a transaction that modifies two
accounts, each owned by a separate on-chain program:

```rust,ignore
let message = Message::new(vec![
    token_instruction::pay(&alice_pubkey),
    acme_instruction::launch_missiles(&bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

The current implementation does not, however, allow the `acme` program to
conveniently invoke `token` instructions on the client's behalf:

```rust,ignore
let message = Message::new(vec![
    acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
]);
client.send_message(&[&alice_keypair, &bob_keypair], &message);
```

Currently, there is no way to create an instruction `pay_and_launch_missiles`
that executes `token_instruction::pay` from the `acme` program. The workaround
is to extend the `acme` program with the implementation of the `token` program,
and create `token` accounts with `ACME_PROGRAM_ID`, which the `acme` program is
permitted to modify. With that workaround, `acme` can modify token-like
accounts created by the `acme` program, but not token accounts created by the
`token` program.

## Proposed Solution

The goal of this design is to modify Solana's runtime such that an on-chain
program can invoke an instruction from another program.

Given two on-chain programs `token` and `acme`, each implementing instructions
`pay()` and `launch_missiles()` respectively, we would ideally like to
implement the `acme` module with a call to a function defined in the `token`
module:

```rust,ignore
use token;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    token::pay(&keyed_accounts[1..])?;

    launch_missiles(keyed_accounts)?;
}
```

The above code would require that the `token` crate be dynamically linked,
so that a custom linker could intercept calls and validate accesses to
`keyed_accounts`. That is, even though the client intends to modify both
`token` and `acme` accounts, only the `token` program is permitted to modify
the `token` account, and only the `acme` program is permitted to modify
the `acme` account.

Backing off from that ideal cross-program call, a slightly more
verbose solution is to expose token's existing `process_instruction()`
entrypoint to the acme program:

```rust,ignore
use token_instruction;

fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    ...
}

fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
    let alice_pubkey = keyed_accounts[1].key;
    let instruction = token_instruction::pay(&alice_pubkey);
    process_instruction(&instruction)?;

    launch_missiles(keyed_accounts)?;
}
```

where `process_instruction()` is built into Solana's runtime and responsible
for routing the given instruction to the `token` program via the instruction's
`program_id` field. Before invoking `pay()`, the runtime must also ensure that
`acme` didn't modify any accounts owned by `token`. It does this by calling
`runtime::verify_instruction()` and then afterward updating all the `pre_*`
variables to tentatively commit `acme`'s account modifications. After `pay()`
completes, the runtime must again ensure that `token` didn't modify any
accounts owned by `acme`. It should call `verify_instruction()` again, but this
time with the `token` program ID. Lastly, after `pay_and_launch_missiles()`
completes, the runtime must call `verify_instruction()` one more time, where it
normally would, but using all updated `pre_*` variables. If executing
`pay_and_launch_missiles()` up to `pay()` made no invalid account changes,
`pay()` made no invalid changes, and executing from `pay()` until
`pay_and_launch_missiles()` returns made no invalid changes, then the runtime
can transitively assume `pay_and_launch_missiles()` as a whole made no invalid
account changes, and therefore commit all account modifications.

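A minimal sketch of that bookkeeping, with toy types standing in for the runtime's internals (`Account`, `verify_instruction`, and the `pre` snapshots here are simplified assumptions, not the actual API):

```rust
#[derive(Clone, PartialEq, Debug)]
struct Account {
    owner: &'static str, // program id that owns this account
    data: Vec<u8>,
}

/// Check that `program_id` only changed accounts it owns, by comparing
/// each account against its snapshot taken before the program ran.
fn verify_instruction(program_id: &str, pre: &[Account], post: &[Account]) -> Result<(), String> {
    for (before, after) in pre.iter().zip(post) {
        if before != after && before.owner != program_id {
            return Err(format!("{} modified an account owned by {}", program_id, before.owner));
        }
    }
    Ok(())
}

fn main() -> Result<(), String> {
    let mut accounts = vec![
        Account { owner: "acme", data: vec![] },
        Account { owner: "token", data: vec![1] },
    ];

    // Snapshot before `acme` runs, then let it touch its own account.
    let pre = accounts.clone();
    accounts[0].data.push(42); // acme writes acme-owned data: allowed
    verify_instruction("acme", &pre, &accounts)?;

    // Tentatively commit acme's changes, then let `token` run pay().
    let pre = accounts.clone();
    accounts[1].data.push(7); // token writes token-owned data: allowed
    verify_instruction("token", &pre, &accounts)?;

    // Final check after pay_and_launch_missiles() returns.
    let pre = accounts.clone();
    verify_instruction("acme", &pre, &accounts)
}
```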
### Setting `KeyedAccount.is_signer`

When `process_instruction()` is invoked, the runtime must create a new
`KeyedAccounts` parameter using the signatures from the *original* transaction
data. Since the `token` program is immutable and existed on-chain prior to the
`acme` program, the runtime can safely treat the transaction signature as a
signature of a transaction with a `token` instruction. When the runtime sees
the given instruction references `alice_pubkey`, it looks up the key in the
transaction to see if that key corresponds to a transaction signature. In this
case it does and so sets `KeyedAccount.is_signer`, thereby authorizing the
`token` program to modify Alice's account.
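A sketch of that lookup, assuming a simplified `KeyedAccount` and a hypothetical `signed_keys` list taken from the original transaction:

```rust
struct KeyedAccount {
    key: String,
    is_signer: bool,
}

/// Rebuild KeyedAccounts for an inner instruction: an account is marked
/// as a signer iff its key signed the *original* transaction.
fn keyed_accounts_for(instruction_keys: &[&str], signed_keys: &[&str]) -> Vec<KeyedAccount> {
    instruction_keys
        .iter()
        .map(|key| KeyedAccount {
            key: key.to_string(),
            is_signer: signed_keys.contains(key),
        })
        .collect()
}

fn main() {
    let signed = ["alice_pubkey", "bob_pubkey"]; // signatures on the transaction
    let accounts = keyed_accounts_for(&["alice_pubkey", "mint_pubkey"], &signed);
    for a in &accounts {
        println!("{}: is_signer={}", a.key, a.is_signer);
    }
}
```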
@@ -1,12 +1,12 @@
-# Turbine Block Propagation
+# Data Plane Fanout

-A Solana cluster uses a multi-layer block propagation mechanism called *Turbine*
-to broadcast transaction blobs to all nodes with a minimal amount of duplicate
-messages. The cluster divides itself into small collections of nodes, called
-*neighborhoods*. Each node is responsible for sharing any data it receives with
-the other nodes in its neighborhood, as well as propagating the data on to a
-small set of nodes in other neighborhoods. This way each node only has to
-communicate with a small number of nodes.
+A Solana cluster uses a multi-layer mechanism called *data plane fanout* to
+broadcast transaction blobs to all nodes in a very quick and efficient manner.
+In order to establish the fanout, the cluster divides itself into small
+collections of nodes, called *neighborhoods*. Each node is responsible for
+sharing any data it receives with the other nodes in its neighborhood, as well
+as propagating the data on to a small set of nodes in other neighborhoods.
+This way each node only has to communicate with a small number of nodes.

 During its slot, the leader node distributes blobs between the validator nodes
 in the first neighborhood (layer 0). Each validator shares its data within its
@@ -26,14 +26,6 @@ make up layer 0. These will automatically be the highest stake holders, allowing
 the heaviest votes to come back to the leader first. Layer-0 and lower-layer
 nodes use the same logic to find their neighbors and next layer peers.

-To reduce the possibility of attack vectors, each blob is transmitted over a
-random tree of neighborhoods. Each node uses the same set of nodes representing
-the cluster. A random tree is generated from the set for each blob using
-randomness derived from the blob itself. Since the random seed is not known in
-advance, attacks that try to eclipse neighborhoods from certain leaders or
-blocks become very difficult, and should require almost complete control of the
-stake in the cluster.
-
 ## Layer and Neighborhood Structure

 The current leader makes its initial broadcasts to at most `DATA_PLANE_FANOUT`
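As a rough illustration of how a single fanout constant partitions stake-ranked nodes into layers (the geometric layer sizes below are an assumed reading of the description, not the exact implementation):

```rust
/// Given a node's position in the stake-ranked list, return its layer,
/// assuming layer 0 holds `fanout` nodes and each later layer is `fanout`
/// times larger than the previous one.
fn layer_of(rank: usize, fanout: usize) -> usize {
    let mut layer = 0;
    let mut start = 0;
    let mut size = fanout;
    while rank >= start + size {
        start += size;
        size *= fanout;
        layer += 1;
    }
    layer
}

fn main() {
    let fanout = 200; // stand-in for DATA_PLANE_FANOUT
    for rank in [0, 199, 200, 40_199, 40_200] {
        println!("node ranked {:>6} sits in layer {}", rank, layer_of(rank, fanout));
    }
}
```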
@@ -1,66 +0,0 @@
# Embedding the Move Language

## Problem

Solana enables developers to write on-chain programs in general purpose
programming languages such as C or Rust, but those programs contain
Solana-specific mechanisms. For example, there isn't another chain that asks
developers to create a Rust module with a `process_instruction(KeyedAccounts)`
function. Whenever practical, Solana should offer dApp developers more portable
options.

Until just recently, no popular blockchain offered a language that could expose
the value of Solana's massively parallel [runtime](runtime.md). Solidity
contracts, for example, do not separate references to shared data from contract
code, and therefore need to be executed serially to ensure deterministic
behavior. In practice we see that the most aggressively optimized EVM-based
blockchains all seem to peak out around 1,200 TPS - a small fraction of what
Solana can do. The Libra project, on the other hand, designed an on-chain
programming language called Move that is more suitable for parallel execution.
Like Solana's runtime, Move programs depend on accounts for all shared state.

The biggest design difference between Solana's runtime and Libra's Move VM is
how they manage safe invocations between modules. Solana took an operating
systems approach and Libra took the domain-specific language approach. In the
runtime, a module must trap back into the runtime to ensure the caller's module
did not write to data owned by the callee. Likewise, when the callee completes,
it must again trap back to the runtime to ensure the callee did not write to
data owned by the caller. Move, on the other hand, includes an advanced type
system that allows these checks to be run by its bytecode verifier. Because
Move bytecode can be verified, the cost of verification is paid just once, at
the time the module is loaded on-chain. In the runtime, the cost is paid each
time a transaction crosses between modules. The difference is similar in spirit
to the difference between a dynamically-typed language like Python versus a
statically-typed language like Java. Solana's runtime allows dApps to be
written in general purpose programming languages, but that comes with the cost
of runtime checks when jumping between programs.

This proposal attempts to define a way to embed the Move VM such that:

* cross-module invocations within Move do not require the runtime's
  cross-program runtime checks
* Move programs can leverage functionality in other Solana programs and vice
  versa
* Solana's runtime parallelism is exposed to batches of Move and non-Move
  transactions

## Proposed Solution

### Move VM as a Solana loader

The Move VM shall be embedded as a Solana loader under the identifier
`MOVE_PROGRAM_ID`, so that Move modules can be marked as `executable` with the
VM as its `owner`. This will allow modules to load module dependencies, as well
as allow for parallel execution of Move scripts.

All data accounts owned by Move modules must set their owners to the loader,
`MOVE_PROGRAM_ID`. Since Move modules encapsulate their account data in the
same way Solana programs encapsulate theirs, the Move module owner should be
embedded in the account data. The runtime will grant write access to the Move
VM, and Move grants access to the module accounts.

### Interacting with Solana programs

To invoke instructions in non-Move programs, Solana would need to extend the
Move VM with a `process_instruction()` system call. It would work the same as
`process_instruction()` in Rust BPF programs.
@@ -55,7 +55,7 @@ Validators can ignore forks at other points (e.g. from the wrong leader), or
 slash the leader responsible for the fork.

 Validators vote based on a greedy choice to maximize their reward described in
-[Tower BFT](tower-bft.md).
+[fork selection](fork-selection.md).

 ### Validator's View

@@ -1,7 +1,7 @@
-# Tower BFT
+# Fork Selection

-This design describes Solana's *Tower BFT* algorithm. It addresses the
-following problems:
+This design describes a *Fork Selection* algorithm. It addresses the following
+problems:

 * Some forks may not end up accepted by the super-majority of the cluster, and
   voters need to recover from voting on such forks.
@@ -161,7 +161,7 @@ This will dump all the threads stack traces into gdb.txt
 In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.

 ```bash
-$ ./multinode-demo/client.sh --entrypoint testnet.solana.com:8001 --drone testnet.solana.com:9900 --duration 60 --tx_count 50
+$ ./multinode-demo/client.sh --entrypoint testnet.solana.com:8001 --duration 60
 ```

 You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)
@@ -22,7 +22,7 @@ gossip endpoint (a socket address).

 Records shared over gossip are arbitrary, but signed and versioned (with a
 timestamp) as needed to make sense to the node receiving them. If a node
-receives two records from the same source, it updates its own copy with the
+recieves two records from the same source, it it updates its own copy with the
 record with the most recent timestamp.

 ## Gossip Service Interface

@@ -34,8 +34,8 @@ Nodes send push messages to `PUSH_FANOUT` push peers.

 Upon receiving a push message, a node examines the message for:

-1. Duplication: if the message has been seen before, the node drops the message
-   and may respond with `PushMessagePrune` if forwarded from a low staked node
+1. Duplication: if the message has been seen before, the node responds with
+   `PushMessagePrune` and drops the message

 2. New data: if the message is new to the node
   * Stores the new information with an updated version in its cluster info and
@@ -51,7 +51,7 @@ Upon receiving a push message, a node examines the message for:
 A node selects its push peers at random from the active set of known peers.
 The node keeps this selection for a relatively long time. When a prune message
 is received, the node drops the push peer that sent the prune. Prune is an
-indication that there is another, higher stake weighted path to that node than direct push.
+indication that there is another, faster path to that node than direct push.

 The set of push peers is kept fresh by rotating a new node into the set every
 `PUSH_MSG_TIMEOUT/2` milliseconds.
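A minimal sketch of the duplication handling described above, using a digest-keyed seen-set; the types and the unconditional prune response are simplified assumptions:

```rust
use std::collections::HashSet;

enum PushResponse {
    Accepted,
    Prune, // tells the sender there is a better path to this node
}

struct GossipNode {
    seen: HashSet<u64>, // digests of push messages already received
}

impl GossipNode {
    /// Handle one push message: drop-and-prune duplicates, accept new data.
    fn on_push(&mut self, digest: u64) -> PushResponse {
        if !self.seen.insert(digest) {
            // Seen before: drop it and ask the sender to prune us.
            PushResponse::Prune
        } else {
            // New data: store it and (elsewhere) forward to PUSH_FANOUT peers.
            PushResponse::Accepted
        }
    }
}

fn main() {
    let mut node = GossipNode { seen: HashSet::new() };
    assert!(matches!(node.on_push(42), PushResponse::Accepted));
    assert!(matches!(node.on_push(42), PushResponse::Prune));
    println!("duplicate push correctly answered with prune");
}
```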
@@ -12,18 +12,18 @@ updates is managed using an on-chain update manifest program.
 #### Fetch and run a pre-built installer using a bootstrap curl/shell script
 The easiest install method for supported platforms:
 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh
 ```

 This script will check github for the latest tagged release and download and run the
-`solana-install-init` binary from there.
+`solana-install` binary from there.

 If additional arguments need to be specified during the installation, the
 following shell syntax is used:
 ```bash
-$ init_args=.... # arguments for `solana-install-init ...`
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.16.0/install/solana-install-init.sh | sh -s - ${init_args}
+$ init_args=.... # arguments for `solana-installer init ...`
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.13.0/install/solana-install-init.sh | sh -s - ${init_args}
 ```

 #### Fetch and run a pre-built installer from a Github release
@@ -31,9 +31,9 @@ With a well-known release URL, a pre-built binary can be obtained for supported
 platforms:

 ```bash
-$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.16.0/solana-install-init-x86_64-apple-darwin
-$ chmod +x ./solana-install-init
-$ ./solana-install-init --help
+$ curl -o solana-install https://github.com/solana-labs/solana/releases/download/v0.13.0/solana-install-x86_64-apple-darwin
+$ chmod +x ./solana-install
+$ ./solana-install --help
 ```

 #### Build and run the installer from source
@@ -49,7 +49,7 @@ $ cargo run -- --help
 Given a solana release tarball (as created by `ci/publish-tarball.sh`) that has already been uploaded to a publicly accessible URL,
 the following commands will deploy the update:
 ```bash
-$ solana-keygen new -o update-manifest.json # <-- only generated once, the public key is shared with users
+$ solana-keygen -o update-manifest.json # <-- only generated once, the public key is shared with users
 $ solana-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json
 ```

@@ -119,7 +119,7 @@ It manages the following files and directories in the user's home directory:

 #### Command-line Interface
 ```manpage
-solana-install 0.16.0
+solana-install 0.13.0
 The solana cluster software installer

 USAGE:
@@ -130,7 +130,7 @@ FLAGS:
 -V, --version Prints version information

 OPTIONS:
--c, --config <PATH> Configuration file to use [default: .../Library/Preferences/solana/install.yml]
+-c, --config <PATH> Configuration file to use [default: /Users/mvines/Library/Preferences/solana/install.yml]

 SUBCOMMANDS:
 deploy deploys a new update
@@ -152,8 +152,8 @@ FLAGS:
 -h, --help Prints help information

 OPTIONS:
--d, --data_dir <PATH> Directory to store install data [default: .../Library/Application Support/solana]
--u, --url <URL> JSON RPC URL for the solana cluster [default: http://testnet.solana.com:8899]
+-d, --data_dir <PATH> Directory to store install data [default: /Users/mvines/Library/Application Support/solana]
+-u, --url <URL> JSON RPC URL for the solana cluster [default: https://api.testnet.solana.com/]
 -p, --pubkey <PUBKEY> Public key of the update manifest [default: 9XX329sPuskWhH4DQh6k16c87dHKhXLBZTL3Gxmve8Gp]
 ```

@@ -1,25 +0,0 @@
# Instructions

For the purposes of building a [Transaction](transaction.md), a more
verbose instruction format is used:

* **Instruction:**
  * **program_id:** The pubkey of the on-chain program that executes the
    instruction
  * **accounts:** An ordered list of accounts that should be passed to
    the program processing the instruction, including metadata detailing
    if an account is a signer of the transaction and if it is a
    credit-only account.
  * **data:** A byte array that is passed to the program executing the
    instruction

A more compact form is actually included in a `Transaction`:

* **CompiledInstruction:**
  * **program_id_index:** The index of the `program_id` in the
    `account_keys` list
  * **accounts:** An ordered list of indices into `account_keys`
    specifying the accounts that should be passed to the program
    processing the instruction.
  * **data:** A byte array that is passed to the program executing the
    instruction
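Rendered as Rust, the two shapes listed above might look like the following; the field types here are assumptions, not the SDK's actual definitions:

```rust
/// Verbose form used while building a transaction.
struct Instruction {
    /// Pubkey of the on-chain program that executes the instruction.
    program_id: [u8; 32],
    /// Ordered accounts to pass to the program, with signer and
    /// credit-only metadata.
    accounts: Vec<AccountMeta>,
    /// Opaque bytes handed to the program.
    data: Vec<u8>,
}

struct AccountMeta {
    pubkey: [u8; 32],
    is_signer: bool,
    is_credit_only: bool,
}

/// Compact form actually serialized inside a `Transaction`.
struct CompiledInstruction {
    /// Index of the program id in the transaction's `account_keys` list.
    program_id_index: u8,
    /// Indices into `account_keys` for the accounts to pass along.
    accounts: Vec<u8>,
    /// Opaque bytes handed to the program.
    data: Vec<u8>,
}

fn main() {
    let compiled = CompiledInstruction { program_id_index: 0, accounts: vec![1, 2], data: vec![] };
    println!("program_id_index = {}", compiled.program_id_index);
}
```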
@@ -1,13 +1,13 @@
 # What is Solana?

-Solana is an open source project implementing a new,
+Solana is the name of an open source project that is implementing a new,
 high-performance, permissionless blockchain. Solana is also the name of a
 company headquartered in San Francisco that maintains the open source project.

 # About this Book

 This book describes the Solana open source project, a blockchain built from the
-ground up for scale. The book covers why Solana is useful, how to use it, how it
+ground up for scale. The book covers why it's useful, how to use it, how it
 works, and why it will continue to work long after the company Solana closes
 its doors. The goal of the Solana architecture is to demonstrate there exists a
 set of software algorithms that when used in combination to implement a
@@ -25,18 +25,11 @@ Methods
 * [getAccountInfo](#getaccountinfo)
 * [getBalance](#getbalance)
 * [getClusterNodes](#getclusternodes)
-* [getEpochInfo](#getepochinfo)
-* [getLeaderSchedule](#getleaderschedule)
-* [getProgramAccounts](#getprogramaccounts)
 * [getRecentBlockhash](#getrecentblockhash)
 * [getSignatureStatus](#getsignaturestatus)
 * [getSlotLeader](#getslotleader)
-* [getSlotsPerSegment](#getslotspersegment)
-* [getStorageTurn](#getstorageturn)
-* [getStorageTurnRate](#getstorageturnrate)
 * [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
 * [getTransactionCount](#gettransactioncount)
-* [getTotalSupply](#gettotalsupply)
 * [getEpochVoteAccounts](#getepochvoteaccounts)
 * [requestAirdrop](#requestairdrop)
 * [sendTransaction](#sendtransaction)
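Any of the methods listed above can be exercised with a plain HTTP POST; a dependency-free Rust sketch (the method and endpoint chosen are illustrative):

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

/// POST one JSON-RPC request to a local node and print the raw response.
fn main() -> std::io::Result<()> {
    let body = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#;
    let request = format!(
        "POST / HTTP/1.1\r\nHost: localhost\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
        body.len(),
        body
    );

    let mut stream = TcpStream::connect("127.0.0.1:8899")?;
    stream.write_all(request.as_bytes())?;

    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    println!("{}", response);
    Ok(())
}
```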
@@ -102,32 +95,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "
 {"jsonrpc":"2.0","result":true,"id":1}
 ```

----
-
-### getAccountInfo
-Returns all information associated with the account of provided Pubkey
-
-##### Parameters:
-* `string` - Pubkey of account to query, as base-58 encoded string
-
-##### Results:
-The result field will be a JSON object with the following sub fields:
-
-* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
-* `owner`, array of 32 bytes representing the program this account has been assigned to
-* `data`, array of bytes representing any data associated with the account
-* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
-
-##### Example:
-```bash
-// Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
-
-// Result
-{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
-```
-
 ---

 ### getBalance
@@ -158,7 +125,7 @@ None

 ##### Results:
 The result field will be an array of JSON objects, each with the following sub fields:
-* `pubkey` - Node public key, as base-58 encoded string
+* `id` - Node identifier, as base-58 encoded string
 * `gossip` - Gossip network address for the node
 * `tpu` - TPU network address for the node
 * `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
@@ -169,78 +136,33 @@ The result field will be an array of JSON objects, each with the following sub f
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
+{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","id":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
 ```

 ---

-### getEpochInfo
-Returns information about the current epoch
+### getAccountInfo
+Returns all information associated with the account of provided Pubkey

 ##### Parameters:
-None
+* `string` - Pubkey of account to query, as base-58 encoded string

 ##### Results:
-The result field will be an object with the following fields:
-* `epoch`, the current epoch
-* `slotIndex`, the current slot relative to the start of the current epoch
-* `slotsInEpoch`, the number of slots in this epoch
-
-##### Example:
-```bash
-// Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899
-
-// Result
-{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1}
-```
-
----
-
-### getLeaderSchedule
-Returns the leader schedule for the current epoch
-
-##### Parameters:
-None
-
-##### Results:
-The result field will be an array of leader public keys (as base-58 encoded
-strings) for each slot in the current epoch
-
-##### Example:
-```bash
-// Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899
-
-// Result
-{"jsonrpc":"2.0","result":[...],"id":1}
-```
-
----
-
-### getProgramAccounts
-Returns all accounts owned by the provided program Pubkey
-
-##### Parameters:
-* `string` - Pubkey of program, as base-58 encoded string
-
-##### Results:
-The result field will be an array of arrays. Each sub array will contain:
-* `string` - the account Pubkey as base-58 encoded string
-and a JSON object, with the following sub fields:
+The result field will be a JSON object with the following sub fields:

 * `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
 * `owner`, array of 32 bytes representing the program this account has been assigned to
 * `data`, array of bytes representing any data associated with the account
 * `executable`, boolean indicating if the account contains a program (and is strictly read-only)
+* `loader`, array of 32 bytes representing the loader for this program (if `executable`), otherwise all zeros

 ##### Example:
 ```bash
 // Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]], ["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", {"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]]]},"id":1}
+{"jsonrpc":"2.0","result":{"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
 ```

 ---
@@ -311,67 +233,7 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
 {"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
 ```

-----
+-----

-### getSlotsPerSegment
-Returns the current storage segment size in terms of slots
-
-##### Parameters:
-None
-
-##### Results:
-* `u64` - Number of slots in a storage segment
-
-##### Example:
-```bash
-// Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899
-// Result
-{"jsonrpc":"2.0","result":"1024","id":1}
-```
-
-----
-
-### getStorageTurn
-Returns the current storage turn's blockhash and slot
-
-##### Parameters:
-None
-
-##### Results:
-An array consisting of
-* `string` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
-* `u64` - the current storage turn slot
-
-##### Example:
-```bash
-// Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899
-// Result
-{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", "2048"],"id":1}
-```
-
-----
-
-### getStorageTurnRate
-Returns the current storage turn rate in terms of slots per turn
-
-##### Parameters:
-None
-
-##### Results:
-* `u64` - Number of slots in storage turn
-
-##### Example:
-```bash
-// Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899
-// Result
-{"jsonrpc":"2.0","result":"1024","id":1}
-```
-
-----
-
 ### getNumBlocksSinceSignatureConfirmation
 Returns the current number of blocks since signature has been confirmed.
@@ -413,26 +275,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m

 ---

-### getTotalSupply
-Returns the current total supply in Lamports
-
-##### Parameters:
-None
-
-##### Results:
-* `integer` - Total supply, as unsigned 64-bit integer
-
-##### Example:
-```bash
-// Request
-curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899
-
-// Result
-{"jsonrpc":"2.0","result":10126,"id":1}
-```
-
----
-
 ### getEpochVoteAccounts
 Returns the account info and associated stake for all the voting accounts in the current epoch.

@@ -440,11 +282,19 @@ Returns the account info and associated stake for all the voting accounts in the
 None

 ##### Results:
-The result field will be an array of JSON objects, each with the following sub fields:
-* `votePubkey` - Vote account public key, as base-58 encoded string
-* `nodePubkey` - Node public key, as base-58 encoded string
-* `stake` - the stake, in lamports, delegated to this vote account
+An array consisting of vote accounts:
+* `string` - the vote account's Pubkey as base-58 encoded string
+* `integer` - the stake, in lamports, delegated to this vote account
+* `VoteState` - the vote account's state
+
+Each VoteState will be a JSON object with the following sub fields:
+
+* `votes`, array of most recent vote lockouts
+* `node_pubkey`, the pubkey of the node that votes using this account
+* `authorized_voter_pubkey`, the pubkey of the authorized vote signer for this account
 * `commission`, a 32-bit integer used as a fraction (commission/MAX_U32) for rewards payout
+* `root_slot`, the most recent slot this account has achieved maximum lockout
+* `credits`, credits accrued by this account for reaching lockouts

 ##### Example:
 ```bash
@@ -452,7 +302,7 @@ The result field will be an array of JSON objects, each with the following sub f
 // Request
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochVoteAccounts"}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":[{"commission":0,"nodePubkey":"Et2RaZJdJRTzTkodUwiHr4H6sLkVmijBFv8tkd7oSSFY","stake":42,"votePubkey":"B4CdWq3NBSoH2wYsVE1CaZSWPo2ZtopE4SJipQhZ3srF"}],"id":1}
+{"jsonrpc":"2.0","result":[[[84,115,89,23,41,83,221,72,58,23,53,245,195,188,140,161,242,189,200,164,139,214,12,180,84,161,28,151,24,243,159,125],10000000,{"authorized_voter_pubkey":[84,115,89,23,41,83,221,72,58,23,53,245,195,188,140,161,242,189,200,164,139,214,12,180,84,161,28,151,24,243,159,125],"commission":0,"credits":0,"node_pubkey":[49,139,227,211,47,39,69,86,131,244,160,144,228,169,84,143,142,253,83,81,212,110,254,12,242,71,219,135,30,60,157,213],"root_slot":null,"votes":[{"confirmation_count":1,"slot":0}]}]],"id":1}
 ```

 ---
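The `VoteState` fields enumerated under getEpochVoteAccounts map naturally onto a struct; a sketch with guessed types (inferred from the JSON example above, not the vote program's actual definition):

```rust
/// One entry in `votes`: a slot plus its confirmation count, per the
/// example result above.
struct Lockout {
    slot: u64,
    confirmation_count: u32,
}

struct VoteState {
    votes: Vec<Lockout>,               // most recent vote lockouts
    node_pubkey: [u8; 32],             // node that votes with this account
    authorized_voter_pubkey: [u8; 32], // authorized vote signer
    commission: u32,                   // fraction commission/MAX_U32
    root_slot: Option<u64>,            // last slot at maximum lockout
    credits: u64,                      // credits accrued for lockouts
}

fn main() {
    let state = VoteState {
        votes: vec![Lockout { slot: 0, confirmation_count: 1 }],
        node_pubkey: [0; 32],
        authorized_voter_pubkey: [0; 32],
        commission: 0,
        root_slot: None,
        credits: 0,
    };
    println!("tracking {} lockout(s)", state.votes.len());
}
```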
@@ -539,7 +389,7 @@ for a given account public key changes

 ##### Notification Format:
 ```bash
-{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
+{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"loader":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
 ```

 ---
@@ -45,7 +45,7 @@ The upsides compared to guards:
 * The timeout is not fixed.

 * The timeout is local to the leader, and therefore can be clever. The leader's
-  heuristic can take into account turbine performance.
+  heuristic can take into account avalanche performance.

 * This design doesn't require a ledger hard fork to update.

@@ -96,7 +96,7 @@ ends up scheduled for the first two epochs because the leader schedule is also
 generated at slot 0 for the next epoch. The length of the first two epochs can
 be specified in the genesis block as well. The minimum length of the first
 epochs must be greater than or equal to the maximum rollback depth as defined in
-[Tower BFT](tower-bft.md).
+[fork selection](fork-selection.md).

 ## Leader Schedule Generation Algorithm

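The algorithm itself sits outside this hunk; as a hedged sketch of the general technique the heading names, stake-weighted sampling from a seed fixed at the epoch boundary (the PRNG and weighting below are illustrative, not the actual implementation):

```rust
/// Tiny deterministic PRNG so the sketch stays dependency-free; the real
/// schedule would derive a cryptographic seed from the epoch boundary.
struct Lcg(u64);
impl Lcg {
    fn next(&mut self) -> u64 {
        self.0 = self.0.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
        self.0
    }
}

/// Pick one leader per slot, weighted by stake, from a fixed seed.
fn leader_schedule(stakes: &[(&str, u64)], slots: u64, seed: u64) -> Vec<&str> {
    let total: u64 = stakes.iter().map(|(_, s)| s).sum();
    let mut rng = Lcg(seed);
    (0..slots)
        .map(|_| {
            let mut point = rng.next() % total;
            for (id, stake) in stakes {
                if point < *stake {
                    return *id;
                }
                point -= stake;
            }
            unreachable!("point is always below total stake")
        })
        .collect()
}

fn main() {
    let stakes = [("alice", 60), ("bob", 30), ("carol", 10)];
    // Same seed, same schedule: every node derives it independently.
    println!("{:?}", leader_schedule(&stakes, 8, 42));
}
```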
@@ -74,7 +74,7 @@ The program should have a list of slots which are valid storage mining slots.
 This list should be maintained by keeping track of slots which are rooted slots in which a significant
 portion of the network has voted on with a high lockout value, maybe 32-votes old. Every SLOTS\_PER\_SEGMENT
 number of slots would be added to this set. The program should check that the slot is in this set. The set can
-be maintained by receiving an AdvertiseStorageRecentBlockHash and checking with its bank/Tower BFT state.
+be maintained by receiving an AdvertiseStorageRecentBlockHash and checking with its bank/locktower state.

 The program should do a signature verify check on the signature, public key from the transaction submitter and the message of
 the previous storage epoch PoH value.

@@ -1,18 +1,19 @@
|
|||||||
# Ledger Replication
|
# Ledger Replication
|
||||||
|
|
||||||
At full capacity on a 1gbps network solana will generate 4 petabytes of data
|
At full capacity on a 1gbps network solana will generate 4 petabytes of data
|
||||||
per year. To prevent the network from centralizing around validators that have
|
per year. To prevent the network from centralizing around full nodes that have
|
||||||
to store the full data set this protocol proposes a way for mining nodes to
|
to store the full data set this protocol proposes a way for mining nodes to
|
||||||
provide storage capacity for pieces of the data.
|
provide storage capacity for pieces of the network.
|
||||||
|
|
||||||
The basic idea to Proof of Replication is encrypting a dataset with a public
|
The basic idea to Proof of Replication is encrypting a dataset with a public
|
||||||
symmetric key using CBC encryption, then hash the encrypted dataset. The main
|
symmetric key using CBC encryption, then hash the encrypted dataset. The main
|
||||||
problem with the naive approach is that a dishonest storage node can stream the
|
problem with the naive approach is that a dishonest storage node can stream the
|
||||||
encryption and delete the data as it's hashed. The simple solution is to periodically
|
encryption and delete the data as its hashed. The simple solution is to force
|
||||||
regenerate the hash based on a signed PoH value. This ensures that all the data is present
|
the hash to be done on the reverse of the encryption, or perhaps with a random
|
||||||
during the generation of the proof and it also requires validators to have the
|
order. This ensures that all the data is present during the generation of the
|
||||||
entirety of the encrypted data present for verification of every proof of every identity.
|
proof and it also requires the validator to have the entirety of the encrypted
|
||||||
So the space required to validate is `number_of_proofs * data_size`
|
data present for verification of every proof of every identity. So the space
|
||||||
|
required to validate is `number_of_proofs * data_size`
|
||||||
|
|
||||||
## Optimization with PoH
|
## Optimization with PoH
|
||||||
|
|
||||||
@@ -28,12 +29,13 @@ core. The total space required for verification is `1_ledger_segment +
|
|||||||
## Network
|
## Network
|
||||||
|
|
||||||
Validators for PoRep are the same validators that are verifying transactions.
|
Validators for PoRep are the same validators that are verifying transactions.
|
||||||
If a replicator can prove that a validator verified a fake PoRep, then the
|
They have some stake that they have put up as collateral that ensures that
|
||||||
validator will not receive a reward for that storage epoch.
|
their work is honest. If you can prove that a validator verified a fake PoRep,
|
||||||
|
then the validator will not receive a reward for that storage epoch.
|
||||||
|
|
||||||
Replicators are specialized *light clients*. They download a part of the
|
Replicators are specialized *light clients*. They download a part of the ledger
|
||||||
ledger (a.k.a Segment) and store it, and provide PoReps of storing the ledger.
|
and store it, and provide PoReps of storing the ledger. For each verified PoRep
|
||||||
For each verified PoRep replicators earn a reward of sol from the mining pool.
|
replicators earn a reward of sol from the mining pool.
|
||||||
|
|
||||||
## Constraints
|
## Constraints
|
||||||
|
|
||||||
@@ -53,8 +55,9 @@ changes to determine what rate it can validate storage proofs.
|
|||||||
|
|
||||||
1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The
|
1. SLOTS\_PER\_SEGMENT: Number of slots in a segment of ledger data. The
|
||||||
unit of storage for a replicator.
|
unit of storage for a replicator.
|
||||||
2. NUM\_KEY\_ROTATION\_SEGMENTS: Number of segments after which replicators
|
2. NUM\_KEY\_ROTATION\_TICKS: Number of ticks to save a PoH value and cause a
|
||||||
regenerate their encryption keys and select a new dataset to store.
|
key generation for the section of ledger just generated and the rotation of
|
||||||
|
another key in the set.
|
||||||
3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof
|
3. NUM\_STORAGE\_PROOFS: Number of storage proofs required for a storage proof
|
||||||
claim to be successfully rewarded.
|
claim to be successfully rewarded.
|
||||||
4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage
|
4. RATIO\_OF\_FAKE\_PROOFS: Ratio of fake proofs to real proofs that a storage
|
||||||
@@ -63,40 +66,36 @@ mining proof claim has to contain to be valid for a reward.
|
|||||||
proof.
|
proof.
|
||||||
6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate
|
6. NUM\_CHACHA\_ROUNDS: Number of encryption rounds performed to generate
|
||||||
encrypted state.
|
encrypted state.
|
||||||
7. NUM\_SLOTS\_PER\_TURN: Number of slots that define a single storage epoch or
|
|
||||||
a "turn" of the PoRep game.
|
|
||||||
|
|
||||||
### Validator behavior
|
### Validator behavior
|
||||||
|
|
||||||
-1. Validators join the network and begin looking for replicator accounts at each
-storage epoch/turn boundary.
-2. Every turn, Validators sign the PoH value at the boundary and use that signature
-to randomly pick proofs to verify from each storage account found in the turn boundary.
-This signed value is also submitted to the validator's storage account and will be used by
-replicators at a later stage to cross-verify.
-3. Every `NUM_SLOTS_PER_TURN` slots the validator advertises the PoH value. This value
-is also served to Replicators via RPC interfaces.
-4. For a given turn N, all validations get locked out until turn N+3 (a gap of 2 turns/epochs).
-At which point all validations during that turn are available for reward collection.
-5. Any incorrect validations will be marked during the turn in between.
+1. Validator joins the network and submits a storage validation capacity
+transaction which tells the network how many proofs it can process in a given
+period defined by NUM\_KEY\_ROTATION\_TICKS.
+2. Every NUM\_KEY\_ROTATION\_TICKS the validator stores the PoH value at that
+height.
+3. Validator generates a storage proof confirmation transaction.
+4. The storage proof confirmation transaction is integrated into the ledger.
+5. Validator responds to RPC interfaces for what the last storage epoch PoH
+value is and its slot.

### Replicator behavior

1. Since a replicator is somewhat of a light client and not downloading all the
-ledger data, they have to rely on other validators and replicators for information.
-Any given validator may or may not be malicious and give incorrect information, although
-there are not any obvious attack vectors that this could accomplish besides having the
-replicator do extra wasted work. For many of the operations there are a number of options
-depending on how paranoid a replicator is:
+ledger data, they have to rely on other full nodes (validators) for
+information. Any given validator may or may not be malicious and give incorrect
+information, although there are not any obvious attack vectors that this could
+accomplish besides having the replicator do extra wasted work. For many of the
+operations there are a number of options depending on how paranoid a replicator
+is:
  - (a) replicator can ask a validator
  - (b) replicator can ask multiple validators
-  - (c) replicator can ask other replicators
-  - (d) replicator can subscribe to the full transaction stream and generate
-the information itself (assuming the slot is recent enough)
-  - (e) replicator can subscribe to an abbreviated transaction stream to
-generate the information itself (assuming the slot is recent enough)
+  - (c) replicator can subscribe to the full transaction stream and generate
+the information itself
+  - (d) replicator can subscribe to an abbreviated transaction stream to
+generate the information itself
-2. A replicator obtains the PoH hash corresponding to the last turn with its slot.
+2. A replicator obtains the PoH hash corresponding to the last key rotation
+along with its slot.
3. The replicator signs the PoH hash with its keypair. That signature is the
seed used to pick the segment to replicate and also the encryption key. The
replicator mods the signature with the slot to get which segment to
@@ -104,67 +103,38 @@ replicate.

4. The replicator retrieves the ledger by asking peer validators and
replicators. See 6.5.
5. The replicator then encrypts that segment with the key using the chacha
algorithm in CBC mode with `NUM_CHACHA_ROUNDS` of encryption.
-6. The replicator initializes a chacha rng with a signed recent PoH value as
+6. The replicator initializes a chacha rng with the signature from step 2 as
the seed.
7. The replicator generates `NUM_STORAGE_SAMPLES` samples in the range of the
entry size and samples the encrypted segment with sha256 for 32-bytes at each
offset value. Sampling the state should be faster than generating the encrypted
segment.
8. The replicator sends a PoRep proof transaction which contains its sha state
at the end of the sampling operation, its seed and the samples it used to the
current leader and it is put onto the ledger.

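To make steps 6-8 concrete, here is a minimal sketch of the sampling loop, assuming a chacha-seeded rng (`rand_chacha`) and the `sha2` crate; the helper name and signature are illustrative, not the actual storage-program API:

```rust,ignore
use rand_chacha::ChaChaRng;            // chacha-based rng, seedable from 32 bytes
use rand_core::{RngCore, SeedableRng};
use sha2::{Digest, Sha256};

/// Derive sample offsets from the step-2 signature and hash 32 bytes of the
/// encrypted segment at each offset. The final sha state is what the PoRep
/// proof transaction commits to.
fn sample_encrypted_segment(
    encrypted_segment: &[u8],
    seed: [u8; 32], // assumed: the replicator's signature from step 2
    num_storage_samples: usize,
) -> [u8; 32] {
    let mut rng = ChaChaRng::from_seed(seed);
    let mut hasher = Sha256::new();
    for _ in 0..num_storage_samples {
        // Pick an offset within the segment, leaving room for a 32-byte read.
        let offset = (rng.next_u64() as usize) % (encrypted_segment.len() - 32);
        hasher.update(&encrypted_segment[offset..offset + 32]);
    }
    hasher.finalize().into()
}
```
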
-9. During a given turn the replicator should submit many proofs for the same segment
-and based on the `RATIO_OF_FAKE_PROOFS` some of those proofs must be fake.
-10. As the PoRep game enters the next turn, the replicator must submit a
-transaction with the mask of which proofs were fake during the last turn. This
-transaction will define the rewards for both replicators and validators.
-11. Finally for a turn N, as the PoRep game enters turn N + 3, replicator's proofs for
-turn N will be counted towards their rewards.

-### The PoRep Game

-The Proof of Replication game has 4 primary stages. For each "turn" multiple PoRep
-games can be in progress but each in a different stage.

-The 4 stages of the PoRep Game are as follows:

-1. Proof submission stage
-    - Replicators: submit as many proofs as possible during this stage
-    - Validators: No-op
-2. Proof verification stage
-    - Replicators: No-op
-    - Validators: Select replicators and verify their proofs from the previous turn
-3. Proof challenge stage
-    - Replicators: Submit the proof mask with justifications (for fake proofs submitted 2 turns ago)
-    - Validators: No-op
-4. Reward collection stage
-    - Replicators: Collect rewards for 3 turns ago
-    - Validators: Collect rewards for 3 turns ago

-For each turn of the PoRep game, both Validators and Replicators evaluate each
-stage. The stages are run as separate transactions on the storage program.

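As an illustrative sketch (not the storage program's actual types), the four stages and the turn offsets described above can be modeled as a small state machine:

```rust,ignore
/// Illustrative only: the four PoRep stages and turn offsets described above,
/// not the storage program's actual types.
#[derive(Clone, Copy, Debug, PartialEq)]
enum PoRepStage {
    ProofSubmission,   // replicators submit proofs (some intentionally fake)
    ProofVerification, // validators verify the previous turn's proofs
    ProofChallenge,    // replicators reveal the fake-proof mask
    RewardCollection,  // both sides collect rewards for turn N at turn N + 3
}

/// Stage of the game that started at `start_turn`, as of `current_turn`.
fn stage_for(start_turn: u64, current_turn: u64) -> Option<PoRepStage> {
    match current_turn.checked_sub(start_turn)? {
        0 => Some(PoRepStage::ProofSubmission),
        1 => Some(PoRepStage::ProofVerification),
        2 => Some(PoRepStage::ProofChallenge),
        3 => Some(PoRepStage::RewardCollection),
        _ => None, // this game is complete; a new one is underway
    }
}
```
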
### Finding who has a given block of ledger

-1. Validators monitor the turns in the PoRep game and look at the rooted bank
-at turn boundaries for any proofs.
-2. Validators maintain a map of ledger segments and corresponding replicator public keys.
-The map is updated when a Validator processes a replicator's proofs for a segment.
-The validator provides an RPC interface to access this map. Using this API, clients
-can map a segment to a replicator's network address (correlating it via cluster_info table).
-The clients can then send repair requests to the replicator to retrieve segments.
-3. Validators would need to invalidate this list every N turns.
+1. Validators monitor the transaction stream for storage mining proofs, and
+keep a mapping of ledger segments by slot to public keys. When it sees
+a storage mining proof it updates this mapping and provides an RPC interface
+which takes a slot and hands back a list of public keys. The client
+then looks up in their cluster\_info table to see which network address that
+corresponds to and sends a repair request to retrieve the necessary blocks of
+ledger.
+2. Validators would need to prune this list which it could do by periodically
+looking at the oldest entries in its mappings and doing a network query to see
+if the storage host is still serving the first entry.

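A sketch of the mapping a validator might maintain for this lookup (illustrative types only; the real index would live alongside the validator's storage state):

```rust,ignore
use std::collections::HashMap;
use solana_sdk::pubkey::Pubkey;

/// Illustrative slot -> storage-host index built from observed proofs.
#[derive(Default)]
struct StorageHostIndex {
    by_slot: HashMap<u64, Vec<Pubkey>>,
}

impl StorageHostIndex {
    /// Record a storage mining proof seen in the transaction stream.
    fn record_proof(&mut self, slot: u64, replicator: Pubkey) {
        self.by_slot.entry(slot).or_default().push(replicator);
    }

    /// RPC-style lookup: which public keys claim to store this slot's segment?
    fn hosts_for(&self, slot: u64) -> &[Pubkey] {
        self.by_slot.get(&slot).map(Vec::as_slice).unwrap_or(&[])
    }
}
```
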
## Sybil attacks

For any random seed, we force everyone to use a signature that is derived from
-a PoH hash at the turn boundary. Everyone uses the same count, so the same PoH
-hash is signed by every participant. The signatures are then each cryptographically
-tied to the keypair, which prevents a leader from grinding on the resulting
-value for more than 1 identity.
+a PoH hash. Everyone must use the same count, so the same PoH hash is signed by
+every participant. The signatures are then each cryptographically tied to the
+keypair, which prevents a leader from grinding on the resulting value for more
+than 1 identity.

Since there are many more client identities than encryption identities, we need
to split the reward for multiple clients, and prevent Sybil attacks from
@@ -185,7 +155,8 @@ the network can reward long lived client identities more than new ones.

showing the initial state for the hash.
- If a validator marks real proofs as fake, no on-chain computation can be done
to distinguish who is correct. Rewards would have to rely on the results from
-multiple validators to catch bad actors and replicators from being denied rewards.
+multiple validators in a stake-weighted fashion to catch bad actors and
+replicators from being locked out of the network.
- Validator stealing mining proof results for itself. The proofs are derived
from a signature from a replicator, since the validator does not know the
private key used to generate the encryption key, it cannot be the generator of
@@ -76,24 +76,21 @@ this field can only be modified by this entity

### StakeState

-A StakeState takes one of two forms, StakeState::Stake and StakeState::MiningPool.
+A StakeState takes one of two forms, StakeState::Delegate and StakeState::MiningPool.

-### StakeState::Stake
+### StakeState::Delegate

-Stake is the current delegation preference of the **staker**. Stake
+StakeState is the current delegation preference of the **staker**. StakeState
contains the following state information:

+* Account::lamports - The staked lamports.

* `voter_pubkey` - The pubkey of the VoteState instance the lamports are
delegated to.

* `credits_observed` - The total credits claimed over the lifetime of the
program.

-* `stake` - The actual activated stake.

-* Account::lamports - Lamports available for staking, including any earned as rewards.

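A rough sketch of the two account forms described above, with fields taken from the prose (illustrative, not the canonical program definition):

```rust,ignore
use solana_sdk::pubkey::Pubkey;

/// Sketch only: field names follow the prose above; the real program's
/// definition may differ.
enum StakeState {
    /// The staker's delegation preference (the newer text calls this `Stake`).
    Delegate {
        voter_pubkey: Pubkey,  // VoteState instance the lamports are delegated to
        credits_observed: u64, // total credits claimed over the program's lifetime
        stake: u64,            // the actual activated stake
    },
    /// A cluster-wide pool that funds storage-mining rewards.
    MiningPool,
}
```
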
### StakeState::MiningPool

There are two approaches to the mining pool. The bank could allow the
@@ -108,12 +105,11 @@ tokens stored as `Account::lamports`.

The stakes and the MiningPool are accounts that are owned by the same `Stake`
program.

-### StakeInstruction::DelegateStake(stake)
+### StakeInstruction::Initialize

-* `account[0]` - RW - The StakeState::Stake instance.
-`StakeState::Stake::credits_observed` is initialized to `VoteState::credits`.
-`StakeState::Stake::voter_pubkey` is initialized to `account[1]`
-`StakeState::Stake::stake` is initialized to `stake`, as long as it's less than account[0].lamports
+* `account[0]` - RW - The StakeState::Delegate instance.
+`StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
+`StakeState::Delegate::voter_pubkey` is initialized to `account[1]`

* `account[1]` - R - The VoteState instance.

@@ -128,7 +124,7 @@ deposited into the StakeState and as validator commission is proportional to

* `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the
reward.
-* `account[1]` - RW - The StakeState::Stake instance that is redeeming votes
+* `account[1]` - RW - The StakeState::Delegate instance that is redeeming votes
credits.
* `account[2]` - R - The VoteState instance, must be the same as
`StakeState::voter_pubkey`
@@ -136,7 +132,7 @@ credits.

Reward is paid out for the difference between `VoteState::credits` and
`StakeState::Delegate::credits_observed`, and `credits_observed` is updated to
`VoteState::credits`. The commission is deposited into the `VoteState` token
-balance, and the reward is deposited to the `StakeState::Stake` token balance. The
+balance, and the reward is deposited to the `StakeState::Delegate` token balance. The
reward and the commission are weighted by the `StakeState::lamports` divided by total lamports staked.

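A small sketch of that weighting (names are illustrative, not the actual program code):

```rust,ignore
/// Weight a claimed reward by this stake's share of the total stake, i.e.
/// `StakeState::lamports` divided by total lamports staked, as described above.
fn weighted_reward(reward: u64, stake_lamports: u64, total_lamports_staked: u64) -> u64 {
    ((reward as u128 * stake_lamports as u128) / total_lamports_staked as u128) as u64
}
```
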
The Staker or the owner of the Stake program sends a transaction with this
@@ -150,7 +146,7 @@ stake_state.credits_observed = vote_state.credits;
```

`credits_to_claim` is used to compute the reward and commission, and
-`StakeState::Stake::credits_observed` is updated to the latest
+`StakeState::Delegate::credits_observed` is updated to the latest
`VoteState::credits` value.

### Collecting network fees into the MiningPool
@@ -179,13 +175,13 @@ many rewards to be claimed concurrently.

## Passive Delegation

-Any number of instances of StakeState::Stake programs can delegate to a single
+Any number of instances of StakeState::Delegate programs can delegate to a single
VoteState program without an interactive action from the identity controlling
the VoteState program or submitting votes to the program.

The total stake allocated to a VoteState program can be calculated by the sum of
all the StakeState programs that have the VoteState pubkey as the
-`StakeState::Stake::voter_pubkey`.
+`StakeState::Delegate::voter_pubkey`.

## Example Callflow

@@ -60,7 +60,7 @@ The read is satisfied by pointing to a memory-mapped location in the

## Root Forks

-[Tower BFT](tower-bft.md) eventually selects a fork as a
+The [fork selection algorithm](fork-selection.md) eventually selects a fork as a
root fork and the fork is squashed. A squashed/root fork cannot be rolled back.

When a fork is squashed, all accounts in its parents not already present in the
@@ -1,172 +0,0 @@
# Simple Payment and State Verification

It is often useful to allow low resourced clients to participate in a Solana
cluster. Be this participation economic or contract execution, verification
that a client's activity has been accepted by the network is typically
expensive. This proposal lays out a mechanism for such clients to confirm that
their actions have been committed to the ledger state with minimal resource
expenditure and third-party trust.

## A Naive Approach

Validators store the signatures of recently confirmed transactions for a short
period of time to ensure that they are not processed more than once. Validators
provide a JSON RPC endpoint, which clients can use to query the cluster if a
transaction has been recently processed. Validators also provide a PubSub
notification, whereby a client registers to be notified when a given signature
is observed by the validator. While these two mechanisms allow a client to
verify a payment, they are not a proof and rely on completely trusting a
fullnode.

We will describe a way to minimize this trust using Merkle Proofs to anchor the
fullnode's response in the ledger, allowing the client to confirm on their own
that a sufficient number of their preferred validators have confirmed a
transaction. Requiring multiple validator attestations further reduces trust in
the fullnode, as it increases both the technical and economic difficulty of
compromising several other network participants.

## Light Clients

A 'light client' is a cluster participant that does not itself run a fullnode.
This light client would provide a level of security greater than trusting a
remote fullnode, without requiring the light client to spend a lot of resources
verifying the ledger.

Rather than providing transaction signatures directly to a light client, the
fullnode instead generates a Merkle Proof from the transaction of interest to
the root of a Merkle Tree of all transactions in the including block. This Merkle
Root is stored in a ledger entry which is voted on by validators, providing it
consensus legitimacy. The additional level of security for a light client depends
on an initial canonical set of validators the light client considers to be the
stakeholders of the cluster. As that set is changed, the client can update its
internal set of known validators with [receipts](#receipts). This may become
challenging with a large number of delegated stakes.

Fullnodes themselves may want to use light client APIs for performance reasons.
For example, during the initial launch of a fullnode, the fullnode may use a
cluster provided checkpoint of the state and verify it with a receipt.

## Receipts

A receipt is a minimal proof that: a transaction has been included in a block,
that the block has been voted on by the client's preferred set of validators and
that the votes have reached the desired confirmation depth.

The receipts for both state and payments start with a Merkle Path from the
value into a Bank-Merkle that has been voted on and included in the ledger. A
chain of PoH Entries containing subsequent validator votes, deriving from the
Bank-Merkle, is the confirmation proof.

Clients can examine this ledger data and compute the finality using Solana's fork
selection rules.

### Payment Merkle Path

A payment receipt is a data structure that contains a Merkle Path from a
transaction to the required set of validator votes.

An Entry-Merkle is a Merkle Root including all transactions in the entry, sorted
by signature.

<img alt="Block Merkle Diagram" src="img/spv-block-merkle.svg" class="center"/>

A Block-Merkle is a Merkle root of all the Entry-Merkles sequenced in the block.
Transaction status is necessary for the receipt because the state receipt is
constructed for the block. Two transactions over the same state can appear in
the block, and therefore, there is no way to infer from just the state whether a
transaction that is committed to the ledger has succeeded or failed in modifying
the intended state. It may not be necessary to encode the full status code; a
single status bit indicating the transaction's success is enough.

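As a sketch, an Entry-Merkle over transactions sorted by signature could be computed as follows (illustrative helper; the ledger code defines the canonical tree layout and padding rule):

```rust,ignore
use sha2::{Digest, Sha256};

/// Sketch: compute an Entry-Merkle from transaction hashes, sorted by
/// signature as described above. An odd node is paired with itself here;
/// the actual ledger code defines the canonical padding rule.
fn entry_merkle(mut leaves: Vec<[u8; 32]>) -> Option<[u8; 32]> {
    leaves.sort();
    if leaves.is_empty() {
        return None;
    }
    while leaves.len() > 1 {
        leaves = leaves
            .chunks(2)
            .map(|pair| {
                let right = pair.get(1).unwrap_or(&pair[0]);
                let mut h = Sha256::new();
                h.update(&pair[0]);
                h.update(right);
                h.finalize().into()
            })
            .collect();
    }
    Some(leaves[0])
}
```
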

### State Merkle Path

A state receipt provides a confirmation that a specific state is committed at the
end of the block. Inter-block state transitions do not generate a receipt.

For example:

* A sends 5 Lamports to B
* B spends 5 Lamports
* C sends 5 Lamports to A

At the end of the block, A and B are in the exact same starting state, and any
state receipt would point to the same value for A or B.

The Bank-Merkle is computed from the Merkle Tree of the new state changes, along
with the Previous Bank-Merkle, and the Block-Merkle.

<img alt="Bank Merkle Diagram" src="img/spv-bank-merkle.svg" class="center"/>

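One way the combination just described could look, as a sketch (the hash order and layout here are assumptions, not the node's actual definition):

```rust,ignore
use sha2::{Digest, Sha256};

/// Sketch: combine the three inputs above into a Bank-Merkle.
/// The ordering and exact layout are illustrative assumptions.
fn bank_merkle(
    prev_bank_merkle: [u8; 32],
    state_changes_root: [u8; 32],
    block_merkle: [u8; 32],
) -> [u8; 32] {
    let mut h = Sha256::new();
    h.update(prev_bank_merkle);
    h.update(state_changes_root);
    h.update(block_merkle);
    h.finalize().into()
}
```
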

A state receipt contains only the state changes occurring in the block. A direct
Merkle Path to the current Bank-Merkle guarantees the state value at that bank
hash, but it cannot be used to generate a “current” receipt to the latest state
if the state modification occurred in some previous block. There is no guarantee
that the path provided by the validator is the latest one available out of all
the previous Bank-Merkles.

Clients that want to query the chain for a receipt of the "latest" state would
need to create a transaction that would update the Merkle Path for that account,
such as a credit of 0 Lamports.

### Validator Votes

Leaders should coalesce the validator votes by stake weight into a single entry.
This will reduce the number of entries necessary to create a receipt.

### Chain of Entries

A receipt has a PoH link from the payment or state Merkle Path root to a list of
consecutive validation votes.

It contains the following:
* State -> Bank-Merkle
or
* Transaction -> Entry-Merkle -> Block-Merkle -> Bank-Merkle

And a vector of PoH entries:

* Validator vote entries
* Ticks
* Light entries

```rust,ignore
/// This Entry definition skips over the transactions and only contains the
/// hash of the transactions used to modify PoH.
LightEntry {
    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,
    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    hash: Hash,
    /// The Merkle Root of the transactions encoded into the Entry.
    entry_hash: Hash,
}
```

The light entries are reconstructed from Entries and simply show the entry Merkle
Root that was mixed into the PoH hash, instead of the full transaction set.

Clients do not need the starting vote state. The [fork selection](book/src/fork-selection.md) algorithm is
defined such that only votes that appear after the transaction provide finality
for the transaction, and finality is independent of the starting state.

### Verification

A light client that is aware of the supermajority set of validators can verify a
receipt by following the Merkle Path to the PoH chain. The Bank-Merkle is the
Merkle Root and will appear in votes included in an Entry. The light client can
simulate [fork selection](book/src/fork-selection.md) for the consecutive votes
and verify that the receipt is confirmed at the desired lockout threshold.

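A minimal sketch of the Merkle Path check at the core of this verification, assuming SHA-256 and a (sibling, position) path encoding; names are illustrative:

```rust,ignore
use sha2::{Digest, Sha256};

fn hash_pair(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] {
    let mut h = Sha256::new();
    h.update(left);
    h.update(right);
    h.finalize().into()
}

/// Walk from a leaf (e.g. a transaction hash) up the Merkle Path and check
/// that it arrives at the Bank-Merkle the validators voted on.
fn verify_merkle_path(
    leaf: [u8; 32],
    path: &[([u8; 32], bool)], // (sibling hash, sibling-is-left?)
    expected_root: [u8; 32],
) -> bool {
    let mut node = leaf;
    for (sibling, sibling_is_left) in path {
        node = if *sibling_is_left {
            hash_pair(sibling, &node)
        } else {
            hash_pair(&node, sibling)
        };
    }
    node == expected_root
}
```
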
### Synthetic State

Synthetic state should be computed into the Bank-Merkle along with the bank
generated state.

For example:

* Epoch validator accounts and their stakes and weights.
* Computed fee rates

These values should have an entry in the Bank-Merkle. They should live under
known accounts, and therefore have an exact address in the Merkle Path.
@@ -11,7 +11,7 @@ of getting its stake slashed. The economics are covered in [staking

rewards](staking-rewards.md). This chapter, on the other hand, describes the
underlying mechanics of its implementation.

## Basic Design

The general idea is that the validator owns a Vote account. The Vote account
tracks validator votes, counts validator generated credits, and provides any
@@ -20,7 +20,7 @@ stakes delegated to it and has no staking weight.

A separate Stake account (created by a staker) names a Vote account to which the
stake is delegated. Rewards generated are proportional to the amount of
-lamports staked. The Stake account is owned by the staker only. Some portion of the lamports
+lamports staked. The Stake account is owned by the staker only. Lamports
stored in this account are the stake.

## Passive Delegation
@@ -31,7 +31,7 @@ the Vote account or submitting votes to the account.

The total stake allocated to a Vote account can be calculated by the sum of
all the Stake accounts that have the Vote account pubkey as the
-`StakeState::Stake::voter_pubkey`.
+`StakeState::Delegate::voter_pubkey`.

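In sketch form, the sum is a filter over stake accounts (hypothetical types, not the runtime's accounts API):

```rust,ignore
use solana_sdk::pubkey::Pubkey;

/// Sketch: total stake delegated to `vote_pubkey`, summed over
/// (voter_pubkey, staked lamports) pairs drawn from Stake accounts.
fn total_stake(stakes: &[(Pubkey, u64)], vote_pubkey: &Pubkey) -> u64 {
    stakes
        .iter()
        .filter(|(voter, _)| voter == vote_pubkey)
        .map(|(_, lamports)| lamports)
        .sum()
}
```
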
## Vote and Stake accounts

@@ -46,15 +46,15 @@ program that its delegate has participated in validating the ledger.

VoteState is the current state of all the votes the validator has submitted to
the network. VoteState contains the following state information:

* `votes` - The submitted votes data structure.

* `credits` - The total number of rewards this vote program has generated over its
lifetime.

* `root_slot` - The last slot to reach the full lockout commitment necessary for
rewards.

* `commission` - The commission taken by this VoteState for any rewards claimed by
staker's Stake accounts. This is the percentage ceiling of the reward.

* Account::lamports - The accumulated lamports from the commission. These do not
@@ -71,17 +71,13 @@ count as stakes.

### VoteInstruction::AuthorizeVoteSigner(Pubkey)

* `account[0]` - RW - The VoteState
-`VoteState::authorized_vote_signer` is set to `Pubkey`; the transaction must be
-signed by the Vote account's current `authorized_vote_signer`. <br>
-`VoteInstruction::AuthorizeVoter` allows a staker to choose a signing service
-for its votes. That service is responsible for ensuring the vote won't cause
-the staker to be slashed.
+`VoteState::authorized_vote_signer` is set to `Pubkey`; the instruction must be
+signed by `Pubkey`

### VoteInstruction::Vote(Vec<Vote>)

* `account[0]` - RW - The VoteState
-`VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules; see [Tower BFT](tower-bft.md)
+`VoteState::lockouts` and `VoteState::credits` are updated according to voting lockout rules; see [Fork Selection](fork-selection.md)

* `account[1]` - RO - A list of some N most recent slots and their hashes for the vote to be verified against.
@@ -89,16 +85,14 @@ the staker to be slashed.

### StakeState

-A StakeState takes one of three forms, StakeState::Uninitialized, StakeState::Stake and StakeState::RewardsPool.
+A StakeState takes one of two forms, StakeState::Delegate and StakeState::MiningPool.

-### StakeState::Stake
+### StakeState::Delegate

-StakeState::Stake is the current delegation preference of the **staker** and
+StakeState is the current delegation preference of the **staker**. StakeState
contains the following state information:

-* Account::lamports - The lamports available for staking.
+* Account::lamports - The staked lamports.

-* `stake` - the staked amount (subject to warm up and cool down) for generating rewards, always less than or equal to Account::lamports

* `voter_pubkey` - The pubkey of the VoteState instance the lamports are
delegated to.
@@ -106,53 +100,56 @@ delegated to.

* `credits_observed` - The total credits claimed over the lifetime of the
program.

-* `activated` - the epoch at which this stake was activated/delegated. The full stake will be counted after warm up.

-* `deactivated` - the epoch at which this stake will be completely de-activated, which is `cool down` epochs after StakeInstruction::Deactivate is issued.

-### StakeState::RewardsPool

-To avoid a single network wide lock or contention in redemption, 256 RewardsPools are part of genesis under pre-determined keys, each with std::u64::MAX credits to be able to satisfy redemptions according to point value.

-The Stakes and the RewardsPool are accounts that are owned by the same `Stake` program.

-### StakeInstruction::DelegateStake(u64)

-The Stake account is moved from Uninitialized to StakeState::Stake form. This is
-how stakers choose their initial delegate validator node and activate their
-stake account lamports.

-* `account[0]` - RW - The StakeState::Stake instance. <br>
-`StakeState::Stake::credits_observed` is initialized to `VoteState::credits`,<br>
-`StakeState::Stake::voter_pubkey` is initialized to `account[1]`,<br>
-`StakeState::Stake::stake` is initialized to the u64 passed as an argument above,<br>
-`StakeState::Stake::activated` is initialized to current Bank epoch, and<br>
-`StakeState::Stake::deactivated` is initialized to std::u64::MAX
+### StakeState::MiningPool

+There are two approaches to the mining pool. The bank could allow the
+StakeState program to bypass the token balance check, or a program representing
+the mining pool could run on the network. To avoid a single network wide lock,
+the pool can be split into several mining pools. This design focuses on using
+StakeState::MiningPool instances as the cluster wide mining pools.

+* 256 StakeState::MiningPool are initialized, each with 1/256 number of mining pool
+tokens stored as `Account::lamports`.

+The stakes and the MiningPool are accounts that are owned by the same `Stake`
+program.

+### StakeInstruction::Initialize

+* `account[0]` - RW - The StakeState::Delegate instance.
+`StakeState::Delegate::credits_observed` is initialized to `VoteState::credits`.
+`StakeState::Delegate::voter_pubkey` is initialized to `account[1]`

* `account[1]` - R - The VoteState instance.

-* `account[2]` - R - syscall::current account, carries information about current Bank epoch

### StakeInstruction::RedeemVoteCredits

The staker or the owner of the Stake account sends a transaction with this
instruction to claim rewards.

-The Vote account and the Stake account pair maintain a lifetime counter of total
-rewards generated and claimed. Rewards are paid according to a point value
-supplied by the Bank from inflation. A `point` is one credit * one staked
-lamport, rewards paid are proportional to the number of lamports staked.
+The Vote account and the Stake account pair maintain a lifetime counter
+of total rewards generated and claimed. When claiming rewards, the total lamports
+deposited into the Stake account and as validator commission is proportional to
+`VoteState::credits - StakeState::credits_observed`.

-* `account[0]` - RW - The StakeState::Stake instance that is redeeming rewards.
-* `account[1]` - R - The VoteState instance, must be the same as `StakeState::voter_pubkey`
-* `account[2]` - RW - The StakeState::RewardsPool instance that will fulfill the request (picked at random).
-* `account[3]` - R - syscall::rewards account from the Bank that carries point value.
+* `account[0]` - RW - The StakeState::MiningPool instance that will fulfill the
+reward.
+* `account[1]` - RW - The StakeState::Delegate instance that is redeeming votes
+credits.
+* `account[2]` - R - The VoteState instance, must be the same as
+`StakeState::voter_pubkey`

Reward is paid out for the difference between `VoteState::credits` and
-`StakeState::Stake::credits_observed`, multiplied by `syscall::rewards::Rewards::validator_point_value`.
-`StakeState::Stake::credits_observed` is updated to `VoteState::credits`. The commission is deposited into the Vote account token
+`StakeState::Delegate::credits_observed`, and `credits_observed` is updated to
+`VoteState::credits`. The commission is deposited into the Vote account token
balance, and the reward is deposited to the Stake account token balance.

+The total lamports paid is a percentage-rate of the lamports staked multiplied by
+the ratio of rewards being redeemed to rewards that could have been generated
+during the rate period.

+Any random MiningPool can be used to redeem the credits.

```rust,ignore
let credits_to_claim = vote_state.credits - stake_state.credits_observed;
@@ -160,26 +157,24 @@ stake_state.credits_observed = vote_state.credits;
```

`credits_to_claim` is used to compute the reward and commission, and
-`StakeState::Stake::credits_observed` is updated to the latest
+`StakeState::Delegate::credits_observed` is updated to the latest
`VoteState::credits` value.

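A sketch of how the redemption math described above could be realized under the point-value scheme (function and parameter names are assumptions, not the actual program API):

```rust,ignore
/// Sketch: lamports paid out by RedeemVoteCredits (illustrative names).
fn redeem_vote_credits(
    vote_credits: u64,
    credits_observed: &mut u64,
    validator_point_value: u64, // assumed: from the syscall::rewards account
    commission_pct: u64,        // VoteState commission, percentage ceiling
) -> (u64, u64) {
    let credits_to_claim = vote_credits - *credits_observed;
    *credits_observed = vote_credits; // observe the latest credits
    let total = credits_to_claim * validator_point_value;
    let commission = total * commission_pct / 100; // deposited to the Vote account
    (total - commission, commission)               // (stake reward, commission)
}
```
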
-### StakeInstruction::Deactivate
-A staker may wish to withdraw from the network. To do so he must first deactivate his stake, and wait for cool down.

-* `account[0]` - RW - The StakeState::Stake instance that is deactivating, the transaction must be signed by this key.
-* `account[1]` - R - syscall::current account from the Bank that carries current epoch

-StakeState::Stake::deactivated is set to the current epoch + cool down. The account's stake will ramp down to zero by
-that epoch, and Account::lamports will be available for withdrawal.

-### StakeInstruction::Withdraw(u64)
-Lamports build up over time in a Stake account and any excess over activated stake can be withdrawn.

-* `account[0]` - RW - The StakeState::Stake from which to withdraw, the transaction must be signed by this key.
-* `account[1]` - RW - Account that should be credited with the withdrawn lamports.
-* `account[2]` - R - syscall::current account from the Bank that carries current epoch, to calculate stake.
+## Collecting network fees into the MiningPool

+At the end of the block, before the bank is frozen, but after it processed all
+the transactions for the block, a virtual instruction is executed to collect
+the transaction fees.

+* A portion of the fees are deposited into the leader's account.
+* A portion of the fees are deposited into the smallest StakeState::MiningPool
+account.

+## Authorizing a Vote Signer

+`VoteInstruction::AuthorizeVoter` allows a staker to choose a signing service
+for its votes. That service is responsible for ensuring the vote won't cause
+the staker to be slashed.

## Benefits of the design

@@ -192,6 +187,9 @@ Lamports build up over time in a Stake account and any excess over activated sta

* Commission for the work is deposited when a reward is claimed by the delegated
stake.

+This proposal would benefit from the `read-only` accounts proposal to allow for
+many rewards to be claimed concurrently.

## Example Callflow

<img alt="Passive Staking Callflow" src="img/passive-staking-callflow.svg" class="center"/>
@@ -48,7 +48,7 @@ specific parameters will be necessary:

Solana's trustless sense of time and ordering provided by its PoH data
structure, along with its
-[turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast
+[avalanche](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast
and transmission design, should provide sub-second transaction confirmation times that scale
with the log of the number of nodes in the cluster. This means we shouldn't
have to restrict the number of validating nodes with a prohibitive 'minimum
@@ -91,10 +91,6 @@ History](#proof-of-history).

The time, i.e. number of [slots](#slot), for which a [leader
schedule](#leader-schedule) is valid.

-#### finality

-When nodes representing 2/3rd of the stake have a common [root](#root).

#### fork

A [ledger](#ledger) derived from common entries but then diverged.
@@ -217,15 +213,6 @@ The public key of a [keypair](#keypair).

Storage mining client, stores some part of the ledger enumerated in blocks and
submits storage proofs to the chain. Not a full-node.

-#### root

-A [block](#block) or [slot](#slot) that has reached maximum [lockout](#lockout)
-on a validator. The root is the highest block that is an ancestor of all active
-forks on a validator. All ancestor blocks of a root are also transitively a
-root. Blocks that are not an ancestor and not a descendant of the root are
-excluded from consideration for consensus and can be discarded.

#### runtime

The component of a [fullnode](#fullnode) responsible for [program](#program)
@@ -32,7 +32,7 @@ traversal issues. A cloud-hosted machine works best. **Ensure that IP ports

Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended).
MacOS or WSL users may build from source.

For a performance testnet with many transactions we have some preliminary recommended setups:

| | Low end | Medium end | High end | Notes |
| --- | ---------|------------|----------| -- |
@@ -42,13 +42,6 @@ For a performance testnet with many transactions we have some preliminary recomm

| Accounts Drive(s) | None | Samsung 970 Pro 1TB | 2x Samsung 970 Pro 1TB | |
| GPU | 4x Nvidia 1070 or 2x Nvidia 1080 Ti or 2x Nvidia 2070 | 2x Nvidia 2080 Ti | 4x Nvidia 2080 Ti | Any number of cuda-capable GPUs are supported on Linux platforms. |

-#### GPU Requirements
-CUDA is required to make use of the GPU on your system. The provided Solana
-release binaries are built on Ubuntu 18.04 with <a
-href="https://developer.nvidia.com/cuda-toolkit-archive">CUDA Toolkit 10.1
-update 1</a>. If your machine is using a different CUDA version then you will
-need to rebuild from source.

#### Confirm The Testnet Is Reachable
Before attaching a validator node, sanity check that the cluster is accessible
to your machine by running some simple commands. If any of the commands fail,
@@ -71,10 +64,11 @@ for more detail on cluster activity.

##### Bootstrap with `solana-install`

The `solana-install` tool can be used to easily install and upgrade the cluster
-software on Linux x86_64 and mac OS systems.
+software on Linux x86_64 systems.

```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.17.0/install/solana-install-init.sh | sh -s
+$ export SOLANA_RELEASE=v0.14.2 # skip this line to install the latest release
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.14.0/install/solana-install-init.sh | sh -s
```

Alternatively build the `solana-install` program from source and run the
@@ -84,12 +78,11 @@ $ solana-install init

```

After a successful install, `solana-install update` may be used to easily update the cluster
-software to a newer version at any time.
+software to a newer version.

##### Download Prebuilt Binaries
-If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.
+Binaries are available for Linux x86_64 systems.

-###### Linux
Download the binaries by navigating to
[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
@@ -99,17 +92,6 @@ $ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2

$ cd solana-release/
$ export PATH=$PWD/bin:$PATH
```
-###### mac OS
-Download the binaries by navigating to
-[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
-download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
-archive:
-```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
-```

##### Build From Source
If you are unable to use the prebuilt binaries or prefer to build it yourself
from source, navigate to
@@ -121,15 +103,6 @@ $ ./scripts/cargo-install-all.sh .

$ export PATH=$PWD/bin:$PATH
```

-If building for CUDA (Linux only), fetch the perf-libs first then include the
-`cuda` feature flag when building:
-```bash
-$ ./fetch-perf-libs.sh
-$ source ./target/perf-libs/env.sh
-$ ./scripts/cargo-install-all.sh . cuda
-$ export PATH=$PWD/bin:$PATH
-```

### Starting The Validator
Sanity check that you are able to interact with the cluster by receiving a small
airdrop of lamports from the testnet drone:
@@ -146,7 +119,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy

Now configure a key pair for your validator by running:
```bash
-$ solana-keygen new -o ~/validator-keypair.json
+$ solana-keygen -o validator-keypair.json
```

Then use one of the following commands, depending on your installation
@@ -154,31 +127,23 @@ choice, to start the node:

If this is a `solana-install`-installation:
```bash
-$ validator.sh --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
+$ clear-config.sh
+$ validator.sh --identity validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
```

Alternatively, the `solana-install run` command can be used to run the validator
node while periodically checking for and applying software updates:
```bash
-$ solana-install run validator.sh -- --identity ~/validator-keypair.json --config-dir ~/validator-config --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
+$ clear-config.sh
+$ solana-install run validator.sh -- --identity validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
```

If you built from source:
```bash
-$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --rpc-port 8899 --poll-for-new-genesis-block testnet.solana.com
+$ USE_INSTALL=1 ./multinode-demo/clear-config.sh
+$ USE_INSTALL=1 ./multinode-demo/validator.sh --identity validator-keypair.json --poll-for-new-genesis-block testnet.solana.com
```

-#### Enabling CUDA
-By default CUDA is disabled. If your machine has a GPU with CUDA installed,
-define the SOLANA_CUDA flag in your environment *before* running any of the
-previously mentioned commands:
-```bash
-$ export SOLANA_CUDA=1
-```

-When your validator is started look for the following log message to indicate that CUDA is enabled:
-`"[<timestamp> solana::validator] CUDA is enabled"`

#### Controlling local network port allocation
By default the validator will dynamically select available network ports in the
8000-10000 range, and may be overridden with `--dynamic-port-range`. For
@@ -199,7 +164,7 @@ accounts: ...

The **identity pubkey** for your validator can also be found by running:
```bash
-$ solana-keygen pubkey ~/validator-keypair.json
+$ solana-keygen pubkey validator-keypair.json
```

From another console, confirm the IP address and **identity pubkey** of your validator is visible in the
@@ -211,7 +176,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy

Provide the **vote pubkey** to the `solana-wallet show-vote-account` command to view
the recent voting activity from your validator:
```bash
-$ solana-wallet show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
+$ solana-wallet -n testnet.solana.com show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
```

The vote pubkey for the validator can also be found by running:
@@ -222,63 +187,13 @@ $ solana-keygen pubkey ~/.local/share/solana/install/active_release/config-local

$ solana-keygen pubkey ./config-local/validator-vote-keypair.json
```

-#### Validator Metrics
-Metrics are available for local monitoring of your validator.
+### Sharing Metrics From Your Validator
+If you have obtained a metrics username/password from the Solana maintainers to
+help us monitor the health of the testnet, please perform the following steps
+before starting the validator to activate metrics reporting:

-Docker must be installed and the current user added to the docker group. Then
-download `solana-metrics.tar.bz2` from the Github Release and run
```bash
-$ tar jxf solana-metrics.tar.bz2
-$ cd solana-metrics/
-$ ./start.sh
+export u="username obtained from the Solana maintainers"
+export p="password obtained from the Solana maintainers"
+export SOLANA_METRICS_CONFIG="db=testnet,u=${u:?},p=${p:?}"
+source scripts/configure-metrics.sh
```

-A local InfluxDB and Grafana instance is now running on your machine. Define
-`SOLANA_METRICS_CONFIG` in your environment as described at the end of the
-`start.sh` output and restart your validator.

-Metrics should now be streaming and visible from your local Grafana dashboard.

-#### Timezone For Log Messages
-Log messages emitted by your validator include a timestamp. When sharing logs
-with others to help triage issues, that timestamp can cause confusion as it does
-not contain timezone information.

-To make it easier to compare logs between different sources we request that
-everybody use Pacific Time on their validator nodes. In Linux this can be
-accomplished by running:
-```bash
-$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
-```

-#### Publishing Validator Info

-You can publish your validator information to the chain to be publicly visible
-to other users.

-Run the solana-validator-info CLI to populate a validator-info account:
-```bash
-$ solana-validator-info publish ~/validator-keypair.json <VALIDATOR_NAME> <VALIDATOR_INFO_ARGS>
-```
-Optional fields for VALIDATOR_INFO_ARGS:
-* Website
-* Keybase Username
-* Details

-##### Keybase

-Including a Keybase username allows client applications (like the Solana Network
-Explorer) to automatically pull in your validator public profile, including
-cryptographic proofs, brand identity, etc. To connect your validator pubkey with
-Keybase:

-1. Join https://keybase.io/ and complete the profile for your validator
-2. Add your validator **identity pubkey** to Keybase:
-    * Create an empty file on your local computer called `validator-<PUBKEY>`
-    * In Keybase, navigate to the Files section, and upload your pubkey file to
-a `solana` subdirectory in your public folder: `/keybase/public/<KEYBASE_USERNAME>/solana`
-    * To check your pubkey, ensure you can successfully browse to
-`https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<PUBKEY>`
-3. Add or update your `solana-validator-info` with your Keybase username. The
-CLI will verify the `validator-<PUBKEY>` file
|
@@ -1,153 +0,0 @@
-## Testnet Replicator
-This document describes how to set up a replicator in the testnet
-
-Please note some of the information and instructions described here may change
-in future releases.
-
-### Overview
-Replicators are specialized light clients. They download a part of the
-ledger (a.k.a Segment) and store it. They earn rewards for storing segments.
-
-The testnet features a validator running at testnet.solana.com, which
-serves as the entrypoint to the cluster for your replicator node.
-
-Additionally there is a blockexplorer available at
-[http://testnet.solana.com/](http://testnet.solana.com/).
-
-The testnet is configured to reset the ledger daily, or sooner
-should the hourly automated cluster sanity test fail.
-
-### Machine Requirements
-Replicators don't need specialized hardware. Anything with more than
-128GB of disk space will be able to participate in the cluster as a replicator node.
-
-Currently the disk space requirements are very low but we expect them to change
-in the future.
-
-Prebuilt binaries are available for Linux x86_64 (Ubuntu 18.04 recommended),
-macOS, and Windows.
-
-#### Confirm The Testnet Is Reachable
-Before starting a replicator node, sanity check that the cluster is accessible
-to your machine by running some simple commands. If any of the commands fail,
-please retry 5-10 minutes later to confirm the testnet is not just restarting
-itself before debugging further.
-
-Fetch the current transaction count over JSON RPC:
-```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
-```
-
-Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.
-
-View the [metrics dashboard](
-https://metrics.solana.com:3000/d/testnet-beta/testnet-monitor-beta?var-testnet=testnet)
-for more detail on cluster activity.
-
-### Replicator Setup
-##### Obtaining The Software
-##### Bootstrap with `solana-install`
-
-The `solana-install` tool can be used to easily install and upgrade the cluster
-software.
-
-##### Linux and mac OS
-```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.17.0/install/solana-install-init.sh | sh -s
-```
-
-Alternatively build the `solana-install` program from source and run the
-following command to obtain the same result:
-```bash
-$ solana-install init
-```
-
-##### Windows
-Download and install **solana-install-init** from
-[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest)
-
-After a successful install, `solana-install update` may be used to
-easily update the software to a newer version at any time.
-
-##### Download Prebuilt Binaries
-If you would rather not use `solana-install` to manage the install, you can manually download and install the binaries.
-
-##### Linux
-Download the binaries by navigating to
-[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
-download **solana-release-x86_64-unknown-linux-gnu.tar.bz2**, then extract the
-archive:
-```bash
-$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
-```
-##### mac OS
-Download the binaries by navigating to
-[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
-download **solana-release-x86_64-apple-darwin.tar.bz2**, then extract the
-archive:
-```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
-```
-##### Windows
-Download the binaries by navigating to
-[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
-download **solana-release-x86_64-pc-windows-msvc.tar.bz2**, then extract it into a folder.
-It is a good idea to add this extracted folder to your windows PATH.
-
-### Starting The Replicator
-Try running the following command to join the gossip network and view all the other nodes in the cluster:
-```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
-# Press ^C to exit
-```
-
-Now configure the keypairs for your replicator by running:
-
-Navigate to the solana install location and open a cmd prompt
-```bash
-$ solana-keygen new -o replicator-keypair.json
-$ solana-keygen new -o storage-keypair.json
-```
-
-Use solana-keygen to show the public keys for each of the keypairs,
-they will be needed in the next step:
-- Windows
-```bash
-# The replicator's identity
-$ solana-keygen pubkey replicator-keypair.json
-$ solana-keygen pubkey storage-keypair.json
-```
-- Linux and mac OS
-```bash
-$ export REPLICATOR_IDENTITY=$(solana-keygen pubkey replicator-keypair.json)
-$ export STORAGE_IDENTITY=$(solana-keygen pubkey storage-keypair.json)
-
-```
-Then set up the storage accounts for your replicator by running:
-```bash
-$ solana-wallet --keypair replicator-keypair.json airdrop 100000
-$ solana-wallet --keypair replicator-keypair.json create-replicator-storage-account $REPLICATOR_IDENTITY $STORAGE_IDENTITY
-```
-Note: Every time the testnet restarts, run the wallet steps to set up the replicator accounts again.
-
-To start the replicator:
-```bash
-$ solana-replicator --entrypoint testnet.solana.com:8001 --identity replicator-keypair.json --storage-keypair storage-keypair.json --ledger replicator-ledger
-```
-
-### Verify Replicator Setup
-From another console, confirm the IP address and **identity pubkey** of your replicator is visible in the
-gossip network by running:
-```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
-```
-
-Provide the **storage account pubkey** to the `solana-wallet show-storage-account` command to view
-the recent mining activity from your replicator:
-```bash
-$ solana-wallet --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
-```
@@ -1,48 +0,0 @@
-# The Transaction
-
-### Components of a `Transaction`
-
-* **Transaction:**
-  * **message:** Defines the transaction
-    * **header:** Details the account types of and signatures required by
-      the transaction
-      * **num_required_signatures:** The total number of signatures
-        required to make the transaction valid.
-      * **num_credit_only_signed_accounts:** The last
-        `num_credit_only_signed_accounts` signatures refer to signing
-        credit only accounts. Credit only accounts can be used concurrently
-        by multiple parallel transactions, but their balance may only be
-        increased, and their account data is read-only.
-      * **num_credit_only_unsigned_accounts:** The last
-        `num_credit_only_unsigned_accounts` pubkeys in `account_keys` refer
-        to non-signing credit only accounts
-    * **account_keys:** List of pubkeys used by the transaction, including
-      by the instructions and for signatures. The first
-      `num_required_signatures` pubkeys must sign the transaction.
-    * **recent_blockhash:** The ID of a recent ledger entry. Validators will
-      reject transactions with a `recent_blockhash` that is too old.
-    * **instructions:** A list of [instructions](instruction.md) that are
-      run sequentially and committed in one atomic transaction if all
-      succeed.
-  * **signatures:** A list of signatures applied to the transaction. The
-    list is always of length `num_required_signatures`, and the signature
-    at index `i` corresponds to the pubkey at index `i` in `account_keys`.
-    The list is initialized with empty signatures (i.e. zeros), and
-    populated as signatures are added.
-
-### Transaction Signing
-
-A `Transaction` is signed by using an ed25519 keypair to sign the
-serialization of the `message`. The resulting signature is placed at the
-index of `signatures` matching the index of the keypair's pubkey in
-`account_keys`.
-
-### Transaction Serialization
-
-`Transaction`s (and their `message`s) are serialized and deserialized
-using the [bincode](https://crates.io/crates/bincode) crate with a
-non-standard vector serialization that uses only one byte for the length
-if it can be encoded in 7 bits, 2 bytes if it fits in 14 bits, or 3
-bytes if it requires 15 or 16 bits. The vector serialization is defined
-by Solana's
-[short-vec](https://github.com/solana-labs/solana/blob/master/sdk/src/short_vec.rs).
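The 1-to-3-byte length prefix described in the removed page above is compact enough to sketch. The following Rust snippet is an illustrative sketch only, not the SDK's actual `short_vec` module: it encodes a 16-bit length seven bits per byte, using the high bit of each byte to mark that another length byte follows.

```rust
// Sketch of a short-vec style length prefix: 7 bits per byte,
// high bit set while more length bytes follow.
fn encode_short_vec_len(mut len: u16) -> Vec<u8> {
    let mut out = Vec::new();
    loop {
        let mut byte = (len & 0x7f) as u8; // low 7 bits of the remaining length
        len >>= 7;
        if len != 0 {
            byte |= 0x80; // continuation bit: another length byte follows
        }
        out.push(byte);
        if len == 0 {
            return out;
        }
    }
}

fn main() {
    assert_eq!(encode_short_vec_len(0x05), vec![0x05]); // fits in 7 bits: 1 byte
    assert_eq!(encode_short_vec_len(0x3fff), vec![0xff, 0x7f]); // 14 bits: 2 bytes
    assert_eq!(encode_short_vec_len(0xffff), vec![0xff, 0xff, 0x03]); // 16 bits: 3 bytes
}
```

One byte covers lengths up to 127, which is why typical transactions, carrying only a handful of accounts and signatures, pay a single length byte per vector.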
@@ -8,14 +8,17 @@ client won't know how much was collected until the transaction is confirmed by
 the cluster and the remaining balance is checked. It smells of exactly what we
 dislike about Ethereum's "gas", non-determinism.
 
+## Implementation Status
+
+This design is not yet implemented, but is written as though it has been. Once
+implemented, delete this comment.
+
 ### Congestion-driven fees
 
 Each validator uses *signatures per slot* (SPS) to estimate network congestion
 and *SPS target* to estimate the desired processing capacity of the cluster.
 The validator learns the SPS target from the genesis block, whereas it
-calculates SPS from recently processed transactions. The genesis block also
-defines a target `lamports_per_signature`, which is the fee to charge per
-signature when the cluster is operating at *SPS target*.
+calculates SPS from the ledger data in the previous epoch.
 
 ### Calculating fees
 
@@ -34,11 +37,8 @@ lamports as returned by the fee calculator.
 In the first implementation of this design, the only fee parameter is
 `lamports_per_signature`. The more signatures the cluster needs to verify, the
 higher the fee. The exact number of lamports is determined by the ratio of SPS
-to the SPS target. At the end of each slot, the cluster lowers
-`lamports_per_signature` when SPS is below the target and raises it when above
-the target. The minimum value for `lamports_per_signature` is 50% of the target
-`lamports_per_signature` and the maximum value is 10x the target
-`lamports_per_signature`
+to the SPS target. The cluster lowers `lamports_per_signature` when SPS is
+below the target and raises it when at or above the target.
 
 Future parameters might include:
 
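To make the adjustment rule concrete, here is a hedged sketch of the logic both wordings above describe. The step size, types, and function name are illustrative assumptions; only the direction of the adjustment and the 50%/10x clamp (part of the wording this hunk removes) come from the document itself.

```rust
// Illustrative sketch: nudge lamports_per_signature toward the congestion
// signal each slot, clamped to [target / 2, target * 10].
fn adjust_lamports_per_signature(current: u64, target: u64, sps: f64, sps_target: f64) -> u64 {
    let step = (target / 20).max(1); // assumed step size, not specified by the design
    let proposed = if sps >= sps_target {
        current.saturating_add(step) // at or above the SPS target: raise the fee
    } else {
        current.saturating_sub(step) // below the SPS target: lower the fee
    };
    proposed.clamp(target / 2, target.saturating_mul(10))
}

fn main() {
    let target = 100; // target lamports_per_signature from the genesis block
    let mut fee = target;
    for _ in 0..200 {
        // sustained congestion: observed SPS stays above the SPS target
        fee = adjust_lamports_per_signature(fee, target, 12_000.0, 10_000.0);
    }
    assert_eq!(fee, target * 10); // the fee is pinned at the 10x cap
    println!("fee after sustained congestion: {} lamports/signature", fee);
}
```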
@@ -1,43 +0,0 @@
-# Anatomy of a Transaction
-
-Transactions encode lists of instructions that are executed
-sequentially, and only committed if all the instructions complete
-successfully. All account states are reverted upon the failure of a
-transaction. Each Transaction details the accounts used, including which
-must sign and which are credit only, a recent blockhash, the
-instructions, and any signatures.
-
-## Accounts and Signatures
-
-Each transaction explicitly lists all accounts that it needs access to.
-This includes accounts that are transferring tokens, accounts whose user
-data is being modified, and the program accounts that are being called
-by the instructions. Each account that is not an executable program can
-be marked as requiring a signature and/or as credit only. All accounts
-marked as signers must have a valid signature in the transaction's list
-of signatures before the transaction is considered valid. Any accounts
-marked as credit only may only have their token value increased, and
-their user data is read only. Accounts are locked by the runtime,
-ensuring that they are not modified by a concurrent program while the
-transaction is running. Credit only accounts can safely be shared, so
-the runtime will allow multiple concurrent credit only locks on an
-account.
-
-## Recent Blockhash
-
-A Transaction includes a recent blockhash to prevent duplication and to
-give transactions lifetimes. Any transaction that is completely
-identical to a previous one is rejected, so adding a newer blockhash
-allows multiple transactions to repeat the exact same action.
-Transactions also have lifetimes that are defined by the blockhash, as
-any transaction whose blockhash is too old will be rejected.
-
-## Instructions
-
-Each instruction specifies a single program account (which must be
-marked executable), a subset of the transaction's accounts that should
-be passed to the program, and a data byte array instruction that is
-passed to the program. The program interprets the data array and
-operates on the accounts specified by the instructions. The program can
-return successfully, or with an error code. An error return causes the
-entire transaction to fail immediately.
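For readers skimming this removed page, the shapes it describes reduce to plain data. The field and type names below are illustrative assumptions rather than the SDK's actual definitions.

```rust
// Illustrative shapes only: an instruction names one executable program,
// the accounts it touches (with per-account signer/credit-only flags),
// and an opaque data array that the named program interprets.
struct AccountMeta {
    pubkey: [u8; 32],     // account address
    is_signer: bool,      // must be covered by a signature on the transaction
    is_credit_only: bool, // balance may only increase; user data is read-only
}

struct Instruction {
    program_id: [u8; 32],       // executable account that interprets `data`
    accounts: Vec<AccountMeta>, // subset of the transaction's accounts
    data: Vec<u8>,              // opaque to the runtime, parsed by the program
}

fn main() {
    let ix = Instruction {
        program_id: [0u8; 32],
        accounts: vec![AccountMeta {
            pubkey: [1u8; 32],
            is_signer: true,       // this account must sign
            is_credit_only: false, // full debit/data access, exclusive lock
        }],
        data: vec![2, 0, 0, 0], // e.g. a program-defined opcode
    };
    println!("instruction touches {} account(s)", ix.accounts.len());
}
```

Because credit-only accounts can only be credited and never have their data written, the runtime can grant many such locks concurrently, which is the parallelism the "Accounts and Signatures" section describes.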
@@ -284,18 +284,6 @@ ARGS:
     <PATH>    /path/to/program.o
 ```
 
-```manpage
-solana-wallet-fees
-Display current cluster fees
-
-USAGE:
-    solana-wallet fees
-
-FLAGS:
-    -h, --help       Prints help information
-    -V, --version    Prints version information
-```
-
 ```manpage
 solana-wallet-get-transaction-count
 Get current transaction count
build-perf-libs.sh (new executable file, 23 lines)
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#
+# Builds perf-libs from the upstream source and installs them into the correct
+# location in the tree
+#
+set -e
+cd "$(dirname "$0")"
+
+if [[ -d target/perf-libs ]]; then
+  echo "target/perf-libs/ already exists, to continue run:"
+  echo "$ rm -rf target/perf-libs"
+  exit 1
+fi
+
+(
+  set -x
+  git clone git@github.com:solana-labs/solana-perf-libs.git target/perf-libs
+  cd target/perf-libs
+  make -j"$(nproc)"
+  make DESTDIR=. install
+)
+
+./fetch-perf-libs.sh
@@ -1,12 +0,0 @@
-[package]
-name = "solana-chacha-sys"
-version = "0.17.2"
-description = "Solana chacha-sys"
-authors = ["Solana Maintainers <maintainers@solana.com>"]
-repository = "https://github.com/solana-labs/solana"
-homepage = "https://solana.com/"
-license = "Apache-2.0"
-edition = "2018"
-
-[build-dependencies]
-cc = "1.0.38"
@@ -1,8 +0,0 @@
-extern crate cc;
-
-fn main() {
-    cc::Build::new()
-        .file("cpu-crypt/chacha20_core.c")
-        .file("cpu-crypt/chacha_cbc.c")
-        .compile("libcpu-crypt");
-}
chacha-sys/cpu-crypt/.gitignore (vendored, 1 line)
@@ -1 +0,0 @@
-release/
@@ -1,25 +0,0 @@
-V:=debug
-
-LIB:=cpu-crypt
-
-CFLAGS_common:=-Wall -Werror -pedantic -fPIC
-CFLAGS_release:=-march=native -O3 $(CFLAGS_common)
-CFLAGS_debug:=-g $(CFLAGS_common)
-CFLAGS:=$(CFLAGS_$V)
-
-all: $V/lib$(LIB).a
-
-$V/chacha20_core.o: chacha20_core.c chacha.h
-	@mkdir -p $(@D)
-	$(CC) $(CFLAGS) -c $< -o $@
-
-$V/chacha_cbc.o: chacha_cbc.c chacha.h
-	@mkdir -p $(@D)
-	$(CC) $(CFLAGS) -c $< -o $@
-
-$V/lib$(LIB).a: $V/chacha20_core.o $V/chacha_cbc.o
-	$(AR) rcs $@ $^
-
-.PHONY:clean
-clean:
-	rm -rf $V
@@ -1,35 +0,0 @@
-#ifndef HEADER_CHACHA_H
-# define HEADER_CHACHA_H
-
-#include <string.h>
-#include <inttypes.h>
-# include <stddef.h>
-# ifdef __cplusplus
-extern "C" {
-# endif
-
-typedef unsigned int u32;
-
-#define CHACHA_KEY_SIZE 32
-#define CHACHA_NONCE_SIZE 12
-#define CHACHA_BLOCK_SIZE 64
-#define CHACHA_ROUNDS 500
-
-void chacha20_encrypt(const u32 input[16],
-                      unsigned char output[64],
-                      int num_rounds);
-
-void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
-                          const uint8_t key[CHACHA_KEY_SIZE], const uint8_t nonce[CHACHA_NONCE_SIZE],
-                          uint32_t counter);
-
-void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
-                             uint32_t len, const uint8_t* key,
-                             unsigned char* ivec);
-
-
-# ifdef __cplusplus
-}
-# endif
-
-#endif
@@ -1,102 +0,0 @@
-#include "chacha.h"
-
-#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
-
-#define ROTATE(v, c) ROTL32((v), (c))
-
-#define XOR(v, w) ((v) ^ (w))
-
-#define PLUS(x, y) ((x) + (y))
-
-#define U32TO8_LITTLE(p, v) \
-  { (p)[0] = ((v)      ) & 0xff; (p)[1] = ((v) >>  8) & 0xff; \
-    (p)[2] = ((v) >> 16) & 0xff; (p)[3] = ((v) >> 24) & 0xff; }
-
-#define U8TO32_LITTLE(p) \
-  (((u32)((p)[0])      ) | ((u32)((p)[1]) <<  8) | \
-   ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24)   )
-
-#define QUARTERROUND(a,b,c,d) \
-  x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \
-  x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \
-  x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \
-  x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7);
-
-// sigma contains the ChaCha constants, which happen to be an ASCII string.
-static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3',
-                                   '2', '-', 'b', 'y', 't', 'e', ' ', 'k' };
-
-void chacha20_encrypt(const u32 input[16],
-                      unsigned char output[64],
-                      int num_rounds)
-{
-  u32 x[16];
-  int i;
-  memcpy(x, input, sizeof(u32) * 16);
-  for (i = num_rounds; i > 0; i -= 2) {
-    QUARTERROUND( 0, 4, 8,12)
-    QUARTERROUND( 1, 5, 9,13)
-    QUARTERROUND( 2, 6,10,14)
-    QUARTERROUND( 3, 7,11,15)
-    QUARTERROUND( 0, 5,10,15)
-    QUARTERROUND( 1, 6,11,12)
-    QUARTERROUND( 2, 7, 8,13)
-    QUARTERROUND( 3, 4, 9,14)
-  }
-  for (i = 0; i < 16; ++i) {
-    x[i] = PLUS(x[i], input[i]);
-  }
-  for (i = 0; i < 16; ++i) {
-    U32TO8_LITTLE(output + 4 * i, x[i]);
-  }
-}
-
-void chacha20_encrypt_ctr(const uint8_t *in, uint8_t *out, size_t in_len,
-                          const uint8_t key[CHACHA_KEY_SIZE],
-                          const uint8_t nonce[CHACHA_NONCE_SIZE],
-                          uint32_t counter)
-{
-  uint32_t input[16];
-  uint8_t buf[64];
-  size_t todo, i;
-
-  input[0] = U8TO32_LITTLE(sigma + 0);
-  input[1] = U8TO32_LITTLE(sigma + 4);
-  input[2] = U8TO32_LITTLE(sigma + 8);
-  input[3] = U8TO32_LITTLE(sigma + 12);
-
-  input[4] = U8TO32_LITTLE(key + 0);
-  input[5] = U8TO32_LITTLE(key + 4);
-  input[6] = U8TO32_LITTLE(key + 8);
-  input[7] = U8TO32_LITTLE(key + 12);
-
-  input[8] = U8TO32_LITTLE(key + 16);
-  input[9] = U8TO32_LITTLE(key + 20);
-  input[10] = U8TO32_LITTLE(key + 24);
-  input[11] = U8TO32_LITTLE(key + 28);
-
-  input[12] = counter;
-  input[13] = U8TO32_LITTLE(nonce + 0);
-  input[14] = U8TO32_LITTLE(nonce + 4);
-  input[15] = U8TO32_LITTLE(nonce + 8);
-
-  while (in_len > 0) {
-    todo = sizeof(buf);
-    if (in_len < todo) {
-      todo = in_len;
-    }
-
-    chacha20_encrypt(input, buf, 20);
-    for (i = 0; i < todo; i++) {
-      out[i] = in[i] ^ buf[i];
-    }
-
-    out += todo;
-    in += todo;
-    in_len -= todo;
-
-    input[12]++;
-  }
-}
@@ -1,72 +0,0 @@
-#include "chacha.h"
-
-#if !defined(STRICT_ALIGNMENT) && !defined(PEDANTIC)
-# define STRICT_ALIGNMENT 0
-#endif
-
-void chacha20_cbc128_encrypt(const unsigned char* in, unsigned char* out,
-                             uint32_t len, const uint8_t* key,
-                             unsigned char* ivec)
-{
-    size_t n;
-    unsigned char *iv = ivec;
-    (void)key;
-
-    if (len == 0) {
-        return;
-    }
-
-#if !defined(OPENSSL_SMALL_FOOTPRINT)
-    if (STRICT_ALIGNMENT &&
-        ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) {
-        while (len >= CHACHA_BLOCK_SIZE) {
-            for (n = 0; n < CHACHA_BLOCK_SIZE; ++n) {
-                out[n] = in[n] ^ iv[n];
-                //printf("%x ", out[n]);
-            }
-            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
-            iv = out;
-            len -= CHACHA_BLOCK_SIZE;
-            in += CHACHA_BLOCK_SIZE;
-            out += CHACHA_BLOCK_SIZE;
-        }
-    } else {
-        while (len >= CHACHA_BLOCK_SIZE) {
-            for (n = 0; n < CHACHA_BLOCK_SIZE; n += sizeof(size_t)) {
-                *(size_t *)(out + n) =
-                    *(size_t *)(in + n) ^ *(size_t *)(iv + n);
-                //printf("%zu ", *(size_t *)(iv + n));
-            }
-            chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
-            iv = out;
-            len -= CHACHA_BLOCK_SIZE;
-            in += CHACHA_BLOCK_SIZE;
-            out += CHACHA_BLOCK_SIZE;
-        }
-    }
-#endif
-    while (len) {
-        for (n = 0; n < CHACHA_BLOCK_SIZE && n < len; ++n) {
-            out[n] = in[n] ^ iv[n];
-        }
-        for (; n < CHACHA_BLOCK_SIZE; ++n) {
-            out[n] = iv[n];
-        }
-        chacha20_encrypt((const u32*)out, out, CHACHA_ROUNDS);
-        iv = out;
-        if (len <= CHACHA_BLOCK_SIZE) {
-            break;
-        }
-        len -= CHACHA_BLOCK_SIZE;
-        in += CHACHA_BLOCK_SIZE;
-        out += CHACHA_BLOCK_SIZE;
-    }
-    memcpy(ivec, iv, CHACHA_BLOCK_SIZE);
-
-}
-
-void chacha20_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t in_len,
-                          const uint8_t key[CHACHA_KEY_SIZE], uint8_t* ivec)
-{
-    chacha20_cbc128_encrypt(in, out, in_len, key, ivec);
-}
@@ -1,21 +0,0 @@
-extern "C" {
-    fn chacha20_cbc_encrypt(
-        input: *const u8,
-        output: *mut u8,
-        in_len: usize,
-        key: *const u8,
-        ivec: *mut u8,
-    );
-}
-
-pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
-    unsafe {
-        chacha20_cbc_encrypt(
-            input.as_ptr(),
-            output.as_mut_ptr(),
-            input.len(),
-            key.as_ptr(),
-            ivec.as_mut_ptr(),
-        );
-    }
-}
@@ -12,7 +12,7 @@
 set -e
 cd "$(dirname "$0")"/..
 
-if [[ -n $CI_PULL_REQUEST ]]; then
+if ci/is-pr.sh; then
   affectedFiles="$(buildkite-agent meta-data get affected_files)"
   echo "Affected files in this PR: $affectedFiles"
 
@@ -1,15 +0,0 @@
-# Build steps that run on a release tag
-#
-# All the steps in `buildkite.yml` are skipped and we jump directly to the
-# secondary build steps since it's assumed the commit that was tagged is known
-# to be good so there's no need to rebuild and retest it.
-steps:
-  - trigger: "solana-secondary"
-    branches: "!pull/*"
-    async: true
-    build:
-      message: "${BUILDKITE_MESSAGE}"
-      commit: "${BUILDKITE_COMMIT}"
-      branch: "${BUILDKITE_BRANCH}"
-      env:
-        TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
@@ -1,19 +1,16 @@
-#
-# Build steps that run after the primary pipeline on pushes and tags.
-# Pull requests do not run these steps.
 steps:
   - command: "sdk/docker-solana/build.sh"
-    timeout_in_minutes: 60
+    timeout_in_minutes: 20
     name: "publish docker"
   - command: "ci/publish-crate.sh"
-    timeout_in_minutes: 120
+    timeout_in_minutes: 40
     name: "publish crate"
     branches: "!master"
   - command: "ci/publish-bpf-sdk.sh"
     timeout_in_minutes: 5
     name: "publish bpf sdk"
   - command: "ci/publish-tarball.sh"
-    timeout_in_minutes: 60
+    timeout_in_minutes: 25
     name: "publish tarball"
   - command: "ci/publish-book.sh"
     timeout_in_minutes: 15
@@ -1,17 +1,14 @@
-# Build steps that run on pushes and pull requests.
-#
-# Release tags use buildkite-release.yml instead
 steps:
   - command: "ci/shellcheck.sh"
     name: "shellcheck"
     timeout_in_minutes: 5
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
+  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-checks.sh"
     name: "checks"
-    timeout_in_minutes: 35
+    timeout_in_minutes: 15
   - wait
   - command: "ci/test-stable-perf.sh"
     name: "stable-perf"
-    timeout_in_minutes: 30
+    timeout_in_minutes: 20
     artifact_paths: "log-*.txt"
     agents:
       - "queue=cuda"
@@ -24,7 +21,7 @@ steps:
     artifact_paths: "log-*.txt"
   - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
     name: "coverage"
-    timeout_in_minutes: 40
+    timeout_in_minutes: 20
 # TODO: Fix and re-enable test-large-network.sh
 # - command: "ci/test-large-network.sh || true"
 #   name: "large-network [ignored]"
@@ -89,11 +89,11 @@ BETA_CHANNEL_LATEST_TAG=${beta_tag:+v$beta_tag}
 STABLE_CHANNEL_LATEST_TAG=${stable_tag:+v$stable_tag}
 
 
-if [[ $CI_BRANCH = "$STABLE_CHANNEL" ]]; then
+if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
   CHANNEL=stable
-elif [[ $CI_BRANCH = "$EDGE_CHANNEL" ]]; then
+elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
-elif [[ $CI_BRANCH = "$BETA_CHANNEL" ]]; then
+elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
   CHANNEL=beta
 fi
 
@@ -64,14 +64,11 @@ fi
 ARGS+=(
   --env BUILDKITE
   --env BUILDKITE_AGENT_ACCESS_TOKEN
+  --env BUILDKITE_BRANCH
+  --env BUILDKITE_COMMIT
   --env BUILDKITE_JOB_ID
+  --env BUILDKITE_TAG
   --env CI
-  --env CI_BRANCH
-  --env CI_BUILD_ID
-  --env CI_COMMIT
-  --env CI_JOB_ID
-  --env CI_PULL_REQUEST
-  --env CI_REPO_SLUG
   --env CODECOV_TOKEN
   --env CRATES_IO_TOKEN
 )
@@ -3,7 +3,6 @@ ARG date
 
 RUN set -x \
   && rustup install nightly-$date \
-  && rustup component add clippy --toolchain=nightly-$date \
   && rustup show \
   && rustc --version \
   && cargo --version \
@@ -15,12 +15,12 @@ To update the pinned version:
 1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally,
    or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a
    specific YYYY-MM-DD that is desired (default is today's build).
-1. Update `ci/rust-version.sh` to reflect the new nightly `YYYY-MM-DD`
 1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh`
    to confirm the new nightly image builds. Fix any issues as needed
 1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized.
 1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com.
-1. Send a PR with the `ci/rust-version.sh` change and any codebase adjustments needed.
+1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/rust-version.sh` from the previous to
+   new *YYYY-MM-DD* value, send a PR with this change and any codebase adjustments needed.
 
 ## Troubleshooting
 
@@ -1,10 +1,6 @@
 # Note: when the rust version is changed also modify
-# ci/rust-version.sh to pick up the new image tag
-FROM rust:1.36.0
-
-# Add Google Protocol Buffers for Libra's metrics library.
-ENV PROTOC_VERSION 3.8.0
-ENV PROTOC_ZIP protoc-$PROTOC_VERSION-linux-x86_64.zip
+# ci/buildkite.yml to pick up the new image tag
+FROM rust:1.34.0
 
 RUN set -x \
   && apt update \
@@ -24,8 +20,6 @@ RUN set -x \
       mscgen \
       rsync \
      sudo \
-      golang \
-      unzip \
       \
   && rm -rf /var/lib/apt/lists/* \
   && rustup component add rustfmt \
@@ -34,8 +28,4 @@ RUN set -x \
   && cargo install svgbob_cli \
   && cargo install mdbook \
   && rustc --version \
-  && cargo --version \
-  && curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
-  && unzip -o $PROTOC_ZIP -d /usr/local bin/protoc \
-  && unzip -o $PROTOC_ZIP -d /usr/local include/* \
-  && rm -f $PROTOC_ZIP
+  && cargo --version
@@ -1,7 +1,6 @@
 Docker image containing rust and some preinstalled packages used in CI.
 
-This image is manually maintained:
-1. Edit `Dockerfile` to match the desired rust version
-2. Run `./build.sh` to publish the new image, if you are a member of the [Solana
-Labs](https://hub.docker.com/u/solanalabs/) Docker Hub organization.
+This image may be manually updated by running `./build.sh` if you are a member
+of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
+organization, but it is also automatically updated periodically by
+[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust).
 
ci/env.sh (89 lines)
@@ -1,89 +0,0 @@
-#
-# Normalized CI environment variables
-#
-# |source| me
-#
-
-if [[ -n $CI ]]; then
-  export CI=1
-  if [[ -n $TRAVIS ]]; then
-    export CI_BRANCH=$TRAVIS_BRANCH
-    export CI_BUILD_ID=$TRAVIS_BUILD_ID
-    export CI_COMMIT=$TRAVIS_COMMIT
-    export CI_JOB_ID=$TRAVIS_JOB_ID
-    if $TRAVIS_PULL_REQUEST; then
-      export CI_PULL_REQUEST=true
-    else
-      export CI_PULL_REQUEST=
-    fi
-    export CI_OS_NAME=$TRAVIS_OS_NAME
-    export CI_REPO_SLUG=$TRAVIS_REPO_SLUG
-    export CI_TAG=$TRAVIS_TAG
-  elif [[ -n $BUILDKITE ]]; then
-    export CI_BRANCH=$BUILDKITE_BRANCH
-    export CI_BUILD_ID=$BUILDKITE_BUILD_ID
-    export CI_COMMIT=$BUILDKITE_COMMIT
-    export CI_JOB_ID=$BUILDKITE_JOB_ID
-    # The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
-    # to how solana-ci-gate is used to trigger PR builds rather than using the
-    # standard Buildkite PR trigger.
-    if [[ $CI_BRANCH =~ pull/* ]]; then
-      export CI_PULL_REQUEST=true
-    else
-      export CI_PULL_REQUEST=
-    fi
-    export CI_OS_NAME=linux
-    if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
-      # The solana-secondary pipeline should use the slug of the pipeline that
-      # triggered it
-      export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG
-    else
-      export CI_REPO_SLUG=$BUILDKITE_ORGANIZATION_SLUG/$BUILDKITE_PIPELINE_SLUG
-    fi
-    # TRIGGERED_BUILDKITE_TAG is a workaround to propagate BUILDKITE_TAG into
-    # the solana-secondary pipeline
-    if [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
-      export CI_TAG=$TRIGGERED_BUILDKITE_TAG
-    else
-      export CI_TAG=$BUILDKITE_TAG
-    fi
-  elif [[ -n $APPVEYOR ]]; then
-    export CI_BRANCH=$APPVEYOR_REPO_BRANCH
-    export CI_BUILD_ID=$APPVEYOR_BUILD_ID
-    export CI_COMMIT=$APPVEYOR_REPO_COMMIT
-    export CI_JOB_ID=$APPVEYOR_JOB_ID
-    if [[ -n $APPVEYOR_PULL_REQUEST_NUMBER ]]; then
-      export CI_PULL_REQUEST=true
-    else
-      export CI_PULL_REQUEST=
-    fi
-    if [[ $CI_LINUX = True ]]; then
-      export CI_OS_NAME=linux
-    else
-      export CI_OS_NAME=windows
-    fi
-    export CI_REPO_SLUG=$APPVEYOR_REPO_NAME
-    export CI_TAG=$APPVEYOR_REPO_TAG_NAME
-  fi
-else
-  export CI=
-  export CI_BRANCH=
-  export CI_BUILD_ID=
-  export CI_COMMIT=
-  export CI_JOB_ID=
-  export CI_OS_NAME=
-  export CI_PULL_REQUEST=
-  export CI_REPO_SLUG=
-  export CI_TAG=
-fi
-
-cat <<EOF
-CI=$CI
-CI_BRANCH=$CI_BRANCH
-CI_BUILD_ID=$CI_BUILD_ID
-CI_COMMIT=$CI_COMMIT
-CI_JOB_ID=$CI_JOB_ID
-CI_OS_NAME=$CI_OS_NAME
-CI_PULL_REQUEST=$CI_PULL_REQUEST
-CI_TAG=$CI_TAG
-EOF
ci/is-pr.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+set -e
+#
+# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
+# to how solana-ci-gate is used to trigger PR builds rather than using the
+# standard Buildkite PR trigger.
+#
+
+[[ $BUILDKITE_BRANCH =~ pull/* ]]
@@ -5,6 +5,7 @@ skipSetup=false
 iterations=1
 restartInterval=never
 rollingRestart=false
+maybeNoLeaderRotation=
 extraNodes=0
 walletRpcPort=:8899
 
@@ -53,6 +54,9 @@ while getopts "ch?i:k:brxR" opt; do
   k)
     restartInterval=$OPTARG
     ;;
+  b)
+    maybeNoLeaderRotation="--stake 0"
+    ;;
   x)
     extraNodes=$((extraNodes + 1))
     ;;
@@ -78,6 +82,7 @@ nodes=(
     --no-restart \
     --init-complete-file init-complete-node1.log"
   "multinode-demo/validator.sh \
+    $maybeNoLeaderRotation \
     --enable-rpc-exit \
     --no-restart \
     --init-complete-file init-complete-node2.log \
@@ -89,7 +94,8 @@ for i in $(seq 1 $extraNodes); do
     "multinode-demo/validator.sh \
       --no-restart \
      --label dyn$i \
-      --init-complete-file init-complete-node$((2 + i)).log"
+      --init-complete-file init-complete-node$((2 + i)).log \
+      $maybeNoLeaderRotation"
   )
 done
 numNodes=$((2 + extraNodes))
@@ -119,8 +125,11 @@ startNode() {
   echo "log: $log"
 }
 
-waitForNodeToInit() {
-  declare initCompleteFile=$1
+initCompleteFiles=()
+waitForAllNodesToInit() {
+  echo "--- ${#initCompleteFiles[@]} nodes booting"
+  SECONDS=
+  for initCompleteFile in "${initCompleteFiles[@]}"; do
   while [[ ! -r $initCompleteFile ]]; do
     if [[ $SECONDS -ge 240 ]]; then
       echo "^^^ +++"
@@ -131,14 +140,6 @@ waitForNodeToInit() {
     sleep 2
   done
   echo "Found $initCompleteFile"
-}
-
-initCompleteFiles=()
-waitForAllNodesToInit() {
-  echo "--- ${#initCompleteFiles[@]} nodes booting"
-  SECONDS=
-  for initCompleteFile in "${initCompleteFiles[@]}"; do
-    waitForNodeToInit "$initCompleteFile"
   done
   echo "All nodes finished booting in $SECONDS seconds"
 }
@@ -161,13 +162,6 @@ startNodes() {
     if $addLogs; then
      logs+=("$(getNodeLogFile "$i" "$cmd")")
     fi
-
-    # 1 == bootstrap leader, wait until it boots before starting
-    # other validators
-    if [[ "$i" -eq 1 ]]; then
-      SECONDS=
-      waitForNodeToInit "$initCompleteFile"
-    fi
  done
 
   waitForAllNodesToInit
@@ -300,19 +294,20 @@ flag_error() {
 }
 
 if ! $skipSetup; then
-  multinode-demo/setup.sh
+  multinode-demo/setup.sh --hashes-per-tick auto
 else
   verifyLedger
 fi
 startNodes
 lastTransactionCount=
+enforceTransactionCountAdvance=true
 while [[ $iteration -le $iterations ]]; do
   echo "--- Node count ($iteration)"
   (
     source multinode-demo/common.sh
     set -x
    client_keypair=/tmp/client-id.json-$$
-    $solana_keygen new -f -o $client_keypair || exit $?
+    $solana_keygen -o $client_keypair || exit $?
    $solana_gossip spy --num-nodes-exactly $numNodes || exit $?
    rm -rf $client_keypair
   ) || flag_error
@@ -341,20 +336,36 @@ while [[ $iteration -le $iterations ]]; do
  transactionCount=$(sed -e 's/{"jsonrpc":"2.0","result":\([0-9]*\),"id":1}/\1/' log-transactionCount.txt)
  if [[ -n $lastTransactionCount ]]; then
    echo "--- Transaction count check: $lastTransactionCount < $transactionCount"
+    if $enforceTransactionCountAdvance; then
     if [[ $lastTransactionCount -ge $transactionCount ]]; then
       echo "Error: Transaction count is not advancing"
       echo "* lastTransactionCount: $lastTransactionCount"
       echo "* transactionCount: $transactionCount"
       flag_error
     fi
+    else
+      echo "enforceTransactionCountAdvance=false"
+    fi
+    enforceTransactionCountAdvance=true
  fi
  lastTransactionCount=$transactionCount
 
  echo "--- Wallet sanity ($iteration)"
+  flag_error_if_no_leader_rotation() {
+    # TODO: Stop ignoring wallet sanity failures when leader rotation is enabled
+    # once https://github.com/solana-labs/solana/issues/2474 is fixed
+    if [[ -n $maybeNoLeaderRotation ]]; then
+      flag_error
+    else
+      # Wallet error occurred (and was ignored) so transactionCount may not
+      # advance on the next iteration
+      enforceTransactionCountAdvance=false
+    fi
+  }
  (
    set -x
    timeout 60s scripts/wallet-sanity.sh --url http://127.0.0.1"$walletRpcPort"
-  ) || flag_error
+  ) || flag_error_if_no_leader_rotation
 
  iteration=$((iteration + 1))
 
ci/nits.sh
@@ -23,13 +23,11 @@ declare print_free_tree=(
   'metrics/src'
   'netutil/src'
   'runtime/src'
-  'sdk/bpf/rust/rust-utils'
   'sdk/src'
-  'programs/bpf/rust'
-  'programs/stake_api/src'
-  'programs/stake_program/src'
   'programs/vote_api/src'
   'programs/vote_program/src'
+  'programs/stake_api/src'
+  'programs/stake_program/src'
 )
 
 if _ git --no-pager grep -n --max-depth=0 "${prints[@]/#/-e }" -- "${print_free_tree[@]}"; then
@@ -46,22 +44,16 @@ if _ git --no-pager grep -n 'Default::default()' -- '*.rs'; then
 fi
 
 # Let's keep a .gitignore for every crate, ensure it's got
-# /target/ and /farf/ in it
+# /target/ in it
 declare gitignores_ok=true
 for i in $(git --no-pager ls-files \*/Cargo.toml ); do
   dir=$(dirname "$i")
   if [[ ! -f $dir/.gitignore ]]; then
     echo 'error: nits.sh .gitnore missing for crate '"$dir" >&2
     gitignores_ok=false
-  else
-    if ! grep -q -e '^/target/$' "$dir"/.gitignore; then
+  elif ! grep -q -e '^/target/$' "$dir"/.gitignore; then
     echo 'error: nits.sh "/target/" apparently missing from '"$dir"'/.gitignore' >&2
     gitignores_ok=false
-    fi
-    if ! grep -q -e '^/farf/$' "$dir"/.gitignore ; then
-      echo 'error: nits.sh "/farf/" apparently missing from '"$dir"'/.gitignore' >&2
-      gitignores_ok=false
-    fi
   fi
 done
 "$gitignores_ok"
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-#
-# This script figures the order in which workspace crates must be published to
-# crates.io. Along the way it also ensures there are no circular dependencies
-# that would cause a |cargo publish| to fail.
-#
-# On success an ordered list of Cargo.toml files is written to stdout
-#
-
-import os
-import json
-import subprocess
-import sys;
-
-def load_metadata():
-    return json.loads(subprocess.Popen(
-        'cargo metadata --no-deps --format-version=1',
-        shell=True, stdout=subprocess.PIPE).communicate()[0])
-
-def get_packages():
-    metadata = load_metadata()
-
-    manifest_path = dict()
-
-    # Build dictionary of packages and their immediate solana-only dependencies
-    dependency_graph = dict()
-    for pkg in metadata['packages']:
-        manifest_path[pkg['name']] = pkg['manifest_path'];
-        dependency_graph[pkg['name']] = [x['name'] for x in pkg['dependencies'] if x['name'].startswith('solana')];
-
-    # Check for direct circular dependencies
-    circular_dependencies = set()
-    for package, dependencies in dependency_graph.items():
-        for dependency in dependencies:
-            if dependency in dependency_graph and package in dependency_graph[dependency]:
-                circular_dependencies.add(' <--> '.join(sorted([package, dependency])))
-
-    for dependency in circular_dependencies:
-        sys.stderr.write('Error: Circular dependency: {}\n'.format(dependency))
-
-    if len(circular_dependencies) != 0:
-        sys.exit(1)
-
-    # Order dependencies
-    sorted_dependency_graph = []
-    max_iterations = pow(len(dependency_graph),2)
-    while dependency_graph:
-        if max_iterations == 0:
-            # TODO: Be more helpful and find the actual cycle for the user
-            sys.exit('Error: Circular dependency suspected between these packages: {}\n'.format(' '.join(dependency_graph.keys())))
-
-        max_iterations -= 1
-        for package, dependencies in dependency_graph.items():
-            for dependency in dependencies:
-                if dependency in dependency_graph:
-                    break
-            else:
-                del dependency_graph[package]
-                sorted_dependency_graph.append((package, manifest_path[package]))
-
-
-    return sorted_dependency_graph
-
-for package, manifest in get_packages():
-    print os.path.relpath(manifest)
@@ -2,50 +2,8 @@
 set -e
 
 cd "$(dirname "$0")/.."
-BOOK="book"
 
-source ci/rust-version.sh stable
-eval "$(ci/channel-info.sh)"
-
-if [[ -n $PUBLISH_BOOK_TAG ]]; then
-  CURRENT_TAG="$(git describe --tags)"
-  COMMIT_TO_PUBLISH="$(git rev-list -n 1 "${PUBLISH_BOOK_TAG}")"
-
-  # book is manually published at a specified release tag
-  if [[ $PUBLISH_BOOK_TAG != "$CURRENT_TAG" ]]; then
-    (
-      cat <<EOF
-steps:
-  - trigger: "$BUILDKITE_PIPELINE_SLUG"
-    async: true
-    build:
-      message: "$BUILDKITE_MESSAGE"
-      commit: "$COMMIT_TO_PUBLISH"
-      env:
-        PUBLISH_BOOK_TAG: "$PUBLISH_BOOK_TAG"
-EOF
-    ) | buildkite-agent pipeline upload
-    exit 0
-  fi
-  repo=git@github.com:solana-labs/book.git
-else
-  # book-edge and book-beta are published automatically on the tip of the branch
-  case $CHANNEL in
-  edge)
-    repo=git@github.com:solana-labs/book-edge.git
-    ;;
-  beta)
-    repo=git@github.com:solana-labs/book-beta.git
-    ;;
-  *)
-    echo "--- publish skipped"
-    exit 0
-    ;;
-  esac
-  BOOK=$CHANNEL
-fi
-
-ci/docker-run.sh "$rust_stable_docker_image" bash -exc "book/build.sh"
+book/build.sh
 
 echo --- create book repo
 (
@@ -55,10 +13,25 @@ echo --- create book repo
   git config user.email "maintainers@solana.com"
   git config user.name "$(basename "$0")"
   git add ./* ./.nojekyll
-  git commit -m "${CI_COMMIT:-local}"
+  git commit -m "${BUILDKITE_COMMIT:-local}"
 )
 
-echo "--- publish $BOOK"
+eval "$(ci/channel-info.sh)"
+# Only publish the book from the edge and beta channels for now.
+case $CHANNEL in
+edge)
+  repo=git@github.com:solana-labs/book-edge.git
+  ;;
+beta)
+  repo=git@github.com:solana-labs/book.git
+  ;;
+*)
+  echo "--- publish skipped"
+  exit 0
+  ;;
+esac
+
+echo "--- publish $CHANNEL"
 cd book/html/
 git remote add origin $repo
 git fetch origin master
@@ -2,23 +2,44 @@
 set -e
 cd "$(dirname "$0")/.."
 source ci/semver_bash/semver.sh
-source ci/rust-version.sh stable
 
-# shellcheck disable=SC2086
-is_crate_version_uploaded() {
-  name=$1
-  version=$2
-  curl https://crates.io/api/v1/crates/${name}/${version} | \
-  python3 -c "import sys,json; print('version' in json.load(sys.stdin));"
-}
+# List of internal crates to publish
+#
+# IMPORTANT: the order of the CRATES *is* significant. Crates must be published
+# before the crates that depend on them. Note that this information is already
+# expressed in the various Cargo.toml files, and ideally would not be duplicated
+# here. (TODO: figure the crate ordering dynamically)
+#
+CRATES=(
+  kvstore
+  logger
+  netutil
+  sdk
+  keygen
+  metrics
+  client
+  drone
+  programs/{budget_api,config_api,stake_api,storage_api,token_api,vote_api,exchange_api}
+  programs/{vote_program,budget_program,bpf_loader,config_program,exchange_program,failure_program}
+  programs/{noop_program,stake_program,storage_program,token_program}
+  runtime
+  vote-signer
+  core
+  validator
+  genesis
+  gossip
+  ledger-tool
+  wallet
+  install
+)
 
 # Only package/publish if this is a tagged release
-[[ -n $CI_TAG ]] || {
-  echo CI_TAG unset, skipped
+[[ -n $TRIGGERED_BUILDKITE_TAG ]] || {
+  echo TRIGGERED_BUILDKITE_TAG unset, skipped
   exit 0
 }
 
-semverParseInto "$CI_TAG" MAJOR MINOR PATCH SPECIAL
+semverParseInto "$TRIGGERED_BUILDKITE_TAG" MAJOR MINOR PATCH SPECIAL
 expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
 
 [[ -n "$CRATES_IO_TOKEN" ]] || {
@@ -26,60 +47,27 @@ expectedCrateVersion="$MAJOR.$MINOR.$PATCH$SPECIAL"
|
|||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
Cargo_tomls=$(ci/order-crates-for-publishing.py)
|
cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
|
||||||
|
|
||||||
for Cargo_toml in $Cargo_tomls; do
|
for crate in "${CRATES[@]}"; do
|
||||||
echo "--- $Cargo_toml"
|
if [[ ! -r $crate/Cargo.toml ]]; then
|
||||||
grep -q "^version = \"$expectedCrateVersion\"$" "$Cargo_toml" || {
|
echo "Error: $crate/Cargo.toml does not exist"
|
||||||
echo "Error: $Cargo_toml version is not $expectedCrateVersion"
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "-- $crate"
|
||||||
|
grep -q "^version = \"$expectedCrateVersion\"$" "$crate"/Cargo.toml || {
|
||||||
|
echo "Error: $crate/Cargo.toml version is not $expectedCrateVersion"
|
||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
crate_name=$(grep -m 1 '^name = ' "$Cargo_toml" | cut -f 3 -d ' ' | tr -d \")
|
|
||||||
|
|
||||||
if grep -q "^publish = false" "$Cargo_toml"; then
|
|
||||||
echo "$crate_name is is marked as unpublishable"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
|
|
||||||
echo "${crate_name} version ${expectedCrateVersion} is already on crates.io"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
(
|
(
|
||||||
set -x
|
set -x
|
||||||
crate=$(dirname "$Cargo_toml")
|
|
||||||
# TODO: the rocksdb package does not build with the stock rust docker image,
|
# TODO: the rocksdb package does not build with the stock rust docker image,
|
||||||
# so use the solana rust docker image until this is resolved upstream
|
# so use the solana rust docker image until this is resolved upstream
|
||||||
cargoCommand="cargo publish --token $CRATES_IO_TOKEN"
|
source ci/rust-version.sh
|
||||||
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
|
ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"
|
||||||
) || true # <-- Don't fail. We want to be able to retry the job in cases when a publish fails halfway due to network/cloud issues
|
#ci/docker-run.sh rust bash -exc "cd $crate; $cargoCommand"
|
||||||
|
)
|
||||||
numRetries=30
|
|
||||||
for ((i = 1 ; i <= numRetries ; i++)); do
|
|
||||||
echo "Attempt ${i} of ${numRetries}"
|
|
||||||
if [[ $(is_crate_version_uploaded "$crate_name" "$expectedCrateVersion") = True ]] ; then
|
|
||||||
echo "Found ${crate_name} version ${expectedCrateVersion} on crates.io REST API"
|
|
||||||
|
|
||||||
really_uploaded=0
|
|
||||||
(
|
|
||||||
set -x
|
|
||||||
rm -rf crate-test
|
|
||||||
cargo +"$rust_stable" init crate-test
|
|
||||||
cd crate-test/
|
|
||||||
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
|
|
||||||
echo "[workspace]" >> Cargo.toml
|
|
||||||
cargo +"$rust_stable" check
|
|
||||||
) && really_uploaded=1
|
|
||||||
if ((really_uploaded)); then
|
|
||||||
break;
|
|
||||||
fi
|
|
||||||
echo "${crate_name} not yet available for download from crates.io"
|
|
||||||
fi
|
|
||||||
echo "Did not find ${crate_name} version ${expectedCrateVersion} on crates.io. Sleeping for 2 seconds."
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
done
|
done
|
||||||
|
|
||||||
exit 0
|
exit 0
|
||||||
|
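The is_crate_version_uploaded helper deleted above is easy to exercise on its own: it asks the crates.io REST API whether a version exists, and the script polls it after publishing. A minimal standalone sketch of the same check and retry idea, using only curl and python3 exactly as the script does (the crate name and version below are just examples):

#!/usr/bin/env bash
set -e

# Prints "True" or "False" depending on whether crates.io knows the version.
is_crate_version_uploaded() {
  name=$1
  version=$2
  curl -s "https://crates.io/api/v1/crates/${name}/${version}" | \
    python3 -c "import sys,json; print('version' in json.load(sys.stdin));"
}

# Poll until the version becomes visible, e.g. right after `cargo publish`,
# since the crates.io index can lag behind a successful publish.
for i in {1..30}; do
  if [[ $(is_crate_version_uploaded solana-sdk 0.17.2) = True ]]; then
    echo "found it on attempt $i"
    break
  fi
  sleep 2
done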
@@ -45,9 +45,7 @@ beta)
   CHANNEL_BRANCH=$BETA_CHANNEL
   ;;
 stable)
-  # Set to whatever branch 'testnet' is on.
-  # TODO: Revert to $STABLE_CHANNEL for TdS
-  CHANNEL_BRANCH=$BETA_CHANNEL
+  CHANNEL_BRANCH=$STABLE_CHANNEL
   ;;
 *)
   echo "Error: Invalid PUBLISH_CHANNEL=$PUBLISH_CHANNEL"
@@ -55,7 +53,7 @@ stable)
   ;;
 esac
 
-if [[ $CI_BRANCH != "$CHANNEL_BRANCH" ]]; then
+if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
   (
     cat <<EOF
 steps:
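Several of these scripts share one pattern worth calling out: when the current branch is not the branch a channel publishes from, the job re-triggers itself against the right commit by piping a generated pipeline into buildkite-agent pipeline upload. A stripped-down sketch of that pattern, lifted from the publish-book.sh block deleted above (the forwarded env var is just one example of state to carry across):

# Re-run this same pipeline against another commit instead of publishing
# from the wrong branch; the current job then exits without doing any work.
(
  cat <<EOF
steps:
  - trigger: "$BUILDKITE_PIPELINE_SLUG"
    async: true
    build:
      message: "$BUILDKITE_MESSAGE"
      commit: "$COMMIT_TO_PUBLISH"
      env:
        PUBLISH_BOOK_TAG: "$PUBLISH_BOOK_TAG"
EOF
) | buildkite-agent pipeline upload
exit 0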
ci/publish-tarball.sh
@@ -3,20 +3,8 @@ set -e
 
 cd "$(dirname "$0")/.."
 
-if [[ -n $APPVEYOR ]]; then
-  # Bootstrap rust build environment
-  source ci/env.sh
-  source ci/rust-version.sh
-
-  appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
-  ./rustup-init -yv --default-toolchain $rust_stable --default-host x86_64-pc-windows-msvc
-  export PATH="$PATH:$USERPROFILE/.cargo/bin"
-  rustc -vV
-  cargo -vV
-fi
-
 DRYRUN=
-if [[ -z $CI_BRANCH ]]; then
+if [[ -z $BUILDKITE_BRANCH ]]; then
   DRYRUN="echo"
   CHANNEL=unknown
 fi
@@ -24,9 +12,12 @@ fi
 eval "$(ci/channel-info.sh)"
 
 TAG=
-if [[ -n "$CI_TAG" ]]; then
-  CHANNEL_OR_TAG=$CI_TAG
-  TAG="$CI_TAG"
+if [[ -n "$BUILDKITE_TAG" ]]; then
+  CHANNEL_OR_TAG=$BUILDKITE_TAG
+  TAG="$BUILDKITE_TAG"
+elif [[ -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
+  CHANNEL_OR_TAG=$TRIGGERED_BUILDKITE_TAG
+  TAG="$TRIGGERED_BUILDKITE_TAG"
 else
   CHANNEL_OR_TAG=$CHANNEL
 fi
@@ -36,21 +27,15 @@ if [[ -z $CHANNEL_OR_TAG ]]; then
   exit 1
 fi
 
-PERF_LIBS=false
-case "$CI_OS_NAME" in
-osx)
+case "$(uname)" in
+Darwin)
   TARGET=x86_64-apple-darwin
   ;;
-linux)
+Linux)
   TARGET=x86_64-unknown-linux-gnu
-  PERF_LIBS=true
-  ;;
-windows)
-  TARGET=x86_64-pc-windows-msvc
   ;;
 *)
-  echo CI_OS_NAME unset
-  exit 1
+  TARGET=unknown-unknown-unknown
   ;;
 esac
 
@@ -71,13 +56,6 @@ echo --- Creating tarball
   source ci/rust-version.sh stable
   scripts/cargo-install-all.sh +"$rust_stable" solana-release
 
-  # Reduce the archive size until
-  # https://github.com/appveyor/ci/issues/2997 is fixed
-  if [[ -n $APPVEYOR ]]; then
-    rm -f solana-release/bin/solana-validator.exe solana-release/bin/solana-bench-exchange.exe
-  fi
-
-  if $PERF_LIBS; then
   rm -rf target/perf-libs
   ./fetch-perf-libs.sh
   mkdir solana-release/target
@@ -90,8 +68,6 @@ echo --- Creating tarball
     cargo +"$rust_stable" install --path . --features=cuda --root ../solana-release-cuda
   )
   cp solana-release-cuda/bin/solana-validator solana-release/bin/solana-validator-cuda
-  fi
-
   cp -a scripts multinode-demo solana-release/
 
   # Add a wrapper script for validator.sh
@@ -101,35 +77,36 @@ echo --- Creating tarball
 set -e
 cd "$(dirname "$0")"/..
 export USE_INSTALL=1
-export REQUIRE_CONFIG_DIR=1
 exec multinode-demo/validator.sh "$@"
 EOF
   chmod +x solana-release/bin/validator.sh
 
-  tar cvf solana-release-$TARGET.tar solana-release
-  bzip2 solana-release-$TARGET.tar
-  cp solana-release/bin/solana-install-init solana-install-init-$TARGET
+  # Add a wrapper script for clear-config.sh
+  # TODO: Remove multinode/... from tarball
+  cat > solana-release/bin/clear-config.sh <<'EOF'
+#!/usr/bin/env bash
+set -e
+cd "$(dirname "$0")"/..
+export USE_INSTALL=1
+exec multinode-demo/clear-validator-config.sh "$@"
+EOF
+  chmod +x solana-release/bin/clear-config.sh
+
+  tar jvcf solana-release-$TARGET.tar.bz2 solana-release/
+  cp solana-release/bin/solana-install solana-install-$TARGET
 )
 
-# Metrics tarball is platform agnostic, only publish it from Linux
-MAYBE_METRICS_TARBALL=
-if [[ "$CI_OS_NAME" = linux ]]; then
-  metrics/create-metrics-tarball.sh
-  MAYBE_METRICS_TARBALL=solana-metrics.tar.bz2
+echo --- Saving build artifacts
+source ci/upload-ci-artifact.sh
+upload-ci-artifact solana-release-$TARGET.tar.bz2
+
+if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
+  echo Skipped due to DO_NOT_PUBLISH_TAR
+  exit 0
 fi
 
-source ci/upload-ci-artifact.sh
-
-for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE_METRICS_TARBALL; do
-  upload-ci-artifact "$file"
-
-  if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
-    echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
-    continue
-  fi
-
-  if [[ -n $BUILDKITE ]]; then
-    echo --- AWS S3 Store: "$file"
+for file in solana-release-$TARGET.tar.bz2 solana-install-$TARGET; do
+  echo --- AWS S3 Store: $file
   (
     set -x
     $DRYRUN docker run \
@@ -145,22 +122,7 @@ for file in solana-release-$TARGET.tar.bz2 solana-install-init-"$TARGET"* $MAYBE
   )
 
   if [[ -n $TAG ]]; then
-    ci/upload-github-release-asset.sh "$file"
-    fi
-  elif [[ -n $TRAVIS ]]; then
-    # .travis.yml uploads everything in the travis-s3-upload/ directory to release.solana.com
-    mkdir -p travis-s3-upload/"$CHANNEL_OR_TAG"
-    cp -v "$file" travis-s3-upload/"$CHANNEL_OR_TAG"/
-
-    if [[ -n $TAG ]]; then
-      # .travis.yaml uploads everything in the travis-release-upload/ directory to
-      # the associated Github Release
-      mkdir -p travis-release-upload/
-      cp -v "$file" travis-release-upload/
-    fi
-  elif [[ -n $APPVEYOR ]]; then
-    # Add artifacts for .appveyor.yml to upload
-    appveyor PushArtifact "$file" -FileName "$CHANNEL_OR_TAG"/"$file"
+    ci/upload-github-release-asset.sh $file
   fi
 done
 
ci/rust-version.sh
@@ -13,14 +13,11 @@
 # $ source ci/rust-version.sh
 #
 
-stable_version=1.36.0
-nightly_version=2019-07-19
+export rust_stable=1.34.0
+export rust_stable_docker_image=solanalabs/rust:1.34.0
 
-export rust_stable="$stable_version"
-export rust_stable_docker_image=solanalabs/rust:"$stable_version"
-
-export rust_nightly=nightly-"$nightly_version"
-export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
+export rust_nightly=nightly-2019-05-01
+export rust_nightly_docker_image=solanalabs/rust-nightly:2019-05-01
 
 [[ -z $1 ]] || (
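The pattern consuming these exports shows up throughout this diff: a script sources ci/rust-version.sh with an optional channel argument and then pins every cargo invocation to the exported toolchain rather than the host default. A minimal sketch of a consumer, assuming rustup-managed toolchains; the build commands themselves are illustrative, and judging by the `[[ -z $1 ]] ||` tail above, the optional argument triggers extra setup such as installing the named toolchain:

#!/usr/bin/env bash
set -e
cd "$(dirname "$0")/.."

# Pin the toolchain: rust-version.sh exports $rust_stable (and friends).
source ci/rust-version.sh stable

# Every cargo call routes through the pinned toolchain via rustup's +syntax.
cargo +"$rust_stable" build --all
cargo +"$rust_stable" test --all -- --nocapture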
ci/test-bench.sh
@@ -30,8 +30,8 @@ set -o pipefail
 export RUST_BACKTRACE=1
 
 UPLOAD_METRICS=""
-TARGET_BRANCH=$CI_BRANCH
-if [[ -z $CI_BRANCH ]] || [[ -n $CI_PULL_REQUEST ]]; then
+TARGET_BRANCH=$BUILDKITE_BRANCH
+if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
   TARGET_BRANCH=$EDGE_CHANNEL
 else
   UPLOAD_METRICS="upload"
@@ -40,10 +40,6 @@ fi
 BENCH_FILE=bench_output.log
 BENCH_ARTIFACT=current_bench_results.log
 
-# Clear the C dependency files, if dependeny moves these files are not regenerated
-test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
-test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
-
 # Ensure all dependencies are built
 _ cargo +$rust_nightly build --all --release
ci/test-checks.sh
@@ -5,47 +5,15 @@ cd "$(dirname "$0")/.."
 
 source ci/_
 source ci/rust-version.sh stable
-source ci/rust-version.sh nightly
 
 export RUST_BACKTRACE=1
 export RUSTFLAGS="-D warnings"
 
-do_bpf_check() {
-  _ cargo +"$rust_stable" fmt --all -- --check
-  _ cargo +"$rust_nightly" test --all
-  _ cargo +"$rust_nightly" clippy --version
-  _ cargo +"$rust_nightly" clippy --all -- --deny=warnings
-  _ cargo +"$rust_stable" audit
-}
-
-(
-  (
-    cd sdk/bpf/rust/rust-no-std
-    do_bpf_check
-  )
-  (
-    cd sdk/bpf/rust/rust-utils
-    do_bpf_check
-  )
-  (
-    cd sdk/bpf/rust/rust-test
-    do_bpf_check
-  )
-  for project in programs/bpf/rust/*/ ; do
-    (
-      cd "$project"
-      do_bpf_check
-    )
-  done
-)
-
 _ cargo +"$rust_stable" fmt --all -- --check
-_ cargo +"$rust_stable" clippy --version
+_ cargo +"$rust_stable" clippy --all -- --version
 _ cargo +"$rust_stable" clippy --all -- --deny=warnings
-_ cargo +"$rust_stable" audit --version
-_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0011 # https://github.com/solana-labs/solana/issues/5207
+_ cargo +"$rust_stable" audit
 _ ci/nits.sh
-_ ci/order-crates-for-publishing.py
 _ book/build.sh
 
 echo --- ok
ci/test-coverage.sh
@@ -25,7 +25,7 @@ source scripts/ulimit-n.sh
 
 scripts/coverage.sh
 
-report=coverage-"${CI_COMMIT:0:9}".tar.gz
+report=coverage-"${BUILDKITE_COMMIT:0:9}".tar.gz
 mv target/cov/report.tar.gz "$report"
 upload-ci-artifact "$report"
 annotate --style success --context lcov-report \
@@ -39,5 +39,5 @@ else
   bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info
 
   annotate --style success --context codecov.io \
-    "CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}"
+    "CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${BUILDKITE_COMMIT:0:9}"
 fi
ci/test-stable.sh
@@ -19,21 +19,14 @@ source scripts/ulimit-n.sh
 # Clear cached json keypair files
 rm -rf "$HOME/.config/solana"
 
-# Clear the C dependency files, if dependeny moves these files are not regenerated
-test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
-test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
-
-# Clear the BPF sysroot files, they are not automatically rebuilt
-rm -rf target/xargo # Issue #3105
-
-# Run the appropriate test based on entrypoint
+# Run tbe appropriate test based on entrypoint
 testName=$(basename "$0" .sh)
 case $testName in
 test-stable)
   echo "Executing $testName"
 
   _ cargo +"$rust_stable" build --all ${V:+--verbose}
-  _ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture
+  _ cargo +"$rust_stable" test --all ${V:+--verbose} -- --nocapture --test-threads=1
   ;;
 test-stable-perf)
   echo "Executing $testName"
@@ -42,10 +35,8 @@ test-stable-perf)
              .rs$ \
              Cargo.lock$ \
              Cargo.toml$ \
-             ^ci/test-stable-perf.sh \
-             ^ci/test-stable.sh \
-             ^core/build.rs \
-             ^fetch-perf-libs.sh \
+             ci/test-stable-perf.sh \
+             ci/test-stable.sh \
              ^programs/ \
              ^sdk/ \
     || {
@@ -61,8 +52,10 @@ test-stable-perf)
     --no-default-features --features=bpf_c,bpf_rust
 
   # Run root package tests with these features
-  ROOT_FEATURES=
-  if [[ $(uname) = Linux ]]; then
+  ROOT_FEATURES=erasure,chacha
+  if [[ $(uname) = Darwin ]]; then
+    ./build-perf-libs.sh
+  else
     # Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
     # lengthy and unexpected delay the first time CUDA is involved when the driver
     # is not yet loaded.
@@ -72,12 +65,12 @@ test-stable-perf)
     ./fetch-perf-libs.sh
     # shellcheck source=/dev/null
     source ./target/perf-libs/env.sh
-    ROOT_FEATURES=cuda
+    ROOT_FEATURES=$ROOT_FEATURES,cuda
   fi
 
   # Run root package library tests
   _ cargo +"$rust_stable" build --all ${V:+--verbose} --features="$ROOT_FEATURES"
-  _ cargo +"$rust_stable" test --manifest-path=core/Cargo.toml ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture
+  _ cargo +"$rust_stable" test --manifest-path=core/Cargo.toml ${V:+--verbose} --features="$ROOT_FEATURES" -- --nocapture --test-threads=1
   ;;
 *)
   echo "Error: Unknown test: $testName"
ci/testnet-deploy.sh
@@ -24,14 +24,6 @@ blockstreamer=false
 deployUpdateManifest=true
 fetchLogs=true
 maybeHashesPerTick=
-maybeDisableAirdrops=
-maybeInternalNodesStakeLamports=
-maybeInternalNodesLamports=
-maybeExternalPrimordialAccountsFile=
-maybeLamports=
-maybeLetsEncrypt=
-maybeFullnodeAdditionalDiskSize=
-maybeNoSnapshot=
 
 usage() {
   exitcode=0
@@ -70,28 +62,11 @@ Deploys a CD testnet
   -s - Skip start. Nodes will still be created or configured, but network software will not be started.
   -S - Stop network software without tearing down nodes.
   -f - Discard validator nodes that didn't bootup successfully
-  --no-airdrop
-    - If set, disables airdrops. Nodes must be funded in genesis block when airdrops are disabled.
-  --internal-nodes-stake-lamports NUM_LAMPORTS
-    - Amount to stake internal nodes.
-  --internal-nodes-lamports NUM_LAMPORTS
-    - Amount to fund internal nodes in genesis block
-  --external-accounts-file FILE_PATH
-    - Path to external Primordial Accounts file, if it exists.
+  -w - Skip time-consuming "bells and whistles" that are
+       unnecessary for a high-node count demo testnet
   --hashes-per-tick NUM_HASHES|sleep|auto
    - Override the default --hashes-per-tick for the cluster
-  --lamports NUM_LAMPORTS
-    - Specify the number of lamports to mint (default 100000000000000)
-  --skip-deploy-update
-    - If set, will skip software update deployment
-  --skip-remote-log-retrieval
-    - If set, will not fetch logs from remote nodes
-  --letsencrypt [dns name]
-    - Attempt to generate a TLS certificate using this DNS name
-  --fullnode-additional-disk-size-gb [number]
-    - Size of additional disk in GB for all fullnodes
-  --no-snapshot
-    - If set, disables booting validators from a snapshot
 
 Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
 metrics
@@ -107,39 +82,6 @@ while [[ -n $1 ]]; do
     if [[ $1 = --hashes-per-tick ]]; then
       maybeHashesPerTick="$1 $2"
       shift 2
-    elif [[ $1 = --lamports ]]; then
-      maybeLamports="$1 $2"
-      shift 2
-    elif [[ $1 = --no-airdrop ]]; then
-      maybeDisableAirdrops="$1"
-      shift 1
-    elif [[ $1 = --internal-nodes-stake-lamports ]]; then
-      maybeInternalNodesStakeLamports="$1 $2"
-      shift 2
-    elif [[ $1 = --internal-nodes-lamports ]]; then
-      maybeInternalNodesLamports="$1 $2"
-      shift 2
-    elif [[ $1 = --external-accounts-file ]]; then
-      maybeExternalPrimordialAccountsFile="$1 $2"
-      shift 2
-    elif [[ $1 = --skip-deploy-update ]]; then
-      deployUpdateManifest=false
-      shift 1
-    elif [[ $1 = --skip-remote-log-retrieval ]]; then
-      fetchLogs=false
-      shift 1
-    elif [[ $1 = --letsencrypt ]]; then
-      maybeLetsEncrypt="$1 $2"
-      shift 2
-    elif [[ $1 = --fullnode-additional-disk-size-gb ]]; then
-      maybeFullnodeAdditionalDiskSize="$1 $2"
-      shift 2
-    elif [[ $1 == --machine-type* ]]; then # Bypass quoted long args for GPUs
-      shortArgs+=("$1")
-      shift
-    elif [[ $1 = --no-snapshot ]]; then
-      maybeNoSnapshot="$1"
-      shift 1
     else
       usage "Unknown long option: $1"
     fi
@@ -286,11 +228,6 @@ if ! $skipCreate; then
   # shellcheck disable=SC2206
   create_args+=(${zone_args[@]})
 
-  if [[ -n $maybeLetsEncrypt ]]; then
-    # shellcheck disable=SC2206 # Do not want to quote $maybeLetsEncrypt
-    create_args+=($maybeLetsEncrypt)
-  fi
-
   if $blockstreamer; then
     create_args+=(-u)
   fi
@@ -319,11 +256,6 @@ if ! $skipCreate; then
     create_args+=(-f)
   fi
 
-  if [[ -n $maybeFullnodeAdditionalDiskSize ]]; then
-    # shellcheck disable=SC2206 # Do not want to quote
-    create_args+=($maybeFullnodeAdditionalDiskSize)
-  fi
-
   time net/"$cloudProvider".sh create "${create_args[@]}"
 else
   echo "--- $cloudProvider.sh config"
@@ -379,13 +311,11 @@ if ! $skipStart; then
     if [[ -n $NO_LEDGER_VERIFY ]]; then
      args+=(-o noLedgerVerify)
     fi
-    if [[ -n $NO_INSTALL_CHECK ]]; then
-      args+=(-o noInstallCheck)
-    fi
     if [[ -n $maybeHashesPerTick ]]; then
       # shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick
       args+=($maybeHashesPerTick)
     fi
 
     if $reuseLedger; then
      args+=(-r)
     fi
@@ -394,39 +324,13 @@ if ! $skipStart; then
      args+=(-F)
     fi
 
-    if $deployUpdateManifest; then
-      rm -f update_manifest_keypair.json
-      args+=(--deploy-update linux)
-      args+=(--deploy-update osx)
-      args+=(--deploy-update windows)
-    fi
-
-    if [[ -n $maybeDisableAirdrops ]]; then
-      # shellcheck disable=SC2206
-      args+=($maybeDisableAirdrops)
-    fi
-    if [[ -n $maybeInternalNodesStakeLamports ]]; then
-      # shellcheck disable=SC2206 # Do not want to quote $maybeInternalNodesStakeLamports
-      args+=($maybeInternalNodesStakeLamports)
-    fi
-    if [[ -n $maybeInternalNodesLamports ]]; then
-      # shellcheck disable=SC2206 # Do not want to quote $maybeInternalNodesLamports
-      args+=($maybeInternalNodesLamports)
-    fi
-    if [[ -n $maybeExternalPrimordialAccountsFile ]]; then
-      # shellcheck disable=SC2206 # Do not want to quote $maybeExternalPrimordialAccountsFile
-      args+=($maybeExternalPrimordialAccountsFile)
-    fi
-    if [[ -n $maybeLamports ]]; then
-      # shellcheck disable=SC2206 # Do not want to quote $maybeLamports
-      args+=($maybeLamports)
-    fi
-
-    if [[ -n $maybeNoSnapshot ]]; then
-      # shellcheck disable=SC2206
-      args+=($maybeNoSnapshot)
+    # shellcheck disable=SC2154 # SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu comes from .buildkite/env/
+    if $deployUpdateManifest && [[ -n $SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu ]]; then
+      echo "$SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu" > update_manifest_keypair.json
+      args+=(-i update_manifest_keypair.json)
     fi
 
+    # shellcheck disable=SC2086 # Don't want to double quote the $maybeXYZ variables
     time net/net.sh "${args[@]}"
   ) || ok=false
 
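A recurring idiom in this script is the maybeXYZ variable: an optional flag is captured as a single string (flag plus value) and later expanded unquoted into an argument array, with shellcheck's SC2206/SC2086 warnings explicitly waived because the word-splitting is intentional. A condensed, self-contained sketch of the idiom (the flag name is just one of the script's examples):

#!/usr/bin/env bash
maybeHashesPerTick=

# Capture "--hashes-per-tick <value>" as one string if the caller passed it.
if [[ $1 = --hashes-per-tick ]]; then
  maybeHashesPerTick="$1 $2"
  shift 2
fi

args=(-r -F)
if [[ -n $maybeHashesPerTick ]]; then
  # shellcheck disable=SC2206 # Do not want to quote $maybeHashesPerTick:
  # the string must split back into two words, the flag and its value.
  args+=($maybeHashesPerTick)
fi

echo "${args[@]}"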
ci/testnet-manager.sh
@@ -44,8 +44,6 @@ steps:
           value: "testnet-beta-perf"
         - label: "testnet-demo"
           value: "testnet-demo"
-        - label: "tds"
-          value: "tds"
     - select: "Operation"
       key: "testnet-operation"
       default: "sanity-or-restart"
@@ -142,6 +140,8 @@ testnet-beta|testnet-beta-perf)
 testnet)
   CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
   CHANNEL_BRANCH=$STABLE_CHANNEL
+  : "${EC2_NODE_COUNT:=10}"
+  : "${GCE_NODE_COUNT:=}"
   ;;
 testnet-perf)
   CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
@@ -153,10 +153,6 @@ testnet-demo)
   : "${GCE_NODE_COUNT:=150}"
   : "${GCE_LOW_QUOTA_NODE_COUNT:=70}"
   ;;
-tds)
-  CHANNEL_OR_TAG=beta
-  CHANNEL_BRANCH=$BETA_CHANNEL
-  ;;
 *)
   echo "Error: Invalid TESTNET=$TESTNET"
   exit 1
@@ -188,7 +184,7 @@ if [[ -n $TESTNET_TAG ]]; then
   CHANNEL_OR_TAG=$TESTNET_TAG
 else
 
-  if [[ $CI_BRANCH != "$CHANNEL_BRANCH" ]]; then
+  if [[ $BUILDKITE_BRANCH != "$CHANNEL_BRANCH" ]]; then
     (
       cat <<EOF
 steps:
@@ -201,6 +197,7 @@ steps:
       TESTNET: "$TESTNET"
       TESTNET_OP: "$TESTNET_OP"
      TESTNET_DB_HOST: "$TESTNET_DB_HOST"
+      EC2_NODE_COUNT: "$EC2_NODE_COUNT"
       GCE_NODE_COUNT: "$GCE_NODE_COUNT"
       GCE_LOW_QUOTA_NODE_COUNT: "$GCE_LOW_QUOTA_NODE_COUNT"
 EOF
@@ -215,9 +212,8 @@ sanity() {
   testnet-edge)
     (
       set -x
-      NO_INSTALL_CHECK=1 \
       NO_LEDGER_VERIFY=1 \
-        ci/testnet-sanity.sh edge-testnet-solana-com gce us-west1-b
+        ci/testnet-sanity.sh edge-testnet-solana-com ec2 us-west-1a
     )
     ;;
   testnet-edge-perf)
@@ -232,9 +228,8 @@ sanity() {
   testnet-beta)
     (
       set -x
-      NO_INSTALL_CHECK=1 \
       NO_LEDGER_VERIFY=1 \
-        ci/testnet-sanity.sh beta-testnet-solana-com gce us-west1-b
+        ci/testnet-sanity.sh beta-testnet-solana-com ec2 us-west-1a
     )
     ;;
   testnet-beta-perf)
@@ -249,9 +244,19 @@ sanity() {
   testnet)
     (
       set -x
-      NO_LEDGER_VERIFY=1 \
-      NO_VALIDATOR_SANITY=1 \
-        ci/testnet-sanity.sh testnet-solana-com gce us-west1-b
+      ok=true
+      if [[ -n $EC2_NODE_COUNT ]]; then
+        NO_LEDGER_VERIFY=1 \
+          ci/testnet-sanity.sh testnet-solana-com ec2 "${EC2_ZONES[0]}" || ok=false
+      elif [[ -n $GCE_NODE_COUNT ]]; then
+        NO_LEDGER_VERIFY=1 \
+          ci/testnet-sanity.sh testnet-solana-com gce "${GCE_ZONES[0]}" || ok=false
+      else
+        echo "Error: no EC2 or GCE nodes"
+        ok=false
+      fi
+      $ok
     )
     ;;
   testnet-perf)
@@ -280,14 +285,6 @@ sanity() {
       $ok
     )
     ;;
-  tds)
-    (
-      set -x
-      NO_LEDGER_VERIFY=1 \
-      NO_VALIDATOR_SANITY=1 \
-        ci/testnet-sanity.sh tds-solana-com gce "${GCE_ZONES[0]}" -f
-    )
-    ;;
   *)
     echo "Error: Invalid TESTNET=$TESTNET"
     exit 1
@@ -321,13 +318,13 @@ deploy() {
   testnet-edge)
     (
      set -x
-      ci/testnet-deploy.sh -p edge-testnet-solana-com -C gce -z us-west1-b \
-        -t "$CHANNEL_OR_TAG" -n 2 -c 0 -u -P \
-        -a edge-testnet-solana-com --letsencrypt edge.testnet.solana.com \
+      ci/testnet-deploy.sh -p edge-testnet-solana-com -C ec2 -z us-west-1a \
+        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0ccd4f2239886fa94 \
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
-        ${maybeDelete:+-D}
+        ${maybeDelete:+-D} \
+        --hashes-per-tick auto
     )
     ;;
   testnet-edge-perf)
@@ -341,20 +338,21 @@ deploy() {
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
-        ${maybeDelete:+-D}
+        ${maybeDelete:+-D} \
+        --hashes-per-tick auto
     )
     ;;
   testnet-beta)
     (
      set -x
      NO_VALIDATOR_SANITY=1 \
-      ci/testnet-deploy.sh -p beta-testnet-solana-com -C gce -z us-west1-b \
-        -t "$CHANNEL_OR_TAG" -n 2 -c 0 -u -P \
-        -a beta-testnet-solana-com --letsencrypt beta.testnet.solana.com \
+      ci/testnet-deploy.sh -p beta-testnet-solana-com -C ec2 -z us-west-1a \
+        -t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P -a eipalloc-0f286cf8a0771ce35 \
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
        ${maybeStop:+-S} \
-        ${maybeDelete:+-D}
+        ${maybeDelete:+-D} \
+        --hashes-per-tick auto
     )
     ;;
   testnet-beta-perf)
@@ -368,20 +366,36 @@ deploy() {
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
-        ${maybeDelete:+-D}
+        ${maybeDelete:+-D} \
+        --hashes-per-tick auto
     )
     ;;
   testnet)
     (
      set -x
-      NO_VALIDATOR_SANITY=1 \
-      ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \
-        -t "$CHANNEL_OR_TAG" -n 2 -c 0 -u -P \
-        -a testnet-solana-com --letsencrypt testnet.solana.com \
+      if [[ -n $GCE_NODE_COUNT ]] || [[ -n $skipStart ]]; then
+        maybeSkipStart="skip"
+      fi
+
+      # shellcheck disable=SC2068
+      ci/testnet-deploy.sh -p testnet-solana-com -C ec2 ${EC2_ZONE_ARGS[@]} \
+        -t "$CHANNEL_OR_TAG" -n "$EC2_NODE_COUNT" -c 0 -u -P -f -a eipalloc-0fa502bf95f6f18b2 \
+        ${skipCreate:+-e} \
+        ${maybeSkipStart:+-s} \
+        ${maybeStop:+-S} \
+        ${maybeDelete:+-D}
+
+      if [[ -n $GCE_NODE_COUNT ]]; then
+        # shellcheck disable=SC2068
+        ci/testnet-deploy.sh -p testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
+          -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -f \
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
-        ${maybeDelete:+-D}
+          ${maybeDelete:+-D} \
+          -x
+      fi
     )
     ;;
   testnet-perf)
@@ -397,7 +411,8 @@ deploy() {
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
-        ${maybeDelete:+-D}
+        ${maybeDelete:+-D} \
+        --hashes-per-tick auto
     )
     ;;
   testnet-demo)
@@ -412,147 +427,26 @@ deploy() {
       NO_LEDGER_VERIFY=1 \
       NO_VALIDATOR_SANITY=1 \
       ci/testnet-deploy.sh -p demo-testnet-solana-com -C gce ${GCE_ZONE_ARGS[@]} \
-        -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f \
-        --skip-deploy-update \
-        --skip-remote-log-retrieval \
+        -t "$CHANNEL_OR_TAG" -n "$GCE_NODE_COUNT" -c 0 -P -u -f -w \
         -a demo-testnet-solana-com \
         ${skipCreate:+-e} \
         ${maybeSkipStart:+-s} \
         ${maybeStop:+-S} \
-        ${maybeDelete:+-D}
+        ${maybeDelete:+-D} \
+        --hashes-per-tick auto
 
       if [[ -n $GCE_LOW_QUOTA_NODE_COUNT ]]; then
         # shellcheck disable=SC2068
         NO_LEDGER_VERIFY=1 \
         NO_VALIDATOR_SANITY=1 \
         ci/testnet-deploy.sh -p demo-testnet-solana-com2 -C gce ${GCE_LOW_QUOTA_ZONE_ARGS[@]} \
-          -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x \
-          --skip-deploy-update \
-          --skip-remote-log-retrieval \
-          ${skipCreate:+-e} \
-          ${skipStart:+-s} \
-          ${maybeStop:+-S} \
-          ${maybeDelete:+-D}
-      fi
-    )
-    ;;
-  tds)
-    (
-      set -x
-
-      # Allow cluster configuration to be overridden from env vars
-
-      if [[ -z $TDS_ZONES ]]; then
-        TDS_ZONES="us-west1-a,us-central1-a,europe-west4-a"
-      fi
-      GCE_CLOUD_ZONES=(); while read -r -d, ; do GCE_CLOUD_ZONES+=( "$REPLY" ); done <<< "${TDS_ZONES},"
-
-      if [[ -z $TDS_NODE_COUNT ]]; then
-        TDS_NODE_COUNT="3"
-      fi
-
-      if [[ -z $TDS_CLIENT_COUNT ]]; then
-        TDS_CLIENT_COUNT="1"
-      fi
-
-      if [[ -z $ENABLE_GPU ]]; then
-        maybeGpu=(-G "--machine-type n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100")
-      elif [[ $ENABLE_GPU == skip ]]; then
-        maybeGpu=()
-      else
-        maybeGpu=(-G "${ENABLE_GPU}")
-      fi
-
-      if [[ -z $HASHES_PER_TICK ]]; then
-        maybeHashesPerTick="--hashes-per-tick auto"
-      elif [[ $HASHES_PER_TICK == skip ]]; then
-        maybeHashesPerTick=""
-      else
-        maybeHashesPerTick="--hashes-per-tick ${HASHES_PER_TICK}"
-      fi
-
-      if [[ -z $DISABLE_AIRDROPS ]]; then
-        DISABLE_AIRDROPS="true"
-      fi
-
-      if [[ $DISABLE_AIRDROPS == true ]] ; then
-        maybeDisableAirdrops="--no-airdrop"
-      else
-        maybeDisableAirdrops=""
-      fi
-
-      if [[ -z $INTERNAL_NODES_STAKE_LAMPORTS ]]; then
-        maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports 1000000000000"
-      elif [[ $INTERNAL_NODES_STAKE_LAMPORTS == skip ]]; then
-        maybeInternalNodesStakeLamports=""
-      else
-        maybeInternalNodesStakeLamports="--internal-nodes-stake-lamports ${INTERNAL_NODES_STAKE_LAMPORTS}"
-      fi
-
-      if [[ -z $INTERNAL_NODES_LAMPORTS ]]; then
-        maybeInternalNodesLamports="--internal-nodes-lamports 2000000000000"
-      elif [[ $INTERNAL_NODES_LAMPORTS == skip ]]; then
-        maybeInternalNodesLamports=""
-      else
-        maybeInternalNodesLamports="--internal-nodes-lamports ${INTERNAL_NODES_LAMPORTS}"
-      fi
-
-      EXTERNAL_ACCOUNTS_FILE=/tmp/validator.yml
-      if [[ -z $EXTERNAL_ACCOUNTS_FILE_URL ]]; then
-        EXTERNAL_ACCOUNTS_FILE_URL=https://raw.githubusercontent.com/solana-labs/tour-de-sol/master/validators/all.yml
-        wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
-        maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
-      elif [[ $EXTERNAL_ACCOUNTS_FILE_URL == skip ]]; then
-        maybeExternalAccountsFile=""
-      else
-        wget ${EXTERNAL_ACCOUNTS_FILE_URL} -O ${EXTERNAL_ACCOUNTS_FILE}
-        maybeExternalAccountsFile="--external-accounts-file ${EXTERNAL_ACCOUNTS_FILE}"
-      fi
-
-      if [[ -z $LAMPORTS ]]; then
-        maybeLamports="--lamports 8589934592000000000"
-      elif [[ $LAMPORTS == skip ]]; then
-        maybeLamports=""
-      else
-        maybeLamports="--lamports ${LAMPORTS}"
-      fi
-
-      if [[ -z $ADDITIONAL_DISK_SIZE_GB ]]; then
-        maybeAdditionalDisk="--fullnode-additional-disk-size-gb 32000"
-      elif [[ $ADDITIONAL_DISK_SIZE_GB == skip ]]; then
-        maybeAdditionalDisk=""
-      else
-        maybeAdditionalDisk="--fullnode-additional-disk-size-gb ${ADDITIONAL_DISK_SIZE_GB}"
-      fi
-
-      # Multiple V100 GPUs are available in us-west1, us-central1 and europe-west4
-      # shellcheck disable=SC2068
-      # shellcheck disable=SC2086
-      NO_LEDGER_VERIFY=1 \
-      NO_VALIDATOR_SANITY=1 \
-      ci/testnet-deploy.sh -p tds-solana-com -C gce \
-        "${maybeGpu[@]}" \
-        -d pd-ssd \
-        ${GCE_CLOUD_ZONES[@]/#/-z } \
-        -t "$CHANNEL_OR_TAG" \
-        -n ${TDS_NODE_COUNT} \
-        -c ${TDS_CLIENT_COUNT} \
-        -P -u \
-        -a tds-solana-com --letsencrypt tds.solana.com \
-        ${maybeHashesPerTick} \
+          -t "$CHANNEL_OR_TAG" -n "$GCE_LOW_QUOTA_NODE_COUNT" -c 0 -P -f -x -w \
         ${skipCreate:+-e} \
         ${skipStart:+-s} \
         ${maybeStop:+-S} \
         ${maybeDelete:+-D} \
-        ${maybeDisableAirdrops} \
-        ${maybeInternalNodesStakeLamports} \
-        ${maybeInternalNodesLamports} \
-        ${maybeExternalAccountsFile} \
-        ${maybeLamports} \
-        ${maybeAdditionalDisk} \
-        --skip-deploy-update \
-        --no-snapshot
+          --hashes-per-tick auto
+      fi
     )
     ;;
   *)
ci/testnet-sanity.sh
@@ -64,7 +64,6 @@ for zone in "$@"; do
       ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
       ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
       ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \
-      ${NO_INSTALL_CHECK:+-o noInstallCheck} \
      $zone || ok=false
 
   net/net.sh logs
ci/upload-github-release-asset.sh
@@ -8,6 +8,8 @@
 #
 set -e
 
+REPO_SLUG=solana-labs/solana
+
 if [[ -z $1 ]]; then
   echo No files specified
   exit 1
@@ -18,30 +20,31 @@ if [[ -z $GITHUB_TOKEN ]]; then
   exit 1
 fi
 
-if [[ -z $CI_TAG ]]; then
-  echo Error: CI_TAG not defined
-  exit 1
+if [[ -n $BUILDKITE_TAG ]]; then
+  TAG=$BUILDKITE_TAG
+elif [[ -n $TRIGGERED_BUILDKITE_TAG ]]; then
+  TAG=$TRIGGERED_BUILDKITE_TAG
 fi
 
-if [[ -z $CI_REPO_SLUG ]]; then
-  echo Error: CI_REPO_SLUG not defined
+if [[ -z $TAG ]]; then
+  echo Error: TAG not defined
   exit 1
 fi
 
 releaseId=$( \
-  curl -s "https://api.github.com/repos/$CI_REPO_SLUG/releases/tags/$CI_TAG" \
+  curl -s "https://api.github.com/repos/$REPO_SLUG/releases/tags/$TAG" \
   | grep -m 1 \"id\": \
   | sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p' \
 )
-echo "Github release id for $CI_TAG is $releaseId"
+echo "Github release id for $TAG is $releaseId"
 
 for file in "$@"; do
-  echo "--- Uploading $file to tag $CI_TAG of $CI_REPO_SLUG"
+  echo "--- Uploading $file to tag $TAG of $REPO_SLUG"
   curl \
     --data-binary @"$file" \
     -H "Authorization: token $GITHUB_TOKEN" \
     -H "Content-Type: application/octet-stream" \
-    "https://uploads.github.com/repos/$CI_REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
+    "https://uploads.github.com/repos/$REPO_SLUG/releases/$releaseId/assets?name=$(basename "$file")"
   echo
 done
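The two API calls this script makes are worth seeing in isolation: one GET to resolve a release id from a tag, and one POST to the uploads host to attach a file to that release. A minimal standalone sketch of the same flow, reusing only calls that appear in the script itself (repository, tag, and file name below are placeholders; GITHUB_TOKEN must be set in the environment):

#!/usr/bin/env bash
set -e

repo=solana-labs/solana   # placeholder repository
tag=v0.17.2               # placeholder tag
file=solana-release.tar.bz2

# 1. Resolve the numeric release id behind the tag
releaseId=$(
  curl -s "https://api.github.com/repos/$repo/releases/tags/$tag" \
  | grep -m 1 '"id":' \
  | sed -ne 's/^[^0-9]*\([0-9]*\),$/\1/p'
)
echo "Github release id for $tag is $releaseId"

# 2. Attach the file to that release as an asset
curl \
  --data-binary @"$file" \
  -H "Authorization: token $GITHUB_TOKEN" \
  -H "Content-Type: application/octet-stream" \
  "https://uploads.github.com/repos/$repo/releases/$releaseId/assets?name=$(basename "$file")"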
1 client/.gitignore vendored
@@ -1,2 +1 @@
 /target/
-/farf/
client/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "0.17.2"
+version = "0.15.0"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,18 +11,16 @@ edition = "2018"
 [dependencies]
 bincode = "1.1.4"
 bs58 = "0.2.0"
-jsonrpc-core = "12.1.0"
-log = "0.4.7"
-rand = "0.6.5"
-rayon = "1.1.0"
-reqwest = "0.9.19"
-serde = "1.0.97"
-serde_derive = "1.0.97"
-serde_json = "1.0.40"
-solana-netutil = { path = "../netutil", version = "0.17.2" }
-solana-sdk = { path = "../sdk", version = "0.17.2" }
+log = "0.4.2"
+jsonrpc-core = "10.1.0"
+reqwest = "0.9.17"
+serde = "1.0.89"
+serde_derive = "1.0.91"
+serde_json = "1.0.39"
+solana-netutil = { path = "../netutil", version = "0.15.0" }
+solana-sdk = { path = "../sdk", version = "0.15.0" }
 
 [dev-dependencies]
-jsonrpc-core = "12.1.0"
-jsonrpc-http-server = "12.1.0"
-solana-logger = { path = "../logger", version = "0.17.2" }
+jsonrpc-core = "10.1.0"
+jsonrpc-http-server = "10.1.0"
+solana-logger = { path = "../logger", version = "0.15.0" }

client/src/mock_rpc_client_request.rs
@@ -60,7 +60,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
             serde_json::to_value(response).unwrap()
           }
           RpcRequest::GetTransactionCount => Value::Number(Number::from(1234)),
-          RpcRequest::GetSlot => Value::Number(Number::from(0)),
           RpcRequest::SendTransaction => Value::String(SIGNATURE.to_string()),
           _ => Value::Null,
         };
Some files were not shown because too many files have changed in this diff.