Compare commits

37 commits:

a8b447f86c
f1b7f00c30
6b18db969d
a9b79d6563
b00ae72a31
87e677964f
898dabba70
d90461f856
1e10c43abb
9d0ed84caa
f6a418aa9d
a31475e22f
e11f38733a
f1f148bbd2
460c4f312d
519e0c147e
6c0ceb9067
d052de847e
ad37dfb0a7
1087ca6b9a
b4a41fff6e
925abbbf15
9098f02f98
6a630ff156
8323309ccc
cb0a580b07
97488c0cd8
9c90e29a00
c01789d2a8
a0f9d968fe
888072d4c2
af1010cfd3
fe419db5b4
a86dc44c96
ebda293dc4
6acfc2cf0f
a863e82741
appveyor.yml

````diff
@@ -3,7 +3,7 @@ version: '{build}'
 branches:
   only:
     - master
-    - /^v[0-9.]+\.[0-9.]+/
+    - /^v[0-9.]+/
 
 cache:
   - '%USERPROFILE%\.cargo'
````
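The only semantic change in this hunk is the version-branch filter. Below is a minimal sketch (mine, not from the repo) of what each pattern accepts, using the `regex` crate to stand in for AppVeyor's matcher; the branch names are hypothetical:

```rust
use regex::Regex;

fn main() {
    // Requires an explicit second dot-separated field, e.g. "v0.9".
    let two_fields = Regex::new(r"^v[0-9.]+\.[0-9.]+").unwrap();
    // Accepts any run of digits and dots after "v", e.g. "v1".
    let one_field = Regex::new(r"^v[0-9.]+").unwrap();

    for branch in ["v0.9", "v1", "master"] {
        println!(
            "{:8} two_fields={:5} one_field={}",
            branch,
            two_fields.is_match(branch),
            one_field.is_match(branch)
        );
    }
}
```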
16  .buildkite/env/secrets.ejson  (vendored)

````diff
@@ -1,12 +1,14 @@
 {
   "_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
   "environment": {
-    "CODECOV_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:JnxhrIxh09AvqdJgrVSYmb7PxSrh19aE:07WzVExCHEd1lJ1m8QizRRthGri+WBNeZRKjjEvsy5eo4gv3HD7zVEm42tVTGkqITKkBNQ==]",
-    "CRATES_IO_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:d0jJqC32/axwzq/N7kMRmpxKhnRrhtpt:zvcPHwkOzGnjhNkAQSejwdy1Jkr9wR1qXFFCnfIjyt/XQYubzB1tLkoly/qdmeb5]",
-    "GEOLOCATION_API_KEY": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R4gfB6Ey4i50HyfLt4UZDLBqg3qHEUye:UfZCOgt8XI6Y2g+ivCRVoS1fjFycFs7/GSevvCqh1B50mG0+hzpEyzXQLuKG5OeI]",
-    "GITHUB_TOKEN": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:Vq2dkGTOzfEpRht0BAGHFp/hDogMvXJe:tFXHg1epVt2mq9hkuc5sRHe+KAnVREi/p8S+IZu67XRyzdiA/nGak1k860FXYuuzuaE0QWekaEc=]",
-    "INFLUX_DATABASE": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:5KI9WBkXx3R/W4m256mU5MJOE7N8aAT9:Cb8QFELZ9I60t5zhJ9h55Kcs]",
-    "INFLUX_PASSWORD": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:hQRMpLCrav+OYkNphkeM4hagdVoZv5Iw:AUO76rr6+gF1OLJA8ZLSG8wHKXgYCPNk6gRCV8rBhZBJ4KwDaxpvOhMl7bxxXG6jol7v4aRa/Lk=]",
-    "INFLUX_USERNAME": "EJ[1:yGpTmjdbyjW2kjgIHkFoJv7Ue7EbUvUbqHyw6anGgWg=:R7BNmQjfeqoGDAFTJu9bYTGHol2NgnYN:Q2tOT/EBcFvhFk+DKLKmVU7tLCpVC3Ui]"
+    "CODECOV_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:ks2/ElgxwgxqgmFcxTHANNLmj23YH74h:U4uzRONRfiQyqy6HrPQ/e7OnBUY4HkW37R0iekkF3KJ9UGnHqT1UvwgVbDqLahtDIJ4rWw==]",
+    "CRATES_IO_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:lKMh3aLW+jyRrfS/c7yvkpB+TaPhXqLq:j0v27EbaPgwRdHZAbsM0FlAnt3r9ScQrFbWJYOAZtM3qestEiByTlKpZ0eyF/823]",
+    "GITHUB_TOKEN": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:Ll78c3jGpYqnTwR7HJq3mNNUC7pOv9Lu:GrInO2r8MjmP5c54szkyygdsrW5KQYkDgJQUVyFEPyG8SWfchyM9Gur8RV0a+cdwuxNkHLi4U2M=]",
+    "INFLUX_DATABASE": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:IlH/ZLTXv3SwlY3TVyAPCX2KzLRY6iG3:gGmUGSU/kCfR/mTwKONaUC/X]",
+    "INFLUX_PASSWORD": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:o2qm95GU4VrrcC4OU06jjPvCwKZy/CZF:OW2ga3kLOQJvaDEdGRJ+gn3L2ckFm8AJZtv9wj/GeUIKDH2A4uBPTHsAH9PMe6zujpuHGk3qbeg=]",
+    "INFLUX_USERNAME": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:yDWW/uIHsJqOTDYskZoSx3pzoB1vztWY:2z31oTA3g0Xs9fCczGNJRcx8xf/hFCed]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_unknown_linux_gnu": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:RqRaHlYUvGPNFJa6gmciaYM3tRJTURUH:q78/3GTHCN3Uqx9z4nOBjPZcO1lOazNoB/mdhGRDFsnAqVd2hU8zbKkqLrZfLlGqyD8WQOFuw5oTJR9qWg6L9LcOyj3pGL8jWF2yjgZxdtNMXnkbSrCWLooWBBLT61jYQnEwg73gT8ld3Q8EVv3T+MeSMu6FnPz+0+bqQCAGgfqksP4hsUAJGzgZu+i0tNOdlT7fxnh5KJK/yFM/CKgN2sRwEjukA9hXsffyB61g2zqzTDJxCUDLbCVrCkA/bfUk7Of/t0W5t0nK1H3oyGZEc/lRMauCknDBka3Gz11dVss2QT19WQNh0u7bHVaT/U4lepX1j9Zv]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_apple_darwin": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wFDl3INEnA3EQDHRX40avqGe1OMoJxyy:6ncCRVRTIRuYI5o/gayeuWCudWvmKNYr8KEHAWeTq34a5bdcKInBdKhjmjX+wLHqsEwQ5gcyhcxy4Ri2mbuN6AHazfZOZlubQkGlyUOAIYO5D5jkbyIh40DAtjVzo1MD/0HsW9zdGOzqUKp5xJJeDsbR4F153jbxa7fvwF90Q4UQjYFTKAtExEmHtDGSJG48ToVwTabTV/OnISMIggDZBviIv2QWHvXgK07b2mUj34rHJywEDGN1nj5rITTDdUeRcB1x4BAMOe94kTFPSTaj/OszvYlGECt8rkKFqbm092qL+XLfiBaImqe/WJHRCnAj6Don]",
+    "SOLANA_INSTALL_UPDATE_MANIFEST_KEYPAIR_x86_64_pc_windows_msvc": "EJ[1:8iZ6baJB4fbBV+XDsrUooyGAnGL/8Ol+4Qd0zKh5YjI=:wAh+dBuZopv6vruVOYegUcq/aBnbksT1:qIJfCfDvDWiqicMOkmbJs/0n7UJLKNmgMQaKzeQ8J7Q60YpXbtWzKVW3tS6lzlgf64m3MrPXyo1C+mWh6jkjsb18T/OfggZy1ZHM4AcsOC6/ldUkV5YtuxUQuAmd5jCuV/R7iuYY8Z66AcfAevlb+bnLpgIifdA8fh/IktOo58nZUQwZDdppAacmftsLc6Frn5Er6A6+EXpxK1nmnlmLJ4AJztqlh6X0r+JvE2O7qeoZUXrIegnkxo7Aay7I/dd8zdYpp7ICSiTEtfVN/xNIu/5QmTRU7gWoz7cPl9epq4aiEALzPOzb6KVOiRcsOg+TlFvLQ71Ik5o=]"
   }
 }
````
````diff
@@ -15,17 +15,12 @@ if [[ -n $BUILDKITE_TAG ]]; then
     "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"
   buildkite-agent pipeline upload ci/buildkite-release.yml
 else
-  if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
-    # Add helpful link back to the corresponding Github Pull Request
-    buildkite-agent annotate --style info --context pr-backlink \
-      "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
-  fi
-
-  if [[ $BUILDKITE_MESSAGE =~ GitBook: ]]; then
-    buildkite-agent annotate --style info --context gitbook-ci-skip \
-      "GitBook commit detected, CI skipped"
-    exit
-  fi
-
   buildkite-agent pipeline upload ci/buildkite.yml
 fi
+
+if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
+  # Add helpful link back to the corresponding Github Pull Request
+  buildkite-agent annotate --style info --context pr-backlink \
+    "Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"
+fi
+
````
.gitbook.yaml

````diff
@@ -1,5 +1,4 @@
-root: ./docs/src
+root: ./book/src
 
 structure:
   readme: introduction.md
-  summary: SUMMARY.md
````
2  .github/stale.yml  (vendored)

````diff
@@ -1,7 +1,7 @@
 only: pulls
 
 # Number of days of inactivity before a pull request becomes stale
-daysUntilStale: 7
+daysUntilStale: 30
 
 # Number of days of inactivity before a stale pull request is closed
 daysUntilClose: 7
````
7  .gitignore  (vendored)

````diff
@@ -1,6 +1,6 @@
-/docs/html/
-/docs/src/tests.ok
-/docs/src/.gitbook/assets/*.svg
+/book/html/
+/book/src/img/
+/book/src/tests.ok
 /farf/
 /solana-release/
 /solana-release.tar.bz2
@@ -16,7 +16,6 @@
 # log files
 *.log
 log-*.txt
-log-*/
 
 # intellij files
 /.idea/
````
48  .mergify.yml

````diff
@@ -19,35 +19,59 @@ pull_request_rules:
       label:
         add:
           - automerge
-  - name: v0.23 backport
+  - name: v0.16 backport
     conditions:
       - base=master
-      - label=v0.23
+      - label=v0.16
    actions:
       backport:
         branches:
-          - v0.23
+          - v0.16
-  - name: v1.0 backport
+  - name: v0.17 backport
     conditions:
       - base=master
-      - label=v1.0
+      - label=v0.17
    actions:
       backport:
         branches:
-          - v1.0
+          - v0.17
-  - name: v1.1 backport
+  - name: v0.18 backport
     conditions:
       - base=master
-      - label=v1.1
+      - label=v0.18
    actions:
       backport:
         branches:
-          - v1.1
+          - v0.18
-  - name: v1.2 backport
+  - name: v0.19 backport
     conditions:
       - base=master
-      - label=v1.2
+      - label=v0.19
    actions:
       backport:
         branches:
-          - v1.2
+          - v0.19
+  - name: v0.20 backport
+    conditions:
+      - base=master
+      - label=v0.20
+    actions:
+      backport:
+        branches:
+          - v0.20
+  - name: v0.21 backport
+    conditions:
+      - base=master
+      - label=v0.21
+    actions:
+      backport:
+        branches:
+          - v0.21
+  - name: v0.22 backport
+    conditions:
+      - base=master
+      - label=v0.22
+    actions:
+      backport:
+        branches:
+          - v0.22
````
.travis.yml

````diff
@@ -1,13 +1,13 @@
 os:
   - osx
-  - windows
 
 language: rust
 rust:
-  - stable
+  - 1.37.0
 
 install:
   - source ci/rust-version.sh
+  - test $rust_stable = $TRAVIS_RUST_VERSION # Update .travis.yml rust version above when this fails
 
 script:
   - source ci/env.sh
@@ -16,7 +16,7 @@ script:
 branches:
   only:
     - master
-    - /^v\d+\.\d+/
+    - /^v\d+\.\d+(\.\d+)?(-\S*)?$/
 
 notifications:
   slack:
````
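The second hunk tightens the tag-branch filter by anchoring it. A small sketch (mine, not repo code; the `regex` crate stands in for Travis's matcher, and the branch names are hypothetical) of the difference:

```rust
use regex::Regex;

fn main() {
    // Unanchored prefix match: anything starting with vX.Y passes.
    let prefix = Regex::new(r"^v\d+\.\d+").unwrap();
    // Fully anchored: optional patch field and optional -suffix, then end of string.
    let anchored = Regex::new(r"^v\d+\.\d+(\.\d+)?(-\S*)?$").unwrap();

    for name in ["v0.16", "v0.16.6", "v0.16.6-rc.0", "v0.16junk here"] {
        println!(
            "{:16} prefix={:5} anchored={}",
            name,
            prefix.is_match(name),
            anchored.is_match(name)
        );
    }
}
```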
270  CONTRIBUTING.md

````diff
@@ -1,41 +1,23 @@
-# Solana Coding Guidelines
+Solana Coding Guidelines
+===
 
-The goal of these guidelines is to improve developer productivity by allowing
-developers to jump into any file in the codebase and not need to adapt to
-inconsistencies in how the code is written. The codebase should appear as if it
-had been authored by a single developer. If you don't agree with a convention,
-submit a PR patching this document and let's discuss! Once the PR is accepted,
-*all* code should be updated as soon as possible to reflect the new
+The goal of these guidelines is to improve developer productivity by allowing developers to
+jump any file in the codebase and not need to adapt to inconsistencies in how the code is
+written. The codebase should appear as if it had been authored by a single developer. If you
+don't agree with a convention, submit a PR patching this document and let's discuss! Once
+the PR is accepted, *all* code should be updated as soon as possible to reflect the new
 conventions.
 
-## Pull Requests
+Pull Requests
+---
 
-Small, frequent PRs are much preferred to large, infrequent ones. A large PR is
-difficult to review, can block others from making progress, and can quickly get
-its author into "rebase hell". A large PR oftentimes arises when one change
-requires another, which requires another, and then another. When you notice
-those dependencies, put the fix into a commit of its own, then checkout a new
-branch, and cherry-pick it.
-
-```bash
-$ git commit -am "Fix foo, needed by bar"
-$ git checkout master
-$ git checkout -b fix-foo
-$ git cherry-pick fix-bar
-$ git push --set-upstream origin fix-foo
-```
-
-Open a PR to start the review process and then jump back to your original
-branch to keep making progress. Consider rebasing to make your fix the first
-commit:
-
-```bash
-$ git checkout fix-bar
-$ git rebase -i master <Move fix-foo to top>
-```
-
-Once the commit is merged, rebase the original branch to purge the
-cherry-picked commit:
+Small, frequent PRs are much preferred to large, infrequent ones. A large PR is difficult
+to review, can block others from making progress, and can quickly get its author into
+"rebase hell". A large PR oftentimes arises when one change requires another, which requires
+another, and then another. When you notice those dependencies, put the fix into a commit of
+its own, then checkout a new branch, and cherrypick it. Open a PR to start the review
+process and then jump back to your original branch to keep making progress. Once the commit
+is merged, you can use git-rebase to purge it from your original branch.
 
 ```bash
 $ git pull --rebase upstream master
@@ -43,137 +25,26 @@ $ git pull --rebase upstream master
 
 ### How big is too big?
 
-If there are no functional changes, PRs can be very large and that's no
-problem. If, however, your changes are making meaningful changes or additions,
-then about 1000 lines of changes is about the most you should ask a Solana
-maintainer to review.
+If there are no functional changes, PRs can be very large and that's no problem. If,
+however, your changes are making meaningful changes or additions, then about 1,000 lines of
+changes is about the most you should ask a Solana maintainer to review.
 
 ### Should I send small PRs as I develop large, new components?
 
-Add only code to the codebase that is ready to be deployed. If you are building
-a large library, consider developing it in a separate git repository. When it
-is ready to be integrated, the Solana maintainers will work with you to decide
-on a path forward. Smaller libraries may be copied in whereas very large ones
-may be pulled in with a package manager.
-
-## Getting Pull Requests Merged
-
-There is no single person assigned to watching GitHub PR queue and ushering you
-through the process. Typically, you will ask the person that wrote a component
-to review changes to it. You can find the author using `git blame` or asking on
-Discord. When working to get your PR merged, it's most important to understand
-that changing the code is your priority and not necessarily a priority of the
-person you need an approval from. Also, while you may interact the most with
-the component author, you should aim to be inclusive of others. Providing a
-detailed problem description is the most effective means of engaging both the
-component author and other potentially interested parties.
-
-Consider opening all PRs as Draft Pull Requests first. Using a draft PR allows
-you to kickstart the CI automation, which typically takes between 10 and 30
-minutes to execute. Use that time to write a detailed problem description. Once
-the description is written and CI succeeds, click the "Ready to Review" button
-and add reviewers. Adding reviewers before CI succeeds is a fast path to losing
-reviewer engagement. Not only will they be notified and see the PR is not yet
-ready for them, they will also be bombarded them with additional notifications
-each time you push a commit to get past CI or until they "mute" the PR. Once
-muted, you'll need to reach out over some other medium, such as Discord, to
-request they have another look. When you use draft PRs, no notifications are
-sent when you push commits and edit the PR description. Use draft PRs
-liberally. Don't bug the humans until you have gotten past the bots.
-
-### What should be in my PR description?
-
-Reviewing code is hard work and generally involves an attempt to guess the
-author's intent at various levels. Please assume reviewer time is scarce and do
-what you can to make your PR as consumable as possible. Inspired by techniques
-for writing good whitepapers, the guidance here aims to maximize reviewer
-engagement.
-
-Assume the reviewer will spend no more than a few seconds reading the PR title.
-If it doesn't describe a noteworthy change, don't expect the reviewer to click
-to see more.
-
-Next, like the abstract of a whitepaper, the reviewer will spend ~30 seconds
-reading the PR problem description. If what is described there doesn't look
-more important than competing issues, don't expect the reviewer to read on.
-
-Next, the reviewer will read the proposed changes. At this point, the reviewer
-needs to be convinced the proposed changes are a *good* solution to the problem
-described above. If the proposed changes, not the code changes, generates
-discussion, consider closing the PR and returning with a design proposal
-instead.
-
-Finally, once the reviewer understands the problem and agrees with the approach
-to solving it, the reviewer will view the code changes. At this point, the
-reviewer is simply looking to see if the implementation actually implements
-what was proposed and if that implementation is maintainable. When a concise,
-readable test for each new code path is present, the reviewer can safely ignore
-the details of its implementation. When those tests are missing, expect to
-either lose engagement or get a pile of review comments as the reviewer
-attempts to consider every ambiguity in your implementation.
-
-### The PR Title
-
-The PR title should contain a brief summary of the change, from the perspective
-of the user. Examples of good titles:
-
-* Add rent to accounts
-* Fix out-of-memory error in validator
-* Clean up `process_message()` in runtime
-
-The conventions here are all the same as a good git commit title:
-
-* First word capitalized and in the imperative mood, not past tense ("add", not
-"added")
-* No trailing period
-* What was done, whom it was done to, and in what context
-
-### The PR Problem Statement
-
-The git repo implements a product with various features. The problem statement
-should describe how the product is missing a feature, how a feature is
-incomplete, or how the implementation of a feature is somehow undesirable. If
-an issue being fixed already describes the problem, go ahead and copy-paste it.
-As mentioned above, reviewer time is scarce. Given a queue of PRs to review,
-the reviewer may ignore PRs that expect them to click through links to see if
-the PR warrants attention.
-
-### The Proposed Changes
-
-Typically the content under the "Proposed changes" section will be a bulleted
-list of steps taken to solve the problem. Oftentimes, the list is identical to
-the subject lines of the git commits contained in the PR. It's especially
-generous (and not expected) to rebase or reword commits such that each change
-matches the logical flow in your PR description.
+Add only code to the codebase that is ready to be deployed. If you are building a large
+library, consider developing it in a separate git repository. When it is ready to be
+integrated, the Solana maintainers will work with you to decide on a path forward. Smaller
+libraries may be copied in whereas very large ones may be pulled in with a package manager.
 
 ### When will my PR be reviewed?
 
-PRs are typically reviewed and merged in under 7 days. If your PR has been open
-for longer, it's a strong indicator that the reviewers aren't confident the
-change meets the quality standards of the codebase. You might consider closing
-it and coming back with smaller PRs and longer descriptions detailing what
-problem it solves and how it solves it. Old PRs will be marked stale and then
-closed automatically 7 days later.
-
-### How to manage review feedback?
-
-After a reviewer provides feedback, you can quickly say "acknowledged, will
-fix" using a thumb's up emoji. If you're confident your fix is exactly as
-prescribed, add a reply "Fixed in COMMIT\_HASH" and mark the comment as
-resolved. If you're not sure, reply "Is this what you had in mind?
-COMMIT\_HASH" and if so, the reviewer will reply and mark the conversation as
-resolved. Marking conversations as resolved is an excellent way to engage more
-reviewers. Leaving conversations open may imply the PR is not yet ready for
-additional review.
-
-### When will my PR be re-reviewed?
-
-Recall that once your PR is opened, a notification is sent every time you push
-a commit. After a reviewer adds feedback, they won't be checking on the status
-of that feedback after every new commit. Instead, directly mention the reviewer
-when you feel your PR is ready for another pass.
-
-## Draft Pull Requests
+PRs are typically reviewed and merged in under 7 days. If your PR has been open for longer,
+it's a strong indicator that the reviewers aren't confident the change meets the quality
+standards of the codebase. You might consider closing it and coming back with smaller PRs
+and longer descriptions detailing what problem it solves and how it solves it.
+
+Draft Pull Requests
+---
 
 If you want early feedback on your PR, use GitHub's "Draft Pull Request"
 mechanism. Draft PRs are a convenient way to collaborate with the Solana
@@ -181,68 +52,67 @@ maintainers without triggering notifications as you make changes. When you feel
 your PR is ready for a broader audience, you can transition your draft PR to a
 standard PR with the click of a button.
 
-Do not add reviewers to draft PRs. GitHub doesn't automatically clear
-approvals when you click "Ready for Review", so a review that meant "I approve
-of the direction" suddenly has the appearance of "I approve of these changes."
-Instead, add a comment that mentions the usernames that you would like a review
-from. Ask explicitly what you would like feedback on.
+Do not add reviewers to draft PRs. GitHub doesn't automatically clear approvals
+when you click "Ready for Review", so a review that meant "I approve of the
+direction" suddenly has the appearance of "I approve of these changes." Instead,
+add a comment that mentions the usernames that you would like a review from. Ask
+explicitly what you would like feedback on.
 
-## Rust coding conventions
+Rust coding conventions
+---
 
-* All Rust code is formatted using the latest version of `rustfmt`. Once
-installed, it will be updated automatically when you update the compiler with
-`rustup`.
+* All Rust code is formatted using the latest version of `rustfmt`. Once installed, it will be
+updated automatically when you update the compiler with `rustup`.
 
-* All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do
-so explicitly:
+* All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly:
 
-```rust #[allow(clippy::too_many_arguments)] ```
+```rust
+#[allow(clippy::too_many_arguments)]
+```
 
 Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`.
 
-* For variable names, when in doubt, spell it out. The mapping from type names
-to variable names is to lowercase the type name, putting an underscore before
-each capital letter. Variable names should *not* be abbreviated unless being
-used as closure arguments and the brevity improves readability. When a function
-has multiple instances of the same type, qualify each with a prefix and
-underscore (i.e. alice\_keypair) or a numeric suffix (i.e. tx0).
+* For variable names, when in doubt, spell it out. The mapping from type names to variable names
+is to lowercase the type name, putting an underscore before each capital letter. Variable names
+should *not* be abbreviated unless being used as closure arguments and the brevity improves
+readability. When a function has multiple instances of the same type, qualify each with a
+prefix and underscore (i.e. alice_keypair) or a numeric suffix (i.e. tx0).
 
-* For function and method names, use `<verb>_<subject>`. For unit tests, that
-verb should always be `test` and for benchmarks the verb should always be
-`bench`. Avoid namespacing function names with some arbitrary word. Avoid
-abbreviating words in function names.
+* For function and method names, use `<verb>_<subject>`. For unit tests, that verb should
+always be `test` and for benchmarks the verb should always be `bench`. Avoid namespacing
+function names with some arbitrary word. Avoid abbreviating words in function names.
 
-* As they say, "When in Rome, do as the Romans do." A good patch should
-acknowledge the coding conventions of the code that surrounds it, even in the
-case where that code has not yet been updated to meet the conventions described
-here.
+* As they say, "When in Rome, do as the Romans do." A good patch should acknowledge the coding
+conventions of the code that surrounds it, even in the case where that code has not yet been
+updated to meet the conventions described here.
 
 
-## Terminology
+Terminology
+---
 
-Inventing new terms is allowed, but should only be done when the term is widely
-used and understood. Avoid introducing new 3-letter terms, which can be
-confused with 3-letter acronyms.
+Inventing new terms is allowed, but should only be done when the term is widely used and
+understood. Avoid introducing new 3-letter terms, which can be confused with 3-letter acronyms.
 
-[Terms currently in use](docs/src/terminology.md)
+[Terms currently in use](book/src/terminology.md)
 
 
-## Design Proposals
+Design Proposals
+---
 
-Solana's architecture is described by docs generated from markdown files in
-the `docs/src/` directory, maintained by an *editor* (currently @garious). To
-add a design proposal, you'll need to include it in the
-[Accepted Design Proposals](https://docs.solana.com/proposals)
-section of the Solana docs. Here's the full process:
+Solana's architecture is described by a book generated from markdown files in
+the `book/src/` directory, maintained by an *editor* (currently @garious). To
+add a design proposal, you'll need to at least propose a change the content
+under the [Accepted Design
+Proposals](https://solana-labs.github.io/book-edge/proposals.html) chapter.
+Here's the full process:
 
 1. Propose a design by creating a PR that adds a markdown document to the
-`docs/src/proposals` directory and references it from the [table of
-contents](docs/src/SUMMARY.md). Add any relevant *maintainers* to the PR
-review.
+directory `book/src/` and references it from the [table of
+contents](book/src/SUMMARY.md). Add any relevant *maintainers* to the PR review.
 2. The PR being merged indicates your proposed change was accepted and that the
 maintainers support your plan of attack.
 3. Submit PRs that implement the proposal. When the implementation reveals the
-need for tweaks to the proposal, be sure to update the proposal and have that
-change reviewed by the same people as in step 1.
+need for tweaks to the proposal, be sure to update the proposal and have
+that change reviewed by the same people as in step 1.
 4. Once the implementation is complete, submit a PR that moves the link from
 the Accepted Proposals to the Implemented Proposals section.
````
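The naming rules above are easiest to see in code. A hedged illustration (mine, not taken from the repo):

```rust
struct Keypair;
struct Transaction;

// <verb>_<subject> for function names; no abbreviations.
fn process_transaction(_transaction: &Transaction) {}

#[cfg(test)]
mod tests {
    use super::*;

    // The unit-test verb is always `test`.
    #[test]
    fn test_process_transaction() {
        // Type name lowercased, qualified with a prefix and underscore...
        let _alice_keypair = Keypair;
        // ...or a numeric suffix when a function has several of the same type.
        let tx0 = Transaction;
        let tx1 = Transaction;
        process_transaction(&tx0);
        process_transaction(&tx1);
    }
}
```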
6529  Cargo.lock  (generated)

72  Cargo.toml
````diff
@@ -3,65 +3,59 @@ members = [
     "bench-exchange",
     "bench-streamer",
     "bench-tps",
-    "banking-bench",
-    "chacha",
-    "chacha-cuda",
     "chacha-sys",
-    "cli-config",
     "client",
     "core",
-    "faucet",
+    "drone",
-    "perf",
     "validator",
     "genesis",
-    "genesis-programs",
+    "genesis_programs",
     "gossip",
     "install",
     "keygen",
-    "ledger",
+    "kvstore",
     "ledger-tool",
-    "local-cluster",
+    "local_cluster",
     "logger",
-    "log-analyzer",
     "merkle-tree",
     "measure",
     "metrics",
-    "net-shaper",
+    "programs/bpf",
-    "programs/bpf_loader",
+    "programs/bpf_loader_api",
-    "programs/budget",
+    "programs/bpf_loader_program",
-    "programs/btc_spv",
+    "programs/budget_api",
-    "programs/btc_spv_bin",
+    "programs/budget_program",
-    "programs/config",
+    "programs/config_api",
-    "programs/exchange",
+    "programs/config_program",
-    "programs/failure",
+    "programs/config_tests",
-    "programs/noop",
+    "programs/exchange_api",
-    "programs/ownable",
+    "programs/exchange_program",
-    "programs/stake",
+    "programs/failure_program",
-    "programs/storage",
+    "programs/move_loader_api",
-    "programs/vest",
+    "programs/move_loader_program",
-    "programs/vote",
+    "programs/librapay_api",
-    "archiver",
+    "programs/noop_program",
-    "archiver-lib",
+    "programs/stake_api",
-    "archiver-utils",
+    "programs/stake_program",
-    "remote-wallet",
+    "programs/stake_tests",
+    "programs/storage_api",
+    "programs/storage_program",
+    "programs/token_api",
+    "programs/token_program",
+    "programs/vote_api",
+    "programs/vote_program",
+    "replicator",
     "runtime",
     "sdk",
     "sdk-c",
-    "scripts",
-    "stake-monitor",
-    "sys-tuner",
-    "transaction-status",
     "upload-perf",
-    "net-utils",
+    "validator-info",
-    "version",
+    "utils/netutil",
+    "utils/fixed_buf",
     "vote-signer",
     "cli",
-    "rayon-threadlimit",
-    "watchtower",
 ]
 
 exclude = [
-    "programs/bpf",
+    "programs/bpf/rust/noop",
-    "programs/move_loader",
-    "programs/librapay",
 ]
````
41  README.md

````diff
@@ -1,5 +1,5 @@
-[](https://crates.io/crates/solana-core)
+[](https://crates.io/crates/solana)
-[](https://docs.rs/solana-core)
+[](https://docs.rs/solana)
 [](https://buildkite.com/solana-labs/solana/builds?branch=master)
 [](https://codecov.io/gh/solana-labs/solana)
 
@@ -9,12 +9,26 @@ Blockchain Rebuilt for Scale
 Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
 up to 710 thousand transactions per second on a gigabit network.
 
-Documentation
+Disclaimer
 ===
 
-Before you jump into the code, review the documentation [Solana: Blockchain Rebuilt for Scale](https://docs.solana.com).
+All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
 
-(The _latest_ development version of the docs is [available here](https://docs.solana.com/v/master).)
+Introduction
+===
+
+It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
+
+> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
+
+Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
+
+Architecture
+===
+
+Before you jump into the code, review the online book [Solana: Blockchain Rebuilt for Scale](https://solana-labs.github.io/book/).
+
+(The _latest_ development version of the online book is also [available here](https://solana-labs.github.io/book-edge/).)
 
 Release Binaries
 ===
@@ -64,7 +78,7 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt
 ```
 
-If your rustc version is lower than 1.39.0, please update it:
+If your rustc version is lower than 1.37.0, please update it:
 
 ```bash
 $ rustup update
@@ -73,8 +87,7 @@ $ rustup update
 On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc. On Ubuntu:
 
 ```bash
-$ sudo apt-get update
-$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang
+$ sudo apt-get install libssl-dev pkg-config zlib1g-dev llvm clang
 ```
 
 Download the source code:
@@ -107,13 +120,16 @@ $ cargo test
 Local Testnet
 ---
 
-Start your own testnet locally, instructions are in the online docs [Solana: Blockchain Rebuild for Scale: Getting Started](https://docs.solana.com/building-from-source).
+Start your own testnet locally, instructions are in the book [Solana: Blockchain Rebuild for Scale: Getting Started](https://solana-labs.github.io/book/getting-started.html).
 
 Remote Testnets
 ---
 
-* `testnet` - public stable testnet accessible via devnet.solana.com. Runs 24/7
+We maintain several testnets:
+
+* `testnet` - public stable testnet accessible via testnet.solana.com. Runs 24/7
+* `testnet-beta` - public beta channel testnet accessible via beta.testnet.solana.com. Runs 24/7
+* `testnet-edge` - public edge channel testnet accessible via edge.testnet.solana.com. Runs 24/7
 
 ## Deploy process
 
@@ -224,8 +240,3 @@ problem is solved by this code?" On the other hand, if a test does fail and you
 better way to solve the same problem, a Pull Request with your solution would most certainly be
 welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
 send us that patch!
-
-Disclaimer
-===
-
-All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
````
139  RELEASE.md

````diff
@@ -59,90 +59,81 @@ There are three release channels that map to branches as follows:
 * beta - tracks the largest (and latest) `vX.Y` stabilization branch, more stable.
 * stable - tracks the second largest `vX.Y` stabilization branch, most stable.
 
-## Steps to Create a Branch
+## Release Steps
 
-### Create the new branch
-1. Check out the latest commit on `master` branch:
-```
-git fetch --all
-git checkout upstream/master
-```
-1. Determine the new branch name. The name should be "v" + the first 2 version fields
+### Creating a new branch from master
+
+#### Create the new branch
+1. Pick your branch point for release on master.
+1. Create the branch. The name should be "v" + the first 2 "version" fields
 from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies
 the next branch name is "v0.9".
-1. Create the new branch and push this branch to the `solana` repository:
-```
-git checkout -b <branchname>
-git push -u origin <branchname>
-```
+1. Note the Cargo.toml in the repo root directory does not contain a version. Look at any other Cargo.toml file.
+1. Create a new branch and push this branch to the solana repository.
+1. `git checkout -b <branchname>`
+1. `git push -u origin <branchname>`
 
-### Update master branch with the next version
+#### Update master with the next version
 
-1. After the new branch has been created and pushed, update the Cargo.toml files on **master** to the next semantic version (e.g. 0.9.0 -> 0.10.0) with:
-```
-scripts/increment-cargo-version.sh minor
-```
-1. Rebuild to get an updated version of `Cargo.lock`:
-```
-cargo build
-```
-1. Push all the changed Cargo.toml and Cargo.lock files to the `master` branch with something like:
-```
-git co -b version_update
-git ls-files -m | xargs git add
-git commit -m 'Update Cargo.toml versions from X.Y to X.Y+1'
-git push -u origin version_update
-```
-1. Confirm that your freshly cut release branch is shown as `BETA_CHANNEL` and the previous release branch as `STABLE_CHANNEL`:
-```
-ci/channel_info.sh
-```
+1. After the new branch has been created and pushed, update Cargo.toml on **master** to the next semantic version (e.g. 0.9.0 -> 0.10.0)
+by running `./scripts/increment-cargo-version.sh`, then rebuild with
+`cargo build` to cause a refresh of `Cargo.lock`.
+1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the
+master branch
 
-## Steps to Create a Release
-
-### Create the Release Tag on GitHub
-
-1. Go to [GitHub's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.
-1. Click "Draft new release". The release tag must exactly match the `version`
-field in `/Cargo.toml` prefixed by `v`.
-1. If the Cargo.toml verion field is **0.12.3**, then the release tag must be **v0.12.3**
-1. Make sure the Target Branch field matches the branch you want to make a release on.
-1. If you want to release v0.12.0, the target branch must be v0.12
-1. If this is the first release on the branch (e.g. v0.13.**0**), paste in [this
-template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md). Engineering Lead can provide summary contents for release notes if needed.
-1. Click "Save Draft", then confirm the release notes look good and the tag name and branch are correct. Go back into edit the release and click "Publish release" when ready.
-
-### Update release branch with the next patch version
-
-1. After the new release has been tagged, update the Cargo.toml files on **release branch** to the next semantic version (e.g. 0.9.0 -> 0.9.1) with:
-```
-scripts/increment-cargo-version.sh patch
-```
-1. Rebuild to get an updated version of `Cargo.lock`:
-```
-cargo build
-```
-1. Push all the changed Cargo.toml and Cargo.lock files to the **release branch** with something like:
-```
-git co -b version_update
-git ls-files -m | xargs git add
-git commit -m 'Update Cargo.toml versions from X.Y.Z to X.Y.Z+1'
-git push -u origin version_update
-```
-
-### Verify release automation success
-1. Go to [Solana Releases](https://github.com/solana-labs/solana/releases) and click on the latest release that you just published. Verify that all of the build artifacts are present. This can take up to 90 minutes after creating the tag.
-1. The `solana-secondary` Buildkite pipeline handles creating the binary tarballs and updated crates. Look for a job under the tag name of the release: https://buildkite.com/solana-labs/solana-secondary
-1. [Crates.io](https://crates.io/crates/solana) should have an updated Solana version.
+At this point, `ci/channel-info.sh` should show your freshly cut release branch as
+"BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
 
 ### Update documentation
-TODO: Documentation update procedure is WIP as we move to gitbook
 
-Document the new recommended version by updating `docs/src/running-archiver.md` and `docs/src/validator-testnet.md` on the release (beta) branch to point at the `solana-install` for the upcoming release version.
+Document the new recommended version by updating
+```export SOLANA_RELEASE=[new scheduled TESTNET_TAG value]```
+in book/src/testnet-participation.md on the release (beta) branch.
 
-### Update software on devnet.solana.com
+### Make the Release
 
-The testnet running on devnet.solana.com is set to use a fixed release tag
+We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.
+
+1. Go [there ;)](https://github.com/solana-labs/solana/releases).
+1. Click "Draft new release". The release tag must exactly match the `version`
+field in `/Cargo.toml` prefixed by `v` (ie, `<branchname>.X`).
+1. If the Cargo.toml verion field is **0.12.3**, then the release tag must be **v0.12.3**
+1. If this is the first release on the branch (e.g. v0.13.**0**), paste in [this
+template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md)
+and fill it in.
+1. Test the release by generating a tag using semver's rules. First try at a
+release should be `<branchname>.X-rc.0`.
+1. Verify release automation:
+1. [Crates.io](https://crates.io/crates/solana) should have an updated Solana version.
+1. Once the release has been made, update Cargo.toml on the release branch to the next
+semantic version (e.g. 0.9.0 -> 0.9.1) by running
+`./scripts/increment-cargo-version.sh patch`, then rebuild with `cargo
+build` to cause a refresh of `Cargo.lock`.
+1. Push your Cargo.toml change and the autogenerated Cargo.lock changes to the
+release branch.
+
+### Publish updated Book
+We maintain three copies of the "book" as official documentation:
+
+1) "Book" is the documentation for the latest official release. This should get manually updated whenever a new release is made. It is published here:
+https://solana-labs.github.io/book/
+
+2) "Book-edge" tracks the tip of the master branch and updates automatically.
+https://solana-labs.github.io/book-edge/
+
+3) "Book-beta" tracks the tip of the beta branch and updates automatically.
+https://solana-labs.github.io/book-beta/
+
+To manually trigger an update of the "Book", create a new job of the manual-update-book pipeline.
+Set the tag of the latest release as the PUBLISH_BOOK_TAG environment variable.
+```bash
+PUBLISH_BOOK_TAG=v0.16.6
+```
+https://buildkite.com/solana-labs/manual-update-book
+
+### Update software on testnet.solana.com
+
+The testnet running on testnet.solana.com is set to use a fixed release tag
 which is set in the Buildkite testnet-management pipeline.
 This tag needs to be updated and the testnet restarted after a new release
 tag is created.
@@ -182,4 +173,4 @@ TESTNET_OP=create-and-start
 ### Alert the community
 
 Notify Discord users on #validator-support that a new release for
-devnet.solana.com is available
+testnet.solana.com is available
````
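Both branches of this release flow lean on `scripts/increment-cargo-version.sh` (a shell script in the repo). A minimal Rust sketch of the bump semantics the steps assume — `minor` on master after cutting a branch, `patch` on the release branch after tagging — written here for illustration, not the script itself:

```rust
// Parse "MAJOR.MINOR.PATCH", bump one field, zero the fields below it.
fn bump(version: &str, kind: &str) -> String {
    let mut parts: Vec<u64> = version.split('.').map(|p| p.parse().unwrap()).collect();
    assert_eq!(parts.len(), 3, "expected MAJOR.MINOR.PATCH");
    match kind {
        "minor" => {
            parts[1] += 1;
            parts[2] = 0;
        }
        "patch" => parts[2] += 1,
        _ => panic!("unknown bump kind"),
    }
    format!("{}.{}.{}", parts[0], parts[1], parts[2])
}

fn main() {
    assert_eq!(bump("0.9.0", "minor"), "0.10.0"); // master, after branching v0.9
    assert_eq!(bump("0.9.0", "patch"), "0.9.1");  // release branch, after tagging v0.9.0
    println!("ok");
}
```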
archiver-lib/Cargo.toml

````diff
@@ -1,42 +0,0 @@
-[package]
-name = "solana-archiver-lib"
-version = "1.0.23"
-description = "Solana Archiver Library"
-authors = ["Solana Maintainers <maintainers@solana.com>"]
-repository = "https://github.com/solana-labs/solana"
-license = "Apache-2.0"
-homepage = "https://solana.com/"
-edition = "2018"
-
-[dependencies]
-bincode = "1.2.1"
-crossbeam-channel = "0.3"
-ed25519-dalek = "=1.0.0-pre.3"
-log = "0.4.8"
-rand = "0.7.0"
-rand_chacha = "0.2.2"
-solana-client = { path = "../client", version = "1.0.23" }
-solana-storage-program = { path = "../programs/storage", version = "1.0.23" }
-thiserror = "1.0"
-serde = "1.0.104"
-serde_json = "1.0.46"
-serde_derive = "1.0.103"
-solana-net-utils = { path = "../net-utils", version = "1.0.23" }
-solana-chacha = { path = "../chacha", version = "1.0.23" }
-solana-chacha-sys = { path = "../chacha-sys", version = "1.0.23" }
-solana-ledger = { path = "../ledger", version = "1.0.23" }
-solana-logger = { path = "../logger", version = "1.0.23" }
-solana-perf = { path = "../perf", version = "1.0.23" }
-solana-sdk = { path = "../sdk", version = "1.0.23" }
-solana-core = { path = "../core", version = "1.0.23" }
-solana-archiver-utils = { path = "../archiver-utils", version = "1.0.23" }
-solana-metrics = { path = "../metrics", version = "1.0.23" }
-
-[dev-dependencies]
-hex = "0.4.0"
-
-[lib]
-name = "solana_archiver_lib"
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
````
archiver-lib/src/lib.rs

````diff
@@ -1,11 +0,0 @@
-#[macro_use]
-extern crate log;
-
-#[macro_use]
-extern crate serde_derive;
-
-#[macro_use]
-extern crate solana_metrics;
-
-pub mod archiver;
-mod result;
````
archiver-lib/src/result.rs

````diff
@@ -1,47 +0,0 @@
-use solana_client::client_error;
-use solana_ledger::blockstore;
-use solana_sdk::transport;
-use std::any::Any;
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum ArchiverError {
-    #[error("IO error")]
-    IO(#[from] std::io::Error),
-
-    #[error("blockstore error")]
-    BlockstoreError(#[from] blockstore::BlockstoreError),
-
-    #[error("crossbeam error")]
-    CrossbeamSendError(#[from] crossbeam_channel::SendError<u64>),
-
-    #[error("send error")]
-    SendError(#[from] std::sync::mpsc::SendError<u64>),
-
-    #[error("join error")]
-    JoinError(Box<dyn Any + Send + 'static>),
-
-    #[error("transport error")]
-    TransportError(#[from] transport::TransportError),
-
-    #[error("client error")]
-    ClientError(#[from] client_error::ClientError),
-
-    #[error("Json parsing error")]
-    JsonError(#[from] serde_json::error::Error),
-
-    #[error("Storage account has no balance")]
-    EmptyStorageAccountBalance,
-
-    #[error("No RPC peers..")]
-    NoRpcPeers,
-
-    #[error("Couldn't download full segment")]
-    SegmentDownloadError,
-}
-
-impl std::convert::From<Box<dyn Any + Send + 'static>> for ArchiverError {
-    fn from(e: Box<dyn Any + Send + 'static>) -> ArchiverError {
-        ArchiverError::JoinError(e)
-    }
-}
````
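The deleted `ArchiverError` enum follows the standard `thiserror` pattern: each `#[from]` attribute derives a `From` impl, so the `?` operator converts library errors into the enum automatically. A small self-contained sketch (mine, not repo code) of the same pattern:

```rust
use thiserror::Error;

#[derive(Error, Debug)]
pub enum DemoError {
    #[error("IO error")]
    IO(#[from] std::io::Error),
}

fn read_len(path: &str) -> Result<u64, DemoError> {
    // std::fs::metadata returns io::Result; `?` applies From<io::Error>.
    Ok(std::fs::metadata(path)?.len())
}

fn main() {
    match read_len("/no/such/file") {
        Ok(n) => println!("len = {n}"),
        Err(e) => println!("error: {e}"), // prints "IO error"
    }
}
```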
archiver-utils/Cargo.toml

````diff
@@ -1,28 +0,0 @@
-[package]
-name = "solana-archiver-utils"
-version = "1.0.23"
-description = "Solana Archiver Utils"
-authors = ["Solana Maintainers <maintainers@solana.com>"]
-repository = "https://github.com/solana-labs/solana"
-license = "Apache-2.0"
-homepage = "https://solana.com/"
-edition = "2018"
-
-[dependencies]
-log = "0.4.8"
-rand = "0.7.0"
-solana-chacha = { path = "../chacha", version = "1.0.23" }
-solana-chacha-sys = { path = "../chacha-sys", version = "1.0.23" }
-solana-ledger = { path = "../ledger", version = "1.0.23" }
-solana-logger = { path = "../logger", version = "1.0.23" }
-solana-perf = { path = "../perf", version = "1.0.23" }
-solana-sdk = { path = "../sdk", version = "1.0.23" }
-
-[dev-dependencies]
-hex = "0.4.0"
-
-[lib]
-name = "solana_archiver_utils"
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
````
@@ -1,120 +0,0 @@
#[macro_use]
extern crate log;

use solana_sdk::hash::{Hash, Hasher};
use std::fs::File;
use std::io::{self, BufReader, ErrorKind, Read, Seek, SeekFrom};
use std::mem::size_of;
use std::path::Path;

pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> {
    let in_file = File::open(in_path)?;
    let metadata = in_file.metadata()?;
    let mut buffer_file = BufReader::new(in_file);

    let mut hasher = Hasher::default();
    let sample_size = size_of::<Hash>();
    let sample_size64 = sample_size as u64;
    let mut buf = vec![0; sample_size];

    let file_len = metadata.len();
    if file_len < sample_size64 {
        return Err(io::Error::new(ErrorKind::Other, "file too short!"));
    }
    for offset in sample_offsets {
        if *offset > (file_len - sample_size64) / sample_size64 {
            return Err(io::Error::new(ErrorKind::Other, "offset too large"));
        }
        buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?;
        trace!("sampling @ {} ", *offset);
        match buffer_file.read(&mut buf) {
            Ok(size) => {
                assert_eq!(size, buf.len());
                hasher.hash(&buf);
            }
            Err(e) => {
                warn!("Error sampling file");
                return Err(e);
            }
        }
    }

    Ok(hasher.result())
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::{thread_rng, Rng};
    use std::fs::{create_dir_all, remove_file};
    use std::io::Write;
    use std::path::PathBuf;

    extern crate hex;

    fn tmp_file_path(name: &str) -> PathBuf {
        use std::env;
        let out_dir = env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
        let mut rand_bits = [0u8; 32];
        thread_rng().fill(&mut rand_bits[..]);

        let mut path = PathBuf::new();
        path.push(out_dir);
        path.push("tmp");
        create_dir_all(&path).unwrap();

        path.push(format!("{}-{:?}", name, hex::encode(rand_bits)));
        println!("path: {:?}", path);
        path
    }

    #[test]
    fn test_sample_file() {
        solana_logger::setup();
        let in_path = tmp_file_path("test_sample_file_input.txt");
        let num_strings = 4096;
        let string = "12foobar";
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..num_strings {
                in_file.write(string.as_bytes()).unwrap();
            }
        }
        let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64;
        let samples: Vec<_> = (0..num_samples).collect();
        let res = sample_file(&in_path, samples.as_slice());
        let ref_hash: Hash = Hash::new(&[
            173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144,
            151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52,
        ]);
        let res = res.unwrap();
        assert_eq!(res, ref_hash);

        // Sample just past the end
        assert!(sample_file(&in_path, &[num_samples]).is_err());
        remove_file(&in_path).unwrap();
    }

    #[test]
    fn test_sample_file_invalid_offset() {
        let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt");
        {
            let mut in_file = File::create(&in_path).unwrap();
            for _ in 0..4096 {
                in_file.write("123456foobar".as_bytes()).unwrap();
            }
        }
        let samples = [0, 200000];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
        remove_file(in_path).unwrap();
    }

    #[test]
    fn test_sample_file_missing_file() {
        let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt");
        let samples = [0, 5];
        let res = sample_file(&in_path, &samples);
        assert!(res.is_err());
    }
}
@@ -1,23 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
console = "0.9.2"
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-core = { path = "../core", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-metrics = { path = "../metrics", version = "1.0.23" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.23" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,131 +0,0 @@
use clap::{crate_description, crate_name, App, Arg};
use console::style;
use solana_archiver_lib::archiver::Archiver;
use solana_clap_utils::{
    input_parsers::keypair_of, input_validators::is_keypair_or_ask_keyword,
    keypair::SKIP_SEED_PHRASE_VALIDATION_ARG,
};
use solana_core::{
    cluster_info::{Node, VALIDATOR_PORT_RANGE},
    contact_info::ContactInfo,
};
use solana_sdk::{
    commitment_config::CommitmentConfig,
    signature::{Keypair, Signer},
};
use std::{
    net::{IpAddr, Ipv4Addr, SocketAddr},
    path::PathBuf,
    sync::Arc,
};

fn main() {
    solana_logger::setup();

    let matches = App::new(crate_name!())
        .about(crate_description!())
        .version(solana_clap_utils::version!())
        .arg(
            Arg::with_name("identity_keypair")
                .short("i")
                .long("identity")
                .value_name("PATH")
                .takes_value(true)
                .validator(is_keypair_or_ask_keyword)
                .help("File containing an identity (keypair)"),
        )
        .arg(
            Arg::with_name("entrypoint")
                .short("n")
                .long("entrypoint")
                .value_name("HOST:PORT")
                .takes_value(true)
                .required(true)
                .validator(solana_net_utils::is_host_port)
                .help("Rendezvous with the cluster at this entry point"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .arg(
            Arg::with_name("storage_keypair")
                .short("s")
                .long("storage-keypair")
                .value_name("PATH")
                .takes_value(true)
                .validator(is_keypair_or_ask_keyword)
                .help("File containing the storage account keypair"),
        )
        .arg(
            Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
                .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
                .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
        )
        .get_matches();

    let ledger_path = PathBuf::from(matches.value_of("ledger").unwrap());

    let identity_keypair = keypair_of(&matches, "identity_keypair").unwrap_or_else(Keypair::new);

    let storage_keypair = keypair_of(&matches, "storage_keypair").unwrap_or_else(|| {
        clap::Error::with_description(
            "The `storage-keypair` argument was not found",
            clap::ErrorKind::ArgumentNotFound,
        )
        .exit();
    });

    let entrypoint_addr = matches
        .value_of("entrypoint")
        .map(|entrypoint| {
            solana_net_utils::parse_host_port(entrypoint)
                .expect("failed to parse entrypoint address")
        })
        .unwrap();

    let gossip_addr = {
        let ip = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap();
        let mut addr = SocketAddr::new(ip, 0);
        addr.set_ip(solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap());
        addr
    };
    let node = Node::new_archiver_with_external_ip(
        &identity_keypair.pubkey(),
        &gossip_addr,
        VALIDATOR_PORT_RANGE,
        IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
    );

    println!(
        "{} version {} (branch={}, commit={})",
        style(crate_name!()).bold(),
        solana_clap_utils::version!(),
        option_env!("CI_BRANCH").unwrap_or("unknown"),
        option_env!("CI_COMMIT").unwrap_or("unknown")
    );
    solana_metrics::set_host_id(identity_keypair.pubkey().to_string());
    println!(
        "replicating the data with identity_keypair={:?} gossip_addr={:?}",
        identity_keypair.pubkey(),
        gossip_addr
    );

    let entrypoint_info = ContactInfo::new_gossip_entry_point(&entrypoint_addr);
    let archiver = Archiver::new(
        &ledger_path,
        node,
        entrypoint_info,
        Arc::new(identity_keypair),
        Arc::new(storage_keypair),
        CommitmentConfig::recent(),
    )
    .unwrap();

    archiver.join();
}
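The entrypoint handling above leans on two net-utils helpers; a condensed sketch of the same flow, assuming (as the .expect and .unwrap calls above imply) that parse_host_port and get_public_ip_addr both return Results:

use std::net::SocketAddr;

// Sketch only: parse the --entrypoint string, then advertise our public IP
// with port 0 so the OS assigns the gossip port (mirrors the gossip_addr
// block in main above).
fn gossip_addr_for(entrypoint: &str) -> SocketAddr {
    let entrypoint_addr = solana_net_utils::parse_host_port(entrypoint)
        .expect("failed to parse entrypoint address");
    let ip = solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap();
    SocketAddr::new(ip, 0)
}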
@@ -1,23 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "1.0.23" }
solana-ledger = { path = "../ledger", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-runtime = { path = "../runtime", version = "1.0.23" }
solana-measure = { path = "../measure", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
rand = "0.7.0"
crossbeam-channel = "0.3"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,306 +0,0 @@
use crossbeam_channel::unbounded;
use log::*;
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana_core::banking_stage::{create_test_recorder, BankingStage};
use solana_core::cluster_info::ClusterInfo;
use solana_core::cluster_info::Node;
use solana_core::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_core::packet::to_packets_chunked;
use solana_core::poh_recorder::PohRecorder;
use solana_core::poh_recorder::WorkingBankEntry;
use solana_ledger::bank_forks::BankForks;
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use solana_measure::measure::Measure;
use solana_runtime::bank::Bank;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signature;
use solana_sdk::system_transaction;
use solana_sdk::timing::{duration_as_us, timestamp};
use solana_sdk::transaction::Transaction;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::sleep;
use std::time::{Duration, Instant};

fn check_txs(
    receiver: &Arc<Receiver<WorkingBankEntry>>,
    ref_tx_count: usize,
    poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> bool {
    let mut total = 0;
    let now = Instant::now();
    let mut no_bank = false;
    loop {
        if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::from_millis(10))
        {
            total += entry.transactions.len();
        }
        if total >= ref_tx_count {
            break;
        }
        if now.elapsed().as_secs() > 60 {
            break;
        }
        if poh_recorder.lock().unwrap().bank().is_none() {
            trace!("no bank");
            no_bank = true;
            break;
        }
    }
    if !no_bank {
        assert!(total >= ref_tx_count);
    }
    no_bank
}

fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Transaction> {
    let to_pubkey = Pubkey::new_rand();
    let dummy = system_transaction::transfer(mint_keypair, &to_pubkey, 1, hash);
    (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            new.message.account_keys[0] = Pubkey::new_rand();
            new.message.account_keys[1] = Pubkey::new_rand();
            new.signatures = vec![Signature::new(&sig[0..64])];
            new
        })
        .collect()
}

struct Config {
    packets_per_batch: usize,
    chunk_len: usize,
    num_threads: usize,
}

impl Config {
    fn get_transactions_index(&self, chunk_index: usize) -> usize {
        chunk_index * (self.chunk_len / self.num_threads) * self.packets_per_batch
    }
}
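For concreteness, with the constants used below in main() (PACKETS_PER_BATCH = 192, CHUNKS = 8 * 2 = 16) and, say, four banking threads, to_packets_chunked yields 64 packet batches, chunk_len is 64 / 16 = 4, and each chunk index therefore strides one full batch of 192 transactions. A small illustrative check (the thread count is an assumption for the example):

fn demo_transactions_index() {
    // 4 batches per chunk spread over 4 threads -> one 192-transaction batch per step.
    let config = Config { packets_per_batch: 192, chunk_len: 4, num_threads: 4 };
    assert_eq!(config.get_transactions_index(3), 3 * 192);
}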
fn bytes_as_usize(bytes: &[u8]) -> usize {
    bytes[0] as usize | (bytes[1] as usize) << 8
}
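bytes_as_usize decodes the first two bytes little-endian, so the second byte is the high byte; a quick illustration (not part of the original file):

fn demo_bytes_as_usize() {
    // 0x34 | (0x12 << 8) == 0x1234
    assert_eq!(bytes_as_usize(&[0x34, 0x12]), 0x1234);
}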
fn main() {
    solana_logger::setup();
    let num_threads = BankingStage::num_threads() as usize;
    // a multiple of packet chunk duplicates to avoid races
    const CHUNKS: usize = 8 * 2;
    const PACKETS_PER_BATCH: usize = 192;
    let txes = PACKETS_PER_BATCH * num_threads * CHUNKS;
    let mint_total = 1_000_000_000_000;
    let GenesisConfigInfo {
        genesis_config,
        mint_keypair,
        ..
    } = create_genesis_config(mint_total);

    let (verified_sender, verified_receiver) = unbounded();
    let (vote_sender, vote_receiver) = unbounded();
    let bank0 = Bank::new(&genesis_config);
    let mut bank_forks = BankForks::new(0, bank0);
    let mut bank = bank_forks.working_bank();

    info!("threads: {} txs: {}", num_threads, txes);

    let mut transactions = make_accounts_txs(txes, &mint_keypair, genesis_config.hash());

    // fund all the accounts
    transactions.iter().for_each(|tx| {
        let fund = system_transaction::transfer(
            &mint_keypair,
            &tx.message.account_keys[0],
            mint_total / txes as u64,
            genesis_config.hash(),
        );
        let x = bank.process_transaction(&fund);
        x.unwrap();
    });
    //sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    //sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
    let ledger_path = get_tmp_ledger_path!();
    {
        let blockstore = Arc::new(
            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
        );
        let (exit, poh_recorder, poh_service, signal_receiver) =
            create_test_recorder(&bank, &blockstore, None);
        let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
        let cluster_info = Arc::new(RwLock::new(cluster_info));
        let banking_stage = BankingStage::new(
            &cluster_info,
            &poh_recorder,
            verified_receiver,
            vote_receiver,
            None,
        );
        poh_recorder.lock().unwrap().set_bank(&bank);

        let chunk_len = verified.len() / CHUNKS;
        let mut start = 0;

        // This is so that the signal_receiver does not go out of scope after the closure.
        // If it is dropped before poh_service, then poh_service will error when
        // calling send() on the channel.
        let signal_receiver = Arc::new(signal_receiver);
        let mut total_us = 0;
        let mut tx_total_us = 0;
        let mut txs_processed = 0;
        let mut root = 1;
        let collector = Pubkey::new_rand();
        const ITERS: usize = 1_000;
        let config = Config {
            packets_per_batch: PACKETS_PER_BATCH,
            chunk_len,
            num_threads,
        };
        let mut total_sent = 0;
        for _ in 0..ITERS {
            let now = Instant::now();
            let mut sent = 0;

            for (i, v) in verified[start..start + chunk_len]
                .chunks(chunk_len / num_threads)
                .enumerate()
            {
                let mut byte = 0;
                let index = config.get_transactions_index(start + i);
                if index < transactions.len() {
                    byte = bytes_as_usize(transactions[index].signatures[0].as_ref());
                }
                trace!(
                    "sending... {}..{} {} v.len: {} sig: {} transactions.len: {} index: {}",
                    start + i,
                    start + chunk_len,
                    timestamp(),
                    v.len(),
                    byte,
                    transactions.len(),
                    index,
                );
                for xv in v {
                    sent += xv.packets.len();
                }
                verified_sender.send(v.to_vec()).unwrap();
            }
            let start_tx_index = config.get_transactions_index(start);
            let end_tx_index = config.get_transactions_index(start + chunk_len);
            for tx in &transactions[start_tx_index..end_tx_index] {
                loop {
                    if bank.get_signature_status(&tx.signatures[0]).is_some() {
                        break;
                    }
                    if poh_recorder.lock().unwrap().bank().is_none() {
                        break;
                    }
                    sleep(Duration::from_millis(5));
                }
            }
            if check_txs(&signal_receiver, txes / CHUNKS, &poh_recorder) {
                debug!(
                    "resetting bank {} tx count: {} txs_proc: {}",
                    bank.slot(),
                    bank.transaction_count(),
                    txs_processed
                );
                assert!(txs_processed < bank.transaction_count());
                txs_processed = bank.transaction_count();
                tx_total_us += duration_as_us(&now.elapsed());

                let mut poh_time = Measure::start("poh_time");
                poh_recorder.lock().unwrap().reset(
                    bank.last_blockhash(),
                    bank.slot(),
                    Some((bank.slot(), bank.slot() + 1)),
                );
                poh_time.stop();

                let mut new_bank_time = Measure::start("new_bank");
                let new_bank = Bank::new_from_parent(&bank, &collector, bank.slot() + 1);
                new_bank_time.stop();

                let mut insert_time = Measure::start("insert_time");
                bank_forks.insert(new_bank);
                bank = bank_forks.working_bank();
                insert_time.stop();

                poh_recorder.lock().unwrap().set_bank(&bank);
                assert!(poh_recorder.lock().unwrap().bank().is_some());
                if bank.slot() > 32 {
                    bank_forks.set_root(root, &None, None);
                    root += 1;
                }
                debug!(
                    "new_bank_time: {}us insert_time: {}us poh_time: {}us",
                    new_bank_time.as_us(),
                    insert_time.as_us(),
                    poh_time.as_us(),
                );
            } else {
                tx_total_us += duration_as_us(&now.elapsed());
            }

            // This signature clear may not actually clear the signatures
            // in this chunk, but since we rotate between CHUNKS then
            // we should clear them by the time we come around again to re-use that chunk.
            bank.clear_signatures();
            total_us += duration_as_us(&now.elapsed());
            debug!(
                "time: {} us checked: {} sent: {}",
                duration_as_us(&now.elapsed()),
                txes / CHUNKS,
                sent,
            );
            total_sent += sent;

            if bank.slot() > 0 && bank.slot() % 16 == 0 {
                for tx in transactions.iter_mut() {
                    tx.message.recent_blockhash = bank.last_blockhash();
                    let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
                    tx.signatures[0] = Signature::new(&sig[0..64]);
                }
                verified = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
            }

            start += chunk_len;
            start %= verified.len();
        }
        eprintln!(
            "{{'name': 'banking_bench_total', 'median': '{}'}}",
            (1000.0 * 1000.0 * total_sent as f64) / (total_us as f64),
        );
        eprintln!(
            "{{'name': 'banking_bench_tx_total', 'median': '{}'}}",
            (1000.0 * 1000.0 * total_sent as f64) / (tx_total_us as f64),
        );

        drop(verified_sender);
        drop(vote_sender);
        exit.store(true, Ordering::Relaxed);
        banking_stage.join().unwrap();
        debug!("waited for banking_stage");
        poh_service.join().unwrap();
        sleep(Duration::from_secs(1));
        debug!("waited for poh_service");
    }
    let _unused = Blockstore::destroy(&ledger_path);
}
@@ -2,36 +2,42 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "1.0.23"
+version = "0.18.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 publish = false

 [dependencies]
+bincode = "1.1.4"
+bs58 = "0.2.4"
 clap = "2.32.0"
-itertools = "0.8.2"
+env_logger = "0.6.2"
+itertools = "0.8.0"
 log = "0.4.8"
-num-derive = "0.3"
+num-derive = "0.2"
 num-traits = "0.2"
-rand = "0.7.0"
-rayon = "1.2.0"
-serde_json = "1.0.46"
-serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
-solana-core = { path = "../core", version = "1.0.23" }
-solana-genesis = { path = "../genesis", version = "1.0.23" }
-solana-client = { path = "../client", version = "1.0.23" }
-solana-faucet = { path = "../faucet", version = "1.0.23" }
-solana-exchange-program = { path = "../programs/exchange", version = "1.0.23" }
-solana-logger = { path = "../logger", version = "1.0.23" }
-solana-metrics = { path = "../metrics", version = "1.0.23" }
-solana-net-utils = { path = "../net-utils", version = "1.0.23" }
-solana-runtime = { path = "../runtime", version = "1.0.23" }
-solana-sdk = { path = "../sdk", version = "1.0.23" }
+rand = "0.6.5"
+rayon = "1.1.0"
+serde = "1.0.99"
+serde_derive = "1.0.99"
+serde_json = "1.0.40"
+serde_yaml = "0.8.9"
+# solana-runtime = { path = "../solana/runtime"}
+solana-core = { path = "../core", version = "0.18.2" }
+solana-local-cluster = { path = "../local_cluster", version = "0.18.2" }
+solana-client = { path = "../client", version = "0.18.2" }
+solana-drone = { path = "../drone", version = "0.18.2" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.18.2" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.18.2" }
+solana-logger = { path = "../logger", version = "0.18.2" }
+solana-metrics = { path = "../metrics", version = "0.18.2" }
+solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
+solana-runtime = { path = "../runtime", version = "0.18.2" }
+solana-sdk = { path = "../sdk", version = "0.18.2" }
+untrusted = "0.7.0"
+ws = "0.9.0"

-[dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "1.0.23" }
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
+[features]
+cuda = ["solana-core/cuda"]
@@ -360,7 +360,7 @@ The Matcher would initiate the following last swap:

 - Row 1, To: Investor 1 trades 2 A token to 12 B tokens
 - Row 1, From: Investor 2 trades 2 A token from 12 B tokens
-- Matcher takes 2 B tokens as profit
+- Matcher takes 4 B tokens as profit

 Table becomes:

@@ -7,36 +7,31 @@ use rand::{thread_rng, Rng};
 use rayon::prelude::*;
 use solana_client::perf_utils::{sample_txs, SampleStats};
 use solana_core::gen_keys::GenKeys;
-use solana_exchange_program::{exchange_instruction, exchange_state::*, id};
-use solana_faucet::faucet::request_airdrop_transaction;
-use solana_genesis::Base64Account;
+use solana_drone::drone::request_airdrop_transaction;
+use solana_exchange_api::exchange_instruction;
+use solana_exchange_api::exchange_state::*;
+use solana_exchange_api::id;
 use solana_metrics::datapoint_info;
-use solana_sdk::{
-    client::{Client, SyncClient},
-    commitment_config::CommitmentConfig,
-    pubkey::Pubkey,
-    signature::{Keypair, Signer},
-    timing::{duration_as_ms, duration_as_s},
-    transaction::Transaction,
-    {system_instruction, system_program},
-};
-use std::{
-    cmp,
-    collections::{HashMap, VecDeque},
-    fs::File,
-    io::prelude::*,
-    mem,
-    net::SocketAddr,
-    path::Path,
-    process::exit,
-    sync::{
-        atomic::{AtomicBool, AtomicUsize, Ordering},
-        mpsc::{channel, Receiver, Sender},
-        Arc, RwLock,
-    },
-    thread::{sleep, Builder},
-    time::{Duration, Instant},
-};
+use solana_sdk::client::Client;
+use solana_sdk::client::SyncClient;
+use solana_sdk::pubkey::Pubkey;
+use solana_sdk::signature::{Keypair, KeypairUtil};
+use solana_sdk::system_instruction;
+use solana_sdk::timing::{duration_as_ms, duration_as_s};
+use solana_sdk::transaction::Transaction;
+use std::cmp;
+use std::collections::{HashMap, VecDeque};
+use std::fs::File;
+use std::io::prelude::*;
+use std::mem;
+use std::net::SocketAddr;
+use std::path::Path;
+use std::process::exit;
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::mpsc::{channel, Receiver, Sender};
+use std::sync::{Arc, RwLock};
+use std::thread::{sleep, Builder};
+use std::time::{Duration, Instant};

 // TODO Chunk length as specified results in a bunch of failures, divide by 10 helps...
 // Assume 4MB network buffers, and 512 byte packets
@@ -93,12 +88,7 @@ pub fn create_client_accounts_file(
     keypairs.iter().for_each(|keypair| {
         accounts.insert(
             serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
-            Base64Account {
-                balance: fund_amount,
-                executable: false,
-                owner: system_program::id().to_string(),
-                data: String::new(),
-            },
+            fund_amount,
         );
     });

@@ -144,7 +134,7 @@ where
     let path = Path::new(&client_ids_and_stake_file);
     let file = File::open(path).unwrap();

-    let accounts: HashMap<String, Base64Account> = serde_yaml::from_reader(file).unwrap();
+    let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
     accounts
         .into_iter()
         .map(|(keypair, _)| {
@@ -178,28 +168,19 @@ where

     info!("Generating {:?} account keys", total_keys);
     let mut account_keypairs = generate_keypairs(total_keys);
-    let src_keypairs: Vec<_> = account_keypairs
+    let src_pubkeys: Vec<_> = account_keypairs
         .drain(0..accounts_in_groups)
-        .map(|keypair| keypair)
-        .collect();
-    let src_pubkeys: Vec<Pubkey> = src_keypairs
-        .iter()
         .map(|keypair| keypair.pubkey())
         .collect();
-
-    let profit_keypairs: Vec<_> = account_keypairs
+    let profit_pubkeys: Vec<_> = account_keypairs
         .drain(0..accounts_in_groups)
-        .map(|keypair| keypair)
-        .collect();
-    let profit_pubkeys: Vec<Pubkey> = profit_keypairs
-        .iter()
         .map(|keypair| keypair.pubkey())
         .collect();

     info!("Create {:?} source token accounts", src_pubkeys.len());
-    create_token_accounts(client, &trader_signers, &src_keypairs);
+    create_token_accounts(client, &trader_signers, &src_pubkeys);
     info!("Create {:?} profit token accounts", profit_pubkeys.len());
-    create_token_accounts(client, &swapper_signers, &profit_keypairs);
+    create_token_accounts(client, &swapper_signers, &profit_pubkeys);

     // Collect the max transaction rate and total tx count seen (single node only)
     let sample_stats = Arc::new(RwLock::new(Vec::new()));
@@ -256,7 +237,7 @@ where
     trace!("Start trader thread");
     let trader_thread = {
         let exit_signal = exit_signal.clone();
+        let shared_txs = shared_txs.clone();
         let client = clients[0].clone();
         Builder::new()
             .name("solana-exchange-trader".to_string())
@@ -393,10 +374,7 @@ fn swapper<T>(
     let mut tries = 0;
     let mut trade_index = 0;
     while client
-        .get_balance_with_commitment(
-            &trade_infos[trade_index].trade_account,
-            CommitmentConfig::recent(),
-        )
+        .get_balance(&trade_infos[trade_index].trade_account)
         .unwrap_or(0)
         == 0
     {
@@ -450,7 +428,7 @@ fn swapper<T>(
         account_group = (account_group + 1) % account_groups as usize;

         let (blockhash, _fee_calculator) = client
-            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
+            .get_recent_blockhash()
             .expect("Failed to get blockhash");
         let to_swap_txs: Vec<_> = to_swap
             .par_iter()
@@ -573,39 +551,27 @@ fn trader<T>(
                 trade_account: trade.pubkey(),
                 order_info,
             });
-            trades.push((signer, trade, side, src));
+            trades.push((signer, trade.pubkey(), side, src));
         }
         account_group = (account_group + 1) % account_groups as usize;

         let (blockhash, _fee_calculator) = client
-            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
+            .get_recent_blockhash()
             .expect("Failed to get blockhash");

         trades.chunks(chunk_size).for_each(|chunk| {
             let trades_txs: Vec<_> = chunk
                 .par_iter()
-                .map(|(owner, trade, side, src)| {
-                    let owner_pubkey = &owner.pubkey();
-                    let trade_pubkey = &trade.pubkey();
+                .map(|(signer, trade, side, src)| {
+                    let s: &Keypair = &signer;
+                    let owner = &signer.pubkey();
                     let space = mem::size_of::<ExchangeState>() as u64;
                     Transaction::new_signed_instructions(
-                        &[owner.as_ref(), trade],
+                        &[s],
                         vec![
-                            system_instruction::create_account(
-                                owner_pubkey,
-                                trade_pubkey,
-                                1,
-                                space,
-                                &id(),
-                            ),
+                            system_instruction::create_account(owner, trade, 1, space, &id()),
                             exchange_instruction::trade_request(
-                                owner_pubkey,
-                                trade_pubkey,
-                                *side,
-                                pair,
-                                tokens,
-                                price,
-                                src,
+                                owner, trade, *side, pair, tokens, price, src,
                             ),
                         ],
                         blockhash,
@@ -661,14 +627,12 @@ fn trader<T>(
         }
     }

-fn verify_transaction<T>(sync_client: &T, tx: &Transaction) -> bool
+fn verify_transfer<T>(sync_client: &T, tx: &Transaction) -> bool
 where
     T: SyncClient + ?Sized,
 {
     for s in &tx.signatures {
-        if let Ok(Some(r)) =
-            sync_client.get_signature_status_with_commitment(s, CommitmentConfig::recent())
-        {
+        if let Ok(Some(r)) = sync_client.get_signature_status(s) {
             match r {
                 Ok(_) => {
                     return true;
@@ -687,21 +651,16 @@ fn verify_funding_transfer<T: SyncClient + ?Sized>(
     tx: &Transaction,
     amount: u64,
 ) -> bool {
-    if verify_transaction(client, tx) {
-        for a in &tx.message().account_keys[1..] {
-            if client
-                .get_balance_with_commitment(a, CommitmentConfig::recent())
-                .unwrap_or(0)
-                >= amount
-            {
-                return true;
-            }
+    for a in &tx.message().account_keys[1..] {
+        if client.get_balance(a).unwrap_or(0) >= amount {
+            return true;
         }
     }

     false
 }

-pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
+pub fn fund_keys(client: &dyn Client, source: &Keypair, dests: &[Arc<Keypair>], lamports: u64) {
     let total = lamports * (dests.len() as u64 + 1);
     let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
     let mut notfunded: Vec<&Arc<Keypair>> = dests.iter().collect();
@@ -775,9 +734,8 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
                 to_fund_txs.len(),
             );

-            let (blockhash, _fee_calculator) = client
-                .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
-                .expect("blockhash");
+            let (blockhash, _fee_calculator) =
+                client.get_recent_blockhash().expect("blockhash");
             to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
                 tx.sign(&[*k], blockhash);
             });
@@ -806,7 +764,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
                     retries += 1;
                     debug!(" Retry {:?}", retries);
                     if retries >= 10 {
-                        error!("fund_keys: Too many retries ({}), give up", retries);
+                        error!(" Too many retries, give up");
                         exit(1);
                     }
                 }
@@ -814,41 +772,27 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
         });
         funded.append(&mut new_funded);
         funded.retain(|(k, b)| {
-            client
-                .get_balance_with_commitment(&k.pubkey(), CommitmentConfig::recent())
-                .unwrap_or(0)
-                > lamports
-                && *b > lamports
+            client.get_balance(&k.pubkey()).unwrap_or(0) > lamports && *b > lamports
         });
         debug!(" Funded: {} left: {}", funded.len(), notfunded.len());
     }
 }

-pub fn create_token_accounts<T: Client>(
-    client: &T,
-    signers: &[Arc<Keypair>],
-    accounts: &[Keypair],
-) {
-    let mut notfunded: Vec<(&Arc<Keypair>, &Keypair)> = signers.iter().zip(accounts).collect();
+pub fn create_token_accounts(client: &dyn Client, signers: &[Arc<Keypair>], accounts: &[Pubkey]) {
+    let mut notfunded: Vec<(&Arc<Keypair>, &Pubkey)> = signers.iter().zip(accounts).collect();

     while !notfunded.is_empty() {
         notfunded.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
             let mut to_create_txs: Vec<_> = chunk
                 .par_iter()
-                .map(|(from_keypair, new_keypair)| {
-                    let owner_pubkey = &from_keypair.pubkey();
+                .map(|(signer, new)| {
+                    let owner_pubkey = &signer.pubkey();
                     let space = mem::size_of::<ExchangeState>() as u64;
-                    let create_ix = system_instruction::create_account(
-                        owner_pubkey,
-                        &new_keypair.pubkey(),
-                        1,
-                        space,
-                        &id(),
-                    );
-                    let request_ix =
-                        exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
+                    let create_ix =
+                        system_instruction::create_account(owner_pubkey, new, 1, space, &id());
+                    let request_ix = exchange_instruction::account_request(owner_pubkey, new);
                     (
-                        (from_keypair, new_keypair),
+                        signer,
                         Transaction::new_unsigned_instructions(vec![create_ix, request_ix]),
                     )
                 })
@@ -867,13 +811,12 @@ pub fn create_token_accounts<T: Client>(
             let mut retries = 0;
            while !to_create_txs.is_empty() {
                let (blockhash, _fee_calculator) = client
-                    .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
+                    .get_recent_blockhash()
                    .expect("Failed to get blockhash");
-                to_create_txs
-                    .par_iter_mut()
-                    .for_each(|((from_keypair, to_keypair), tx)| {
-                        tx.sign(&[from_keypair.as_ref(), to_keypair], blockhash);
-                    });
+                to_create_txs.par_iter_mut().for_each(|(k, tx)| {
+                    let kp: &Keypair = k;
+                    tx.sign(&[kp], blockhash);
+                });
                to_create_txs.iter().for_each(|(_, tx)| {
                    client.async_send_transaction(tx.clone()).expect("transfer");
                });
@@ -881,11 +824,11 @@ pub fn create_token_accounts<T: Client>(
                let mut waits = 0;
                while !to_create_txs.is_empty() {
                    sleep(Duration::from_millis(200));
-                    to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
+                    to_create_txs.retain(|(_, tx)| !verify_transfer(client, &tx));
                    if to_create_txs.is_empty() {
                        break;
                    }
-                    info!(
+                    debug!(
                        " {} transactions outstanding, waits {:?}",
                        to_create_txs.len(),
                        waits
@@ -898,25 +841,18 @@ pub fn create_token_accounts<T: Client>(

                if !to_create_txs.is_empty() {
                    retries += 1;
-                    info!(" Retry {:?} {} txes left", retries, to_create_txs.len());
+                    debug!(" Retry {:?}", retries);
                    if retries >= 20 {
-                        error!(
-                            "create_token_accounts: Too many retries ({}), give up",
-                            retries
-                        );
+                        error!(" Too many retries, give up");
                        exit(1);
                    }
                }
            }
        });

-        let mut new_notfunded: Vec<(&Arc<Keypair>, &Keypair)> = vec![];
+        let mut new_notfunded: Vec<(&Arc<Keypair>, &Pubkey)> = vec![];
        for f in &notfunded {
-            if client
-                .get_balance_with_commitment(&f.1.pubkey(), CommitmentConfig::recent())
-                .unwrap_or(0)
-                == 0
-            {
+            if client.get_balance(&f.1).unwrap_or(0) == 0 {
                new_notfunded.push(*f)
            }
        }
@@ -972,13 +908,8 @@ fn generate_keypairs(num: u64) -> Vec<Keypair> {
     rnd.gen_n_keypairs(num)
 }

-pub fn airdrop_lamports<T: Client>(
-    client: &T,
-    faucet_addr: &SocketAddr,
-    id: &Keypair,
-    amount: u64,
-) {
-    let balance = client.get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent());
+pub fn airdrop_lamports(client: &dyn Client, drone_addr: &SocketAddr, id: &Keypair, amount: u64) {
+    let balance = client.get_balance(&id.pubkey());
     let balance = balance.unwrap_or(0);
     if balance >= amount {
         return;
@@ -989,49 +920,142 @@ pub fn airdrop_lamports<T: Client>(
     info!(
         "Airdropping {:?} lamports from {} for {}",
         amount_to_drop,
-        faucet_addr,
+        drone_addr,
         id.pubkey(),
     );

     let mut tries = 0;
     loop {
         let (blockhash, _fee_calculator) = client
-            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
+            .get_recent_blockhash()
             .expect("Failed to get blockhash");
-        match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
+        match request_airdrop_transaction(&drone_addr, &id.pubkey(), amount_to_drop, blockhash) {
             Ok(transaction) => {
                 let signature = client.async_send_transaction(transaction).unwrap();

                 for _ in 0..30 {
-                    if let Ok(Some(_)) = client.get_signature_status_with_commitment(
-                        &signature,
-                        CommitmentConfig::recent(),
-                    ) {
+                    if let Ok(Some(_)) = client.get_signature_status(&signature) {
                         break;
                     }
                     sleep(Duration::from_millis(100));
                 }
-                if client
-                    .get_balance_with_commitment(&id.pubkey(), CommitmentConfig::recent())
-                    .unwrap_or(0)
-                    >= amount
-                {
+                if client.get_balance(&id.pubkey()).unwrap_or(0) >= amount {
                     break;
                 }
             }
             Err(err) => {
                 panic!(
                     "Error requesting airdrop: {:?} to addr: {:?} amount: {}",
-                    err, faucet_addr, amount
+                    err, drone_addr, amount
                 );
             }
         };
         debug!(" Retry...");
         tries += 1;
         if tries > 50 {
-            error!("airdrop_lamports: Too many retries ({}), give up", tries);
+            error!("Too many retries, give up");
             exit(1);
         }
         sleep(Duration::from_secs(2));
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use solana_core::gossip_service::{discover_cluster, get_multi_client};
+    use solana_core::validator::ValidatorConfig;
+    use solana_drone::drone::run_local_drone;
+    use solana_exchange_api::exchange_processor::process_instruction;
+    use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
+    use solana_runtime::bank::Bank;
+    use solana_runtime::bank_client::BankClient;
+    use solana_sdk::genesis_block::create_genesis_block;
+    use std::sync::mpsc::channel;
+
+    #[test]
+    fn test_exchange_local_cluster() {
+        solana_logger::setup();
+
+        const NUM_NODES: usize = 1;
+
+        let mut config = Config::default();
+        config.identity = Keypair::new();
+        config.duration = Duration::from_secs(1);
+        config.fund_amount = 100_000;
+        config.threads = 1;
+        config.transfer_delay = 20; // 15
+        config.batch_size = 100; // 1000;
+        config.chunk_size = 10; // 200;
+        config.account_groups = 1; // 10;
+        let Config {
+            fund_amount,
+            batch_size,
+            account_groups,
+            ..
+        } = config;
+        let accounts_in_groups = batch_size * account_groups;
+
+        let cluster = LocalCluster::new(&ClusterConfig {
+            node_stakes: vec![100_000; NUM_NODES],
+            cluster_lamports: 100_000_000_000_000,
+            validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
+            native_instruction_processors: [solana_exchange_program!()].to_vec(),
+            ..ClusterConfig::default()
+        });
+
+        let drone_keypair = Keypair::new();
+        cluster.transfer(
+            &cluster.funding_keypair,
+            &drone_keypair.pubkey(),
+            2_000_000_000_000,
+        );
+
+        let (addr_sender, addr_receiver) = channel();
+        run_local_drone(drone_keypair, addr_sender, Some(1_000_000_000_000));
+        let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
+
+        info!("Connecting to the cluster");
+        let (nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES)
+            .unwrap_or_else(|err| {
+                error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
+                exit(1);
+            });
+
+        let (client, num_clients) = get_multi_client(&nodes);
+
+        info!("clients: {}", num_clients);
+        assert!(num_clients >= NUM_NODES);
+
+        const NUM_SIGNERS: u64 = 2;
+        airdrop_lamports(
+            &client,
+            &drone_addr,
+            &config.identity,
+            fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
+        );
+
+        do_bench_exchange(vec![client], config);
+    }
+
+    #[test]
+    fn test_exchange_bank_client() {
+        solana_logger::setup();
+        let (genesis_block, identity) = create_genesis_block(100_000_000_000_000);
+        let mut bank = Bank::new(&genesis_block);
+        bank.add_instruction_processor(id(), process_instruction);
+        let clients = vec![BankClient::new(bank)];
+
+        let mut config = Config::default();
+        config.identity = identity;
+        config.duration = Duration::from_secs(1);
+        config.fund_amount = 100_000;
+        config.threads = 1;
+        config.transfer_delay = 20; // 0;
+        config.batch_size = 100; // 1500;
+        config.chunk_size = 10; // 1500;
+        config.account_groups = 1; // 50;
+
+        do_bench_exchange(clients, config);
+    }
+}
@@ -1,14 +1,14 @@
-use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
+use clap::{crate_description, crate_name, crate_version, value_t, App, Arg, ArgMatches};
 use solana_core::gen_keys::GenKeys;
-use solana_faucet::faucet::FAUCET_PORT;
-use solana_sdk::signature::{read_keypair_file, Keypair};
+use solana_drone::drone::DRONE_PORT;
+use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
 use std::net::SocketAddr;
 use std::process::exit;
 use std::time::Duration;

 pub struct Config {
     pub entrypoint_addr: SocketAddr,
-    pub faucet_addr: SocketAddr,
+    pub drone_addr: SocketAddr,
     pub identity: Keypair,
     pub threads: usize,
     pub num_nodes: usize,
@@ -27,7 +27,7 @@ impl Default for Config {
     fn default() -> Self {
         Self {
             entrypoint_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
-            faucet_addr: SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)),
+            drone_addr: SocketAddr::from(([127, 0, 0, 1], DRONE_PORT)),
             identity: Keypair::new(),
             num_nodes: 1,
             threads: 4,
@@ -44,10 +44,10 @@ impl Default for Config {
     }
 }

-pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
+pub fn build_args<'a, 'b>() -> App<'a, 'b> {
     App::new(crate_name!())
         .about(crate_description!())
-        .version(version)
+        .version(crate_version!())
         .arg(
             Arg::with_name("entrypoint")
                 .short("n")
@@ -59,14 +59,14 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                 .help("Cluster entry point; defaults to 127.0.0.1:8001"),
         )
         .arg(
-            Arg::with_name("faucet")
+            Arg::with_name("drone")
                 .short("d")
-                .long("faucet")
+                .long("drone")
                 .value_name("HOST:PORT")
                 .takes_value(true)
                 .required(false)
                 .default_value("127.0.0.1:9900")
-                .help("Location of the faucet; defaults to 127.0.0.1:9900"),
+                .help("Location of the drone; defaults to 127.0.0.1:9900"),
         )
         .arg(
             Arg::with_name("identity")
@@ -166,22 +166,20 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
 pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     let mut args = Config::default();

-    args.entrypoint_addr = solana_net_utils::parse_host_port(
-        matches.value_of("entrypoint").unwrap(),
-    )
-    .unwrap_or_else(|e| {
-        eprintln!("failed to parse entrypoint address: {}", e);
-        exit(1)
-    });
-
-    args.faucet_addr = solana_net_utils::parse_host_port(matches.value_of("faucet").unwrap())
+    args.entrypoint_addr = solana_netutil::parse_host_port(matches.value_of("entrypoint").unwrap())
         .unwrap_or_else(|e| {
-            eprintln!("failed to parse faucet address: {}", e);
+            eprintln!("failed to parse entrypoint address: {}", e);
+            exit(1)
+        });
+
+    args.drone_addr = solana_netutil::parse_host_port(matches.value_of("drone").unwrap())
+        .unwrap_or_else(|e| {
+            eprintln!("failed to parse drone address: {}", e);
             exit(1)
         });

     if matches.is_present("identity") {
-        args.identity = read_keypair_file(matches.value_of("identity").unwrap())
+        args.identity = read_keypair(matches.value_of("identity").unwrap())
             .expect("can't read client identity");
     } else {
         args.identity = {
@@ -1,3 +0,0 @@
pub mod bench;
pub mod cli;
mod order_book;
@@ -2,21 +2,25 @@ pub mod bench;
 mod cli;
 pub mod order_book;

+#[cfg(test)]
+#[macro_use]
+extern crate solana_exchange_program;
+
 use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
 use log::*;
 use solana_core::gossip_service::{discover_cluster, get_multi_client};
-use solana_sdk::signature::Signer;
+use solana_sdk::signature::KeypairUtil;

 fn main() {
     solana_logger::setup();
     solana_metrics::set_panic_hook("bench-exchange");

-    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
+    let matches = cli::build_args().get_matches();
     let cli_config = cli::extract_args(&matches);

     let cli::Config {
         entrypoint_addr,
-        faucet_addr,
+        drone_addr,
         identity,
         threads,
         num_nodes,
@@ -54,7 +58,7 @@ fn main() {
         );
     } else {
         info!("Connecting to the cluster");
-        let (nodes, _archivers) =
+        let (nodes, _replicators) =
             discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
                 panic!("Failed to discover nodes");
             });
@@ -73,7 +77,7 @@ fn main() {
     const NUM_SIGNERS: u64 = 2;
     airdrop_lamports(
         &client,
-        &faucet_addr,
+        &drone_addr,
         &config.identity,
         fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
     );
@@ -1,7 +1,7 @@
 use itertools::EitherOrBoth::{Both, Left, Right};
 use itertools::Itertools;
 use log::*;
-use solana_exchange_program::exchange_state::*;
+use solana_exchange_api::exchange_state::*;
 use solana_sdk::pubkey::Pubkey;
 use std::cmp::Ordering;
 use std::collections::BinaryHeap;
@@ -1,103 +0,0 @@
-use log::*;
-use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
-use solana_core::gossip_service::{discover_cluster, get_multi_client};
-use solana_core::validator::ValidatorConfig;
-use solana_exchange_program::exchange_processor::process_instruction;
-use solana_exchange_program::id;
-use solana_exchange_program::solana_exchange_program;
-use solana_faucet::faucet::run_local_faucet;
-use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
-use solana_runtime::bank::Bank;
-use solana_runtime::bank_client::BankClient;
-use solana_sdk::genesis_config::create_genesis_config;
-use solana_sdk::signature::{Keypair, Signer};
-use std::process::exit;
-use std::sync::mpsc::channel;
-use std::time::Duration;
-
-#[test]
-#[ignore]
-fn test_exchange_local_cluster() {
-    solana_logger::setup();
-
-    const NUM_NODES: usize = 1;
-
-    let mut config = Config::default();
-    config.identity = Keypair::new();
-    config.duration = Duration::from_secs(1);
-    config.fund_amount = 100_000;
-    config.threads = 1;
-    config.transfer_delay = 20; // 15
-    config.batch_size = 100; // 1000;
-    config.chunk_size = 10; // 200;
-    config.account_groups = 1; // 10;
-    let Config {
-        fund_amount,
-        batch_size,
-        account_groups,
-        ..
-    } = config;
-    let accounts_in_groups = batch_size * account_groups;
-
-    let cluster = LocalCluster::new(&ClusterConfig {
-        node_stakes: vec![100_000; NUM_NODES],
-        cluster_lamports: 100_000_000_000_000,
-        validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
-        native_instruction_processors: [solana_exchange_program!()].to_vec(),
-        ..ClusterConfig::default()
-    });
-
-    let faucet_keypair = Keypair::new();
-    cluster.transfer(
-        &cluster.funding_keypair,
-        &faucet_keypair.pubkey(),
-        2_000_000_000_000,
-    );
-
-    let (addr_sender, addr_receiver) = channel();
-    run_local_faucet(faucet_keypair, addr_sender, Some(1_000_000_000_000));
-    let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
-
-    info!("Connecting to the cluster");
-    let (nodes, _) =
-        discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
-            error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
-            exit(1);
-        });
-
-    let (client, num_clients) = get_multi_client(&nodes);
-
-    info!("clients: {}", num_clients);
-    assert!(num_clients >= NUM_NODES);
-
-    const NUM_SIGNERS: u64 = 2;
-    airdrop_lamports(
-        &client,
-        &faucet_addr,
-        &config.identity,
-        fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
-    );
-
-    do_bench_exchange(vec![client], config);
-}
-
-#[test]
-fn test_exchange_bank_client() {
-    solana_logger::setup();
-    let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
-    let mut bank = Bank::new(&genesis_config);
-    bank.add_instruction_processor(id(), process_instruction);
-    let clients = vec![BankClient::new(bank)];
-
-    let mut config = Config::default();
-    config.identity = identity;
-    config.duration = Duration::from_secs(1);
-    config.fund_amount = 100_000;
-    config.threads = 1;
-    config.transfer_delay = 20; // 0;
-    config.batch_size = 100; // 1500;
-    config.chunk_size = 10; // 1500;
-    config.account_groups = 1; // 50;
-
-    do_bench_exchange(clients, config);
-}
@@ -2,17 +2,17 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "1.0.23"
+version = "0.18.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 
 [dependencies]
 clap = "2.33.0"
-solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
-solana-core = { path = "../core", version = "1.0.23" }
-solana-logger = { path = "../logger", version = "1.0.23" }
-solana-net-utils = { path = "../net-utils", version = "1.0.23" }
+solana-core = { path = "../core", version = "0.18.2" }
+solana-logger = { path = "../logger", version = "0.18.2" }
+solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
 
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
+[features]
+cuda = ["solana-core/cuda"]
@@ -1,5 +1,7 @@
-use clap::{crate_description, crate_name, App, Arg};
-use solana_core::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
+use clap::{crate_description, crate_name, crate_version, App, Arg};
+use solana_core::packet::PacketsRecycler;
+use solana_core::packet::{Packet, Packets, BLOB_SIZE, PACKET_DATA_SIZE};
+use solana_core::result::Result;
 use solana_core::streamer::{receiver, PacketReceiver};
 use std::cmp::max;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
@@ -7,7 +9,7 @@ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::Arc;
 use std::thread::sleep;
-use std::thread::{spawn, JoinHandle, Result};
+use std::thread::{spawn, JoinHandle};
 use std::time::Duration;
 use std::time::SystemTime;
 
@@ -27,7 +29,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
     let mut num = 0;
     for p in &msgs.packets {
         let a = p.meta.addr();
-        assert!(p.meta.size < PACKET_DATA_SIZE);
+        assert!(p.meta.size < BLOB_SIZE);
         send.send_to(&p.data[..p.meta.size], &a).unwrap();
         num += 1;
     }
@@ -52,7 +54,7 @@ fn main() -> Result<()> {
 
     let matches = App::new(crate_name!())
         .about(crate_description!())
-        .version(solana_clap_utils::version!())
+        .version(crate_version!())
         .arg(
             Arg::with_name("num-recv-sockets")
                 .long("num-recv-sockets")
@@ -67,8 +69,7 @@ fn main() -> Result<()> {
     }
 
     let mut port = 0;
-    let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
-    let mut addr = SocketAddr::new(ip_addr, 0);
+    let mut addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
 
     let exit = Arc::new(AtomicBool::new(false));
 
@@ -76,7 +77,7 @@ fn main() -> Result<()> {
     let mut read_threads = Vec::new();
     let recycler = PacketsRecycler::default();
     for _ in 0..num_sockets {
-        let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
+        let read = solana_netutil::bind_to(port, false).unwrap();
         read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
 
         addr = read.local_addr().unwrap();
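The producer loop above boils down to blasting fixed-size UDP packets at a bound socket and counting the sends. A runnable sketch of that shape, with `PACKET_SIZE` standing in for the crate's `PACKET_DATA_SIZE`/`BLOB_SIZE` constants:

```rust
use std::net::UdpSocket;

// Stand-in for the crate's packet-size constant.
const PACKET_SIZE: usize = 1024;

fn main() -> std::io::Result<()> {
    let recv = UdpSocket::bind("127.0.0.1:0")?; // receiver picks a free port
    let addr = recv.local_addr()?;
    let send = UdpSocket::bind("127.0.0.1:0")?;

    // Same shape as producer(): send fixed-size packets, count them.
    let data = [0u8; PACKET_SIZE];
    let mut num = 0;
    for _ in 0..10 {
        send.send_to(&data, &addr)?;
        num += 1;
    }
    println!("sent {} packets to {}", num, addr);
    Ok(())
}
```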
@@ -2,39 +2,34 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "1.0.23"
+version = "0.18.2"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 
 [dependencies]
-bincode = "1.2.1"
+bincode = "1.1.4"
 clap = "2.33.0"
 log = "0.4.8"
-rayon = "1.2.0"
-serde_json = "1.0.46"
-serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
-solana-core = { path = "../core", version = "1.0.23" }
-solana-genesis = { path = "../genesis", version = "1.0.23" }
-solana-client = { path = "../client", version = "1.0.23" }
-solana-faucet = { path = "../faucet", version = "1.0.23" }
-#solana-librapay = { path = "../programs/librapay", version = "1.0.20", optional = true }
-solana-logger = { path = "../logger", version = "1.0.23" }
-solana-metrics = { path = "../metrics", version = "1.0.23" }
-solana-measure = { path = "../measure", version = "1.0.23" }
-solana-net-utils = { path = "../net-utils", version = "1.0.23" }
-solana-runtime = { path = "../runtime", version = "1.0.23" }
-solana-sdk = { path = "../sdk", version = "1.0.23" }
-#solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.20", optional = true }
-
-[dev-dependencies]
-serial_test = "0.3.2"
-serial_test_derive = "0.4.0"
-solana-local-cluster = { path = "../local-cluster", version = "1.0.23" }
-
-#[features]
-#move = ["solana-librapay", "solana-move-loader-program"]
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
+rayon = "1.1.0"
+serde = "1.0.99"
+serde_derive = "1.0.99"
+serde_json = "1.0.40"
+serde_yaml = "0.8.9"
+solana-core = { path = "../core", version = "0.18.2" }
+solana-local-cluster = { path = "../local_cluster", version = "0.18.2" }
+solana-client = { path = "../client", version = "0.18.2" }
+solana-drone = { path = "../drone", version = "0.18.2" }
+solana-librapay-api = { path = "../programs/librapay_api", version = "0.18.2" }
+solana-logger = { path = "../logger", version = "0.18.2" }
+solana-metrics = { path = "../metrics", version = "0.18.2" }
+solana-measure = { path = "../measure", version = "0.18.2" }
+solana-netutil = { path = "../utils/netutil", version = "0.18.2" }
+solana-runtime = { path = "../runtime", version = "0.18.2" }
+solana-sdk = { path = "../sdk", version = "0.18.2" }
+solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.18.2" }
+solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.18.2" }
+
+[features]
+cuda = ["solana-core/cuda"]
@@ -1,60 +1,55 @@
-use clap::{crate_description, crate_name, App, Arg, ArgMatches};
-use solana_faucet::faucet::FAUCET_PORT;
-use solana_sdk::fee_calculator::FeeRateGovernor;
-use solana_sdk::signature::{read_keypair_file, Keypair};
-use std::{net::SocketAddr, process::exit, time::Duration};
+use std::net::SocketAddr;
+use std::process::exit;
+use std::time::Duration;
 
-const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
+use clap::{crate_description, crate_name, crate_version, App, Arg, ArgMatches};
+use solana_drone::drone::DRONE_PORT;
+use solana_sdk::fee_calculator::FeeCalculator;
+use solana_sdk::signature::{read_keypair, Keypair, KeypairUtil};
 
 /// Holds the configuration for a single run of the benchmark
 pub struct Config {
     pub entrypoint_addr: SocketAddr,
-    pub faucet_addr: SocketAddr,
+    pub drone_addr: SocketAddr,
     pub id: Keypair,
     pub threads: usize,
     pub num_nodes: usize,
     pub duration: Duration,
     pub tx_count: usize,
-    pub keypair_multiplier: usize,
     pub thread_batch_sleep_ms: usize,
     pub sustained: bool,
    pub client_ids_and_stake_file: String,
     pub write_to_client_file: bool,
     pub read_from_client_file: bool,
     pub target_lamports_per_signature: u64,
-    pub multi_client: bool,
     pub use_move: bool,
-    pub num_lamports_per_account: u64,
 }
 
 impl Default for Config {
     fn default() -> Config {
         Config {
             entrypoint_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
-            faucet_addr: SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)),
+            drone_addr: SocketAddr::from(([127, 0, 0, 1], DRONE_PORT)),
             id: Keypair::new(),
             threads: 4,
             num_nodes: 1,
             duration: Duration::new(std::u64::MAX, 0),
-            tx_count: 50_000,
-            keypair_multiplier: 8,
-            thread_batch_sleep_ms: 1000,
+            tx_count: 500_000,
+            thread_batch_sleep_ms: 0,
             sustained: false,
             client_ids_and_stake_file: String::new(),
             write_to_client_file: false,
             read_from_client_file: false,
-            target_lamports_per_signature: FeeRateGovernor::default().target_lamports_per_signature,
-            multi_client: true,
+            target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
             use_move: false,
-            num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
         }
     }
 }
 
 /// Defines and builds the CLI args for a run of the benchmark
-pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
+pub fn build_args<'a, 'b>() -> App<'a, 'b> {
     App::new(crate_name!()).about(crate_description!())
-        .version(version)
+        .version(crate_version!())
         .arg(
             Arg::with_name("entrypoint")
                 .short("n")
@@ -64,12 +59,12 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                 .help("Rendezvous with the cluster at this entry point; defaults to 127.0.0.1:8001"),
         )
         .arg(
-            Arg::with_name("faucet")
+            Arg::with_name("drone")
                 .short("d")
-                .long("faucet")
+                .long("drone")
                 .value_name("HOST:PORT")
                 .takes_value(true)
-                .help("Location of the faucet; defaults to entrypoint:FAUCET_PORT"),
+                .help("Location of the drone; defaults to entrypoint:DRONE_PORT"),
         )
         .arg(
             Arg::with_name("identity")
@@ -112,11 +107,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                 .long("use-move")
                 .help("Use Move language transactions to perform transfers."),
         )
-        .arg(
-            Arg::with_name("no-multi-client")
-                .long("no-multi-client")
-                .help("Disable multi-client support, only transact with the entrypoint."),
-        )
         .arg(
             Arg::with_name("tx_count")
                 .long("tx_count")
@@ -124,13 +114,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                 .takes_value(true)
                 .help("Number of transactions to send per batch")
         )
-        .arg(
-            Arg::with_name("keypair_multiplier")
-                .long("keypair-multiplier")
-                .value_name("NUM")
-                .takes_value(true)
-                .help("Multiply by transaction count to determine number of keypairs to create")
-        )
         .arg(
             Arg::with_name("thread-batch-sleep-ms")
                 .short("z")
@@ -163,15 +146,6 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                 verification when the cluster is operating at target-signatures-per-slot",
                 ),
         )
-        .arg(
-            Arg::with_name("num_lamports_per_account")
-                .long("num-lamports-per-account")
-                .value_name("LAMPORTS")
-                .takes_value(true)
-                .help(
-                    "Number of lamports per account.",
-                ),
-        )
 }
 
 /// Parses a clap `ArgMatches` structure into a `Config`
@@ -183,21 +157,21 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     let mut args = Config::default();
 
     if let Some(addr) = matches.value_of("entrypoint") {
-        args.entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
+        args.entrypoint_addr = solana_netutil::parse_host_port(addr).unwrap_or_else(|e| {
             eprintln!("failed to parse entrypoint address: {}", e);
             exit(1)
         });
     }
 
-    if let Some(addr) = matches.value_of("faucet") {
-        args.faucet_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
-            eprintln!("failed to parse faucet address: {}", e);
+    if let Some(addr) = matches.value_of("drone") {
+        args.drone_addr = solana_netutil::parse_host_port(addr).unwrap_or_else(|e| {
+            eprintln!("failed to parse drone address: {}", e);
             exit(1)
         });
     }
 
     if matches.is_present("identity") {
-        args.id = read_keypair_file(matches.value_of("identity").unwrap())
+        args.id = read_keypair(matches.value_of("identity").unwrap())
             .expect("can't read client identity");
     }
 
@@ -217,15 +191,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     }
 
     if let Some(s) = matches.value_of("tx_count") {
-        args.tx_count = s.to_string().parse().expect("can't parse tx_count");
-    }
-
-    if let Some(s) = matches.value_of("keypair_multiplier") {
-        args.keypair_multiplier = s
-            .to_string()
-            .parse()
-            .expect("can't parse keypair-multiplier");
-        assert!(args.keypair_multiplier >= 2);
+        args.tx_count = s.to_string().parse().expect("can't parse tx_account");
     }
 
     if let Some(t) = matches.value_of("thread-batch-sleep-ms") {
@@ -253,11 +219,6 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     }
 
     args.use_move = matches.is_present("use-move");
-    args.multi_client = !matches.is_present("no-multi-client");
-
-    if let Some(v) = matches.value_of("num_lamports_per_account") {
-        args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");
-    }
 
     args
 }
@@ -1,2 +0,0 @@
-pub mod bench;
-pub mod cli;
@@ -1,64 +1,64 @@
-use log::*;
-use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
-use solana_bench_tps::cli;
-use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
-use solana_genesis::Base64Account;
-use solana_sdk::fee_calculator::FeeRateGovernor;
-use solana_sdk::signature::{Keypair, Signer};
-use solana_sdk::system_program;
-use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
+#[cfg(test)]
+#[macro_use]
+extern crate solana_move_loader_program;
+
+mod bench;
+mod cli;
+
+use crate::bench::{
+    do_bench_tps, generate_and_fund_keypairs, generate_keypairs, Config, NUM_LAMPORTS_PER_ACCOUNT,
+};
+use solana_core::gossip_service::{discover_cluster, get_multi_client};
+use solana_sdk::fee_calculator::FeeCalculator;
+use solana_sdk::signature::{Keypair, KeypairUtil};
+use std::collections::HashMap;
+use std::fs::File;
+use std::io::prelude::*;
+use std::path::Path;
+use std::process::exit;
 
 /// Number of signatures for all transactions in ~1 week at ~100K TPS
 pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
 
 fn main() {
-    solana_logger::setup_with_default("solana=info");
+    solana_logger::setup_with_filter("solana=info");
     solana_metrics::set_panic_hook("bench-tps");
 
-    let matches = cli::build_args(solana_clap_utils::version!()).get_matches();
+    let matches = cli::build_args().get_matches();
     let cli_config = cli::extract_args(&matches);
 
     let cli::Config {
         entrypoint_addr,
-        faucet_addr,
+        drone_addr,
         id,
+        threads,
         num_nodes,
+        duration,
         tx_count,
-        keypair_multiplier,
+        thread_batch_sleep_ms,
+        sustained,
         client_ids_and_stake_file,
         write_to_client_file,
         read_from_client_file,
         target_lamports_per_signature,
         use_move,
-        multi_client,
-        num_lamports_per_account,
-        ..
-    } = &cli_config;
+    } = cli_config;
 
-    let keypair_count = *tx_count * keypair_multiplier;
-    if *write_to_client_file {
-        info!("Generating {} keypairs", keypair_count);
-        let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
+    if write_to_client_file {
+        let (keypairs, _) = generate_keypairs(&id, tx_count as u64 * 2);
         let num_accounts = keypairs.len() as u64;
-        let max_fee =
-            FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
+        let max_fee = FeeCalculator::new(target_lamports_per_signature).max_lamports_per_signature;
         let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
             / num_accounts
-            + num_lamports_per_account;
+            + NUM_LAMPORTS_PER_ACCOUNT;
         let mut accounts = HashMap::new();
         keypairs.iter().for_each(|keypair| {
             accounts.insert(
                 serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap(),
-                Base64Account {
-                    balance: num_lamports_per_account,
-                    executable: false,
-                    owner: system_program::id().to_string(),
-                    data: String::new(),
-                },
+                num_lamports_per_account,
             );
         });
 
-        info!("Writing {}", client_ids_and_stake_file);
         let serialized = serde_yaml::to_string(&accounts).unwrap();
         let path = Path::new(&client_ids_and_stake_file);
         let mut file = File::create(path).unwrap();
@@ -66,66 +66,49 @@ fn main() {
         return;
     }
 
-    info!("Connecting to the cluster");
-    let (nodes, _archivers) =
-        discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
+    println!("Connecting to the cluster");
+    let (nodes, _replicators) =
+        discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|err| {
             eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
             exit(1);
         });
 
-    let client = if *multi_client {
-        let (client, num_clients) = get_multi_client(&nodes);
-        if nodes.len() < num_clients {
-            eprintln!(
-                "Error: Insufficient nodes discovered. Expecting {} or more",
-                num_nodes
-            );
-            exit(1);
-        }
-        Arc::new(client)
-    } else {
-        Arc::new(get_client(&nodes))
-    };
+    let (client, num_clients) = get_multi_client(&nodes);
 
-    let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
+    if nodes.len() < num_clients {
+        eprintln!(
+            "Error: Insufficient nodes discovered. Expecting {} or more",
+            num_nodes
+        );
+        exit(1);
+    }
+
+    let (keypairs, move_keypairs, keypair_balance) = if read_from_client_file && !use_move {
         let path = Path::new(&client_ids_and_stake_file);
         let file = File::open(path).unwrap();
 
-        info!("Reading {}", client_ids_and_stake_file);
-        let accounts: HashMap<String, Base64Account> = serde_yaml::from_reader(file).unwrap();
+        let accounts: HashMap<String, u64> = serde_yaml::from_reader(file).unwrap();
         let mut keypairs = vec![];
         let mut last_balance = 0;
 
-        accounts
-            .into_iter()
-            .for_each(|(keypair, primordial_account)| {
-                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
-                keypairs.push(Keypair::from_bytes(&bytes).unwrap());
-                last_balance = primordial_account.balance;
-            });
-
-        if keypairs.len() < keypair_count {
-            eprintln!(
-                "Expected {} accounts in {}, only received {} (--tx_count mismatch?)",
-                keypair_count,
-                client_ids_and_stake_file,
-                keypairs.len(),
-            );
-            exit(1);
-        }
+        accounts.into_iter().for_each(|(keypair, balance)| {
+            let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
+            keypairs.push(Keypair::from_bytes(&bytes).unwrap());
+            last_balance = balance;
+        });
+
         // Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
         // This prevents the amount of storage needed for bench-tps accounts from creeping up
         // across multiple runs.
         keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
-        (keypairs, None)
+        (keypairs, None, last_balance)
     } else {
        generate_and_fund_keypairs(
-            client.clone(),
-            Some(*faucet_addr),
+            &client,
+            Some(drone_addr),
             &id,
-            keypair_count,
-            *num_lamports_per_account,
-            *use_move,
+            tx_count,
+            NUM_LAMPORTS_PER_ACCOUNT,
+            use_move,
         )
         .unwrap_or_else(|e| {
             eprintln!("Error could not fund keys: {:?}", e);
@@ -133,5 +116,21 @@ fn main() {
         })
     };
 
-    do_bench_tps(client, cli_config, keypairs, move_keypairs);
+    let config = Config {
+        id,
+        threads,
+        thread_batch_sleep_ms,
+        duration,
+        tx_count,
+        sustained,
+        use_move,
+    };
+
+    do_bench_tps(
+        vec![client],
+        config,
+        keypairs,
+        keypair_balance,
+        move_keypairs,
+    );
 }
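The client-accounts file round trip in `main()` keys a YAML map by the JSON-encoded keypair bytes. A small sketch of that encode/decode cycle using the same `serde_json`/`serde_yaml` crates from the Cargo.toml above; the 8-byte vector is a stand-in for real 64-byte keypair bytes:

```rust
use std::collections::HashMap;

fn main() {
    // Stand-in for Keypair::to_bytes(); real keypairs are 64 bytes.
    let keypair_bytes = vec![1u8, 2, 3, 4, 5, 6, 7, 8];

    // Write path: map JSON-encoded keypair bytes -> balance.
    let mut accounts: HashMap<String, u64> = HashMap::new();
    accounts.insert(serde_json::to_string(&keypair_bytes).unwrap(), 100_000);
    let serialized = serde_yaml::to_string(&accounts).unwrap();

    // Read path: recover the byte vector from each map key.
    let restored: HashMap<String, u64> = serde_yaml::from_str(&serialized).unwrap();
    for (key, balance) in restored {
        let bytes: Vec<u8> = serde_json::from_str(&key).unwrap();
        assert_eq!(bytes, keypair_bytes);
        assert_eq!(balance, 100_000);
    }
}
```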
@@ -1,86 +0,0 @@
-use serial_test_derive::serial;
-use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs};
-use solana_bench_tps::cli::Config;
-use solana_client::thin_client::create_client;
-use solana_core::cluster_info::VALIDATOR_PORT_RANGE;
-use solana_core::validator::ValidatorConfig;
-use solana_faucet::faucet::run_local_faucet;
-use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
-#[cfg(feature = "move")]
-use solana_sdk::move_loader::solana_move_loader_program;
-use solana_sdk::signature::{Keypair, Signer};
-use std::sync::{mpsc::channel, Arc};
-use std::time::Duration;
-
-fn test_bench_tps_local_cluster(config: Config) {
-    #[cfg(feature = "move")]
-    let native_instruction_processors = vec![solana_move_loader_program()];
-
-    #[cfg(not(feature = "move"))]
-    let native_instruction_processors = vec![];
-
-    solana_logger::setup();
-    const NUM_NODES: usize = 1;
-    let cluster = LocalCluster::new(&ClusterConfig {
-        node_stakes: vec![999_990; NUM_NODES],
-        cluster_lamports: 200_000_000,
-        validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
-        native_instruction_processors,
-        ..ClusterConfig::default()
-    });
-
-    let faucet_keypair = Keypair::new();
-    cluster.transfer(
-        &cluster.funding_keypair,
-        &faucet_keypair.pubkey(),
-        100_000_000,
-    );
-
-    let client = Arc::new(create_client(
-        (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
-        VALIDATOR_PORT_RANGE,
-    ));
-
-    let (addr_sender, addr_receiver) = channel();
-    run_local_faucet(faucet_keypair, addr_sender, None);
-    let faucet_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
-
-    let lamports_per_account = 100;
-
-    let keypair_count = config.tx_count * config.keypair_multiplier;
-    let (keypairs, move_keypairs) = generate_and_fund_keypairs(
-        client.clone(),
-        Some(faucet_addr),
-        &config.id,
-        keypair_count,
-        lamports_per_account,
-        config.use_move,
-    )
-    .unwrap();
-
-    let _total = do_bench_tps(client, config, keypairs, move_keypairs);
-
-    #[cfg(not(debug_assertions))]
-    assert!(_total > 100);
-}
-
-#[test]
-#[serial]
-fn test_bench_tps_local_cluster_solana() {
-    let mut config = Config::default();
-    config.tx_count = 100;
-    config.duration = Duration::from_secs(10);
-
-    test_bench_tps_local_cluster(config);
-}
-
-#[test]
-#[serial]
-fn test_bench_tps_local_cluster_move() {
-    let mut config = Config::default();
-    config.tx_count = 100;
-    config.duration = Duration::from_secs(10);
-    config.use_move = true;
-
-    test_bench_tps_local_cluster(config);
-}
@@ -1,7 +1,7 @@
 Building the Solana book
 ---
 
-Install dependencies, build, and test the docs:
+Install the book's dependencies, build, and test the book:
 
 ```bash
 $ ./build.sh
@@ -19,7 +19,7 @@ Render markdown as HTML:
 $ make build
 ```
 
-Render and view the docs:
+Render and view the book:
 
 ```bash
 $ make open
book/art/consensus.msc (new file, 15 additions)
@@ -0,0 +1,15 @@
+msc {
+  client,leader,verifier_a,verifier_b,verifier_c;
+
+  client=>leader [ label = "SUBMIT" ] ;
+  leader=>client [ label = "CONFIRMED" ] ;
+  leader=>verifier_a [ label = "CONFIRMED" ] ;
+  leader=>verifier_b [ label = "CONFIRMED" ] ;
+  leader=>verifier_c [ label = "CONFIRMED" ] ;
+  verifier_a=>leader [ label = "VERIFIED" ] ;
+  verifier_b=>leader [ label = "VERIFIED" ] ;
+  leader=>client [ label = "FINALIZED" ] ;
+  leader=>verifier_a [ label = "FINALIZED" ] ;
+  leader=>verifier_b [ label = "FINALIZED" ] ;
+  leader=>verifier_c [ label = "FINALIZED" ] ;
+}
@@ -24,7 +24,7 @@ msc {
   ... ;
   Validator abox Validator [label="\nmax\nlockout\n"];
   |||;
-  Cluster box Cluster [label="credits redeemed (at epoch)"];
-
+  StakerX => Cluster [label="StakeState::RedeemCredits()"];
+  StakerY => Cluster [label="StakeState::RedeemCredits()"] ;
 
 }
book/art/spv-bank-merkle.bob (new file, 18 additions)
@@ -0,0 +1,18 @@
+             +------------+
+             | Bank-Merkle|
+             +------------+
+                  ^    ^
+                 /      \
+  +-----------------+  +-------------+
+  | Bank-Diff-Merkle|  | Block-Merkle|
+  +-----------------+  +-------------+
+           ^        ^
+          /          \
+  +------+  +--------------------------+
+  | Hash |  | Previous Bank-Diff-Merkle|
+  +------+  +--------------------------+
+      ^           ^
+     /             \
+  +---------------+  +---------------+
+  | Hash(Account1)|  | Hash(Account2)|
+  +---------------+  +---------------+
@@ -7,7 +7,7 @@
                 | TVU | |
                 | | |
                 | .-------. .------------. .----+---. .---------. |
-.------------.  | | Shred | | Retransmit | | Replay | | Storage | |
+.------------.  | | Blob  | | Retransmit | | Replay | | Storage | |
 | Upstream   +----->| Fetch +-->| Stage +-->| Stage +-->| Stage | |
 | Validators |  | | Stage | | | | | | | |
 `------------`  | `-------` `----+-------` `----+---` `---------` |
book/art/validator.bob (new file, 30 additions)
@@ -0,0 +1,30 @@
+.--------------------------------------.
+| Validator |
+| |
+.--------.  | .-------------------. |
+|        |---->| | |
+| Client |  | | JSON RPC Service | |
+|        |<----| | |
+`----+---`  | `-------------------` |
+     | | ^ |
+     | | | .----------------. | .------------------.
+     | | | | Gossip Service |<----------| Validators |
+     | | | `----------------` | | |
+     | | | ^ | | |
+     | | | | | | .------------. |
+     | | .---+---. .----+---. .-----------. | | | | |
+     | | | Bank  |<-+ Replay | | BlobFetch |<------+ Upstream | |
+     | | | Forks | | Stage  | | Stage     | | | | Validators | |
+     | | `-------` `--------` `--+--------` | | | | |
+     | | ^ ^ | | | `------------` |
+     | | | | v | | |
+     | | | .--+--------. | | |
+     | | | | Blocktree | | | |
+     | | | `-----------` | | .------------. |
+     | | | ^ | | | | |
+     | | | | | | | Downstream | |
+     | | .--+--. .-------+---. | | | Validators | |
+     `-------->| TPU +---->| Broadcast +--------------->| | |
+     | `-----` | Stage     | | | `------------` |
+     | `-----------` | `------------------`
+`--------------------------------------`
book/build.sh (new executable file, 6 additions)
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$0")"
+
+make -j"$(nproc)" test
@@ -1,16 +1,13 @@
 BOB_SRCS=$(wildcard art/*.bob)
 MSC_SRCS=$(wildcard art/*.msc)
-MD_SRCS=$(wildcard src/*.md src/*/*.md)
+MD_SRCS=$(wildcard src/*.md)
 
-SVG_IMGS=$(BOB_SRCS:art/%.bob=src/.gitbook/assets/%.svg) $(MSC_SRCS:art/%.msc=src/.gitbook/assets/%.svg)
+SVG_IMGS=$(BOB_SRCS:art/%.bob=src/img/%.svg) $(MSC_SRCS:art/%.msc=src/img/%.svg)
 
 TARGET=html/index.html
 TEST_STAMP=src/tests.ok
 
 all: $(TARGET)
-	./set-solana-release-tag.sh
-
-svg: $(SVG_IMGS)
 
 test: $(TEST_STAMP)
 
@@ -20,11 +17,11 @@ open: $(TEST_STAMP)
 watch: $(SVG_IMGS)
 	mdbook watch
 
-src/.gitbook/assets/%.svg: art/%.bob
+src/img/%.svg: art/%.bob
 	@mkdir -p $(@D)
 	svgbob < $< > $@
 
-src/.gitbook/assets/%.svg: art/%.msc
+src/img/%.svg: art/%.msc
 	@mkdir -p $(@D)
 	mscgen -T svg -i $< -o $@
 
book/src/SUMMARY.md (new file, 91 additions)
@@ -0,0 +1,91 @@
+# Solana Architecture
+
+- [Introduction](introduction.md)
+
+- [Terminology](terminology.md)
+
+- [Getting Started](getting-started.md)
+  - [Testnet Participation](testnet-participation.md)
+  - [Example Client: Web Wallet](webwallet.md)
+
+- [Programming Model](programs.md)
+  - [Example: Tic-Tac-Toe](tictactoe.md)
+  - [Drones](drones.md)
+
+- [A Solana Cluster](cluster.md)
+  - [Synchronization](synchronization.md)
+  - [Leader Rotation](leader-rotation.md)
+  - [Fork Generation](fork-generation.md)
+  - [Managing Forks](managing-forks.md)
+  - [Turbine Block Propagation](turbine-block-propagation.md)
+  - [Ledger Replication](ledger-replication.md)
+  - [Secure Vote Signing](vote-signing.md)
+  - [Stake Delegation and Rewards](stake-delegation-and-rewards.md)
+  - [Performance Metrics](performance-metrics.md)
+
+- [Anatomy of a Validator](validator.md)
+  - [TPU](tpu.md)
+  - [TVU](tvu.md)
+    - [Blocktree](blocktree.md)
+  - [Gossip Service](gossip.md)
+  - [The Runtime](runtime.md)
+
+- [Anatomy of a Transaction](transaction.md)
+
+- [Running a Validator](running-validator.md)
+  - [Hardware Requirements](validator-hardware.md)
+  - [Choosing a Testnet](validator-testnet.md)
+  - [Installing the Validator Software](validator-software.md)
+  - [Starting a Validator](validator-start.md)
+  - [Staking](validator-stake.md)
+  - [Monitoring a Validator](validator-monitor.md)
+  - [Publishing Validator Info](validator-info.md)
+  - [Troubleshooting](validator-troubleshoot.md)
+  - [FAQ](validator-faq.md)
+
+- [Running a Replicator](running-replicator.md)
+
+- [API Reference](api-reference.md)
+  - [Transaction](transaction-api.md)
+  - [Instruction](instruction-api.md)
+  - [Blockstreamer](blockstreamer.md)
+  - [JSON RPC API](jsonrpc-api.md)
+  - [JavaScript API](javascript-api.md)
+  - [solana CLI](cli.md)
+
+- [Accepted Design Proposals](proposals.md)
+  - [Ledger Replication](ledger-replication-to-implement.md)
+  - [Secure Vote Signing](vote-signing-to-implement.md)
+  - [Staking Rewards](staking-rewards.md)
+  - [Cluster Economics](ed_overview.md)
+    - [Validation-client Economics](ed_validation_client_economics.md)
+      - [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md)
+      - [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md)
+      - [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md)
+      - [Validation Stake Delegation](ed_vce_validation_stake_delegation.md)
+    - [Replication-client Economics](ed_replication_client_economics.md)
+      - [Storage-replication Rewards](ed_rce_storage_replication_rewards.md)
+      - [Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md)
+    - [Economic Sustainability](ed_economic_sustainability.md)
+    - [Attack Vectors](ed_attack_vectors.md)
+    - [Economic Design MVP](ed_mvp.md)
+    - [References](ed_references.md)
+  - [Cluster Test Framework](cluster-test-framework.md)
+  - [Validator](validator-proposal.md)
+  - [Simple Payment and State Verification](simple-payment-and-state-verification.md)
+  - [Cross-Program Invocation](cross-program-invocation.md)
+
+- [Implemented Design Proposals](implemented-proposals.md)
+  - [Blocktree](blocktree.md)
+  - [Cluster Software Installation and Updates](installer.md)
+  - [Deterministic Transaction Fees](transaction-fees.md)
+  - [Tower BFT](tower-bft.md)
+  - [Leader-to-Leader Transition](leader-leader-transition.md)
+  - [Leader-to-Validator Transition](leader-validator-transition.md)
+  - [Passive Stake Delegation and Rewards](passive-stake-delegation-and-rewards.md)
+  - [Persistent Account Storage](persistent-account-storage.md)
+  - [Reliable Vote Transmission](reliable-vote-transmission.md)
+  - [Repair Service](repair-service.md)
+  - [Testing Programs](testing-programs.md)
+  - [Credit-only Accounts](credit-only-credit-debit-accounts.md)
+  - [Embedding the Move Language](embedding-move.md)
book/src/api-reference.md (new file, 4 additions)
@@ -0,0 +1,4 @@
+# API Reference
+
+The following sections contain API reference material you may find useful
+when developing applications utilizing a Solana cluster.
@@ -17,7 +17,7 @@ height of the block it is voting on. The account stores the 32 highest heights.
 * Only the validator knows how to find its own votes directly.
 
 Other components, such as the one that calculates confirmation time, needs to
-be baked into the validator code. The validator code queries the bank for all
+be baked into the fullnode code. The fullnode code queries the bank for all
 accounts owned by the vote program.
 
 * Voting ballots do not contain a PoH hash. The validator is only voting that
book/src/blockstreamer.md (new file, 37 additions)
@@ -0,0 +1,37 @@
+# Blockstreamer
+
+Solana supports a node type called a *blockstreamer*. This fullnode variation
+is intended for applications that need to observe the data plane without
+participating in transaction validation or ledger replication.
+
+A blockstreamer runs without a vote signer, and can optionally stream ledger
+entries out to a Unix domain socket as they are processed. The JSON-RPC service
+still functions as on any other node.
+
+To run a blockstreamer, include the argument `no-signer` and (optional)
+`blockstream` socket location:
+
+```bash
+$ ./multinode-demo/validator-x.sh --no-signer --blockstream <SOCKET>
+```
+
+The stream will output a series of JSON objects:
+- An Entry event JSON object is sent when each ledger entry is processed, with
+  the following fields:
+
+  * `dt`, the system datetime, as RFC3339-formatted string
+  * `t`, the event type, always "entry"
+  * `s`, the slot height, as unsigned 64-bit integer
+  * `h`, the tick height, as unsigned 64-bit integer
+  * `entry`, the entry, as JSON object
+
+- A Block event JSON object is sent when a block is complete, with the
+  following fields:
+
+  * `dt`, the system datetime, as RFC3339-formatted string
+  * `t`, the event type, always "block"
+  * `s`, the slot height, as unsigned 64-bit integer
+  * `h`, the tick height, as unsigned 64-bit integer
+  * `l`, the slot leader id, as base-58 encoded string
+  * `id`, the block id, as base-58 encoded string
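One way a consumer might drain these events, assuming the node writes one JSON object per line to the socket; the socket path and listener setup here are illustrative, not prescribed by the page above:

```rust
use std::io::{BufRead, BufReader};
use std::os::unix::net::UnixListener;

fn main() -> std::io::Result<()> {
    // Example path only; pass the same location given via --blockstream.
    let listener = UnixListener::bind("/tmp/solana-blockstream.sock")?;
    let (stream, _) = listener.accept()?;

    // Read newline-delimited JSON events as they arrive.
    for line in BufReader::new(stream).lines() {
        let event = line?;
        // Every event carries "dt", "t", "s", and "h", plus type-specific fields.
        println!("{}", event);
    }
    Ok(())
}
```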
book/src/blocktree.md (new file, 102 additions)
@@ -0,0 +1,102 @@
+# Blocktree
+
+After a block reaches finality, all blocks from that one on down
+to the genesis block form a linear chain with the familiar name
+blockchain. Until that point, however, the validator must maintain all
+potentially valid chains, called *forks*. The process by which forks
+naturally form as a result of leader rotation is described in
+[fork generation](fork-generation.md). The *blocktree* data structure
+described here is how a validator copes with those forks until blocks
+are finalized.
+
+The blocktree allows a validator to record every blob it observes
+on the network, in any order, as long as the blob is signed by the expected
+leader for a given slot.
+
+Blobs are moved to a fork-able key space the tuple of `leader slot` + `blob
+index` (within the slot). This permits the skip-list structure of the Solana
+protocol to be stored in its entirety, without a-priori choosing which fork to
+follow, which Entries to persist or when to persist them.
+
+Repair requests for recent blobs are served out of RAM or recent files and out
+of deeper storage for less recent blobs, as implemented by the store backing
+Blocktree.
+
+### Functionalities of Blocktree
+
+1. Persistence: the Blocktree lives in the front of the node's verification
+   pipeline, right behind network receive and signature verification. If the
+   blob received is consistent with the leader schedule (i.e. was signed by the
+   leader for the indicated slot), it is immediately stored.
+2. Repair: repair is the same as window repair above, but able to serve any
+   blob that's been received. Blocktree stores blobs with signatures,
+   preserving the chain of origination.
+3. Forks: Blocktree supports random access of blobs, so can support a
+   validator's need to rollback and replay from a Bank checkpoint.
+4. Restart: with proper pruning/culling, the Blocktree can be replayed by
+   ordered enumeration of entries from slot 0. The logic of the replay stage
+   (i.e. dealing with forks) will have to be used for the most recent entries in
+   the Blocktree.
+
+### Blocktree Design
+
+1. Entries in the Blocktree are stored as key-value pairs, where the key is the concatenated
+slot index and blob index for an entry, and the value is the entry data. Note blob indexes are zero-based for each slot (i.e. they're slot-relative).
+
+2. The Blocktree maintains metadata for each slot, in the `SlotMeta` struct containing:
+  * `slot_index` - The index of this slot
+  * `num_blocks` - The number of blocks in the slot (used for chaining to a previous slot)
+  * `consumed` - The highest blob index `n`, such that for all `m < n`, there exists a blob in this slot with blob index equal to `m` (i.e. the highest consecutive blob index).
+  * `received` - The highest received blob index for the slot
+  * `next_slots` - A list of future slots this slot could chain to. Used when rebuilding
+the ledger to find possible fork points.
+  * `last_index` - The index of the blob that is flagged as the last blob for this slot. This flag on a blob will be set by the leader for a slot when they are transmitting the last blob for a slot.
+  * `is_rooted` - True iff every block from 0...slot forms a full sequence without any holes. We can derive is_rooted for each slot with the following rules. Let slot(n) be the slot with index `n`, and slot(n).is_full() is true if the slot with index `n` has all the ticks expected for that slot. Let is_rooted(n) be the statement that "the slot(n).is_rooted is true". Then:
+
+    is_rooted(0)
+    is_rooted(n+1) iff (is_rooted(n) and slot(n).is_full())
+
+3. Chaining - When a blob for a new slot `x` arrives, we check the number of blocks (`num_blocks`) for that new slot (this information is encoded in the blob). We then know that this new slot chains to slot `x - num_blocks`.
+
+4. Subscriptions - The Blocktree records a set of slots that have been "subscribed" to. This means entries that chain to these slots will be sent on the Blocktree channel for consumption by the ReplayStage. See the `Blocktree APIs` for details.
+
+5. Update notifications - The Blocktree notifies listeners when slot(n).is_rooted is flipped from false to true for any `n`.
+
+### Blocktree APIs
+
+The Blocktree offers a subscription based API that ReplayStage uses to ask for entries it's interested in. The entries will be sent on a channel exposed by the Blocktree. These subscription API's are as follows:
+1. `fn get_slots_since(slot_indexes: &[u64]) -> Vec<SlotMeta>`: Returns new slots connecting to any element of the list `slot_indexes`.
+
+2. `fn get_slot_entries(slot_index: u64, entry_start_index: usize, max_entries: Option<u64>) -> Vec<Entry>`: Returns the entry vector for the slot starting with `entry_start_index`, capping the result at `max` if `max_entries == Some(max)`, otherwise, no upper limit on the length of the return vector is imposed.
+
+Note: Cumulatively, this means that the replay stage will now have to know when a slot is finished, and subscribe to the next slot it's interested in to get the next set of entries. Previously, the burden of chaining slots fell on the Blocktree.
+
+### Interfacing with Bank
+
+The bank exposes to replay stage:
+
+1. `prev_hash`: which PoH chain it's working on as indicated by the hash of the last
+entry it processed
+2. `tick_height`: the ticks in the PoH chain currently being verified by this
+bank
+3. `votes`: a stack of records that contain:
+
+  1. `prev_hashes`: what anything after this vote must chain to in PoH
+  2. `tick_height`: the tick height at which this vote was cast
+  3. `lockout period`: how long a chain must be observed to be in the ledger to
+be able to be chained below this vote
+
+Replay stage uses Blocktree APIs to find the longest chain of entries it can
+hang off a previous vote. If that chain of entries does not hang off the
+latest vote, the replay stage rolls back the bank to that vote and replays the
+chain from there.
+
+### Pruning Blocktree
+
+Once Blocktree entries are old enough, representing all the possible forks
+becomes less useful, perhaps even problematic for replay upon restart. Once a
+validator's votes have reached max lockout, however, any Blocktree contents
+that are not on the PoH chain for that vote can be pruned, expunged.
+
+Replicator nodes will be responsible for storing really old ledger contents,
+and validators need only persist their bank periodically.
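The `is_rooted` recurrence above reduces to "every slot below `n` is full". A tiny sketch that checks the rule, with a boolean slice standing in for per-slot `is_full()` state:

```rust
// is_rooted(0) holds by definition; is_rooted(n+1) iff is_rooted(n)
// and slot(n).is_full(). Unrolled, slot n is rooted iff slots 0..n
// are all full.
fn is_rooted(n: usize, full: &[bool]) -> bool {
    (0..n).all(|i| full[i])
}

fn main() {
    let full = [true, true, false, true]; // slot 2 has a hole
    assert!(is_rooted(0, &full)); // rooted by definition
    assert!(is_rooted(2, &full)); // slots 0 and 1 are full
    assert!(!is_rooted(3, &full)); // blocked by the hole at slot 2
    println!("recurrence checks passed");
}
```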
366
book/src/cli.md
Normal file
@@ -0,0 +1,366 @@
|
|||||||
|
## solana CLI
|
||||||
|
|
||||||
|
The [solana-cli crate](https://crates.io/crates/solana-cli) provides a command-line interface tool for Solana.
|
||||||
|
|
||||||
|
### Examples
|
||||||
|
|
||||||
|
#### Get Pubkey
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana address
|
||||||
|
|
||||||
|
// Return
|
||||||
|
<PUBKEY>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Airdrop Lamports
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana airdrop 123
|
||||||
|
|
||||||
|
// Return
|
||||||
|
"Your balance is: 123"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Get Balance
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana balance
|
||||||
|
|
||||||
|
// Return
|
||||||
|
"Your balance is: 123"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Confirm Transaction
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana confirm <TX_SIGNATURE>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
"Confirmed" / "Not found" / "Transaction failed with error <ERR>"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Deploy program
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana deploy <PATH>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
<PROGRAM_ID>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Unconditional Immediate Transfer
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana pay <PUBKEY> 123
|
||||||
|
|
||||||
|
// Return
|
||||||
|
<TX_SIGNATURE>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Post-Dated Transfer
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana pay <PUBKEY> 123 \
|
||||||
|
--after 2018-12-24T23:59:00 --require-timestamp-from <PUBKEY>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
|
||||||
|
```
|
||||||
|
*`require-timestamp-from` is optional. If not provided, the transaction will expect a timestamp signed by this wallet's secret key*
|
||||||
|
|
||||||
|
#### Authorized Transfer
|
||||||
|
|
||||||
|
A third party must send a signature to unlock the lamports.
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana pay <PUBKEY> 123 \
|
||||||
|
--require-signature-from <PUBKEY>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Post-Dated and Authorized Transfer
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana pay <PUBKEY> 123 \
|
||||||
|
--after 2018-12-24T23:59:00 --require-timestamp-from <PUBKEY> \
|
||||||
|
--require-signature-from <PUBKEY>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Multiple Witnesses
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana pay <PUBKEY> 123 \
|
||||||
|
--require-signature-from <PUBKEY> \
|
||||||
|
--require-signature-from <PUBKEY>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Cancelable Transfer
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana pay <PUBKEY> 123 \
|
||||||
|
--require-signature-from <PUBKEY> \
|
||||||
|
--cancelable
|
||||||
|
|
||||||
|
// Return
|
||||||
|
{signature: <TX_SIGNATURE>, processId: <PROCESS_ID>}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Cancel Transfer
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana cancel <PROCESS_ID>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
<TX_SIGNATURE>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Send Signature
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana send-signature <PUBKEY> <PROCESS_ID>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
<TX_SIGNATURE>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Indicate Elapsed Time
|
||||||
|
|
||||||
|
Use the current system time:
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana send-timestamp <PUBKEY> <PROCESS_ID>
|
||||||
|
|
||||||
|
// Return
|
||||||
|
<TX_SIGNATURE>
|
||||||
|
```
|
||||||
|
|
||||||
|
Or specify some other arbitrary timestamp:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
// Command
|
||||||
|
$ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
|
||||||
|
|
||||||
|
// Return
|
||||||
|
<TX_SIGNATURE>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana 0.12.0
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana [FLAGS] [OPTIONS] [SUBCOMMAND]
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
--rpc-tls Enable TLS for the RPC endpoint
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
OPTIONS:
|
||||||
|
--drone-host <IP ADDRESS> Drone host to use [default: same as --host]
|
||||||
|
--drone-port <PORT> Drone port to use [default: 9900]
|
||||||
|
-n, --host <IP ADDRESS> Host to use for both RPC and drone [default: 127.0.0.1]
|
||||||
|
-k, --keypair <PATH> /path/to/id.json
|
||||||
|
--rpc-host <IP ADDRESS> RPC host to use [default: same as --host]
|
||||||
|
--rpc-port <PORT> RPC port to use [default: 8899]
|
||||||
|
|
||||||
|
SUBCOMMANDS:
|
||||||
|
address Get your public key
|
||||||
|
airdrop Request a batch of lamports
|
||||||
|
balance Get your balance
|
||||||
|
cancel Cancel a transfer
|
||||||
|
confirm Confirm transaction by signature
|
||||||
|
deploy Deploy a program
|
||||||
|
get-transaction-count Get current transaction count
|
||||||
|
help Prints this message or the help of the given subcommand(s)
|
||||||
|
pay Send a payment
|
||||||
|
send-signature Send a signature to authorize a transfer
|
||||||
|
send-timestamp Send a timestamp to unlock a transfer
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-address
|
||||||
|
Get your public key
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana address
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-airdrop
|
||||||
|
Request a batch of lamports
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana airdrop <NUM>
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<NUM> The number of lamports to request
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-balance
|
||||||
|
Get your balance
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana balance
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-cancel
|
||||||
|
Cancel a transfer
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana cancel <PROCESS_ID>
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<PROCESS_ID> The process id of the transfer to cancel
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-confirm
|
||||||
|
Confirm transaction by signature
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana confirm <SIGNATURE>
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<SIGNATURE> The transaction signature to confirm
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-deploy
|
||||||
|
Deploy a program
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana deploy <PATH>
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<PATH> /path/to/program.o
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-fees
|
||||||
|
Display current cluster fees
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana fees
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-get-transaction-count
|
||||||
|
Get current transaction count
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana get-transaction-count
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-pay
|
||||||
|
Send a payment
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana pay [FLAGS] [OPTIONS] <PUBKEY> <NUM>
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
--cancelable
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
OPTIONS:
|
||||||
|
--after <DATETIME> A timestamp after which transaction will execute
|
||||||
|
--require-timestamp-from <PUBKEY> Require timestamp from this third party
|
||||||
|
--require-signature-from <PUBKEY>... Any third party signatures required to unlock the lamports
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<PUBKEY> The pubkey of recipient
|
||||||
|
<NUM> The number of lamports to send
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-send-signature
|
||||||
|
Send a signature to authorize a transfer
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana send-signature <PUBKEY> <PROCESS_ID>
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<PUBKEY> The pubkey of recipient
|
||||||
|
<PROCESS_ID> The process id of the transfer to authorize
|
||||||
|
```
|
||||||
|
|
||||||
|
```manpage
|
||||||
|
solana-send-timestamp
|
||||||
|
Send a timestamp to unlock a transfer
|
||||||
|
|
||||||
|
USAGE:
|
||||||
|
solana send-timestamp [OPTIONS] <PUBKEY> <PROCESS_ID>
|
||||||
|
|
||||||
|
FLAGS:
|
||||||
|
-h, --help Prints help information
|
||||||
|
-V, --version Prints version information
|
||||||
|
|
||||||
|
OPTIONS:
|
||||||
|
--date <DATETIME> Optional arbitrary timestamp to apply
|
||||||
|
|
||||||
|
ARGS:
|
||||||
|
<PUBKEY> The pubkey of recipient
|
||||||
|
<PROCESS_ID> The process id of the transfer to unlock
|
||||||
|
```
|
@@ -1,28 +1,42 @@
|
|||||||
# Cluster Test Framework
|
# Cluster Test Framework
|
||||||
|
|
||||||
This document proposes the Cluster Test Framework \(CTF\). CTF is a test harness that allows tests to execute against a local, in-process cluster or a deployed cluster.
|
This document proposes the Cluster Test Framework (CTF). CTF is a test harness
|
||||||
|
that allows tests to execute against a local, in-process cluster or a
|
||||||
|
deployed cluster.
|
||||||
|
|
||||||
## Motivation
|
## Motivation
|
||||||
|
|
||||||
The goal of CTF is to provide a framework for writing tests independent of where and how the cluster is deployed. Regressions can be captured in these tests and the tests can be run against deployed clusters to verify the deployment. The focus of these tests should be on cluster stability, consensus, fault tolerance, API stability.
|
The goal of CTF is to provide a framework for writing tests independent of where
|
||||||
|
and how the cluster is deployed. Regressions can be captured in these tests and
|
||||||
|
the tests can be run against deployed clusters to verify the deployment. The
|
||||||
|
focus of these tests should be on cluster stability, consensus, fault tolerance,
|
||||||
|
API stability.
|
||||||
|
|
||||||
Tests should verify a single bug or scenario, and should be written with the least amount of internal plumbing exposed to the test.
|
Tests should verify a single bug or scenario, and should be written with the
|
||||||
|
least amount of internal plumbing exposed to the test.
|
||||||
|
|
||||||
## Design Overview
|
## Design Overview
|
||||||
|
|
||||||
Tests are provided an entry point, which is a `contact_info::ContactInfo` structure, and a keypair that has already been funded.
|
Tests are provided an entry point, which is a `contact_info::ContactInfo`
|
||||||
|
structure, and a keypair that has already been funded.
|
||||||
|
|
||||||
Each node in the cluster is configured with a `validator::ValidatorConfig` at boot time. At boot time this configuration specifies any extra cluster configuration required for the test. The cluster should boot with the configuration when it is run in-process or in a data center.
|
Each node in the cluster is configured with a `fullnode::ValidatorConfig` at boot
|
||||||
|
time. At boot time this configuration specifies any extra cluster configuration
|
||||||
|
required for the test. The cluster should boot with the configuration when it
|
||||||
|
is run in-process or in a data center.
|
||||||
|
|
||||||
Once booted, the test will discover the cluster through a gossip entry point and configure any runtime behaviors via validator RPC.
|
Once booted, the test will discover the cluster through a gossip entry point and
|
||||||
|
configure any runtime behaviors via fullnode RPC.
|
||||||
|
|
||||||
## Test Interface
|
## Test Interface
|
||||||
|
|
||||||
Each CTF test starts with an opaque entry point and a funded keypair. The test should not depend on how the cluster is deployed, and should be able to exercise all the cluster functionality through the publicly available interfaces.
|
Each CTF test starts with an opaque entry point and a funded keypair. The test
|
||||||
|
should not depend on how the cluster is deployed, and should be able to exercise
|
||||||
|
all the cluster functionality through the publicly available interfaces.
|
||||||
|
|
||||||
```text
|
```rust,ignore
|
||||||
use crate::contact_info::ContactInfo;
|
use crate::contact_info::ContactInfo;
|
||||||
use solana_sdk::signature::{Keypair, Signer};
|
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||||
pub fn test_this_behavior(
|
pub fn test_this_behavior(
|
||||||
entry_point_info: &ContactInfo,
|
entry_point_info: &ContactInfo,
|
||||||
funding_keypair: &Keypair,
|
funding_keypair: &Keypair,
|
||||||
@@ -30,11 +44,13 @@ pub fn test_this_behavior(
|
|||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## Cluster Discovery
|
## Cluster Discovery
|
||||||
|
|
||||||
At test start, the cluster has already been established and is fully connected. The test can discover most of the available nodes over a few seconds.
|
At test start, the cluster has already been established and is fully connected.
|
||||||
|
The test can discover most of the available nodes over a few seconds.
|
||||||
|
|
||||||
```text
|
```rust,ignore
|
||||||
use crate::gossip_service::discover_nodes;
|
use crate::gossip_service::discover_nodes;
|
||||||
|
|
||||||
// Discover the cluster over a few seconds.
|
// Discover the cluster over a few seconds.
|
||||||
@@ -43,13 +59,15 @@ let cluster_nodes = discover_nodes(&entry_point_info, num_nodes);
|
|||||||
|
|
||||||
## Cluster Configuration
|
## Cluster Configuration
|
||||||
|
|
||||||
To enable specific scenarios, the cluster needs to be booted with special configurations. These configurations can be captured in `validator::ValidatorConfig`.
|
To enable specific scenarios, the cluster needs to be booted with special
|
||||||
|
configurations. These configurations can be captured in
|
||||||
|
`fullnode::ValidatorConfig`.
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
|
|
||||||
```text
|
```rust,ignore
|
||||||
let mut validator_config = ValidatorConfig::default();
|
let mut validator_config = ValidatorConfig::default();
|
||||||
validator_config.rpc_config.enable_validator_exit = true;
|
validator_config.rpc_config.enable_fullnode_exit = true;
|
||||||
let local = LocalCluster::new_with_config(
|
let local = LocalCluster::new_with_config(
|
||||||
num_nodes,
|
num_nodes,
|
||||||
10_000,
|
10_000,
|
||||||
@@ -60,11 +78,14 @@ let local = LocalCluster::new_with_config(
|
|||||||
|
|
||||||
## How to design a new test
|
## How to design a new test
|
||||||
|
|
||||||
For example, there is a bug that shows that the cluster fails when it is flooded with invalid advertised gossip nodes. Our gossip library and protocol may change, but the cluster still needs to stay resilient to floods of invalid advertised gossip nodes.
|
For example, there is a bug that shows that the cluster fails when it is flooded
|
||||||
|
with invalid advertised gossip nodes. Our gossip library and protocol may
|
||||||
|
change, but the cluster still needs to stay resilient to floods of invalid
|
||||||
|
advertised gossip nodes.
|
||||||
|
|
||||||
Configure the RPC service:
|
Configure the RPC service:
|
||||||
|
|
||||||
```text
|
```rust,ignore
|
||||||
let mut validator_config = ValidatorConfig::default();
|
let mut validator_config = ValidatorConfig::default();
|
||||||
validator_config.rpc_config.enable_rpc_gossip_push = true;
|
validator_config.rpc_config.enable_rpc_gossip_push = true;
|
||||||
validator_config.rpc_config.enable_rpc_gossip_refresh_active_set = true;
|
validator_config.rpc_config.enable_rpc_gossip_refresh_active_set = true;
|
||||||
@@ -72,7 +93,7 @@ validator_config.rpc_config.enable_rpc_gossip_refresh_active_set = true;
|
|||||||
|
|
||||||
Wire the RPCs and write a new test:
|
Wire the RPCs and write a new test:
|
||||||
|
|
||||||
```text
|
```rust,ignore
|
||||||
pub fn test_large_invalid_gossip_nodes(
|
pub fn test_large_invalid_gossip_nodes(
|
||||||
entry_point_info: &ContactInfo,
|
entry_point_info: &ContactInfo,
|
||||||
funding_keypair: &Keypair,
|
funding_keypair: &Keypair,
|
||||||
@@ -81,7 +102,7 @@ pub fn test_large_invalid_gossip_nodes(
|
|||||||
let cluster = discover_nodes(&entry_point_info, num_nodes);
|
let cluster = discover_nodes(&entry_point_info, num_nodes);
|
||||||
|
|
||||||
// Poison the cluster.
|
// Poison the cluster.
|
||||||
let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE);
|
let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||||
for _ in 0..(num_nodes * 100) {
|
for _ in 0..(num_nodes * 100) {
|
||||||
client.gossip_push(
|
client.gossip_push(
|
||||||
cluster_info::invalid_contact_info()
|
cluster_info::invalid_contact_info()
|
||||||
@@ -91,7 +112,7 @@ pub fn test_large_invalid_gossip_nodes(
|
|||||||
|
|
||||||
// Force refresh of the active set.
|
// Force refresh of the active set.
|
||||||
for node in &cluster {
|
for node in &cluster {
|
||||||
let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
|
let client = create_client(node.client_facing_addr(), FULLNODE_PORT_RANGE);
|
||||||
client.gossip_refresh_active_set();
|
client.gossip_refresh_active_set();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -99,4 +120,3 @@ pub fn test_large_invalid_gossip_nodes(
|
|||||||
verify_spends(&cluster);
|
verify_spends(&cluster);
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
100
book/src/cluster.md
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
# A Solana Cluster
|
||||||
|
|
||||||
|
A Solana cluster is a set of fullnodes working together to serve client
|
||||||
|
transactions and maintain the integrity of the ledger. Many clusters may
|
||||||
|
coexist. When two clusters share a common genesis block, they attempt to
|
||||||
|
converge. Otherwise, they simply ignore the existence of the other.
|
||||||
|
Transactions sent to the wrong one are quietly rejected. In this chapter, we'll
|
||||||
|
discuss how a cluster is created, how nodes join the cluster, how they share
|
||||||
|
the ledger, how they ensure the ledger is replicated, and how they cope with
|
||||||
|
buggy and malicious nodes.
|
||||||
|
|
||||||
|
## Creating a Cluster
|
||||||
|
|
||||||
|
Before starting any fullnodes, one first needs to create a *genesis block*.
|
||||||
|
The block contains entries referencing two public keys, a *mint* and a
|
||||||
|
*bootstrap leader*. The fullnode holding the bootstrap leader's secret key is
|
||||||
|
responsible for appending the first entries to the ledger. It initializes its
|
||||||
|
internal state with the mint's account. That account will hold the number of
|
||||||
|
native tokens defined by the genesis block. The second fullnode then contacts
|
||||||
|
the bootstrap leader to register as a *validator* or *replicator*. Additional
|
||||||
|
fullnodes then register with any registered member of the cluster.
|
||||||
|
|
||||||
|
A validator receives all entries from the leader and submits votes confirming
|
||||||
|
those entries are valid. After voting, the validator is expected to store those
|
||||||
|
entries until replicator nodes submit proofs that they have stored copies of
|
||||||
|
them. Once the validator observes a sufficient number of copies exist, it deletes
|
||||||
|
its copy.
|
||||||
|
|
||||||
|
## Joining a Cluster
|
||||||
|
|
||||||
|
Validators and replicators enter the cluster via registration messages sent to
|
||||||
|
its *control plane*. The control plane is implemented using a *gossip*
|
||||||
|
protocol, meaning that a node may register with any existing node, and expect
|
||||||
|
its registration to propagate to all nodes in the cluster. The time it takes
|
||||||
|
for all nodes to synchronize is proportional to the square of the number of
|
||||||
|
nodes participating in the cluster. Algorithmically, that's considered very
|
||||||
|
slow, but in exchange for that time, a node is assured that it eventually has
|
||||||
|
all the same information as every other node, and that that information cannot
|
||||||
|
be censored by any one node.
|
||||||
|
|
||||||
|
## Sending Transactions to a Cluster
|
||||||
|
|
||||||
|
Clients send transactions to any fullnode's Transaction Processing Unit (TPU)
|
||||||
|
port. If the node is in the validator role, it forwards the transaction to the
|
||||||
|
designated leader. If in the leader role, the node bundles incoming
|
||||||
|
transactions, timestamps them creating an *entry*, and pushes them onto the
|
||||||
|
cluster's *data plane*. Once on the data plane, the transactions are validated
|
||||||
|
by validator nodes and replicated by replicator nodes, effectively appending
|
||||||
|
them to the ledger.
|
||||||
|
|
||||||
|
## Confirming Transactions
|
||||||
|
|
||||||
|
A Solana cluster is capable of subsecond *confirmation* for up to 150 nodes
|
||||||
|
with plans to scale up to hundreds of thousands of nodes. Once fully
|
||||||
|
implemented, confirmation times are expected to increase only with the
|
||||||
|
logarithm of the number of validators, where the logarithm's base is very high.
|
||||||
|
If the base is one thousand, for example, it means that for the first thousand
|
||||||
|
nodes, confirmation will be the duration of three network hops plus the time it
|
||||||
|
takes the slowest validator of a supermajority to vote. For the next million
|
||||||
|
nodes, confirmation increases by only one network hop.
|
||||||
|
|
||||||
|
Solana defines confirmation as the duration of time from when the leader
|
||||||
|
timestamps a new entry to the moment when it recognizes a supermajority of
|
||||||
|
ledger votes.
|
||||||
|
|
||||||
|
A gossip network is much too slow to achieve subsecond confirmation once the
|
||||||
|
network grows beyond a certain size. The time it takes to send messages to all
|
||||||
|
nodes is proportional to the square of the number of nodes. If a blockchain
|
||||||
|
wants to achieve low confirmation times and attempts to do so using a gossip network,
|
||||||
|
it will be forced to centralize to just a handful of nodes.
|
||||||
|
|
||||||
|
Scalable confirmation can be achieved using the following combination of
|
||||||
|
techniques:
|
||||||
|
|
||||||
|
1. Timestamp transactions with a VDF sample and sign the timestamp.
|
||||||
|
2. Split the transactions into batches, send each to separate nodes and have
|
||||||
|
each node share its batch with its peers.
|
||||||
|
3. Repeat the previous step recursively until all nodes have all batches.
|
||||||
|
|
||||||
|
Solana rotates leaders at fixed intervals, called *slots*. Each leader may only
|
||||||
|
produce entries during its allotted slot. The leader therefore timestamps
|
||||||
|
transactions so that validators may look up the public key of the designated
|
||||||
|
leader. The leader then signs the timestamp so that a validator may verify the
|
||||||
|
signature, proving the signer is the owner of the designated leader's public key.
|
||||||
|
|
||||||
|
Next, transactions are broken into batches so that a node can send transactions
|
||||||
|
to multiple parties without making multiple copies. If, for example, the leader
|
||||||
|
needed to send 60 transactions to 6 nodes, it would break that collection of 60
|
||||||
|
into batches of 10 transactions and send one to each node. This allows the
|
||||||
|
leader to put 60 transactions on the wire, not 60 transactions for each node.
|
||||||
|
Each node then shares its batch with its peers. Once the node has collected all
|
||||||
|
6 batches, it reconstructs the original set of 60 transactions.
|
||||||
|
|
||||||
|
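The batching arithmetic above can be sketched in a few lines; `make_transactions`, `peers`, and `send_batch` are illustrative stand-ins:

```rust,ignore
// 60 transactions split across 6 peers: one batch of 10 per peer, so
// the leader puts 60 transactions on the wire rather than 360.
let transactions: Vec<Transaction> = make_transactions(60);
let batch_size = transactions.len() / peers.len(); // 10
for (peer, batch) in peers.iter().zip(transactions.chunks(batch_size)) {
    send_batch(peer, batch); // each peer then shares its batch with its peers
}
```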
A batch of transactions can only be split so many times before it is so small
|
||||||
|
that header information becomes the primary consumer of network bandwidth. At
|
||||||
|
the time of this writing, the approach is scaling well up to about 150
|
||||||
|
validators. To scale up to hundreds of thousands of validators, each node can
|
||||||
|
apply the same technique as the leader node to another set of nodes of equal
|
||||||
|
size. We call the technique *data plane fanout*; learn more in the [data plane
|
||||||
|
fanout](data-plane-fanout.md) section.
|
140
book/src/credit-only-credit-debit-accounts.md
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
# Credit-Only Accounts
|
||||||
|
|
||||||
|
This design covers the handling of credit-only and credit-debit accounts in the
|
||||||
|
[runtime](runtime.md). Accounts already distinguish themselves as credit-only or
|
||||||
|
credit-debit based on the program ID specified by the transaction's instruction.
|
||||||
|
Programs must treat accounts that are not owned by them as credit-only.
|
||||||
|
|
||||||
|
To identify credit-only accounts by program id would require the account to be
|
||||||
|
fetched and loaded from disk. This operation is expensive, and while it is
|
||||||
|
occurring, the runtime would have to reject any transactions referencing the same
|
||||||
|
account.
|
||||||
|
|
||||||
|
The proposal introduces a `num_readonly_accounts` field to the transaction
|
||||||
|
structure, and removes the `program_ids` dedicated vector for program accounts.
|
||||||
|
|
||||||
|
This design doesn't change the runtime transaction processing rules.
|
||||||
|
Programs still can't write or spend accounts that they do not own, but this
|
||||||
|
change allows the runtime to optimistically take the correct lock for each account
|
||||||
|
specified in the transaction before loading the accounts from storage.
|
||||||
|
|
||||||
|
Accounts selected as credit-debit by the transaction can still be treated as
|
||||||
|
credit-only by the instructions.
|
||||||
|
|
||||||
|
## Runtime handling
|
||||||
|
|
||||||
|
Credit-only accounts have the following properties:
|
||||||
|
|
||||||
|
* Can be deposited into: Deposits can be implemented as a simple `atomic_add`.
|
||||||
|
* Read-only access to account data.
|
||||||
|
|
||||||
|
Instructions that debit or modify the credit-only account data will fail.
|
||||||
|
|
||||||
|
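As a sketch of the deposit property above, assuming the balance is represented as an `AtomicU64` for illustration:

```rust,ignore
use std::sync::atomic::{AtomicU64, Ordering};

// Deposits into a credit-only account need no exclusive lock; a single
// atomic add suffices.
fn deposit(lamports: &AtomicU64, amount: u64) {
    lamports.fetch_add(amount, Ordering::Relaxed);
}
```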
## Account Lock Optimizations
|
||||||
|
|
||||||
|
The Accounts module keeps track of current locked accounts in the runtime,
|
||||||
|
which separates credit-only accounts from the credit-debit accounts. The credit-only
|
||||||
|
accounts can be cached in memory and shared between all the threads executing
|
||||||
|
transactions.
|
||||||
|
|
||||||
|
The current runtime can't predict whether an account is credit-only or credit-debit when
|
||||||
|
the transaction account keys are locked at the start of the transaction
|
||||||
|
processing pipeline. Accounts referenced by the transaction have not been
|
||||||
|
loaded from the disk yet.
|
||||||
|
|
||||||
|
An ideal design would cache the credit-only accounts while they are referenced by
|
||||||
|
any transaction moving through the runtime, and release the cache when the last
|
||||||
|
transaction exits the runtime.
|
||||||
|
|
||||||
|
## Credit-only accounts and read-only account data
|
||||||
|
|
||||||
|
Credit-only account data can be treated as read-only. Credit-debit
|
||||||
|
account data is treated as read-write.
|
||||||
|
|
||||||
|
## Transaction changes
|
||||||
|
|
||||||
|
To enable the possibility of caching accounts only while they are in the
|
||||||
|
runtime, the Transaction structure should be changed in the following way:
|
||||||
|
|
||||||
|
* `program_ids: Vec<Pubkey>` - This vector is removed. Program keys can be
|
||||||
|
placed at the end of the `account_keys` vector, with the `num_readonly_accounts`
|
||||||
|
count including the number of programs.
|
||||||
|
|
||||||
|
* `num_readonly_accounts: u8` - The number of keys from the **end** of the
|
||||||
|
transaction's `account_keys` array that is credit-only.
|
||||||
|
|
||||||
|
The following possible accounts are present in a transaction:
|
||||||
|
|
||||||
|
* paying account
|
||||||
|
* RW accounts
|
||||||
|
* R accounts
|
||||||
|
* Program IDs
|
||||||
|
|
||||||
|
The paying account must be credit-debit, and program IDs must be credit-only. The
|
||||||
|
first account in the `account_keys` array is always the account that pays for
|
||||||
|
the transaction fee, therefore it cannot be credit-only. For these reasons the
|
||||||
|
credit-only accounts are all grouped together at the end of the `account_keys`
|
||||||
|
vector. Counting credit-only accounts from the end allows the default `0`
|
||||||
|
value to still be functionally correct, since a transaction will succeed with
|
||||||
|
all credit-debit accounts.
|
||||||
|
|
||||||
|
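A sketch of the layout implied above; fields other than the two under discussion are elided:

```rust,ignore
// Credit-only keys, including program ids, are grouped at the end of
// `account_keys` and counted from the end by `num_readonly_accounts`.
struct Transaction {
    account_keys: Vec<Pubkey>, // [payer, credit-debit..., credit-only..., program ids]
    num_readonly_accounts: u8,
    // ... signatures, instructions, recent_blockhash, etc.
}

fn is_credit_only(tx: &Transaction, index: usize) -> bool {
    index >= tx.account_keys.len() - tx.num_readonly_accounts as usize
}
```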
Since accounts can only appear once in the transaction's `account_keys` array,
|
||||||
|
an account can only be credit-only or credit-debit in a single transaction, not
|
||||||
|
both. The runtime treats a transaction as one atomic unit of execution. If any
|
||||||
|
instruction needs credit-debit access to an account, a copy needs to be made. The
|
||||||
|
write lock is held for the entire time the transaction is being processed by
|
||||||
|
the runtime.
|
||||||
|
|
||||||
|
## Starvation
|
||||||
|
|
||||||
|
Read locks for credit-only accounts can keep the runtime from executing
|
||||||
|
transactions requesting a write lock to a credit-debit account.
|
||||||
|
|
||||||
|
When a request for a write lock is made while a read lock is open, the
|
||||||
|
transaction requesting the write lock should be cached. Upon closing the read
|
||||||
|
lock, the pending transactions can be pushed through the runtime.
|
||||||
|
|
||||||
|
While a pending write transaction exists, any additional read lock requests for
|
||||||
|
that account should fail. It follows that any other write lock requests will also
|
||||||
|
fail. Currently, clients must retransmit when a transaction fails because of
|
||||||
|
a pending transaction. This approach would mimic that behavior as closely as
|
||||||
|
possible while preventing write starvation.
|
||||||
|
|
||||||
|
## Program execution with credit-only accounts
|
||||||
|
|
||||||
|
Before handing off the accounts to program execution, the runtime can mark each
|
||||||
|
account in each instruction as a credit-only account. The credit-only accounts can
|
||||||
|
be passed as references without an extra copy. The transaction will abort on a
|
||||||
|
write to credit-only.
|
||||||
|
|
||||||
|
An alternative is to detect writes to credit-only accounts and fail the
|
||||||
|
transactions before commit.
|
||||||
|
|
||||||
|
## Alternative design
|
||||||
|
|
||||||
|
This design attempts to cache a credit-only account after loading without the use
|
||||||
|
of a transaction-specified credit-only accounts list. Instead, the credit-only
|
||||||
|
accounts are held in a reference-counted table inside the runtime as the
|
||||||
|
transactions are processed.
|
||||||
|
|
||||||
|
1. Transaction accounts are locked.
|
||||||
|
a. If the account is present in the `credit-only` table, the TX does not fail.
|
||||||
|
The pending state for this TX is marked NeedReadLock.
|
||||||
|
2. Transaction accounts are loaded.
|
||||||
|
a. Transaction accounts that are credit-only increase their reference
|
||||||
|
count in the `credit-only` table.
|
||||||
|
b. Transaction accounts that need a write lock and are present in the
|
||||||
|
`credit-only` table fail.
|
||||||
|
3. Transaction accounts are unlocked.
|
||||||
|
a. Decrement the `credit-only` lock table reference count; remove the entry if it reaches 0
|
||||||
|
b. Remove from the `lock` set if the account is not in the `credit-only`
|
||||||
|
table.
|
||||||
|
|
||||||
|
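A minimal sketch of the reference-counted table from steps 1-3; `Pubkey` is the SDK key type, and the surrounding `lock` set machinery is elided:

```rust,ignore
use std::collections::HashMap;

struct CreditOnlyTable {
    refs: HashMap<Pubkey, usize>,
}

impl CreditOnlyTable {
    /// Step 2a: a credit-only load bumps the reference count.
    fn load_credit_only(&mut self, key: Pubkey) {
        *self.refs.entry(key).or_insert(0) += 1;
    }
    /// Step 2b: a write lock fails while the account is credit-only.
    fn try_write_lock(&self, key: &Pubkey) -> bool {
        !self.refs.contains_key(key)
    }
    /// Step 3a: decrement and remove the entry when the count hits 0.
    fn unlock(&mut self, key: &Pubkey) {
        if let Some(count) = self.refs.get_mut(key) {
            *count -= 1;
            if *count == 0 {
                self.refs.remove(key);
            }
        }
    }
}
```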
The downside with this approach is that if the `lock` set mutex is released
|
||||||
|
between lock and load to allow better pipelining of transactions, a request for
|
||||||
|
a credit-only account may fail. Therefore, this approach is not suitable for
|
||||||
|
treating programs as credit-only accounts.
|
||||||
|
|
||||||
|
Holding the accounts lock mutex while fetching the account from disk would
|
||||||
|
potentially have a significant performance hit on the runtime. Fetching from
|
||||||
|
disk is expected to be slow, but can be parallelized between multiple disks.
|
111
book/src/cross-program-invocation.md
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
# Cross-Program Invocation
|
||||||
|
|
||||||
|
## Problem
|
||||||
|
|
||||||
|
In today's implementation a client can create a transaction that modifies two
|
||||||
|
accounts, each owned by a separate on-chain program:
|
||||||
|
|
||||||
|
```rust,ignore
|
||||||
|
let message = Message::new(vec![
|
||||||
|
token_instruction::pay(&alice_pubkey),
|
||||||
|
acme_instruction::launch_missiles(&bob_pubkey),
|
||||||
|
]);
|
||||||
|
client.send_message(&[&alice_keypair, &bob_keypair], &message);
|
||||||
|
```
|
||||||
|
|
||||||
|
The current implementation does not, however, allow the `acme` program to
|
||||||
|
conveniently invoke `token` instructions on the client's behalf:
|
||||||
|
|
||||||
|
```rust,ignore
|
||||||
|
let message = Message::new(vec![
|
||||||
|
acme_instruction::pay_and_launch_missiles(&alice_pubkey, &bob_pubkey),
|
||||||
|
]);
|
||||||
|
client.send_message(&[&alice_keypair, &bob_keypair], &message);
|
||||||
|
```
|
||||||
|
|
||||||
|
Currently, there is no way to create an instruction `pay_and_launch_missiles` that executes
|
||||||
|
`token_instruction::pay` from the `acme` program. The workaround is to extend the
|
||||||
|
`acme` program with the implementation of the `token` program, and create `token`
|
||||||
|
accounts with `ACME_PROGRAM_ID`, which the `acme` program is permitted to modify.
|
||||||
|
With that workaround, `acme` can modify token-like accounts created by the `acme`
|
||||||
|
program, but not token accounts created by the `token` program.
|
||||||
|
|
||||||
|
|
||||||
|
## Proposed Solution
|
||||||
|
|
||||||
|
The goal of this design is to modify Solana's runtime such that an on-chain
|
||||||
|
program can invoke an instruction from another program.
|
||||||
|
|
||||||
|
Given two on-chain programs `token` and `acme`, each implementing instructions
|
||||||
|
`pay()` and `launch_missiles()` respectively, we would ideally like to implement
|
||||||
|
the `acme` module with a call to a function defined in the `token` module:
|
||||||
|
|
||||||
|
```rust,ignore
|
||||||
|
use token;
|
||||||
|
|
||||||
|
fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||||
|
...
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||||
|
token::pay(&keyed_accounts[1..])?;
|
||||||
|
|
||||||
|
launch_missiles(keyed_accounts)?;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The above code would require that the `token` crate be dynamically linked,
|
||||||
|
so that a custom linker could intercept calls and validate accesses to
|
||||||
|
`keyed_accounts`. That is, even though the client intends to modify both
|
||||||
|
`token` and `acme` accounts, only the `token` program is permitted to modify
|
||||||
|
the `token` account, and only the `acme` program is permitted to modify
|
||||||
|
the `acme` account.
|
||||||
|
|
||||||
|
Backing off from that ideal cross-program call, a slightly more
|
||||||
|
verbose solution is to expose token's existing `process_instruction()`
|
||||||
|
entrypoint to the acme program:
|
||||||
|
|
||||||
|
```rust,ignore
|
||||||
|
use token_instruction;
|
||||||
|
|
||||||
|
fn launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||||
|
...
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pay_and_launch_missiles(keyed_accounts: &[KeyedAccount]) -> Result<()> {
|
||||||
|
let alice_pubkey = keyed_accounts[1].key;
|
||||||
|
let instruction = token_instruction::pay(&alice_pubkey);
|
||||||
|
process_instruction(&instruction)?;
|
||||||
|
|
||||||
|
launch_missiles(keyed_accounts)?;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
where `process_instruction()` is built into Solana's runtime and responsible
|
||||||
|
for routing the given instruction to the `token` program via the instruction's
|
||||||
|
`program_id` field. Before invoking `pay()`, the runtime must also ensure that
|
||||||
|
`acme` didn't modify any accounts owned by `token`. It does this by calling
|
||||||
|
`runtime::verify_instruction()` and then afterward updating all the `pre_*`
|
||||||
|
variables to tentatively commit `acme`'s account modifications. After `pay()`
|
||||||
|
completes, the runtime must again ensure that `token` didn't modify any
|
||||||
|
accounts owned by `acme`. It should call `verify_instruction()` again, but this
|
||||||
|
time with the `token` program ID. Lastly, after `pay_and_launch_missiles()`
|
||||||
|
completes, the runtime must call `verify_instruction()` one more time, where it
|
||||||
|
normally would, but using all updated `pre_*` variables. If executing
|
||||||
|
`pay_and_launch_missiles()` up to `pay()` made no invalid account changes,
|
||||||
|
`pay()` made no invalid changes, and executing from `pay()` until
|
||||||
|
`pay_and_launch_missiles()` returns made no invalid changes, then the runtime
|
||||||
|
can transitively assume `pay_and_launch_missiles()` as a whole made no invalid
|
||||||
|
account changes, and therefore commit all account modifications.
|
||||||
|
|
||||||
|
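The ordering described above can be summarized in a hedged pseudocode sketch; only `verify_instruction()` is named by this design, while `commit_pre_state` and `route_instruction` are illustrative stand-ins:

```rust,ignore
// Runtime-side sequence of checks around the cross-program call.
fn run_pay_and_launch_missiles(instruction: &Instruction) -> Result<()> {
    // acme has run up to its process_instruction() call.
    runtime::verify_instruction(&ACME_PROGRAM_ID)?; // acme made no invalid changes so far
    commit_pre_state();                             // tentatively commit acme's modifications
    route_instruction(instruction)?;                // routes to token, running pay()
    runtime::verify_instruction(&TOKEN_PROGRAM_ID)?; // token touched only its own accounts
    commit_pre_state();
    // acme then runs to completion, followed by the usual final check
    // against the updated pre_* variables.
    runtime::verify_instruction(&ACME_PROGRAM_ID)?;
    Ok(())
}
```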
### Setting `KeyedAccount.is_signer`
|
||||||
|
|
||||||
|
When `process_instruction()` is invoked, the runtime must create a new
|
||||||
|
`KeyedAccounts` parameter using the signatures from the *original* transaction
|
||||||
|
data. Since the `token` program is immutable and existed on-chain prior to the
|
||||||
|
`acme` program, the runtime can safely treat the transaction signature as a
|
||||||
|
signature of a transaction with a `token` instruction. When the runtime sees
|
||||||
|
the given instruction references `alice_pubkey`, it looks up the key in the
|
||||||
|
transaction to see if that key corresponds to a transaction signature. In this
|
||||||
|
case it does and so sets `KeyedAccount.is_signer`, thereby authorizing the
|
||||||
|
`token` program to modify Alice's account.
|
86
book/src/drones.md
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
# Creating Signing Services with Drones
|
||||||
|
|
||||||
|
This chapter defines an off-chain service called a *drone*, which acts as
|
||||||
|
custodian of a user's private key. In its simplest form, it can be used to
|
||||||
|
create *airdrop* transactions, a token transfer from the drone's account to a
|
||||||
|
client's account.
|
||||||
|
|
||||||
|
## Signing Service
|
||||||
|
|
||||||
|
A drone is a simple signing service. It listens for requests to sign
|
||||||
|
*transaction data*. Once received, the drone validates the request however it
|
||||||
|
sees fit. It may, for example, only accept transaction data with a
|
||||||
|
`SystemInstruction::Transfer` instruction transferring up to a certain amount
|
||||||
|
of tokens. If the drone accepts the transaction, it returns an `Ok(Signature)`
|
||||||
|
where `Signature` is a signature of the transaction data using the drone's
|
||||||
|
private key. If it rejects the transaction data, it returns a `DroneError`
|
||||||
|
describing why.
|
||||||
|
|
||||||
|
|
||||||
|
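A minimal sketch of that contract, assuming a simple transfer-amount policy; `parse_transfer_amount`, `MAX_AIRDROP_LAMPORTS`, and the specific `DroneError` variant are illustrative:

```rust,ignore
// Validate the requested transaction data, then sign it with the
// drone's keypair or return a descriptive error.
fn sign_request(drone_keypair: &Keypair, tx_data: &[u8]) -> Result<Signature, DroneError> {
    let amount = parse_transfer_amount(tx_data)?; // rejects non-Transfer data
    if amount > MAX_AIRDROP_LAMPORTS {
        return Err(DroneError::RequestTooLarge);
    }
    Ok(drone_keypair.sign_message(tx_data))
}
```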
## Examples
|
||||||
|
|
||||||
|
### Granting access to an on-chain game
|
||||||
|
|
||||||
|
The creator of an on-chain tic-tac-toe game hosts a drone that responds to airdrop
|
||||||
|
requests containing an `InitGame` instruction. The drone signs the transaction
|
||||||
|
data in the request and returns it, thereby authorizing its account to pay the
|
||||||
|
transaction fee as well as seed the game's account with enough tokens to
|
||||||
|
play it. The user then creates a transaction from its transaction data and the
|
||||||
|
drone's signature and submits it to the Solana cluster. Each time the user
|
||||||
|
interacts with the game, the game pays the user enough tokens to pay the next
|
||||||
|
transaction fee to advance the game. At that point, the user may choose to keep
|
||||||
|
the tokens instead of advancing the game. If the creator wants to defend
|
||||||
|
against that case, they could require the user to return to the drone to sign
|
||||||
|
each instruction.
|
||||||
|
|
||||||
|
### Worldwide airdrop of a new token
|
||||||
|
|
||||||
|
The creator of a new on-chain token (ERC-20 interface) may wish to do a worldwide
|
||||||
|
airdrop to distribute its tokens to millions of users over just a few seconds.
|
||||||
|
That drone cannot spend resources interacting with the Solana cluster. Instead,
|
||||||
|
the drone should only verify the client is unique and human, and then return
|
||||||
|
the signature. It may also want to listen to the Solana cluster for recent
|
||||||
|
entry IDs to support client retries and to ensure the airdrop is targeting the
|
||||||
|
desired cluster.
|
||||||
|
|
||||||
|
|
||||||
|
## Attack vectors
|
||||||
|
|
||||||
|
### Invalid recent_blockhash
|
||||||
|
|
||||||
|
The drone may prefer its airdrops only target a particular Solana cluster. To
|
||||||
|
do that, it listens to the cluster for new entry IDs and ensures any requests
|
||||||
|
reference a recent one.
|
||||||
|
|
||||||
|
Note: listening for new entry IDs assumes the drone is either a fullnode or a
|
||||||
|
*light* client. At the time of this writing, light clients have not been
|
||||||
|
implemented and no proposal describes them. This document assumes one of the
|
||||||
|
following approaches will be taken:
|
||||||
|
|
||||||
|
1. Define and implement a light client
|
||||||
|
2. Embed a fullnode
|
||||||
|
3. Query the jsonrpc API for the latest last id at a rate slightly faster than
|
||||||
|
ticks are produced.
|
||||||
|
|
||||||
|
### Double spends
|
||||||
|
|
||||||
|
A client may request multiple airdrops before the first has been submitted to
|
||||||
|
the ledger. The client may do this maliciously or simply because it thinks the
|
||||||
|
first request was dropped. The drone should not simply query the cluster to
|
||||||
|
ensure the client has not already received an airdrop. Instead, it should use
|
||||||
|
`recent_blockhash` to ensure the previous request is expired before signing another.
|
||||||
|
Note that the Solana cluster will reject any transaction with a `recent_blockhash`
|
||||||
|
beyond a certain *age*.
|
||||||
|
|
||||||
|
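A hedged sketch of that rule; `is_blockhash_valid` stands in for a query against the cluster, not an existing API:

```rust,ignore
// Sign a new airdrop for this client only once the blockhash used by
// its previous airdrop has aged out of the cluster's accepted window.
fn may_sign_again(previous_blockhash: &Hash) -> bool {
    !is_blockhash_valid(previous_blockhash)
}
```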
### Denial of Service
|
||||||
|
|
||||||
|
If the transaction data size is smaller than the size of the returned signature
|
||||||
|
(or descriptive error), a single client can flood the network. Considering
|
||||||
|
that a simple `Transfer` operation requires two public keys (each 32 bytes) and a
|
||||||
|
`fee` field, and that the returned signature is 64 bytes (and a byte to
|
||||||
|
indicate `Ok`), consideration for this attack may not be required.
|
||||||
|
|
||||||
|
In the current design, the drone accepts TCP connections. This allows clients
|
||||||
|
to DoS the service by simply opening lots of idle connections. Switching to UDP
|
||||||
|
may be preferred. The transaction data will be smaller than a UDP packet since
|
||||||
|
the transaction sent to the Solana cluster is already pinned to using UDP.
|
11
book/src/ed_attack_vectors.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
## Attack Vectors
|
||||||
|
|
||||||
|
### Colluding validation and replication clients
|
||||||
|
|
||||||
|
A colluding validation-client may take the strategy of marking PoReps from non-colluding replicator nodes as invalid in an attempt to maximize the rewards for the colluding replicator nodes. In this case, it isn’t feasible for the offended-against replicator nodes to petition the network for resolution, as this would result in a network-wide vote on each offending PoRep and create too much overhead for the network to progress adequately. Also, this mitigation attempt would still be vulnerable to a >= 51% staked colluder.
|
||||||
|
|
||||||
|
Alternatively, transaction fees from submitted PoReps are pooled and distributed across validation-clients in proportion to the number of valid PoReps discounted by the number of invalid PoReps as voted by each validator-client. Thus invalid votes are directly dis-incentivized through this reward channel. Invalid votes that are revealed by replicator nodes as fishing PoReps will not be discounted from the payout PoRep count.
|
||||||
|
|
||||||
|
Another collusion attack involves a validator-client that may take the strategy of ignoring invalid PoReps from colluding replicators and voting them as valid. In this case, colluding replicator-clients would not have to store the data while still receiving rewards for validated PoReps. Additionally, colluding validator nodes would also receive rewards for validating these PoReps. To mitigate this attack, validators must randomly sample PoReps corresponding to the ledger block they are validating; because of this, multiple validators will receive the colluding replicator’s invalid submissions. These non-colluding validators will be incentivized to mark these PoReps as invalid as they have no way to determine whether the proposed invalid PoRep is actually a fishing PoRep, for which a confirmation vote would result in the validator’s stake being slashed.
|
||||||
|
|
||||||
|
In this case, the proportion of time a colluding pair will be successful has an upper limit determined by the % of stake of the network claimed by the colluding validator. This also sets bounds to the value of such an attack. For example, if a colluding validator controls 10% of the total validator stake, transaction fees will be lost (likely sent to mining pool) by the colluding replicator 90% of the time and so the attack vector is only profitable if the per-PoRep reward is at least 90% higher than the average PoRep transaction fee. While, probabilistically, some colluding replicator-client PoReps will find their way to colluding validation-clients, the network can also monitor rates of paired (validator + replicator) discrepancies in voting patterns and censor identified colluders in these cases.
|
18
book/src/ed_economic_sustainability.md
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
## Economic Sustainability
|
||||||
|
|
||||||
|
Long term economic sustainability is one of the guiding principles of Solana’s economic design. While it is impossible to predict how decentralized economies will develop over time, especially economies with flexible decentralized governances, we can arrange economic components such that, under certain conditions, a sustainable economy may take shape in the long term. In the case of Solana’s network, these components take the form of the remittances and deposits into and out of the reserve ‘mining pool’.
|
||||||
|
|
||||||
|
The dominant remittances from the Solana mining pool are validator and replicator rewards. The deposit mechanism is a flat, protocol-specified and protocol-adjusted percentage of each transaction fee.
|
||||||
|
|
||||||
|
The Replicator rewards are to be delivered to replicators from the mining pool after successful PoRep validation. The per-PoRep reward amount is determined as a function of the total network storage redundancy at the time of the PoRep validation and the network goal redundancy. This function is likely to take the form of a discount from a base reward to be delivered when the network has achieved and maintained its goal redundancy. An example of such a reward function is shown in **Figure 3**.
|
||||||
|
|
||||||
|
<!--  -->
|
||||||
|
<p style="text-align:center;"><img src="img/porep_reward.png" alt="==PoRep Reward Curve ==" width="800"/></p>
|
||||||
|
|
||||||
|
**Figure 3**: Example PoRep reward design as a function of global network storage redundancy.
|
||||||
|
|
||||||
|
In the example shown in **Figure 3**, multiple per-PoRep base rewards are explored (as a % of Tx Fee) to be delivered when the global ledger replication redundancy meets 10X. When the global ledger replication redundancy is less than 10X, the base reward is discounted as a function of the square of the ratio of the actual ledger replication redundancy to the goal redundancy (i.e. 10X).
|
||||||
|
|
||||||
|
The other protocol-based remittance goes to validation-clients as a reward distributed in proportion to stake-weight for voting to validate the ledger state. The functional issuance of this reward is described in [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md) and is designed to reduce over time until validators are incentivized solely through collection of transaction fees. Therefore, in the long-run, protocol-based rewards to replication-nodes will be the only remittances from the mining pool, and will have to be countered by the portion of each non-PoRep transaction fee that is directed back into the mining pool. I.e. for a long-term self-sustaining economy, replicator-client rewards must be subsidized through a minimum fee on each non-PoRep transaction pre-allocated to the mining pool. Through this constraint, we can write the following inequality:
|
||||||
|
|
||||||
|
**== WIP [here](https://docs.google.com/document/d/1HBDasdkjS4Ja9wC_tIUsZPVcxGAWTuYOq9zf6xoQNps/edit?usp=sharing) ==**
|
12
book/src/ed_mvp.md
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
## Proposed MVP of Economic Design
|
||||||
|
|
||||||
|
The preceding sections, outlined in the [Economic Design Overview](ed_overview.md), describe a long-term vision of a sustainable Solana economy. Of course, we don't expect the final implementation to perfectly match what has been described above. We intend to fully engage with network stakeholders throughout the implementation phases (i.e. pre-testnet, testnet, mainnet) to ensure the system supports, and is representative of, the various network participants' interests. The first step toward this goal, however, is outlining some desired MVP economic features to be available for early pre-testnet and testnet participants. Below is a rough sketch outlining basic economic functionality from which a more complete and functional system can be developed.
|
||||||
|
|
||||||
|
### MVP Economic Features
|
||||||
|
|
||||||
|
* Faucet to deliver testnet SOLs to validators for staking and dapp development.
|
||||||
|
* Mechanism by which validators are rewarded in proportion to their stake. Interest rate mechanism (i.e. to be determined by total % staked) to come later.
|
||||||
|
* Ability to delegate tokens to validator nodes.
|
||||||
|
* Replicators to receive fixed, arbitrary reward for submitting validated PoReps. Reward size mechanism (i.e. PoRep reward as a function of total ledger redundancy) to come later.
|
||||||
|
* Pooling of replicator PoRep transaction fees and weighted distribution to validators based on PoRep verification (see [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md)). It will be useful to test this protection against attacks on testnet.
|
||||||
|
* Nice-to-have: auto-delegation of replicator rewards to validator.
|
16
book/src/ed_overview.md
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
## Economic Design Overview
|
||||||
|
|
||||||
|
Solana’s crypto-economic system is designed to promote a healthy, long term self-sustaining economy with participant incentives aligned to the security and decentralization of the network. The main participants in this economy are validation-clients and replication-clients. Their contributions to the network, state validation and data storage respectively, and their requisite remittance mechanisms are discussed below.
|
||||||
|
|
||||||
|
The main channels of participant remittances are referred to as protocol-based rewards and transaction fees. Protocol-based rewards are protocol-derived issuances from a protocol-defined, global inflation rate. These rewards will constitute the total reward delivered to replication clients and a portion of the total rewards for validation clients, with the remainder sourced from transaction fees. In the early days of the network, it is likely that protocol-based rewards, deployed based on a predefined issuance schedule, will drive the majority of participant incentives to join the network.
|
||||||
|
|
||||||
|
These protocol-based rewards, to be distributed to participating validation and replication clients, are to be a result of a global supply inflation rate, calculated per Solana epoch and distributed amongst the active validator set. As discussed further below, the per annum inflation rate is based on a pre-determined disinflationary schedule. This provides the network with monetary supply predictability which supports long term economic stability and security.
|
||||||
|
|
||||||
|
Transaction fees are market-based participant-to-participant transfers, attached to network interactions as a necessary motivation and compensation for the inclusion and execution of a proposed transaction (be it a state execution or proof-of-replication verification). A mechanism for continuous and long-term economic stability through partial burning of each transaction fee is also discussed below.
|
||||||
|
|
||||||
|
A high-level schematic of Solana’s crypto-economic design is shown below in **Figure 1**. The specifics of validation-client economics are described in sections: [Validation-client Economics](ed_validation_client_economics.md), [State-validation Protocol-based Rewards](ed_vce_state_validation_protocol_based_rewards.md), [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md) and [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md). Also, the chapter titled [Validation Stake Delegation](ed_vce_validation_stake_delegation.md) closes with a discussion of validator delegation opportunities and marketplace. Additionally, in [Storage Rent Economics](ed_storage_rend_economics.md), we describe an implementation of storage rent to account for the externality costs of maintaining the active state of the ledger. The [Replication-client Economics](ed_replication_client_economics.md) chapter will review the Solana network design for global ledger storage/redundancy and replicator-client economics ([Storage-replication rewards](ed_rce_storage_replication_rewards.md)) along with a replicator-to-validator delegation mechanism designed to aid participant on-boarding into the Solana economy discussed in [Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md). The [Economic Sustainability](ed_economic_sustainability.md) section dives deeper into Solana’s design for long-term economic sustainability and outlines the constraints and conditions for a self-sustaining economy. An outline of features for an MVP economic design is discussed in the [Economic Design MVP](ed_mvp.md) section. Finally, in chapter [Attack Vectors](ed_attack_vectors.md), various attack vectors will be described and potential vulnerabilities explored and parameterized.
|
||||||
|
|
||||||
|
<!--  -->
|
||||||
|
<p style="text-align:center;"><img src="img/economic_design_infl_230719.png" alt="== Solana Economic Design Diagram ==" width="800"/></p>
|
||||||
|
|
||||||
|
**Figure 1**: Schematic overview of Solana economic incentive design.
|
@@ -0,0 +1,5 @@
|
|||||||
|
### Replication-client Reward Auto-delegation
|
||||||
|
|
||||||
|
The ability for Solana network participants to earn rewards by providing storage service is a unique on-boarding path that requires little hardware overhead and minimal upfront capital. It offers an avenue for individuals with extra storage space on their home laptops or PCs to contribute to the security of the network and become integrated into the Solana economy.
|
||||||
|
|
||||||
|
To enhance this on-boarding ramp and facilitate further participation and investment in the Solana economy, replication-clients have the opportunity to auto-delegate their rewards to validation-clients of their choice. Much like the automatic reinvestment of stock dividends, in this scenario, a replicator-client can earn Solana tokens by providing some storage capacity to the network (i.e. via submitting valid PoReps), have the protocol-based rewards automatically assigned as delegation to a staked validator node and therefore earning interest in the validation-client reward pool.
|
5
book/src/ed_rce_storage_replication_rewards.md
Normal file
@@ -0,0 +1,5 @@
### Storage-replication Rewards

Replicator-clients download, encrypt and submit PoReps for ledger block sections.<sup>3</sup> PoReps submitted to the PoH stream, and subsequently validated, function as evidence that the submitting replicator client is indeed storing the assigned ledger block sections on local hard drive space as a service to the network. Therefore, replicator clients should earn protocol rewards proportional to the amount of storage, and the number of successfully validated PoReps, that they are verifiably providing to the network.

Additionally, replicator clients have the opportunity to capture a portion of slashed bounties [TBD] of dishonest validator clients. This can be accomplished by a replicator client submitting a verifiably false PoRep that a dishonest validator client accepts and signs as valid. This reward incentive is meant to prevent lazy validators and minimize validator-replicator collusion attacks; more on this below.

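As a rough illustration of the proportionality described above, the following sketch splits a hypothetical per-epoch replication reward pool across replicators by their count of successfully validated PoReps. The pool size, identifiers and the simple proportional rule are assumptions for illustration, not protocol-defined values:

```rust
/// A minimal sketch: divide an epoch's replication reward pool among
/// replicators in proportion to their validated PoRep counts. The pool
/// size and the proportional rule are illustrative assumptions.
fn replication_rewards(
    pool_lamports: u64,
    validated_poreps: &[(&str, u64)], // (replicator id, validated PoRep count)
) -> Vec<(String, u64)> {
    let total: u64 = validated_poreps.iter().map(|(_, n)| n).sum();
    if total == 0 {
        return Vec::new();
    }
    validated_poreps
        .iter()
        .map(|(id, n)| (id.to_string(), pool_lamports * n / total))
        .collect()
}

fn main() {
    // Two replicators storing different amounts of ledger data.
    let rewards = replication_rewards(1_000_000, &[("alice", 30), ("bob", 10)]);
    println!("{:?}", rewards); // [("alice", 750000), ("bob", 250000)]
}
```
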
@@ -1,6 +1,7 @@
## References

1. [https://blog.ethereum.org/2016/07/27/inflation-transaction-fees-cryptocurrency-monetary-policy/](https://blog.ethereum.org/2016/07/27/inflation-transaction-fees-cryptocurrency-monetary-policy/)

2. [https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281](https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281)

3. [https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281](https://medium.com/solana-labs/how-to-create-decentralized-storage-for-a-multi-petabyte-digital-ledger-2499a3a8c281)

3
book/src/ed_replication_client_economics.md
Normal file
@@ -0,0 +1,3 @@
## Replication-client Economics

Replication-clients should be rewarded for providing the network with storage space. Incentivization of the set of replicators provides data security through redundancy of the historical ledger. Replication nodes are rewarded in proportion to the amount of ledger data storage provided. These rewards are captured by generating and entering Proofs of Replication (PoReps) into the PoH stream, which can be validated by validation nodes as described above in the [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md) chapter.

3
book/src/ed_validation_client_economics.md
Normal file
@@ -0,0 +1,3 @@
## Validation-client Economics

Validator-clients are eligible to receive protocol-based (i.e. via inflation) rewards issued via stake-based annual interest rates (calculated per epoch) by providing compute (CPU+GPU) resources to validate and vote on a given PoH state. These protocol-based rewards are determined through an algorithmic disinflationary schedule as a function of the total amount of circulating tokens. Additionally, these clients may earn revenue through fees via state-validation transactions and Proof-of-Replication (PoRep) transactions. For clarity, we separately describe the design and motivation of these revenue distributions for validation-clients below: state-validation protocol-based rewards, state-validation transaction fees and rent, and PoRep-validation transaction fees.

@@ -0,0 +1,9 @@
### Replication-validation Transaction Fees

As previously mentioned, validator-clients will also be responsible for validating PoReps submitted into the PoH stream by replicator-clients. In this case, validators are providing compute (CPU/GPU) and light storage resources to confirm that these replication proofs could only be generated by a client that is storing the referenced PoH ledger block.<sup>2</sup>

While replication-clients are incentivized and rewarded through a protocol-based rewards schedule (see [Replication-client Economics](ed_replication_client_economics.md)), validator-clients will be incentivized to include and validate PoReps in PoH through collection of transaction fees associated with the submitted PoReps and distribution of protocol rewards proportional to the validated PoReps. As will be described in detail in Section 3.1, replication-client rewards are protocol-based and designed to reward based on a global data redundancy factor. I.e. the protocol will incentivize replication-client participation through rewards based on a target ledger redundancy (e.g. 10x data redundancy).

The validation of PoReps by validation-clients is computationally more expensive than state-validation (detailed in the [Economic Sustainability](ed_economic_sustainability.md) chapter), thus the transaction fees are expected to be proportionally higher.

There are various attack vectors available to colluding validation and replication clients, as described in detail below in [Economic Sustainability](ed_economic_sustainability.md). To protect against various collusion attack vectors, for a given epoch, validator rewards are distributed across participating validation-clients in proportion to the number of validated PoReps in the epoch, less the number of PoReps that mismatch the replicator's challenge. The PoRep challenge game is described in [Ledger Replication](https://github.com/solana-labs/solana/blob/master/book/src/ledger-replication.md#the-porep-game). This design rewards validators in proportion to the number of PoReps they process and validate, while providing negative pressure against validation-clients submitting lazy or malicious invalid votes on submitted PoReps (note that it is computationally prohibitive to determine whether a validator-client has marked a valid PoRep as invalid).

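The per-epoch distribution rule above (validated PoReps less mismatched ones) can be sketched as follows; the pool size, field names and subtraction-based score are illustrative assumptions, not protocol constants:

```rust
/// Illustrative sketch of per-epoch PoRep reward distribution: each
/// validator's share is proportional to (validated - mismatched) PoReps.
struct PorepTally {
    validator: String,
    validated: u64,
    mismatched: u64, // PoReps that failed the replicator's challenge
}

fn porep_rewards(pool_lamports: u64, tallies: &[PorepTally]) -> Vec<(String, u64)> {
    let score = |t: &PorepTally| t.validated.saturating_sub(t.mismatched);
    let total: u64 = tallies.iter().map(score).sum();
    if total == 0 {
        return Vec::new();
    }
    tallies
        .iter()
        .map(|t| (t.validator.clone(), pool_lamports * score(t) / total))
        .collect()
}

fn main() {
    let tallies = vec![
        PorepTally { validator: "v1".into(), validated: 90, mismatched: 10 },
        PorepTally { validator: "v2".into(), validated: 20, mismatched: 0 },
    ];
    // v1 scores 80, v2 scores 20, so they split the pool 80%/20%.
    println!("{:?}", porep_rewards(1_000_000, &tallies));
}
```
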
40
book/src/ed_vce_state_validation_protocol_based_rewards.md
Normal file
@@ -0,0 +1,40 @@
### State-validation protocol-based rewards

Validator-clients have two functional roles in the Solana network:

* Validate (vote on) the current global state of the PoH, along with any Proofs-of-Replication (see [Replication-client Economics](ed_replication_client_economics.md)) that they are eligible to validate.

* Be elected as ‘leader’ on a stake-weighted round-robin schedule, during which time they are responsible for collecting outstanding transactions and Proofs-of-Replication and incorporating them into the PoH, thus updating the global state of the network and providing chain continuity.

Validator-client rewards for these services are to be distributed at the end of each Solana epoch. Compensation for validator-clients is provided via a protocol-based annual inflation rate, dispersed in proportion to the stake-weight of each validator (see below), along with leader-claimed transaction fees available during each leader rotation. I.e. during the time a given validator-client is elected as leader, it has the opportunity to keep a portion of each transaction fee, less a protocol-specified amount that is destroyed (see [State-validation Transaction Fees](ed_vce_state_validation_transaction_fees.md)). PoRep transaction fees are also collected by the leader client, and validator PoRep rewards are distributed in proportion to the number of validated PoReps less the number of PoReps that mismatch a replicator's challenge (see [Replication-validation Transaction Fees](ed_vce_replication_validation_transaction_fees.md)).

The effective protocol-based annual interest rate (%) per epoch to be distributed to validation-clients is to be a function of:

* the current global inflation rate, derived from the pre-determined disinflationary issuance schedule,

* the fraction of staked SOLs out of the current total circulating supply,

* the up-time/participation [% of available slots on which a given validator had the opportunity to vote] over the previous epoch.

The first factor is a function of protocol parameters only (i.e. independent of validator behavior in a given epoch) and results in a global validation reward schedule designed to incentivize early participation, provide clear monetary stability and provide optimal security in the network.

At any given point in time, a specific validator's interest rate can be determined based on the proportion of circulating supply that is staked by the network and the validator's uptime/activity in the previous epoch. For an illustrative example, consider a hypothetical instance of the network with an initial circulating token supply of 250MM tokens, with an additional 250MM vesting over 3 years. Additionally, an inflation rate of 7.5% is specified at network launch, along with a disinflationary schedule of 20% decrease in inflation rate per year (the actual rates to be implemented are to be worked out during the testnet experimentation phase preceding mainnet launch). With these broad assumptions, the 10-year inflation rate (adjusted daily for this example) is shown in **Figure 2**, while the total circulating token supply is illustrated in **Figure 3**. Neglected in this toy model is the inflation suppression due to the portion of each transaction fee that is to be destroyed.

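The toy schedule behind Figures 2 and 3 can be reproduced in a few lines. This sketch uses only the example's stated assumptions (7.5% initial rate, 20% annual disinflation toward the long-term 1.5% floor shown in Figure 2, 250MM initial supply) and compounds yearly rather than daily, so its numbers will differ slightly from the figures:

```rust
fn main() {
    let mut rate = 0.075; // 7.5% initial annual inflation (example assumption)
    let floor = 0.015; // long-term 1.5% rate from Figure 2
    let mut supply = 250_000_000.0; // initial circulating supply, ignoring vesting

    for year in 1..=10 {
        supply *= 1.0 + rate; // yearly compounding; the figures compound daily
        println!(
            "year {:2}: inflation {:.2}%, supply {:.0}M",
            year,
            rate * 100.0,
            supply / 1e6
        );
        rate = (rate * 0.8).max(floor); // 20% disinflation per year, floored
    }
}
```
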
<p style="text-align:center;"><img src="img/p_ex_schedule.png" alt="drawing" width="800"/></p>

**Figure 2:** In this example schedule, the annual inflation rate [%] reduces by around 20% per year, until it reaches the long-term, fixed, 1.5% rate.

<p style="text-align:center;"><img src="img/p_ex_supply.png" alt="drawing" width="800"/></p>

**Figure 3:** The total token supply over a 10-year period, based on an initial 250MM tokens with the disinflationary schedule shown in **Figure 2**.

Over time, the interest rate, at a fixed network staked percentage, will reduce concordant with network inflation. Validation-client interest rates are designed to be higher in the early days of the network to incentivize participation and jumpstart the network economy. As previously mentioned, the inflation rate is expected to stabilize near 1-2%, which also results in a fixed, long-term interest rate provided to validator-clients. This value does not represent the total interest available to validator-clients, as transaction fees for both state-validation and ledger storage replication (PoReps) are not accounted for here.

Given these example parameters, annualized validator-specific interest rates can be determined based on the global fraction of tokens bonded as stake, as well as their uptime/activity in the previous epoch. For the purpose of this example, we assume 100% uptime for all validators and a split in interest-based rewards between validators and replicator nodes of 80%/20%. Additionally, the fraction of staked circulating supply is assumed to be constant. Based on these assumptions, an annualized validation-client interest rate schedule, as a function of the % of circulating token supply that is staked, is shown in **Figure 4**.

<!--  -->

<p style="text-align:center;"><img src="img/p_ex_interest.png" alt="drawing" width="800"/></p>

**Figure 4:** Shown here are example validator interest rates over time, neglecting transaction fees, segmented by fraction of total circulating supply bonded as stake.

This epoch-specific, protocol-defined interest rate sets an upper limit on the *protocol-generated* annual interest rate (not the absolute total interest rate) that can be delivered to any validator-client per epoch. The distributed interest rate per epoch is then discounted from this value based on the participation of the validator-client during the previous epoch.

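Under the example's assumptions, a single validator's annualized protocol-generated rate can be sketched as the global inflation rate, scaled by the assumed 80% validator share, spread across the staked fraction of supply, and discounted by uptime. The multiplicative uptime discount and the function shape are illustrative assumptions, not a protocol formula:

```rust
/// Sketch of the example's validator interest rate, not a protocol formula:
/// global inflation * validator share of rewards / staked fraction,
/// discounted by the validator's uptime over the previous epoch.
fn validator_rate(inflation: f64, staked_fraction: f64, uptime: f64) -> f64 {
    let validator_share = 0.8; // 80%/20% validator/replicator split assumed above
    inflation * validator_share / staked_fraction * uptime
}

fn main() {
    // 7.5% inflation, 50% of supply staked, perfect uptime -> 12% annualized.
    println!("{:.2}%", validator_rate(0.075, 0.5, 1.0) * 100.0);
}
```
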
20
book/src/ed_vce_state_validation_transaction_fees.md
Normal file
@@ -0,0 +1,20 @@
### State-validation Transaction Fees

Each transaction sent through the network, to be processed by the current leader validation-client and confirmed as a global state transaction, must contain a transaction fee. Transaction fees offer many benefits in the Solana economic design; for example they:

* provide unit compensation to the validator network for the CPU/GPU resources necessary to process the state transaction,

* reduce network spam by introducing a real cost to transactions,

* open avenues for a transaction market to incentivize validation-clients to collect and process submitted transactions in their function as leader,

* and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below.

Many current blockchain economies (e.g. Bitcoin, Ethereum) rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol-derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above, and to replication-clients, as discussed below.

Transaction fees are set by the network cluster based on recent historical throughput; see [Congestion Driven Fees](transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical gas usage. In this way, the protocol can use the minimum fee to target a desired hardware utilization. By monitoring protocol-specified gas usage with respect to a desired target usage amount, the minimum fee can be raised/lowered, which should, in turn, lower/raise the actual gas usage per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction-processing hardware usage to a desired level.

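A minimal sketch of such a feedback loop follows. The step size, floor, and signal names are assumptions for illustration, not the protocol's actual congestion-driven fee algorithm:

```rust
/// Illustrative multiplicative controller: nudge the minimum fee toward a
/// target per-block gas usage, analogous in spirit to Bitcoin's difficulty
/// adjustment. The constants are made up for this sketch.
fn adjust_min_fee(min_fee: u64, observed_gas: u64, target_gas: u64) -> u64 {
    const STEP_NUM: u64 = 105; // +/- 5% per adjustment
    const STEP_DEN: u64 = 100;
    const FEE_FLOOR: u64 = 1;

    if observed_gas > target_gas {
        min_fee * STEP_NUM / STEP_DEN // usage too high: raise fee to damp demand
    } else {
        (min_fee * STEP_DEN / STEP_NUM).max(FEE_FLOOR) // usage low: lower fee
    }
}

fn main() {
    // Usage above target nudges the fee up; below target nudges it down.
    assert_eq!(adjust_min_fee(100, 1_200, 1_000), 105);
    assert_eq!(adjust_min_fee(100, 800, 1_000), 95);
}
```
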
As mentioned, a fixed proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation-limiting mechanism that protects against "tax evasion" attacks (i.e. side-channel fee payments)<sup>[1](ed_referenced.md)</sup>.

Additionally, the burnt fees can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total fees destroyed to be less than on a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves, thus potentially reducing the incentive to censor in the first place.

@@ -1,32 +1,29 @@
### Validation Stake Delegation

**Subject to change.**

Running a Solana validation-client requires relatively modest upfront hardware capital investment. **Table 2** provides an example hardware configuration to support ~1M tx/s with estimated ‘off-the-shelf’ costs:

| Component | Example | Estimated Cost |
| :--- | :--- | :--- |
| GPU | 2x 2080 Ti | $2500 |
| or | 4x 1080 Ti | $2800 |
| OS/Ledger Storage | Samsung 860 Evo 2TB | $370 |
| Accounts storage | 2x Samsung 970 Pro M.2 512GB | $340 |
| RAM | 32 Gb | $300 |
| Motherboard | AMD x399 | $400 |
| CPU | AMD Threadripper 2920x | $650 |
| Case | | $100 |
| Power supply | EVGA 1600W | $300 |
| Network | > 500 mbps | |
| Network (1) | Google webpass business bay area 1gbps unlimited | $5500/mo |
| Network (2) | Hurricane Electric bay area colo 1gbps | $500/mo |

**Table 2** example high-end hardware setup for running a Solana client.

Despite the low barrier to entry as a validation-client, from a capital investment perspective, as in any developing economy, there will be much opportunity and need for trusted validation services, as evidenced by node reliability, UX/UI, APIs and other software accessibility tools. Additionally, although Solana’s validator node startup costs are nominal when compared to similar networks, they may still be somewhat restrictive for some potential participants. In the spirit of developing a truly decentralized, permissionless network, these interested parties still have two options to become involved in the Solana network/economy:

1. Delegation of previously acquired tokens to a reliable validation node to earn a portion of the interest generated
2. Provide local storage space as a replication-client and receive rewards by submitting Proofs-of-Replication (see [Replication-client Economics](ed_replication_client_economics.md)).

   a. This participant has the additional option to directly delegate their earned storage rewards ([Replication-client Reward Auto-delegation](ed_rce_replication_client_reward_auto_delegation.md))

Delegation of tokens to validation-clients, via option 1, provides a way for passive Solana token holders to become part of the active Solana economy and earn interest rates proportional to the interest rate generated by the delegated validation-client. Additionally, this feature creates a healthy validation-client market, with potential validation-client nodes competing to build reliable, transparent and profitable delegation services.

66
book/src/embedding-move.md
Normal file
@@ -0,0 +1,66 @@
# Embedding the Move Language

## Problem

Solana enables developers to write on-chain programs in general purpose programming languages such as C or Rust, but those programs contain Solana-specific mechanisms. For example, there isn't another chain that asks developers to create a Rust module with a `process_instruction(KeyedAccounts)` function. Whenever practical, Solana should offer dApp developers more portable options.

Until just recently, no popular blockchain offered a language that could expose the value of Solana's massively parallel [runtime](runtime.md). Solidity contracts, for example, do not separate references to shared data from contract code, and therefore need to be executed serially to ensure deterministic behavior. In practice we see that the most aggressively optimized EVM-based blockchains all seem to peak out around 1,200 TPS - a small fraction of what Solana can do. The Libra project, on the other hand, designed an on-chain programming language called Move that is more suitable for parallel execution. Like Solana's runtime, Move programs depend on accounts for all shared state.

The biggest design difference between Solana's runtime and Libra's Move VM is how they manage safe invocations between modules. Solana took an operating systems approach and Libra took the domain-specific language approach. In the runtime, a module must trap back into the runtime to ensure the caller's module did not write to data owned by the callee. Likewise, when the callee completes, it must again trap back to the runtime to ensure the callee did not write to data owned by the caller. Move, on the other hand, includes an advanced type system that allows these checks to be run by its bytecode verifier. Because Move bytecode can be verified, the cost of verification is paid just once, at the time the module is loaded on-chain. In the runtime, the cost is paid each time a transaction crosses between modules. The difference is similar in spirit to the difference between a dynamically-typed language like Python versus a statically-typed language like Java. Solana's runtime allows dApps to be written in general purpose programming languages, but that comes with the cost of runtime checks when jumping between programs.

This proposal attempts to define a way to embed the Move VM such that:

* cross-module invocations within Move do not require the runtime's cross-program runtime checks
* Move programs can leverage functionality in other Solana programs and vice versa
* Solana's runtime parallelism is exposed to batches of Move and non-Move transactions

## Proposed Solution

### Move VM as a Solana loader

The Move VM shall be embedded as a Solana loader under the identifier `MOVE_PROGRAM_ID`, so that Move modules can be marked as `executable` with the VM as their `owner`. This will allow modules to load module dependencies, as well as allow for parallel execution of Move scripts.

All data accounts owned by Move modules must set their owners to the loader, `MOVE_PROGRAM_ID`. Since Move modules encapsulate their account data in the same way Solana programs encapsulate theirs, the Move module owner should be embedded in the account data. The runtime will grant write access to the Move VM, and Move grants access to the module accounts.

### Interacting with Solana programs

To invoke instructions in non-Move programs, Solana would need to extend the Move VM with a `process_instruction()` system call. It would work the same as `process_instruction()` in Rust BPF programs.

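For readers unfamiliar with the `process_instruction(KeyedAccounts)` convention referenced above, a stripped-down sketch of its shape follows. The types here are simplified stand-ins, not the actual SDK definitions:

```rust
// Simplified stand-ins for the SDK types; the real definitions live in the
// Solana SDK and carry more fields (signer flags, lamports, owners, ...).
struct Account {
    data: Vec<u8>,
}

struct KeyedAccount<'a> {
    key: [u8; 32], // account public key
    account: &'a mut Account,
}

// The Solana-specific entrypoint convention the proposal refers to: a module
// receives the accounts named by the transaction plus opaque instruction data.
fn process_instruction(
    keyed_accounts: &mut [KeyedAccount],
    instruction_data: &[u8],
) -> Result<(), String> {
    // A trivial "program": write the instruction data into the first account.
    let account = keyed_accounts
        .get_mut(0)
        .ok_or_else(|| "expected at least one account".to_string())?;
    account.account.data = instruction_data.to_vec();
    Ok(())
}

fn main() {
    let mut acct = Account { data: vec![] };
    let mut keyed = [KeyedAccount { key: [0; 32], account: &mut acct }];
    process_instruction(&mut keyed, b"hello").unwrap();
    assert_eq!(keyed[0].account.data, b"hello");
}
```
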
104
book/src/fork-generation.md
Normal file
@@ -0,0 +1,104 @@
# Fork Generation

This chapter describes how forks naturally occur as a consequence of [leader rotation](leader-rotation.md).

## Overview

Nodes take turns being leader and generating the PoH that encodes state changes. The cluster can tolerate loss of connection to any leader by synthesizing what the leader ***would*** have generated had it been connected but not ingesting any state changes. The possible number of forks is thereby limited to a "there/not-there" skip list of forks that may arise on leader rotation slot boundaries. At any given slot, only a single leader's transactions will be accepted.

## Message Flow

1. Transactions are ingested by the current leader.
2. Leader filters valid transactions.
3. Leader executes valid transactions updating its state.
4. Leader packages transactions into entries based off its current PoH slot.
5. Leader transmits the entries to validator nodes (in signed blobs)
   1. The PoH stream includes ticks; empty entries that indicate liveness of the leader and the passage of time on the cluster.
   2. A leader's stream begins with the tick entries necessary to complete the PoH back to the leader's most recently observed prior leader slot.
6. Validators retransmit entries to peers in their set and to further downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. At specific times, i.e. specific PoH tick counts, validators transmit votes to the leader.
   1. Votes are signatures of the hash of the computed state at that PoH tick count
   2. Votes are also propagated via gossip
10. Leader executes the votes as any other transaction and broadcasts them to the cluster.
11. Validators observe their votes and all the votes from the cluster.

## Partitions, Forks

Forks can arise at PoH tick counts that correspond to a vote. The next leader may not have observed the last vote slot and may start their slot with generated virtual PoH entries. These empty ticks are generated by all nodes in the cluster at a cluster-configured rate of `Z` hashes per tick.

There are only two possible versions of the PoH during a voting slot: PoH with `T` ticks and entries generated by the current leader, or PoH with just ticks. The "just ticks" version of the PoH can be thought of as a virtual ledger, one that all nodes in the cluster can derive from the last tick in the previous slot.

Validators can ignore forks at other points (e.g. from the wrong leader), or slash the leader responsible for the fork.

Validators vote based on a greedy choice to maximize their reward, as described in [Tower BFT](tower-bft.md).

### Validator's View

#### Time Progression

The diagram below represents a validator's view of the PoH stream with possible forks over time. L1, L2, etc. are leader slots, and `E`s represent entries from that leader during that leader's slot. The `x`s represent ticks only, and time flows downwards in the diagram.

<img alt="Fork generation" src="img/fork-generation.svg" class="center"/>

Note that an `E` appearing on 2 forks at the same slot is a slashable condition, so a validator observing `E3` and `E3'` can slash L3 and safely choose `x` for that slot. Once a validator commits to a fork, other forks can be discarded below that tick count. For any slot, validators need only consider a single "has entries" chain or a "ticks only" chain to be proposed by a leader. But multiple virtual entries may overlap as they link back to a previous slot.

#### Time Division

It's useful to consider leader rotation over PoH tick count as time division of the job of encoding state for the cluster. The following table presents the above tree of forks as a time-divided ledger.

leader slot | L1 | L2 | L3 | L4 | L5
-------|----|----|----|----|----
data | E1 | E2 | E3 | E4 | E5
ticks since prev | | | | x | xx

Note that only data from leader L3 will be accepted during leader slot L3. Data from L3 may include "catchup" ticks back to a slot other than L2 if L3 did not observe L2's data. L4 and L5's transmissions include the "ticks to prev" PoH entries.

This arrangement of the network data streams permits nodes to save exactly this to the ledger for replay, restart, and checkpoints.

### Leader's View

When a new leader begins a slot, it must first transmit any PoH (ticks) required to link the new slot with the most recently observed and voted slot. The fork the leader proposes would link the current slot to a previous fork that the leader has voted on with virtual ticks.

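The number of linking ticks a new leader must transmit follows from the slot geometry. The sketch below assumes a fixed, cluster-configured `ticks_per_slot` and hypothetical variable names; it is an illustration of the arithmetic, not the validator's actual code:

```rust
/// Sketch: how many "catchup" ticks a new leader must produce to link its
/// slot back to the last slot it observed and voted on. The fixed
/// `ticks_per_slot` and the slot numbering are illustrative assumptions.
fn linking_ticks(last_voted_slot: u64, current_slot: u64, ticks_per_slot: u64) -> u64 {
    let skipped_slots = current_slot - last_voted_slot - 1; // slots with no observed entries
    skipped_slots * ticks_per_slot
}

fn main() {
    // If the leader for slot 5 last observed and voted on slot 2, slots 3 and 4
    // are filled with virtual ticks: 2 slots * 64 ticks = 128 linking ticks.
    assert_eq!(linking_ticks(2, 5, 64), 128);
}
```
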
168
book/src/getting-started.md
Normal file
@@ -0,0 +1,168 @@
# Getting Started

The Solana git repository contains all the scripts you might need to spin up your own local testnet. Depending on what you're looking to achieve, you may want to run a different variation, as the full-fledged, performance-enhanced multinode testnet is considerably more complex to set up than a Rust-only, singlenode testnet. If you are looking to develop high-level features, such as experimenting with smart contracts, save yourself some setup headaches and stick to the Rust-only singlenode demo. If you're doing performance optimization of the transaction pipeline, consider the enhanced singlenode demo. If you're doing consensus work, you'll need at least a Rust-only multinode demo. If you want to reproduce our TPS metrics, run the enhanced multinode demo.

For all four variations, you'd need the latest Rust toolchain and the Solana source code:

First, install Rust's package manager Cargo.

```bash
$ curl https://sh.rustup.rs -sSf | sh
$ source $HOME/.cargo/env
```

Now check out the code from GitHub:

```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```

The demo code is sometimes broken between releases as we add new low-level features, so if this is your first time running the demo, you'll improve your odds of success if you check out the [latest release](https://github.com/solana-labs/solana/releases) before proceeding:

```bash
$ TAG=$(git describe --tags $(git rev-list --tags --max-count=1))
$ git checkout $TAG
```

### Configuration Setup

Ensure important programs such as the vote program are built before any nodes are started:

```bash
$ cargo build --all
```

The network is initialized with a genesis ledger generated by running the following script:

```bash
$ ./multinode-demo/setup.sh
```

### Drone

In order for the fullnodes and clients to work, we'll need to spin up a drone to give out some test tokens. The drone delivers Milton Friedman-style "air drops" (free tokens to requesting clients) to be used in test transactions.

Start the drone with:

```bash
$ ./multinode-demo/drone.sh
```

### Singlenode Testnet

Before you start a validator, make sure you know the IP address of the machine you want to be the bootstrap leader for the demo, and make sure that UDP ports 8000-10000 are open on all the machines you want to test with.

Now start the bootstrap leader in a separate shell:

```bash
$ ./multinode-demo/bootstrap-leader.sh
```

Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to receive transactions. The leader will request some tokens from the drone if it doesn't have any. The drone does not need to be running for subsequent leader starts.

### Multinode Testnet

To run a multinode testnet, after starting a leader node, spin up some additional validators in separate shells:

```bash
$ ./multinode-demo/validator-x.sh
```

To run a performance-enhanced full node on Linux, [CUDA 10.0](https://developer.nvidia.com/cuda-downloads) must be installed on your system:

```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/bootstrap-leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh
```

### Testnet Client Demo

Now that your singlenode or multinode testnet is up and running, let's send it some transactions!

In a separate shell start the client:

```bash
$ ./multinode-demo/client.sh # runs against localhost by default
```

What just happened? The client demo spins up several threads to send 500,000 transactions to the testnet as quickly as it can. The client then pings the testnet periodically to see how many transactions it processed in that time. Take note that the demo intentionally floods the network with UDP packets, such that the network will almost certainly drop a bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client demo completes after it has convinced itself the testnet won't process any additional transactions. You should see several TPS measurements printed to the screen. In the multinode variation, you'll see TPS measurements for each validator node as well.

### Testnet Debugging

There are some useful debug messages in the code; you can enable them on a per-module and per-level basis. Before running a leader or validator, set the normal RUST\_LOG environment variable.

For example:

* To enable `info` everywhere and `debug` only in the solana::banking_stage module:

```bash
$ export RUST_LOG=solana=info,solana::banking_stage=debug
```

* To enable BPF program logging:

```bash
$ export RUST_LOG=solana_bpf_loader=trace
```

Generally we are using `debug` for infrequent debug messages, `trace` for potentially frequent messages and `info` for performance-related logging.

You can also attach to a running process with GDB. The leader's process is named _solana-validator_:

```bash
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```

This will dump all the threads' stack traces into gdb.txt.

## Public Testnet

In this example the client connects to our public testnet. To run validators on the testnet you would need to open UDP ports `8000-10000`.

```bash
$ ./multinode-demo/client.sh --entrypoint testnet.solana.com:8001 --drone testnet.solana.com:9900 --duration 60 --tx_count 50
```

You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)

128
book/src/gossip.md
Normal file
@@ -0,0 +1,128 @@
# Gossip Service

The Gossip Service acts as a gateway to nodes in the control plane. Validators use the service to ensure information is available to all other nodes in a cluster. The service broadcasts information using a gossip protocol.

## Gossip Overview

Nodes continuously share signed data objects among themselves in order to manage a cluster. For example, they share their contact information, ledger height, and votes.

Every tenth of a second, each node sends a "push" message and/or a "pull" message. Push and pull messages may elicit responses, and push messages may be forwarded on to others in the cluster.

Gossip runs on a well-known UDP/IP port or a port in a well-known range. Once a cluster is bootstrapped, nodes advertise to each other where to find their gossip endpoint (a socket address).

## Gossip Records

Records shared over gossip are arbitrary, but signed and versioned (with a timestamp) as needed to make sense to the node receiving them. If a node receives two records from the same source, it updates its own copy with the record with the most recent timestamp.

## Gossip Service Interface

### Push Message

A node sends a push message to tell the cluster it has information to share. Nodes send push messages to `PUSH_FANOUT` push peers.

Upon receiving a push message, a node examines the message for:

1. Duplication: if the message has been seen before, the node drops the message and may respond with `PushMessagePrune` if it was forwarded from a low-staked node

2. New data: if the message is new to the node (a minimal sketch of this bookkeeping follows the list)
   * Stores the new information with an updated version in its cluster info and purges any previous older value
   * Stores the message in `pushed_once` (used for detecting duplicates, purged after `PUSH_MSG_TIMEOUT * 5` ms)
   * Retransmits the messages to its own push peers

3. Expiration: nodes drop push messages that are older than `PUSH_MSG_TIMEOUT`

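The duplicate and expiry handling above can be sketched with a timestamped map. The concrete `PUSH_MSG_TIMEOUT` value, the hash type and the structure names are illustrative assumptions about the real implementation:

```rust
use std::collections::HashMap;

/// Illustrative sketch of push-message bookkeeping: drop duplicates and
/// expired messages, remember new ones for later duplicate detection.
const PUSH_MSG_TIMEOUT_MS: u64 = 30_000; // assumed value for the sketch

struct PushTracker {
    pushed_once: HashMap<u64, u64>, // message hash -> first-seen wallclock (ms)
}

impl PushTracker {
    /// Returns true if the message should be stored and retransmitted.
    fn should_accept(&mut self, msg_hash: u64, msg_wallclock_ms: u64, now_ms: u64) -> bool {
        // Expiration: drop messages older than PUSH_MSG_TIMEOUT.
        if now_ms.saturating_sub(msg_wallclock_ms) > PUSH_MSG_TIMEOUT_MS {
            return false;
        }
        // Duplication: drop anything already seen; the caller may respond
        // with PushMessagePrune here.
        if self.pushed_once.contains_key(&msg_hash) {
            return false;
        }
        // Entries themselves are purged after PUSH_MSG_TIMEOUT * 5 (not shown).
        self.pushed_once.insert(msg_hash, now_ms);
        true
    }
}
```
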
### Push Peers, Prune Message

A node selects its push peers at random from the active set of known peers. The node keeps this selection for a relatively long time. When a prune message is received, the node drops the push peer that sent the prune. Prune is an indication that there is another, higher stake-weighted path to that node than direct push.

The set of push peers is kept fresh by rotating a new node into the set every `PUSH_MSG_TIMEOUT/2` milliseconds.

### Pull Message

A node sends a pull message to ask the cluster if there is any new information. A pull message is sent to a single peer at random and comprises a Bloom filter that represents things it already has. A node receiving a pull message iterates over its values and constructs a pull response of things that miss the filter and would fit in a message.

A node constructs the pull Bloom filter by iterating over current values and recently purged values.

A node handles items in a pull response the same way it handles new data in a push message.

## Purging

Nodes retain prior versions of values (those updated by a pull or push) and expired values (those older than `GOSSIP_PULL_CRDS_TIMEOUT_MS`) in `purged_values` (things I recently had). Nodes purge `purged_values` that are older than `5 * GOSSIP_PULL_CRDS_TIMEOUT_MS`.

## Eclipse Attacks

An eclipse attack is an attempt to take over the set of node connections with adversarial endpoints.

This is relevant to our implementation in the following ways.

* Pull messages select a random node from the network. An eclipse attack on *pull* would require an attacker to influence the random selection in such a way that only adversarial nodes are selected for pull.

* Push messages maintain an active set of nodes and select a random fanout for every push message. An eclipse attack on *push* would influence the active set selection, or the random fanout selection.

### Time and Stake based weights

Weights are calculated based on `time since last picked` and the `natural log` of the `stake weight`.

Taking the `ln` of the stake weight allows giving all nodes a fairer chance of network coverage in a reasonable amount of time. It helps normalize the large possible `stake weight` differences between nodes. This way a node with low `stake weight`, compared to a node with large `stake weight`, will only have to wait a few multiples of ln(`stake`) seconds before it gets picked.

There is no way for an adversary to influence these parameters.

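A sketch of such a weighting follows. Since the text above only names the two inputs, the multiplicative way of combining them here is an assumption for illustration:

```rust
/// Illustrative selection weight combining time since last picked with the
/// natural log of stake. The multiplicative form is an assumption; the text
/// only specifies the two inputs.
fn selection_weight(secs_since_last_picked: f64, stake: f64) -> f64 {
    // ln(1) = 0 would zero out low-stake nodes, so clamp the stake term to >= 1.
    let stake_term = stake.max(1.0).ln().max(1.0);
    secs_since_last_picked * stake_term
}

fn main() {
    // A low-stake node that has waited a while can outweigh a whale that
    // was just picked: 60s * ln(100) ~ 276 vs 10s * ln(1_000_000) ~ 138.
    println!(
        "{:.0} vs {:.0}",
        selection_weight(60.0, 100.0),
        selection_weight(10.0, 1_000_000.0)
    );
}
```
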
### Pull Message

A node is selected as a pull target based on the weights described above.

### Push Message

A prune message can only remove an adversary from a potential connection.

Just like *pull message*, nodes are selected into the active set based on weights.

## Notable differences from PlumTree

The active push protocol described here is based on [Plum Tree](https://haslab.uminho.pt/jop/files/lpr07a.pdf). The main differences are:

* Push messages have a wallclock that is signed by the originator. Once the wallclock expires the message is dropped. A hop limit is difficult to implement in an adversarial setting.

* Lazy Push is not implemented because it's not obvious how to prevent an adversary from forging the message fingerprint. A naive approach would allow an adversary to be prioritized for pull based on their input.

BIN
book/src/img/validation_client_interest_rates.png
Normal file

@@ -1,4 +1,3 @@
# Implemented Design Proposals

The following design proposals are fully implemented.

@@ -1,59 +1,59 @@
## Cluster Software Installation and Updates

Currently users are required to build the solana cluster software themselves from the git repository and manually update it, which is error prone and inconvenient.

This document proposes an easy to use software install and updater that can be used to deploy pre-built binaries for supported platforms. Users may elect to use binaries supplied by Solana or any other party they trust. Deployment of updates is managed using an on-chain update manifest program.

### Motivating Examples

#### Fetch and run a pre-built installer using a bootstrap curl/shell script

The easiest install method for supported platforms:

```bash
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh
```

This script will check github for the latest tagged release and download and run the `solana-install-init` binary from there.

If additional arguments need to be specified during the installation, the following shell syntax is used:

```bash
$ init_args=.... # arguments for `solana-install-init ...`
$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - ${init_args}
```

#### Fetch and run a pre-built installer from a Github release

With a well-known release URL, a pre-built binary can be obtained for supported platforms:

```bash
$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.18.0/solana-install-init-x86_64-apple-darwin
$ chmod +x ./solana-install-init
$ ./solana-install-init --help
```

#### Build and run the installer from source

If a pre-built binary is not available for a given platform, building the installer from source is always an option:

```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana/install
$ cargo run -- --help
```

#### Deploy a new update to a cluster

Given a solana release tarball (as created by `ci/publish-tarball.sh`) that has already been uploaded to a publicly accessible URL, the following commands will deploy the update:

```bash
$ solana-keygen new -o update-manifest.json # <-- only generated once, the public key is shared with users
$ solana-install deploy http://example.com/path/to/solana-release.tar.bz2 update-manifest.json
```

#### Run a validator node that auto updates itself

```bash
$ solana-install init --pubkey 92DMonmBYXwEMHJ99c9ceRSpAmk9v6i3RdvDdXaVcrfj # <-- pubkey is obtained from whoever is deploying the updates
$ export PATH=~/.local/share/solana-install/bin:$PATH
$ solana-keygen ... # <-- runs the latest solana-keygen
$ solana-install run solana-validator ... # <-- runs a validator, restarting it as necessary when an update is applied
```

### On-chain Update Manifest

An update manifest is used to advertise the deployment of new release tarballs on a solana cluster. The update manifest is stored using the `config` program, and each update manifest account describes a logical update channel for a given target triple (e.g. `x86_64-apple-darwin`). The account public key is well-known between the entity deploying new updates and users consuming those updates.

The update tarball itself is hosted elsewhere, off-chain, and can be fetched from the specified `download_url`.

```rust,ignore
use solana_sdk::signature::Signature;

/// Information required to download and apply a given update
pub struct UpdateManifest {
    // ... fields elided in this diff ...
    pub download_sha256: String, // SHA256 digest of the release tar.bz2 file
}

/// Data of an Update Manifest program Account.
#[derive(Serialize, Deserialize, Default, Debug, PartialEq)]
pub struct SignedUpdateManifest {
    pub manifest: UpdateManifest,
    pub manifest_signature: Signature,
}
```

Note that the `manifest` field itself contains a corresponding signature (`manifest_signature`) to guard against man-in-the-middle attacks between the `solana-install` tool and the solana cluster RPC API.

To guard against rollback attacks, `solana-install` will refuse to install an update with an older `timestamp_secs` than what is currently installed.

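The integrity and rollback guards can be sketched as follows. This is a simplified illustration, not the actual `solana-install` implementation: the function name is made up, the `sha2` crate is an assumed dependency, and the `manifest_signature` verification is omitted:

```rust
use sha2::{Digest, Sha256}; // external `sha2` crate, assumed for this sketch

/// Sketch of two download-time guards described above: verify the tarball
/// digest against the manifest, and refuse rollbacks by timestamp.
fn update_checks_pass(
    tarball: &[u8],
    manifest_sha256: &str, // UpdateManifest::download_sha256
    manifest_timestamp_secs: u64,
    installed_timestamp_secs: u64,
) -> bool {
    // Rollback guard: refuse manifests that are not strictly newer.
    if manifest_timestamp_secs <= installed_timestamp_secs {
        return false;
    }
    // Integrity guard: the downloaded tar.bz2 must match download_sha256.
    let digest = Sha256::digest(tarball);
    let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
    hex == manifest_sha256.to_lowercase()
}
```
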
|
||||||
|
|
||||||
## Release Archive Contents
|
### Release Archive Contents
|
||||||
|
A release archive is expected to be a tar file compressed with
|
||||||
A release archive is expected to be a tar file compressed with bzip2 with the following internal structure:
|
bzip2 with the following internal structure:
|
||||||
|
|
||||||
* `/version.yml` - a simple YAML file containing the field `"target"` - the
  target tuple. Any additional fields are ignored (see the sketch after this
  list).
* `/bin/` -- directory containing available programs in the release.
  `solana-install` will symlink this directory to
  `~/.local/share/solana-install/bin` for use by the `PATH` environment
  variable.
* `...` -- any additional files and directories are permitted
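For illustration, checking `/version.yml` against the host triple might look like this sketch (using `serde_yaml`, an assumed dependency; the real validation in `solana-install` may differ):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct VersionYml {
    target: String, // the target tuple, e.g. "x86_64-apple-darwin"
    // Unknown YAML fields are ignored by serde by default, matching the spec.
}

/// Returns true when the archive's /version.yml matches the host triple.
fn target_matches(version_yml: &str, host_triple: &str) -> Result<bool, serde_yaml::Error> {
    let v: VersionYml = serde_yaml::from_str(version_yml)?;
    Ok(v.target == host_triple)
}
```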
## solana-install Tool

The `solana-install` tool is used by the user to install and update their cluster software.

It manages the following files and directories in the user's home directory:

* `~/.config/solana/install/config.yml` - user configuration and information about currently installed software version
* `~/.local/share/solana/install/bin` - a symlink to the current release, e.g., `~/.local/share/solana-update/<update-pubkey>-<manifest_signature>/bin`
* `~/.local/share/solana/install/releases/<download_sha256>/` - contents of a release
### Command-line Interface

```text
solana-install 0.16.0
The solana cluster software installer

@@ -142,7 +141,7 @@ SUBCOMMANDS:
    update    checks for an update, and if available downloads and applies it
```
```text
solana-install-init
initializes a new installation

@@ -154,11 +153,11 @@ FLAGS:

OPTIONS:
    -d, --data_dir <PATH>    Directory to store install data [default: .../Library/Application Support/solana]
    -u, --url <URL>          JSON RPC URL for the solana cluster [default: http://testnet.solana.com:8899]
    -p, --pubkey <PUBKEY>    Public key of the update manifest [default: 9XX329sPuskWhH4DQh6k16c87dHKhXLBZTL3Gxmve8Gp]
```
```text
solana-install-info
displays information about the current installation

@@ -170,7 +169,7 @@ FLAGS:
    -l, --local    only display local information, don't check the cluster for new updates
```
```text
solana-install-deploy
deploys a new update

@@ -185,7 +184,7 @@ ARGS:
    <update_manifest_keypair>    Keypair file for the update manifest (/path/to/keypair.json)
```
```text
solana-install-update
checks for an update, and if available downloads and applies it

@@ -196,7 +195,7 @@ FLAGS:
    -h, --help    Prints help information
```
```text
solana-install-run
Runs a program while periodically checking and applying software updates

@@ -212,4 +211,3 @@ ARGS:

The program will be restarted upon a successful software update
```
25 book/src/instruction-api.md (new file)
@@ -0,0 +1,25 @@
# Instructions

For the purposes of building a [Transaction](transaction.md), a more
verbose instruction format is used:

* **Instruction:**
  * **program_id:** The pubkey of the on-chain program that executes the
    instruction
  * **accounts:** An ordered list of accounts that should be passed to
    the program processing the instruction, including metadata detailing
    if an account is a signer of the transaction and if it is a
    credit-only account.
  * **data:** A byte array that is passed to the program executing the
    instruction

A more compact form is actually included in a `Transaction` (see the sketch
after this list):

* **CompiledInstruction:**
  * **program_id_index:** The index of the `program_id` in the
    `account_keys` list
  * **accounts:** An ordered list of indices into `account_keys`
    specifying the accounts that should be passed to the program
    processing the instruction.
  * **data:** A byte array that is passed to the program executing the
    instruction
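A rough sketch of the two shapes, with field names following the lists above (illustrative only, not the exact `solana-sdk` definitions):

```rust
/// Sketch of the verbose form (not the exact solana-sdk definitions).
pub struct AccountMeta {
    pub pubkey: [u8; 32],   // account address
    pub is_signer: bool,    // does this account sign the transaction?
    pub is_debitable: bool, // false means the account is credit-only
}

pub struct Instruction {
    pub program_id: [u8; 32],       // on-chain program that executes this
    pub accounts: Vec<AccountMeta>, // ordered accounts plus their metadata
    pub data: Vec<u8>,              // opaque bytes handed to the program
}

/// Sketch of the compact form actually stored inside a `Transaction`.
pub struct CompiledInstruction {
    pub program_id_index: u8, // index of the program id in `account_keys`
    pub accounts: Vec<u8>,    // indices into the `account_keys` list
    pub data: Vec<u8>,        // the same opaque bytes as above
}
```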
117 book/src/introduction.md (new file)
@@ -0,0 +1,117 @@
# What is Solana?

Solana is an open source project implementing a new, high-performance,
permissionless blockchain. Solana is also the name of a company headquartered
in San Francisco that maintains the open source project.

# About this Book

This book describes the Solana open source project, a blockchain built from the
ground up for scale. The book covers why Solana is useful, how to use it, how it
works, and why it will continue to work long after the company Solana closes
its doors. The goal of the Solana architecture is to demonstrate there exists a
set of software algorithms that, when used in combination to implement a
blockchain, removes software as a performance bottleneck, allowing transaction
throughput to scale proportionally with network bandwidth. The architecture
goes on to satisfy all three desirable properties of a proper blockchain:
it is scalable, secure and decentralized.

The architecture describes a theoretical upper bound of 710 thousand
transactions per second (tps) on a standard gigabit network and 28.4 million
tps on 40 gigabit. Furthermore, the architecture supports safe, concurrent
execution of programs authored in general-purpose programming languages such as
C or Rust.

# Disclaimer

All claims, content, designs, algorithms, estimates, roadmaps, specifications,
and performance measurements described in this project are done with the
author's best effort. It is up to the reader to check and validate their
accuracy and truthfulness. Furthermore, nothing in this project constitutes a
solicitation for investment.

# History of the Solana Codebase

In November of 2017, Anatoly Yakovenko published a whitepaper describing Proof
of History, a technique for keeping time between computers that do not trust
one another. From Anatoly's previous experience designing distributed systems
at Qualcomm, Mesosphere and Dropbox, he knew that a reliable clock makes
network synchronization very simple. When synchronization is simple, the
resulting network can be blazing fast, bound only by network bandwidth.

Anatoly watched as blockchain systems without clocks, such as Bitcoin and
Ethereum, struggled to scale beyond 15 transactions per second worldwide while
centralized payment systems such as Visa required peaks of 65,000 tps. Without
a clock, it was clear they'd never graduate to being the global payment system
or global supercomputer most had dreamed them to be. When Anatoly solved the
problem of getting computers that don't trust each other to agree on time, he
knew he had the key to bring 40 years of distributed systems research to the
world of blockchain. The resulting cluster wouldn't be just 10 times faster, or
100 times, or 1,000 times, but 10,000 times faster, right out of the gate!

Anatoly's implementation began in a private codebase and was implemented in the
C programming language. Greg Fitzgerald, who had previously worked with Anatoly
at semiconductor giant Qualcomm Incorporated, encouraged him to reimplement the
project in the Rust programming language. Greg had worked on the LLVM compiler
infrastructure, which underlies both the Clang C/C++ compiler and the Rust
compiler. Greg claimed that the language's safety guarantees would improve
software productivity and that its lack of a garbage collector would allow
programs to perform as well as those written in C. Anatoly gave it a shot and,
just two weeks later, had migrated his entire codebase to Rust. Sold. With
plans to weave all the world's transactions together on a single, scalable
blockchain, Anatoly called the project Loom.

On February 13th of 2018, Greg began prototyping the first open source
implementation of Anatoly's whitepaper. The project was published to GitHub
under the name Silk in the loomprotocol organization. On February 28th, Greg
made his first release, demonstrating that 10 thousand signed transactions
could be verified and processed in just over half a second. Shortly after,
another former Qualcomm cohort, Stephen Akridge, demonstrated that throughput
could be massively improved by offloading signature verification to graphics
processors. Anatoly recruited Greg, Stephen and three others to co-found a
company, then called Loom.

Around the same time, the Ethereum-based project Loom Network sprang up, and
many people were confused about whether they were the same project. The Loom
team decided it would rebrand. They chose the name Solana, a nod to a small
beach town north of San Diego called Solana Beach, where Anatoly, Greg and
Stephen lived and surfed for three years while they worked for Qualcomm. On
March 28th, the team created the Solana Labs GitHub organization and renamed
Greg's prototype Silk to Solana.

In June of 2018, the team scaled up the technology to run on cloud-based
networks and, on July 19th, published a 50-node, permissioned, public testnet
consistently supporting bursts of 250,000 transactions per second. In a later
release in December, called v0.10 Pillbox, the team published a permissioned
testnet running 150 nodes on a gigabit network and demonstrated soak tests
processing an *average* of 200 thousand transactions per second with bursts
over 500 thousand. The project was also extended to support on-chain programs
written in the C programming language, run concurrently in a safe execution
environment called BPF.

# What is a Solana Cluster?

A cluster is a set of computers that work together and can be viewed from the
outside as a single system. A Solana cluster is a set of independently owned
computers working together (and sometimes against each other) to verify the
output of untrusted, user-submitted programs. A Solana cluster can be utilized
any time a user wants to preserve an immutable record of events in time or
programmatic interpretations of those events. One use is to track which of the
computers did meaningful work to keep the cluster running. Another use might be
to track the possession of real-world assets. In each case, the cluster
produces a record of events called the ledger. It will be preserved for the
lifetime of the cluster. As long as someone somewhere in the world maintains a
copy of the ledger, the output of its programs (which may contain a record of
who possesses what) will forever be reproducible, independent of the
organization that launched it.

# What are Sols?

A sol is the name of Solana's native token, which can be passed to nodes in a
Solana cluster in exchange for running an on-chain program or validating its
output. The Solana protocol defines that only 1 billion sols will ever exist,
but that the system may perform micropayments of fractional sols, and that a
sol may be split as many as 34 times. The fractional sol is called a *lamport*.
It is named in honor of Solana's biggest technical influence, [Leslie
Lamport](https://en.wikipedia.org/wiki/Leslie_Lamport). A lamport has a value
of approximately 0.0000000000582 sol (2^-34).
@@ -1,4 +1,3 @@
# JavaScript API

See [solana-web3](https://solana-labs.github.io/solana-web3.js/).
728 book/src/jsonrpc-api.md (new file)
@@ -0,0 +1,728 @@
JSON RPC API
===

Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification.

To interact with a Solana node inside a JavaScript application, use the [solana-web3.js](https://github.com/solana-labs/solana-web3.js) library, which gives a convenient interface for the RPC methods.

RPC HTTP Endpoint
---

**Default port:** 8899
e.g. http://localhost:8899, http://192.168.1.88:8899

RPC PubSub WebSocket Endpoint
---

**Default port:** 8900
e.g. ws://localhost:8900, ws://192.168.1.88:8900

Methods
---
* [confirmTransaction](#confirmtransaction)
* [getAccountInfo](#getaccountinfo)
* [getBalance](#getbalance)
* [getClusterNodes](#getclusternodes)
* [getEpochInfo](#getepochinfo)
* [getGenesisBlockhash](#getgenesisblockhash)
* [getLeaderSchedule](#getleaderschedule)
* [getProgramAccounts](#getprogramaccounts)
* [getRecentBlockhash](#getrecentblockhash)
* [getSignatureStatus](#getsignaturestatus)
* [getSlot](#getslot)
* [getSlotLeader](#getslotleader)
* [getSlotsPerSegment](#getslotspersegment)
* [getStorageTurn](#getstorageturn)
* [getStorageTurnRate](#getstorageturnrate)
* [getNumBlocksSinceSignatureConfirmation](#getnumblockssincesignatureconfirmation)
* [getTransactionCount](#gettransactioncount)
* [getTotalSupply](#gettotalsupply)
* [getVersion](#getversion)
* [getVoteAccounts](#getvoteaccounts)
* [requestAirdrop](#requestairdrop)
* [sendTransaction](#sendtransaction)
* [startSubscriptionChannel](#startsubscriptionchannel)

* [Subscription Websocket](#subscription-websocket)
  * [accountSubscribe](#accountsubscribe)
  * [accountUnsubscribe](#accountunsubscribe)
  * [programSubscribe](#programsubscribe)
  * [programUnsubscribe](#programunsubscribe)
  * [signatureSubscribe](#signaturesubscribe)
  * [signatureUnsubscribe](#signatureunsubscribe)
Request Formatting
---

To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields:

* `jsonrpc`, set to `"2.0"`
* `id`, a unique client-generated identifying integer
* `method`, a string containing the method to be invoked
* `params`, a JSON array of ordered parameter values

Example using curl:
```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]}' 192.168.1.88:8899
```

The response output will be a JSON object with the following fields:

* `jsonrpc`, matching the request specification
* `id`, matching the request identifier
* `result`, requested data or success confirmation

Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST.
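For illustration, a batch is assembled as a JSON array of request objects in one POST body; this sketch uses `serde_json` (an assumed dependency) and leaves the HTTP POST to any client:

```rust
use serde_json::{json, Value};

// Build a single getBalance request object with the given id.
fn balance_request(id: u64, pubkey: &str) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": id,
        "method": "getBalance",
        "params": [pubkey],
    })
}

fn main() {
    // A batch is just a JSON array of request objects sent in one POST.
    let batch = Value::Array(vec![
        balance_request(1, "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"),
        balance_request(2, "9XX329sPuskWhH4DQh6k16c87dHKhXLBZTL3Gxmve8Gp"),
    ]);
    println!("{}", batch);
}
```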
Definitions
---

* Hash: A SHA-256 hash of a chunk of data.
* Pubkey: The public key of an Ed25519 key-pair.
* Signature: An Ed25519 signature of a chunk of data.
* Transaction: A Solana instruction signed by a client key-pair.

JSON RPC API Reference
---
### confirmTransaction
Returns a transaction receipt

##### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string

##### Results:
* `boolean` - Transaction status, true if Transaction is confirmed

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"confirmTransaction", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":true,"id":1}
```

---
### getAccountInfo
Returns all information associated with the account of provided Pubkey

##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string

##### Results:
The result field will be a JSON object with the following sub fields (see the sketch after this list):

* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)
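Sketched as a Rust type for clarity (field types follow the list above; this is not an official SDK definition):

```rust
use serde::Deserialize;

/// Sketch of the result object's shape (not an official SDK type).
#[derive(Deserialize)]
struct AccountInfo {
    lamports: i64,    // lamports assigned to the account (signed 64-bit)
    owner: Vec<u8>,   // 32 bytes identifying the owning program
    data: Vec<u8>,    // raw bytes associated with the account
    executable: bool, // true if the account contains a program
}
```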
##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```

---
### getBalance
Returns the balance of the account of provided Pubkey

##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string

##### Results:
* `integer` - quantity, as a signed 64-bit integer

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":0,"id":1}
```

---
### getClusterNodes
Returns information about all the nodes participating in the cluster

##### Parameters:
None

##### Results:
The result field will be an array of JSON objects, each with the following sub fields:
* `pubkey` - Node public key, as base-58 encoded string
* `gossip` - Gossip network address for the node
* `tpu` - TPU network address for the node
* `rpc` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
```

---
### getEpochInfo
Returns information about the current epoch

##### Parameters:
None

##### Results:
The result field will be an object with the following fields:
* `epoch`, the current epoch
* `slotIndex`, the current slot relative to the start of the current epoch
* `slotsInEpoch`, the number of slots in this epoch

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"epoch":3,"slotIndex":126,"slotsInEpoch":256},"id":1}
```

---
### getGenesisBlockhash
Returns the genesis block hash

##### Parameters:
None

##### Results:
* `string` - a Hash as base-58 encoded string

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getGenesisBlockhash"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1}
```

---
### getLeaderSchedule
Returns the leader schedule for the current epoch

##### Parameters:
None

##### Results:
The result field will be an array of leader public keys (as base-58 encoded strings) for each slot in the current epoch

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":[...],"id":1}
```

---
### getProgramAccounts
Returns all accounts owned by the provided program Pubkey

##### Parameters:
* `string` - Pubkey of program, as base-58 encoded string

##### Results:
The result field will be an array of arrays. Each sub array will contain:
* `string` - the account Pubkey as base-58 encoded string

and a JSON object, with the following sub fields:

* `lamports`, number of lamports assigned to this account, as a signed 64-bit integer
* `owner`, array of 32 bytes representing the program this account has been assigned to
* `data`, array of bytes representing any data associated with the account
* `executable`, boolean indicating if the account contains a program (and is strictly read-only)

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["8nQwAgzN2yyUzrukXsCa3JELBYqDQrqJ3UyHiWazWxHR"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":[["BqGKYtAKu69ZdWEBtZHh4xgJY1BYa2YBiBReQE3pe383",{"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":1,"data":[]}],["4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T",{"executable":false,"owner":[50,28,250,90,221,24,94,136,147,165,253,136,1,62,196,215,225,34,222,212,99,84,202,223,245,13,149,99,149,231,91,96],"lamports":10,"data":[]}]],"id":1}
```

---
### getRecentBlockhash
Returns a recent block hash from the ledger, and a fee schedule that can be used to compute the cost of submitting a transaction using it. For example, a transaction requiring a single signature costs `lamportsPerSignature` lamports.

##### Parameters:
None

##### Results:
An array consisting of
* `string` - a Hash as base-58 encoded string
* `FeeCalculator object` - the fee schedule for this block hash

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC",{"lamportsPerSignature": 0}],"id":1}
```

---
### getSignatureStatus
Returns the status of a given signature. This method is similar to [confirmTransaction](#confirmtransaction) but provides more resolution for error events.

##### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string

##### Results:
* `null` - Unknown transaction
* `object` - Transaction status:
  * `"Ok": null` - Transaction was successful
  * `"Err": <ERR>` - Transaction failed with TransactionError `<ERR>` [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result (unknown transaction)
{"jsonrpc":"2.0","result":null,"id":1}
```

---
### getSlot
Returns the current slot the node is processing

##### Parameters:
None

##### Results:
* `u64` - Current slot

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlot"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":1234,"id":1}
```

---
### getSlotLeader
Returns the current slot leader

##### Parameters:
None

##### Results:
* `string` - Node Id as base-58 encoded string

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotLeader"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS","id":1}
```

---
### getSlotsPerSegment
Returns the current storage segment size in terms of slots

##### Parameters:
None

##### Results:
* `u64` - Number of slots in a storage segment

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":1024,"id":1}
```

---
### getStorageTurn
Returns the current storage turn's blockhash and slot

##### Parameters:
None

##### Results:
An array consisting of
* `string` - a Hash as base-58 encoded string indicating the blockhash of the turn slot
* `u64` - the current storage turn slot

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurn"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":["GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", 2048],"id":1}
```

---
### getStorageTurnRate
Returns the current storage turn rate in terms of slots per turn

##### Parameters:
None

##### Results:
* `u64` - Number of slots in a storage turn

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getStorageTurnRate"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":1024,"id":1}
```

---
### getNumBlocksSinceSignatureConfirmation
Returns the current number of blocks since the signature has been confirmed.

##### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string

##### Results:
* `integer` - count, as unsigned 64-bit integer

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getNumBlocksSinceSignatureConfirmation", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":8,"id":1}
```

---
### getTransactionCount
Returns the current Transaction count from the ledger

##### Parameters:
None

##### Results:
* `integer` - count, as unsigned 64-bit integer

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":268,"id":1}
```

---
### getTotalSupply
Returns the current total supply in lamports

##### Parameters:
None

##### Results:
* `integer` - Total supply, as unsigned 64-bit integer

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":10126,"id":1}
```

---
### getVersion
Returns the current solana version running on the node

##### Parameters:
None

##### Results:
The result field will be a JSON object with the following sub fields:
* `solana-core`, software version of solana-core

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"solana-core": "0.17.2"},"id":1}
```

---
### getVoteAccounts
Returns the account info and associated stake for all the voting accounts in the current bank.

##### Parameters:
None

##### Results:
The result field will be a JSON object of `current` and `delinquent` accounts, each containing an array of JSON objects with the following sub fields:
* `votePubkey` - Vote account public key, as base-58 encoded string
* `nodePubkey` - Node public key, as base-58 encoded string
* `activatedStake` - the stake, in lamports, delegated to this vote account and active in this epoch
* `epochVoteAccount` - bool, whether the vote account is staked for this epoch
* `commission`, an 8-bit integer used as a fraction (commission/MAX_U8) for rewards payout; for example, a `commission` of 127 corresponds to a payout fraction of 127/255, roughly one half
* `lastVote` - Most recent slot voted on by this vote account

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
```

---
### requestAirdrop
Requests an airdrop of lamports to a Pubkey

##### Parameters:
* `string` - Pubkey of account to receive lamports, as base-58 encoded string
* `integer` - lamports, as a signed 64-bit integer

##### Results:
* `string` - Transaction Signature of airdrop, as base-58 encoded string

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"requestAirdrop", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", 50]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW","id":1}
```

---
### sendTransaction
Creates a new transaction

##### Parameters:
* `array` - array of octets containing a fully-signed Transaction (see the sketch after this example)

##### Results:
* `string` - Transaction Signature, as base-58 encoded string

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":[[61, 98, 55, 49, 15, 187, 41, 215, 176, 49, 234, 229, 228, 77, 129, 221, 239, 88, 145, 227, 81, 158, 223, 123, 14, 229, 235, 247, 191, 115, 199, 71, 121, 17, 32, 67, 63, 209, 239, 160, 161, 2, 94, 105, 48, 159, 235, 235, 93, 98, 172, 97, 63, 197, 160, 164, 192, 20, 92, 111, 57, 145, 251, 6, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 13, 39, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 11, 12, 106, 49, 74, 226, 201, 16, 161, 192, 28, 84, 124, 97, 190, 201, 171, 186, 6, 18, 70, 142, 89, 185, 176, 154, 115, 61, 26, 163, 77, 1, 88, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
```
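As a sketch of how the bytes of a fully-signed transaction (however they were produced) map onto the `params` array, using `serde_json` as an assumed dependency:

```rust
use serde_json::{json, Value};

/// Wrap already-serialized, fully-signed transaction bytes into a
/// sendTransaction request body. serde_json renders `&[u8]` as a JSON
/// array of numbers, i.e. the array of octets this method expects.
fn send_transaction_body(wire_transaction: &[u8]) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "sendTransaction",
        "params": [wire_transaction],
    })
}
```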

---
### Subscription Websocket
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/`:
- Submit subscription requests to the websocket using the methods below
- Multiple subscriptions may be active at once
- All subscriptions take an optional `confirmations` parameter, which defines
  how many confirmed blocks the node should wait before sending a notification.
  The greater the number, the more likely the notification is to represent
  consensus across the cluster, and the less likely it is to be affected by
  forking or rollbacks. If unspecified, the default value is 0; the node will
  send a notification as soon as it witnesses the event. The maximum
  `confirmations` wait length is the cluster's `MAX_LOCKOUT_HISTORY`, which
  represents the economic finality of the chain; the rounding rule is sketched
  after this list.
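The "greater integers rounded down" rule amounts to a simple clamp; in this sketch the value 31 for `MAX_LOCKOUT_HISTORY` is an assumption for illustration only:

```rust
/// Sketch of the documented rounding rule for the optional `confirmations`
/// parameter: values above MAX_LOCKOUT_HISTORY are rounded down to it.
/// The constant's value here is an assumption for illustration.
const MAX_LOCKOUT_HISTORY: u64 = 31;

fn effective_confirmations(requested: Option<u64>) -> u64 {
    // Unspecified means 0: notify as soon as the node witnesses the event.
    requested.unwrap_or(0).min(MAX_LOCKOUT_HISTORY)
}
```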
---
### accountSubscribe
Subscribe to an account to receive notifications when the lamports or data for a given account public key changes

##### Parameters:
* `string` - account Pubkey, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification.
  Default: 0, Max: `MAX_LOCKOUT_HISTORY` (greater integers rounded down)

##### Results:
* `integer` - Subscription id (needed to unsubscribe)

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}

{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", 15]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```

---
### accountUnsubscribe
Unsubscribe from account change notifications

##### Parameters:
* `integer` - id of account Subscription to cancel

##### Results:
* `bool` - unsubscribe success message

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```

---
### programSubscribe
Subscribe to a program to receive notifications when the lamports or data for a given account owned by the program changes

##### Parameters:
* `string` - program_id Pubkey, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification.
  Default: 0, Max: `MAX_LOCKOUT_HISTORY` (greater integers rounded down)

##### Results:
* `integer` - Subscription id (needed to unsubscribe)

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV"]}

{"jsonrpc":"2.0", "id":1, "method":"programSubscribe", "params":["9gZbPtbtHrs6hEWgd6MbVY9VPFtS5Z8xKtnYwA2NynHV", 15]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

##### Notification Format:
* `string` - account Pubkey, as base-58 encoded string
* `object` - account info JSON object (see [getAccountInfo](#getaccountinfo) for field details)

```bash
{"jsonrpc":"2.0","method":"programNotification","params":{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
```

---
### programUnsubscribe
Unsubscribe from program-owned account change notifications

##### Parameters:
* `integer` - id of account Subscription to cancel

##### Results:
* `bool` - unsubscribe success message

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"programUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```

---
### signatureSubscribe
Subscribe to a transaction signature to receive notification when the transaction is confirmed.
On `signatureNotification`, the subscription is automatically cancelled.

##### Parameters:
* `string` - Transaction Signature, as base-58 encoded string
* `integer` - optional, number of confirmed blocks to wait before notification.
  Default: 0, Max: `MAX_LOCKOUT_HISTORY` (greater integers rounded down)

##### Results:
* `integer` - subscription id (needed to unsubscribe)

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}

{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", 15]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
```

---
### signatureUnsubscribe
Unsubscribe from signature confirmation notification

##### Parameters:
* `integer` - subscription id to cancel

##### Results:
* `bool` - unsubscribe success message

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```